author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 12:52:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 12:52:16 -0400
commit		e9f37d3a8d126e73f5737ef548cdf6f618e295e4
tree		831eb4952637828a7bbafa361185e0ca57aa86ed /drivers
parent		5fb6b953bb7aa86a9c8ea760934982cedc45c52b
parent		c39b06951f1dc2e384650288676c5b7dcc0ec92c
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

   - drm: Generic display port aux features, primary plane support, drm
     master management fixes, logging cleanups, enforced locking checks
     (instead of docs), documentation improvements, minor number handling
     cleanup, pseudofs for shared inodes.

   - ttm: add ability to allocate from both ends

   - i915: broadwell features, power domain and runtime pm, per-process
     address space infrastructure (not enabled)

   - msm: power management, hdmi audio support

   - nouveau: ongoing GPU fault recovery, initial maxwell support, random
     fixes

   - exynos: refactored driver to clean up a lot of abstraction, DP
     support moved into drm, LVDS bridge support added, parallel panel
     support

   - gma500: SGX MMU support, SGX irq handling, asle irq work fixes

   - radeon: video engine bringup, ring handling fixes, use dp aux
     helpers

   - vmwgfx: add rendernode support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (849 commits)
  DRM: armada: fix corruption while loading cursors
  drm/dp_helper: don't return EPROTO for defers (v2)
  drm/bridge: export ptn3460_init function
  drm/exynos: remove MODULE_DEVICE_TABLE definitions
  ARM: dts: exynos4412-trats2: enable exynos/fimd node
  ARM: dts: exynos4210-trats: enable exynos/fimd node
  ARM: dts: exynos4412-trats2: add panel node
  ARM: dts: exynos4210-trats: add panel node
  ARM: dts: exynos4: add MIPI DSI Master node
  drm/panel: add S6E8AA0 driver
  ARM: dts: exynos4210-universal_c210: add proper panel node
  drm/panel: add ld9040 driver
  panel/ld9040: add DT bindings
  panel/s6e8aa0: add DT bindings
  drm/exynos: add DSIM driver
  exynos/dsim: add DT bindings
  drm/exynos: disallow fbdev initialization if no device is connected
  drm/mipi_dsi: create dsi devices only for nodes with reg property
  drm/mipi_dsi: add flags to DSI messages
  Skip intel_crt_init for Dell XPS 8700
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 24
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 12
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/ptn3460.c | 350
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 10
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 936
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 245
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 38
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 396
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 136
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 34
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 45
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 121
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 71
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 38
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 7
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 285
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 346
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 333
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 110
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 503
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c (renamed from drivers/video/exynos/exynos_dp_core.c) | 304
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h (renamed from drivers/video/exynos/exynos_dp_core.h) | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_reg.c (renamed from drivers/video/exynos/exynos_dp_reg.c) | 0
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_reg.h (renamed from drivers/video/exynos/exynos_dp_reg.h) | 0
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 92
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 193
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 159
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 339
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 197
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 163
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1524
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 359
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 18
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 700
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 439
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 67
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 441
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 472
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 573
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.h | 20
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 2
-rw-r--r--  drivers/gpu/drm/gma500/blitter.c | 51
-rw-r--r--  drivers/gpu/drm/gma500/blitter.h | 22
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 40
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 73
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 56
-rw-r--r--  drivers/gpu/drm/gma500/gem.h | 21
-rw-r--r--  drivers/gpu/drm/gma500/gma_device.c | 56
-rw-r--r--  drivers/gpu/drm/gma500/gma_device.h | 21
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 23
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 45
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 297
-rw-r--r--  drivers/gpu/drm/gma500/mmu.h | 93
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 25
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 42
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 404
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 203
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 32
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 81
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 602
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 80
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 4
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 30
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 485
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 716
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 299
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 729
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 697
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 483
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 198
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1256
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 616
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 836
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 154
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 560
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 174
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1579
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 912
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 106
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 371
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 108
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1202
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 152
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 78
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 281
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 26
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 105
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 65
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 50
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 25
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 273
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 33
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 27
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 139
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 85
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/namedb.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/parent.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/gm100.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv10.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv20.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv30.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv40.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/gm107.c | 101
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 370
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 191
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 361
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 187
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/priv.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/falcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 509
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 507
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c | 989
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c | 1041
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 308
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h | 170
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c | 270
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c) | 78
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c | 216
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c | 278
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c | 285
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c | 782
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5 | 42
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h | 473
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h | 354
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h | 336
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h | 396
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h | 396
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h | 396
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5 | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h | 916
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/gm107.c | 465
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv108.c | 133
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 312
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 214
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c (renamed from drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c) | 86
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c | 175
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c | 154
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve4.c | 192
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/xtensa.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/namedb.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/device.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/graph.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/devinit.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/timer.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/os.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/therm.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c) | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c | 142
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/priv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 17
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 34
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-ld9040.c | 376
-rw-r--r--  drivers/gpu/drm/panel/panel-s6e8aa0.c | 1069
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 157
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 268
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 82
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 142
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 49
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 210
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 110
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 158
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 163
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 248
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 958
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 149
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 119
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 699
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 966
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 47
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/vce_v1_0.c | 187
-rw-r--r--  drivers/gpu/drm/radeon/vce_v2_0.c | 181
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 5
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tegra/bus.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 16
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 1
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 544
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.h | 73
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 19
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 20
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 20
-rw-r--r--  drivers/gpu/drm/tegra/dsi.h | 20
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 25
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 14
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.c | 14
-rw-r--r--  drivers/gpu/drm/tegra/mipi-phy.c | 20
-rw-r--r--  drivers/gpu/drm/tegra/mipi-phy.h | 20
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 8
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 1092
-rw-r--r--  drivers/gpu/drm/tegra/sor.h | 278
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 46
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 148
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 143
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 1
-rw-r--r--  drivers/staging/imx-drm/ipuv3-crtc.c | 6
-rw-r--r--  drivers/staging/imx-drm/ipuv3-plane.c | 2
-rw-r--r--  drivers/video/exynos/Kconfig | 7
-rw-r--r--  drivers/video/exynos/Makefile | 1
441 files changed, 35607 insertions(+), 15287 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 8e7fa4dbaed8..d1cc2f613a78 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -199,3 +199,5 @@ source "drivers/gpu/drm/msm/Kconfig"
 source "drivers/gpu/drm/tegra/Kconfig"
 
 source "drivers/gpu/drm/panel/Kconfig"
+
+source "drivers/gpu/drm/bridge/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 292a79d64146..9d25dbbe6771 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -13,7 +13,8 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
-		drm_rect.o drm_vma_manager.o drm_flip_work.o
+		drm_rect.o drm_vma_manager.o drm_flip_work.o \
+		drm_plane_helper.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -63,3 +64,4 @@ obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y += i2c/
 obj-y += panel/
+obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index d8e398275ca8..81c34f949dfc 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -478,11 +478,12 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	unsigned i;
 	bool interlaced;
 
-	drm_framebuffer_reference(crtc->fb);
+	drm_framebuffer_reference(crtc->primary->fb);
 
 	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
 
-	i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+	i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
+				    x, y, regs, interlaced);
 
 	rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
 	lm = adj->crtc_htotal - adj->crtc_hsync_end;
@@ -567,10 +568,10 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
-	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
-	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
 
-	if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+	if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
 		val |= CFG_PALETTE_ENA;
 
 	if (interlaced)
@@ -608,7 +609,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	struct armada_regs regs[4];
 	unsigned i;
 
-	i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+	i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
 				    dcrtc->interlaced);
 	armada_reg_queue_end(regs, i);
 
@@ -616,7 +617,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 		wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
 
 	/* Take a reference to the new fb as we're using it */
-	drm_framebuffer_reference(crtc->fb);
+	drm_framebuffer_reference(crtc->primary->fb);
 
 	/* Update the base in the CRTC */
 	armada_drm_crtc_update_regs(dcrtc, regs);
@@ -637,7 +638,7 @@ static void armada_drm_crtc_disable(struct drm_crtc *crtc)
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
 	armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+	armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true);
 
 	/* Power down most RAMs and FIFOs */
 	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
@@ -678,6 +679,7 @@ static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
 			       base + LCD_SPU_SRAM_WRDAT);
 		writel_relaxed(addr | SRAM_WRITE,
 			       base + LCD_SPU_SRAM_CTRL);
+		readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
 		addr += 1;
 		if ((addr & 0x00ff) == 0)
 			addr += 0xf00;
@@ -904,7 +906,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
 	int ret;
 
 	/* We don't support changing the pixel format */
-	if (fb->pixel_format != crtc->fb->pixel_format)
+	if (fb->pixel_format != crtc->primary->fb->pixel_format)
 		return -EINVAL;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -912,7 +914,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	work->event = event;
-	work->old_fb = dcrtc->crtc.fb;
+	work->old_fb = dcrtc->crtc.primary->fb;
 
 	i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
 				    dcrtc->interlaced);
@@ -941,7 +943,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
 	 * will _not_ drop that reference on successful return from this
 	 * function. Simply mark this new framebuffer as the current one.
 	 */
-	dcrtc->crtc.fb = fb;
+	dcrtc->crtc.primary->fb = fb;
 
 	/*
 	 * Finally, if the display is blanked, we won't receive an
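
[Editor's sketch] The armada hunks above are representative of a tree-wide change in this merge: with primary plane support in the core, a CRTC's scanout framebuffer moves from crtc->fb to crtc->primary->fb. The following is a compilable toy model of the accessor change only; the struct layouts are simplified stand-ins, not the real DRM definitions:

    #include <stdio.h>

    /* Stand-in types: real drivers use the kernel's drm_* structs. */
    struct drm_framebuffer { int width, height; };
    struct drm_plane { struct drm_framebuffer *fb; };
    struct drm_crtc { struct drm_plane *primary; };

    static void show_scanout(const struct drm_crtc *crtc)
    {
    	/* Old code read crtc->fb; new code goes through the primary plane. */
    	const struct drm_framebuffer *fb = crtc->primary->fb;
    	printf("scanout fb: %dx%d\n", fb->width, fb->height);
    }

    int main(void)
    {
    	struct drm_framebuffer fb = { 1920, 1080 };
    	struct drm_plane primary = { &fb };
    	struct drm_crtc crtc = { &primary };

    	show_scanout(&crtc);
    	return 0;
    }

The same substitution repeats mechanically in the ast, bochs, cirrus and drm core hunks below.
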
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cca063b11083..a4afdc8bb578 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -81,7 +81,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 	u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
 	u32 hborder, vborder;
 
-	switch (crtc->fb->bits_per_pixel) {
+	switch (crtc->primary->fb->bits_per_pixel) {
 	case 8:
 		vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
 		color_index = VGAModeIndex - 1;
@@ -176,7 +176,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
 
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
-	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
@@ -340,7 +340,7 @@ static void ast_set_offset_reg(struct drm_crtc *crtc)
 
 	u16 offset;
 
-	offset = crtc->fb->pitches[0] >> 3;
+	offset = crtc->primary->fb->pitches[0] >> 3;
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
 }
@@ -365,7 +365,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 	struct ast_private *ast = crtc->dev->dev_private;
 	u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
 
-	switch (crtc->fb->bits_per_pixel) {
+	switch (crtc->primary->fb->bits_per_pixel) {
 	case 8:
 		jregA0 = 0x70;
 		jregA3 = 0x01;
@@ -418,7 +418,7 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
 static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			    struct ast_vbios_mode_info *vbios_mode)
 {
-	switch (crtc->fb->bits_per_pixel) {
+	switch (crtc->primary->fb->bits_per_pixel) {
 	case 8:
 		break;
 	default:
@@ -490,7 +490,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
 		ast_bo_unreserve(bo);
 	}
 
-	ast_fb = to_ast_framebuffer(crtc->fb);
+	ast_fb = to_ast_framebuffer(crtc->primary->fb);
 	obj = ast_fb->obj;
 	bo = gem_to_ast_bo(obj);
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 4ea9b17ac17a..b8246227bab0 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -259,7 +259,9 @@ int ast_mm_init(struct ast_private *ast)
 
 	ret = ttm_bo_device_init(&ast->ttm.bdev,
 				 ast->ttm.bo_global_ref.ref.object,
-				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 &ast_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
 				 true);
 	if (ret) {
 		DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	astbo->bo.bdev = &ast->ttm.bdev;
-	astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
 	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
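
[Editor's sketch] The ast hunks above show the other recurring pattern in this merge: ttm_bo_device_init() now takes the DRM device's anon_inode mapping as an extra argument, so the per-BO dev_mapping assignment in the create path goes away. A minimal fragment of the new call shape; "priv" and "priv_bo_driver" are hypothetical placeholders for a driver's own TTM state, while the function, the mapping expression and DRM_FILE_PAGE_OFFSET come from the hunks themselves (the bochs and cirrus diffs below make the same change):

    /* Sketch only: "priv"/"priv_bo_driver" stand in for driver state. */
    ret = ttm_bo_device_init(&priv->ttm.bdev,
    			 priv->ttm.bo_global_ref.ref.object,
    			 &priv_bo_driver,
    			 dev->anon_inode->i_mapping,	/* new: mapping passed up front */
    			 DRM_FILE_PAGE_OFFSET,
    			 true);
    if (ret)
    	DRM_ERROR("Error initialising bo driver; %d\n", ret);
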
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 62ec7d4b3816..dcf2e55f4ae9 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -62,10 +62,10 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 		}
 	}
 
-	if (WARN_ON(crtc->fb == NULL))
+	if (WARN_ON(crtc->primary->fb == NULL))
 		return -EINVAL;
 
-	bochs_fb = to_bochs_framebuffer(crtc->fb);
+	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
 	bo = gem_to_bochs_bo(bochs_fb->obj);
 	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
 	if (ret)
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index ce6858765b37..f488be55d650 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -225,7 +225,9 @@ int bochs_mm_init(struct bochs_device *bochs)
 
 	ret = ttm_bo_device_init(&bochs->ttm.bdev,
 				 bochs->ttm.bo_global_ref.ref.object,
-				 &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 &bochs_bo_driver,
+				 bochs->dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
 				 true);
 	if (ret) {
 		DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -359,7 +361,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	bochsbo->bo.bdev = &bochs->ttm.bdev;
-	bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+	bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
 	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
new file mode 100644
index 000000000000..884923f982d9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -0,0 +1,5 @@
+config DRM_PTN3460
+	tristate "PTN3460 DP/LVDS bridge"
+	depends on DRM
+	select DRM_KMS_HELPER
+	---help---
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
new file mode 100644
index 000000000000..b4733e1fbd2e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -Iinclude/drm
+
+obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
new file mode 100644
index 000000000000..b171901a3553
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -0,0 +1,350 @@
+/*
+ * NXP PTN3460 DP/LVDS bridge driver
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "bridge/ptn3460.h"
+
+#define PTN3460_EDID_ADDR			0x0
+#define PTN3460_EDID_EMULATION_ADDR		0x84
+#define PTN3460_EDID_ENABLE_EMULATION		0
+#define PTN3460_EDID_EMULATION_SELECTION	1
+#define PTN3460_EDID_SRAM_LOAD_ADDR		0x85
+struct ptn3460_bridge {
+	struct drm_connector connector;
+	struct i2c_client *client;
+	struct drm_encoder *encoder;
+	struct drm_bridge *bridge;
+	struct edid *edid;
+	int gpio_pd_n;
+	int gpio_rst_n;
+	u32 edid_emulation;
+	bool enabled;
+};
+
+static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+		u8 *buf, int len)
+{
+	int ret;
+
+	ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+	if (ret <= 0) {
+		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = i2c_master_recv(ptn_bridge->client, buf, len);
+	if (ret <= 0) {
+		DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+		char val)
+{
+	int ret;
+	char buf[2];
+
+	buf[0] = addr;
+	buf[1] = val;
+
+	ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+	if (ret <= 0) {
+		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
+{
+	int ret;
+	char val;
+
+	/* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */
+	ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
+			ptn_bridge->edid_emulation);
+	if (ret) {
+		DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret);
+		return ret;
+	}
+
+	/* Enable EDID emulation and select the desired EDID */
+	val = 1 << PTN3460_EDID_ENABLE_EMULATION |
+		ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION;
+
+	ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
+	if (ret) {
+		DRM_ERROR("Failed to write edid value, ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ptn3460_pre_enable(struct drm_bridge *bridge)
+{
+	struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+	int ret;
+
+	if (ptn_bridge->enabled)
+		return;
+
+	if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+		gpio_set_value(ptn_bridge->gpio_pd_n, 1);
+
+	if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+		gpio_set_value(ptn_bridge->gpio_rst_n, 0);
+		udelay(10);
+		gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+	}
+
+	/*
+	 * There's a bug in the PTN chip where it falsely asserts hotplug before
+	 * it is fully functional. We're forced to wait for the maximum start up
+	 * time specified in the chip's datasheet to make sure we're really up.
+	 */
+	msleep(90);
+
+	ret = ptn3460_select_edid(ptn_bridge);
+	if (ret)
+		DRM_ERROR("Select edid failed ret=%d\n", ret);
+
+	ptn_bridge->enabled = true;
+}
+
+static void ptn3460_enable(struct drm_bridge *bridge)
+{
+}
+
+static void ptn3460_disable(struct drm_bridge *bridge)
+{
+	struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+	if (!ptn_bridge->enabled)
+		return;
+
+	ptn_bridge->enabled = false;
+
+	if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+		gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+
+	if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+		gpio_set_value(ptn_bridge->gpio_pd_n, 0);
+}
+
+static void ptn3460_post_disable(struct drm_bridge *bridge)
+{
+}
+
+void ptn3460_bridge_destroy(struct drm_bridge *bridge)
+{
+	struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+	drm_bridge_cleanup(bridge);
+	if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+		gpio_free(ptn_bridge->gpio_pd_n);
+	if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+		gpio_free(ptn_bridge->gpio_rst_n);
+	/* Nothing else to free, we've got devm allocated memory */
+}
+
+struct drm_bridge_funcs ptn3460_bridge_funcs = {
+	.pre_enable = ptn3460_pre_enable,
+	.enable = ptn3460_enable,
+	.disable = ptn3460_disable,
+	.post_disable = ptn3460_post_disable,
+	.destroy = ptn3460_bridge_destroy,
+};
+
+int ptn3460_get_modes(struct drm_connector *connector)
+{
+	struct ptn3460_bridge *ptn_bridge;
+	u8 *edid;
+	int ret, num_modes;
+	bool power_off;
+
+	ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+	if (ptn_bridge->edid)
+		return drm_add_edid_modes(connector, ptn_bridge->edid);
+
+	power_off = !ptn_bridge->enabled;
+	ptn3460_pre_enable(ptn_bridge->bridge);
+
+	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (!edid) {
+		DRM_ERROR("Failed to allocate edid\n");
+		return 0;
+	}
+
+	ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid,
+			EDID_LENGTH);
+	if (ret) {
+		kfree(edid);
+		num_modes = 0;
+		goto out;
+	}
+
+	ptn_bridge->edid = (struct edid *)edid;
+	drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+
+	num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
+
+out:
+	if (power_off)
+		ptn3460_disable(ptn_bridge->bridge);
+
+	return num_modes;
+}
+
+static int ptn3460_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
+{
+	struct ptn3460_bridge *ptn_bridge;
+
+	ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+	return ptn_bridge->encoder;
+}
+
+struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+	.get_modes = ptn3460_get_modes,
+	.mode_valid = ptn3460_mode_valid,
+	.best_encoder = ptn3460_best_encoder,
+};
+
+enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
+		bool force)
+{
+	return connector_status_connected;
+}
+
+void ptn3460_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+}
+
+struct drm_connector_funcs ptn3460_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = ptn3460_detect,
+	.destroy = ptn3460_connector_destroy,
+};
+
+int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
+		struct i2c_client *client, struct device_node *node)
+{
+	int ret;
+	struct drm_bridge *bridge;
+	struct ptn3460_bridge *ptn_bridge;
+
+	bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		DRM_ERROR("Failed to allocate drm bridge\n");
+		return -ENOMEM;
+	}
+
+	ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL);
+	if (!ptn_bridge) {
+		DRM_ERROR("Failed to allocate ptn bridge\n");
+		return -ENOMEM;
+	}
+
+	ptn_bridge->client = client;
+	ptn_bridge->encoder = encoder;
+	ptn_bridge->bridge = bridge;
+	ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0);
+	if (gpio_is_valid(ptn_bridge->gpio_pd_n)) {
+		ret = gpio_request_one(ptn_bridge->gpio_pd_n,
+				GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N");
+		if (ret) {
+			DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0);
+	if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+		/*
+		 * Request the reset pin low to avoid the bridge being
+		 * initialized prematurely
+		 */
+		ret = gpio_request_one(ptn_bridge->gpio_rst_n,
+				GPIOF_OUT_INIT_LOW, "PTN3460_RST_N");
+		if (ret) {
+			DRM_ERROR("Request reset-gpio failed (%d)\n", ret);
+			gpio_free(ptn_bridge->gpio_pd_n);
+			return ret;
+		}
+	}
+
+	ret = of_property_read_u32(node, "edid-emulation",
+			&ptn_bridge->edid_emulation);
+	if (ret) {
+		DRM_ERROR("Can't read edid emulation value\n");
+		goto err;
+	}
+
+	ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs);
+	if (ret) {
+		DRM_ERROR("Failed to initialize bridge with drm\n");
+		goto err;
+	}
+
+	bridge->driver_private = ptn_bridge;
+	encoder->bridge = bridge;
+
+	ret = drm_connector_init(dev, &ptn_bridge->connector,
+			&ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector with drm\n");
+		goto err;
+	}
+	drm_connector_helper_add(&ptn_bridge->connector,
+			&ptn3460_connector_helper_funcs);
+	drm_sysfs_connector_add(&ptn_bridge->connector);
+	drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
+
+	return 0;
+
+err:
+	if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+		gpio_free(ptn_bridge->gpio_pd_n);
+	if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+		gpio_free(ptn_bridge->gpio_rst_n);
+	return ret;
+}
+EXPORT_SYMBOL(ptn3460_init);
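
[Editor's sketch] ptn3460_init() is exported so that an encoder driver can hang the bridge and its LVDS connector off an encoder it already owns (the exynos DP work in this same merge is the in-tree user, per the "drm/bridge: export ptn3460_init function" commit in the shortlog). A hedged sketch of such a caller; everything except ptn3460_init() and the kernel types is a hypothetical name:

    #include "bridge/ptn3460.h"

    /* Hypothetical glue in an encoder driver: "bridge_node" would come
     * from the driver's device-tree parsing and "client" from an i2c
     * lookup; ptn3460_init() then registers the bridge + connector. */
    static int example_attach_ptn3460(struct drm_device *drm,
    				  struct drm_encoder *encoder,
    				  struct i2c_client *client,
    				  struct device_node *bridge_node)
    {
    	int ret;

    	ret = ptn3460_init(drm, encoder, client, bridge_node);
    	if (ret)
    		DRM_ERROR("Failed to attach PTN3460 bridge (%d)\n", ret);

    	return ret;
    }
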
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 530f78f84dee..2d64aea83df2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -149,7 +149,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
 		cirrus_bo_unreserve(bo);
 	}
 
-	cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+	cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
 	obj = cirrus_fb->obj;
 	bo = gem_to_cirrus_bo(obj);
 
@@ -268,7 +268,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
 	sr07 = RREG8(SEQ_DATA);
 	sr07 &= 0xe0;
 	hdr = 0;
-	switch (crtc->fb->bits_per_pixel) {
+	switch (crtc->primary->fb->bits_per_pixel) {
 	case 8:
 		sr07 |= 0x11;
 		break;
@@ -291,13 +291,13 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
 	WREG_SEQ(0x7, sr07);
 
 	/* Program the pitch */
-	tmp = crtc->fb->pitches[0] / 8;
+	tmp = crtc->primary->fb->pitches[0] / 8;
 	WREG_CRT(VGA_CRTC_OFFSET, tmp);
 
 	/* Enable extended blanking and pitch bits, and enable full memory */
 	tmp = 0x22;
-	tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
-	tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+	tmp |= (crtc->primary->fb->pitches[0] >> 7) & 0x10;
+	tmp |= (crtc->primary->fb->pitches[0] >> 6) & 0x40;
 	WREG_CRT(0x1b, tmp);
 
 	/* Enable high-colour modes */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 8b37c25ff9bd..92e6b7786097 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -259,7 +259,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 
 	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
 				 cirrus->ttm.bo_global_ref.ref.object,
-				 &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 &cirrus_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
 				 true);
 	if (ret) {
 		DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -329,7 +331,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
-	cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
 	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3b7d32da1604..d8b7099abece 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,12 +38,15 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_fourcc.h>
 
+#include "drm_crtc_internal.h"
+
 /**
  * drm_modeset_lock_all - take all modeset locks
  * @dev: drm device
  *
  * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented.
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
  */
 void drm_modeset_lock_all(struct drm_device *dev)
 {
@@ -59,6 +62,8 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
 /**
  * drm_modeset_unlock_all - drop all modeset locks
  * @dev: device
+ *
+ * This function drop all modeset locks taken by drm_modeset_lock_all.
  */
 void drm_modeset_unlock_all(struct drm_device *dev)
 {
@@ -74,6 +79,8 @@ EXPORT_SYMBOL(drm_modeset_unlock_all);
 /**
  * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
  * @dev: device
+ *
+ * Useful as a debug assert.
  */
 void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
@@ -114,6 +121,13 @@ static const struct drm_prop_enum_list drm_dpms_enum_list[] =
 
 DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
 
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
+{
+	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
+	{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
 /*
  * Optional properties
  */
@@ -215,6 +229,16 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
+{
+	{ SubPixelUnknown, "Unknown" },
+	{ SubPixelHorizontalRGB, "Horizontal RGB" },
+	{ SubPixelHorizontalBGR, "Horizontal BGR" },
+	{ SubPixelVerticalRGB, "Vertical RGB" },
+	{ SubPixelVerticalBGR, "Vertical BGR" },
+	{ SubPixelNone, "None" },
+};
+
 void drm_connector_ida_init(void)
 {
 	int i;
@@ -231,6 +255,15 @@ void drm_connector_ida_destroy(void)
 		ida_destroy(&drm_connector_enum_list[i].ida);
 }
 
+/**
+ * drm_get_encoder_name - return a string for encoder
+ * @encoder: encoder to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
 const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
 	static char buf[32];
@@ -242,6 +275,15 @@ const char *drm_get_encoder_name(const struct drm_encoder *encoder)
242} 275}
243EXPORT_SYMBOL(drm_get_encoder_name); 276EXPORT_SYMBOL(drm_get_encoder_name);
244 277
278/**
279 * drm_get_connector_name - return a string for connector
280 * @connector: connector to compute name of
281 *
282 * Note that the buffer used by this function is globally shared and owned by
283 * the function itself.
284 *
285 * FIXME: This isn't really multithreading safe.
286 */
245const char *drm_get_connector_name(const struct drm_connector *connector) 287const char *drm_get_connector_name(const struct drm_connector *connector)
246{ 288{
247 static char buf[32]; 289 static char buf[32];
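
To see why the FIXME matters, note that even a single caller aliases the shared buffer; a sketch assuming two connectors ca and cb:

	const char *a = drm_get_connector_name(ca);
	const char *b = drm_get_connector_name(cb);
	/* a == b: both point at the same static buf[32], which now holds
	 * cb's name - the first string has already been overwritten. */
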
@@ -253,6 +295,13 @@ const char *drm_get_connector_name(const struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_get_connector_name);
 
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
 const char *drm_get_connector_status_name(enum drm_connector_status status)
 {
 	if (status == connector_status_connected)
@@ -264,11 +313,33 @@ const char *drm_get_connector_status_name(enum drm_connector_status status)
 }
 EXPORT_SYMBOL(drm_get_connector_status_name);
 
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error. No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+	return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
 static char printable_char(int c)
 {
 	return isascii(c) && isprint(c) ? c : '?';
 }
 
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
 const char *drm_get_format_name(uint32_t format)
 {
 	static char buf[32];
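
Typical use of the new subpixel helper, assuming the enum comes from the connector's probed display info (which drivers fill in during detection):

	DRM_DEBUG_KMS("subpixel order: %s\n",
		      drm_get_subpixel_order_name(connector->display_info.subpixel_order));
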
@@ -293,14 +364,16 @@ EXPORT_SYMBOL(drm_get_format_name);
  * @obj_type: object type
  *
  * Create a unique identifier based on @ptr in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
  *
- * RETURNS:
+ * Returns:
  * New unique (relative to other objects in @dev) integer identifier for the
  * object.
  */
-static int drm_mode_object_get(struct drm_device *dev,
-			       struct drm_mode_object *obj, uint32_t obj_type)
+int drm_mode_object_get(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type)
 {
 	int ret;
 
@@ -324,10 +397,12 @@ static int drm_mode_object_get(struct drm_device *dev,
  * @dev: DRM device
  * @object: object to free
  *
- * Free @id from @dev's unique identifier pool.
+ * Free @id from @dev's unique identifier pool. Note that despite the _get
+ * postfix modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
  */
-static void drm_mode_object_put(struct drm_device *dev,
-				struct drm_mode_object *object)
+void drm_mode_object_put(struct drm_device *dev,
+			 struct drm_mode_object *object)
 {
 	mutex_lock(&dev->mode_config.idr_mutex);
 	idr_remove(&dev->mode_config.crtc_idr, object->id);
@@ -377,7 +452,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
  * since all the fb attributes are invariant over its lifetime, no further
  * locking but only correct reference counting is required.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, error code on failure.
  */
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
@@ -438,7 +513,7 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
  *
  * If successful, this grabs an additional reference to the framebuffer -
  * callers need to make sure to eventually unreference the returned framebuffer
- * again.
+ * again, using drm_framebuffer_unreference().
  */
 struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 					       uint32_t id)
@@ -471,6 +546,8 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
 /**
  * drm_framebuffer_reference - incr the fb refcnt
  * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
  */
 void drm_framebuffer_reference(struct drm_framebuffer *fb)
 {
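
The lookup/unreference pairing described in the comments above, as a short sketch:

	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* takes a reference */
	if (fb) {
		/* ... inspect or display the framebuffer ... */
		drm_framebuffer_unreference(fb);	/* drop it again */
	}
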
@@ -527,8 +604,9 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
  * drm_framebuffer_cleanup - remove a framebuffer object
  * @fb: framebuffer to remove
  *
- * Cleanup references to a user-created framebuffer. This function is intended
- * to be used from the drivers ->destroy callback.
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
  *
 * Note that this function does not remove the fb from active usage - if it is
 * still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -591,7 +669,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	drm_modeset_lock_all(dev);
 	/* remove from any CRTC */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->fb == fb) {
+		if (crtc->primary->fb == fb) {
 			/* should turn off the crtc */
 			memset(&set, 0, sizeof(struct drm_mode_set));
 			set.crtc = crtc;
@@ -614,18 +692,23 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 EXPORT_SYMBOL(drm_framebuffer_remove);
 
 /**
- * drm_crtc_init - Initialise a new CRTC object
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ *    specified primary and cursor planes.
  * @dev: DRM device
  * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
  * @funcs: callbacks for the new CRTC
  *
  * Inits a new object created as base part of a driver crtc object.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, error code on failure.
  */
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-		  const struct drm_crtc_funcs *funcs)
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+			      struct drm_plane *primary,
+			      void *cursor,
+			      const struct drm_crtc_funcs *funcs)
 {
 	int ret;
 
@@ -646,12 +729,16 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
 	dev->mode_config.num_crtc++;
 
+	crtc->primary = primary;
+	if (primary)
+		primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+
  out:
 	drm_modeset_unlock_all(dev);
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_crtc_init);
+EXPORT_SYMBOL(drm_crtc_init_with_planes);
 
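A sketch of the new driver-side bring-up call (the foo_* names are hypothetical driver code, and the cursor argument is still a void * placeholder at this point in the series):

	ret = drm_crtc_init_with_planes(dev, &foo->crtc,
					&foo->primary_plane,	/* initialised beforehand */
					NULL,			/* no cursor plane yet */
					&foo_crtc_funcs);
	if (ret)
		return ret;
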
 /**
  * drm_crtc_cleanup - Clean up the core crtc usage
@@ -697,20 +784,6 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_index);
 
-/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode
- * @mode: mode data
- *
- * Add @mode to @connector's mode list for later use.
- */
-void drm_mode_probed_add(struct drm_connector *connector,
-			 struct drm_display_mode *mode)
-{
-	list_add_tail(&mode->head, &connector->probed_modes);
-}
-EXPORT_SYMBOL(drm_mode_probed_add);
-
 /*
  * drm_mode_remove - remove and free a mode
  * @connector: connector list to modify
@@ -735,7 +808,7 @@ static void drm_mode_remove(struct drm_connector *connector,
  * Initialises a preallocated connector. Connectors should be
  * subclassed as part of driver connector objects.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, error code on failure.
  */
 int drm_connector_init(struct drm_device *dev,
@@ -813,6 +886,14 @@ void drm_connector_cleanup(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
 
+/**
+ * drm_connector_unplug_all - unregister connector userspace interfaces
+ * @dev: drm device
+ *
+ * This function unregisters all connector userspace interfaces in sysfs. Should
+ * be called when the device is disconnected, e.g. from a usb driver's
+ * ->disconnect callback.
+ */
 void drm_connector_unplug_all(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -824,6 +905,18 @@ void drm_connector_unplug_all(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_connector_unplug_all);
 
+/**
+ * drm_bridge_init - initialize a drm transcoder/bridge
+ * @dev: drm device
+ * @bridge: transcoder/bridge to set up
+ * @funcs: bridge function table
+ *
+ * Initialises a preallocated bridge. Bridges should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
 int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
 		    const struct drm_bridge_funcs *funcs)
 {
@@ -847,6 +940,12 @@ int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
 }
 EXPORT_SYMBOL(drm_bridge_init);
 
+/**
+ * drm_bridge_cleanup - cleans up an initialised bridge
+ * @bridge: bridge to cleanup
+ *
+ * Cleans up the bridge but doesn't free the object.
+ */
 void drm_bridge_cleanup(struct drm_bridge *bridge)
 {
 	struct drm_device *dev = bridge->dev;
@@ -859,6 +958,19 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL(drm_bridge_cleanup);
 
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder. Encoders should be
+ * subclassed as part of driver encoder objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
 int drm_encoder_init(struct drm_device *dev,
 		     struct drm_encoder *encoder,
 		     const struct drm_encoder_funcs *funcs,
@@ -886,6 +998,12 @@ int drm_encoder_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_encoder_init);
 
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
 void drm_encoder_cleanup(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -898,25 +1016,25 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
 /**
- * drm_plane_init - Initialise a new plane object
+ * drm_universal_plane_init - Initialize a new universal plane object
  * @dev: DRM device
  * @plane: plane object to init
  * @possible_crtcs: bitmask of possible CRTCs
  * @funcs: callbacks for the new plane
  * @formats: array of supported formats (%DRM_FORMAT_*)
  * @format_count: number of elements in @formats
- * @priv: plane is private (hidden from userspace)?
+ * @type: type of plane (overlay, primary, cursor)
  *
- * Inits a new object created as base part of a driver plane object.
+ * Initializes a plane object of type @type.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, error code on failure.
  */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
-		   unsigned long possible_crtcs,
-		   const struct drm_plane_funcs *funcs,
-		   const uint32_t *formats, uint32_t format_count,
-		   bool priv)
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+			     unsigned long possible_crtcs,
+			     const struct drm_plane_funcs *funcs,
+			     const uint32_t *formats, uint32_t format_count,
+			     enum drm_plane_type type)
 {
 	int ret;
 
@@ -941,23 +1059,53 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
 	plane->format_count = format_count;
 	plane->possible_crtcs = possible_crtcs;
+	plane->type = type;
 
-	/* private planes are not exposed to userspace, but depending on
-	 * display hardware, might be convenient to allow sharing programming
-	 * for the scanout engine with the crtc implementation.
-	 */
-	if (!priv) {
-		list_add_tail(&plane->head, &dev->mode_config.plane_list);
-		dev->mode_config.num_plane++;
-	} else {
-		INIT_LIST_HEAD(&plane->head);
-	}
+	list_add_tail(&plane->head, &dev->mode_config.plane_list);
+	dev->mode_config.num_total_plane++;
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		dev->mode_config.num_overlay_plane++;
+
+	drm_object_attach_property(&plane->base,
+				   dev->mode_config.plane_type_property,
+				   plane->type);
 
  out:
 	drm_modeset_unlock_all(dev);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_universal_plane_init);
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs,
+		   const struct drm_plane_funcs *funcs,
+		   const uint32_t *formats, uint32_t format_count,
+		   bool is_primary)
+{
+	enum drm_plane_type type;
+
+	type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+	return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+					formats, format_count, type);
+}
 EXPORT_SYMBOL(drm_plane_init);
 
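How a driver would create an explicit primary plane with the new entry point; the foo_* names and the format list are hypothetical, while the DRM_FORMAT_* codes and the helper itself come from this tree:

	static const uint32_t foo_formats[] = {
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_ARGB8888,
	};

	ret = drm_universal_plane_init(dev, &foo->primary_plane,
				       0x1,	/* possible on the first CRTC */
				       &foo_plane_funcs,
				       foo_formats, ARRAY_SIZE(foo_formats),
				       DRM_PLANE_TYPE_PRIMARY);
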
 /**
@@ -975,11 +1123,13 @@ void drm_plane_cleanup(struct drm_plane *plane)
 	drm_modeset_lock_all(dev);
 	kfree(plane->format_types);
 	drm_mode_object_put(dev, &plane->base);
-	/* if not added to a list, it must be a private plane */
-	if (!list_empty(&plane->head)) {
-		list_del(&plane->head);
-		dev->mode_config.num_plane--;
-	}
+
+	BUG_ON(list_empty(&plane->head));
+
+	list_del(&plane->head);
+	dev->mode_config.num_total_plane--;
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		dev->mode_config.num_overlay_plane--;
 	drm_modeset_unlock_all(dev);
 }
 EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1010,50 +1160,6 @@ void drm_plane_force_disable(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_plane_force_disable);
 
-/**
- * drm_mode_create - create a new display mode
- * @dev: DRM device
- *
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
- */
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-{
-	struct drm_display_mode *nmode;
-
-	nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
-	if (!nmode)
-		return NULL;
-
-	if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
-		kfree(nmode);
-		return NULL;
-	}
-
-	return nmode;
-}
-EXPORT_SYMBOL(drm_mode_create);
-
-/**
- * drm_mode_destroy - remove a mode
- * @dev: DRM device
- * @mode: mode to remove
- *
- * Free @mode's unique identifier, then free it.
- */
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-{
-	if (!mode)
-		return;
-
-	drm_mode_object_put(dev, &mode->base);
-
-	kfree(mode);
-}
-EXPORT_SYMBOL(drm_mode_destroy);
-
 static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 {
 	struct drm_property *edid;
@@ -1075,6 +1181,21 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 	return 0;
 }
 
+static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
+{
+	struct drm_property *type;
+
+	/*
+	 * Standard properties (apply to all planes)
+	 */
+	type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+					"type", drm_plane_type_enum_list,
+					ARRAY_SIZE(drm_plane_type_enum_list));
+	dev->mode_config.plane_type_property = type;
+
+	return 0;
+}
+
 /**
  * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
  * @dev: DRM device
@@ -1257,6 +1378,10 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
 	return 0;
 }
 
+/*
+ * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
+ * the drm core's responsibility to set up mode control groups.
+ */
 int drm_mode_group_init_legacy_group(struct drm_device *dev,
 				     struct drm_mode_group *group)
 {
@@ -1333,7 +1458,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
  * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
  * the caller.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 static int drm_crtc_convert_umode(struct drm_display_mode *out,
@@ -1376,7 +1501,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
  *
  * Called by the user via ioctl.
 *
- * RETURNS:
+ * Returns:
 * Zero on success, errno on failure.
 */
 int drm_mode_getresources(struct drm_device *dev, void *data,
@@ -1429,9 +1554,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	mutex_unlock(&file_priv->fbs_lock);
 
 	drm_modeset_lock_all(dev);
-	mode_group = &file_priv->master->minor->mode_group;
-	if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+	if (!drm_is_primary_client(file_priv)) {
 
+		mode_group = NULL;
 		list_for_each(lh, &dev->mode_config.crtc_list)
 			crtc_count++;
 
@@ -1442,6 +1567,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 			encoder_count++;
 	} else {
 
+		mode_group = &file_priv->master->minor->mode_group;
 		crtc_count = mode_group->num_crtcs;
 		connector_count = mode_group->num_connectors;
 		encoder_count = mode_group->num_encoders;
@@ -1456,7 +1582,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_crtcs >= crtc_count) {
 		copied = 0;
 		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+		if (!mode_group) {
 			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 					    head) {
 				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
@@ -1483,7 +1609,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_encoders >= encoder_count) {
 		copied = 0;
 		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+		if (!mode_group) {
 			list_for_each_entry(encoder,
 					    &dev->mode_config.encoder_list,
 					    head) {
@@ -1514,7 +1640,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_connectors >= connector_count) {
 		copied = 0;
 		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+		if (!mode_group) {
 			list_for_each_entry(connector,
 					    &dev->mode_config.connector_list,
 					    head) {
@@ -1561,7 +1687,7 @@ out:
  *
 * Called by the user via ioctl.
 *
- * RETURNS:
+ * Returns:
 * Zero on success, errno on failure.
 */
 int drm_mode_getcrtc(struct drm_device *dev,
@@ -1588,8 +1714,8 @@ int drm_mode_getcrtc(struct drm_device *dev,
 	crtc_resp->x = crtc->x;
 	crtc_resp->y = crtc->y;
 	crtc_resp->gamma_size = crtc->gamma_size;
-	if (crtc->fb)
-		crtc_resp->fb_id = crtc->fb->base.id;
+	if (crtc->primary->fb)
+		crtc_resp->fb_id = crtc->primary->fb->base.id;
 	else
 		crtc_resp->fb_id = 0;
 
@@ -1630,7 +1756,7 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
 *
 * Called by the user via ioctl.
 *
- * RETURNS:
+ * Returns:
 * Zero on success, errno on failure.
 */
 int drm_mode_getconnector(struct drm_device *dev, void *data,
@@ -1765,6 +1891,19 @@ out:
 	return ret;
 }
 
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct an encoder configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
 int drm_mode_getencoder(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
@@ -1800,21 +1939,27 @@ out:
 }
 
 /**
- * drm_mode_getplane_res - get plane info
+ * drm_mode_getplane_res - enumerate all plane resources
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
 *
- * Return an plane count and set of IDs.
+ * Construct a list of plane ids to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
 */
 int drm_mode_getplane_res(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	struct drm_mode_get_plane_res *plane_resp = data;
 	struct drm_mode_config *config;
 	struct drm_plane *plane;
 	uint32_t __user *plane_ptr;
 	int copied = 0, ret = 0;
+	unsigned num_planes;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1822,15 +1967,28 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 	config = &dev->mode_config;
 
+	if (file_priv->universal_planes)
+		num_planes = config->num_total_plane;
+	else
+		num_planes = config->num_overlay_plane;
+
 	/*
 	 * This ioctl is called twice, once to determine how much space is
 	 * needed, and the 2nd time to fill it.
 	 */
-	if (config->num_plane &&
-	    (plane_resp->count_planes >= config->num_plane)) {
+	if (num_planes &&
+	    (plane_resp->count_planes >= num_planes)) {
 		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
 		list_for_each_entry(plane, &config->plane_list, head) {
+			/*
+			 * Unless userspace set the 'universal planes'
+			 * capability bit, only advertise overlays.
+			 */
+			if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+			    !file_priv->universal_planes)
+				continue;
+
 			if (put_user(plane->base.id, plane_ptr + copied)) {
 				ret = -EFAULT;
 				goto out;
@@ -1838,7 +1996,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
 			copied++;
 		}
 	}
-	plane_resp->count_planes = config->num_plane;
+	plane_resp->count_planes = num_planes;
 
 out:
 	drm_modeset_unlock_all(dev);
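
From userspace the opt-in is a client capability set before enumerating resources; a sketch assuming the libdrm wrapper drmSetClientCap() and the matching DRM_CLIENT_CAP_UNIVERSAL_PLANES define:

	/* After this, GETPLANERESOURCES also lists primary and cursor planes. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1))
		fprintf(stderr, "kernel lacks universal plane support\n");
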
@@ -1846,16 +2004,20 @@ out:
 }
 
 /**
- * drm_mode_getplane - get plane info
+ * drm_mode_getplane - get plane configuration
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
  *
- * Return plane info, including formats supported, gamma size, any
- * current fb, etc.
+ * Construct a plane configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
  */
 int drm_mode_getplane(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
 	struct drm_mode_get_plane *plane_resp = data;
 	struct drm_mode_object *obj;
@@ -1911,16 +2073,19 @@ out:
 }
 
 /**
- * drm_mode_setplane - set up or tear down an plane
+ * drm_mode_setplane - configure a plane
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
  *
- * Set plane info, including placement, fb, scaling, and other factors.
+ * Set plane configuration, including placement, fb, scaling, and other factors.
  * Or pass a NULL fb to disable.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
  */
 int drm_mode_setplane(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
 	struct drm_mode_set_plane *plane_req = data;
 	struct drm_mode_object *obj;
@@ -2050,6 +2215,9 @@ out:
  *
  * This is a little helper to wrap internal calls to the ->set_config driver
  * interface. The only thing it adds is the correct refcounting dance.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
  */
 int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
@@ -2064,19 +2232,21 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 	 * crtcs. Atomic modeset will have saner semantics ...
 	 */
 	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
-		tmp->old_fb = tmp->fb;
+		tmp->old_fb = tmp->primary->fb;
 
 	fb = set->fb;
 
 	ret = crtc->funcs->set_config(set);
 	if (ret == 0) {
+		crtc->primary->crtc = crtc;
+
 		/* crtc->fb must be updated by ->set_config, enforce this. */
-		WARN_ON(fb != crtc->fb);
+		WARN_ON(fb != crtc->primary->fb);
 	}
 
 	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
-		if (tmp->fb)
-			drm_framebuffer_reference(tmp->fb);
+		if (tmp->primary->fb)
+			drm_framebuffer_reference(tmp->primary->fb);
 		if (tmp->old_fb)
 			drm_framebuffer_unreference(tmp->old_fb);
 	}
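
The helper is what drm_framebuffer_remove() earlier in this file uses to turn off a CRTC; a minimal sketch of that pattern:

	struct drm_mode_set set;

	memset(&set, 0, sizeof(set));
	set.crtc = crtc;	/* .fb left NULL: disables the CRTC */
	ret = drm_mode_set_config_internal(&set);
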
@@ -2085,14 +2255,19 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
-/*
- * Checks that the framebuffer is big enough for the CRTC viewport
- * (x, y, hdisplay, vdisplay)
+/**
+ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
+ *     CRTC viewport
+ * @crtc: CRTC that framebuffer will be displayed on
+ * @x: x panning
+ * @y: y panning
+ * @mode: mode that framebuffer will be displayed under
+ * @fb: framebuffer to check size of
  */
-static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
-				   int x, int y,
-				   const struct drm_display_mode *mode,
-				   const struct drm_framebuffer *fb)
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+			    int x, int y,
+			    const struct drm_display_mode *mode,
+			    const struct drm_framebuffer *fb)
 
 {
 	int hdisplay, vdisplay;
@@ -2123,6 +2298,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_crtc_check_viewport);
 
 /**
  * drm_mode_setcrtc - set CRTC configuration
@@ -2134,7 +2310,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 int drm_mode_setcrtc(struct drm_device *dev, void *data,
@@ -2174,12 +2350,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	/* If we have a mode we need a framebuffer. */
 	/* If we pass -1, set the mode with the currently bound fb */
 	if (crtc_req->fb_id == -1) {
-		if (!crtc->fb) {
+		if (!crtc->primary->fb) {
 			DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
 			ret = -EINVAL;
 			goto out;
 		}
-		fb = crtc->fb;
+		fb = crtc->primary->fb;
 		/* Make refcounting symmetric with the lookup path. */
 		drm_framebuffer_reference(fb);
 	} else {
@@ -2336,8 +2512,23 @@ out:
 	return ret;
 
 }
+
+
+/**
+ * drm_mode_cursor_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
 int drm_mode_cursor_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
 	struct drm_mode_cursor *req = data;
 	struct drm_mode_cursor2 new_req;
@@ -2348,6 +2539,21 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 	return drm_mode_cursor_common(dev, &new_req, file_priv);
 }
 
+/**
+ * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
 int drm_mode_cursor2_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv)
 {
@@ -2355,7 +2561,14 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
 	return drm_mode_cursor_common(dev, req, file_priv);
 }
 
-/* Original addfb only supported RGB formats, so figure out which one */
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
 uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
 {
 	uint32_t fmt;
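
For example, the classic fbdev combination of 32 bpp at depth 24 maps to an XRGB8888 fourcc:

	uint32_t fmt = drm_mode_legacy_fb_format(32, 24);
	/* fmt == DRM_FORMAT_XRGB8888; depth 30 would yield XRGB2101010 */
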
@@ -2397,11 +2610,12 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
  * @data: data pointer for the ioctl
  * @file_priv: drm file for the ioctl call
  *
- * Add a new FB to the specified CRTC, given a user request.
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 int drm_mode_addfb(struct drm_device *dev,
@@ -2574,11 +2788,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
  * @data: data pointer for the ioctl
  * @file_priv: drm file for the ioctl call
  *
- * Add a new FB to the specified CRTC, given a user request with format.
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 int drm_mode_addfb2(struct drm_device *dev,
@@ -2638,7 +2854,7 @@ int drm_mode_addfb2(struct drm_device *dev,
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 int drm_mode_rmfb(struct drm_device *dev,
@@ -2692,7 +2908,7 @@ fail_lookup:
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 int drm_mode_getfb(struct drm_device *dev,
@@ -2715,7 +2931,8 @@ int drm_mode_getfb(struct drm_device *dev,
 	r->bpp = fb->bits_per_pixel;
 	r->pitch = fb->pitches[0];
 	if (fb->funcs->create_handle) {
-		if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+		if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+		    drm_is_control_client(file_priv)) {
 			ret = fb->funcs->create_handle(fb, file_priv,
 						       &r->handle);
 		} else {
@@ -2736,6 +2953,25 @@ int drm_mode_getfb(struct drm_device *dev,
 	return ret;
 }
 
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
 int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv)
 {
@@ -2813,7 +3049,7 @@ out_err1:
  *
  * Called by the user via ioctl.
  *
- * RETURNS:
+ * Returns:
  * Zero on success, errno on failure.
  */
 void drm_fb_release(struct drm_file *priv)
@@ -2837,6 +3073,20 @@ void drm_fb_release(struct drm_file *priv)
 	mutex_unlock(&priv->fbs_lock);
 }
 
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
 struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 					 const char *name, int num_values)
 {
@@ -2875,6 +3125,24 @@ fail:
 }
 EXPORT_SYMBOL(drm_property_create);
 
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
 struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
 					      const char *name,
 					      const struct drm_prop_enum_list *props,
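
The immutable plane "type" property created earlier in this patch is a typical caller:

	prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "type",
					drm_plane_type_enum_list,
					ARRAY_SIZE(drm_plane_type_enum_list));
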
@@ -2903,6 +3171,24 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
 }
 EXPORT_SYMBOL(drm_property_create_enum);
 
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Compared to plain enumeration properties userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 						 int flags, const char *name,
 						 const struct drm_prop_enum_list *props,
@@ -2931,6 +3217,24 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_property_create_bitmask);
 
+/**
+ * drm_property_create_range - create a new ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is allowed to set any integer value in the (min, max) range
+ * inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
 					       const char *name,
 					       uint64_t min, uint64_t max)
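
A sketch of a driver-defined range property; the "brightness" knob and its attachment point are hypothetical:

	prop = drm_property_create_range(dev, 0, "brightness", 0, 255);
	if (prop)
		drm_object_attach_property(&connector->base, prop, 128);
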
@@ -2950,6 +3254,21 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
 }
 EXPORT_SYMBOL(drm_property_create_range);
 
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated, drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
 int drm_property_add_enum(struct drm_property *property, int index,
 			  uint64_t value, const char *name)
 {
@@ -2989,6 +3308,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
 }
 EXPORT_SYMBOL(drm_property_add_enum);
 
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
 void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 {
 	struct drm_property_enum *prop_enum, *pt;
@@ -3006,6 +3333,16 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
 void drm_object_attach_property(struct drm_mode_object *obj,
 				struct drm_property *property,
 				uint64_t init_val)
@@ -3026,6 +3363,19 @@ void drm_object_attach_property(struct drm_mode_object *obj,
 }
 EXPORT_SYMBOL(drm_object_attach_property);
 
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
 int drm_object_property_set_value(struct drm_mode_object *obj,
 				  struct drm_property *property, uint64_t val)
 {
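
Since both value helpers only touch the software state, a read-back returns whatever was last stored, not what the hardware is doing; a sketch using a property attached as above:

	uint64_t val;

	drm_object_property_set_value(&connector->base, prop, 42);
	drm_object_property_get_value(&connector->base, prop, &val);
	/* val == 42, regardless of the hardware state */
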
@@ -3058,6 +3422,19 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
3058} 3422}
3059EXPORT_SYMBOL(drm_object_property_get_value); 3423EXPORT_SYMBOL(drm_object_property_get_value);
3060 3424
3425/**
3426 * drm_mode_getproperty_ioctl - get the current value of a connector's property
3427 * @dev: DRM device
3428 * @data: ioctl data
3429 * @file_priv: DRM file info
3430 *
3431 * This function retrieves the current value for an connectors's property.
3432 *
3433 * Called by the user via ioctl.
3434 *
3435 * Returns:
3436 * Zero on success, errno on failure.
3437 */
3061int drm_mode_getproperty_ioctl(struct drm_device *dev, 3438int drm_mode_getproperty_ioctl(struct drm_device *dev,
3062 void *data, struct drm_file *file_priv) 3439 void *data, struct drm_file *file_priv)
3063{ 3440{
@@ -3196,6 +3573,20 @@ static void drm_property_destroy_blob(struct drm_device *dev,
3196 kfree(blob); 3573 kfree(blob);
3197} 3574}
3198 3575
3576/**
3577 * drm_mode_getblob_ioctl - get the contents of a blob property value
3578 * @dev: DRM device
3579 * @data: ioctl data
3580 * @file_priv: DRM file info
3581 *
3582 * This function retrieves the contents of a blob property. The value stored in
3583 * an object's blob property is just a normal modeset object id.
3584 *
3585 * Called by the user via ioctl.
3586 *
3587 * Returns:
3588 * Zero on success, errno on failure.
3589 */
3199int drm_mode_getblob_ioctl(struct drm_device *dev, 3590int drm_mode_getblob_ioctl(struct drm_device *dev,
3200 void *data, struct drm_file *file_priv) 3591 void *data, struct drm_file *file_priv)
3201{ 3592{
@@ -3230,6 +3621,17 @@ done:
3230 return ret; 3621 return ret;
3231} 3622}
3232 3623
3624/**
3625 * drm_mode_connector_update_edid_property - update the edid property of a connector
3626 * @connector: drm connector
3627 * @edid: new value of the edid property
3628 *
3629 * This function creates a new blob modeset object and assigns its id to the
3630 * connector's edid property.
3631 *
3632 * Returns:
3633 * Zero on success, errno on failure.
3634 */
3233int drm_mode_connector_update_edid_property(struct drm_connector *connector, 3635int drm_mode_connector_update_edid_property(struct drm_connector *connector,
3234 struct edid *edid) 3636 struct edid *edid)
3235{ 3637{
@@ -3287,6 +3689,20 @@ static bool drm_property_change_is_valid(struct drm_property *property,
3287 } 3689 }
3288} 3690}
3289 3691
3692/**
3693 * drm_mode_connector_property_set_ioctl - set the current value of a connector property
3694 * @dev: DRM device
3695 * @data: ioctl data
3696 * @file_priv: DRM file info
3697 *
3698 * This function sets the current value for a connectors's property. It also
3699 * calls into a driver's ->set_property callback to update the hardware state
3700 *
3701 * Called by the user via ioctl.
3702 *
3703 * Returns:
3704 * Zero on success, errno on failure.
3705 */
3290int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 3706int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
3291 void *data, struct drm_file *file_priv) 3707 void *data, struct drm_file *file_priv)
3292{ 3708{
@@ -3353,6 +3769,21 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
3353 return ret; 3769 return ret;
3354} 3770}
3355 3771
3772/**
3773 * drm_mode_obj_get_properties_ioctl - get the current values of an object's properties
3774 * @dev: DRM device
3775 * @data: ioctl data
3776 * @file_priv: DRM file info
3777 *
3778 * This function retrieves the current values of an object's properties. Compared
3779 * to the connector specific ioctl this one is extended to also work on crtc and
3780 * plane objects.
3781 *
3782 * Called by the user via ioctl.
3783 *
3784 * Returns:
3785 * Zero on success, errno on failure.
3786 */
3356int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, 3787int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
3357 struct drm_file *file_priv) 3788 struct drm_file *file_priv)
3358{ 3789{
@@ -3409,6 +3840,22 @@ out:
3409 return ret; 3840 return ret;
3410} 3841}
3411 3842
3843/**
3844 * drm_mode_obj_set_property_ioctl - set the current value of an object's property
3845 * @dev: DRM device
3846 * @data: ioctl data
3847 * @file_priv: DRM file info
3848 *
3849 * This function sets the current value for an object's property. It also calls
3850 * into a driver's ->set_property callback to update the hardware state.
3851 * Compared to the connector specific ioctl this one is extended to also work on
3852 * crtc and plane objects.
3853 *
3854 * Called by the user via ioctl.
3855 *
3856 * Returns:
3857 * Zero on success, errno on failure.
3858 */
3412int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, 3859int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
3413 struct drm_file *file_priv) 3860 struct drm_file *file_priv)
3414{ 3861{
@@ -3468,6 +3915,18 @@ out:
3468 return ret; 3915 return ret;
3469} 3916}
3470 3917
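Userspace reaches both object-property ioctls through libdrm. A hedged sketch of enumerating a plane's properties with drmModeObjectGetProperties(), assuming an open DRM fd and a valid plane id:

	#include <stdio.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static void dump_plane_props(int fd, uint32_t plane_id)
	{
		drmModeObjectPropertiesPtr props;
		uint32_t i;

		props = drmModeObjectGetProperties(fd, plane_id,
						   DRM_MODE_OBJECT_PLANE);
		if (!props)
			return;

		/* props->props[i] is a property id, prop_values[i] its value */
		for (i = 0; i < props->count_props; i++)
			printf("prop %u = %llu\n", props->props[i],
			       (unsigned long long)props->prop_values[i]);

		drmModeFreeObjectProperties(props);
	}

The matching setter is drmModeObjectSetProperty(), which lands in drm_mode_obj_set_property_ioctl() above.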
3918/**
3919 * drm_mode_connector_attach_encoder - attach a connector to an encoder
3920 * @connector: connector to attach
3921 * @encoder: encoder to attach @connector to
3922 *
3923 * This function links up a connector to an encoder. Note that the routing
3924 * restrictions between encoders and crtcs are exposed to userspace through the
3925 * possible_clones and possible_crtcs bitmasks.
3926 *
3927 * Returns:
3928 * Zero on success, errno on failure.
3929 */
3471int drm_mode_connector_attach_encoder(struct drm_connector *connector, 3930int drm_mode_connector_attach_encoder(struct drm_connector *connector,
3472 struct drm_encoder *encoder) 3931 struct drm_encoder *encoder)
3473{ 3932{
@@ -3483,23 +3942,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
3483} 3942}
3484EXPORT_SYMBOL(drm_mode_connector_attach_encoder); 3943EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
3485 3944
3486void drm_mode_connector_detach_encoder(struct drm_connector *connector, 3945/**
3487 struct drm_encoder *encoder) 3946 * drm_mode_crtc_set_gamma_size - set the gamma table size
3488{ 3947 * @crtc: CRTC to set the gamma table size for
3489 int i; 3948 * @gamma_size: size of the gamma table
3490 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 3949 *
3491 if (connector->encoder_ids[i] == encoder->base.id) { 3950 * Drivers which support gamma tables should set this to the supported gamma
3492 connector->encoder_ids[i] = 0; 3951 * table size when initializing the CRTC. Currently the drm core only supports a
3493 if (connector->encoder == encoder) 3952 * fixed gamma table size.
3494 connector->encoder = NULL; 3953 *
3495 break; 3954 * Returns:
3496 } 3955 * Zero on success, errno on failure.
3497 } 3956 */
3498}
3499EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
3500
3501int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 3957int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
3502 int gamma_size) 3958 int gamma_size)
3503{ 3959{
3504 crtc->gamma_size = gamma_size; 3960 crtc->gamma_size = gamma_size;
3505 3961
@@ -3513,6 +3969,20 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
3513} 3969}
3514EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); 3970EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
3515 3971
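A hedged example of the driver side: a CRTC init path advertising the usual 256-entry hardware LUT (foo_crtc is a hypothetical driver structure):

	/* tell the core how large the gamma LUT is; this also allocates
	 * the gamma_store backing array */
	ret = drm_mode_crtc_set_gamma_size(&foo_crtc->base, 256);
	if (ret)
		return ret;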
3972/**
3973 * drm_mode_gamma_set_ioctl - set the gamma table
3974 * @dev: DRM device
3975 * @data: ioctl data
3976 * @file_priv: DRM file info
3977 *
3978 * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
3979 * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
3980 *
3981 * Called by the user via ioctl.
3982 *
3983 * Returns:
3984 * Zero on success, errno on failure.
3985 */
3516int drm_mode_gamma_set_ioctl(struct drm_device *dev, 3986int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3517 void *data, struct drm_file *file_priv) 3987 void *data, struct drm_file *file_priv)
3518{ 3988{
@@ -3572,6 +4042,21 @@ out:
3572 4042
3573} 4043}
3574 4044
4045/**
4046 * drm_mode_gamma_get_ioctl - get the gamma table
4047 * @dev: DRM device
4048 * @data: ioctl data
4049 * @file_priv: DRM file info
4050 *
4051 * Copy the current gamma table into the storage provided. This also provides
4052 * the gamma table size the driver expects, which can be used to size the
4053 * allocated storage.
4054 *
4055 * Called by the user via ioctl.
4056 *
4057 * Returns:
4058 * Zero on success, errno on failure.
4059 */
3575int drm_mode_gamma_get_ioctl(struct drm_device *dev, 4060int drm_mode_gamma_get_ioctl(struct drm_device *dev,
3576 void *data, struct drm_file *file_priv) 4061 void *data, struct drm_file *file_priv)
3577{ 4062{
@@ -3622,6 +4107,24 @@ out:
3622 return ret; 4107 return ret;
3623} 4108}
3624 4109
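From userspace both gamma ioctls are wrapped by libdrm. A sketch that uploads an identity ramp, sized via the gamma_size reported by drmModeGetCrtc(); fd and crtc_id are assumed valid and gamma_size > 1:

	#include <stdint.h>
	#include <stdlib.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int set_identity_gamma(int fd, uint32_t crtc_id)
	{
		drmModeCrtcPtr crtc = drmModeGetCrtc(fd, crtc_id);
		uint16_t *lut;
		int i, size, ret;

		if (!crtc)
			return -1;
		size = crtc->gamma_size;
		drmModeFreeCrtc(crtc);

		lut = calloc(3 * size, sizeof(*lut));
		if (!lut)
			return -1;

		/* linear ramp, replicated into the red/green/blue planes */
		for (i = 0; i < size; i++)
			lut[i] = lut[size + i] = lut[2 * size + i] =
				(uint16_t)(i * 0xffff / (size - 1));

		ret = drmModeCrtcSetGamma(fd, crtc_id, size,
					  lut, lut + size, lut + 2 * size);
		free(lut);
		return ret;
	}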
4110/**
4111 * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
4112 * @dev: DRM device
4113 * @data: ioctl data
4114 * @file_priv: DRM file info
4115 *
4116 * This schedules an asynchronous update on a given CRTC, called a page flip.
4117 * Optionally a drm event is generated to signal the completion of the update.
4118 * Generic drivers cannot assume that a pageflip with changed framebuffer
4119 * properties (including driver specific metadata like tiling layout) will work,
4120 * but some drivers support e.g. pixel format changes through the pageflip
4121 * ioctl.
4122 *
4123 * Called by the user via ioctl.
4124 *
4125 * Returns:
4126 * Zero on success, errno on failure.
4127 */
3625int drm_mode_page_flip_ioctl(struct drm_device *dev, 4128int drm_mode_page_flip_ioctl(struct drm_device *dev,
3626 void *data, struct drm_file *file_priv) 4129 void *data, struct drm_file *file_priv)
3627{ 4130{
@@ -3646,7 +4149,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3646 crtc = obj_to_crtc(obj); 4149 crtc = obj_to_crtc(obj);
3647 4150
3648 mutex_lock(&crtc->mutex); 4151 mutex_lock(&crtc->mutex);
3649 if (crtc->fb == NULL) { 4152 if (crtc->primary->fb == NULL) {
3650 /* The framebuffer is currently unbound, presumably 4153 /* The framebuffer is currently unbound, presumably
3651 * due to a hotplug event, that userspace has not 4154 * due to a hotplug event, that userspace has not
3652 * yet discovered. 4155 * yet discovered.
@@ -3668,7 +4171,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3668 if (ret) 4171 if (ret)
3669 goto out; 4172 goto out;
3670 4173
3671 if (crtc->fb->pixel_format != fb->pixel_format) { 4174 if (crtc->primary->fb->pixel_format != fb->pixel_format) {
3672 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); 4175 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
3673 ret = -EINVAL; 4176 ret = -EINVAL;
3674 goto out; 4177 goto out;
@@ -3701,7 +4204,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3701 (void (*) (struct drm_pending_event *)) kfree; 4204 (void (*) (struct drm_pending_event *)) kfree;
3702 } 4205 }
3703 4206
3704 old_fb = crtc->fb; 4207 old_fb = crtc->primary->fb;
3705 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); 4208 ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
3706 if (ret) { 4209 if (ret) {
3707 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 4210 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3719,7 +4222,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3719 * Failing to do so will screw with the reference counting 4222 * Failing to do so will screw with the reference counting
3720 * on framebuffers. 4223 * on framebuffers.
3721 */ 4224 */
3722 WARN_ON(crtc->fb != fb); 4225 WARN_ON(crtc->primary->fb != fb);
3723 /* Unref only the old framebuffer. */ 4226 /* Unref only the old framebuffer. */
3724 fb = NULL; 4227 fb = NULL;
3725 } 4228 }
@@ -3734,6 +4237,14 @@ out:
3734 return ret; 4237 return ret;
3735} 4238}
3736 4239
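Userspace drives this through libdrm's drmModePageFlip(); with DRM_MODE_PAGE_FLIP_EVENT set, the completion arrives as an event on the DRM fd. A hedged sketch, assuming fd, crtc_id and fb_id are already set up:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static void flip_done(int fd, unsigned int frame, unsigned int sec,
			      unsigned int usec, void *data)
	{
		*(int *)data = 1;	/* mark the flip as completed */
	}

	static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
	{
		drmEventContext evctx = {
			.version = DRM_EVENT_CONTEXT_VERSION,
			.page_flip_handler = flip_done,
		};
		int done = 0, ret;

		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, &done);
		if (ret)
			return ret;

		/* blocks reading the DRM fd until the completion event fires */
		while (!done)
			drmHandleEvent(fd, &evctx);

		return 0;
	}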
4240/**
4241 * drm_mode_config_reset - call ->reset callbacks
4242 * @dev: drm device
4243 *
4244 * This function calls the ->reset callback of all the crtcs, encoders and
4245 * connectors. Drivers can use this in e.g. their driver load or resume code to
4246 * reset hardware and software state.
4247 */
3737void drm_mode_config_reset(struct drm_device *dev) 4248void drm_mode_config_reset(struct drm_device *dev)
3738{ 4249{
3739 struct drm_crtc *crtc; 4250 struct drm_crtc *crtc;
@@ -3757,16 +4268,66 @@ void drm_mode_config_reset(struct drm_device *dev)
3757} 4268}
3758EXPORT_SYMBOL(drm_mode_config_reset); 4269EXPORT_SYMBOL(drm_mode_config_reset);
3759 4270
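A hedged sketch of how a driver's resume path might combine this with the force-restore helper documented in drm_crtc_helper.c further down (the foo_ names are hypothetical):

	static int foo_pm_resume(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* reset software state, then replay the last configuration */
		drm_mode_config_reset(drm);
		drm_helper_resume_force_mode(drm);

		return 0;
	}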
4271/**
4272 * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
4273 * @dev: DRM device
4274 * @data: ioctl data
4275 * @file_priv: DRM file info
4276 *
4277 * This creates a new dumb buffer in the driver's backing storage manager (GEM,
4278 * TTM or something else entirely) and returns the resulting buffer handle. This
4279 * handle can then be wrapped up into a framebuffer modeset object.
4280 *
4281 * Note that userspace is not allowed to use such objects for render
4282 * acceleration - drivers must create their own private ioctls for such a use
4283 * case.
4284 *
4285 * Called by the user via ioctl.
4286 *
4287 * Returns:
4288 * Zero on success, errno on failure.
4289 */
3760int drm_mode_create_dumb_ioctl(struct drm_device *dev, 4290int drm_mode_create_dumb_ioctl(struct drm_device *dev,
3761 void *data, struct drm_file *file_priv) 4291 void *data, struct drm_file *file_priv)
3762{ 4292{
3763 struct drm_mode_create_dumb *args = data; 4293 struct drm_mode_create_dumb *args = data;
4294 u32 cpp, stride, size;
3764 4295
3765 if (!dev->driver->dumb_create) 4296 if (!dev->driver->dumb_create)
3766 return -ENOSYS; 4297 return -ENOSYS;
4298 if (!args->width || !args->height || !args->bpp)
4299 return -EINVAL;
4300
4301 /* overflow checks for 32bit size calculations */
4302 cpp = DIV_ROUND_UP(args->bpp, 8);
4303 if (cpp > 0xffffffffU / args->width)
4304 return -EINVAL;
4305 stride = cpp * args->width;
4306 if (args->height > 0xffffffffU / stride)
4307 return -EINVAL;
4308
4309 /* test for wrap-around */
4310 size = args->height * stride;
4311 if (PAGE_ALIGN(size) == 0)
4312 return -EINVAL;
4313
3767 return dev->driver->dumb_create(file_priv, dev, args); 4314 return dev->driver->dumb_create(file_priv, dev, args);
3768} 4315}
3769 4316
4317/**
4318 * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
4319 * @dev: DRM device
4320 * @data: ioctl data
4321 * @file_priv: DRM file info
4322 *
4323 * Allocate an offset in the drm device node's address space to be able to
4324 * memory map a dumb buffer.
4325 *
4326 * Called by the user via ioctl.
4327 *
4328 * Returns:
4329 * Zero on success, errno on failure.
4330 */
3770int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, 4331int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
3771 void *data, struct drm_file *file_priv) 4332 void *data, struct drm_file *file_priv)
3772{ 4333{
@@ -3779,6 +4340,21 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
3779 return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); 4340 return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
3780} 4341}
3781 4342
4343/**
4344 * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
4345 * @dev: DRM device
4346 * @data: ioctl data
4347 * @file_priv: DRM file info
4348 *
4349 * This destroys the userspace handle for the given dumb backing storage buffer.
4350 * Since buffer objects must be reference counted in the kernel a buffer object
4351 * won't be immediately freed if a framebuffer modeset object still uses it.
4352 *
4353 * Called by the user via ioctl.
4354 *
4355 * Returns:
4356 * Zero on success, errno on failure.
4357 */
3782int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, 4358int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
3783 void *data, struct drm_file *file_priv) 4359 void *data, struct drm_file *file_priv)
3784{ 4360{
@@ -3790,9 +4366,14 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
3790 return dev->driver->dumb_destroy(file_priv, dev, args->handle); 4366 return dev->driver->dumb_destroy(file_priv, dev, args->handle);
3791} 4367}
3792 4368
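Taken together, the three dumb-buffer ioctls support a simple userspace round trip. A hedged sketch using libdrm's drmIoctl() wrapper; fd is an open DRM fd, and real code would go on to wrap the handle in a framebuffer via DRM_IOCTL_MODE_ADDFB instead of destroying it straight away:

	#include <string.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	static int dumb_roundtrip(int fd)
	{
		struct drm_mode_create_dumb create = {
			.width = 640, .height = 480, .bpp = 32,
		};
		struct drm_mode_map_dumb map = { 0 };
		struct drm_mode_destroy_dumb destroy = { 0 };
		void *ptr;
		int ret = -1;

		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
			return -1;

		map.handle = create.handle;
		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			goto out;

		ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, map.offset);
		if (ptr == MAP_FAILED)
			goto out;

		memset(ptr, 0, create.size);	/* clear to solid black */
		munmap(ptr, create.size);
		ret = 0;
	out:
		destroy.handle = create.handle;
		drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
		return ret;
	}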
3793/* 4369/**
3794 * Just need to support RGB formats here for compat with code that doesn't 4370 * drm_fb_get_bpp_depth - get the bpp/depth values for format
3795 * use pixel formats directly yet. 4371 * @format: pixel format (DRM_FORMAT_*)
4372 * @depth: storage for the depth value
4373 * @bpp: storage for the bpp value
4374 *
4375 * This only supports RGB formats, for compatibility with code that doesn't use
4376 * pixel formats directly yet.
3796 */ 4377 */
3797void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, 4378void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
3798 int *bpp) 4379 int *bpp)
@@ -3864,7 +4445,7 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
3864 * drm_format_num_planes - get the number of planes for format 4445 * drm_format_num_planes - get the number of planes for format
3865 * @format: pixel format (DRM_FORMAT_*) 4446 * @format: pixel format (DRM_FORMAT_*)
3866 * 4447 *
3867 * RETURNS: 4448 * Returns:
3868 * The number of planes used by the specified pixel format. 4449 * The number of planes used by the specified pixel format.
3869 */ 4450 */
3870int drm_format_num_planes(uint32_t format) 4451int drm_format_num_planes(uint32_t format)
@@ -3899,7 +4480,7 @@ EXPORT_SYMBOL(drm_format_num_planes);
3899 * @format: pixel format (DRM_FORMAT_*) 4480 * @format: pixel format (DRM_FORMAT_*)
3900 * @plane: plane index 4481 * @plane: plane index
3901 * 4482 *
3902 * RETURNS: 4483 * Returns:
3903 * The bytes per pixel value for the specified plane. 4484 * The bytes per pixel value for the specified plane.
3904 */ 4485 */
3905int drm_format_plane_cpp(uint32_t format, int plane) 4486int drm_format_plane_cpp(uint32_t format, int plane)
@@ -3945,7 +4526,7 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
3945 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor 4526 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
3946 * @format: pixel format (DRM_FORMAT_*) 4527 * @format: pixel format (DRM_FORMAT_*)
3947 * 4528 *
3948 * RETURNS: 4529 * Returns:
3949 * The horizontal chroma subsampling factor for the 4530 * The horizontal chroma subsampling factor for the
3950 * specified pixel format. 4531 * specified pixel format.
3951 */ 4532 */
@@ -3980,7 +4561,7 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
3980 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor 4561 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
3981 * @format: pixel format (DRM_FORMAT_*) 4562 * @format: pixel format (DRM_FORMAT_*)
3982 * 4563 *
3983 * RETURNS: 4564 * Returns:
3984 * The vertical chroma subsampling factor for the 4565 * The vertical chroma subsampling factor for the
3985 * specified pixel format. 4566 * specified pixel format.
3986 */ 4567 */
@@ -4030,6 +4611,7 @@ void drm_mode_config_init(struct drm_device *dev)
4030 4611
4031 drm_modeset_lock_all(dev); 4612 drm_modeset_lock_all(dev);
4032 drm_mode_create_standard_connector_properties(dev); 4613 drm_mode_create_standard_connector_properties(dev);
4614 drm_mode_create_standard_plane_properties(dev);
4033 drm_modeset_unlock_all(dev); 4615 drm_modeset_unlock_all(dev);
4034 4616
4035 /* Just to be sure */ 4617 /* Just to be sure */
@@ -4037,6 +4619,8 @@ void drm_mode_config_init(struct drm_device *dev)
4037 dev->mode_config.num_connector = 0; 4619 dev->mode_config.num_connector = 0;
4038 dev->mode_config.num_crtc = 0; 4620 dev->mode_config.num_crtc = 0;
4039 dev->mode_config.num_encoder = 0; 4621 dev->mode_config.num_encoder = 0;
4622 dev->mode_config.num_overlay_plane = 0;
4623 dev->mode_config.num_total_plane = 0;
4040} 4624}
4041EXPORT_SYMBOL(drm_mode_config_init); 4625EXPORT_SYMBOL(drm_mode_config_init);
4042 4626
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f7a81209beb3..c43825e8f5c1 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -105,9 +105,6 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
105 * @maxX: max width for modes 105 * @maxX: max width for modes
106 * @maxY: max height for modes 106 * @maxY: max height for modes
107 * 107 *
108 * LOCKING:
109 * Caller must hold mode config lock.
110 *
111 * Based on the helper callbacks implemented by @connector try to detect all 108 * Based on the helper callbacks implemented by @connector try to detect all
112 * valid modes. Modes will first be added to the connector's probed_modes list, 109 * valid modes. Modes will first be added to the connector's probed_modes list,
113 * then culled (based on validity and the @maxX, @maxY parameters) and put into 110 * then culled (based on validity and the @maxX, @maxY parameters) and put into
@@ -117,8 +114,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
117 * @connector vfunc for drivers that use the crtc helpers for output mode 114 * @connector vfunc for drivers that use the crtc helpers for output mode
118 * filtering and detection. 115 * filtering and detection.
119 * 116 *
120 * RETURNS: 117 * Returns:
121 * Number of modes found on @connector. 118 * The number of modes found on @connector.
122 */ 119 */
123int drm_helper_probe_single_connector_modes(struct drm_connector *connector, 120int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
124 uint32_t maxX, uint32_t maxY) 121 uint32_t maxX, uint32_t maxY)
@@ -131,6 +128,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
131 int mode_flags = 0; 128 int mode_flags = 0;
132 bool verbose_prune = true; 129 bool verbose_prune = true;
133 130
131 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
132
134 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 133 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
135 drm_get_connector_name(connector)); 134 drm_get_connector_name(connector));
136 /* set all modes to the unverified state */ 135 /* set all modes to the unverified state */
@@ -176,8 +175,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
176 drm_mode_connector_list_update(connector); 175 drm_mode_connector_list_update(connector);
177 176
178 if (maxX && maxY) 177 if (maxX && maxY)
179 drm_mode_validate_size(dev, &connector->modes, maxX, 178 drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
180 maxY, 0);
181 179
182 if (connector->interlace_allowed) 180 if (connector->interlace_allowed)
183 mode_flags |= DRM_MODE_FLAG_INTERLACE; 181 mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -219,18 +217,19 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
219 * drm_helper_encoder_in_use - check if a given encoder is in use 217 * drm_helper_encoder_in_use - check if a given encoder is in use
220 * @encoder: encoder to check 218 * @encoder: encoder to check
221 * 219 *
222 * LOCKING: 220 * Checks whether @encoder is in use by any connector in the current mode
223 * Caller must hold mode config lock. 221 * setting output configuration. This doesn't mean that it is actually enabled since
224 * 222 * the DPMS state is tracked separately.
225 * Walk @encoders's DRM device's mode_config and see if it's in use.
226 * 223 *
227 * RETURNS: 224 * Returns:
228 * True if @encoder is part of the mode_config, false otherwise. 225 * True if @encoder is used, false otherwise.
229 */ 226 */
230bool drm_helper_encoder_in_use(struct drm_encoder *encoder) 227bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
231{ 228{
232 struct drm_connector *connector; 229 struct drm_connector *connector;
233 struct drm_device *dev = encoder->dev; 230 struct drm_device *dev = encoder->dev;
231
232 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
234 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 233 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
235 if (connector->encoder == encoder) 234 if (connector->encoder == encoder)
236 return true; 235 return true;
@@ -242,19 +241,19 @@ EXPORT_SYMBOL(drm_helper_encoder_in_use);
242 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config 241 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
243 * @crtc: CRTC to check 242 * @crtc: CRTC to check
244 * 243 *
245 * LOCKING: 244 * Checks whether @crtc is in use by any connector in the current mode
246 * Caller must hold mode config lock. 245 * setting output configuration. This doesn't mean that it is actually enabled since
247 * 246 * the DPMS state is tracked separately.
248 * Walk @crtc's DRM device's mode_config and see if it's in use.
249 * 247 *
250 * RETURNS: 248 * Returns:
251 * True if @crtc is part of the mode_config, false otherwise. 249 * True if @crtc is used, false otherwise.
252 */ 250 */
253bool drm_helper_crtc_in_use(struct drm_crtc *crtc) 251bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
254{ 252{
255 struct drm_encoder *encoder; 253 struct drm_encoder *encoder;
256 struct drm_device *dev = crtc->dev; 254 struct drm_device *dev = crtc->dev;
257 /* FIXME: Locking around list access? */ 255
256 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
258 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 257 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
259 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) 258 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
260 return true; 259 return true;
@@ -279,27 +278,17 @@ drm_encoder_disable(struct drm_encoder *encoder)
279 encoder->bridge->funcs->post_disable(encoder->bridge); 278 encoder->bridge->funcs->post_disable(encoder->bridge);
280} 279}
281 280
282/** 281static void __drm_helper_disable_unused_functions(struct drm_device *dev)
283 * drm_helper_disable_unused_functions - disable unused objects
284 * @dev: DRM device
285 *
286 * LOCKING:
287 * Caller must hold mode config lock.
288 *
289 * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
290 * by calling its dpms function, which should power it off.
291 */
292void drm_helper_disable_unused_functions(struct drm_device *dev)
293{ 282{
294 struct drm_encoder *encoder; 283 struct drm_encoder *encoder;
295 struct drm_connector *connector; 284 struct drm_connector *connector;
296 struct drm_crtc *crtc; 285 struct drm_crtc *crtc;
297 286
287 drm_warn_on_modeset_not_all_locked(dev);
288
298 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 289 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
299 if (!connector->encoder) 290 if (!connector->encoder)
300 continue; 291 continue;
301 if (connector->status == connector_status_disconnected)
302 connector->encoder = NULL;
303 } 292 }
304 293
305 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 294 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -318,10 +307,27 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
318 (*crtc_funcs->disable)(crtc); 307 (*crtc_funcs->disable)(crtc);
319 else 308 else
320 (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); 309 (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
321 crtc->fb = NULL; 310 crtc->primary->fb = NULL;
322 } 311 }
323 } 312 }
324} 313}
314
315/**
316 * drm_helper_disable_unused_functions - disable unused objects
317 * @dev: DRM device
318 *
319 * This function walks through the entire mode setting configuration of @dev. It
320 * will remove any crtc links of unused encoders and encoder links of
321 * disconnected connectors. Then it will disable all unused encoders and crtcs
322 * either by calling their disable callback if available or by calling their
323 * dpms callback with DRM_MODE_DPMS_OFF.
324 */
325void drm_helper_disable_unused_functions(struct drm_device *dev)
326{
327 drm_modeset_lock_all(dev);
328 __drm_helper_disable_unused_functions(dev);
329 drm_modeset_unlock_all(dev);
330}
325EXPORT_SYMBOL(drm_helper_disable_unused_functions); 331EXPORT_SYMBOL(drm_helper_disable_unused_functions);
326 332
327/* 333/*
@@ -355,9 +361,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
355 * @y: vertical offset into the surface 361 * @y: vertical offset into the surface
356 * @old_fb: old framebuffer, for cleanup 362 * @old_fb: old framebuffer, for cleanup
357 * 363 *
358 * LOCKING:
359 * Caller must hold mode config lock.
360 *
361 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance 364 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
362 * to fixup or reject the mode prior to trying to set it. This is an internal 365 * to fixup or reject the mode prior to trying to set it. This is an internal
363 * helper that drivers could e.g. use to update properties that require the 366 * helper that drivers could e.g. use to update properties that require the
@@ -367,8 +370,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
367 * drm_crtc_helper_set_config() helper function to drive the mode setting 370 * drm_crtc_helper_set_config() helper function to drive the mode setting
368 * sequence. 371 * sequence.
369 * 372 *
370 * RETURNS: 373 * Returns:
371 * True if the mode was set successfully, or false otherwise. 374 * True if the mode was set successfully, false otherwise.
372 */ 375 */
373bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, 376bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
374 struct drm_display_mode *mode, 377 struct drm_display_mode *mode,
@@ -384,6 +387,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
384 struct drm_encoder *encoder; 387 struct drm_encoder *encoder;
385 bool ret = true; 388 bool ret = true;
386 389
390 drm_warn_on_modeset_not_all_locked(dev);
391
387 saved_enabled = crtc->enabled; 392 saved_enabled = crtc->enabled;
388 crtc->enabled = drm_helper_crtc_in_use(crtc); 393 crtc->enabled = drm_helper_crtc_in_use(crtc);
389 if (!crtc->enabled) 394 if (!crtc->enabled)
@@ -552,7 +557,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
552 } 557 }
553 } 558 }
554 559
555 drm_helper_disable_unused_functions(dev); 560 __drm_helper_disable_unused_functions(dev);
556 return 0; 561 return 0;
557} 562}
558 563
@@ -560,17 +565,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
560 * drm_crtc_helper_set_config - set a new config from userspace 565 * drm_crtc_helper_set_config - set a new config from userspace
561 * @set: mode set configuration 566 * @set: mode set configuration
562 * 567 *
563 * LOCKING:
564 * Caller must hold mode config lock.
565 *
566 * Setup a new configuration, provided by the upper layers (either an ioctl call 568 * Setup a new configuration, provided by the upper layers (either an ioctl call
567 * from userspace or internally e.g. from the fbdev support code) in @set, and 569 * from userspace or internally e.g. from the fbdev support code) in @set, and
568 * enable it. This is the main helper functions for drivers that implement 570 * enable it. This is the main helper functions for drivers that implement
569 * kernel mode setting with the crtc helper functions and the assorted 571 * kernel mode setting with the crtc helper functions and the assorted
570 * ->prepare(), ->modeset() and ->commit() helper callbacks. 572 * ->prepare(), ->modeset() and ->commit() helper callbacks.
571 * 573 *
572 * RETURNS: 574 * Returns:
573 * Returns 0 on success, -ERRNO on failure. 575 * Returns 0 on success, negative errno numbers on failure.
574 */ 576 */
575int drm_crtc_helper_set_config(struct drm_mode_set *set) 577int drm_crtc_helper_set_config(struct drm_mode_set *set)
576{ 578{
@@ -612,6 +614,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
612 614
613 dev = set->crtc->dev; 615 dev = set->crtc->dev;
614 616
617 drm_warn_on_modeset_not_all_locked(dev);
618
615 /* 619 /*
616 * Allocate space for the backup of all (non-pointer) encoder and 620 * Allocate space for the backup of all (non-pointer) encoder and
617 * connector data. 621 * connector data.
@@ -647,19 +651,19 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
647 save_set.mode = &set->crtc->mode; 651 save_set.mode = &set->crtc->mode;
648 save_set.x = set->crtc->x; 652 save_set.x = set->crtc->x;
649 save_set.y = set->crtc->y; 653 save_set.y = set->crtc->y;
650 save_set.fb = set->crtc->fb; 654 save_set.fb = set->crtc->primary->fb;
651 655
652 /* We should be able to check here if the fb has the same properties 656 /* We should be able to check here if the fb has the same properties
653 * and then just flip_or_move it */ 657 * and then just flip_or_move it */
654 if (set->crtc->fb != set->fb) { 658 if (set->crtc->primary->fb != set->fb) {
655 /* If we have no fb then treat it as a full mode set */ 659 /* If we have no fb then treat it as a full mode set */
656 if (set->crtc->fb == NULL) { 660 if (set->crtc->primary->fb == NULL) {
657 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); 661 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
658 mode_changed = true; 662 mode_changed = true;
659 } else if (set->fb == NULL) { 663 } else if (set->fb == NULL) {
660 mode_changed = true; 664 mode_changed = true;
661 } else if (set->fb->pixel_format != 665 } else if (set->fb->pixel_format !=
662 set->crtc->fb->pixel_format) { 666 set->crtc->primary->fb->pixel_format) {
663 mode_changed = true; 667 mode_changed = true;
664 } else 668 } else
665 fb_changed = true; 669 fb_changed = true;
@@ -689,12 +693,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
689 if (new_encoder == NULL) 693 if (new_encoder == NULL)
690 /* don't break so fail path works correct */ 694 /* don't break so fail path works correct */
691 fail = 1; 695 fail = 1;
692 break;
693 696
694 if (connector->dpms != DRM_MODE_DPMS_ON) { 697 if (connector->dpms != DRM_MODE_DPMS_ON) {
695 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); 698 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
696 mode_changed = true; 699 mode_changed = true;
697 } 700 }
701
702 break;
698 } 703 }
699 } 704 }
700 705
@@ -760,13 +765,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
760 DRM_DEBUG_KMS("attempting to set mode from" 765 DRM_DEBUG_KMS("attempting to set mode from"
761 " userspace\n"); 766 " userspace\n");
762 drm_mode_debug_printmodeline(set->mode); 767 drm_mode_debug_printmodeline(set->mode);
763 set->crtc->fb = set->fb; 768 set->crtc->primary->fb = set->fb;
764 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 769 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
765 set->x, set->y, 770 set->x, set->y,
766 save_set.fb)) { 771 save_set.fb)) {
767 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 772 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
768 set->crtc->base.id); 773 set->crtc->base.id);
769 set->crtc->fb = save_set.fb; 774 set->crtc->primary->fb = save_set.fb;
770 ret = -EINVAL; 775 ret = -EINVAL;
771 goto fail; 776 goto fail;
772 } 777 }
@@ -777,17 +782,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
777 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); 782 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
778 } 783 }
779 } 784 }
780 drm_helper_disable_unused_functions(dev); 785 __drm_helper_disable_unused_functions(dev);
781 } else if (fb_changed) { 786 } else if (fb_changed) {
782 set->crtc->x = set->x; 787 set->crtc->x = set->x;
783 set->crtc->y = set->y; 788 set->crtc->y = set->y;
784 set->crtc->fb = set->fb; 789 set->crtc->primary->fb = set->fb;
785 ret = crtc_funcs->mode_set_base(set->crtc, 790 ret = crtc_funcs->mode_set_base(set->crtc,
786 set->x, set->y, save_set.fb); 791 set->x, set->y, save_set.fb);
787 if (ret != 0) { 792 if (ret != 0) {
788 set->crtc->x = save_set.x; 793 set->crtc->x = save_set.x;
789 set->crtc->y = save_set.y; 794 set->crtc->y = save_set.y;
790 set->crtc->fb = save_set.fb; 795 set->crtc->primary->fb = save_set.fb;
791 goto fail; 796 goto fail;
792 } 797 }
793 } 798 }
@@ -924,8 +929,16 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
924} 929}
925EXPORT_SYMBOL(drm_helper_connector_dpms); 930EXPORT_SYMBOL(drm_helper_connector_dpms);
926 931
927int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 932/**
928 struct drm_mode_fb_cmd2 *mode_cmd) 933 * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
934 * @fb: drm_framebuffer object to fill out
935 * @mode_cmd: metadata from the userspace fb creation request
936 *
937 * This helper can be used in a drivers fb_create callback to pre-fill the fb's
938 * metadata fields.
939 */
940void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
941 struct drm_mode_fb_cmd2 *mode_cmd)
929{ 942{
930 int i; 943 int i;
931 944
@@ -938,26 +951,47 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
938 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, 951 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
939 &fb->bits_per_pixel); 952 &fb->bits_per_pixel);
940 fb->pixel_format = mode_cmd->pixel_format; 953 fb->pixel_format = mode_cmd->pixel_format;
941
942 return 0;
943} 954}
944EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); 955EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
945 956
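A hedged sketch of a driver ->fb_create implementation built on this helper; the foo_framebuffer wrapper and foo_fb_funcs are hypothetical driver-local names:

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct foo_framebuffer *fb;
		int ret;

		fb = kzalloc(sizeof(*fb), GFP_KERNEL);
		if (!fb)
			return ERR_PTR(-ENOMEM);

		/* copy width/height/pitches/format from the request */
		drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);

		ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
		if (ret) {
			kfree(fb);
			return ERR_PTR(ret);
		}

		return &fb->base;
	}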
946int drm_helper_resume_force_mode(struct drm_device *dev) 957/**
958 * drm_helper_resume_force_mode - force-restore mode setting configuration
959 * @dev: drm_device which should be restored
960 *
961 * Drivers which use the mode setting helpers can use this function to
962 * force-restore the mode setting configuration e.g. on resume or when something
963 * else might have trampled over the hw state (like some overzealous old BIOSen
964 * tended to do).
965 *
966 * This helper doesn't provide an error return value since restoring the old
967 * config should never fail due to resource allocation issues since the driver
968 * has successfully set the restored configuration already. Hence this should
969 * boil down to the equivalent of a few dpms on calls, which also don't provide
970 * an error code.
971 *
972 * Drivers where simply restoring an old configuration again might fail (e.g.
973 * due to slight differences in allocating shared resources when the
974 * configuration is restored in a different order than when userspace set it up)
975 * need to use their own restore logic.
976 */
977void drm_helper_resume_force_mode(struct drm_device *dev)
947{ 978{
948 struct drm_crtc *crtc; 979 struct drm_crtc *crtc;
949 struct drm_encoder *encoder; 980 struct drm_encoder *encoder;
950 struct drm_crtc_helper_funcs *crtc_funcs; 981 struct drm_crtc_helper_funcs *crtc_funcs;
951 int ret, encoder_dpms; 982 int encoder_dpms;
983 bool ret;
952 984
985 drm_modeset_lock_all(dev);
953 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 986 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
954 987
955 if (!crtc->enabled) 988 if (!crtc->enabled)
956 continue; 989 continue;
957 990
958 ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, 991 ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
959 crtc->x, crtc->y, crtc->fb); 992 crtc->x, crtc->y, crtc->primary->fb);
960 993
994 /* Restoring the old config should never fail! */
961 if (ret == false) 995 if (ret == false)
962 DRM_ERROR("failed to set mode on crtc %p\n", crtc); 996 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
963 997
@@ -980,12 +1014,29 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
980 drm_helper_choose_crtc_dpms(crtc)); 1014 drm_helper_choose_crtc_dpms(crtc));
981 } 1015 }
982 } 1016 }
1017
983 /* disable the unused connectors while restoring the modesetting */ 1018 /* disable the unused connectors while restoring the modesetting */
984 drm_helper_disable_unused_functions(dev); 1019 __drm_helper_disable_unused_functions(dev);
985 return 0; 1020 drm_modeset_unlock_all(dev);
986} 1021}
987EXPORT_SYMBOL(drm_helper_resume_force_mode); 1022EXPORT_SYMBOL(drm_helper_resume_force_mode);
988 1023
1024/**
1025 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
1026 * @dev: drm_device whose connector state changed
1027 *
1028 * This function fires off the uevent for userspace and also calls the
1029 * output_poll_changed function, which is most commonly used to inform the fbdev
1030 * emulation code and allow it to update the fbcon output configuration.
1031 *
1032 * Drivers should call this from their hotplug handling code when a change is
1033 * detected. Note that this function does not do any output detection of its
1034 * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
1035 * driver already.
1036 *
1037 * This function must be called from process context with no mode
1038 * setting locks held.
1039 */
989void drm_kms_helper_hotplug_event(struct drm_device *dev) 1040void drm_kms_helper_hotplug_event(struct drm_device *dev)
990{ 1041{
991 /* send a uevent + call fbdev */ 1042 /* send a uevent + call fbdev */
@@ -1054,6 +1105,16 @@ static void output_poll_execute(struct work_struct *work)
1054 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); 1105 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
1055} 1106}
1056 1107
1108/**
1109 * drm_kms_helper_poll_disable - disable output polling
1110 * @dev: drm_device
1111 *
1112 * This function disables the output polling work.
1113 *
1114 * Drivers can call this helper from their device suspend implementation. It is
1115 * not an error to call this even when output polling isn't enabled or already
1116 * disabled.
1117 */
1057void drm_kms_helper_poll_disable(struct drm_device *dev) 1118void drm_kms_helper_poll_disable(struct drm_device *dev)
1058{ 1119{
1059 if (!dev->mode_config.poll_enabled) 1120 if (!dev->mode_config.poll_enabled)
@@ -1062,6 +1123,16 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
1062} 1123}
1063EXPORT_SYMBOL(drm_kms_helper_poll_disable); 1124EXPORT_SYMBOL(drm_kms_helper_poll_disable);
1064 1125
1126/**
1127 * drm_kms_helper_poll_enable - re-enable output polling.
1128 * @dev: drm_device
1129 *
1130 * This function re-enables the output polling work.
1131 *
1132 * Drivers can call this helper from their device resume implementation. It is
1133 * an error to call this when the output polling support has not yet been set
1134 * up.
1135 */
1065void drm_kms_helper_poll_enable(struct drm_device *dev) 1136void drm_kms_helper_poll_enable(struct drm_device *dev)
1066{ 1137{
1067 bool poll = false; 1138 bool poll = false;
@@ -1081,6 +1152,25 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
1081} 1152}
1082EXPORT_SYMBOL(drm_kms_helper_poll_enable); 1153EXPORT_SYMBOL(drm_kms_helper_poll_enable);
1083 1154
1155/**
1156 * drm_kms_helper_poll_init - initialize and enable output polling
1157 * @dev: drm_device
1158 *
1159 * This function initializes and then also enables output polling support for
1160 * @dev. Drivers which do not have reliable hotplug support in hardware can use
1161 * this helper infrastructure to regularly poll such connectors for changes in
1162 * their connection state.
1163 *
1164 * Drivers can control which connectors are polled by setting the
1165 * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
1166 * connectors where probing live outputs can result in visual distortion, drivers
1167 * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag.
1168 * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
1169 * completely ignored by the polling logic.
1170 *
1171 * Note that a connector can be both polled and probed from the hotplug handler,
1172 * in case the hotplug interrupt is known to be unreliable.
1173 */
1084void drm_kms_helper_poll_init(struct drm_device *dev) 1174void drm_kms_helper_poll_init(struct drm_device *dev)
1085{ 1175{
1086 INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute); 1176 INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
@@ -1090,12 +1180,39 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
1090} 1180}
1091EXPORT_SYMBOL(drm_kms_helper_poll_init); 1181EXPORT_SYMBOL(drm_kms_helper_poll_init);
1092 1182
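A hedged init-time sketch: flag a connector for full polling before starting the worker (a connector with working HPD would instead set DRM_CONNECTOR_POLL_HPD):

	/* analog output without hotplug detection: poll both directions */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	drm_kms_helper_poll_init(dev);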
1183/**
1184 * drm_kms_helper_poll_fini - disable output polling and clean it up
1185 * @dev: drm_device
1186 */
1093void drm_kms_helper_poll_fini(struct drm_device *dev) 1187void drm_kms_helper_poll_fini(struct drm_device *dev)
1094{ 1188{
1095 drm_kms_helper_poll_disable(dev); 1189 drm_kms_helper_poll_disable(dev);
1096} 1190}
1097EXPORT_SYMBOL(drm_kms_helper_poll_fini); 1191EXPORT_SYMBOL(drm_kms_helper_poll_fini);
1098 1192
1193/**
1194 * drm_helper_hpd_irq_event - hotplug processing
1195 * @dev: drm_device
1196 *
1197 * Drivers can use this helper function to run a detect cycle on all connectors
1198 * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
1199 * other connectors are ignored, which is useful to avoid reprobing fixed
1200 * panels.
1201 *
1202 * This helper function is useful for drivers which can't or don't track hotplug
1203 * interrupts for each connector.
1204 *
1205 * Drivers which support hotplug interrupts for each connector individually and
1206 * which have a more fine-grained detect logic should bypass this code and
1207 * directly call drm_kms_helper_hotplug_event() in case the connector state
1208 * changed.
1209 *
1210 * This function must be called from process context with no mode
1211 * setting locks held.
1212 *
1213 * Note that a connector can be both polled and probed from the hotplug handler,
1214 * in case the hotplug interrupt is known to be unreliable.
1215 */
1099bool drm_helper_hpd_irq_event(struct drm_device *dev) 1216bool drm_helper_hpd_irq_event(struct drm_device *dev)
1100{ 1217{
1101 struct drm_connector *connector; 1218 struct drm_connector *connector;
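A hedged sketch of the intended calling convention: the hard interrupt only schedules a work item, because this helper must run in process context without mode setting locks held (the foo_ names are hypothetical):

	static void foo_hotplug_work_func(struct work_struct *work)
	{
		struct foo_device *foo = container_of(work, struct foo_device,
						      hotplug_work);

		drm_helper_hpd_irq_event(foo->drm);
	}

	static irqreturn_t foo_hpd_irq(int irq, void *arg)
	{
		struct foo_device *foo = arg;

		schedule_work(&foo->hotplug_work);

		return IRQ_HANDLED;
	}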
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
new file mode 100644
index 000000000000..a2945ee6d675
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright © 2006 Keith Packard
3 * Copyright © 2007-2008 Dave Airlie
4 * Copyright © 2007-2008 Intel Corporation
5 * Jesse Barnes <jesse.barnes@intel.com>
6 * Copyright © 2014 Intel Corporation
7 * Daniel Vetter <daniel.vetter@ffwll.ch>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28/*
29 * This header file contains mode setting related functions and definitions
30 * which are only used within the drm module as internal implementation details
31 * and are not exported to drivers.
32 */
33
34int drm_mode_object_get(struct drm_device *dev,
35 struct drm_mode_object *obj, uint32_t obj_type);
36void drm_mode_object_put(struct drm_device *dev,
37 struct drm_mode_object *object);
38
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9e978aae8972..27671489477d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -346,3 +346,399 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
346 } 346 }
347} 347}
348EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); 348EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
349
350/**
351 * DOC: dp helpers
352 *
353 * The DisplayPort AUX channel is an abstraction to allow generic, driver-
354 * independent access to AUX functionality. Drivers can take advantage of
355 * this by filling in the fields of the drm_dp_aux structure.
356 *
357 * Transactions are described using a hardware-independent drm_dp_aux_msg
358 * structure, which is passed into a driver's .transfer() implementation.
359 * Both native and I2C-over-AUX transactions are supported.
360 */
361
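A hedged sketch of the wiring the DOC comment describes; the foo_ names and the hardware access behind foo_hw_aux_xfer() are hypothetical:

	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
					struct drm_dp_aux_msg *msg)
	{
		struct foo_dp *dp = container_of(aux, struct foo_dp, aux);

		/* feed msg->request/address/buffer/size to the AUX block,
		 * fill in msg->reply and return the bytes transferred */
		return foo_hw_aux_xfer(dp, msg);
	}

	/* somewhere in the driver's probe path */
	dp->aux.name = "foo-dp-aux";
	dp->aux.dev = &pdev->dev;
	dp->aux.transfer = foo_aux_transfer;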
362static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
363 unsigned int offset, void *buffer, size_t size)
364{
365 struct drm_dp_aux_msg msg;
366 unsigned int retry;
367 int err;
368
369 memset(&msg, 0, sizeof(msg));
370 msg.address = offset;
371 msg.request = request;
372 msg.buffer = buffer;
373 msg.size = size;
374
375 /*
376 * The specification doesn't give any recommendation on how often to
377 * retry native transactions, so retry 7 times like for I2C-over-AUX
378 * transactions.
379 */
380 for (retry = 0; retry < 7; retry++) {
381 err = aux->transfer(aux, &msg);
382 if (err < 0) {
383 if (err == -EBUSY)
384 continue;
385
386 return err;
387 }
388
389
390 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
391 case DP_AUX_NATIVE_REPLY_ACK:
392 if (err < size)
393 return -EPROTO;
394 return err;
395
396 case DP_AUX_NATIVE_REPLY_NACK:
397 return -EIO;
398
399 case DP_AUX_NATIVE_REPLY_DEFER:
400 usleep_range(400, 500);
401 break;
402 }
403 }
404
405 DRM_DEBUG_KMS("too many retries, giving up\n");
406 return -EIO;
407}
408
409/**
410 * drm_dp_dpcd_read() - read a series of bytes from the DPCD
411 * @aux: DisplayPort AUX channel
412 * @offset: address of the (first) register to read
413 * @buffer: buffer to store the register values
414 * @size: number of bytes in @buffer
415 *
416 * Returns the number of bytes transferred on success, or a negative error
417 * code on failure. -EIO is returned if the request was NAKed by the sink or
418 * if the retry count was exceeded. If not all bytes were transferred, this
419 * function returns -EPROTO. Errors from the underlying AUX channel transfer
420 * function, with the exception of -EBUSY (which causes the transaction to
421 * be retried), are propagated to the caller.
422 */
423ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
424 void *buffer, size_t size)
425{
426 return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
427 size);
428}
429EXPORT_SYMBOL(drm_dp_dpcd_read);
430
431/**
432 * drm_dp_dpcd_write() - write a series of bytes to the DPCD
433 * @aux: DisplayPort AUX channel
434 * @offset: address of the (first) register to write
435 * @buffer: buffer containing the values to write
436 * @size: number of bytes in @buffer
437 *
438 * Returns the number of bytes transferred on success, or a negative error
439 * code on failure. -EIO is returned if the request was NAKed by the sink or
440 * if the retry count was exceeded. If not all bytes were transferred, this
441 * function returns -EPROTO. Errors from the underlying AUX channel transfer
442 * function, with the exception of -EBUSY (which causes the transaction to
443 * be retried), are propagated to the caller.
444 */
445ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
446 void *buffer, size_t size)
447{
448 return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
449 size);
450}
451EXPORT_SYMBOL(drm_dp_dpcd_write);
452
453/**
454 * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
455 * @aux: DisplayPort AUX channel
456 * @status: buffer to store the link status in (must be at least 6 bytes)
457 *
458 * Returns the number of bytes transferred on success or a negative error
459 * code on failure.
460 */
461int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
462 u8 status[DP_LINK_STATUS_SIZE])
463{
464 return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
465 DP_LINK_STATUS_SIZE);
466}
467EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
468
469/**
470 * drm_dp_link_probe() - probe a DisplayPort link for capabilities
471 * @aux: DisplayPort AUX channel
472 * @link: pointer to structure in which to return link capabilities
473 *
474 * The structure filled in by this function can usually be passed directly
475 * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
476 * configure the link based on the link's capabilities.
477 *
478 * Returns 0 on success or a negative error code on failure.
479 */
480int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
481{
482 u8 values[3];
483 int err;
484
485 memset(link, 0, sizeof(*link));
486
487 err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
488 if (err < 0)
489 return err;
490
491 link->revision = values[0];
492 link->rate = drm_dp_bw_code_to_link_rate(values[1]);
493 link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
494
495 if (values[2] & DP_ENHANCED_FRAME_CAP)
496 link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
497
498 return 0;
499}
500EXPORT_SYMBOL(drm_dp_link_probe);
501
502/**
503 * drm_dp_link_power_up() - power up a DisplayPort link
504 * @aux: DisplayPort AUX channel
505 * @link: pointer to a structure containing the link configuration
506 *
507 * Returns 0 on success or a negative error code on failure.
508 */
509int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
510{
511 u8 value;
512 int err;
513
514 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
515 if (link->revision < 0x11)
516 return 0;
517
518 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
519 if (err < 0)
520 return err;
521
522 value &= ~DP_SET_POWER_MASK;
523 value |= DP_SET_POWER_D0;
524
525 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
526 if (err < 0)
527 return err;
528
529 /*
530 * According to the DP 1.1 specification, a "Sink Device must exit the
531 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
532 * Control Field" (register 0x600)).
533 */
534 usleep_range(1000, 2000);
535
536 return 0;
537}
538EXPORT_SYMBOL(drm_dp_link_power_up);
539
540/**
541 * drm_dp_link_configure() - configure a DisplayPort link
542 * @aux: DisplayPort AUX channel
543 * @link: pointer to a structure containing the link configuration
544 *
545 * Returns 0 on success or a negative error code on failure.
546 */
547int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
548{
549 u8 values[2];
550 int err;
551
552 values[0] = drm_dp_link_rate_to_bw_code(link->rate);
553 values[1] = link->num_lanes;
554
555 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
556 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
557
558 err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
559 if (err < 0)
560 return err;
561
562 return 0;
563}
564EXPORT_SYMBOL(drm_dp_link_configure);
565
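The three link helpers compose into the bring-up sequence the kerneldoc above hints at; a minimal sketch, assuming "aux" is an already operational AUX channel:

	struct drm_dp_link link;
	int err;

	err = drm_dp_link_probe(aux, &link);	/* read DPCD capabilities */
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(aux, &link);	/* D0, waits out the 1 ms */
	if (err < 0)
		return err;

	err = drm_dp_link_configure(aux, &link);	/* rate + lane count */
	if (err < 0)
		return err;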
566/*
567 * I2C-over-AUX implementation
568 */
569
570static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
571{
572 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
573 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
574 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
575 I2C_FUNC_10BIT_ADDR;
576}
577
578/*
579 * Transfer a single I2C-over-AUX message and handle various error conditions,
580 * retrying the transaction as appropriate.
581 */
582static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
583{
584 unsigned int retry;
585 int err;
586
587 /*
588 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
589 * is required to retry at least seven times upon receiving AUX_DEFER
590 * before giving up the AUX transaction.
591 */
592 for (retry = 0; retry < 7; retry++) {
593 err = aux->transfer(aux, msg);
594 if (err < 0) {
595 if (err == -EBUSY)
596 continue;
597
598 DRM_DEBUG_KMS("transaction failed: %d\n", err);
599 return err;
600 }
601
602
603 switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
604 case DP_AUX_NATIVE_REPLY_ACK:
605 /*
606 * For I2C-over-AUX transactions this isn't enough, we
607 * need to check for the I2C ACK reply.
608 */
609 break;
610
611 case DP_AUX_NATIVE_REPLY_NACK:
612 DRM_DEBUG_KMS("native nack\n");
613 return -EREMOTEIO;
614
615 case DP_AUX_NATIVE_REPLY_DEFER:
616 DRM_DEBUG_KMS("native defer");
617 /*
618 * We could check for I2C bit rate capabilities and if
619 * available adjust this interval. We could also be
620 * more careful with DP-to-legacy adapters where a
621 * long legacy cable may force very low I2C bit rates.
622 *
623 * For now just defer for long enough to hopefully be
624 * safe for all use-cases.
625 */
626 usleep_range(500, 600);
627 continue;
628
629 default:
630 DRM_ERROR("invalid native reply %#04x\n", msg->reply);
631 return -EREMOTEIO;
632 }
633
634 switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
635 case DP_AUX_I2C_REPLY_ACK:
636 /*
637 * Both native ACK and I2C ACK replies received. We
638 * can assume the transfer was successful.
639 */
640 if (err < msg->size)
641 return -EPROTO;
642 return 0;
643
644 case DP_AUX_I2C_REPLY_NACK:
645 DRM_DEBUG_KMS("I2C nack\n");
646 return -EREMOTEIO;
647
648 case DP_AUX_I2C_REPLY_DEFER:
649 DRM_DEBUG_KMS("I2C defer\n");
650 usleep_range(400, 500);
651 continue;
652
653 default:
654 DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
655 return -EREMOTEIO;
656 }
657 }
658
659 DRM_DEBUG_KMS("too many retries, giving up\n");
660 return -EREMOTEIO;
661}
662
663static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
664 int num)
665{
666 struct drm_dp_aux *aux = adapter->algo_data;
667 unsigned int i, j;
668
669 for (i = 0; i < num; i++) {
670 struct drm_dp_aux_msg msg;
671 int err;
672
673 /*
674 * Many hardware implementations support FIFOs larger than a
675 * single byte, but it has been empirically determined that
676 * transferring data in larger chunks can actually lead to
677 * decreased performance. Therefore each message is simply
678 * transferred byte-by-byte.
679 */
680 for (j = 0; j < msgs[i].len; j++) {
681 memset(&msg, 0, sizeof(msg));
682 msg.address = msgs[i].addr;
683
684 msg.request = (msgs[i].flags & I2C_M_RD) ?
685 DP_AUX_I2C_READ :
686 DP_AUX_I2C_WRITE;
687
688 /*
689 * All messages except the last one are middle-of-
690 * transfer messages.
691 */
692 if ((i < num - 1) || (j < msgs[i].len - 1))
693 msg.request |= DP_AUX_I2C_MOT;
694
695 msg.buffer = msgs[i].buf + j;
696 msg.size = 1;
697
698 err = drm_dp_i2c_do_msg(aux, &msg);
699 if (err < 0)
700 return err;
701 }
702 }
703
704 return num;
705}
706
707static const struct i2c_algorithm drm_dp_i2c_algo = {
708 .functionality = drm_dp_i2c_functionality,
709 .master_xfer = drm_dp_i2c_xfer,
710};
711
712/**
713 * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
714 * @aux: DisplayPort AUX channel
715 *
716 * Returns 0 on success or a negative error code on failure.
717 */
718int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
719{
720 aux->ddc.algo = &drm_dp_i2c_algo;
721 aux->ddc.algo_data = aux;
722 aux->ddc.retries = 3;
723
724 aux->ddc.class = I2C_CLASS_DDC;
725 aux->ddc.owner = THIS_MODULE;
726 aux->ddc.dev.parent = aux->dev;
727 aux->ddc.dev.of_node = aux->dev->of_node;
728
729 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
730 sizeof(aux->ddc.name));
731
732 return i2c_add_adapter(&aux->ddc);
733}
734EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus);
735
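Once registered, the adapter behaves like any other DDC bus; a hedged sketch of a connector ->get_modes hook fetching the EDID over I2C-over-AUX, which also ties in with drm_mode_connector_update_edid_property() from drm_crtc.c above:

	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, &aux->ddc);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;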
736/**
737 * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
738 * @aux: DisplayPort AUX channel
739 */
740void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux)
741{
742 i2c_del_adapter(&aux->ddc);
743}
744EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus);
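
A minimal driver-side sketch of wiring up these helpers, assuming a hypothetical foo_ driver that supplies its own hardware AUX transfer hook (only aux->transfer is device-specific; the adapter itself is filled in by the core above):

	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
					struct drm_dp_aux_msg *msg)
	{
		/* Run one AUX transaction in hardware, store the sink's
		 * reply code in msg->reply and return the payload count. */
		return foo_hw_aux_xfer(aux_to_foo(aux), msg);
	}

	static int foo_dp_aux_init(struct foo_encoder *foo)
	{
		foo->aux.name = "foo DP AUX";
		foo->aux.dev = foo->dev;
		foo->aux.transfer = foo_aux_transfer;

		/* Exposes an i2c adapter that tunnels EDID and other DDC
		 * traffic over AUX via drm_dp_i2c_xfer() above. */
		return drm_dp_aux_register_i2c_bus(&foo->aux);
	}
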
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 345be03c23db..03711d00aaae 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,45 @@ static int drm_version(struct drm_device *dev, void *data,
286} 286}
287 287
288/** 288/**
289 * drm_ioctl_permit - Check ioctl permissions against caller
290 *
291 * @flags: ioctl permission flags.
292 * @file_priv: Pointer to struct drm_file identifying the caller.
293 *
294 * Checks whether the caller is allowed to run an ioctl with the
295 * indicated permissions. If so, returns zero. Otherwise returns an
296 * error code suitable for ioctl return.
297 */
298static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
299{
300 /* ROOT_ONLY is only for CAP_SYS_ADMIN */
301 if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
302 return -EACCES;
303
304 /* AUTH is only for authenticated or render client */
305 if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
306 !file_priv->authenticated))
307 return -EACCES;
308
309 /* MASTER is only for master or control clients */
310 if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
311 !drm_is_control_client(file_priv)))
312 return -EACCES;
313
314 /* Control clients must be explicitly allowed */
315 if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
316 drm_is_control_client(file_priv)))
317 return -EACCES;
318
319 /* Render clients must be explicitly allowed */
320 if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
321 drm_is_render_client(file_priv)))
322 return -EACCES;
323
324 return 0;
325}
326
327/**
289 * Called whenever a process performs an ioctl on /dev/drm. 328 * Called whenever a process performs an ioctl on /dev/drm.
290 * 329 *
291 * \param inode device inode. 330 * \param inode device inode.
@@ -344,65 +383,64 @@ long drm_ioctl(struct file *filp,
344 383
345 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", 384 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
346 task_pid_nr(current), 385 task_pid_nr(current),
347 (long)old_encode_dev(file_priv->minor->device), 386 (long)old_encode_dev(file_priv->minor->kdev->devt),
348 file_priv->authenticated, ioctl->name); 387 file_priv->authenticated, ioctl->name);
349 388
350 /* Do not trust userspace, use our own definition */ 389 /* Do not trust userspace, use our own definition */
351 func = ioctl->func; 390 func = ioctl->func;
352 391
353 if (!func) { 392 if (unlikely(!func)) {
354 DRM_DEBUG("no function\n"); 393 DRM_DEBUG("no function\n");
355 retcode = -EINVAL; 394 retcode = -EINVAL;
356 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 395 goto err_i1;
357 ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) || 396 }
358 ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
359 (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
360 (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
361 retcode = -EACCES;
362 } else {
363 if (cmd & (IOC_IN | IOC_OUT)) {
364 if (asize <= sizeof(stack_kdata)) {
365 kdata = stack_kdata;
366 } else {
367 kdata = kmalloc(asize, GFP_KERNEL);
368 if (!kdata) {
369 retcode = -ENOMEM;
370 goto err_i1;
371 }
372 }
373 if (asize > usize)
374 memset(kdata + usize, 0, asize - usize);
375 }
376 397
377 if (cmd & IOC_IN) { 398 retcode = drm_ioctl_permit(ioctl->flags, file_priv);
378 if (copy_from_user(kdata, (void __user *)arg, 399 if (unlikely(retcode))
379 usize) != 0) { 400 goto err_i1;
380 retcode = -EFAULT; 401
402 if (cmd & (IOC_IN | IOC_OUT)) {
403 if (asize <= sizeof(stack_kdata)) {
404 kdata = stack_kdata;
405 } else {
406 kdata = kmalloc(asize, GFP_KERNEL);
407 if (!kdata) {
408 retcode = -ENOMEM;
381 goto err_i1; 409 goto err_i1;
382 } 410 }
383 } else
384 memset(kdata, 0, usize);
385
386 if (ioctl->flags & DRM_UNLOCKED)
387 retcode = func(dev, kdata, file_priv);
388 else {
389 mutex_lock(&drm_global_mutex);
390 retcode = func(dev, kdata, file_priv);
391 mutex_unlock(&drm_global_mutex);
392 } 411 }
412 if (asize > usize)
413 memset(kdata + usize, 0, asize - usize);
414 }
393 415
394 if (cmd & IOC_OUT) { 416 if (cmd & IOC_IN) {
395 if (copy_to_user((void __user *)arg, kdata, 417 if (copy_from_user(kdata, (void __user *)arg,
396 usize) != 0) 418 usize) != 0) {
397 retcode = -EFAULT; 419 retcode = -EFAULT;
420 goto err_i1;
398 } 421 }
422 } else
423 memset(kdata, 0, usize);
424
425 if (ioctl->flags & DRM_UNLOCKED)
426 retcode = func(dev, kdata, file_priv);
427 else {
428 mutex_lock(&drm_global_mutex);
429 retcode = func(dev, kdata, file_priv);
430 mutex_unlock(&drm_global_mutex);
431 }
432
433 if (cmd & IOC_OUT) {
434 if (copy_to_user((void __user *)arg, kdata,
435 usize) != 0)
436 retcode = -EFAULT;
399 } 437 }
400 438
401 err_i1: 439 err_i1:
402 if (!ioctl) 440 if (!ioctl)
403 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", 441 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
404 task_pid_nr(current), 442 task_pid_nr(current),
405 (long)old_encode_dev(file_priv->minor->device), 443 (long)old_encode_dev(file_priv->minor->kdev->devt),
406 file_priv->authenticated, cmd, nr); 444 file_priv->authenticated, cmd, nr);
407 445
408 if (kdata != stack_kdata) 446 if (kdata != stack_kdata)
@@ -412,3 +450,21 @@ long drm_ioctl(struct file *filp,
412 return retcode; 450 return retcode;
413} 451}
414EXPORT_SYMBOL(drm_ioctl); 452EXPORT_SYMBOL(drm_ioctl);
453
454/**
455 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
456 *
457 * @nr: Ioctl number.
458 * @flags: Where to return the ioctl permission flags
459 */
460bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
461{
462 if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
463 (nr < DRM_COMMAND_BASE)) {
464 *flags = drm_ioctls[nr].flags;
465 return true;
466 }
467
468 return false;
469}
470EXPORT_SYMBOL(drm_ioctl_flags);
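
drm_ioctl_permit() centralizes the checks that drivers request through their ioctl table flags. A sketch of such a table for a made-up foo driver (the FOO_* ioctls and handlers are illustrative, not part of this patch):

	static const struct drm_ioctl_desc foo_ioctls[] = {
		/* Authenticated clients only, but fine on render nodes. */
		DRM_IOCTL_DEF_DRV(FOO_GEM_SUBMIT, foo_gem_submit_ioctl,
				  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
		/* Modesetting: restricted to the current master. */
		DRM_IOCTL_DEF_DRV(FOO_SET_SCANOUT, foo_set_scanout_ioctl,
				  DRM_UNLOCKED | DRM_MASTER),
		/* Privileged maintenance path: CAP_SYS_ADMIN only. */
		DRM_IOCTL_DEF_DRV(FOO_RESET, foo_reset_ioctl,
				  DRM_UNLOCKED | DRM_ROOT_ONLY),
	};
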
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b924306b8477..d4e3f9d9370f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1098,10 +1098,14 @@ EXPORT_SYMBOL(drm_edid_is_valid);
1098/** 1098/**
1099 * Get EDID information via I2C. 1099 * Get EDID information via I2C.
1100 * 1100 *
1101 * \param adapter : i2c device adaptor 1101 * @adapter: i2c device adaptor
1102 * \param buf : EDID data buffer to be filled 1102 * @buf: EDID data buffer to be filled
1103 * \param len : EDID data buffer length 1103 * @block: 128 byte EDID block to start fetching from
1104 * \return 0 on success or -1 on failure. 1104 * @len: EDID data buffer length to fetch
1105 *
1106 * Returns:
1107 *
1108 * 0 on success or -1 on failure.
1105 * 1109 *
1106 * Try to fetch EDID information by calling i2c driver function. 1110 * Try to fetch EDID information by calling i2c driver function.
1107 */ 1111 */
@@ -1243,9 +1247,11 @@ out:
1243 1247
1244/** 1248/**
1245 * Probe DDC presence. 1249 * Probe DDC presence.
1250 * @adapter: i2c adapter to probe
1251 *
1252 * Returns:
1246 * 1253 *
1247 * \param adapter : i2c device adaptor 1254 * 1 on success
1248 * \return 1 on success
1249 */ 1255 */
1250bool 1256bool
1251drm_probe_ddc(struct i2c_adapter *adapter) 1257drm_probe_ddc(struct i2c_adapter *adapter)
@@ -1586,8 +1592,10 @@ bad_std_timing(u8 a, u8 b)
1586 1592
1587/** 1593/**
1588 * drm_mode_std - convert standard mode info (width, height, refresh) into mode 1594 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
1595 * @connector: connector for the EDID block
1596 * @edid: EDID block to scan
1589 * @t: standard timing params 1597 * @t: standard timing params
1590 * @timing_level: standard timing level 1598 * @revision: standard timing level
1591 * 1599 *
1592 * Take the standard timing params (in this case width, aspect, and refresh) 1600 * Take the standard timing params (in this case width, aspect, and refresh)
1593 * and convert them into a real mode using CVT/GTF/DMT. 1601 * and convert them into a real mode using CVT/GTF/DMT.
@@ -2132,6 +2140,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
2132 2140
2133/** 2141/**
2134 * add_established_modes - get est. modes from EDID and add them 2142 * add_established_modes - get est. modes from EDID and add them
2143 * @connector: connector for the EDID block
2135 * @edid: EDID block to scan 2144 * @edid: EDID block to scan
2136 * 2145 *
2137 * Each EDID block contains a bitmap of the supported "established modes" list 2146 * Each EDID block contains a bitmap of the supported "established modes" list
@@ -2194,6 +2203,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
2194 2203
2195/** 2204/**
2196 * add_standard_modes - get std. modes from EDID and add them 2205 * add_standard_modes - get std. modes from EDID and add them
2206 * @connector: connector for the EDID block
2197 * @edid: EDID block to scan 2207 * @edid: EDID block to scan
2198 * 2208 *
2199 * Standard modes can be calculated using the appropriate standard (DMT, 2209 * Standard modes can be calculated using the appropriate standard (DMT,
@@ -2580,6 +2590,9 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
2580 return NULL; 2590 return NULL;
2581 2591
2582 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); 2592 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
2593 if (!newmode)
2594 return NULL;
2595
2583 newmode->vrefresh = 0; 2596 newmode->vrefresh = 0;
2584 2597
2585 return newmode; 2598 return newmode;
@@ -3300,6 +3313,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
3300 3313
3301/** 3314/**
3302 * drm_detect_monitor_audio - check monitor audio capability 3315 * drm_detect_monitor_audio - check monitor audio capability
3316 * @edid: EDID block to scan
3303 * 3317 *
3304 * Monitor should have CEA extension block. 3318 * Monitor should have CEA extension block.
3305 * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic 3319 * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
@@ -3345,6 +3359,7 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
3345 3359
3346/** 3360/**
3347 * drm_rgb_quant_range_selectable - is RGB quantization range selectable? 3361 * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
3362 * @edid: EDID block to scan
3348 * 3363 *
3349 * Check whether the monitor reports the RGB quantization range selection 3364 * Check whether the monitor reports the RGB quantization range selection
3350 * as supported. The AVI infoframe can then be used to inform the monitor 3365 * as supported. The AVI infoframe can then be used to inform the monitor
@@ -3564,8 +3579,8 @@ void drm_set_preferred_mode(struct drm_connector *connector,
3564 struct drm_display_mode *mode; 3579 struct drm_display_mode *mode;
3565 3580
3566 list_for_each_entry(mode, &connector->probed_modes, head) { 3581 list_for_each_entry(mode, &connector->probed_modes, head) {
3567 if (drm_mode_width(mode) == hpref && 3582 if (mode->hdisplay == hpref &&
3568 drm_mode_height(mode) == vpref) 3583 mode->vdisplay == vpref)
3569 mode->type |= DRM_MODE_TYPE_PREFERRED; 3584 mode->type |= DRM_MODE_TYPE_PREFERRED;
3570 } 3585 }
3571} 3586}
@@ -3599,6 +3614,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3599 3614
3600 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3615 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3601 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 3616 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
3617 frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
3602 3618
3603 return 0; 3619 return 0;
3604} 3620}
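
With the underscan default now set centrally, a driver's AVI infoframe path reduces to roughly the following (a sketch; mode is assumed to be the drm_display_mode in scope, and hdmi_avi_infoframe_pack() comes from linux/hdmi.h):

	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
	struct hdmi_avi_infoframe frame;
	ssize_t len;
	int err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return err;

	/* frame.scan_mode now defaults to HDMI_SCAN_MODE_UNDERSCAN */
	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
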
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 98a03639b413..04d3fd3658f3 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -232,7 +232,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
232 232
233 list_for_each_entry(c, &dev->mode_config.crtc_list, head) { 233 list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
234 if (crtc->base.id == c->base.id) 234 if (crtc->base.id == c->base.id)
235 return c->fb; 235 return c->primary->fb;
236 } 236 }
237 237
238 return NULL; 238 return NULL;
@@ -291,7 +291,8 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
291 drm_warn_on_modeset_not_all_locked(dev); 291 drm_warn_on_modeset_not_all_locked(dev);
292 292
293 list_for_each_entry(plane, &dev->mode_config.plane_list, head) 293 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
294 drm_plane_force_disable(plane); 294 if (plane->type != DRM_PLANE_TYPE_PRIMARY)
295 drm_plane_force_disable(plane);
295 296
296 for (i = 0; i < fb_helper->crtc_count; i++) { 297 for (i = 0; i < fb_helper->crtc_count; i++) {
297 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 298 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
@@ -365,9 +366,9 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
365 return false; 366 return false;
366 367
367 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 368 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
368 if (crtc->fb) 369 if (crtc->primary->fb)
369 crtcs_bound++; 370 crtcs_bound++;
370 if (crtc->fb == fb_helper->fb) 371 if (crtc->primary->fb == fb_helper->fb)
371 bound++; 372 bound++;
372 } 373 }
373 374
@@ -516,6 +517,9 @@ int drm_fb_helper_init(struct drm_device *dev,
516 struct drm_crtc *crtc; 517 struct drm_crtc *crtc;
517 int i; 518 int i;
518 519
520 if (!max_conn_count)
521 return -EINVAL;
522
519 fb_helper->dev = dev; 523 fb_helper->dev = dev;
520 524
521 INIT_LIST_HEAD(&fb_helper->kernel_fb_list); 525 INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
@@ -809,8 +813,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
809 struct drm_fb_helper *fb_helper = info->par; 813 struct drm_fb_helper *fb_helper = info->par;
810 struct drm_device *dev = fb_helper->dev; 814 struct drm_device *dev = fb_helper->dev;
811 struct fb_var_screeninfo *var = &info->var; 815 struct fb_var_screeninfo *var = &info->var;
812 int ret;
813 int i;
814 816
815 if (var->pixclock != 0) { 817 if (var->pixclock != 0) {
816 DRM_ERROR("PIXEL CLOCK SET\n"); 818 DRM_ERROR("PIXEL CLOCK SET\n");
@@ -818,13 +820,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
818 } 820 }
819 821
820 drm_modeset_lock_all(dev); 822 drm_modeset_lock_all(dev);
821 for (i = 0; i < fb_helper->crtc_count; i++) { 823 drm_fb_helper_restore_fbdev_mode(fb_helper);
822 ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
823 if (ret) {
824 drm_modeset_unlock_all(dev);
825 return ret;
826 }
827 }
828 drm_modeset_unlock_all(dev); 824 drm_modeset_unlock_all(dev);
829 825
830 if (fb_helper->delayed_hotplug) { 826 if (fb_helper->delayed_hotplug) {
@@ -1136,19 +1132,20 @@ static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
1136 return count; 1132 return count;
1137} 1133}
1138 1134
1139static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) 1135struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
1140{ 1136{
1141 struct drm_display_mode *mode; 1137 struct drm_display_mode *mode;
1142 1138
1143 list_for_each_entry(mode, &fb_connector->connector->modes, head) { 1139 list_for_each_entry(mode, &fb_connector->connector->modes, head) {
1144 if (drm_mode_width(mode) > width || 1140 if (mode->hdisplay > width ||
1145 drm_mode_height(mode) > height) 1141 mode->vdisplay > height)
1146 continue; 1142 continue;
1147 if (mode->type & DRM_MODE_TYPE_PREFERRED) 1143 if (mode->type & DRM_MODE_TYPE_PREFERRED)
1148 return mode; 1144 return mode;
1149 } 1145 }
1150 return NULL; 1146 return NULL;
1151} 1147}
1148EXPORT_SYMBOL(drm_has_preferred_mode);
1152 1149
1153static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) 1150static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
1154{ 1151{
@@ -1157,11 +1154,12 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
1157 return cmdline_mode->specified; 1154 return cmdline_mode->specified;
1158} 1155}
1159 1156
1160static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 1157struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
1161 int width, int height) 1158 int width, int height)
1162{ 1159{
1163 struct drm_cmdline_mode *cmdline_mode; 1160 struct drm_cmdline_mode *cmdline_mode;
1164 struct drm_display_mode *mode = NULL; 1161 struct drm_display_mode *mode = NULL;
1162 bool prefer_non_interlace;
1165 1163
1166 cmdline_mode = &fb_helper_conn->cmdline_mode; 1164 cmdline_mode = &fb_helper_conn->cmdline_mode;
1167 if (cmdline_mode->specified == false) 1165 if (cmdline_mode->specified == false)
@@ -1173,6 +1171,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
1173 if (cmdline_mode->rb || cmdline_mode->margins) 1171 if (cmdline_mode->rb || cmdline_mode->margins)
1174 goto create_mode; 1172 goto create_mode;
1175 1173
1174 prefer_non_interlace = !cmdline_mode->interlace;
1175 again:
1176 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { 1176 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
1177 /* check width/height */ 1177 /* check width/height */
1178 if (mode->hdisplay != cmdline_mode->xres || 1178 if (mode->hdisplay != cmdline_mode->xres ||
@@ -1187,16 +1187,25 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
1187 if (cmdline_mode->interlace) { 1187 if (cmdline_mode->interlace) {
1188 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) 1188 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
1189 continue; 1189 continue;
1190 } else if (prefer_non_interlace) {
1191 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1192 continue;
1190 } 1193 }
1191 return mode; 1194 return mode;
1192 } 1195 }
1193 1196
1197 if (prefer_non_interlace) {
1198 prefer_non_interlace = false;
1199 goto again;
1200 }
1201
1194create_mode: 1202create_mode:
1195 mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev, 1203 mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
1196 cmdline_mode); 1204 cmdline_mode);
1197 list_add(&mode->head, &fb_helper_conn->connector->modes); 1205 list_add(&mode->head, &fb_helper_conn->connector->modes);
1198 return mode; 1206 return mode;
1199} 1207}
1208EXPORT_SYMBOL(drm_pick_cmdline_mode);
1200 1209
1201static bool drm_connector_enabled(struct drm_connector *connector, bool strict) 1210static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
1202{ 1211{
@@ -1539,9 +1548,11 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1539 1548
1540 drm_fb_helper_parse_command_line(fb_helper); 1549 drm_fb_helper_parse_command_line(fb_helper);
1541 1550
1551 mutex_lock(&dev->mode_config.mutex);
1542 count = drm_fb_helper_probe_connector_modes(fb_helper, 1552 count = drm_fb_helper_probe_connector_modes(fb_helper,
1543 dev->mode_config.max_width, 1553 dev->mode_config.max_width,
1544 dev->mode_config.max_height); 1554 dev->mode_config.max_height);
1555 mutex_unlock(&dev->mode_config.mutex);
1545 /* 1556 /*
1546 * we shouldn't end up with no modes here. 1557 * we shouldn't end up with no modes here.
1547 */ 1558 */
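
For drivers the fbdev bring-up sequence is unchanged; it now just fails cleanly for a zero connector count and probes modes under the proper lock. A sketch, with foo standing in for any driver's private structure:

	ret = drm_fb_helper_init(dev, &foo->fb_helper,
				 num_crtc, max_conn_count);
	if (ret)	/* now -EINVAL when max_conn_count == 0 */
		return ret;

	drm_fb_helper_single_add_all_connectors(&foo->fb_helper);
	drm_fb_helper_initial_config(&foo->fb_helper, preferred_bpp);
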
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 309023f12d7f..e1eba0b7cd45 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -39,12 +39,12 @@
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/module.h> 40#include <linux/module.h>
41 41
42/* from BKL pushdown: note that nothing else serializes idr_find() */ 42/* from BKL pushdown */
43DEFINE_MUTEX(drm_global_mutex); 43DEFINE_MUTEX(drm_global_mutex);
44EXPORT_SYMBOL(drm_global_mutex); 44EXPORT_SYMBOL(drm_global_mutex);
45 45
46static int drm_open_helper(struct inode *inode, struct file *filp, 46static int drm_open_helper(struct inode *inode, struct file *filp,
47 struct drm_device * dev); 47 struct drm_minor *minor);
48 48
49static int drm_setup(struct drm_device * dev) 49static int drm_setup(struct drm_device * dev)
50{ 50{
@@ -79,38 +79,23 @@ static int drm_setup(struct drm_device * dev)
79 */ 79 */
80int drm_open(struct inode *inode, struct file *filp) 80int drm_open(struct inode *inode, struct file *filp)
81{ 81{
82 struct drm_device *dev = NULL; 82 struct drm_device *dev;
83 int minor_id = iminor(inode);
84 struct drm_minor *minor; 83 struct drm_minor *minor;
85 int retcode = 0; 84 int retcode;
86 int need_setup = 0; 85 int need_setup = 0;
87 struct address_space *old_mapping;
88 struct address_space *old_imapping;
89
90 minor = idr_find(&drm_minors_idr, minor_id);
91 if (!minor)
92 return -ENODEV;
93 86
94 if (!(dev = minor->dev)) 87 minor = drm_minor_acquire(iminor(inode));
95 return -ENODEV; 88 if (IS_ERR(minor))
96 89 return PTR_ERR(minor);
97 if (drm_device_is_unplugged(dev))
98 return -ENODEV;
99 90
91 dev = minor->dev;
100 if (!dev->open_count++) 92 if (!dev->open_count++)
101 need_setup = 1; 93 need_setup = 1;
102 mutex_lock(&dev->struct_mutex);
103 old_imapping = inode->i_mapping;
104 old_mapping = dev->dev_mapping;
105 if (old_mapping == NULL)
106 dev->dev_mapping = &inode->i_data;
107 /* ihold ensures nobody can remove inode with our i_data */
108 ihold(container_of(dev->dev_mapping, struct inode, i_data));
109 inode->i_mapping = dev->dev_mapping;
110 filp->f_mapping = dev->dev_mapping;
111 mutex_unlock(&dev->struct_mutex);
112 94
113 retcode = drm_open_helper(inode, filp, dev); 95 /* share address_space across all char-devs of a single device */
96 filp->f_mapping = dev->anon_inode->i_mapping;
97
98 retcode = drm_open_helper(inode, filp, minor);
114 if (retcode) 99 if (retcode)
115 goto err_undo; 100 goto err_undo;
116 if (need_setup) { 101 if (need_setup) {
@@ -121,13 +106,8 @@ int drm_open(struct inode *inode, struct file *filp)
121 return 0; 106 return 0;
122 107
123err_undo: 108err_undo:
124 mutex_lock(&dev->struct_mutex);
125 filp->f_mapping = old_imapping;
126 inode->i_mapping = old_imapping;
127 iput(container_of(dev->dev_mapping, struct inode, i_data));
128 dev->dev_mapping = old_mapping;
129 mutex_unlock(&dev->struct_mutex);
130 dev->open_count--; 109 dev->open_count--;
110 drm_minor_release(minor);
131 return retcode; 111 return retcode;
132} 112}
133EXPORT_SYMBOL(drm_open); 113EXPORT_SYMBOL(drm_open);
@@ -143,33 +123,30 @@ EXPORT_SYMBOL(drm_open);
143 */ 123 */
144int drm_stub_open(struct inode *inode, struct file *filp) 124int drm_stub_open(struct inode *inode, struct file *filp)
145{ 125{
146 struct drm_device *dev = NULL; 126 struct drm_device *dev;
147 struct drm_minor *minor; 127 struct drm_minor *minor;
148 int minor_id = iminor(inode);
149 int err = -ENODEV; 128 int err = -ENODEV;
150 const struct file_operations *new_fops; 129 const struct file_operations *new_fops;
151 130
152 DRM_DEBUG("\n"); 131 DRM_DEBUG("\n");
153 132
154 mutex_lock(&drm_global_mutex); 133 mutex_lock(&drm_global_mutex);
155 minor = idr_find(&drm_minors_idr, minor_id); 134 minor = drm_minor_acquire(iminor(inode));
156 if (!minor) 135 if (IS_ERR(minor))
157 goto out; 136 goto out_unlock;
158
159 if (!(dev = minor->dev))
160 goto out;
161
162 if (drm_device_is_unplugged(dev))
163 goto out;
164 137
138 dev = minor->dev;
165 new_fops = fops_get(dev->driver->fops); 139 new_fops = fops_get(dev->driver->fops);
166 if (!new_fops) 140 if (!new_fops)
167 goto out; 141 goto out_release;
168 142
169 replace_fops(filp, new_fops); 143 replace_fops(filp, new_fops);
170 if (filp->f_op->open) 144 if (filp->f_op->open)
171 err = filp->f_op->open(inode, filp); 145 err = filp->f_op->open(inode, filp);
172out: 146
147out_release:
148 drm_minor_release(minor);
149out_unlock:
173 mutex_unlock(&drm_global_mutex); 150 mutex_unlock(&drm_global_mutex);
174 return err; 151 return err;
175} 152}
@@ -196,16 +173,16 @@ static int drm_cpu_valid(void)
196 * 173 *
197 * \param inode device inode. 174 * \param inode device inode.
198 * \param filp file pointer. 175 * \param filp file pointer.
199 * \param dev device. 176 * \param minor acquired minor-object.
200 * \return zero on success or a negative number on failure. 177 * \return zero on success or a negative number on failure.
201 * 178 *
202 * Creates and initializes a drm_file structure for the file private data in \p 179 * Creates and initializes a drm_file structure for the file private data in \p
203 * filp and adds it into the double linked list in \p dev. 180 * filp and adds it into the double linked list in \p dev.
204 */ 181 */
205static int drm_open_helper(struct inode *inode, struct file *filp, 182static int drm_open_helper(struct inode *inode, struct file *filp,
206 struct drm_device * dev) 183 struct drm_minor *minor)
207{ 184{
208 int minor_id = iminor(inode); 185 struct drm_device *dev = minor->dev;
209 struct drm_file *priv; 186 struct drm_file *priv;
210 int ret; 187 int ret;
211 188
@@ -216,7 +193,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
216 if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) 193 if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
217 return -EINVAL; 194 return -EINVAL;
218 195
219 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 196 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
220 197
221 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 198 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
222 if (!priv) 199 if (!priv)
@@ -226,11 +203,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
226 priv->filp = filp; 203 priv->filp = filp;
227 priv->uid = current_euid(); 204 priv->uid = current_euid();
228 priv->pid = get_pid(task_pid(current)); 205 priv->pid = get_pid(task_pid(current));
229 priv->minor = idr_find(&drm_minors_idr, minor_id); 206 priv->minor = minor;
230 if (!priv->minor) {
231 ret = -ENODEV;
232 goto out_put_pid;
233 }
234 207
235 /* for compatibility root is always authenticated */ 208 /* for compatibility root is always authenticated */
236 priv->always_authenticated = capable(CAP_SYS_ADMIN); 209 priv->always_authenticated = capable(CAP_SYS_ADMIN);
@@ -258,12 +231,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
258 231
259 /* if there is no current master make this fd it, but do not create 232 /* if there is no current master make this fd it, but do not create
260 * any master object for render clients */ 233 * any master object for render clients */
261 mutex_lock(&dev->struct_mutex); 234 mutex_lock(&dev->master_mutex);
262 if (!priv->minor->master && !drm_is_render_client(priv)) { 235 if (drm_is_primary_client(priv) && !priv->minor->master) {
263 /* create a new master */ 236 /* create a new master */
264 priv->minor->master = drm_master_create(priv->minor); 237 priv->minor->master = drm_master_create(priv->minor);
265 if (!priv->minor->master) { 238 if (!priv->minor->master) {
266 mutex_unlock(&dev->struct_mutex);
267 ret = -ENOMEM; 239 ret = -ENOMEM;
268 goto out_close; 240 goto out_close;
269 } 241 }
@@ -271,37 +243,31 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
271 priv->is_master = 1; 243 priv->is_master = 1;
272 /* take another reference for the copy in the local file priv */ 244 /* take another reference for the copy in the local file priv */
273 priv->master = drm_master_get(priv->minor->master); 245 priv->master = drm_master_get(priv->minor->master);
274
275 priv->authenticated = 1; 246 priv->authenticated = 1;
276 247
277 mutex_unlock(&dev->struct_mutex);
278 if (dev->driver->master_create) { 248 if (dev->driver->master_create) {
279 ret = dev->driver->master_create(dev, priv->master); 249 ret = dev->driver->master_create(dev, priv->master);
280 if (ret) { 250 if (ret) {
281 mutex_lock(&dev->struct_mutex);
282 /* drop both references if this fails */ 251 /* drop both references if this fails */
283 drm_master_put(&priv->minor->master); 252 drm_master_put(&priv->minor->master);
284 drm_master_put(&priv->master); 253 drm_master_put(&priv->master);
285 mutex_unlock(&dev->struct_mutex);
286 goto out_close; 254 goto out_close;
287 } 255 }
288 } 256 }
289 mutex_lock(&dev->struct_mutex);
290 if (dev->driver->master_set) { 257 if (dev->driver->master_set) {
291 ret = dev->driver->master_set(dev, priv, true); 258 ret = dev->driver->master_set(dev, priv, true);
292 if (ret) { 259 if (ret) {
293 /* drop both references if this fails */ 260 /* drop both references if this fails */
294 drm_master_put(&priv->minor->master); 261 drm_master_put(&priv->minor->master);
295 drm_master_put(&priv->master); 262 drm_master_put(&priv->master);
296 mutex_unlock(&dev->struct_mutex);
297 goto out_close; 263 goto out_close;
298 } 264 }
299 } 265 }
300 } else if (!drm_is_render_client(priv)) { 266 } else if (drm_is_primary_client(priv)) {
301 /* get a reference to the master */ 267 /* get a reference to the master */
302 priv->master = drm_master_get(priv->minor->master); 268 priv->master = drm_master_get(priv->minor->master);
303 } 269 }
304 mutex_unlock(&dev->struct_mutex); 270 mutex_unlock(&dev->master_mutex);
305 271
306 mutex_lock(&dev->struct_mutex); 272 mutex_lock(&dev->struct_mutex);
307 list_add(&priv->lhead, &dev->filelist); 273 list_add(&priv->lhead, &dev->filelist);
@@ -330,6 +296,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
330 return 0; 296 return 0;
331 297
332out_close: 298out_close:
299 mutex_unlock(&dev->master_mutex);
333 if (dev->driver->postclose) 300 if (dev->driver->postclose)
334 dev->driver->postclose(dev, priv); 301 dev->driver->postclose(dev, priv);
335out_prime_destroy: 302out_prime_destroy:
@@ -337,7 +304,6 @@ out_prime_destroy:
337 drm_prime_destroy_file_private(&priv->prime); 304 drm_prime_destroy_file_private(&priv->prime);
338 if (dev->driver->driver_features & DRIVER_GEM) 305 if (dev->driver->driver_features & DRIVER_GEM)
339 drm_gem_release(dev, priv); 306 drm_gem_release(dev, priv);
340out_put_pid:
341 put_pid(priv->pid); 307 put_pid(priv->pid);
342 kfree(priv); 308 kfree(priv);
343 filp->private_data = NULL; 309 filp->private_data = NULL;
@@ -435,7 +401,6 @@ int drm_lastclose(struct drm_device * dev)
435 401
436 drm_legacy_dma_takedown(dev); 402 drm_legacy_dma_takedown(dev);
437 403
438 dev->dev_mapping = NULL;
439 mutex_unlock(&dev->struct_mutex); 404 mutex_unlock(&dev->struct_mutex);
440 405
441 drm_legacy_dev_reinit(dev); 406 drm_legacy_dev_reinit(dev);
@@ -459,7 +424,8 @@ int drm_lastclose(struct drm_device * dev)
459int drm_release(struct inode *inode, struct file *filp) 424int drm_release(struct inode *inode, struct file *filp)
460{ 425{
461 struct drm_file *file_priv = filp->private_data; 426 struct drm_file *file_priv = filp->private_data;
462 struct drm_device *dev = file_priv->minor->dev; 427 struct drm_minor *minor = file_priv->minor;
428 struct drm_device *dev = minor->dev;
463 int retcode = 0; 429 int retcode = 0;
464 430
465 mutex_lock(&drm_global_mutex); 431 mutex_lock(&drm_global_mutex);
@@ -475,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
475 441
476 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 442 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
477 task_pid_nr(current), 443 task_pid_nr(current),
478 (long)old_encode_dev(file_priv->minor->device), 444 (long)old_encode_dev(file_priv->minor->kdev->devt),
479 dev->open_count); 445 dev->open_count);
480 446
481 /* Release any auth tokens that might point to this file_priv, 447 /* Release any auth tokens that might point to this file_priv,
@@ -518,11 +484,13 @@ int drm_release(struct inode *inode, struct file *filp)
518 } 484 }
519 mutex_unlock(&dev->ctxlist_mutex); 485 mutex_unlock(&dev->ctxlist_mutex);
520 486
521 mutex_lock(&dev->struct_mutex); 487 mutex_lock(&dev->master_mutex);
522 488
523 if (file_priv->is_master) { 489 if (file_priv->is_master) {
524 struct drm_master *master = file_priv->master; 490 struct drm_master *master = file_priv->master;
525 struct drm_file *temp; 491 struct drm_file *temp;
492
493 mutex_lock(&dev->struct_mutex);
526 list_for_each_entry(temp, &dev->filelist, lhead) { 494 list_for_each_entry(temp, &dev->filelist, lhead) {
527 if ((temp->master == file_priv->master) && 495 if ((temp->master == file_priv->master) &&
528 (temp != file_priv)) 496 (temp != file_priv))
@@ -541,6 +509,7 @@ int drm_release(struct inode *inode, struct file *filp)
541 master->lock.file_priv = NULL; 509 master->lock.file_priv = NULL;
542 wake_up_interruptible_all(&master->lock.lock_queue); 510 wake_up_interruptible_all(&master->lock.lock_queue);
543 } 511 }
512 mutex_unlock(&dev->struct_mutex);
544 513
545 if (file_priv->minor->master == file_priv->master) { 514 if (file_priv->minor->master == file_priv->master) {
546 /* drop the reference held by the minor */ 515 /* drop the reference held by the minor */
@@ -550,13 +519,13 @@ int drm_release(struct inode *inode, struct file *filp)
550 } 519 }
551 } 520 }
552 521
553 BUG_ON(dev->dev_mapping == NULL); 522 /* drop the master reference held by the file priv */
554 iput(container_of(dev->dev_mapping, struct inode, i_data));
555
556 /* drop the reference held my the file priv */
557 if (file_priv->master) 523 if (file_priv->master)
558 drm_master_put(&file_priv->master); 524 drm_master_put(&file_priv->master);
559 file_priv->is_master = 0; 525 file_priv->is_master = 0;
526 mutex_unlock(&dev->master_mutex);
527
528 mutex_lock(&dev->struct_mutex);
560 list_del(&file_priv->lhead); 529 list_del(&file_priv->lhead);
561 mutex_unlock(&dev->struct_mutex); 530 mutex_unlock(&dev->struct_mutex);
562 531
@@ -581,6 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
581 } 550 }
582 mutex_unlock(&drm_global_mutex); 551 mutex_unlock(&drm_global_mutex);
583 552
553 drm_minor_release(minor);
554
584 return retcode; 555 return retcode;
585} 556}
586EXPORT_SYMBOL(drm_release); 557EXPORT_SYMBOL(drm_release);
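
Sharing one anonymous address_space across all char-devs of a device is what lets a driver invalidate every userspace mapping of an object in a single call, e.g. with the drm_vma_manager helper (a sketch, not code from this patch):

	/* Zap all CPU mappings of obj's fake mmap offset on unplug/purge. */
	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
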
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 5bbad873c798..9909bef59800 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -85,9 +85,9 @@
85#endif 85#endif
86 86
87/** 87/**
88 * Initialize the GEM device fields 88 * drm_gem_init - Initialize the GEM device fields
89 * @dev: drm_device structure to initialize
89 */ 90 */
90
91int 91int
92drm_gem_init(struct drm_device *dev) 92drm_gem_init(struct drm_device *dev)
93{ 93{
@@ -120,6 +120,11 @@ drm_gem_destroy(struct drm_device *dev)
120} 120}
121 121
122/** 122/**
123 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
124 * @dev: drm_device the object should be initialized for
125 * @obj: drm_gem_object to initialize
126 * @size: object size
127 *
123 * Initialize an already allocated GEM object of the specified size with 128 * Initialize an already allocated GEM object of the specified size with
124 * shmfs backing store. 129 * shmfs backing store.
125 */ 130 */
@@ -141,6 +146,11 @@ int drm_gem_object_init(struct drm_device *dev,
141EXPORT_SYMBOL(drm_gem_object_init); 146EXPORT_SYMBOL(drm_gem_object_init);
142 147
143/** 148/**
149 * drm_gem_private_object_init - initialize an allocated private GEM object
150 * @dev: drm_device the object should be initialized for
151 * @obj: drm_gem_object to initialize
152 * @size: object size
153 *
144 * Initialize an already allocated GEM object of the specified size with 154 * Initialize an already allocated GEM object of the specified size with
145 * no GEM provided backing store. Instead the caller is responsible for 155 * no GEM provided backing store. Instead the caller is responsible for
146 * backing the object and handling it. 156 * backing the object and handling it.
@@ -176,6 +186,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
176} 186}
177 187
178/** 188/**
189 * drm_gem_object_handle_free - release resources bound to userspace handles
190 * @obj: GEM object to clean up.
191 *
179 * Called after the last handle to the object has been closed 192 * Called after the last handle to the object has been closed
180 * 193 *
181 * Removes any name for the object. Note that this must be 194 * Removes any name for the object. Note that this must be
@@ -225,7 +238,12 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
225} 238}
226 239
227/** 240/**
228 * Removes the mapping from handle to filp for this object. 241 * drm_gem_handle_delete - deletes the given file-private handle
242 * @filp: drm file-private structure to use for the handle look up
243 * @handle: userspace handle to delete
244 *
245 * Removes the GEM handle from the @filp lookup table and if this is the last
246 * handle also cleans up linked resources like GEM names.
229 */ 247 */
230int 248int
231drm_gem_handle_delete(struct drm_file *filp, u32 handle) 249drm_gem_handle_delete(struct drm_file *filp, u32 handle)
@@ -270,6 +288,9 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
270 288
271/** 289/**
272 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers 290 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
291 * @file: drm file-private structure to remove the dumb handle from
292 * @dev: corresponding drm_device
293 * @handle: the dumb handle to remove
273 * 294 *
274 * This implements the ->dumb_destroy kms driver callback for drivers which use 295 * This implements the ->dumb_destroy kms driver callback for drivers which use
275 * gem to manage their backing storage. 296 * gem to manage their backing storage.
@@ -284,6 +305,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
284 305
285/** 306/**
286 * drm_gem_handle_create_tail - internal functions to create a handle 307 * drm_gem_handle_create_tail - internal functions to create a handle
308 * @file_priv: drm file-private structure to register the handle for
309 * @obj: object to register
310 * @handlep: pointer to return the created handle to the caller
287 * 311 *
288 * This expects the dev->object_name_lock to be held already and will drop it 312 * This expects the dev->object_name_lock to be held already and will drop it
289 * before returning. Used to avoid races in establishing new handles when 313 * before returning. Used to avoid races in establishing new handles when
@@ -336,6 +360,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
336} 360}
337 361
338/** 362/**
363 * drm_gem_handle_create - create a gem handle for an object
364 * @file_priv: drm file-private structure to register the handle for
365 * @obj: object to register
366 * @handlep: pointer to return the created handle to the caller
367 *
339 * Create a handle for this object. This adds a handle reference 368 * Create a handle for this object. This adds a handle reference
340 * to the object, which includes a regular reference count. Callers 369 * to the object, which includes a regular reference count. Callers
341 * will likely want to dereference the object afterwards. 370 * will likely want to dereference the object afterwards.
@@ -536,6 +565,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
536EXPORT_SYMBOL(drm_gem_object_lookup); 565EXPORT_SYMBOL(drm_gem_object_lookup);
537 566
538/** 567/**
568 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
569 * @dev: drm_device
570 * @data: ioctl data
571 * @file_priv: drm file-private structure
572 *
539 * Releases the handle to an mm object. 573 * Releases the handle to an mm object.
540 */ 574 */
541int 575int
@@ -554,6 +588,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
554} 588}
555 589
556/** 590/**
591 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
592 * @dev: drm_device
593 * @data: ioctl data
594 * @file_priv: drm file-private structure
595 *
557 * Create a global name for an object, returning the name. 596 * Create a global name for an object, returning the name.
558 * 597 *
559 * Note that the name does not hold a reference; when the object 598 * Note that the name does not hold a reference; when the object
@@ -601,6 +640,11 @@ err:
601} 640}
602 641
603/** 642/**
643 * drm_gem_open - implementation of the GEM_OPEN ioctl
644 * @dev: drm_device
645 * @data: ioctl data
646 * @file_priv: drm file-private structure
647 *
604 * Open an object using the global name, returning a handle and the size. 648 * Open an object using the global name, returning a handle and the size.
605 * 649 *
606 * This handle (of course) holds a reference to the object, so the object 650 * This handle (of course) holds a reference to the object, so the object
@@ -640,6 +684,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
640} 684}
641 685
642/** 686/**
687 * drm_gem_open - initializes GEM file-private structures at devnode open time
688 * @dev: drm_device which is being opened by userspace
689 * @file_private: drm file-private structure to set up
690 *
643 * Called at device open time, sets up the structure for handling refcounting 691 * Called at device open time, sets up the structure for handling refcounting
644 * of mm objects. 692 * of mm objects.
645 */ 693 */
@@ -650,7 +698,7 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
650 spin_lock_init(&file_private->table_lock); 698 spin_lock_init(&file_private->table_lock);
651} 699}
652 700
653/** 701/*
654 * Called at device close to release the file's 702 * Called at device close to release the file's
655 * handle references on objects. 703 * handle references on objects.
656 */ 704 */
@@ -674,6 +722,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
674} 722}
675 723
676/** 724/**
725 * drm_gem_release - release file-private GEM resources
726 * @dev: drm_device which is being closed by userspace
727 * @file_private: drm file-private structure to clean up
728 *
677 * Called at close time when the filp is going away. 729 * Called at close time when the filp is going away.
678 * 730 *
679 * Releases any remaining references on objects by this filp. 731 * Releases any remaining references on objects by this filp.
@@ -692,11 +744,16 @@ drm_gem_object_release(struct drm_gem_object *obj)
692 WARN_ON(obj->dma_buf); 744 WARN_ON(obj->dma_buf);
693 745
694 if (obj->filp) 746 if (obj->filp)
695 fput(obj->filp); 747 fput(obj->filp);
748
749 drm_gem_free_mmap_offset(obj);
696} 750}
697EXPORT_SYMBOL(drm_gem_object_release); 751EXPORT_SYMBOL(drm_gem_object_release);
698 752
699/** 753/**
754 * drm_gem_object_free - free a GEM object
755 * @kref: kref of the object to free
756 *
700 * Called after the last reference to the object has been lost. 757 * Called after the last reference to the object has been lost.
701 * Must be called holding struct_mutex 758 * Must be called holding struct_mutex
702 * 759 *
@@ -782,7 +839,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
782 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 839 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
783 vma->vm_ops = dev->driver->gem_vm_ops; 840 vma->vm_ops = dev->driver->gem_vm_ops;
784 vma->vm_private_data = obj; 841 vma->vm_private_data = obj;
785 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 842 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
786 843
787 /* Take a ref for this mapping of the object, so that the fault 844 /* Take a ref for this mapping of the object, so that the fault
788 * handler can dereference the mmap offset's pointer to the object. 845 * handler can dereference the mmap offset's pointer to the object.
@@ -818,7 +875,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
818 struct drm_device *dev = priv->minor->dev; 875 struct drm_device *dev = priv->minor->dev;
819 struct drm_gem_object *obj; 876 struct drm_gem_object *obj;
820 struct drm_vma_offset_node *node; 877 struct drm_vma_offset_node *node;
821 int ret = 0; 878 int ret;
822 879
823 if (drm_device_is_unplugged(dev)) 880 if (drm_device_is_unplugged(dev))
824 return -ENODEV; 881 return -ENODEV;
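
The entry points documented above combine into the usual buffer-creation pattern; a sketch under the assumption of a simple shmem-backed driver (error paths trimmed):

	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, obj, size);	/* shmfs backing */
	if (ret) {
		kfree(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* The handle holds its own reference; drop the creation one. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
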
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 6b51bf90df0e..05c97c5350a1 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -79,7 +79,6 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
79 unsigned int size) 79 unsigned int size)
80{ 80{
81 struct drm_gem_cma_object *cma_obj; 81 struct drm_gem_cma_object *cma_obj;
82 struct sg_table *sgt = NULL;
83 int ret; 82 int ret;
84 83
85 size = round_up(size, PAGE_SIZE); 84 size = round_up(size, PAGE_SIZE);
@@ -97,23 +96,9 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
97 goto error; 96 goto error;
98 } 97 }
99 98
100 sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
101 if (sgt == NULL) {
102 ret = -ENOMEM;
103 goto error;
104 }
105
106 ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
107 cma_obj->paddr, size);
108 if (ret < 0)
109 goto error;
110
111 cma_obj->sgt = sgt;
112
113 return cma_obj; 99 return cma_obj;
114 100
115error: 101error:
116 kfree(sgt);
117 drm_gem_cma_free_object(&cma_obj->base); 102 drm_gem_cma_free_object(&cma_obj->base);
118 return ERR_PTR(ret); 103 return ERR_PTR(ret);
119} 104}
@@ -175,10 +160,6 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
175 if (cma_obj->vaddr) { 160 if (cma_obj->vaddr) {
176 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size, 161 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
177 cma_obj->vaddr, cma_obj->paddr); 162 cma_obj->vaddr, cma_obj->paddr);
178 if (cma_obj->sgt) {
179 sg_free_table(cma_obj->sgt);
180 kfree(cma_obj->sgt);
181 }
182 } else if (gem_obj->import_attach) { 163 } else if (gem_obj->import_attach) {
183 drm_prime_gem_destroy(gem_obj, cma_obj->sgt); 164 drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
184 } 165 }
@@ -253,8 +234,17 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
253{ 234{
254 int ret; 235 int ret;
255 236
256 ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT, 237 /*
257 vma->vm_end - vma->vm_start, vma->vm_page_prot); 238 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
239 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
240 * the whole buffer.
241 */
242 vma->vm_flags &= ~VM_PFNMAP;
243 vma->vm_pgoff = 0;
244
245 ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
246 cma_obj->vaddr, cma_obj->paddr,
247 vma->vm_end - vma->vm_start);
258 if (ret) 248 if (ret)
259 drm_gem_vm_close(vma); 249 drm_gem_vm_close(vma);
260 250
@@ -292,9 +282,9 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
292 282
293 off = drm_vma_node_start(&obj->vma_node); 283 off = drm_vma_node_start(&obj->vma_node);
294 284
295 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d", 285 seq_printf(m, "%2d (%2d) %08llx %pad %p %d",
296 obj->name, obj->refcount.refcount.counter, 286 obj->name, obj->refcount.refcount.counter,
297 off, cma_obj->paddr, cma_obj->vaddr, obj->size); 287 off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
298 288
299 seq_printf(m, "\n"); 289 seq_printf(m, "\n");
300} 290}
@@ -342,7 +332,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
342 cma_obj->paddr = sg_dma_address(sgt->sgl); 332 cma_obj->paddr = sg_dma_address(sgt->sgl);
343 cma_obj->sgt = sgt; 333 cma_obj->sgt = sgt;
344 334
345 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, size); 335 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
346 336
347 return &cma_obj->base; 337 return &cma_obj->base;
348} 338}
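
Allocation-side usage is unchanged; the difference is that no scatter/gather table is built up front any more and mmap goes through the DMA API. A sketch (foo_hw_set_scanout() is illustrative, not a real helper):

	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return PTR_ERR(cma_obj);

	/* vaddr/paddr are immediately usable; an sg_table is only
	 * constructed if and when the buffer is exported via PRIME. */
	foo_hw_set_scanout(foo, cma_obj->paddr);
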
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f4dc9b7a3831..93a42040bedb 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -328,6 +328,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
328 return -EINVAL; 328 return -EINVAL;
329 file_priv->stereo_allowed = req->value; 329 file_priv->stereo_allowed = req->value;
330 break; 330 break;
331 case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
332 if (!drm_universal_planes)
333 return -EINVAL;
334 if (req->value > 1)
335 return -EINVAL;
336 file_priv->universal_planes = req->value;
337 break;
331 default: 338 default:
332 return -EINVAL; 339 return -EINVAL;
333 } 340 }
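
From userspace the new capability is opt-in per file descriptor, mirroring the stereo cap above it. Roughly (raw ioctl shown; libdrm wraps this as drmSetClientCap(), and setup_with_universal_planes() is hypothetical):

	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
		.value = 1,
	};

	/* Fails with -EINVAL unless the drm_universal_planes module
	 * option is set while the interface stabilizes. */
	if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) == 0)
		/* primary planes now show up in the plane list */
		setup_with_universal_planes();
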
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index b155ee2ffa17..09821f46d768 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -142,8 +142,12 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
142{ 142{
143 struct device_node *node; 143 struct device_node *node;
144 144
145 for_each_available_child_of_node(host->dev->of_node, node) 145 for_each_available_child_of_node(host->dev->of_node, node) {
146 /* skip nodes without reg property */
147 if (!of_find_property(node, "reg", NULL))
148 continue;
146 of_mipi_dsi_device_add(host, node); 149 of_mipi_dsi_device_add(host, node);
150 }
147 151
148 return 0; 152 return 0;
149} 153}
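
Host drivers keep calling the same registration helper; the filter only changes which children receive a struct mipi_dsi_device. Sketch of the host side (foo_* names are illustrative):

	static int foo_dsi_probe(struct platform_device *pdev)
	{
		struct foo_dsi *dsi;

		dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
		if (!dsi)
			return -ENOMEM;

		dsi->host.dev = &pdev->dev;
		dsi->host.ops = &foo_dsi_host_ops;

		/* Children with a reg property become DSI devices; other
		 * child nodes (e.g. a backlight) are now skipped. */
		return mipi_dsi_host_register(&dsi->host);
	}
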
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index af93cc55259f..71e2d3fcd6ee 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -47,7 +47,48 @@
47#include <linux/seq_file.h> 47#include <linux/seq_file.h>
48#include <linux/export.h> 48#include <linux/export.h>
49 49
50#define MM_UNUSED_TARGET 4 50/**
51 * DOC: Overview
52 *
53 * drm_mm provides a simple range allocator. The drivers are free to use the
54 * resource allocator from the linux core if it suits them, the upside of drm_mm
55 * is that it's in the DRM core, which means that it's easier to extend for
56 * some of the crazier special purpose needs of gpus.
57 *
58 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
59 * Drivers are free to embed either of them into their own suitable
60 * datastructures. drm_mm itself will not do any allocations of its own, so if
61 * drivers choose not to embed nodes they need to still allocate them
62 * themselves.
63 *
64 * The range allocator also supports reservation of preallocated blocks. This is
65 * useful for taking over initial mode setting configurations from the firmware,
66 * where an object needs to be created which exactly matches the firmware's
67 * scanout target. As long as the range is still free it can be inserted anytime
68 * after the allocator is initialized, which helps with avoiding looped
69 * dependencies in the driver load sequence.
70 *
71 * drm_mm maintains a stack of most recently freed holes, which of all
72 * simplistic datastructures seems to be a fairly decent approach to clustering
73 * allocations and avoiding too much fragmentation. This means free space
74 * searches are O(num_holes). Given all the fancy features drm_mm supports,
75 * anything better would be fairly complex, and since gfx thrashing is a fairly
76 * steep cliff anyway this is not a real concern. Removing a node again is O(1).
77 *
78 * drm_mm supports a few features: Alignment and range restrictions can be
79 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
80 * opaque unsigned long) which in conjunction with a driver callback can be used
81 * to implement sophisticated placement restrictions. The i915 DRM driver uses
82 * this to implement guard pages between incompatible caching domains in the
83 * graphics TT.
84 *
85 * Two behaviors are supported for searching and allocating: bottom-up and top-down.
86 * The default is bottom-up. Top-down allocation can be used if the memory area
87 * has different restrictions, or just to reduce fragmentation.
88 *
89 * Finally iteration helpers to walk all nodes and all holes are provided as are
90 * some basic allocator dumpers for debugging.
91 */
51 92
52static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
53 unsigned long size, 94 unsigned long size,
@@ -65,7 +106,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
65static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 106static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
66 struct drm_mm_node *node, 107 struct drm_mm_node *node,
67 unsigned long size, unsigned alignment, 108 unsigned long size, unsigned alignment,
68 unsigned long color) 109 unsigned long color,
110 enum drm_mm_allocator_flags flags)
69{ 111{
70 struct drm_mm *mm = hole_node->mm; 112 struct drm_mm *mm = hole_node->mm;
71 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 113 unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -78,12 +120,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
78 if (mm->color_adjust) 120 if (mm->color_adjust)
79 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 121 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
80 122
123 if (flags & DRM_MM_CREATE_TOP)
124 adj_start = adj_end - size;
125
81 if (alignment) { 126 if (alignment) {
82 unsigned tmp = adj_start % alignment; 127 unsigned tmp = adj_start % alignment;
83 if (tmp) 128 if (tmp) {
84 adj_start += alignment - tmp; 129 if (flags & DRM_MM_CREATE_TOP)
130 adj_start -= tmp;
131 else
132 adj_start += alignment - tmp;
133 }
85 } 134 }
86 135
136 BUG_ON(adj_start < hole_start);
137 BUG_ON(adj_end > hole_end);
138
87 if (adj_start == hole_start) { 139 if (adj_start == hole_start) {
88 hole_node->hole_follows = 0; 140 hole_node->hole_follows = 0;
89 list_del(&hole_node->hole_stack); 141 list_del(&hole_node->hole_stack);
@@ -107,6 +159,20 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
107 } 159 }
108} 160}
109 161
162/**
163 * drm_mm_reserve_node - insert a pre-initialized node
164 * @mm: drm_mm allocator to insert @node into
165 * @node: drm_mm_node to insert
166 *
167 * This function inserts an already set-up drm_mm_node into the allocator,
168 * meaning that start, size and color must be set by the caller. This is useful
169 * to initialize the allocator with preallocated objects which must be set-up
170 * before the range allocator can be set-up, e.g. when taking over a firmware
171 * framebuffer.
172 *
173 * Returns:
174 * 0 on success, -ENOSPC if there's no hole where @node is.
175 */
110int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 176int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
111{ 177{
112 struct drm_mm_node *hole; 178 struct drm_mm_node *hole;
@@ -148,23 +214,34 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
148EXPORT_SYMBOL(drm_mm_reserve_node); 214EXPORT_SYMBOL(drm_mm_reserve_node);
149 215
150/** 216/**
151 * Search for free space and insert a preallocated memory node. Returns 217 * drm_mm_insert_node_generic - search for space and insert @node
152 * -ENOSPC if no suitable free area is available. The preallocated memory node 218 * @mm: drm_mm to allocate from
153 * must be cleared. 219 * @node: preallocated node to insert
220 * @size: size of the allocation
221 * @alignment: alignment of the allocation
222 * @color: opaque tag value to use for this node
223 * @sflags: flags to fine-tune the allocation search
224 * @aflags: flags to fine-tune the allocation behavior
225 *
226 * The preallocated node must be cleared to 0.
227 *
228 * Returns:
229 * 0 on success, -ENOSPC if there's no suitable hole.
154 */ 230 */
155int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 231int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
156 unsigned long size, unsigned alignment, 232 unsigned long size, unsigned alignment,
157 unsigned long color, 233 unsigned long color,
158 enum drm_mm_search_flags flags) 234 enum drm_mm_search_flags sflags,
235 enum drm_mm_allocator_flags aflags)
159{ 236{
160 struct drm_mm_node *hole_node; 237 struct drm_mm_node *hole_node;
161 238
162 hole_node = drm_mm_search_free_generic(mm, size, alignment, 239 hole_node = drm_mm_search_free_generic(mm, size, alignment,
163 color, flags); 240 color, sflags);
164 if (!hole_node) 241 if (!hole_node)
165 return -ENOSPC; 242 return -ENOSPC;
166 243
167 drm_mm_insert_helper(hole_node, node, size, alignment, color); 244 drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
168 return 0; 245 return 0;
169} 246}
170EXPORT_SYMBOL(drm_mm_insert_node_generic); 247EXPORT_SYMBOL(drm_mm_insert_node_generic);
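
Tying the overview together: a minimal embedding of drm_mm_node with the new split search/allocator flags might look like this (foo_* is hypothetical; passing DRM_MM_CREATE_TOP as the allocator flag would place the node at the high end of the hole instead):

	struct foo_buffer {
		struct drm_mm_node node;	/* embedded, no extra alloc */
	};

	static int foo_buffer_alloc(struct drm_mm *mm, struct foo_buffer *buf,
				    unsigned long size)
	{
		/* the node must be zeroed, see the kerneldoc above */
		memset(&buf->node, 0, sizeof(buf->node));

		return drm_mm_insert_node_generic(mm, &buf->node, size,
						  0, 0, /* alignment, color */
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	}

	static void foo_buffer_free(struct foo_buffer *buf)
	{
		drm_mm_remove_node(&buf->node);	/* O(1) */
	}
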
@@ -173,7 +250,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
173 struct drm_mm_node *node, 250 struct drm_mm_node *node,
174 unsigned long size, unsigned alignment, 251 unsigned long size, unsigned alignment,
175 unsigned long color, 252 unsigned long color,
176 unsigned long start, unsigned long end) 253 unsigned long start, unsigned long end,
254 enum drm_mm_allocator_flags flags)
177{ 255{
178 struct drm_mm *mm = hole_node->mm; 256 struct drm_mm *mm = hole_node->mm;
179 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 257 unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -188,13 +266,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
188 if (adj_end > end) 266 if (adj_end > end)
189 adj_end = end; 267 adj_end = end;
190 268
269 if (flags & DRM_MM_CREATE_TOP)
270 adj_start = adj_end - size;
271
191 if (mm->color_adjust) 272 if (mm->color_adjust)
192 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 273 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
193 274
194 if (alignment) { 275 if (alignment) {
195 unsigned tmp = adj_start % alignment; 276 unsigned tmp = adj_start % alignment;
196 if (tmp) 277 if (tmp) {
197 adj_start += alignment - tmp; 278 if (flags & DRM_MM_CREATE_TOP)
279 adj_start -= tmp;
280 else
281 adj_start += alignment - tmp;
282 }
198 } 283 }
199 284
200 if (adj_start == hole_start) { 285 if (adj_start == hole_start) {
@@ -211,6 +296,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
211 INIT_LIST_HEAD(&node->hole_stack); 296 INIT_LIST_HEAD(&node->hole_stack);
212 list_add(&node->node_list, &hole_node->node_list); 297 list_add(&node->node_list, &hole_node->node_list);
213 298
299 BUG_ON(node->start < start);
300 BUG_ON(node->start < adj_start);
214 BUG_ON(node->start + node->size > adj_end); 301 BUG_ON(node->start + node->size > adj_end);
215 BUG_ON(node->start + node->size > end); 302 BUG_ON(node->start + node->size > end);
216 303
@@ -222,32 +309,51 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
222} 309}
223 310
224/** 311/**
225 * Search for free space and insert a preallocated memory node. Returns 312 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
226 * -ENOSPC if no suitable free area is available. This is for range 313 * @mm: drm_mm to allocate from
 227 * restricted allocations. The preallocated memory node must be cleared. 314 * @node: preallocated node to insert
315 * @size: size of the allocation
316 * @alignment: alignment of the allocation
317 * @color: opaque tag value to use for this node
318 * @start: start of the allowed range for this node
319 * @end: end of the allowed range for this node
320 * @sflags: flags to fine-tune the allocation search
321 * @aflags: flags to fine-tune the allocation behavior
322 *
323 * The preallocated node must be cleared to 0.
324 *
325 * Returns:
326 * 0 on success, -ENOSPC if there's no suitable hole.
228 */ 327 */
229int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 328int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
230 unsigned long size, unsigned alignment, unsigned long color, 329 unsigned long size, unsigned alignment,
330 unsigned long color,
231 unsigned long start, unsigned long end, 331 unsigned long start, unsigned long end,
232 enum drm_mm_search_flags flags) 332 enum drm_mm_search_flags sflags,
333 enum drm_mm_allocator_flags aflags)
233{ 334{
234 struct drm_mm_node *hole_node; 335 struct drm_mm_node *hole_node;
235 336
236 hole_node = drm_mm_search_free_in_range_generic(mm, 337 hole_node = drm_mm_search_free_in_range_generic(mm,
237 size, alignment, color, 338 size, alignment, color,
238 start, end, flags); 339 start, end, sflags);
239 if (!hole_node) 340 if (!hole_node)
240 return -ENOSPC; 341 return -ENOSPC;
241 342
242 drm_mm_insert_helper_range(hole_node, node, 343 drm_mm_insert_helper_range(hole_node, node,
243 size, alignment, color, 344 size, alignment, color,
244 start, end); 345 start, end, aflags);
245 return 0; 346 return 0;
246} 347}
247EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); 348EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
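For the range-restricted variant, a sketch of the common "keep it CPU-visible" pattern; the mappable_end cut-off and the example_* name are invented for illustration:

static int example_alloc_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long size,
				  unsigned long mappable_end)
{
	/* Constrain the search to [0, mappable_end), default placement. */
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}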
248 349
249/** 350/**
250 * Remove a memory node from the allocator. 351 * drm_mm_remove_node - Remove a memory node from the allocator.
352 * @node: drm_mm_node to remove
353 *
354 * This just removes a node from its drm_mm allocator. The node does not need to
355 * be cleared again before it can be re-inserted into this or any other drm_mm
 356 * allocator. It is a bug to call this function on an unallocated node.
251 */ 357 */
252void drm_mm_remove_node(struct drm_mm_node *node) 358void drm_mm_remove_node(struct drm_mm_node *node)
253{ 359{
@@ -315,7 +421,10 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
315 best = NULL; 421 best = NULL;
316 best_size = ~0UL; 422 best_size = ~0UL;
317 423
318 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { 424 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
425 flags & DRM_MM_SEARCH_BELOW) {
426 unsigned long hole_size = adj_end - adj_start;
427
319 if (mm->color_adjust) { 428 if (mm->color_adjust) {
320 mm->color_adjust(entry, color, &adj_start, &adj_end); 429 mm->color_adjust(entry, color, &adj_start, &adj_end);
321 if (adj_end <= adj_start) 430 if (adj_end <= adj_start)
@@ -328,9 +437,9 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
328 if (!(flags & DRM_MM_SEARCH_BEST)) 437 if (!(flags & DRM_MM_SEARCH_BEST))
329 return entry; 438 return entry;
330 439
331 if (entry->size < best_size) { 440 if (hole_size < best_size) {
332 best = entry; 441 best = entry;
333 best_size = entry->size; 442 best_size = hole_size;
334 } 443 }
335 } 444 }
336 445
@@ -356,7 +465,10 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
356 best = NULL; 465 best = NULL;
357 best_size = ~0UL; 466 best_size = ~0UL;
358 467
359 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { 468 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
469 flags & DRM_MM_SEARCH_BELOW) {
470 unsigned long hole_size = adj_end - adj_start;
471
360 if (adj_start < start) 472 if (adj_start < start)
361 adj_start = start; 473 adj_start = start;
362 if (adj_end > end) 474 if (adj_end > end)
@@ -374,9 +486,9 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
374 if (!(flags & DRM_MM_SEARCH_BEST)) 486 if (!(flags & DRM_MM_SEARCH_BEST))
375 return entry; 487 return entry;
376 488
377 if (entry->size < best_size) { 489 if (hole_size < best_size) {
378 best = entry; 490 best = entry;
379 best_size = entry->size; 491 best_size = hole_size;
380 } 492 }
381 } 493 }
382 494
@@ -384,7 +496,13 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
384} 496}
385 497
386/** 498/**
387 * Moves an allocation. To be used with embedded struct drm_mm_node. 499 * drm_mm_replace_node - move an allocation from @old to @new
500 * @old: drm_mm_node to remove from the allocator
501 * @new: drm_mm_node which should inherit @old's allocation
502 *
 503 * This is useful when drivers embed the drm_mm_node structure and hence
504 * can't move allocations by reassigning pointers. It's a combination of remove
505 * and insert with the guarantee that the allocation start will match.
388 */ 506 */
389void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 507void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
390{ 508{
@@ -402,12 +520,46 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
402EXPORT_SYMBOL(drm_mm_replace_node); 520EXPORT_SYMBOL(drm_mm_replace_node);
403 521
404/** 522/**
 405 * Initializa lru scanning. 523 * DOC: lru scan roster
524 *
 525 * Very often GPUs need to have contiguous allocations for a given object. When
 526 * evicting objects to make space for a new one it is therefore not very
 527 * efficient to simply select objects from the tail of an LRU until there's a
 528 * suitable hole: especially for big objects or nodes that otherwise have
 529 * special allocation constraints there's a good chance we would evict lots of
 530 * (smaller) objects unnecessarily.
531 *
532 * The DRM range allocator supports this use-case through the scanning
533 * interfaces. First a scan operation needs to be initialized with
 534 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 535 * objects to the roster (probably by walking an LRU list, but this can be
 536 * freely implemented) until a suitable hole is found or there's no further
 537 * evictable object.
538 *
 539 * Then the driver must walk through all objects again in exactly the reverse
540 * order to restore the allocator state. Note that while the allocator is used
541 * in the scan mode no other operation is allowed.
542 *
543 * Finally the driver evicts all objects selected in the scan. Adding and
544 * removing an object is O(1), and since freeing a node is also O(1) the overall
 545 * complexity is O(scanned_objects). Like the free stack, which needs to be
 546 * walked before a scan operation even begins, this is linear in the number of
 547 * objects. It doesn't seem to hurt badly.
548 */
549
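The eviction dance described above is easier to follow in code. A minimal sketch, assuming a hypothetical driver that keeps its buffer objects on an LRU list and embeds a drm_mm_node in each; every example_* name is invented and not part of this patch:

struct example_obj {
	struct drm_mm_node node;
	struct list_head lru;
};

static bool example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				   unsigned long size, unsigned alignment)
{
	struct example_obj *obj, *next;
	LIST_HEAD(roster);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Step 1: feed LRU objects into the scan until a hole turns up. */
	list_for_each_entry_safe(obj, next, lru, lru) {
		/* list_move() pushes at the head, so a forward walk of
		 * @roster below visits nodes in reverse order of addition,
		 * exactly as drm_mm_scan_remove_block() requires.
		 */
		list_move(&obj->lru, &roster);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* Step 2: restore the allocator state, remembering the winners. */
	list_for_each_entry_safe(obj, next, &roster, lru) {
		if (drm_mm_scan_remove_block(&obj->node))
			list_move(&obj->lru, &evict_list);
		else
			list_move(&obj->lru, lru);
	}

	/* Step 3: the scan is finished, so removing nodes is legal again. */
	list_for_each_entry_safe(obj, next, &evict_list, lru) {
		drm_mm_remove_node(&obj->node);
		list_move(&obj->lru, lru);	/* real drivers unbind/free */
	}

	return found;
}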
550/**
551 * drm_mm_init_scan - initialize lru scanning
552 * @mm: drm_mm to scan
553 * @size: size of the allocation
554 * @alignment: alignment of the allocation
555 * @color: opaque tag value to use for the allocation
406 * 556 *
407 * This simply sets up the scanning routines with the parameters for the desired 557 * This simply sets up the scanning routines with the parameters for the desired
408 * hole. 558 * hole. Note that there's no need to specify allocation flags, since they only
559 * change the place a node is allocated from within a suitable hole.
409 * 560 *
410 * Warning: As long as the scan list is non-empty, no other operations than 561 * Warning:
562 * As long as the scan list is non-empty, no other operations than
411 * adding/removing nodes to/from the scan list are allowed. 563 * adding/removing nodes to/from the scan list are allowed.
412 */ 564 */
413void drm_mm_init_scan(struct drm_mm *mm, 565void drm_mm_init_scan(struct drm_mm *mm,
@@ -427,12 +579,20 @@ void drm_mm_init_scan(struct drm_mm *mm,
427EXPORT_SYMBOL(drm_mm_init_scan); 579EXPORT_SYMBOL(drm_mm_init_scan);
428 580
429/** 581/**
 430 * Initializa lru scanning. 582 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
583 * @mm: drm_mm to scan
584 * @size: size of the allocation
585 * @alignment: alignment of the allocation
586 * @color: opaque tag value to use for the allocation
587 * @start: start of the allowed range for the allocation
588 * @end: end of the allowed range for the allocation
431 * 589 *
432 * This simply sets up the scanning routines with the parameters for the desired 590 * This simply sets up the scanning routines with the parameters for the desired
433 * hole. This version is for range-restricted scans. 591 * hole. Note that there's no need to specify allocation flags, since they only
592 * change the place a node is allocated from within a suitable hole.
434 * 593 *
435 * Warning: As long as the scan list is non-empty, no other operations than 594 * Warning:
595 * As long as the scan list is non-empty, no other operations than
436 * adding/removing nodes to/from the scan list are allowed. 596 * adding/removing nodes to/from the scan list are allowed.
437 */ 597 */
438void drm_mm_init_scan_with_range(struct drm_mm *mm, 598void drm_mm_init_scan_with_range(struct drm_mm *mm,
@@ -456,12 +616,16 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
456EXPORT_SYMBOL(drm_mm_init_scan_with_range); 616EXPORT_SYMBOL(drm_mm_init_scan_with_range);
457 617
458/** 618/**
619 * drm_mm_scan_add_block - add a node to the scan list
620 * @node: drm_mm_node to add
621 *
459 * Add a node to the scan list that might be freed to make space for the desired 622 * Add a node to the scan list that might be freed to make space for the desired
460 * hole. 623 * hole.
461 * 624 *
462 * Returns non-zero, if a hole has been found, zero otherwise. 625 * Returns:
626 * True if a hole has been found, false otherwise.
463 */ 627 */
464int drm_mm_scan_add_block(struct drm_mm_node *node) 628bool drm_mm_scan_add_block(struct drm_mm_node *node)
465{ 629{
466 struct drm_mm *mm = node->mm; 630 struct drm_mm *mm = node->mm;
467 struct drm_mm_node *prev_node; 631 struct drm_mm_node *prev_node;
@@ -501,15 +665,16 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
501 mm->scan_size, mm->scan_alignment)) { 665 mm->scan_size, mm->scan_alignment)) {
502 mm->scan_hit_start = hole_start; 666 mm->scan_hit_start = hole_start;
503 mm->scan_hit_end = hole_end; 667 mm->scan_hit_end = hole_end;
504 return 1; 668 return true;
505 } 669 }
506 670
507 return 0; 671 return false;
508} 672}
509EXPORT_SYMBOL(drm_mm_scan_add_block); 673EXPORT_SYMBOL(drm_mm_scan_add_block);
510 674
511/** 675/**
512 * Remove a node from the scan list. 676 * drm_mm_scan_remove_block - remove a node from the scan list
677 * @node: drm_mm_node to remove
513 * 678 *
514 * Nodes _must_ be removed in the exact same order from the scan list as they 679 * Nodes _must_ be removed in the exact same order from the scan list as they
515 * have been added, otherwise the internal state of the memory manager will be 680 * have been added, otherwise the internal state of the memory manager will be
@@ -519,10 +684,11 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
519 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then 684 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 520 * return the just freed block (because it's at the top of the free_stack list). 685
521 * 686 *
522 * Returns one if this block should be evicted, zero otherwise. Will always 687 * Returns:
523 * return zero when no hole has been found. 688 * True if this block should be evicted, false otherwise. Will always
689 * return false when no hole has been found.
524 */ 690 */
525int drm_mm_scan_remove_block(struct drm_mm_node *node) 691bool drm_mm_scan_remove_block(struct drm_mm_node *node)
526{ 692{
527 struct drm_mm *mm = node->mm; 693 struct drm_mm *mm = node->mm;
528 struct drm_mm_node *prev_node; 694 struct drm_mm_node *prev_node;
@@ -543,7 +709,15 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
543} 709}
544EXPORT_SYMBOL(drm_mm_scan_remove_block); 710EXPORT_SYMBOL(drm_mm_scan_remove_block);
545 711
546int drm_mm_clean(struct drm_mm * mm) 712/**
713 * drm_mm_clean - checks whether an allocator is clean
714 * @mm: drm_mm allocator to check
715 *
716 * Returns:
717 * True if the allocator is completely free, false if there's still a node
718 * allocated in it.
719 */
720bool drm_mm_clean(struct drm_mm * mm)
547{ 721{
548 struct list_head *head = &mm->head_node.node_list; 722 struct list_head *head = &mm->head_node.node_list;
549 723
@@ -551,6 +725,14 @@ int drm_mm_clean(struct drm_mm * mm)
551} 725}
552EXPORT_SYMBOL(drm_mm_clean); 726EXPORT_SYMBOL(drm_mm_clean);
553 727
728/**
729 * drm_mm_init - initialize a drm-mm allocator
730 * @mm: the drm_mm structure to initialize
731 * @start: start of the range managed by @mm
 732 * @size: size of the range managed by @mm
733 *
734 * Note that @mm must be cleared to 0 before calling this function.
735 */
554void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 736void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
555{ 737{
556 INIT_LIST_HEAD(&mm->hole_stack); 738 INIT_LIST_HEAD(&mm->hole_stack);
@@ -572,6 +754,13 @@ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
572} 754}
573EXPORT_SYMBOL(drm_mm_init); 755EXPORT_SYMBOL(drm_mm_init);
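A brief sketch of the allocator lifetime the two notes above imply; the embedding example_vram structure is hypothetical:

struct example_vram {
	struct drm_mm mm;
};

static void example_vram_init(struct example_vram *vram,
			      unsigned long vram_size)
{
	/* @mm must be zeroed before drm_mm_init(), see the note above. */
	memset(&vram->mm, 0, sizeof(vram->mm));
	drm_mm_init(&vram->mm, 0, vram_size);
}

static void example_vram_fini(struct example_vram *vram)
{
	/* All nodes must be removed first; drm_mm_takedown() WARNs if not. */
	drm_mm_takedown(&vram->mm);
}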
574 756
757/**
758 * drm_mm_takedown - clean up a drm_mm allocator
759 * @mm: drm_mm allocator to clean up
760 *
761 * Note that it is a bug to call this function on an allocator which is not
762 * clean.
763 */
575void drm_mm_takedown(struct drm_mm * mm) 764void drm_mm_takedown(struct drm_mm * mm)
576{ 765{
577 WARN(!list_empty(&mm->head_node.node_list), 766 WARN(!list_empty(&mm->head_node.node_list),
@@ -597,6 +786,11 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
597 return 0; 786 return 0;
598} 787}
599 788
789/**
790 * drm_mm_debug_table - dump allocator state to dmesg
791 * @mm: drm_mm allocator to dump
792 * @prefix: prefix to use for dumping to dmesg
793 */
600void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) 794void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
601{ 795{
602 struct drm_mm_node *entry; 796 struct drm_mm_node *entry;
@@ -635,6 +829,11 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
635 return 0; 829 return 0;
636} 830}
637 831
832/**
833 * drm_mm_dump_table - dump allocator state to a seq_file
834 * @m: seq_file to dump to
835 * @mm: drm_mm allocator to dump
836 */
638int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 837int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
639{ 838{
640 struct drm_mm_node *entry; 839 struct drm_mm_node *entry;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0733153dfd2..8b410576fce4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -37,15 +37,14 @@
37#include <drm/drm_crtc.h> 37#include <drm/drm_crtc.h>
38#include <video/of_videomode.h> 38#include <video/of_videomode.h>
39#include <video/videomode.h> 39#include <video/videomode.h>
40#include <drm/drm_modes.h>
41
42#include "drm_crtc_internal.h"
40 43
41/** 44/**
42 * drm_mode_debug_printmodeline - debug print a mode 45 * drm_mode_debug_printmodeline - print a mode to dmesg
43 * @dev: DRM device
44 * @mode: mode to print 46 * @mode: mode to print
45 * 47 *
46 * LOCKING:
47 * None.
48 *
49 * Describe @mode using DRM_DEBUG. 48 * Describe @mode using DRM_DEBUG.
50 */ 49 */
51void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) 50void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
@@ -61,18 +60,77 @@ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
61EXPORT_SYMBOL(drm_mode_debug_printmodeline); 60EXPORT_SYMBOL(drm_mode_debug_printmodeline);
62 61
63/** 62/**
64 * drm_cvt_mode -create a modeline based on CVT algorithm 63 * drm_mode_create - create a new display mode
65 * @dev: DRM device 64 * @dev: DRM device
66 * @hdisplay: hdisplay size
67 * @vdisplay: vdisplay size
68 * @vrefresh : vrefresh rate
69 * @reduced : Whether the GTF calculation is simplified
70 * @interlaced:Whether the interlace is supported
71 * 65 *
72 * LOCKING: 66 * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
73 * none. 67 * and return it.
74 * 68 *
75 * return the modeline based on CVT algorithm 69 * Returns:
70 * Pointer to new mode on success, NULL on error.
71 */
72struct drm_display_mode *drm_mode_create(struct drm_device *dev)
73{
74 struct drm_display_mode *nmode;
75
76 nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
77 if (!nmode)
78 return NULL;
79
80 if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
81 kfree(nmode);
82 return NULL;
83 }
84
85 return nmode;
86}
87EXPORT_SYMBOL(drm_mode_create);
88
89/**
90 * drm_mode_destroy - remove a mode
91 * @dev: DRM device
92 * @mode: mode to remove
93 *
 94 * Release @mode's unique ID, then free the @mode structure itself using kfree().
95 */
96void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
97{
98 if (!mode)
99 return;
100
101 drm_mode_object_put(dev, &mode->base);
102
103 kfree(mode);
104}
105EXPORT_SYMBOL(drm_mode_destroy);
106
107/**
108 * drm_mode_probed_add - add a mode to a connector's probed_mode list
 109 * @connector: connector the new mode is added to
110 * @mode: mode data
111 *
 112 * Add @mode to @connector's probed_mode list for later use. In a second step
 113 * this list should be filtered, and the modes actually supported by the
 114 * hardware should be moved to @connector's modes list.
115 */
116void drm_mode_probed_add(struct drm_connector *connector,
117 struct drm_display_mode *mode)
118{
119 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
120
121 list_add_tail(&mode->head, &connector->probed_modes);
122}
123EXPORT_SYMBOL(drm_mode_probed_add);
124
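A typical feeder of the probed list is a connector ->get_modes() hook. A hedged sketch, assuming a hypothetical fixed 1280x720@60 panel (the geometry and the example_* name are invented):

static int example_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	/* drm_cvt_mode() allocates via drm_mode_create(), see below. */
	mode = drm_cvt_mode(connector->dev, 1280, 720, 60,
			    false, false, false);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}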
125/**
 126 * drm_cvt_mode - create a modeline based on the CVT algorithm
127 * @dev: drm device
128 * @hdisplay: hdisplay size
129 * @vdisplay: vdisplay size
130 * @vrefresh: vrefresh rate
131 * @reduced: whether to use reduced blanking
132 * @interlaced: whether to compute an interlaced mode
133 * @margins: whether to add margins (borders)
76 * 134 *
77 * This function is called to generate the modeline based on CVT algorithm 135 * This function is called to generate the modeline based on CVT algorithm
78 * according to the hdisplay, vdisplay, vrefresh. 136 * according to the hdisplay, vdisplay, vrefresh.
@@ -82,12 +140,17 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
82 * 140 *
83 * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. 141 * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
84 * What I have done is to translate it by using integer calculation. 142 * What I have done is to translate it by using integer calculation.
143 *
144 * Returns:
145 * The modeline based on the CVT algorithm stored in a drm_display_mode object.
146 * The display mode object is allocated with drm_mode_create(). Returns NULL
147 * when no mode could be allocated.
85 */ 148 */
86#define HV_FACTOR 1000
87struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, 149struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
88 int vdisplay, int vrefresh, 150 int vdisplay, int vrefresh,
89 bool reduced, bool interlaced, bool margins) 151 bool reduced, bool interlaced, bool margins)
90{ 152{
153#define HV_FACTOR 1000
91 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 154 /* 1) top/bottom margin size (% of height) - default: 1.8, */
92#define CVT_MARGIN_PERCENTAGE 18 155#define CVT_MARGIN_PERCENTAGE 18
93 /* 2) character cell horizontal granularity (pixels) - default 8 */ 156 /* 2) character cell horizontal granularity (pixels) - default 8 */
@@ -281,23 +344,25 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
281EXPORT_SYMBOL(drm_cvt_mode); 344EXPORT_SYMBOL(drm_cvt_mode);
282 345
283/** 346/**
284 * drm_gtf_mode_complex - create the modeline based on full GTF algorithm 347 * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
285 * 348 * @dev: drm device
286 * @dev :drm device 349 * @hdisplay: hdisplay size
287 * @hdisplay :hdisplay size 350 * @vdisplay: vdisplay size
288 * @vdisplay :vdisplay size 351 * @vrefresh: vrefresh rate.
289 * @vrefresh :vrefresh rate. 352 * @interlaced: whether to compute an interlaced mode
290 * @interlaced :whether the interlace is supported 353 * @margins: desired margin (borders) size
291 * @margins :desired margin size 354 * @GTF_M: extended GTF formula parameters
292 * @GTF_[MCKJ] :extended GTF formula parameters 355 * @GTF_2C: extended GTF formula parameters
293 * 356 * @GTF_K: extended GTF formula parameters
294 * LOCKING. 357 * @GTF_2J: extended GTF formula parameters
295 * none.
296 *
297 * return the modeline based on full GTF algorithm.
298 * 358 *
299 * GTF feature blocks specify C and J in multiples of 0.5, so we pass them 359 * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
300 * in here multiplied by two. For a C of 40, pass in 80. 360 * in here multiplied by two. For a C of 40, pass in 80.
361 *
362 * Returns:
363 * The modeline based on the full GTF algorithm stored in a drm_display_mode object.
364 * The display mode object is allocated with drm_mode_create(). Returns NULL
365 * when no mode could be allocated.
301 */ 366 */
302struct drm_display_mode * 367struct drm_display_mode *
303drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, 368drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
@@ -467,17 +532,13 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
467EXPORT_SYMBOL(drm_gtf_mode_complex); 532EXPORT_SYMBOL(drm_gtf_mode_complex);
468 533
469/** 534/**
470 * drm_gtf_mode - create the modeline based on GTF algorithm 535 * drm_gtf_mode - create the modeline based on the GTF algorithm
471 * 536 * @dev: drm device
472 * @dev :drm device 537 * @hdisplay: hdisplay size
473 * @hdisplay :hdisplay size 538 * @vdisplay: vdisplay size
474 * @vdisplay :vdisplay size 539 * @vrefresh: vrefresh rate.
475 * @vrefresh :vrefresh rate. 540 * @interlaced: whether to compute an interlaced mode
476 * @interlaced :whether the interlace is supported 541 * @margins: desired margin (borders) size
477 * @margins :whether the margin is supported
478 *
479 * LOCKING.
480 * none.
481 * 542 *
482 * return the modeline based on GTF algorithm 543 * return the modeline based on GTF algorithm
483 * 544 *
@@ -496,19 +557,32 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
496 * C = 40 557 * C = 40
497 * K = 128 558 * K = 128
498 * J = 20 559 * J = 20
560 *
561 * Returns:
562 * The modeline based on the GTF algorithm stored in a drm_display_mode object.
563 * The display mode object is allocated with drm_mode_create(). Returns NULL
564 * when no mode could be allocated.
499 */ 565 */
500struct drm_display_mode * 566struct drm_display_mode *
501drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, 567drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
502 bool lace, int margins) 568 bool interlaced, int margins)
503{ 569{
504 return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, 570 return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
505 margins, 600, 40 * 2, 128, 20 * 2); 571 interlaced, margins,
572 600, 40 * 2, 128, 20 * 2);
506} 573}
507EXPORT_SYMBOL(drm_gtf_mode); 574EXPORT_SYMBOL(drm_gtf_mode);
508 575
509#ifdef CONFIG_VIDEOMODE_HELPERS 576#ifdef CONFIG_VIDEOMODE_HELPERS
510int drm_display_mode_from_videomode(const struct videomode *vm, 577/**
 511 struct drm_display_mode *dmode) 578 * drm_display_mode_from_videomode - fill in @dmode using @vm
579 * @vm: videomode structure to use as source
580 * @dmode: drm_display_mode structure to use as destination
581 *
582 * Fills out @dmode using the display mode specified in @vm.
583 */
584void drm_display_mode_from_videomode(const struct videomode *vm,
585 struct drm_display_mode *dmode)
512{ 586{
513 dmode->hdisplay = vm->hactive; 587 dmode->hdisplay = vm->hactive;
514 dmode->hsync_start = dmode->hdisplay + vm->hfront_porch; 588 dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
@@ -538,8 +612,6 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
538 if (vm->flags & DISPLAY_FLAGS_DOUBLECLK) 612 if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
539 dmode->flags |= DRM_MODE_FLAG_DBLCLK; 613 dmode->flags |= DRM_MODE_FLAG_DBLCLK;
540 drm_mode_set_name(dmode); 614 drm_mode_set_name(dmode);
541
542 return 0;
543} 615}
544EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); 616EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
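A small sketch of how the converter is typically paired with drm_mode_create(); the example_* wrapper is invented, and note that after this patch the conversion itself cannot fail:

static struct drm_display_mode *
example_mode_from_vm(struct drm_device *dev, const struct videomode *vm)
{
	struct drm_display_mode *mode = drm_mode_create(dev);

	if (!mode)
		return NULL;

	/* Copy timings from the (e.g. DT-parsed) videomode. */
	drm_display_mode_from_videomode(vm, mode);
	return mode;
}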
545 617
@@ -553,6 +625,9 @@ EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
553 * This function is expensive and should only be used, if only one mode is to be 625 * This function is expensive and should only be used, if only one mode is to be
554 * read from DT. To get multiple modes start with of_get_display_timings and 626 * read from DT. To get multiple modes start with of_get_display_timings and
555 * work with that instead. 627 * work with that instead.
628 *
629 * Returns:
 630 * 0 on success, a negative errno code when no OF videomode node was found.
556 */ 631 */
557int of_get_drm_display_mode(struct device_node *np, 632int of_get_drm_display_mode(struct device_node *np,
558 struct drm_display_mode *dmode, int index) 633 struct drm_display_mode *dmode, int index)
@@ -580,10 +655,8 @@ EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
580 * drm_mode_set_name - set the name on a mode 655 * drm_mode_set_name - set the name on a mode
581 * @mode: name will be set in this mode 656 * @mode: name will be set in this mode
582 * 657 *
583 * LOCKING: 658 * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
584 * None. 659 * with an optional 'i' suffix for interlaced modes.
585 *
586 * Set the name of @mode to a standard format.
587 */ 660 */
588void drm_mode_set_name(struct drm_display_mode *mode) 661void drm_mode_set_name(struct drm_display_mode *mode)
589{ 662{
@@ -595,54 +668,12 @@ void drm_mode_set_name(struct drm_display_mode *mode)
595} 668}
596EXPORT_SYMBOL(drm_mode_set_name); 669EXPORT_SYMBOL(drm_mode_set_name);
597 670
598/**
599 * drm_mode_width - get the width of a mode
600 * @mode: mode
601 *
602 * LOCKING:
603 * None.
604 *
605 * Return @mode's width (hdisplay) value.
606 *
607 * FIXME: is this needed?
608 *
609 * RETURNS:
610 * @mode->hdisplay
611 */
612int drm_mode_width(const struct drm_display_mode *mode)
613{
614 return mode->hdisplay;
615
616}
617EXPORT_SYMBOL(drm_mode_width);
618
619/**
620 * drm_mode_height - get the height of a mode
621 * @mode: mode
622 *
623 * LOCKING:
624 * None.
625 *
626 * Return @mode's height (vdisplay) value.
627 *
628 * FIXME: is this needed?
629 *
630 * RETURNS:
631 * @mode->vdisplay
632 */
633int drm_mode_height(const struct drm_display_mode *mode)
634{
635 return mode->vdisplay;
636}
637EXPORT_SYMBOL(drm_mode_height);
638
639/** drm_mode_hsync - get the hsync of a mode 671/** drm_mode_hsync - get the hsync of a mode
640 * @mode: mode 672 * @mode: mode
641 * 673 *
642 * LOCKING: 674 * Returns:
 643 * None. 675 * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
644 * 676 * value first if it is not yet set.
645 * Return @modes's hsync rate in kHz, rounded to the nearest int.
646 */ 677 */
647int drm_mode_hsync(const struct drm_display_mode *mode) 678int drm_mode_hsync(const struct drm_display_mode *mode)
648{ 679{
@@ -666,17 +697,9 @@ EXPORT_SYMBOL(drm_mode_hsync);
666 * drm_mode_vrefresh - get the vrefresh of a mode 697 * drm_mode_vrefresh - get the vrefresh of a mode
667 * @mode: mode 698 * @mode: mode
668 * 699 *
669 * LOCKING: 700 * Returns:
 670 * None. 701 * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
671 * 702 * value first if it is not yet set.
672 * Return @mode's vrefresh rate in Hz or calculate it if necessary.
673 *
674 * FIXME: why is this needed? shouldn't vrefresh be set already?
675 *
676 * RETURNS:
677 * Vertical refresh rate. It will be the result of actual value plus 0.5.
678 * If it is 70.288, it will return 70Hz.
679 * If it is 59.6, it will return 60Hz.
680 */ 703 */
681int drm_mode_vrefresh(const struct drm_display_mode *mode) 704int drm_mode_vrefresh(const struct drm_display_mode *mode)
682{ 705{
@@ -705,14 +728,11 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
705EXPORT_SYMBOL(drm_mode_vrefresh); 728EXPORT_SYMBOL(drm_mode_vrefresh);
706 729
707/** 730/**
708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters 731 * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
709 * @p: mode 732 * @p: mode
710 * @adjust_flags: a combination of adjustment flags 733 * @adjust_flags: a combination of adjustment flags
711 * 734 *
712 * LOCKING: 735 * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
713 * None.
714 *
715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
716 * 736 *
717 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of 737 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
718 * interlaced modes. 738 * interlaced modes.
@@ -780,15 +800,11 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
780} 800}
781EXPORT_SYMBOL(drm_mode_set_crtcinfo); 801EXPORT_SYMBOL(drm_mode_set_crtcinfo);
782 802
783
784/** 803/**
785 * drm_mode_copy - copy the mode 804 * drm_mode_copy - copy the mode
786 * @dst: mode to overwrite 805 * @dst: mode to overwrite
787 * @src: mode to copy 806 * @src: mode to copy
788 * 807 *
789 * LOCKING:
790 * None.
791 *
792 * Copy an existing mode into another mode, preserving the object id and 808 * Copy an existing mode into another mode, preserving the object id and
793 * list head of the destination mode. 809 * list head of the destination mode.
794 */ 810 */
@@ -805,13 +821,14 @@ EXPORT_SYMBOL(drm_mode_copy);
805 821
806/** 822/**
807 * drm_mode_duplicate - allocate and duplicate an existing mode 823 * drm_mode_duplicate - allocate and duplicate an existing mode
808 * @m: mode to duplicate 824 * @dev: drm_device to allocate the duplicated mode for
809 * 825 * @mode: mode to duplicate
810 * LOCKING:
811 * None.
812 * 826 *
813 * Just allocate a new mode, copy the existing mode into it, and return 827 * Just allocate a new mode, copy the existing mode into it, and return
814 * a pointer to it. Used to create new instances of established modes. 828 * a pointer to it. Used to create new instances of established modes.
829 *
830 * Returns:
831 * Pointer to duplicated mode on success, NULL on error.
815 */ 832 */
816struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, 833struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
817 const struct drm_display_mode *mode) 834 const struct drm_display_mode *mode)
@@ -833,12 +850,9 @@ EXPORT_SYMBOL(drm_mode_duplicate);
833 * @mode1: first mode 850 * @mode1: first mode
834 * @mode2: second mode 851 * @mode2: second mode
835 * 852 *
836 * LOCKING:
837 * None.
838 *
839 * Check to see if @mode1 and @mode2 are equivalent. 853 * Check to see if @mode1 and @mode2 are equivalent.
840 * 854 *
841 * RETURNS: 855 * Returns:
842 * True if the modes are equal, false otherwise. 856 * True if the modes are equal, false otherwise.
843 */ 857 */
844bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 858bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
@@ -864,13 +878,10 @@ EXPORT_SYMBOL(drm_mode_equal);
864 * @mode1: first mode 878 * @mode1: first mode
865 * @mode2: second mode 879 * @mode2: second mode
866 * 880 *
867 * LOCKING:
868 * None.
869 *
870 * Check to see if @mode1 and @mode2 are equivalent, but 881 * Check to see if @mode1 and @mode2 are equivalent, but
871 * don't check the pixel clocks nor the stereo layout. 882 * don't check the pixel clocks nor the stereo layout.
872 * 883 *
873 * RETURNS: 884 * Returns:
874 * True if the modes are equal, false otherwise. 885 * True if the modes are equal, false otherwise.
875 */ 886 */
876bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, 887bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
@@ -900,25 +911,19 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
900 * @mode_list: list of modes to check 911 * @mode_list: list of modes to check
901 * @maxX: maximum width 912 * @maxX: maximum width
902 * @maxY: maximum height 913 * @maxY: maximum height
903 * @maxPitch: max pitch
904 * 914 *
905 * LOCKING: 915 * This function is a helper which can be used to validate modes against size
906 * Caller must hold a lock protecting @mode_list. 916 * limitations of the DRM device/connector. If a mode is too big its status
907 * 917 * memeber is updated with the appropriate validation failure code. The list
908 * The DRM device (@dev) has size and pitch limits. Here we validate the 918 * itself is not changed.
909 * modes we probed for @dev against those limits and set their status as
910 * necessary.
911 */ 919 */
912void drm_mode_validate_size(struct drm_device *dev, 920void drm_mode_validate_size(struct drm_device *dev,
913 struct list_head *mode_list, 921 struct list_head *mode_list,
914 int maxX, int maxY, int maxPitch) 922 int maxX, int maxY)
915{ 923{
916 struct drm_display_mode *mode; 924 struct drm_display_mode *mode;
917 925
918 list_for_each_entry(mode, mode_list, head) { 926 list_for_each_entry(mode, mode_list, head) {
919 if (maxPitch > 0 && mode->hdisplay > maxPitch)
920 mode->status = MODE_BAD_WIDTH;
921
922 if (maxX > 0 && mode->hdisplay > maxX) 927 if (maxX > 0 && mode->hdisplay > maxX)
923 mode->status = MODE_VIRTUAL_X; 928 mode->status = MODE_VIRTUAL_X;
924 929
@@ -934,12 +939,10 @@ EXPORT_SYMBOL(drm_mode_validate_size);
934 * @mode_list: list of modes to check 939 * @mode_list: list of modes to check
935 * @verbose: be verbose about it 940 * @verbose: be verbose about it
936 * 941 *
937 * LOCKING: 942 * This helper function can be used to prune a display mode list after
 938 * Caller must hold a lock protecting @mode_list. 943 * validation has been completed. All modes whose status is not MODE_OK will be
 939 * 944 * removed from the list, and if @verbose the status code and mode name are also
940 * Once mode list generation is complete, a caller can use this routine to 945 * printed to dmesg.
941 * remove invalid modes from a mode list. If any of the modes have a
942 * status other than %MODE_OK, they are removed from @mode_list and freed.
943 */ 946 */
944void drm_mode_prune_invalid(struct drm_device *dev, 947void drm_mode_prune_invalid(struct drm_device *dev,
945 struct list_head *mode_list, bool verbose) 948 struct list_head *mode_list, bool verbose)
@@ -966,13 +969,10 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
966 * @lh_a: list_head for first mode 969 * @lh_a: list_head for first mode
967 * @lh_b: list_head for second mode 970 * @lh_b: list_head for second mode
968 * 971 *
969 * LOCKING:
970 * None.
971 *
972 * Compare two modes, given by @lh_a and @lh_b, returning a value indicating 972 * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
973 * which is better. 973 * which is better.
974 * 974 *
975 * RETURNS: 975 * Returns:
976 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or 976 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
977 * positive if @lh_b is better than @lh_a. 977 * positive if @lh_b is better than @lh_a.
978 */ 978 */
@@ -1000,12 +1000,9 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
1000 1000
1001/** 1001/**
1002 * drm_mode_sort - sort mode list 1002 * drm_mode_sort - sort mode list
1003 * @mode_list: list to sort 1003 * @mode_list: list of drm_display_mode structures to sort
1004 * 1004 *
1005 * LOCKING: 1005 * Sort @mode_list by favorability, moving good modes to the head of the list.
1006 * Caller must hold a lock protecting @mode_list.
1007 *
1008 * Sort @mode_list by favorability, putting good modes first.
1009 */ 1006 */
1010void drm_mode_sort(struct list_head *mode_list) 1007void drm_mode_sort(struct list_head *mode_list)
1011{ 1008{
@@ -1017,13 +1014,12 @@ EXPORT_SYMBOL(drm_mode_sort);
1017 * drm_mode_connector_list_update - update the mode list for the connector 1014 * drm_mode_connector_list_update - update the mode list for the connector
1018 * @connector: the connector to update 1015 * @connector: the connector to update
1019 * 1016 *
1020 * LOCKING:
1021 * Caller must hold a lock protecting @mode_list.
1022 *
1023 * This moves the modes from the @connector probed_modes list 1017 * This moves the modes from the @connector probed_modes list
1024 * to the actual mode list. It compares the probed mode against the current 1018 * to the actual mode list. It compares the probed mode against the current
1025 * list and only adds different modes. All modes unverified after this point 1019 * list and only adds different/new modes.
1026 * will be removed by the prune invalid modes. 1020 *
 1021 * This is just a helper function; it doesn't validate any modes itself and also
1022 * doesn't prune any invalid modes. Callers need to do that themselves.
1027 */ 1023 */
1028void drm_mode_connector_list_update(struct drm_connector *connector) 1024void drm_mode_connector_list_update(struct drm_connector *connector)
1029{ 1025{
@@ -1031,6 +1027,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1031 struct drm_display_mode *pmode, *pt; 1027 struct drm_display_mode *pmode, *pt;
1032 int found_it; 1028 int found_it;
1033 1029
1030 WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
1031
1034 list_for_each_entry_safe(pmode, pt, &connector->probed_modes, 1032 list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
1035 head) { 1033 head) {
1036 found_it = 0; 1034 found_it = 0;
@@ -1056,17 +1054,25 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1056EXPORT_SYMBOL(drm_mode_connector_list_update); 1054EXPORT_SYMBOL(drm_mode_connector_list_update);
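Putting the helpers of this file together, the classic probe flow looks roughly like the sketch below; the 4096x4096 limits are placeholders, not values from this patch, and the caller must hold mode_config.mutex (enforced by the new WARN_ON above):

static int example_fill_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	/* ->get_modes() has already filled connector->probed_modes. */
	drm_mode_connector_list_update(connector);

	/* Validate, prune and sort, as the comments above require. */
	drm_mode_validate_size(dev, &connector->modes, 4096, 4096);
	drm_mode_prune_invalid(dev, &connector->modes, true);
	drm_mode_sort(&connector->modes);

	return 0;
}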
1057 1055
1058/** 1056/**
1059 * drm_mode_parse_command_line_for_connector - parse command line for connector 1057 * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
1060 * @mode_option - per connector mode option 1058 * @mode_option: optional per connector mode option
1061 * @connector - connector to parse line for 1059 * @connector: connector to parse modeline for
1060 * @mode: preallocated drm_cmdline_mode structure to fill out
1061 *
 1062 * This parses the @mode_option command line modeline for modes and options to
1063 * configure the connector. If @mode_option is NULL the default command line
1064 * modeline in fb_mode_option will be parsed instead.
1062 * 1065 *
1063 * This parses the connector specific then generic command lines for 1066 * This uses the same parameters as the fb modedb.c, except for an extra
1064 * modes and options to configure the connector. 1067 * force-enable, force-enable-digital and force-disable bit at the end:
1065 * 1068 *
1066 * This uses the same parameters as the fb modedb.c, except for extra
1067 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] 1069 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
1068 * 1070 *
1069 * enable/enable Digital/disable bit at the end 1071 * The intermediate drm_cmdline_mode structure is required to store additional
 1072 * options from the command line modeline like the force-enable/disable flag.
1073 *
1074 * Returns:
1075 * True if a valid modeline has been parsed, false otherwise.
1070 */ 1076 */
1071bool drm_mode_parse_command_line_for_connector(const char *mode_option, 1077bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1072 struct drm_connector *connector, 1078 struct drm_connector *connector,
@@ -1219,6 +1225,14 @@ done:
1219} 1225}
1220EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector); 1226EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
1221 1227
1228/**
1229 * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
1230 * @dev: DRM device to create the new mode for
1231 * @cmd: input command line modeline
1232 *
1233 * Returns:
1234 * Pointer to converted mode on success, NULL on error.
1235 */
1222struct drm_display_mode * 1236struct drm_display_mode *
1223drm_mode_create_from_cmdline_mode(struct drm_device *dev, 1237drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1224 struct drm_cmdline_mode *cmd) 1238 struct drm_cmdline_mode *cmd)
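The two functions above are meant to be chained. A hedged sketch (the option string and the example_* name are illustrative only):

static struct drm_display_mode *
example_cmdline_mode(struct drm_device *dev, struct drm_connector *connector)
{
	struct drm_cmdline_mode cmdline = {};

	/* Parse a video= style modeline into the intermediate structure. */
	if (!drm_mode_parse_command_line_for_connector("1920x1080@60",
						       connector, &cmdline))
		return NULL;

	/* Convert it into a real, ID-carrying drm_display_mode. */
	return drm_mode_create_from_cmdline_mode(dev, &cmdline);
}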
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f7af69bcf3f4..9c696a5ad74d 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -351,7 +351,7 @@ err_agp:
351 drm_pci_agp_destroy(dev); 351 drm_pci_agp_destroy(dev);
352 pci_disable_device(pdev); 352 pci_disable_device(pdev);
353err_free: 353err_free:
354 drm_dev_free(dev); 354 drm_dev_unref(dev);
355 return ret; 355 return ret;
356} 356}
357EXPORT_SYMBOL(drm_get_pci_dev); 357EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
new file mode 100644
index 000000000000..e768d35ff22e
--- /dev/null
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -0,0 +1,333 @@
1/*
2 * Copyright (C) 2014 Intel Corporation
3 *
4 * DRM universal plane helper functions
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26#include <linux/list.h>
27#include <drm/drmP.h>
28#include <drm/drm_rect.h>
29
30#define SUBPIXEL_MASK 0xffff
31
32/*
33 * This is the minimal list of formats that seem to be safe for modeset use
34 * with all current DRM drivers. Most hardware can actually support more
35 * formats than this and drivers may specify a more accurate list when
36 * creating the primary plane. However drivers that still call
37 * drm_plane_init() will use this minimal format list as the default.
38 */
 39static const uint32_t safe_modeset_formats[] = {
40 DRM_FORMAT_XRGB8888,
41 DRM_FORMAT_ARGB8888,
42};
43
44/*
45 * Returns the connectors currently associated with a CRTC. This function
46 * should be called twice: once with a NULL connector list to retrieve
47 * the list size, and once with the properly allocated list to be filled in.
48 */
49static int get_connectors_for_crtc(struct drm_crtc *crtc,
50 struct drm_connector **connector_list,
51 int num_connectors)
52{
53 struct drm_device *dev = crtc->dev;
54 struct drm_connector *connector;
55 int count = 0;
56
57 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
58 if (connector->encoder && connector->encoder->crtc == crtc) {
59 if (connector_list != NULL && count < num_connectors)
60 *(connector_list++) = connector;
61
62 count++;
63 }
64
65 return count;
66}
67
68/**
69 * drm_primary_helper_update() - Helper for primary plane update
70 * @plane: plane object to update
 71 * @crtc: CRTC the plane belongs to
72 * @fb: framebuffer to flip onto plane
73 * @crtc_x: x offset of primary plane on crtc
74 * @crtc_y: y offset of primary plane on crtc
75 * @crtc_w: width of primary plane rectangle on crtc
76 * @crtc_h: height of primary plane rectangle on crtc
77 * @src_x: x offset of @fb for panning
78 * @src_y: y offset of @fb for panning
79 * @src_w: width of source rectangle in @fb
80 * @src_h: height of source rectangle in @fb
81 *
 82 * Provides a default plane update handler for primary planes. This handler
83 * is called in response to a userspace SetPlane operation on the plane with a
84 * non-NULL framebuffer. We call the driver's modeset handler to update the
85 * framebuffer.
86 *
87 * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
88 * return an error.
89 *
90 * Note that we make some assumptions about hardware limitations that may not be
91 * true for all hardware --
92 * 1) Primary plane cannot be repositioned.
93 * 2) Primary plane cannot be scaled.
94 * 3) Primary plane must cover the entire CRTC.
95 * 4) Subpixel positioning is not supported.
96 * Drivers for hardware that don't have these restrictions can provide their
97 * own implementation rather than using this helper.
98 *
99 * RETURNS:
100 * Zero on success, error code on failure
101 */
102int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
103 struct drm_framebuffer *fb,
104 int crtc_x, int crtc_y,
105 unsigned int crtc_w, unsigned int crtc_h,
106 uint32_t src_x, uint32_t src_y,
107 uint32_t src_w, uint32_t src_h)
108{
109 struct drm_mode_set set = {
110 .crtc = crtc,
111 .fb = fb,
112 .mode = &crtc->mode,
113 .x = src_x >> 16,
114 .y = src_y >> 16,
115 };
116 struct drm_rect dest = {
117 .x1 = crtc_x,
118 .y1 = crtc_y,
119 .x2 = crtc_x + crtc_w,
120 .y2 = crtc_y + crtc_h,
121 };
122 struct drm_rect clip = {
123 .x2 = crtc->mode.hdisplay,
124 .y2 = crtc->mode.vdisplay,
125 };
126 struct drm_connector **connector_list;
127 struct drm_framebuffer *tmpfb;
128 int num_connectors, ret;
129
130 if (!crtc->enabled) {
131 DRM_DEBUG_KMS("Cannot update primary plane of a disabled CRTC.\n");
132 return -EINVAL;
133 }
134
135 /* Disallow subpixel positioning */
136 if ((src_x | src_y | src_w | src_h) & SUBPIXEL_MASK) {
137 DRM_DEBUG_KMS("Primary plane does not support subpixel positioning\n");
138 return -EINVAL;
139 }
140
141 /* Primary planes are locked to their owning CRTC */
142 if (plane->possible_crtcs != drm_crtc_mask(crtc)) {
143 DRM_DEBUG_KMS("Cannot change primary plane CRTC\n");
144 return -EINVAL;
145 }
146
147 /* Disallow scaling */
148 if (crtc_w != src_w || crtc_h != src_h) {
149 DRM_DEBUG_KMS("Can't scale primary plane\n");
150 return -EINVAL;
151 }
152
153 /* Make sure primary plane covers entire CRTC */
154 drm_rect_intersect(&dest, &clip);
155 if (dest.x1 != 0 || dest.y1 != 0 ||
156 dest.x2 != crtc->mode.hdisplay || dest.y2 != crtc->mode.vdisplay) {
157 DRM_DEBUG_KMS("Primary plane must cover entire CRTC\n");
158 return -EINVAL;
159 }
160
161 /* Framebuffer must be big enough to cover entire plane */
162 ret = drm_crtc_check_viewport(crtc, crtc_x, crtc_y, &crtc->mode, fb);
163 if (ret)
164 return ret;
165
166 /* Find current connectors for CRTC */
167 num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
168 BUG_ON(num_connectors == 0);
169 connector_list = kzalloc(num_connectors * sizeof(*connector_list),
170 GFP_KERNEL);
171 if (!connector_list)
172 return -ENOMEM;
173 get_connectors_for_crtc(crtc, connector_list, num_connectors);
174
175 set.connectors = connector_list;
176 set.num_connectors = num_connectors;
177
178 /*
179 * set_config() adjusts crtc->primary->fb; however the DRM setplane
180 * code that called us expects to handle the framebuffer update and
181 * reference counting; save and restore the current fb before
182 * calling it.
183 *
184 * N.B., we call set_config() directly here rather than using
185 * drm_mode_set_config_internal. We're reprogramming the same
186 * connectors that were already in use, so we shouldn't need the extra
 187 * cross-CRTC fb refcounting to accommodate stealing connectors.
188 * drm_mode_setplane() already handles the basic refcounting for the
189 * framebuffers involved in this operation.
190 */
191 tmpfb = plane->fb;
192 ret = crtc->funcs->set_config(&set);
193 plane->fb = tmpfb;
194
195 kfree(connector_list);
196 return ret;
197}
198EXPORT_SYMBOL(drm_primary_helper_update);
199
200/**
201 * drm_primary_helper_disable() - Helper for primary plane disable
202 * @plane: plane to disable
203 *
 204 * Provides a default plane disable handler for primary planes. This handler
205 * is called in response to a userspace SetPlane operation on the plane with a
206 * NULL framebuffer parameter. We call the driver's modeset handler with a NULL
207 * framebuffer to disable the CRTC if no other planes are currently enabled.
208 * If other planes are still enabled on the same CRTC, we return -EBUSY.
209 *
210 * Note that some hardware may be able to disable the primary plane without
211 * disabling the whole CRTC. Drivers for such hardware should provide their
212 * own disable handler that disables just the primary plane (and they'll likely
213 * need to provide their own update handler as well to properly re-enable a
214 * disabled primary plane).
215 *
216 * RETURNS:
217 * Zero on success, error code on failure
218 */
219int drm_primary_helper_disable(struct drm_plane *plane)
220{
221 struct drm_plane *p;
222 struct drm_mode_set set = {
223 .crtc = plane->crtc,
224 .fb = NULL,
225 };
226
227 if (plane->crtc == NULL || plane->fb == NULL)
228 /* Already disabled */
229 return 0;
230
231 list_for_each_entry(p, &plane->dev->mode_config.plane_list, head)
232 if (p != plane && p->fb) {
233 DRM_DEBUG_KMS("Cannot disable primary plane while other planes are still active on CRTC.\n");
234 return -EBUSY;
235 }
236
237 /*
238 * N.B. We call set_config() directly here rather than
239 * drm_mode_set_config_internal() since drm_mode_setplane() already
240 * handles the basic refcounting and we don't need the special
241 * cross-CRTC refcounting (no chance of stealing connectors from
242 * other CRTC's with this update).
243 */
244 return plane->crtc->funcs->set_config(&set);
245}
246EXPORT_SYMBOL(drm_primary_helper_disable);
247
248/**
249 * drm_primary_helper_destroy() - Helper for primary plane destruction
250 * @plane: plane to destroy
251 *
252 * Provides a default plane destroy handler for primary planes. This handler
253 * is called during CRTC destruction. We disable the primary plane, remove
254 * it from the DRM plane list, and deallocate the plane structure.
255 */
256void drm_primary_helper_destroy(struct drm_plane *plane)
257{
258 plane->funcs->disable_plane(plane);
259 drm_plane_cleanup(plane);
260 kfree(plane);
261}
262EXPORT_SYMBOL(drm_primary_helper_destroy);
263
264const struct drm_plane_funcs drm_primary_helper_funcs = {
265 .update_plane = drm_primary_helper_update,
266 .disable_plane = drm_primary_helper_disable,
267 .destroy = drm_primary_helper_destroy,
268};
269EXPORT_SYMBOL(drm_primary_helper_funcs);
270
271/**
272 * drm_primary_helper_create_plane() - Create a generic primary plane
273 * @dev: drm device
274 * @formats: pixel formats supported, or NULL for a default safe list
275 * @num_formats: size of @formats; ignored if @formats is NULL
276 *
277 * Allocates and initializes a primary plane that can be used with the primary
278 * plane helpers. Drivers that wish to use driver-specific plane structures or
279 * provide custom handler functions may perform their own allocation and
280 * initialization rather than calling this function.
281 */
282struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
283 const uint32_t *formats,
284 int num_formats)
285{
286 struct drm_plane *primary;
287 int ret;
288
289 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
290 if (primary == NULL) {
291 DRM_DEBUG_KMS("Failed to allocate primary plane\n");
292 return NULL;
293 }
294
295 if (formats == NULL) {
296 formats = safe_modeset_formats;
297 num_formats = ARRAY_SIZE(safe_modeset_formats);
298 }
299
 300 /* possible_crtcs will be filled in later by crtc_init */
301 ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
302 formats, num_formats,
303 DRM_PLANE_TYPE_PRIMARY);
304 if (ret) {
305 kfree(primary);
306 primary = NULL;
307 }
308
309 return primary;
310}
311EXPORT_SYMBOL(drm_primary_helper_create_plane);
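For drivers that want a custom format list but the stock helper behaviour, a sketch of the expected wiring; example_crtc_setup and its single-format list are hypothetical:

static int example_crtc_setup(struct drm_device *dev, struct drm_crtc *crtc,
			      const struct drm_crtc_funcs *funcs)
{
	static const uint32_t formats[] = {
		DRM_FORMAT_XRGB8888,
	};
	struct drm_plane *primary;

	primary = drm_primary_helper_create_plane(dev, formats,
						  ARRAY_SIZE(formats));
	if (!primary)
		return -ENOMEM;

	/* Error handling elided for brevity in this sketch. */
	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}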
312
313/**
314 * drm_crtc_init - Legacy CRTC initialization function
315 * @dev: DRM device
316 * @crtc: CRTC object to init
317 * @funcs: callbacks for the new CRTC
318 *
319 * Initialize a CRTC object with a default helper-provided primary plane and no
320 * cursor plane.
321 *
322 * Returns:
323 * Zero on success, error code on failure.
324 */
325int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
326 const struct drm_crtc_funcs *funcs)
327{
328 struct drm_plane *primary;
329
330 primary = drm_primary_helper_create_plane(dev, NULL, 0);
331 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
332}
333EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 21fc82006b78..319ff5385601 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -64,7 +64,7 @@ static int drm_get_platform_dev(struct platform_device *platdev,
64 return 0; 64 return 0;
65 65
66err_free: 66err_free:
67 drm_dev_free(dev); 67 drm_dev_unref(dev);
68 return ret; 68 return ret;
69} 69}
70 70
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bb516fdd195d..304ca8cacbc4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,7 +68,8 @@ struct drm_prime_attachment {
68 enum dma_data_direction dir; 68 enum dma_data_direction dir;
69}; 69};
70 70
71static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) 71static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
72 struct dma_buf *dma_buf, uint32_t handle)
72{ 73{
73 struct drm_prime_member *member; 74 struct drm_prime_member *member;
74 75
@@ -174,7 +175,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
174} 175}
175 176
176static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, 177static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
177 enum dma_data_direction dir) 178 enum dma_data_direction dir)
178{ 179{
179 struct drm_prime_attachment *prime_attach = attach->priv; 180 struct drm_prime_attachment *prime_attach = attach->priv;
180 struct drm_gem_object *obj = attach->dmabuf->priv; 181 struct drm_gem_object *obj = attach->dmabuf->priv;
@@ -211,11 +212,19 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
211} 212}
212 213
213static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, 214static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
214 struct sg_table *sgt, enum dma_data_direction dir) 215 struct sg_table *sgt,
216 enum dma_data_direction dir)
215{ 217{
216 /* nothing to be done here */ 218 /* nothing to be done here */
217} 219}
218 220
221/**
222 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
223 * @dma_buf: buffer to be released
224 *
225 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
226 * must use this in their dma_buf ops structure as the release callback.
227 */
219void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 228void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
220{ 229{
221 struct drm_gem_object *obj = dma_buf->priv; 230 struct drm_gem_object *obj = dma_buf->priv;
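
As the new comment states, a GEM driver rolling its own dma_buf_ops still has to plug in drm_gem_dmabuf_release(); a condensed sketch, with hypothetical foo_* callbacks and the other mandatory dma_buf_ops members omitted:

static const struct dma_buf_ops foo_dmabuf_ops = {
	.map_dma_buf	= foo_map_dma_buf,
	.unmap_dma_buf	= foo_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,	/* drops the GEM reference */
	.mmap		= foo_dmabuf_mmap,
};
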
@@ -242,30 +251,30 @@ static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
242} 251}
243 252
244static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 253static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
245 unsigned long page_num) 254 unsigned long page_num)
246{ 255{
247 return NULL; 256 return NULL;
248} 257}
249 258
250static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 259static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
251 unsigned long page_num, void *addr) 260 unsigned long page_num, void *addr)
252{ 261{
253 262
254} 263}
255static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, 264static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
256 unsigned long page_num) 265 unsigned long page_num)
257{ 266{
258 return NULL; 267 return NULL;
259} 268}
260 269
261static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, 270static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
262 unsigned long page_num, void *addr) 271 unsigned long page_num, void *addr)
263{ 272{
264 273
265} 274}
266 275
267static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, 276static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
268 struct vm_area_struct *vma) 277 struct vm_area_struct *vma)
269{ 278{
270 struct drm_gem_object *obj = dma_buf->priv; 279 struct drm_gem_object *obj = dma_buf->priv;
271 struct drm_device *dev = obj->dev; 280 struct drm_device *dev = obj->dev;
@@ -315,6 +324,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
315 * driver's scatter/gather table 324 * driver's scatter/gather table
316 */ 325 */
317 326
327/**
328 * drm_gem_prime_export - helper library implementation of the export callback
329 * @dev: drm_device to export from
330 * @obj: GEM object to export
331 * @flags: flags like DRM_CLOEXEC
332 *
333 * This is the implementation of the gem_prime_export function for GEM drivers
334 * using the PRIME helpers.
335 */
318struct dma_buf *drm_gem_prime_export(struct drm_device *dev, 336struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
319 struct drm_gem_object *obj, int flags) 337 struct drm_gem_object *obj, int flags)
320{ 338{
@@ -355,9 +373,23 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
355 return dmabuf; 373 return dmabuf;
356} 374}
357 375
376/**
377 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
378 * @dev: dev to export the buffer from
379 * @file_priv: drm file-private structure
380 * @handle: buffer handle to export
381 * @flags: flags like DRM_CLOEXEC
382 * @prime_fd: pointer to storage for the fd of the created dma-buf
383 *
384 * This is the PRIME export function which GEM drivers must use to ensure
385 * correct lifetime management of the underlying GEM object. The actual
386 * exporting from the GEM object to a dma-buf is done through the
387 * gem_prime_export driver callback.
388 */
358int drm_gem_prime_handle_to_fd(struct drm_device *dev, 389int drm_gem_prime_handle_to_fd(struct drm_device *dev,
359 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 390 struct drm_file *file_priv, uint32_t handle,
360 int *prime_fd) 391 uint32_t flags,
392 int *prime_fd)
361{ 393{
362 struct drm_gem_object *obj; 394 struct drm_gem_object *obj;
363 int ret = 0; 395 int ret = 0;
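
Wired into a driver, the export side looks like this minimal sketch (foo_driver is hypothetical; the generic gem_prime_export is used here, but a driver-specific implementation is equally valid):

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.gem_prime_export	= drm_gem_prime_export,
	/* ... remaining driver callbacks ... */
};
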
@@ -441,6 +473,14 @@ out_unlock:
441} 473}
442EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 474EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
443 475
476/**
477 * drm_gem_prime_import - helper library implementation of the import callback
478 * @dev: drm_device to import into
479 * @dma_buf: dma-buf object to import
480 *
481 * This is the implementation of the gem_prime_import function for GEM drivers
482 * using the PRIME helpers.
483 */
444struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, 484struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
445 struct dma_buf *dma_buf) 485 struct dma_buf *dma_buf)
446{ 486{
@@ -496,8 +536,21 @@ fail_detach:
496} 536}
497EXPORT_SYMBOL(drm_gem_prime_import); 537EXPORT_SYMBOL(drm_gem_prime_import);
498 538
539/**
540 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
541 * @dev: device to import the buffer into
542 * @file_priv: drm file-private structure
543 * @prime_fd: fd of the dma-buf which should be imported
544 * @handle: pointer to storage for the handle of the imported buffer object
545 *
546 * This is the PRIME import function which GEM drivers must use to ensure
547 * correct lifetime management of the underlying GEM object. The actual
548 * importing of the GEM object from the dma-buf is done through the
549 * gem_prime_import driver callback.
550 */
499int drm_gem_prime_fd_to_handle(struct drm_device *dev, 551int drm_gem_prime_fd_to_handle(struct drm_device *dev,
500 struct drm_file *file_priv, int prime_fd, uint32_t *handle) 552 struct drm_file *file_priv, int prime_fd,
553 uint32_t *handle)
501{ 554{
502 struct dma_buf *dma_buf; 555 struct dma_buf *dma_buf;
503 struct drm_gem_object *obj; 556 struct drm_gem_object *obj;
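
The import side mirrors the export side; a sketch of the corresponding drm_driver wiring (the hypothetical foo_driver again):

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import	= drm_gem_prime_import,
	/* ... remaining driver callbacks ... */
};
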
@@ -598,12 +651,14 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
598 args->fd, &args->handle); 651 args->fd, &args->handle);
599} 652}
600 653
601/* 654/**
602 * drm_prime_pages_to_sg 655 * drm_prime_pages_to_sg - converts a page array into an sg list
656 * @pages: pointer to the array of page pointers to convert
657 * @nr_pages: length of the page vector
603 * 658 *
604 * this helper creates an sg table object from a set of pages 659 * This helper creates an sg table object from a set of pages;
605 * the driver is responsible for mapping the pages into the 660 * the driver is responsible for mapping the pages into the
606 * importers address space 661 * importer's address space for use with dma_buf itself.
607 */ 662 */
608struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) 663struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
609{ 664{
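
A sketch of a typical caller: a shmem-backed driver can implement its gem_prime_get_sg_table callback directly on this helper (the foo_* names and the ->pages field are assumptions, not part of this patch):

static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = to_foo_gem_object(obj);

	/* bo->pages was filled when the backing storage was pinned */
	return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}
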
@@ -628,9 +683,16 @@ out:
628} 683}
629EXPORT_SYMBOL(drm_prime_pages_to_sg); 684EXPORT_SYMBOL(drm_prime_pages_to_sg);
630 685
631/* export an sg table into an array of pages and addresses 686/**
632 this is currently required by the TTM driver in order to do correct fault 687 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
633 handling */ 688 * @sgt: scatter-gather table to convert
689 * @pages: array of page pointers to store the page array in
690 * @addrs: optional array to store the dma bus address of each page
691 * @max_pages: size of both the passed-in arrays
692 *
693 * Exports an sg table into an array of pages and addresses. This is currently
694 * required by the TTM driver in order to do correct fault handling.
695 */
634int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, 696int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
635 dma_addr_t *addrs, int max_pages) 697 dma_addr_t *addrs, int max_pages)
636{ 698{
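
A hedged sketch of the TTM-style use the comment mentions: after import, the filled arrays feed the driver's fault handling (the foo_* names are illustrative):

static int foo_import_sg(struct sg_table *sgt, struct page **pages,
			 dma_addr_t *addrs, int num_pages)
{
	/* fills pages[] and, since addrs is non-NULL, the bus addresses too */
	return drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, num_pages);
}
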
@@ -663,7 +725,15 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
663 return 0; 725 return 0;
664} 726}
665EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); 727EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
666/* helper function to cleanup a GEM/prime object */ 728
729/**
730 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
731 * @obj: GEM object which was created from a dma-buf
732 * @sg: the sg-table which was pinned at import time
733 *
734 * This is the cleanup function which GEM drivers need to call when they use
735 * drm_gem_prime_import() to import dma-bufs.
736 */
667void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) 737void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
668{ 738{
669 struct dma_buf_attachment *attach; 739 struct dma_buf_attachment *attach;
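
The matching call site, sketched for a hypothetical driver's GEM free callback (obj->import_attach is the marker left behind by drm_gem_prime_import(); bo->sgt is an assumed field):

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = to_foo_gem_object(obj);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);

	drm_gem_object_release(obj);
	kfree(bo);
}
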
@@ -683,11 +753,9 @@ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
683 INIT_LIST_HEAD(&prime_fpriv->head); 753 INIT_LIST_HEAD(&prime_fpriv->head);
684 mutex_init(&prime_fpriv->lock); 754 mutex_init(&prime_fpriv->lock);
685} 755}
686EXPORT_SYMBOL(drm_prime_init_file_private);
687 756
688void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) 757void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
689{ 758{
690 /* by now drm_gem_release should've made sure the list is empty */ 759 /* by now drm_gem_release should've made sure the list is empty */
691 WARN_ON(!list_empty(&prime_fpriv->head)); 760 WARN_ON(!list_empty(&prime_fpriv->head));
692} 761}
693EXPORT_SYMBOL(drm_prime_destroy_file_private);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 98a33c580ca1..4c24c3ac1efa 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -31,8 +31,10 @@
31 * DEALINGS IN THE SOFTWARE. 31 * DEALINGS IN THE SOFTWARE.
32 */ 32 */
33 33
34#include <linux/fs.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/mount.h>
36#include <linux/slab.h> 38#include <linux/slab.h>
37#include <drm/drmP.h> 39#include <drm/drmP.h>
38#include <drm/drm_core.h> 40#include <drm/drm_core.h>
@@ -43,6 +45,10 @@ EXPORT_SYMBOL(drm_debug);
43unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */ 45unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
44EXPORT_SYMBOL(drm_rnodes); 46EXPORT_SYMBOL(drm_rnodes);
45 47
48/* 1 to allow user space to request universal planes (experimental) */
49unsigned int drm_universal_planes = 0;
50EXPORT_SYMBOL(drm_universal_planes);
51
46unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 52unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
47EXPORT_SYMBOL(drm_vblank_offdelay); 53EXPORT_SYMBOL(drm_vblank_offdelay);
48 54
@@ -66,10 +72,12 @@ MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
66 72
67module_param_named(debug, drm_debug, int, 0600); 73module_param_named(debug, drm_debug, int, 0600);
68module_param_named(rnodes, drm_rnodes, int, 0600); 74module_param_named(rnodes, drm_rnodes, int, 0600);
75module_param_named(universal_planes, drm_universal_planes, int, 0600);
69module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 76module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
70module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 77module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
71module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 78module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
72 79
80static DEFINE_SPINLOCK(drm_minor_lock);
73struct idr drm_minors_idr; 81struct idr drm_minors_idr;
74 82
75struct class *drm_class; 83struct class *drm_class;
@@ -94,48 +102,20 @@ int drm_err(const char *func, const char *format, ...)
94} 102}
95EXPORT_SYMBOL(drm_err); 103EXPORT_SYMBOL(drm_err);
96 104
97void drm_ut_debug_printk(unsigned int request_level, 105void drm_ut_debug_printk(const char *function_name, const char *format, ...)
98 const char *prefix,
99 const char *function_name,
100 const char *format, ...)
101{ 106{
102 struct va_format vaf; 107 struct va_format vaf;
103 va_list args; 108 va_list args;
104 109
105 if (drm_debug & request_level) { 110 va_start(args, format);
106 va_start(args, format); 111 vaf.fmt = format;
107 vaf.fmt = format; 112 vaf.va = &args;
108 vaf.va = &args;
109
110 if (function_name)
111 printk(KERN_DEBUG "[%s:%s], %pV", prefix,
112 function_name, &vaf);
113 else
114 printk(KERN_DEBUG "%pV", &vaf);
115 va_end(args);
116 }
117}
118EXPORT_SYMBOL(drm_ut_debug_printk);
119
120static int drm_minor_get_id(struct drm_device *dev, int type)
121{
122 int ret;
123 int base = 0, limit = 63;
124
125 if (type == DRM_MINOR_CONTROL) {
126 base += 64;
127 limit = base + 63;
128 } else if (type == DRM_MINOR_RENDER) {
129 base += 128;
130 limit = base + 63;
131 }
132 113
133 mutex_lock(&dev->struct_mutex); 114 printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
134 ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
135 mutex_unlock(&dev->struct_mutex);
136 115
137 return ret == -ENOSPC ? -EINVAL : ret; 116 va_end(args);
138} 117}
118EXPORT_SYMBOL(drm_ut_debug_printk);
139 119
140struct drm_master *drm_master_create(struct drm_minor *minor) 120struct drm_master *drm_master_create(struct drm_minor *minor)
141{ 121{
@@ -152,8 +132,6 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
152 INIT_LIST_HEAD(&master->magicfree); 132 INIT_LIST_HEAD(&master->magicfree);
153 master->minor = minor; 133 master->minor = minor;
154 134
155 list_add_tail(&master->head, &minor->master_list);
156
157 return master; 135 return master;
158} 136}
159 137
@@ -171,8 +149,7 @@ static void drm_master_destroy(struct kref *kref)
171 struct drm_device *dev = master->minor->dev; 149 struct drm_device *dev = master->minor->dev;
172 struct drm_map_list *r_list, *list_temp; 150 struct drm_map_list *r_list, *list_temp;
173 151
174 list_del(&master->head); 152 mutex_lock(&dev->struct_mutex);
175
176 if (dev->driver->master_destroy) 153 if (dev->driver->master_destroy)
177 dev->driver->master_destroy(dev, master); 154 dev->driver->master_destroy(dev, master);
178 155
@@ -200,6 +177,7 @@ static void drm_master_destroy(struct kref *kref)
200 177
201 drm_ht_remove(&master->magiclist); 178 drm_ht_remove(&master->magiclist);
202 179
180 mutex_unlock(&dev->struct_mutex);
203 kfree(master); 181 kfree(master);
204} 182}
205 183
@@ -215,19 +193,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
215{ 193{
216 int ret = 0; 194 int ret = 0;
217 195
196 mutex_lock(&dev->master_mutex);
218 if (file_priv->is_master) 197 if (file_priv->is_master)
219 return 0; 198 goto out_unlock;
220
221 if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
222 return -EINVAL;
223 199
224 if (!file_priv->master) 200 if (file_priv->minor->master) {
225 return -EINVAL; 201 ret = -EINVAL;
202 goto out_unlock;
203 }
226 204
227 if (file_priv->minor->master) 205 if (!file_priv->master) {
228 return -EINVAL; 206 ret = -EINVAL;
207 goto out_unlock;
208 }
229 209
230 mutex_lock(&dev->struct_mutex);
231 file_priv->minor->master = drm_master_get(file_priv->master); 210 file_priv->minor->master = drm_master_get(file_priv->master);
232 file_priv->is_master = 1; 211 file_priv->is_master = 1;
233 if (dev->driver->master_set) { 212 if (dev->driver->master_set) {
@@ -237,142 +216,211 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
237 drm_master_put(&file_priv->minor->master); 216 drm_master_put(&file_priv->minor->master);
238 } 217 }
239 } 218 }
240 mutex_unlock(&dev->struct_mutex);
241 219
220out_unlock:
221 mutex_unlock(&dev->master_mutex);
242 return ret; 222 return ret;
243} 223}
244 224
245int drm_dropmaster_ioctl(struct drm_device *dev, void *data, 225int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *file_priv) 226 struct drm_file *file_priv)
247{ 227{
228 int ret = -EINVAL;
229
230 mutex_lock(&dev->master_mutex);
248 if (!file_priv->is_master) 231 if (!file_priv->is_master)
249 return -EINVAL; 232 goto out_unlock;
250 233
251 if (!file_priv->minor->master) 234 if (!file_priv->minor->master)
252 return -EINVAL; 235 goto out_unlock;
253 236
254 mutex_lock(&dev->struct_mutex); 237 ret = 0;
255 if (dev->driver->master_drop) 238 if (dev->driver->master_drop)
256 dev->driver->master_drop(dev, file_priv, false); 239 dev->driver->master_drop(dev, file_priv, false);
257 drm_master_put(&file_priv->minor->master); 240 drm_master_put(&file_priv->minor->master);
258 file_priv->is_master = 0; 241 file_priv->is_master = 0;
259 mutex_unlock(&dev->struct_mutex); 242
260 return 0; 243out_unlock:
244 mutex_unlock(&dev->master_mutex);
245 return ret;
261} 246}
262 247
263/** 248/*
264 * drm_get_minor - Allocate and register new DRM minor 249 * DRM Minors
265 * @dev: DRM device 250 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
266 * @minor: Pointer to where new minor is stored 251 * of them is represented by a drm_minor object. Depending on the capabilities
267 * @type: Type of minor 252 * of the device-driver, different interfaces are registered.
268 *
269 * Allocate a new minor of the given type and register it. A pointer to the new
270 * minor is returned in @minor.
271 * Caller must hold the global DRM mutex.
272 * 253 *
273 * RETURNS: 254 * Minors can be accessed via dev->$minor_name. This pointer is either
274 * 0 on success, negative error code on failure. 255 * NULL or a valid drm_minor pointer and stays valid as long as the device is
256 * valid. This means, DRM minors have the same life-time as the underlying
257 * device. However, this doesn't mean that the minor is active. Minors are
258 * registered and unregistered dynamically according to device-state.
275 */ 259 */
276static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, 260
277 int type) 261static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
262 unsigned int type)
263{
264 switch (type) {
265 case DRM_MINOR_LEGACY:
266 return &dev->primary;
267 case DRM_MINOR_RENDER:
268 return &dev->render;
269 case DRM_MINOR_CONTROL:
270 return &dev->control;
271 default:
272 return NULL;
273 }
274}
275
276static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
277{
278 struct drm_minor *minor;
279
280 minor = kzalloc(sizeof(*minor), GFP_KERNEL);
281 if (!minor)
282 return -ENOMEM;
283
284 minor->type = type;
285 minor->dev = dev;
286
287 *drm_minor_get_slot(dev, type) = minor;
288 return 0;
289}
290
291static void drm_minor_free(struct drm_device *dev, unsigned int type)
292{
293 struct drm_minor **slot;
294
295 slot = drm_minor_get_slot(dev, type);
296 if (*slot) {
297 kfree(*slot);
298 *slot = NULL;
299 }
300}
301
302static int drm_minor_register(struct drm_device *dev, unsigned int type)
278{ 303{
279 struct drm_minor *new_minor; 304 struct drm_minor *new_minor;
305 unsigned long flags;
280 int ret; 306 int ret;
281 int minor_id; 307 int minor_id;
282 308
283 DRM_DEBUG("\n"); 309 DRM_DEBUG("\n");
284 310
285 minor_id = drm_minor_get_id(dev, type); 311 new_minor = *drm_minor_get_slot(dev, type);
312 if (!new_minor)
313 return 0;
314
315 idr_preload(GFP_KERNEL);
316 spin_lock_irqsave(&drm_minor_lock, flags);
317 minor_id = idr_alloc(&drm_minors_idr,
318 NULL,
319 64 * type,
320 64 * (type + 1),
321 GFP_NOWAIT);
322 spin_unlock_irqrestore(&drm_minor_lock, flags);
323 idr_preload_end();
324
286 if (minor_id < 0) 325 if (minor_id < 0)
287 return minor_id; 326 return minor_id;
288 327
289 new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
290 if (!new_minor) {
291 ret = -ENOMEM;
292 goto err_idr;
293 }
294
295 new_minor->type = type;
296 new_minor->device = MKDEV(DRM_MAJOR, minor_id);
297 new_minor->dev = dev;
298 new_minor->index = minor_id; 328 new_minor->index = minor_id;
299 INIT_LIST_HEAD(&new_minor->master_list);
300
301 idr_replace(&drm_minors_idr, new_minor, minor_id);
302 329
303#if defined(CONFIG_DEBUG_FS)
304 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); 330 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
305 if (ret) { 331 if (ret) {
306 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 332 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
307 goto err_mem; 333 goto err_id;
308 } 334 }
309#endif
310 335
311 ret = drm_sysfs_device_add(new_minor); 336 ret = drm_sysfs_device_add(new_minor);
312 if (ret) { 337 if (ret) {
313 printk(KERN_ERR 338 DRM_ERROR("DRM: Error sysfs_device_add.\n");
314 "DRM: Error sysfs_device_add.\n");
315 goto err_debugfs; 339 goto err_debugfs;
316 } 340 }
317 *minor = new_minor; 341
342 /* replace NULL with the new minor so lookups will succeed from now on */
343 spin_lock_irqsave(&drm_minor_lock, flags);
344 idr_replace(&drm_minors_idr, new_minor, new_minor->index);
345 spin_unlock_irqrestore(&drm_minor_lock, flags);
318 346
319 DRM_DEBUG("new minor assigned %d\n", minor_id); 347 DRM_DEBUG("new minor assigned %d\n", minor_id);
320 return 0; 348 return 0;
321 349
322
323err_debugfs: 350err_debugfs:
324#if defined(CONFIG_DEBUG_FS)
325 drm_debugfs_cleanup(new_minor); 351 drm_debugfs_cleanup(new_minor);
326err_mem: 352err_id:
327#endif 353 spin_lock_irqsave(&drm_minor_lock, flags);
328 kfree(new_minor);
329err_idr:
330 idr_remove(&drm_minors_idr, minor_id); 354 idr_remove(&drm_minors_idr, minor_id);
331 *minor = NULL; 355 spin_unlock_irqrestore(&drm_minor_lock, flags);
356 new_minor->index = 0;
332 return ret; 357 return ret;
333} 358}
334 359
335/** 360static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
336 * drm_unplug_minor - Unplug DRM minor
337 * @minor: Minor to unplug
338 *
339 * Unplugs the given DRM minor but keeps the object. So after this returns,
340 * minor->dev is still valid so existing open-files can still access it to get
341 * device information from their drm_file ojects.
342 * If the minor is already unplugged or if @minor is NULL, nothing is done.
343 * The global DRM mutex must be held by the caller.
344 */
345static void drm_unplug_minor(struct drm_minor *minor)
346{ 361{
362 struct drm_minor *minor;
363 unsigned long flags;
364
365 minor = *drm_minor_get_slot(dev, type);
347 if (!minor || !minor->kdev) 366 if (!minor || !minor->kdev)
348 return; 367 return;
349 368
350#if defined(CONFIG_DEBUG_FS) 369 spin_lock_irqsave(&drm_minor_lock, flags);
351 drm_debugfs_cleanup(minor); 370 idr_remove(&drm_minors_idr, minor->index);
352#endif 371 spin_unlock_irqrestore(&drm_minor_lock, flags);
372 minor->index = 0;
353 373
374 drm_debugfs_cleanup(minor);
354 drm_sysfs_device_remove(minor); 375 drm_sysfs_device_remove(minor);
355 idr_remove(&drm_minors_idr, minor->index);
356} 376}
357 377
358/** 378/**
359 * drm_put_minor - Destroy DRM minor 379 * drm_minor_acquire - Acquire a DRM minor
360 * @minor: Minor to destroy 380 * @minor_id: Minor ID of the DRM-minor
381 *
382 * Looks up the given minor-ID and returns the respective DRM-minor object. The
383 * reference-count of the underlying device is increased so you must release this
384 * object with drm_minor_release().
361 * 385 *
362 * This calls drm_unplug_minor() on the given minor and then frees it. Nothing 386 * As long as you hold this minor, it is guaranteed that the object and the
363 * is done if @minor is NULL. It is fine to call this on already unplugged 387 * minor->dev pointer will stay valid! However, the device may get unplugged and
364 * minors. 388 * unregistered while you hold the minor.
365 * The global DRM mutex must be held by the caller. 389 *
390 * Returns:
391 * Pointer to minor-object with increased device-refcount, or an ERR_PTR on
392 * failure.
366 */ 393 */
367static void drm_put_minor(struct drm_minor *minor) 394struct drm_minor *drm_minor_acquire(unsigned int minor_id)
368{ 395{
369 if (!minor) 396 struct drm_minor *minor;
370 return; 397 unsigned long flags;
398
399 spin_lock_irqsave(&drm_minor_lock, flags);
400 minor = idr_find(&drm_minors_idr, minor_id);
401 if (minor)
402 drm_dev_ref(minor->dev);
403 spin_unlock_irqrestore(&drm_minor_lock, flags);
404
405 if (!minor) {
406 return ERR_PTR(-ENODEV);
407 } else if (drm_device_is_unplugged(minor->dev)) {
408 drm_dev_unref(minor->dev);
409 return ERR_PTR(-ENODEV);
410 }
371 411
372 DRM_DEBUG("release secondary minor %d\n", minor->index); 412 return minor;
413}
373 414
374 drm_unplug_minor(minor); 415/**
375 kfree(minor); 416 * drm_minor_release - Release DRM minor
417 * @minor: Pointer to DRM minor object
418 *
419 * Release a minor that was previously acquired via drm_minor_acquire().
420 */
421void drm_minor_release(struct drm_minor *minor)
422{
423 drm_dev_unref(minor->dev);
376} 424}
377 425
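
The intended calling pattern, condensed into a sketch (this mirrors what the DRM file-open path does; foo_open is illustrative, not part of this patch):

static int foo_open(struct inode *inode, struct file *filp)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* minor and minor->dev stay valid here, even across an unplug */

	drm_minor_release(minor);
	return 0;
}
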
378/** 426/**
@@ -392,18 +440,16 @@ void drm_put_dev(struct drm_device *dev)
392 } 440 }
393 441
394 drm_dev_unregister(dev); 442 drm_dev_unregister(dev);
395 drm_dev_free(dev); 443 drm_dev_unref(dev);
396} 444}
397EXPORT_SYMBOL(drm_put_dev); 445EXPORT_SYMBOL(drm_put_dev);
398 446
399void drm_unplug_dev(struct drm_device *dev) 447void drm_unplug_dev(struct drm_device *dev)
400{ 448{
401 /* for a USB device */ 449 /* for a USB device */
402 if (drm_core_check_feature(dev, DRIVER_MODESET)) 450 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
403 drm_unplug_minor(dev->control); 451 drm_minor_unregister(dev, DRM_MINOR_RENDER);
404 if (dev->render) 452 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
405 drm_unplug_minor(dev->render);
406 drm_unplug_minor(dev->primary);
407 453
408 mutex_lock(&drm_global_mutex); 454 mutex_lock(&drm_global_mutex);
409 455
@@ -416,6 +462,78 @@ void drm_unplug_dev(struct drm_device *dev)
416} 462}
417EXPORT_SYMBOL(drm_unplug_dev); 463EXPORT_SYMBOL(drm_unplug_dev);
418 464
465/*
466 * DRM internal mount
467 * We want to be able to allocate our own "struct address_space" to control
468 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
469 * stand-alone address_space objects, so we need an underlying inode. As there
470 * is no way to allocate an independent inode easily, we need a fake internal
471 * VFS mount-point.
472 *
473 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
474 * frees it again. You are allowed to use iget() and iput() to get references to
475 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
476 * drm_fs_inode_free() call (which does not have to be the last iput()).
477 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
478 * between multiple inode-users. You could, technically, call
479 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
480 * iput(), but this way you'd end up with a new vfsmount for each inode.
481 */
482
483static int drm_fs_cnt;
484static struct vfsmount *drm_fs_mnt;
485
486static const struct dentry_operations drm_fs_dops = {
487 .d_dname = simple_dname,
488};
489
490static const struct super_operations drm_fs_sops = {
491 .statfs = simple_statfs,
492};
493
494static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
495 const char *dev_name, void *data)
496{
497 return mount_pseudo(fs_type,
498 "drm:",
499 &drm_fs_sops,
500 &drm_fs_dops,
501 0x010203ff);
502}
503
504static struct file_system_type drm_fs_type = {
505 .name = "drm",
506 .owner = THIS_MODULE,
507 .mount = drm_fs_mount,
508 .kill_sb = kill_anon_super,
509};
510
511static struct inode *drm_fs_inode_new(void)
512{
513 struct inode *inode;
514 int r;
515
516 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
517 if (r < 0) {
518 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
519 return ERR_PTR(r);
520 }
521
522 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
523 if (IS_ERR(inode))
524 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
525
526 return inode;
527}
528
529static void drm_fs_inode_free(struct inode *inode)
530{
531 if (inode) {
532 iput(inode);
533 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
534 }
535}
536
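
The pairing rule from the comment above, condensed into a sketch; this is exactly what drm_dev_alloc() and drm_dev_release() below do with dev->anon_inode (foo_use_anon_inode is hypothetical):

static int foo_use_anon_inode(void)
{
	struct inode *inode = drm_fs_inode_new();

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* inode->i_mapping can now back driver-private mmap offsets */

	drm_fs_inode_free(inode);	/* exactly one free per successful new */
	return 0;
}
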
419/** 537/**
420 * drm_dev_alloc - Allocate new drm device 538 * drm_dev_alloc - Allocate new drm device
421 * @driver: DRM driver to allocate device for 539 * @driver: DRM driver to allocate device for
@@ -425,6 +543,9 @@ EXPORT_SYMBOL(drm_unplug_dev);
425 * Call drm_dev_register() to advertise the device to user space and register it 543 * Call drm_dev_register() to advertise the device to user space and register it
426 * with other core subsystems. 544 * with other core subsystems.
427 * 545 *
546 * The initial ref-count of the object is 1. Use drm_dev_ref() and
547 * drm_dev_unref() to take and drop further ref-counts.
548 *
428 * RETURNS: 549 * RETURNS:
429 * Pointer to new DRM device, or NULL if out of memory. 550 * Pointer to new DRM device, or NULL if out of memory.
430 */ 551 */
@@ -438,6 +559,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
438 if (!dev) 559 if (!dev)
439 return NULL; 560 return NULL;
440 561
562 kref_init(&dev->ref);
441 dev->dev = parent; 563 dev->dev = parent;
442 dev->driver = driver; 564 dev->driver = driver;
443 565
@@ -451,9 +573,33 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
451 spin_lock_init(&dev->event_lock); 573 spin_lock_init(&dev->event_lock);
452 mutex_init(&dev->struct_mutex); 574 mutex_init(&dev->struct_mutex);
453 mutex_init(&dev->ctxlist_mutex); 575 mutex_init(&dev->ctxlist_mutex);
576 mutex_init(&dev->master_mutex);
454 577
455 if (drm_ht_create(&dev->map_hash, 12)) 578 dev->anon_inode = drm_fs_inode_new();
579 if (IS_ERR(dev->anon_inode)) {
580 ret = PTR_ERR(dev->anon_inode);
581 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
456 goto err_free; 582 goto err_free;
583 }
584
585 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
586 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
587 if (ret)
588 goto err_minors;
589 }
590
591 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
592 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
593 if (ret)
594 goto err_minors;
595 }
596
597 ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
598 if (ret)
599 goto err_minors;
600
601 if (drm_ht_create(&dev->map_hash, 12))
602 goto err_minors;
457 603
458 ret = drm_ctxbitmap_init(dev); 604 ret = drm_ctxbitmap_init(dev);
459 if (ret) { 605 if (ret) {
@@ -475,38 +621,71 @@ err_ctxbitmap:
475 drm_ctxbitmap_cleanup(dev); 621 drm_ctxbitmap_cleanup(dev);
476err_ht: 622err_ht:
477 drm_ht_remove(&dev->map_hash); 623 drm_ht_remove(&dev->map_hash);
624err_minors:
625 drm_minor_free(dev, DRM_MINOR_LEGACY);
626 drm_minor_free(dev, DRM_MINOR_RENDER);
627 drm_minor_free(dev, DRM_MINOR_CONTROL);
628 drm_fs_inode_free(dev->anon_inode);
478err_free: 629err_free:
630 mutex_destroy(&dev->master_mutex);
479 kfree(dev); 631 kfree(dev);
480 return NULL; 632 return NULL;
481} 633}
482EXPORT_SYMBOL(drm_dev_alloc); 634EXPORT_SYMBOL(drm_dev_alloc);
483 635
484/** 636static void drm_dev_release(struct kref *ref)
485 * drm_dev_free - Free DRM device
486 * @dev: DRM device to free
487 *
488 * Free a DRM device that has previously been allocated via drm_dev_alloc().
489 * You must not use kfree() instead or you will leak memory.
490 *
491 * This must not be called once the device got registered. Use drm_put_dev()
492 * instead, which then calls drm_dev_free().
493 */
494void drm_dev_free(struct drm_device *dev)
495{ 637{
496 drm_put_minor(dev->control); 638 struct drm_device *dev = container_of(ref, struct drm_device, ref);
497 drm_put_minor(dev->render);
498 drm_put_minor(dev->primary);
499 639
500 if (dev->driver->driver_features & DRIVER_GEM) 640 if (dev->driver->driver_features & DRIVER_GEM)
501 drm_gem_destroy(dev); 641 drm_gem_destroy(dev);
502 642
503 drm_ctxbitmap_cleanup(dev); 643 drm_ctxbitmap_cleanup(dev);
504 drm_ht_remove(&dev->map_hash); 644 drm_ht_remove(&dev->map_hash);
645 drm_fs_inode_free(dev->anon_inode);
646
647 drm_minor_free(dev, DRM_MINOR_LEGACY);
648 drm_minor_free(dev, DRM_MINOR_RENDER);
649 drm_minor_free(dev, DRM_MINOR_CONTROL);
505 650
506 kfree(dev->devname); 651 kfree(dev->devname);
652
653 mutex_destroy(&dev->master_mutex);
507 kfree(dev); 654 kfree(dev);
508} 655}
509EXPORT_SYMBOL(drm_dev_free); 656
657/**
658 * drm_dev_ref - Take reference of a DRM device
659 * @dev: device to take reference of or NULL
660 *
661 * This increases the ref-count of @dev by one. You *must* already own a
662 * reference when calling this. Use drm_dev_unref() to drop this reference
663 * again.
664 *
665 * This function never fails. However, it does not provide *any* guarantee
666 * that the device is alive or running. It only provides a reference to the
667 * object and the memory associated with it.
668 */
669void drm_dev_ref(struct drm_device *dev)
670{
671 if (dev)
672 kref_get(&dev->ref);
673}
674EXPORT_SYMBOL(drm_dev_ref);
675
676/**
677 * drm_dev_unref - Drop reference of a DRM device
678 * @dev: device to drop reference of or NULL
679 *
680 * This decreases the ref-count of @dev by one. The device is destroyed if the
681 * ref-count drops to zero.
682 */
683void drm_dev_unref(struct drm_device *dev)
684{
685 if (dev)
686 kref_put(&dev->ref, drm_dev_release);
687}
688EXPORT_SYMBOL(drm_dev_unref);
510 689
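
A sketch of the intended use: keeping the device structure alive across asynchronous work (the foo_* types and names are hypothetical):

struct foo_work {
	struct work_struct base;
	struct drm_device *dev;
};

static void foo_work_start(struct foo_work *work, struct drm_device *dev)
{
	drm_dev_ref(dev);	/* legal: the caller already owns a reference */
	work->dev = dev;
	schedule_work(&work->base);
}

static void foo_work_fn(struct work_struct *base)
{
	struct foo_work *work = container_of(base, struct foo_work, base);

	/* work->dev memory is guaranteed valid; device state is not */
	drm_dev_unref(work->dev);
	kfree(work);
}
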
511/** 690/**
512 * drm_dev_register - Register DRM device 691 * drm_dev_register - Register DRM device
@@ -527,26 +706,22 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
527 706
528 mutex_lock(&drm_global_mutex); 707 mutex_lock(&drm_global_mutex);
529 708
530 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 709 ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
531 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); 710 if (ret)
532 if (ret) 711 goto err_minors;
533 goto out_unlock;
534 }
535 712
536 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { 713 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
537 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); 714 if (ret)
538 if (ret) 715 goto err_minors;
539 goto err_control_node;
540 }
541 716
542 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 717 ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
543 if (ret) 718 if (ret)
544 goto err_render_node; 719 goto err_minors;
545 720
546 if (dev->driver->load) { 721 if (dev->driver->load) {
547 ret = dev->driver->load(dev, flags); 722 ret = dev->driver->load(dev, flags);
548 if (ret) 723 if (ret)
549 goto err_primary_node; 724 goto err_minors;
550 } 725 }
551 726
552 /* setup grouping for legacy outputs */ 727 /* setup grouping for legacy outputs */
@@ -563,12 +738,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
563err_unload: 738err_unload:
564 if (dev->driver->unload) 739 if (dev->driver->unload)
565 dev->driver->unload(dev); 740 dev->driver->unload(dev);
566err_primary_node: 741err_minors:
567 drm_unplug_minor(dev->primary); 742 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
568err_render_node: 743 drm_minor_unregister(dev, DRM_MINOR_RENDER);
569 drm_unplug_minor(dev->render); 744 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
570err_control_node:
571 drm_unplug_minor(dev->control);
572out_unlock: 745out_unlock:
573 mutex_unlock(&drm_global_mutex); 746 mutex_unlock(&drm_global_mutex);
574 return ret; 747 return ret;
@@ -581,7 +754,7 @@ EXPORT_SYMBOL(drm_dev_register);
581 * 754 *
582 * Unregister the DRM device from the system. This does the reverse of 755 * Unregister the DRM device from the system. This does the reverse of
583 * drm_dev_register() but does not deallocate the device. The caller must call 756 * drm_dev_register() but does not deallocate the device. The caller must call
584 * drm_dev_free() to free all resources. 757 * drm_dev_unref() to drop their final reference.
585 */ 758 */
586void drm_dev_unregister(struct drm_device *dev) 759void drm_dev_unregister(struct drm_device *dev)
587{ 760{
@@ -600,8 +773,8 @@ void drm_dev_unregister(struct drm_device *dev)
600 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 773 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
601 drm_rmmap(dev, r_list->map); 774 drm_rmmap(dev, r_list->map);
602 775
603 drm_unplug_minor(dev->control); 776 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
604 drm_unplug_minor(dev->render); 777 drm_minor_unregister(dev, DRM_MINOR_RENDER);
605 drm_unplug_minor(dev->primary); 778 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
606} 779}
607EXPORT_SYMBOL(drm_dev_unregister); 780EXPORT_SYMBOL(drm_dev_unregister);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 0f8cb1ae7607..c3406aad2944 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -30,7 +30,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
30 return 0; 30 return 0;
31 31
32err_free: 32err_free:
33 drm_dev_free(dev); 33 drm_dev_unref(dev);
34 return ret; 34 return ret;
35 35
36} 36}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6e1a1a20cf6b..5bf5bca94f56 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -31,6 +31,30 @@ config DRM_EXYNOS_FIMD
31 help 31 help
32 Choose this option if you want to use Exynos FIMD for DRM. 32 Choose this option if you want to use Exynos FIMD for DRM.
33 33
34config DRM_EXYNOS_DPI
35 bool "EXYNOS DRM parallel output support"
36 depends on DRM_EXYNOS
37 select DRM_PANEL
38 default n
39 help
40 This enables support for Exynos parallel output.
41
42config DRM_EXYNOS_DSI
43 bool "EXYNOS DRM MIPI-DSI driver support"
44 depends on DRM_EXYNOS
45 select DRM_MIPI_DSI
46 select DRM_PANEL
47 default n
48 help
49 This enables support for Exynos MIPI-DSI device.
50
51config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support"
53 depends on DRM_EXYNOS && ARCH_EXYNOS
54 default DRM_EXYNOS
55 help
56 This enables support for the Exynos DisplayPort (DP) device.
57
34config DRM_EXYNOS_HDMI 58config DRM_EXYNOS_HDMI
35 bool "Exynos DRM HDMI" 59 bool "Exynos DRM HDMI"
36 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV 60 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 639b49e1ec05..33ae3652b8da 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,7 +3,7 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos 5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
6exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \ 6exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
7 exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ 7 exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ 8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
9 exynos_drm_plane.o 9 exynos_drm_plane.o
@@ -11,9 +11,10 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o 11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o 12exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
13exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 13exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
14exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ 14exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
15 exynos_ddc.o exynos_hdmiphy.o \ 15exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
16 exynos_drm_hdmi.o 16exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 18exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
18exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 19exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
19exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o 20exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 5e1a71580051..aed533bbfd31 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/err.h> 15#include <linux/err.h>
17#include <linux/clk.h> 16#include <linux/clk.h>
18#include <linux/io.h> 17#include <linux/io.h>
@@ -20,9 +19,25 @@
20#include <linux/delay.h> 19#include <linux/delay.h>
21#include <linux/of.h> 20#include <linux/of.h>
22#include <linux/phy/phy.h> 21#include <linux/phy/phy.h>
22#include <video/of_display_timing.h>
23#include <video/of_videomode.h>
23 24
25#include <drm/drmP.h>
26#include <drm/drm_crtc.h>
27#include <drm/drm_crtc_helper.h>
28#include <drm/bridge/ptn3460.h>
29
30#include "exynos_drm_drv.h"
24#include "exynos_dp_core.h" 31#include "exynos_dp_core.h"
25 32
33#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
34 connector)
35
36struct bridge_init {
37 struct i2c_client *client;
38 struct device_node *node;
39};
40
26static int exynos_dp_init_dp(struct exynos_dp_device *dp) 41static int exynos_dp_init_dp(struct exynos_dp_device *dp)
27{ 42{
28 exynos_dp_reset(dp); 43 exynos_dp_reset(dp);
@@ -893,6 +908,214 @@ static void exynos_dp_hotplug(struct work_struct *work)
893 dev_err(dp->dev, "unable to config video\n"); 908 dev_err(dp->dev, "unable to config video\n");
894} 909}
895 910
911static enum drm_connector_status exynos_dp_detect(
912 struct drm_connector *connector, bool force)
913{
914 return connector_status_connected;
915}
916
917static void exynos_dp_connector_destroy(struct drm_connector *connector)
918{
919}
920
921static struct drm_connector_funcs exynos_dp_connector_funcs = {
922 .dpms = drm_helper_connector_dpms,
923 .fill_modes = drm_helper_probe_single_connector_modes,
924 .detect = exynos_dp_detect,
925 .destroy = exynos_dp_connector_destroy,
926};
927
928static int exynos_dp_get_modes(struct drm_connector *connector)
929{
930 struct exynos_dp_device *dp = ctx_from_connector(connector);
931 struct drm_display_mode *mode;
932
933 mode = drm_mode_create(connector->dev);
934 if (!mode) {
935 DRM_ERROR("failed to create a new display mode.\n");
936 return 0;
937 }
938
939 drm_display_mode_from_videomode(&dp->panel.vm, mode);
940 mode->width_mm = dp->panel.width_mm;
941 mode->height_mm = dp->panel.height_mm;
942 connector->display_info.width_mm = mode->width_mm;
943 connector->display_info.height_mm = mode->height_mm;
944
945 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
946 drm_mode_set_name(mode);
947 drm_mode_probed_add(connector, mode);
948
949 return 1;
950}
951
952static int exynos_dp_mode_valid(struct drm_connector *connector,
953 struct drm_display_mode *mode)
954{
955 return MODE_OK;
956}
957
958static struct drm_encoder *exynos_dp_best_encoder(
959 struct drm_connector *connector)
960{
961 struct exynos_dp_device *dp = ctx_from_connector(connector);
962
963 return dp->encoder;
964}
965
966static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
967 .get_modes = exynos_dp_get_modes,
968 .mode_valid = exynos_dp_mode_valid,
969 .best_encoder = exynos_dp_best_encoder,
970};
971
972static int exynos_dp_initialize(struct exynos_drm_display *display,
973 struct drm_device *drm_dev)
974{
975 struct exynos_dp_device *dp = display->ctx;
976
977 dp->drm_dev = drm_dev;
978
979 return 0;
980}
981
982static bool find_bridge(const char *compat, struct bridge_init *bridge)
983{
984 bridge->client = NULL;
985 bridge->node = of_find_compatible_node(NULL, NULL, compat);
986 if (!bridge->node)
987 return false;
988
989 bridge->client = of_find_i2c_device_by_node(bridge->node);
990 if (!bridge->client)
991 return false;
992
993 return true;
994}
995
996/* returns the number of bridges attached */
997static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
998 struct drm_encoder *encoder)
999{
1000 struct bridge_init bridge;
1001 int ret;
1002
1003 if (find_bridge("nxp,ptn3460", &bridge)) {
1004 ret = ptn3460_init(dev, encoder, bridge.client, bridge.node);
1005 if (!ret)
1006 return 1;
1007 }
1008 return 0;
1009}
1010
1011static int exynos_dp_create_connector(struct exynos_drm_display *display,
1012 struct drm_encoder *encoder)
1013{
1014 struct exynos_dp_device *dp = display->ctx;
1015 struct drm_connector *connector = &dp->connector;
1016 int ret;
1017
1018 dp->encoder = encoder;
1019
1020 /* Pre-empt DP connector creation if there's a bridge */
1021 ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder);
1022 if (ret)
1023 return 0;
1024
1025 connector->polled = DRM_CONNECTOR_POLL_HPD;
1026
1027 ret = drm_connector_init(dp->drm_dev, connector,
1028 &exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
1029 if (ret) {
1030 DRM_ERROR("Failed to initialize connector with drm\n");
1031 return ret;
1032 }
1033
1034 drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
1035 drm_sysfs_connector_add(connector);
1036 drm_mode_connector_attach_encoder(connector, encoder);
1037
1038 return 0;
1039}
1040
1041static void exynos_dp_phy_init(struct exynos_dp_device *dp)
1042{
1043 if (dp->phy) {
1044 phy_power_on(dp->phy);
1045 } else if (dp->phy_addr) {
1046 u32 reg;
1047
1048 reg = __raw_readl(dp->phy_addr);
1049 reg |= dp->enable_mask;
1050 __raw_writel(reg, dp->phy_addr);
1051 }
1052}
1053
1054static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
1055{
1056 if (dp->phy) {
1057 phy_power_off(dp->phy);
1058 } else if (dp->phy_addr) {
1059 u32 reg;
1060
1061 reg = __raw_readl(dp->phy_addr);
1062 reg &= ~(dp->enable_mask);
1063 __raw_writel(reg, dp->phy_addr);
1064 }
1065}
1066
1067static void exynos_dp_poweron(struct exynos_dp_device *dp)
1068{
1069 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1070 return;
1071
1072 clk_prepare_enable(dp->clock);
1073 exynos_dp_phy_init(dp);
1074 exynos_dp_init_dp(dp);
1075 enable_irq(dp->irq);
1076}
1077
1078static void exynos_dp_poweroff(struct exynos_dp_device *dp)
1079{
1080 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
1081 return;
1082
1083 disable_irq(dp->irq);
1084 flush_work(&dp->hotplug_work);
1085 exynos_dp_phy_exit(dp);
1086 clk_disable_unprepare(dp->clock);
1087}
1088
1089static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
1090{
1091 struct exynos_dp_device *dp = display->ctx;
1092
1093 switch (mode) {
1094 case DRM_MODE_DPMS_ON:
1095 exynos_dp_poweron(dp);
1096 break;
1097 case DRM_MODE_DPMS_STANDBY:
1098 case DRM_MODE_DPMS_SUSPEND:
1099 case DRM_MODE_DPMS_OFF:
1100 exynos_dp_poweroff(dp);
1101 break;
1102 default:
1103 break;
1104 }
1105 dp->dpms_mode = mode;
1106}
1107
1108static struct exynos_drm_display_ops exynos_dp_display_ops = {
1109 .initialize = exynos_dp_initialize,
1110 .create_connector = exynos_dp_create_connector,
1111 .dpms = exynos_dp_dpms,
1112};
1113
1114static struct exynos_drm_display exynos_dp_display = {
1115 .type = EXYNOS_DISPLAY_TYPE_LCD,
1116 .ops = &exynos_dp_display_ops,
1117};
1118
896static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) 1119static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
897{ 1120{
898 struct device_node *dp_node = dev->of_node; 1121 struct device_node *dp_node = dev->of_node;
@@ -994,30 +1217,17 @@ err:
994 return ret; 1217 return ret;
995} 1218}
996 1219
997static void exynos_dp_phy_init(struct exynos_dp_device *dp) 1220static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
998{
999 if (dp->phy) {
1000 phy_power_on(dp->phy);
1001 } else if (dp->phy_addr) {
1002 u32 reg;
1003
1004 reg = __raw_readl(dp->phy_addr);
1005 reg |= dp->enable_mask;
1006 __raw_writel(reg, dp->phy_addr);
1007 }
1008}
1009
1010static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
1011{ 1221{
1012 if (dp->phy) { 1222 int ret;
1013 phy_power_off(dp->phy);
1014 } else if (dp->phy_addr) {
1015 u32 reg;
1016 1223
1017 reg = __raw_readl(dp->phy_addr); 1224 ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
1018 reg &= ~(dp->enable_mask); 1225 OF_USE_NATIVE_MODE);
1019 __raw_writel(reg, dp->phy_addr); 1226 if (ret) {
1227 DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
1228 return ret;
1020 } 1229 }
1230 return 0;
1021} 1231}
1022 1232
1023static int exynos_dp_probe(struct platform_device *pdev) 1233static int exynos_dp_probe(struct platform_device *pdev)
@@ -1035,6 +1245,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
1035 } 1245 }
1036 1246
1037 dp->dev = &pdev->dev; 1247 dp->dev = &pdev->dev;
1248 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1038 1249
1039 dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev); 1250 dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
1040 if (IS_ERR(dp->video_info)) 1251 if (IS_ERR(dp->video_info))
@@ -1044,6 +1255,10 @@ static int exynos_dp_probe(struct platform_device *pdev)
1044 if (ret) 1255 if (ret)
1045 return ret; 1256 return ret;
1046 1257
1258 ret = exynos_dp_dt_parse_panel(dp);
1259 if (ret)
1260 return ret;
1261
1047 dp->clock = devm_clk_get(&pdev->dev, "dp"); 1262 dp->clock = devm_clk_get(&pdev->dev, "dp");
1048 if (IS_ERR(dp->clock)) { 1263 if (IS_ERR(dp->clock)) {
1049 dev_err(&pdev->dev, "failed to get clock\n"); 1264 dev_err(&pdev->dev, "failed to get clock\n");
@@ -1076,22 +1291,22 @@ static int exynos_dp_probe(struct platform_device *pdev)
1076 dev_err(&pdev->dev, "failed to request irq\n"); 1291 dev_err(&pdev->dev, "failed to request irq\n");
1077 return ret; 1292 return ret;
1078 } 1293 }
1294 disable_irq(dp->irq);
1295
1296 exynos_dp_display.ctx = dp;
1079 1297
1080 platform_set_drvdata(pdev, dp); 1298 platform_set_drvdata(pdev, &exynos_dp_display);
1299 exynos_drm_display_register(&exynos_dp_display);
1081 1300
1082 return 0; 1301 return 0;
1083} 1302}
1084 1303
1085static int exynos_dp_remove(struct platform_device *pdev) 1304static int exynos_dp_remove(struct platform_device *pdev)
1086{ 1305{
1087 struct exynos_dp_device *dp = platform_get_drvdata(pdev); 1306 struct exynos_drm_display *display = platform_get_drvdata(pdev);
1088
1089 flush_work(&dp->hotplug_work);
1090
1091 exynos_dp_phy_exit(dp);
1092
1093 clk_disable_unprepare(dp->clock);
1094 1307
1308 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
1309 exynos_drm_display_unregister(&exynos_dp_display);
1095 1310
1096 return 0; 1311 return 0;
1097} 1312}
@@ -1099,31 +1314,19 @@ static int exynos_dp_remove(struct platform_device *pdev)
1099#ifdef CONFIG_PM_SLEEP 1314#ifdef CONFIG_PM_SLEEP
1100static int exynos_dp_suspend(struct device *dev) 1315static int exynos_dp_suspend(struct device *dev)
1101{ 1316{
1102 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1317 struct platform_device *pdev = to_platform_device(dev);
1103 1318 struct exynos_drm_display *display = platform_get_drvdata(pdev);
1104 disable_irq(dp->irq);
1105
1106 flush_work(&dp->hotplug_work);
1107
1108 exynos_dp_phy_exit(dp);
1109
1110 clk_disable_unprepare(dp->clock);
1111 1319
1320 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
1112 return 0; 1321 return 0;
1113} 1322}
1114 1323
1115static int exynos_dp_resume(struct device *dev) 1324static int exynos_dp_resume(struct device *dev)
1116{ 1325{
1117 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1326 struct platform_device *pdev = to_platform_device(dev);
1118 1327 struct exynos_drm_display *display = platform_get_drvdata(pdev);
1119 exynos_dp_phy_init(dp);
1120
1121 clk_prepare_enable(dp->clock);
1122
1123 exynos_dp_init_dp(dp);
1124
1125 enable_irq(dp->irq);
1126 1328
1329 exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
1127 return 0; 1330 return 0;
1128} 1331}
1129#endif 1332#endif
@@ -1136,9 +1339,8 @@ static const struct of_device_id exynos_dp_match[] = {
1136 { .compatible = "samsung,exynos5-dp" }, 1339 { .compatible = "samsung,exynos5-dp" },
1137 {}, 1340 {},
1138}; 1341};
1139MODULE_DEVICE_TABLE(of, exynos_dp_match);
1140 1342
1141static struct platform_driver exynos_dp_driver = { 1343struct platform_driver dp_driver = {
1142 .probe = exynos_dp_probe, 1344 .probe = exynos_dp_probe,
1143 .remove = exynos_dp_remove, 1345 .remove = exynos_dp_remove,
1144 .driver = { 1346 .driver = {
@@ -1149,8 +1351,6 @@ static struct platform_driver exynos_dp_driver = {
1149 }, 1351 },
1150}; 1352};
1151 1353
1152module_platform_driver(exynos_dp_driver);
1153
1154MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); 1354MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
1155MODULE_DESCRIPTION("Samsung SoC DP Driver"); 1355MODULE_DESCRIPTION("Samsung SoC DP Driver");
1156MODULE_LICENSE("GPL"); 1356MODULE_LICENSE("GPL");
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 607e36d0c147..d6a900d4ee40 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -13,6 +13,9 @@
13#ifndef _EXYNOS_DP_CORE_H 13#ifndef _EXYNOS_DP_CORE_H
14#define _EXYNOS_DP_CORE_H 14#define _EXYNOS_DP_CORE_H
15 15
16#include <drm/drm_crtc.h>
17#include <drm/exynos_drm.h>
18
16#define DP_TIMEOUT_LOOP_COUNT 100 19#define DP_TIMEOUT_LOOP_COUNT 100
17#define MAX_CR_LOOP 5 20#define MAX_CR_LOOP 5
18#define MAX_EQ_LOOP 5 21#define MAX_EQ_LOOP 5
@@ -142,6 +145,9 @@ struct link_train {
142 145
143struct exynos_dp_device { 146struct exynos_dp_device {
144 struct device *dev; 147 struct device *dev;
148 struct drm_device *drm_dev;
149 struct drm_connector connector;
150 struct drm_encoder *encoder;
145 struct clk *clock; 151 struct clk *clock;
146 unsigned int irq; 152 unsigned int irq;
147 void __iomem *reg_base; 153 void __iomem *reg_base;
@@ -152,6 +158,9 @@ struct exynos_dp_device {
152 struct link_train link_train; 158 struct link_train link_train;
153 struct work_struct hotplug_work; 159 struct work_struct hotplug_work;
154 struct phy *phy; 160 struct phy *phy;
161 int dpms_mode;
162
163 struct exynos_drm_panel_info panel;
155}; 164};
156 165
157/* exynos_dp_reg.c */ 166/* exynos_dp_reg.c */
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
index b70da5052ff0..b70da5052ff0 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/gpu/drm/exynos/exynos_dp_reg.h
index 2e9bd0e0b9f2..2e9bd0e0b9f2 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.h
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index e082efb2fece..9a16dbe121d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -23,27 +23,20 @@
 		drm_connector)
 
 struct exynos_drm_connector {
 	struct drm_connector drm_connector;
 	uint32_t encoder_id;
-	struct exynos_drm_manager *manager;
-	uint32_t dpms;
+	struct exynos_drm_display *display;
 };
 
 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
 	struct exynos_drm_connector *exynos_connector =
 					to_exynos_connector(connector);
-	struct exynos_drm_manager *manager = exynos_connector->manager;
-	struct exynos_drm_display_ops *display_ops = manager->display_ops;
+	struct exynos_drm_display *display = exynos_connector->display;
 	struct edid *edid = NULL;
 	unsigned int count = 0;
 	int ret;
 
-	if (!display_ops) {
-		DRM_DEBUG_KMS("display_ops is null.\n");
-		return 0;
-	}
-
 	/*
 	 * if get_edid() exists then get_edid() callback of hdmi side
 	 * is called to get edid data through i2c interface else
@@ -52,8 +45,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 	 * P.S. in case of lcd panel, count is always 1 if success
 	 * because lcd panel has only one mode.
 	 */
-	if (display_ops->get_edid) {
-		edid = display_ops->get_edid(manager->dev, connector);
+	if (display->ops->get_edid) {
+		edid = display->ops->get_edid(display, connector);
 		if (IS_ERR_OR_NULL(edid)) {
 			ret = PTR_ERR(edid);
 			edid = NULL;
@@ -76,8 +69,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 		return 0;
 	}
 
-	if (display_ops->get_panel)
-		panel = display_ops->get_panel(manager->dev);
+	if (display->ops->get_panel)
+		panel = display->ops->get_panel(display);
 	else {
 		drm_mode_destroy(connector->dev, mode);
 		return 0;
@@ -106,20 +99,20 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 {
 	struct exynos_drm_connector *exynos_connector =
 					to_exynos_connector(connector);
-	struct exynos_drm_manager *manager = exynos_connector->manager;
-	struct exynos_drm_display_ops *display_ops = manager->display_ops;
+	struct exynos_drm_display *display = exynos_connector->display;
 	int ret = MODE_BAD;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (display_ops && display_ops->check_mode)
-		if (!display_ops->check_mode(manager->dev, mode))
+	if (display->ops->check_mode)
+		if (!display->ops->check_mode(display, mode))
 			ret = MODE_OK;
 
 	return ret;
 }
 
-struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *exynos_drm_best_encoder(
+		struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct exynos_drm_connector *exynos_connector =
@@ -146,48 +139,12 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
 	.best_encoder = exynos_drm_best_encoder,
 };
 
-void exynos_drm_display_power(struct drm_connector *connector, int mode)
-{
-	struct drm_encoder *encoder = exynos_drm_best_encoder(connector);
-	struct exynos_drm_connector *exynos_connector;
-	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
-	struct exynos_drm_display_ops *display_ops = manager->display_ops;
-
-	exynos_connector = to_exynos_connector(connector);
-
-	if (exynos_connector->dpms == mode) {
-		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
-		return;
-	}
-
-	if (display_ops && display_ops->power_on)
-		display_ops->power_on(manager->dev, mode);
-
-	exynos_connector->dpms = mode;
-}
-
-static void exynos_drm_connector_dpms(struct drm_connector *connector,
-					int mode)
-{
-	/*
-	 * in case that drm_crtc_helper_set_mode() is called,
-	 * encoder/crtc->funcs->dpms() will be just returned
-	 * because they already were DRM_MODE_DPMS_ON so only
-	 * exynos_drm_display_power() will be called.
-	 */
-	drm_helper_connector_dpms(connector, mode);
-
-	exynos_drm_display_power(connector, mode);
-
-}
-
 static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
 				unsigned int max_width, unsigned int max_height)
 {
 	struct exynos_drm_connector *exynos_connector =
 					to_exynos_connector(connector);
-	struct exynos_drm_manager *manager = exynos_connector->manager;
-	struct exynos_drm_manager_ops *ops = manager->ops;
+	struct exynos_drm_display *display = exynos_connector->display;
 	unsigned int width, height;
 
 	width = max_width;
@@ -197,8 +154,8 @@ static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
 	 * if specific driver want to find desired_mode using maxmum
 	 * resolution then get max width and height from that driver.
 	 */
-	if (ops && ops->get_max_resol)
-		ops->get_max_resol(manager->dev, &width, &height);
+	if (display->ops->get_max_resol)
+		display->ops->get_max_resol(display, &width, &height);
 
 	return drm_helper_probe_single_connector_modes(connector, width,
 							height);
@@ -210,13 +167,11 @@ exynos_drm_connector_detect(struct drm_connector *connector, bool force)
 {
 	struct exynos_drm_connector *exynos_connector =
 					to_exynos_connector(connector);
-	struct exynos_drm_manager *manager = exynos_connector->manager;
-	struct exynos_drm_display_ops *display_ops =
-					manager->display_ops;
+	struct exynos_drm_display *display = exynos_connector->display;
 	enum drm_connector_status status = connector_status_disconnected;
 
-	if (display_ops && display_ops->is_connected) {
-		if (display_ops->is_connected(manager->dev))
+	if (display->ops->is_connected) {
+		if (display->ops->is_connected(display))
 			status = connector_status_connected;
 		else
 			status = connector_status_disconnected;
@@ -236,7 +191,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
 }
 
 static struct drm_connector_funcs exynos_connector_funcs = {
-	.dpms = exynos_drm_connector_dpms,
+	.dpms = drm_helper_connector_dpms,
 	.fill_modes = exynos_drm_connector_fill_modes,
 	.detect = exynos_drm_connector_detect,
 	.destroy = exynos_drm_connector_destroy,
@@ -246,7 +201,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 						   struct drm_encoder *encoder)
 {
 	struct exynos_drm_connector *exynos_connector;
-	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_display *display = exynos_drm_get_display(encoder);
 	struct drm_connector *connector;
 	int type;
 	int err;
@@ -257,7 +212,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 
 	connector = &exynos_connector->drm_connector;
 
-	switch (manager->display_ops->type) {
+	switch (display->type) {
 	case EXYNOS_DISPLAY_TYPE_HDMI:
 		type = DRM_MODE_CONNECTOR_HDMIA;
 		connector->interlace_allowed = true;
@@ -280,8 +235,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 		goto err_connector;
 
 	exynos_connector->encoder_id = encoder->base.id;
-	exynos_connector->manager = manager;
-	exynos_connector->dpms = DRM_MODE_DPMS_OFF;
+	exynos_connector->display = display;
 	connector->dpms = DRM_MODE_DPMS_OFF;
 	connector->encoder = encoder;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 547c6b590357..4eb20d78379a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -17,8 +17,4 @@
 struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 					struct drm_encoder *encoder);
 
-struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector);
-
-void exynos_drm_display_power(struct drm_connector *connector, int mode);
-
 #endif
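
The hunks above collapse the old two-step manager->display_ops lookup into a single display->ops table whose callbacks receive the exynos_drm_display object itself, so every call site shrinks to a guard plus a dispatch. The following standalone C sketch shows just that guard-and-dispatch idiom; the struct and function names in it are invented for illustration and are not the driver's.

/* Illustrative sketch of the ops-table dispatch used above (invented names). */
#include <stdio.h>

struct display;

struct display_ops {
	int (*is_connected)(struct display *d);	/* optional callback */
};

struct display {
	const struct display_ops *ops;
	void *ctx;				/* implementation-private state */
};

static int display_detect(struct display *d)
{
	/* guard the optional hook exactly as the driver does */
	if (d->ops->is_connected)
		return d->ops->is_connected(d);
	return 0;				/* no hook: report disconnected */
}

static int panel_connected(struct display *d)
{
	return *(int *)d->ctx;			/* e.g. a cached hotplug state */
}

int main(void)
{
	static const struct display_ops ops = { .is_connected = panel_connected };
	int hpd = 1;
	struct display d = { .ops = &ops, .ctx = &hpd };

	printf("connected: %d\n", display_detect(&d));
	return 0;
}

Passing the display object rather than a bare struct device means an implementation can recover its private context with a single pointer load instead of a drvdata round-trip, which is the design choice the refactor is making.
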
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 1bef6dc77478..0e9e06ce36b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -14,43 +14,42 @@
 
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_encoder.h"
-#include "exynos_drm_connector.h"
 #include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
+static LIST_HEAD(exynos_drm_manager_list);
+static LIST_HEAD(exynos_drm_display_list);
 
 static int exynos_drm_create_enc_conn(struct drm_device *dev,
-					struct exynos_drm_subdrv *subdrv)
+					struct exynos_drm_display *display)
 {
 	struct drm_encoder *encoder;
-	struct drm_connector *connector;
+	struct exynos_drm_manager *manager;
 	int ret;
+	unsigned long possible_crtcs = 0;
 
-	subdrv->manager->dev = subdrv->dev;
+	/* Find possible crtcs for this display */
+	list_for_each_entry(manager, &exynos_drm_manager_list, list)
+		if (manager->type == display->type)
+			possible_crtcs |= 1 << manager->pipe;
 
 	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, subdrv->manager,
-			(1 << MAX_CRTC) - 1);
+	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
 	if (!encoder) {
 		DRM_ERROR("failed to create encoder\n");
 		return -EFAULT;
 	}
 
-	/*
-	 * create and initialize a connector for this sub driver and
-	 * attach the encoder created above to the connector.
-	 */
-	connector = exynos_drm_connector_create(dev, encoder);
-	if (!connector) {
-		DRM_ERROR("failed to create connector\n");
-		ret = -EFAULT;
+	display->encoder = encoder;
+
+	ret = display->ops->create_connector(display, encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
 		goto err_destroy_encoder;
 	}
 
-	subdrv->encoder = encoder;
-	subdrv->connector = connector;
-
 	return 0;
 
 err_destroy_encoder:
@@ -58,21 +57,6 @@ err_destroy_encoder:
 	return ret;
 }
 
-static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv)
-{
-	if (subdrv->encoder) {
-		struct drm_encoder *encoder = subdrv->encoder;
-		encoder->funcs->destroy(encoder);
-		subdrv->encoder = NULL;
-	}
-
-	if (subdrv->connector) {
-		struct drm_connector *connector = subdrv->connector;
-		connector->funcs->destroy(connector);
-		subdrv->connector = NULL;
-	}
-}
-
 static int exynos_drm_subdrv_probe(struct drm_device *dev,
 					struct exynos_drm_subdrv *subdrv)
 {
@@ -104,10 +88,98 @@ static void exynos_drm_subdrv_remove(struct drm_device *dev,
 		subdrv->remove(dev, subdrv->dev);
 }
 
+int exynos_drm_initialize_managers(struct drm_device *dev)
+{
+	struct exynos_drm_manager *manager, *n;
+	int ret, pipe = 0;
+
+	list_for_each_entry(manager, &exynos_drm_manager_list, list) {
+		if (manager->ops->initialize) {
+			ret = manager->ops->initialize(manager, dev, pipe);
+			if (ret) {
+				DRM_ERROR("Mgr init [%d] failed with %d\n",
+						manager->type, ret);
+				goto err;
+			}
+		}
+
+		manager->drm_dev = dev;
+		manager->pipe = pipe++;
+
+		ret = exynos_drm_crtc_create(manager);
+		if (ret) {
+			DRM_ERROR("CRTC create [%d] failed with %d\n",
+					manager->type, ret);
+			goto err;
+		}
+	}
+	return 0;
+
+err:
+	list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list) {
+		if (pipe-- > 0)
+			exynos_drm_manager_unregister(manager);
+		else
+			list_del(&manager->list);
+	}
+	return ret;
+}
+
+void exynos_drm_remove_managers(struct drm_device *dev)
+{
+	struct exynos_drm_manager *manager, *n;
+
+	list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list)
+		exynos_drm_manager_unregister(manager);
+}
+
+int exynos_drm_initialize_displays(struct drm_device *dev)
+{
+	struct exynos_drm_display *display, *n;
+	int ret, initialized = 0;
+
+	list_for_each_entry(display, &exynos_drm_display_list, list) {
+		if (display->ops->initialize) {
+			ret = display->ops->initialize(display, dev);
+			if (ret) {
+				DRM_ERROR("Display init [%d] failed with %d\n",
						display->type, ret);
+				goto err;
+			}
+		}
+
+		initialized++;
+
+		ret = exynos_drm_create_enc_conn(dev, display);
+		if (ret) {
+			DRM_ERROR("Encoder create [%d] failed with %d\n",
+					display->type, ret);
+			goto err;
+		}
+	}
+	return 0;
+
+err:
+	list_for_each_entry_safe(display, n, &exynos_drm_display_list, list) {
+		if (initialized-- > 0)
+			exynos_drm_display_unregister(display);
+		else
+			list_del(&display->list);
+	}
+	return ret;
+}
+
+void exynos_drm_remove_displays(struct drm_device *dev)
+{
+	struct exynos_drm_display *display, *n;
+
+	list_for_each_entry_safe(display, n, &exynos_drm_display_list, list)
+		exynos_drm_display_unregister(display);
+}
+
 int exynos_drm_device_register(struct drm_device *dev)
 {
 	struct exynos_drm_subdrv *subdrv, *n;
-	unsigned int fine_cnt = 0;
 	int err;
 
 	if (!dev)
@@ -120,30 +192,8 @@ int exynos_drm_device_register(struct drm_device *dev)
 			list_del(&subdrv->list);
 			continue;
 		}
-
-		/*
-		 * if manager is null then it means that this sub driver
-		 * doesn't need encoder and connector.
-		 */
-		if (!subdrv->manager) {
-			fine_cnt++;
-			continue;
-		}
-
-		err = exynos_drm_create_enc_conn(dev, subdrv);
-		if (err) {
-			DRM_DEBUG("failed to create encoder and connector.\n");
-			exynos_drm_subdrv_remove(dev, subdrv);
-			list_del(&subdrv->list);
-			continue;
-		}
-
-		fine_cnt++;
 	}
 
-	if (!fine_cnt)
-		return -EINVAL;
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(exynos_drm_device_register);
@@ -159,13 +209,44 @@ int exynos_drm_device_unregister(struct drm_device *dev)
 
 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
 		exynos_drm_subdrv_remove(dev, subdrv);
-		exynos_drm_destroy_enc_conn(subdrv);
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
 
+int exynos_drm_manager_register(struct exynos_drm_manager *manager)
+{
+	BUG_ON(!manager->ops);
+	list_add_tail(&manager->list, &exynos_drm_manager_list);
+	return 0;
+}
+
+int exynos_drm_manager_unregister(struct exynos_drm_manager *manager)
+{
+	if (manager->ops->remove)
+		manager->ops->remove(manager);
+
+	list_del(&manager->list);
+	return 0;
+}
+
+int exynos_drm_display_register(struct exynos_drm_display *display)
+{
+	BUG_ON(!display->ops);
+	list_add_tail(&display->list, &exynos_drm_display_list);
+	return 0;
+}
+
+int exynos_drm_display_unregister(struct exynos_drm_display *display)
+{
+	if (display->ops->remove)
+		display->ops->remove(display);
+
+	list_del(&display->list);
+	return 0;
+}
+
 int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
 	if (!subdrv)
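
exynos_drm_initialize_displays() above walks a registration list, counts how many entries came up, and on failure unwinds only that many. A compact userspace C sketch of the same register/initialize/unwind flow follows; every name in it is invented for illustration.

/* Illustrative register-then-initialize flow (invented names, plain C). */
#include <stdio.h>

struct component {
	struct component *next;
	int (*init)(struct component *c);
	void (*remove)(struct component *c);
};

static struct component *component_list;

static void component_register(struct component *c)
{
	c->next = component_list;	/* the driver uses list_add_tail() */
	component_list = c;
}

static int components_initialize(void)
{
	struct component *c;
	int initialized = 0;

	for (c = component_list; c; c = c->next) {
		if (c->init && c->init(c))
			goto err;
		initialized++;
	}
	return 0;

err:
	/* tear down only the entries that were actually brought up */
	for (c = component_list; c && initialized-- > 0; c = c->next)
		if (c->remove)
			c->remove(c);
	return -1;
}

static int ok_init(struct component *c)   { (void)c; return 0; }
static int bad_init(struct component *c)  { (void)c; return -1; }
static void cleanup(struct component *c)  { (void)c; printf("unwound\n"); }

int main(void)
{
	struct component b = { .init = bad_init, .remove = cleanup };
	struct component a = { .init = ok_init,  .remove = cleanup };

	component_register(&b);		/* list order after both adds: a, b */
	component_register(&a);
	printf("init: %d\n", components_initialize());	/* unwinds a only */
	return 0;
}

The count-based unwind mirrors the `initialized--` loop in the hunk above: the failing entry was never initialized, so it is skipped and only its predecessors are torn down.
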
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 6f3400f3978a..e930d4fe29c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -33,6 +33,7 @@ enum exynos_crtc_mode {
  *
  * @drm_crtc: crtc object.
  * @drm_plane: pointer of private plane object for this crtc
+ * @manager: the manager associated with this crtc
  * @pipe: a crtc index created at load() with a new crtc object creation
  *	and the crtc object would be set to private->crtc array
  *	to get a crtc object corresponding to this pipe from private->crtc
@@ -46,6 +47,7 @@ enum exynos_crtc_mode {
 struct exynos_drm_crtc {
 	struct drm_crtc drm_crtc;
 	struct drm_plane *plane;
+	struct exynos_drm_manager *manager;
 	unsigned int pipe;
 	unsigned int dpms;
 	enum exynos_crtc_mode mode;
@@ -56,6 +58,7 @@ struct exynos_drm_crtc {
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
 
 	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
 
@@ -71,7 +74,9 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 		drm_vblank_off(crtc->dev, exynos_crtc->pipe);
 	}
 
-	exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
+	if (manager->ops->dpms)
+		manager->ops->dpms(manager, mode);
+
 	exynos_crtc->dpms = mode;
 }
 
@@ -83,9 +88,15 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
 static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
 
 	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
 	exynos_plane_commit(exynos_crtc->plane);
+
+	if (manager->ops->commit)
+		manager->ops->commit(manager);
+
 	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
 }
 
@@ -94,7 +105,12 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
 			    const struct drm_display_mode *mode,
 			    struct drm_display_mode *adjusted_mode)
 {
-	/* drm framework doesn't check NULL */
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+	if (manager->ops->mode_fixup)
+		return manager->ops->mode_fixup(manager, mode, adjusted_mode);
+
 	return true;
 }
 
@@ -104,10 +120,10 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			  struct drm_framebuffer *old_fb)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
 	struct drm_plane *plane = exynos_crtc->plane;
 	unsigned int crtc_w;
 	unsigned int crtc_h;
-	int pipe = exynos_crtc->pipe;
 	int ret;
 
 	/*
@@ -116,18 +132,19 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	 */
 	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
 
-	crtc_w = crtc->fb->width - x;
-	crtc_h = crtc->fb->height - y;
+	crtc_w = crtc->primary->fb->width - x;
+	crtc_h = crtc->primary->fb->height - y;
+
+	if (manager->ops->mode_set)
+		manager->ops->mode_set(manager, &crtc->mode);
 
-	ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
 				    x, y, crtc_w, crtc_h);
 	if (ret)
 		return ret;
 
 	plane->crtc = crtc;
-	plane->fb = crtc->fb;
-
-	exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
+	plane->fb = crtc->primary->fb;
 
 	return 0;
 }
@@ -147,10 +164,10 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
 		return -EPERM;
 	}
 
-	crtc_w = crtc->fb->width - x;
-	crtc_h = crtc->fb->height - y;
+	crtc_w = crtc->primary->fb->width - x;
+	crtc_h = crtc->primary->fb->height - y;
 
-	ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
 				    x, y, crtc_w, crtc_h);
 	if (ret)
 		return ret;
@@ -168,10 +185,19 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 
 static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+	int ret;
 
-	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
 	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
+		if (plane->crtc != crtc)
+			continue;
+
+		ret = plane->funcs->disable_plane(plane);
+		if (ret)
+			DRM_ERROR("Failed to disable plane %d\n", ret);
+	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -192,7 +218,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct exynos_drm_private *dev_priv = dev->dev_private;
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct drm_framebuffer *old_fb = crtc->fb;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
 	int ret = -EINVAL;
 
 	/* when the page flip is requested, crtc's dpms should be on */
@@ -223,11 +249,11 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 	atomic_set(&exynos_crtc->pending_flip, 1);
 	spin_unlock_irq(&dev->event_lock);
 
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 	ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
 					    NULL);
 	if (ret) {
-		crtc->fb = old_fb;
+		crtc->primary->fb = old_fb;
 
 		spin_lock_irq(&dev->event_lock);
 		drm_vblank_put(dev, exynos_crtc->pipe);
@@ -318,21 +344,24 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
 	drm_object_attach_property(&crtc->base, prop, 0);
 }
 
-int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
 {
 	struct exynos_drm_crtc *exynos_crtc;
-	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_private *private = manager->drm_dev->dev_private;
 	struct drm_crtc *crtc;
 
 	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
 	if (!exynos_crtc)
 		return -ENOMEM;
 
-	exynos_crtc->pipe = nr;
-	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
 	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
 	atomic_set(&exynos_crtc->pending_flip, 0);
-	exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
+
+	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+	exynos_crtc->manager = manager;
+	exynos_crtc->pipe = manager->pipe;
+	exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
+				1 << manager->pipe, true);
 	if (!exynos_crtc->plane) {
 		kfree(exynos_crtc);
 		return -ENOMEM;
@@ -340,9 +369,9 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 
 	crtc = &exynos_crtc->drm_crtc;
 
-	private->crtc[nr] = crtc;
+	private->crtc[manager->pipe] = crtc;
 
-	drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
+	drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
 	drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
 
 	exynos_drm_crtc_attach_mode_property(crtc);
@@ -350,39 +379,41 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 	return 0;
 }
 
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
 {
 	struct exynos_drm_private *private = dev->dev_private;
 	struct exynos_drm_crtc *exynos_crtc =
-		to_exynos_crtc(private->crtc[crtc]);
+		to_exynos_crtc(private->crtc[pipe]);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
 
 	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
 		return -EPERM;
 
-	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
-			exynos_drm_enable_vblank);
+	if (manager->ops->enable_vblank)
+		manager->ops->enable_vblank(manager);
 
 	return 0;
 }
 
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
 {
 	struct exynos_drm_private *private = dev->dev_private;
 	struct exynos_drm_crtc *exynos_crtc =
-		to_exynos_crtc(private->crtc[crtc]);
+		to_exynos_crtc(private->crtc[pipe]);
+	struct exynos_drm_manager *manager = exynos_crtc->manager;
 
 	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
 		return;
 
-	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
-			exynos_drm_disable_vblank);
+	if (manager->ops->disable_vblank)
+		manager->ops->disable_vblank(manager);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
 {
 	struct exynos_drm_private *dev_priv = dev->dev_private;
 	struct drm_pending_vblank_event *e, *t;
-	struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
 	unsigned long flags;
 
@@ -391,15 +422,71 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
 	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
 			base.link) {
 		/* if event's pipe isn't same as crtc then ignore it. */
-		if (crtc != e->pipe)
+		if (pipe != e->pipe)
 			continue;
 
 		list_del(&e->base.link);
 		drm_send_vblank_event(dev, -1, e);
-		drm_vblank_put(dev, crtc);
+		drm_vblank_put(dev, pipe);
 		atomic_set(&exynos_crtc->pending_flip, 0);
 		wake_up(&exynos_crtc->pending_flip_queue);
 	}
 
	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
+
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+			struct exynos_drm_overlay *overlay)
+{
+	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+	if (manager->ops->win_mode_set)
+		manager->ops->win_mode_set(manager, overlay);
+}
+
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos)
+{
+	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+	if (manager->ops->win_commit)
+		manager->ops->win_commit(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos)
+{
+	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+	if (manager->ops->win_enable)
+		manager->ops->win_enable(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos)
+{
+	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+	if (manager->ops->win_disable)
+		manager->ops->win_disable(manager, zpos);
+}
+
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_manager *manager;
+	struct drm_device *dev = fb->dev;
+	struct drm_crtc *crtc;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		manager = to_exynos_crtc(crtc)->manager;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (manager->ops->wait_for_vblank)
+			manager->ops->wait_for_vblank(manager);
+	}
+}
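
Both exynos_drm_crtc_create() above and the encoder setup in exynos_drm_core.c describe "which CRTCs can this output drive" as a bitmask with one bit per pipe, built with the `1 << manager->pipe` idiom. A small standalone sketch of how such a possible_crtcs mask is assembled; the names are invented for illustration.

/* Sketch of a possible_crtcs bitmask, one bit per CRTC pipe (invented names). */
#include <stdio.h>

static unsigned long possible_crtcs_mask(const int *pipes, int count)
{
	unsigned long mask = 0;

	for (int i = 0; i < count; i++)
		mask |= 1UL << pipes[i];	/* same idiom as 1 << manager->pipe */
	return mask;
}

int main(void)
{
	int pipes[] = { 0, 2 };	/* an output that can drive pipes 0 and 2 */

	printf("possible_crtcs = %#lx\n", possible_crtcs_mask(pipes, 2));	/* 0x5 */
	return 0;
}
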
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 3e197e6ae7d9..c27b66cc5d24 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,9 +15,21 @@
 #ifndef _EXYNOS_DRM_CRTC_H_
 #define _EXYNOS_DRM_CRTC_H_
 
-int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
+struct drm_device;
+struct drm_crtc;
+struct exynos_drm_manager;
+struct exynos_drm_overlay;
+
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
+
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+			struct exynos_drm_overlay *overlay);
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
new file mode 100644
index 000000000000..2b09c7c0bfcc
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -0,0 +1,339 @@
+/*
+ * Exynos DRM Parallel output support.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+struct exynos_dpi {
+	struct device *dev;
+	struct device_node *panel_node;
+
+	struct drm_panel *panel;
+	struct drm_connector connector;
+	struct drm_encoder *encoder;
+
+	struct videomode *vm;
+	int dpms_mode;
+};
+
+#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
+
+static enum drm_connector_status
+exynos_dpi_detect(struct drm_connector *connector, bool force)
+{
+	struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+	/* panels supported only by boot-loader are always connected */
+	if (!ctx->panel_node)
+		return connector_status_connected;
+
+	if (!ctx->panel) {
+		ctx->panel = of_drm_find_panel(ctx->panel_node);
+		if (ctx->panel)
+			drm_panel_attach(ctx->panel, &ctx->connector);
+	}
+
+	if (ctx->panel)
+		return connector_status_connected;
+
+	return connector_status_disconnected;
+}
+
+static void exynos_dpi_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs exynos_dpi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = exynos_dpi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = exynos_dpi_connector_destroy,
+};
+
+static int exynos_dpi_get_modes(struct drm_connector *connector)
+{
+	struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+	/* fimd timings gets precedence over panel modes */
+	if (ctx->vm) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_create(connector->dev);
+		if (!mode) {
+			DRM_ERROR("failed to create a new display mode\n");
+			return 0;
+		}
+		drm_display_mode_from_videomode(ctx->vm, mode);
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, mode);
+		return 1;
+	}
+
+	if (ctx->panel)
+		return ctx->panel->funcs->get_modes(ctx->panel);
+
+	return 0;
+}
+
+static int exynos_dpi_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+exynos_dpi_best_encoder(struct drm_connector *connector)
+{
+	struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+	return ctx->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
+	.get_modes = exynos_dpi_get_modes,
+	.mode_valid = exynos_dpi_mode_valid,
+	.best_encoder = exynos_dpi_best_encoder,
+};
+
+static int exynos_dpi_create_connector(struct exynos_drm_display *display,
+				       struct drm_encoder *encoder)
+{
+	struct exynos_dpi *ctx = display->ctx;
+	struct drm_connector *connector = &ctx->connector;
+	int ret;
+
+	ctx->encoder = encoder;
+
+	if (ctx->panel_node)
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	ret = drm_connector_init(encoder->dev, connector,
+				 &exynos_dpi_connector_funcs,
+				 DRM_MODE_CONNECTOR_VGA);
+	if (ret) {
+		DRM_ERROR("failed to initialize connector with drm\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
+	drm_sysfs_connector_add(connector);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	return 0;
+}
+
+static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+{
+	if (ctx->panel)
+		drm_panel_enable(ctx->panel);
+}
+
+static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+{
+	if (ctx->panel)
+		drm_panel_disable(ctx->panel);
+}
+
+static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
+{
+	struct exynos_dpi *ctx = display->ctx;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
+			exynos_dpi_poweron(ctx);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
+			exynos_dpi_poweroff(ctx);
+		break;
+	default:
+		break;
+	};
+	ctx->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dpi_display_ops = {
+	.create_connector = exynos_dpi_create_connector,
+	.dpms = exynos_dpi_dpms
+};
+
+static struct exynos_drm_display exynos_dpi_display = {
+	.type = EXYNOS_DISPLAY_TYPE_LCD,
+	.ops = &exynos_dpi_display_ops,
+};
+
+/* of_* functions will be removed after merge of of_graph patches */
+static struct device_node *
+of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
+{
+	struct device_node *np;
+
+	for_each_child_of_node(parent, np) {
+		u32 r;
+
+		if (!np->name || of_node_cmp(np->name, name))
+			continue;
+
+		if (of_property_read_u32(np, "reg", &r) < 0)
+			r = 0;
+
+		if (reg == r)
+			break;
+	}
+
+	return np;
+}
+
+static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
+						    u32 reg)
+{
+	struct device_node *ports, *port;
+
+	ports = of_get_child_by_name(parent, "ports");
+	if (ports)
+		parent = ports;
+
+	port = of_get_child_by_name_reg(parent, "port", reg);
+
+	of_node_put(ports);
+
+	return port;
+}
+
+static struct device_node *
+of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
+{
+	return of_get_child_by_name_reg(port, "endpoint", reg);
+}
+
+static struct device_node *
+of_graph_get_remote_port_parent(const struct device_node *node)
+{
+	struct device_node *np;
+	unsigned int depth;
+
+	np = of_parse_phandle(node, "remote-endpoint", 0);
+
+	/* Walk 3 levels up only if there is 'ports' node. */
+	for (depth = 3; depth && np; depth--) {
+		np = of_get_next_parent(np);
+		if (depth == 2 && of_node_cmp(np->name, "ports"))
+			break;
+	}
+	return np;
+}
+
+enum {
+	FIMD_PORT_IN0,
+	FIMD_PORT_IN1,
+	FIMD_PORT_IN2,
+	FIMD_PORT_RGB,
+	FIMD_PORT_WRB,
+};
+
+static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
+{
+	struct device_node *np, *ep;
+
+	np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB);
+	if (!np)
+		return NULL;
+
+	ep = of_graph_get_endpoint_by_reg(np, 0);
+	of_node_put(np);
+	if (!ep)
+		return NULL;
+
+	np = of_graph_get_remote_port_parent(ep);
+	of_node_put(ep);
+
+	return np;
+}
+
+static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
+{
+	struct device *dev = ctx->dev;
+	struct device_node *dn = dev->of_node;
+	struct device_node *np;
+
+	ctx->panel_node = exynos_dpi_of_find_panel_node(dev);
+
+	np = of_get_child_by_name(dn, "display-timings");
+	if (np) {
+		struct videomode *vm;
+		int ret;
+
+		of_node_put(np);
+
+		vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL);
+		if (!vm)
+			return -ENOMEM;
+
+		ret = of_get_videomode(dn, vm, 0);
+		if (ret < 0)
+			return ret;
+
+		ctx->vm = vm;
+
+		return 0;
+	}
+
+	if (!ctx->panel_node)
+		return -EINVAL;
+
+	return 0;
+}
+
+int exynos_dpi_probe(struct device *dev)
+{
+	struct exynos_dpi *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	exynos_dpi_display.ctx = ctx;
+	ctx->dpms_mode = DRM_MODE_DPMS_OFF;
+
+	ret = exynos_dpi_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	exynos_drm_display_register(&exynos_dpi_display);
+
+	return 0;
+}
+
+int exynos_dpi_remove(struct device *dev)
+{
+	exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+	exynos_drm_display_unregister(&exynos_dpi_display);
+
+	return 0;
+}
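
exynos_dpi_dpms() in the new file above keeps the last DPMS state in ctx->dpms_mode, so power transitions fire only on a real ON/OFF edge and repeated DPMS_ON requests do not re-enable the panel. A standalone sketch of that edge-triggered bookkeeping; names are invented, and prints stand in for the drm_panel calls.

/* Sketch of edge-triggered dpms bookkeeping (invented names, plain C). */
#include <stdio.h>

enum dpms { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

struct panel_ctx {
	enum dpms dpms_mode;	/* last requested state, like ctx->dpms_mode */
};

static void panel_dpms(struct panel_ctx *ctx, enum dpms mode)
{
	switch (mode) {
	case DPMS_ON:
		if (ctx->dpms_mode != DPMS_ON)
			printf("power on\n");	/* stands in for drm_panel_enable() */
		break;
	case DPMS_STANDBY:
	case DPMS_SUSPEND:
	case DPMS_OFF:
		if (ctx->dpms_mode == DPMS_ON)
			printf("power off\n");	/* stands in for drm_panel_disable() */
		break;
	}
	ctx->dpms_mode = mode;
}

int main(void)
{
	struct panel_ctx ctx = { .dpms_mode = DPMS_OFF };

	panel_dpms(&ctx, DPMS_ON);	/* powers on */
	panel_dpms(&ctx, DPMS_ON);	/* no-op: already on */
	panel_dpms(&ctx, DPMS_OFF);	/* powers off */
	return 0;
}
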
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index c204b4e3356e..2d27ba23a6a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -11,6 +11,7 @@
  * option) any later version.
  */
 
+#include <linux/pm_runtime.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -53,6 +54,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&private->pageflip_event_list);
+	dev_set_drvdata(dev->dev, dev);
 	dev->dev_private = (void *)private;
 
 	/*
@@ -64,38 +66,36 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	ret = drm_create_iommu_mapping(dev);
 	if (ret < 0) {
 		DRM_ERROR("failed to create iommu mapping.\n");
-		goto err_crtc;
+		goto err_free_private;
 	}
 
 	drm_mode_config_init(dev);
 
-	/* init kms poll for handling hpd */
-	drm_kms_helper_poll_init(dev);
-
 	exynos_drm_mode_config_init(dev);
 
-	/*
-	 * EXYNOS4 is enough to have two CRTCs and each crtc would be used
-	 * without dependency of hardware.
-	 */
-	for (nr = 0; nr < MAX_CRTC; nr++) {
-		ret = exynos_drm_crtc_create(dev, nr);
-		if (ret)
-			goto err_release_iommu_mapping;
-	}
+	ret = exynos_drm_initialize_managers(dev);
+	if (ret)
+		goto err_mode_config_cleanup;
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
 		struct drm_plane *plane;
-		unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
+		unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_release_iommu_mapping;
+			goto err_manager_cleanup;
 	}
 
+	ret = exynos_drm_initialize_displays(dev);
+	if (ret)
+		goto err_manager_cleanup;
+
+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(dev);
+
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_release_iommu_mapping;
+		goto err_display_cleanup;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -109,30 +109,25 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	/* setup possible_clones. */
 	exynos_drm_encoder_setup(dev);
 
-	/*
-	 * create and configure fb helper and also exynos specific
-	 * fbdev object.
-	 */
-	ret = exynos_drm_fbdev_init(dev);
-	if (ret) {
-		DRM_ERROR("failed to initialize drm fbdev\n");
-		goto err_drm_device;
-	}
-
 	drm_vblank_offdelay = VBLANK_OFF_DELAY;
 
 	platform_set_drvdata(dev->platformdev, dev);
 
+	/* force connectors detection */
+	drm_helper_hpd_irq_event(dev);
+
 	return 0;
 
-err_drm_device:
-	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
-err_release_iommu_mapping:
-	drm_release_iommu_mapping(dev);
-err_crtc:
+err_display_cleanup:
+	exynos_drm_remove_displays(dev);
+err_manager_cleanup:
+	exynos_drm_remove_managers(dev);
+err_mode_config_cleanup:
 	drm_mode_config_cleanup(dev);
+	drm_release_iommu_mapping(dev);
+err_free_private:
 	kfree(private);
 
 	return ret;
@@ -144,6 +139,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	exynos_drm_device_unregister(dev);
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
+	exynos_drm_remove_displays(dev);
+	exynos_drm_remove_managers(dev);
 	drm_mode_config_cleanup(dev);
 
 	drm_release_iommu_mapping(dev);
@@ -158,6 +155,41 @@ static const struct file_operations exynos_drm_gem_fops = {
 	.mmap = exynos_drm_gem_mmap_buffer,
 };
 
+static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+	struct drm_connector *connector;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		int old_dpms = connector->dpms;
+
+		if (connector->funcs->dpms)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+		/* Set the old mode back to the connector for resume */
+		connector->dpms = old_dpms;
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+static int exynos_drm_resume(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->funcs->dpms)
+			connector->funcs->dpms(connector, connector->dpms);
+	}
+
+	drm_helper_resume_force_mode(dev);
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
@@ -295,6 +327,8 @@ static struct drm_driver exynos_drm_driver = {
 				  DRIVER_GEM | DRIVER_PRIME,
 	.load = exynos_drm_load,
 	.unload = exynos_drm_unload,
+	.suspend = exynos_drm_suspend,
+	.resume = exynos_drm_resume,
 	.open = exynos_drm_open,
 	.preclose = exynos_drm_preclose,
 	.lastclose = exynos_drm_lastclose,
@@ -329,6 +363,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
 	return drm_platform_init(&exynos_drm_driver, pdev);
 }
 
@@ -339,12 +376,67 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int exynos_drm_sys_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	pm_message_t message;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	message.event = PM_EVENT_SUSPEND;
+	return exynos_drm_suspend(drm_dev, message);
+}
+
+static int exynos_drm_sys_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return exynos_drm_resume(drm_dev);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int exynos_drm_runtime_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	pm_message_t message;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	message.event = PM_EVENT_SUSPEND;
+	return exynos_drm_suspend(drm_dev, message);
+}
+
+static int exynos_drm_runtime_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+	if (!pm_runtime_suspended(dev))
+		return 0;
+
+	return exynos_drm_resume(drm_dev);
+}
+#endif
+
+static const struct dev_pm_ops exynos_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
+	SET_RUNTIME_PM_OPS(exynos_drm_runtime_suspend,
+			exynos_drm_runtime_resume, NULL)
+};
+
 static struct platform_driver exynos_drm_platform_driver = {
 	.probe = exynos_drm_platform_probe,
 	.remove = exynos_drm_platform_remove,
 	.driver = {
 		.owner = THIS_MODULE,
 		.name = "exynos-drm",
+		.pm = &exynos_drm_pm_ops,
 	},
 };
 
@@ -352,6 +444,18 @@ static int __init exynos_drm_init(void)
 {
 	int ret;
 
+#ifdef CONFIG_DRM_EXYNOS_DP
+	ret = platform_driver_register(&dp_driver);
+	if (ret < 0)
+		goto out_dp;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	ret = platform_driver_register(&dsi_driver);
+	if (ret < 0)
+		goto out_dsi;
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	ret = platform_driver_register(&fimd_driver);
 	if (ret < 0)
@@ -365,13 +469,6 @@ static int __init exynos_drm_init(void)
 	ret = platform_driver_register(&mixer_driver);
 	if (ret < 0)
 		goto out_mixer;
-	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
-	if (ret < 0)
-		goto out_common_hdmi;
-
-	ret = exynos_platform_device_hdmi_register();
-	if (ret < 0)
-		goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -464,10 +561,6 @@ out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
-	exynos_platform_device_hdmi_unregister();
-out_common_hdmi_dev:
-	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
-out_common_hdmi:
 	platform_driver_unregister(&mixer_driver);
 out_mixer:
 	platform_driver_unregister(&hdmi_driver);
@@ -478,6 +571,16 @@ out_hdmi:
 	platform_driver_unregister(&fimd_driver);
 out_fimd:
 #endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	platform_driver_unregister(&dsi_driver);
+out_dsi:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+	platform_driver_unregister(&dp_driver);
+out_dp:
+#endif
 	return ret;
 }
 
@@ -509,8 +612,6 @@ static void __exit exynos_drm_exit(void)
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
-	exynos_platform_device_hdmi_unregister();
-	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 	platform_driver_unregister(&mixer_driver);
 	platform_driver_unregister(&hdmi_driver);
 #endif
@@ -522,6 +623,14 @@ static void __exit exynos_drm_exit(void)
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	platform_driver_unregister(&fimd_driver);
 #endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	platform_driver_unregister(&dsi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+	platform_driver_unregister(&dp_driver);
+#endif
 }
 
 module_init(exynos_drm_init);
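
The suspend path added above deliberately writes the old dpms value back after forcing each connector off, so that exynos_drm_resume() can replay the exact pre-suspend state. The same save/force-off/replay idea, sketched in plain standalone C with invented names:

/* Sketch of save-state suspend and replay resume (invented names, plain C). */
#include <stdio.h>

enum dpms { DPMS_ON, DPMS_OFF };

struct connector {
	enum dpms dpms;
};

static void set_dpms(struct connector *c, enum dpms mode)
{
	/* stands in for connector->funcs->dpms(): drives hw, records state */
	c->dpms = mode;
}

static void suspend_connectors(struct connector *c, int n)
{
	for (int i = 0; i < n; i++) {
		enum dpms old = c[i].dpms;

		set_dpms(&c[i], DPMS_OFF);	/* hardware goes off */
		c[i].dpms = old;		/* keep the pre-suspend state */
	}
}

static void resume_connectors(struct connector *c, int n)
{
	for (int i = 0; i < n; i++) {
		set_dpms(&c[i], c[i].dpms);	/* replay the saved state */
		printf("conn %d -> %s\n", i,
		       c[i].dpms == DPMS_ON ? "on" : "off");
	}
}

int main(void)
{
	struct connector conns[2] = { { DPMS_ON }, { DPMS_OFF } };

	suspend_connectors(conns, 2);
	resume_connectors(conns, 2);	/* conn 0 comes back on, conn 1 stays off */
	return 0;
}
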
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a8f9dba2a816..ce3e6a30deaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -54,22 +54,6 @@ enum exynos_drm_output_type {
54}; 54};
55 55
56/* 56/*
57 * Exynos drm overlay ops structure.
58 *
59 * @mode_set: copy drm overlay info to hw specific overlay info.
60 * @commit: apply hardware specific overlay data to registers.
61 * @enable: enable hardware specific overlay.
62 * @disable: disable hardware specific overlay.
63 */
64struct exynos_drm_overlay_ops {
65 void (*mode_set)(struct device *subdrv_dev,
66 struct exynos_drm_overlay *overlay);
67 void (*commit)(struct device *subdrv_dev, int zpos);
68 void (*enable)(struct device *subdrv_dev, int zpos);
69 void (*disable)(struct device *subdrv_dev, int zpos);
70};
71
72/*
73 * Exynos drm common overlay structure. 57 * Exynos drm common overlay structure.
74 * 58 *
75 * @fb_x: offset x on a framebuffer to be displayed. 59 * @fb_x: offset x on a framebuffer to be displayed.
@@ -138,77 +122,110 @@ struct exynos_drm_overlay {
138 * Exynos DRM Display Structure. 122 * Exynos DRM Display Structure.
139 * - this structure is common to analog tv, digital tv and lcd panel. 123 * - this structure is common to analog tv, digital tv and lcd panel.
140 * 124 *
141 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. 125 * @initialize: initializes the display with drm_dev
142 * @is_connected: check for that display is connected or not. 126 * @remove: cleans up the display for removal
143 * @get_edid: get edid modes from display driver. 127 * @mode_fixup: fix mode data comparing to hw specific display mode.
144 * @get_panel: get panel object from display driver. 128 * @mode_set: convert drm_display_mode to hw specific display mode and
129 * would be called by encoder->mode_set().
145 * @check_mode: check if mode is valid or not. 130 * @check_mode: check if mode is valid or not.
146 * @power_on: display device on or off. 131 * @dpms: display device on or off.
132 * @commit: apply changes to hw
147 */ 133 */
134struct exynos_drm_display;
148struct exynos_drm_display_ops { 135struct exynos_drm_display_ops {
136 int (*initialize)(struct exynos_drm_display *display,
137 struct drm_device *drm_dev);
138 int (*create_connector)(struct exynos_drm_display *display,
139 struct drm_encoder *encoder);
140 void (*remove)(struct exynos_drm_display *display);
141 void (*mode_fixup)(struct exynos_drm_display *display,
142 struct drm_connector *connector,
143 const struct drm_display_mode *mode,
144 struct drm_display_mode *adjusted_mode);
145 void (*mode_set)(struct exynos_drm_display *display,
146 struct drm_display_mode *mode);
147 int (*check_mode)(struct exynos_drm_display *display,
148 struct drm_display_mode *mode);
149 void (*dpms)(struct exynos_drm_display *display, int mode);
150 void (*commit)(struct exynos_drm_display *display);
151};
152
153/*
154 * Exynos drm display structure, maps 1:1 with an encoder/connector
155 *
156 * @list: the list entry for this display
157 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
158 * @encoder: encoder object this display maps to
159 * @connector: connector object this display maps to
160 * @ops: pointer to callbacks for exynos drm specific functionality
161 * @ctx: A pointer to the display's implementation specific context
162 */
163struct exynos_drm_display {
164 struct list_head list;
149 enum exynos_drm_output_type type; 165 enum exynos_drm_output_type type;
150 bool (*is_connected)(struct device *dev); 166 struct drm_encoder *encoder;
151 struct edid *(*get_edid)(struct device *dev, 167 struct drm_connector *connector;
152 struct drm_connector *connector); 168 struct exynos_drm_display_ops *ops;
153 void *(*get_panel)(struct device *dev); 169 void *ctx;
154 int (*check_mode)(struct device *dev, struct drm_display_mode *mode);
155 int (*power_on)(struct device *dev, int mode);
156}; 170};
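
To make the new split concrete, here is a minimal sketch of a display-side implementation (illustrative only, not part of this patch; every my_* identifier is hypothetical, only the exynos_drm_* types come from this header):

static void my_display_dpms(struct exynos_drm_display *display, int mode)
{
	struct my_ctx *ctx = display->ctx;	/* driver private data */

	if (mode == DRM_MODE_DPMS_ON)
		my_hw_power_on(ctx);		/* hypothetical hw helper */
	else
		my_hw_power_off(ctx);
}

static struct exynos_drm_display_ops my_display_ops = {
	.dpms = my_display_dpms,
};

static struct exynos_drm_display my_display = {
	.type	= EXYNOS_DISPLAY_TYPE_LCD,
	.ops	= &my_display_ops,
	/* .ctx is filled in at probe time with the driver's context */
};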
157 171
158/* 172/*
159 * Exynos drm manager ops 173 * Exynos drm manager ops
160 * 174 *
175 * @initialize: initializes the manager with drm_dev
176 * @remove: cleans up the manager for removal
161 * @dpms: control device power. 177 * @dpms: control device power.
162 * @apply: set timing, vblank and overlay data to registers. 178 * @mode_fixup: fix mode data before applying it
163 * @mode_fixup: fix mode data comparing to hw specific display mode. 179 * @mode_set: apply the given mode to the manager
164 * @mode_set: convert drm_display_mode to hw specific display mode and
165 * would be called by encoder->mode_set().
166 * @get_max_resol: get maximum resolution to specific hardware.
167 * @commit: set current hw specific display mode to hw. 180 * @commit: set current hw specific display mode to hw.
168 * @enable_vblank: specific driver callback for enabling vblank interrupt. 181 * @enable_vblank: specific driver callback for enabling vblank interrupt.
169 * @disable_vblank: specific driver callback for disabling vblank interrupt. 182 * @disable_vblank: specific driver callback for disabling vblank interrupt.
170 * @wait_for_vblank: wait for vblank interrupt to make sure that 183 * @wait_for_vblank: wait for vblank interrupt to make sure that
171 * hardware overlay is updated. 184 * hardware overlay is updated.
185 * @win_mode_set: copy drm overlay info to hw specific overlay info.
186 * @win_commit: apply hardware specific overlay data to registers.
187 * @win_enable: enable hardware specific overlay.
188 * @win_disable: disable hardware specific overlay.
172 */ 189 */
190struct exynos_drm_manager;
173struct exynos_drm_manager_ops { 191struct exynos_drm_manager_ops {
174 void (*dpms)(struct device *subdrv_dev, int mode); 192 int (*initialize)(struct exynos_drm_manager *mgr,
175 void (*apply)(struct device *subdrv_dev); 193 struct drm_device *drm_dev, int pipe);
176 void (*mode_fixup)(struct device *subdrv_dev, 194 void (*remove)(struct exynos_drm_manager *mgr);
177 struct drm_connector *connector, 195 void (*dpms)(struct exynos_drm_manager *mgr, int mode);
196 bool (*mode_fixup)(struct exynos_drm_manager *mgr,
178 const struct drm_display_mode *mode, 197 const struct drm_display_mode *mode,
179 struct drm_display_mode *adjusted_mode); 198 struct drm_display_mode *adjusted_mode);
180 void (*mode_set)(struct device *subdrv_dev, void *mode); 199 void (*mode_set)(struct exynos_drm_manager *mgr,
181 void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width, 200 const struct drm_display_mode *mode);
182 unsigned int *height); 201 void (*commit)(struct exynos_drm_manager *mgr);
183 void (*commit)(struct device *subdrv_dev); 202 int (*enable_vblank)(struct exynos_drm_manager *mgr);
184 int (*enable_vblank)(struct device *subdrv_dev); 203 void (*disable_vblank)(struct exynos_drm_manager *mgr);
185 void (*disable_vblank)(struct device *subdrv_dev); 204 void (*wait_for_vblank)(struct exynos_drm_manager *mgr);
186 void (*wait_for_vblank)(struct device *subdrv_dev); 205 void (*win_mode_set)(struct exynos_drm_manager *mgr,
206 struct exynos_drm_overlay *overlay);
207 void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
208 void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
209 void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
187}; 210};
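
The crtc-side counterpart: the callbacks of the removed exynos_drm_overlay_ops now live here as the win_* hooks. A matching sketch (illustrative only, my_* names are hypothetical):

static void my_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
	struct my_ctx *ctx = mgr->ctx;

	my_hw_latch_overlay(ctx, zpos);	/* hypothetical: flush shadow regs */
}

static struct exynos_drm_manager_ops my_manager_ops = {
	.win_commit = my_win_commit,
};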
188 211
189/* 212/*
190 * Exynos drm common manager structure. 213 * Exynos drm common manager structure, maps 1:1 with a crtc
191 * 214 *
192 * @dev: pointer to device object for subdrv device driver. 215 * @list: the list entry for this manager
193 * sub drivers such as display controller or hdmi driver, 216 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
194 * have their own device object. 217 * @drm_dev: pointer to the drm device
195 * @ops: pointer to callbacks for exynos drm specific framebuffer. 218 * @pipe: the pipe number for this crtc/manager
196 * these callbacks should be set by specific drivers such fimd 219 * @ops: pointer to callbacks for exynos drm specific functionality
197 * or hdmi driver and are used to control hardware global registers. 220 * @ctx: A pointer to the manager's implementation specific context
198 * @overlay_ops: pointer to callbacks for exynos drm specific framebuffer.
199 * these callbacks should be set by specific drivers such fimd
200 * or hdmi driver and are used to control hardware overlay reigsters.
201 * @display: pointer to callbacks for exynos drm specific framebuffer.
202 * these callbacks should be set by specific drivers such fimd
203 * or hdmi driver and are used to control display devices such as
204 * analog tv, digital tv and lcd panel and also get timing data for them.
205 */ 221 */
206struct exynos_drm_manager { 222struct exynos_drm_manager {
207 struct device *dev; 223 struct list_head list;
224 enum exynos_drm_output_type type;
225 struct drm_device *drm_dev;
208 int pipe; 226 int pipe;
209 struct exynos_drm_manager_ops *ops; 227 struct exynos_drm_manager_ops *ops;
210 struct exynos_drm_overlay_ops *overlay_ops; 228 void *ctx;
211 struct exynos_drm_display_ops *display_ops;
212}; 229};
213 230
214struct exynos_drm_g2d_private { 231struct exynos_drm_g2d_private {
@@ -271,14 +288,11 @@ struct exynos_drm_private {
271 * by probe callback. 288 * by probe callback.
272 * @open: this would be called with drm device file open. 289 * @open: this would be called with drm device file open.
273 * @close: this would be called with drm device file close. 290 * @close: this would be called with drm device file close.
274 * @encoder: encoder object owned by this sub driver.
275 * @connector: connector object owned by this sub driver.
276 */ 291 */
277struct exynos_drm_subdrv { 292struct exynos_drm_subdrv {
278 struct list_head list; 293 struct list_head list;
279 struct device *dev; 294 struct device *dev;
280 struct drm_device *drm_dev; 295 struct drm_device *drm_dev;
281 struct exynos_drm_manager *manager;
282 296
283 int (*probe)(struct drm_device *drm_dev, struct device *dev); 297 int (*probe)(struct drm_device *drm_dev, struct device *dev);
284 void (*remove)(struct drm_device *drm_dev, struct device *dev); 298 void (*remove)(struct drm_device *drm_dev, struct device *dev);
@@ -286,9 +300,6 @@ struct exynos_drm_subdrv {
286 struct drm_file *file); 300 struct drm_file *file);
287 void (*close)(struct drm_device *drm_dev, struct device *dev, 301 void (*close)(struct drm_device *drm_dev, struct device *dev,
288 struct drm_file *file); 302 struct drm_file *file);
289
290 struct drm_encoder *encoder;
291 struct drm_connector *connector;
292}; 303};
293 304
294/* 305/*
@@ -303,6 +314,16 @@ int exynos_drm_device_register(struct drm_device *dev);
303 */ 314 */
304int exynos_drm_device_unregister(struct drm_device *dev); 315int exynos_drm_device_unregister(struct drm_device *dev);
305 316
317int exynos_drm_initialize_managers(struct drm_device *dev);
318void exynos_drm_remove_managers(struct drm_device *dev);
319int exynos_drm_initialize_displays(struct drm_device *dev);
320void exynos_drm_remove_displays(struct drm_device *dev);
321
322int exynos_drm_manager_register(struct exynos_drm_manager *manager);
323int exynos_drm_manager_unregister(struct exynos_drm_manager *manager);
324int exynos_drm_display_register(struct exynos_drm_display *display);
325int exynos_drm_display_unregister(struct exynos_drm_display *display);
326
306/* 327/*
307 * this function would be called by sub drivers such as display controller 328 * this function would be called by sub drivers such as display controller
308 * or hdmi driver to register this sub driver object to exynos drm driver 329 * or hdmi driver to register this sub driver object to exynos drm driver
@@ -338,6 +359,16 @@ int exynos_platform_device_ipp_register(void);
338 */ 359 */
339void exynos_platform_device_ipp_unregister(void); 360void exynos_platform_device_ipp_unregister(void);
340 361
362#ifdef CONFIG_DRM_EXYNOS_DPI
363int exynos_dpi_probe(struct device *dev);
364int exynos_dpi_remove(struct device *dev);
365#else
366static inline int exynos_dpi_probe(struct device *dev) { return 0; }
367static inline int exynos_dpi_remove(struct device *dev) { return 0; }
368#endif
369
370extern struct platform_driver dp_driver;
371extern struct platform_driver dsi_driver;
341extern struct platform_driver fimd_driver; 372extern struct platform_driver fimd_driver;
342extern struct platform_driver hdmi_driver; 373extern struct platform_driver hdmi_driver;
343extern struct platform_driver mixer_driver; 374extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
new file mode 100644
index 000000000000..eb73e3bf2a0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -0,0 +1,1524 @@
1/*
2 * Samsung SoC MIPI DSI Master driver.
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 *
6 * Contacts: Tomasz Figa <t.figa@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_mipi_dsi.h>
16#include <drm/drm_panel.h>
17
18#include <linux/clk.h>
19#include <linux/irq.h>
20#include <linux/phy/phy.h>
21#include <linux/regulator/consumer.h>
22
23#include <video/mipi_display.h>
24#include <video/videomode.h>
25
26#include "exynos_drm_drv.h"
27
28/* returns true iff both arguments logically differ */
29#define NEQV(a, b) (!(a) ^ !(b))
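
Because each operand is collapsed to 0 or 1 before the XOR, the macro compares truth values rather than bit patterns; illustrative values:

NEQV(0, 0)	/* -> 0, both false */
NEQV(4, 1)	/* -> 0, both true, even though 4 != 1 */
NEQV(0, 8)	/* -> 1, truth values differ */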
30
31#define DSIM_STATUS_REG 0x0 /* Status register */
32#define DSIM_SWRST_REG 0x4 /* Software reset register */
33#define DSIM_CLKCTRL_REG 0x8 /* Clock control register */
34#define DSIM_TIMEOUT_REG 0xc /* Time out register */
35#define DSIM_CONFIG_REG 0x10 /* Configuration register */
36#define DSIM_ESCMODE_REG 0x14 /* Escape mode register */
37
38/* Main display image resolution register */
39#define DSIM_MDRESOL_REG 0x18
40#define DSIM_MVPORCH_REG 0x1c /* Main display Vporch register */
41#define DSIM_MHPORCH_REG 0x20 /* Main display Hporch register */
42#define DSIM_MSYNC_REG 0x24 /* Main display sync area register */
43
44/* Sub display image resolution register */
45#define DSIM_SDRESOL_REG 0x28
46#define DSIM_INTSRC_REG 0x2c /* Interrupt source register */
47#define DSIM_INTMSK_REG 0x30 /* Interrupt mask register */
48#define DSIM_PKTHDR_REG 0x34 /* Packet Header FIFO register */
49#define DSIM_PAYLOAD_REG 0x38 /* Payload FIFO register */
50#define DSIM_RXFIFO_REG 0x3c /* Read FIFO register */
51#define DSIM_FIFOTHLD_REG 0x40 /* FIFO threshold level register */
52#define DSIM_FIFOCTRL_REG 0x44 /* FIFO status and control register */
53
54/* FIFO memory AC characteristic register */
55#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
56#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
57#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
58#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
59
60/* DSIM_STATUS */
61#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
62#define DSIM_STOP_STATE_CLK (1 << 8)
63#define DSIM_TX_READY_HS_CLK (1 << 10)
64#define DSIM_PLL_STABLE (1 << 31)
65
66/* DSIM_SWRST */
67#define DSIM_FUNCRST (1 << 16)
68#define DSIM_SWRST (1 << 0)
69
70/* DSIM_TIMEOUT */
71#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
72#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
73
74/* DSIM_CLKCTRL */
75#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
76#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
77#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
78#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
79#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
80#define DSIM_BYTE_CLKEN (1 << 24)
81#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
82#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
83#define DSIM_PLL_BYPASS (1 << 27)
84#define DSIM_ESC_CLKEN (1 << 28)
85#define DSIM_TX_REQUEST_HSCLK (1 << 31)
86
87/* DSIM_CONFIG */
88#define DSIM_LANE_EN_CLK (1 << 0)
89#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
90#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
91#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
92#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
93#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
94#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
95#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
96#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
97#define DSIM_SUB_VC(x) (((x) & 0x3) << 16)
98#define DSIM_MAIN_VC(x) (((x) & 0x3) << 18)
99#define DSIM_HSA_MODE (1 << 20)
100#define DSIM_HBP_MODE (1 << 21)
101#define DSIM_HFP_MODE (1 << 22)
102#define DSIM_HSE_MODE (1 << 23)
103#define DSIM_AUTO_MODE (1 << 24)
104#define DSIM_VIDEO_MODE (1 << 25)
105#define DSIM_BURST_MODE (1 << 26)
106#define DSIM_SYNC_INFORM (1 << 27)
107#define DSIM_EOT_DISABLE (1 << 28)
108#define DSIM_MFLUSH_VS (1 << 29)
109
110/* DSIM_ESCMODE */
111#define DSIM_TX_TRIGGER_RST (1 << 4)
112#define DSIM_TX_LPDT_LP (1 << 6)
113#define DSIM_CMD_LPDT_LP (1 << 7)
114#define DSIM_FORCE_BTA (1 << 16)
115#define DSIM_FORCE_STOP_STATE (1 << 20)
116#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
117#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
118
119/* DSIM_MDRESOL */
120#define DSIM_MAIN_STAND_BY (1 << 31)
121#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16)
122#define DSIM_MAIN_HRESOL(x) (((x) & 0x7ff) << 0)
123
124/* DSIM_MVPORCH */
125#define DSIM_CMD_ALLOW(x) ((x) << 28)
126#define DSIM_STABLE_VFP(x) ((x) << 16)
127#define DSIM_MAIN_VBP(x) ((x) << 0)
128#define DSIM_CMD_ALLOW_MASK (0xf << 28)
129#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
130#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
131
132/* DSIM_MHPORCH */
133#define DSIM_MAIN_HFP(x) ((x) << 16)
134#define DSIM_MAIN_HBP(x) ((x) << 0)
135#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
136#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
137
138/* DSIM_MSYNC */
139#define DSIM_MAIN_VSA(x) ((x) << 22)
140#define DSIM_MAIN_HSA(x) ((x) << 0)
141#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
142#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
143
144/* DSIM_SDRESOL */
145#define DSIM_SUB_STANDY(x) ((x) << 31)
146#define DSIM_SUB_VRESOL(x) ((x) << 16)
147#define DSIM_SUB_HRESOL(x) ((x) << 0)
148#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
149#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
150#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
151
152/* DSIM_INTSRC */
153#define DSIM_INT_PLL_STABLE (1 << 31)
154#define DSIM_INT_SW_RST_RELEASE (1 << 30)
155#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
156#define DSIM_INT_BTA (1 << 25)
157#define DSIM_INT_FRAME_DONE (1 << 24)
158#define DSIM_INT_RX_TIMEOUT (1 << 21)
159#define DSIM_INT_BTA_TIMEOUT (1 << 20)
160#define DSIM_INT_RX_DONE (1 << 18)
161#define DSIM_INT_RX_TE (1 << 17)
162#define DSIM_INT_RX_ACK (1 << 16)
163#define DSIM_INT_RX_ECC_ERR (1 << 15)
164#define DSIM_INT_RX_CRC_ERR (1 << 14)
165
166/* DSIM_FIFOCTRL */
167#define DSIM_RX_DATA_FULL (1 << 25)
168#define DSIM_RX_DATA_EMPTY (1 << 24)
169#define DSIM_SFR_HEADER_FULL (1 << 23)
170#define DSIM_SFR_HEADER_EMPTY (1 << 22)
171#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
172#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
173#define DSIM_I80_HEADER_FULL (1 << 19)
174#define DSIM_I80_HEADER_EMPTY (1 << 18)
175#define DSIM_I80_PAYLOAD_FULL (1 << 17)
176#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
177#define DSIM_SD_HEADER_FULL (1 << 15)
178#define DSIM_SD_HEADER_EMPTY (1 << 14)
179#define DSIM_SD_PAYLOAD_FULL (1 << 13)
180#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
181#define DSIM_MD_HEADER_FULL (1 << 11)
182#define DSIM_MD_HEADER_EMPTY (1 << 10)
183#define DSIM_MD_PAYLOAD_FULL (1 << 9)
184#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
185#define DSIM_RX_FIFO (1 << 4)
186#define DSIM_SFR_FIFO (1 << 3)
187#define DSIM_I80_FIFO (1 << 2)
188#define DSIM_SD_FIFO (1 << 1)
189#define DSIM_MD_FIFO (1 << 0)
190
191/* DSIM_PHYACCHR */
192#define DSIM_AFC_EN (1 << 14)
193#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
194
195/* DSIM_PLLCTRL */
196#define DSIM_FREQ_BAND(x) ((x) << 24)
197#define DSIM_PLL_EN (1 << 23)
198#define DSIM_PLL_P(x) ((x) << 13)
199#define DSIM_PLL_M(x) ((x) << 4)
200#define DSIM_PLL_S(x) ((x) << 1)
201
202#define DSI_MAX_BUS_WIDTH 4
203#define DSI_NUM_VIRTUAL_CHANNELS 4
204#define DSI_TX_FIFO_SIZE 2048
205#define DSI_RX_FIFO_SIZE 256
206#define DSI_XFER_TIMEOUT_MS 100
207#define DSI_RX_FIFO_EMPTY 0x30800002
208
209enum exynos_dsi_transfer_type {
210 EXYNOS_DSI_TX,
211 EXYNOS_DSI_RX,
212};
213
214struct exynos_dsi_transfer {
215 struct list_head list;
216 struct completion completed;
217 int result;
218 u8 data_id;
219 u8 data[2];
220 u16 flags;
221
222 const u8 *tx_payload;
223 u16 tx_len;
224 u16 tx_done;
225
226 u8 *rx_payload;
227 u16 rx_len;
228 u16 rx_done;
229};
230
231#define DSIM_STATE_ENABLED BIT(0)
232#define DSIM_STATE_INITIALIZED BIT(1)
233#define DSIM_STATE_CMD_LPM BIT(2)
234
235struct exynos_dsi {
236 struct mipi_dsi_host dsi_host;
237 struct drm_connector connector;
238 struct drm_encoder *encoder;
239 struct device_node *panel_node;
240 struct drm_panel *panel;
241 struct device *dev;
242
243 void __iomem *reg_base;
244 struct phy *phy;
245 struct clk *pll_clk;
246 struct clk *bus_clk;
247 struct regulator_bulk_data supplies[2];
248 int irq;
249
250 u32 pll_clk_rate;
251 u32 burst_clk_rate;
252 u32 esc_clk_rate;
253 u32 lanes;
254 u32 mode_flags;
255 u32 format;
256 struct videomode vm;
257
258 int state;
259 struct drm_property *brightness;
260 struct completion completed;
261
262 spinlock_t transfer_lock; /* protects transfer_list */
263 struct list_head transfer_list;
264};
265
266#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
267#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
268
269static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
270{
271 if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
272 return;
273
274 dev_err(dsi->dev, "timeout waiting for reset\n");
275}
276
277static void exynos_dsi_reset(struct exynos_dsi *dsi)
278{
279 reinit_completion(&dsi->completed);
280 writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG);
281}
282
283#ifndef MHZ
284#define MHZ (1000*1000)
285#endif
286
287static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
288 unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
289{
290 unsigned long best_freq = 0;
291 u32 min_delta = 0xffffffff;
292 u8 p_min, p_max;
293 u8 _p, uninitialized_var(best_p);
294 u16 _m, uninitialized_var(best_m);
295 u8 _s, uninitialized_var(best_s);
296
297 p_min = DIV_ROUND_UP(fin, (12 * MHZ));
298 p_max = fin / (6 * MHZ);
299
300 for (_p = p_min; _p <= p_max; ++_p) {
301 for (_s = 0; _s <= 5; ++_s) {
302 u64 tmp;
303 u32 delta;
304
305 tmp = (u64)fout * (_p << _s);
306 do_div(tmp, fin);
307 _m = tmp;
308 if (_m < 41 || _m > 125)
309 continue;
310
311 tmp = (u64)_m * fin;
312 do_div(tmp, _p);
313 if (tmp < 500 * MHZ || tmp > 1000 * MHZ)
314 continue;
315
316 tmp = (u64)_m * fin;
317 do_div(tmp, _p << _s);
318
319 delta = abs(fout - tmp);
320 if (delta < min_delta) {
321 best_p = _p;
322 best_m = _m;
323 best_s = _s;
324 min_delta = delta;
325 best_freq = tmp;
326 }
327 }
328 }
329
330 if (best_freq) {
331 *p = best_p;
332 *m = best_m;
333 *s = best_s;
334 }
335
336 return best_freq;
337}
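
The search implements fout = M * fin / (P * 2^S) with M restricted to [41, 125], the VCO frequency M * fin / P restricted to [500, 1000] MHz, and the reference fin / P to [6, 12] MHz. A stand-alone host-side sketch of the same search, useful for checking candidate PMS values (plain C, illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define MHZ (1000UL * 1000UL)

static unsigned long find_pms(unsigned long fin, unsigned long fout,
			      unsigned *p, unsigned *m, unsigned *s)
{
	unsigned long best = 0, min_delta = ~0UL;
	unsigned _p, _s;

	for (_p = (fin + 12 * MHZ - 1) / (12 * MHZ); _p <= fin / (6 * MHZ); ++_p) {
		for (_s = 0; _s <= 5; ++_s) {
			unsigned _m = (uint64_t)fout * (_p << _s) / fin;
			if (_m < 41 || _m > 125)
				continue;
			uint64_t vco = (uint64_t)_m * fin / _p;
			if (vco < 500 * MHZ || vco > 1000 * MHZ)
				continue;
			unsigned long f = vco >> _s;
			unsigned long delta = f > fout ? f - fout : fout - f;
			if (delta < min_delta) {
				*p = _p; *m = _m; *s = _s;
				min_delta = delta;
				best = f;
			}
		}
	}
	return best;
}

int main(void)
{
	unsigned p = 0, m = 0, s = 0;
	/* fin = 24 MHz, requested 500 MHz: exact match with P=3, M=125, S=1 */
	printf("fout=%lu p=%u m=%u s=%u\n",
	       find_pms(24 * MHZ, 500 * MHZ, &p, &m, &s), p, m, s);
	return 0;
}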
338
339static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
340 unsigned long freq)
341{
342 static const unsigned long freq_bands[] = {
343 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
344 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
345 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
346 770 * MHZ, 870 * MHZ, 950 * MHZ,
347 };
348 unsigned long fin, fout;
349 int timeout, band;
350 u8 p, s;
351 u16 m;
352 u32 reg;
353
354 clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate);
355
356 fin = clk_get_rate(dsi->pll_clk);
357 if (!fin) {
358 dev_err(dsi->dev, "failed to get PLL clock frequency\n");
359 return 0;
360 }
361
362 dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin);
363
364 fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
365 if (!fout) {
366 dev_err(dsi->dev,
367 "failed to find PLL PMS for requested frequency\n");
368 return 0;
369 }
370
371 for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
372 if (fout < freq_bands[band])
373 break;
374
375 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
376 p, m, s, band);
377
378 writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
379
380 reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
381 | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
382 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
383
384 timeout = 1000;
385 do {
386 if (timeout-- == 0) {
387 dev_err(dsi->dev, "PLL failed to stabilize\n");
388 return 0;
389 }
390 reg = readl(dsi->reg_base + DSIM_STATUS_REG);
391 } while ((reg & DSIM_PLL_STABLE) == 0);
392
393 return fout;
394}
395
396static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
397{
398 unsigned long hs_clk, byte_clk, esc_clk;
399 unsigned long esc_div;
400 u32 reg;
401
402 hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
403 if (!hs_clk) {
404 dev_err(dsi->dev, "failed to configure DSI PLL\n");
405 return -EFAULT;
406 }
407
408 byte_clk = hs_clk / 8;
409 esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
410 esc_clk = byte_clk / esc_div;
411
412 if (esc_clk > 20 * MHZ) {
413 ++esc_div;
414 esc_clk = byte_clk / esc_div;
415 }
416
417 dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
418 hs_clk, byte_clk, esc_clk);
419
420 reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
421 reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
422 | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
423 | DSIM_BYTE_CLK_SRC_MASK);
424 reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
425 | DSIM_ESC_PRESCALER(esc_div)
426 | DSIM_LANE_ESC_CLK_EN_CLK
427 | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
428 | DSIM_BYTE_CLK_SRC(0)
429 | DSIM_TX_REQUEST_HSCLK;
430 writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
431
432 return 0;
433}
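
The escape clock is derived from the byte clock by an integer divider and kept at or below 20 MHz; a worked example, assuming a 500 MHz HS clock and a requested esc_clk_rate of 20 MHz:

	byte_clk = 500 MHz / 8     = 62.5 MHz
	esc_div  = ceil(62.5 / 20) = 4
	esc_clk  = 62.5 MHz / 4    = 15.625 MHz   (already <= 20 MHz, so the
	                                           divider is not bumped)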
434
435static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
436{
437 u32 reg;
438
439 reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
440 reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
441 | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
442 writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
443
444 reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG);
445 reg &= ~DSIM_PLL_EN;
446 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
447}
448
449static int exynos_dsi_init_link(struct exynos_dsi *dsi)
450{
451 int timeout;
452 u32 reg;
453 u32 lanes_mask;
454
455 /* Initialize FIFO pointers */
456 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
457 reg &= ~0x1f;
458 writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
459
460 usleep_range(9000, 11000);
461
462 reg |= 0x1f;
463 writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
464
465 usleep_range(9000, 11000);
466
467 /* DSI configuration */
468 reg = 0;
469
470 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
471 reg |= DSIM_VIDEO_MODE;
472
473 if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
474 reg |= DSIM_MFLUSH_VS;
475 if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
476 reg |= DSIM_EOT_DISABLE;
477 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
478 reg |= DSIM_SYNC_INFORM;
479 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
480 reg |= DSIM_BURST_MODE;
481 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
482 reg |= DSIM_AUTO_MODE;
483 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
484 reg |= DSIM_HSE_MODE;
485 if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP))
486 reg |= DSIM_HFP_MODE;
487 if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP))
488 reg |= DSIM_HBP_MODE;
489 if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA))
490 reg |= DSIM_HSA_MODE;
491 }
492
493 switch (dsi->format) {
494 case MIPI_DSI_FMT_RGB888:
495 reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
496 break;
497 case MIPI_DSI_FMT_RGB666:
498 reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
499 break;
500 case MIPI_DSI_FMT_RGB666_PACKED:
501 reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
502 break;
503 case MIPI_DSI_FMT_RGB565:
504 reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
505 break;
506 default:
507 dev_err(dsi->dev, "invalid pixel format\n");
508 return -EINVAL;
509 }
510
511 reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1);
512
513 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
514
515 reg |= DSIM_LANE_EN_CLK;
516 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
517
518 lanes_mask = BIT(dsi->lanes) - 1;
519 reg |= DSIM_LANE_EN(lanes_mask);
520 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
521
522 /* Check that the clock and data lanes are in stop state */
523 timeout = 100;
524 do {
525 if (timeout-- == 0) {
526 dev_err(dsi->dev, "waiting for bus lanes timed out\n");
527 return -EFAULT;
528 }
529
530 reg = readl(dsi->reg_base + DSIM_STATUS_REG);
531 if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
532 != DSIM_STOP_STATE_DAT(lanes_mask))
533 continue;
534 } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
535
536 reg = readl(dsi->reg_base + DSIM_ESCMODE_REG);
537 reg &= ~DSIM_STOP_STATE_CNT_MASK;
538 reg |= DSIM_STOP_STATE_CNT(0xf);
539 writel(reg, dsi->reg_base + DSIM_ESCMODE_REG);
540
541 reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
542 writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG);
543
544 return 0;
545}
546
547static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
548{
549 struct videomode *vm = &dsi->vm;
550 u32 reg;
551
552 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
553 reg = DSIM_CMD_ALLOW(0xf)
554 | DSIM_STABLE_VFP(vm->vfront_porch)
555 | DSIM_MAIN_VBP(vm->vback_porch);
556 writel(reg, dsi->reg_base + DSIM_MVPORCH_REG);
557
558 reg = DSIM_MAIN_HFP(vm->hfront_porch)
559 | DSIM_MAIN_HBP(vm->hback_porch);
560 writel(reg, dsi->reg_base + DSIM_MHPORCH_REG);
561
562 reg = DSIM_MAIN_VSA(vm->vsync_len)
563 | DSIM_MAIN_HSA(vm->hsync_len);
564 writel(reg, dsi->reg_base + DSIM_MSYNC_REG);
565 }
566
567 reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive);
568 writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
569
570 dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
571}
572
573static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
574{
575 u32 reg;
576
577 reg = readl(dsi->reg_base + DSIM_MDRESOL_REG);
578 if (enable)
579 reg |= DSIM_MAIN_STAND_BY;
580 else
581 reg &= ~DSIM_MAIN_STAND_BY;
582 writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
583}
584
585static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
586{
587 int timeout = 2000;
588
589 do {
590 u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
591
592 if (!(reg & DSIM_SFR_HEADER_FULL))
593 return 0;
594
595 if (!cond_resched())
596 usleep_range(950, 1050);
597 } while (--timeout);
598
599 return -ETIMEDOUT;
600}
601
602static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
603{
604 u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
605
606 if (lpm)
607 v |= DSIM_CMD_LPDT_LP;
608 else
609 v &= ~DSIM_CMD_LPDT_LP;
610
611 writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
612}
613
614static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
615{
616 u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
617
618 v |= DSIM_FORCE_BTA;
619 writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
620}
621
622static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
623 struct exynos_dsi_transfer *xfer)
624{
625 struct device *dev = dsi->dev;
626 const u8 *payload = xfer->tx_payload + xfer->tx_done;
627 u16 length = xfer->tx_len - xfer->tx_done;
628 bool first = !xfer->tx_done;
629 u32 reg;
630
631 dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
632 xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
633
634 if (length > DSI_TX_FIFO_SIZE)
635 length = DSI_TX_FIFO_SIZE;
636
637 xfer->tx_done += length;
638
639 /* Send payload */
640 while (length >= 4) {
641 reg = (payload[3] << 24) | (payload[2] << 16)
642 | (payload[1] << 8) | payload[0];
643 writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
644 payload += 4;
645 length -= 4;
646 }
647
648 reg = 0;
649 switch (length) {
650 case 3:
651 reg |= payload[2] << 16;
652 /* Fall through */
653 case 2:
654 reg |= payload[1] << 8;
655 /* Fall through */
656 case 1:
657 reg |= payload[0];
658 writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
659 break;
660 case 0:
661 /* Do nothing */
662 break;
663 }
664
665 /* Send packet header */
666 if (!first)
667 return;
668
669 reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id;
670 if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
671 dev_err(dev, "waiting for header FIFO timed out\n");
672 return;
673 }
674
675 if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
676 dsi->state & DSIM_STATE_CMD_LPM)) {
677 exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
678 dsi->state ^= DSIM_STATE_CMD_LPM;
679 }
680
681 writel(reg, dsi->reg_base + DSIM_PKTHDR_REG);
682
683 if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
684 exynos_dsi_force_bta(dsi);
685}
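
A worked example of the packing above (illustrative): a 6-byte DCS long write, type MIPI_DSI_DCS_LONG_WRITE (0x39) on virtual channel 0 with payload { 0xb0, 0x11, 0x22, 0x33, 0x44, 0x55 }, results in the register writes

	DSIM_PAYLOAD_REG <- 0x332211b0   /* first four bytes, LSB first */
	DSIM_PAYLOAD_REG <- 0x00005544   /* two remaining bytes */
	DSIM_PKTHDR_REG  <- 0x00000639   /* data_id 0x39, word count 0x0006 */

with the header written last, since writing it is what queues the packet for transmission.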
686
687static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
688 struct exynos_dsi_transfer *xfer)
689{
690 u8 *payload = xfer->rx_payload + xfer->rx_done;
691 bool first = !xfer->rx_done;
692 struct device *dev = dsi->dev;
693 u16 length;
694 u32 reg;
695
696 if (first) {
697 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
698
699 switch (reg & 0x3f) {
700 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
701 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
702 if (xfer->rx_len >= 2) {
703 payload[1] = reg >> 16;
704 ++xfer->rx_done;
705 }
706 /* Fall through */
707 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
708 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
709 payload[0] = reg >> 8;
710 ++xfer->rx_done;
711 xfer->rx_len = xfer->rx_done;
712 xfer->result = 0;
713 goto clear_fifo;
714 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
715 dev_err(dev, "DSI Error Report: 0x%04x\n",
716 (reg >> 8) & 0xffff);
717 xfer->result = 0;
718 goto clear_fifo;
719 }
720
721 length = (reg >> 8) & 0xffff;
722 if (length > xfer->rx_len) {
723 dev_err(dev,
724 "response too long (%u > %u bytes), stripping\n",
725 length, xfer->rx_len);
726 length = xfer->rx_len;
727 } else if (length < xfer->rx_len)
728 xfer->rx_len = length;
729 }
730
731 length = xfer->rx_len - xfer->rx_done;
732 xfer->rx_done += length;
733
734 /* Receive payload */
735 while (length >= 4) {
736 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
737 payload[0] = (reg >> 0) & 0xff;
738 payload[1] = (reg >> 8) & 0xff;
739 payload[2] = (reg >> 16) & 0xff;
740 payload[3] = (reg >> 24) & 0xff;
741 payload += 4;
742 length -= 4;
743 }
744
745 if (length) {
746 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
747 switch (length) {
748 case 3:
749 payload[2] = (reg >> 16) & 0xff;
750 /* Fall through */
751 case 2:
752 payload[1] = (reg >> 8) & 0xff;
753 /* Fall through */
754 case 1:
755 payload[0] = reg & 0xff;
756 }
757 }
758
759 if (xfer->rx_done == xfer->rx_len)
760 xfer->result = 0;
761
762clear_fifo:
763 length = DSI_RX_FIFO_SIZE / 4;
764 do {
765 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
766 if (reg == DSI_RX_FIFO_EMPTY)
767 break;
768 } while (--length);
769}
770
771static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
772{
773 unsigned long flags;
774 struct exynos_dsi_transfer *xfer;
775 bool start = false;
776
777again:
778 spin_lock_irqsave(&dsi->transfer_lock, flags);
779
780 if (list_empty(&dsi->transfer_list)) {
781 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
782 return;
783 }
784
785 xfer = list_first_entry(&dsi->transfer_list,
786 struct exynos_dsi_transfer, list);
787
788 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
789
790 if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
791 /* waiting for RX */
792 return;
793
794 exynos_dsi_send_to_fifo(dsi, xfer);
795
796 if (xfer->tx_len || xfer->rx_len)
797 return;
798
799 xfer->result = 0;
800 complete(&xfer->completed);
801
802 spin_lock_irqsave(&dsi->transfer_lock, flags);
803
804 list_del_init(&xfer->list);
805 start = !list_empty(&dsi->transfer_list);
806
807 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
808
809 if (start)
810 goto again;
811}
812
813static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
814{
815 struct exynos_dsi_transfer *xfer;
816 unsigned long flags;
817 bool start = true;
818
819 spin_lock_irqsave(&dsi->transfer_lock, flags);
820
821 if (list_empty(&dsi->transfer_list)) {
822 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
823 return false;
824 }
825
826 xfer = list_first_entry(&dsi->transfer_list,
827 struct exynos_dsi_transfer, list);
828
829 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
830
831 dev_dbg(dsi->dev,
832 "> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
833 xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
834
835 if (xfer->tx_done != xfer->tx_len)
836 return true;
837
838 if (xfer->rx_done != xfer->rx_len)
839 exynos_dsi_read_from_fifo(dsi, xfer);
840
841 if (xfer->rx_done != xfer->rx_len)
842 return true;
843
844 spin_lock_irqsave(&dsi->transfer_lock, flags);
845
846 list_del_init(&xfer->list);
847 start = !list_empty(&dsi->transfer_list);
848
849 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
850
851 if (!xfer->rx_len)
852 xfer->result = 0;
853 complete(&xfer->completed);
854
855 return start;
856}
857
858static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
859 struct exynos_dsi_transfer *xfer)
860{
861 unsigned long flags;
862 bool start;
863
864 spin_lock_irqsave(&dsi->transfer_lock, flags);
865
866 if (!list_empty(&dsi->transfer_list) &&
867 xfer == list_first_entry(&dsi->transfer_list,
868 struct exynos_dsi_transfer, list)) {
869 list_del_init(&xfer->list);
870 start = !list_empty(&dsi->transfer_list);
871 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
872 if (start)
873 exynos_dsi_transfer_start(dsi);
874 return;
875 }
876
877 list_del_init(&xfer->list);
878
879 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
880}
881
882static int exynos_dsi_transfer(struct exynos_dsi *dsi,
883 struct exynos_dsi_transfer *xfer)
884{
885 unsigned long flags;
886 bool stopped;
887
888 xfer->tx_done = 0;
889 xfer->rx_done = 0;
890 xfer->result = -ETIMEDOUT;
891 init_completion(&xfer->completed);
892
893 spin_lock_irqsave(&dsi->transfer_lock, flags);
894
895 stopped = list_empty(&dsi->transfer_list);
896 list_add_tail(&xfer->list, &dsi->transfer_list);
897
898 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
899
900 if (stopped)
901 exynos_dsi_transfer_start(dsi);
902
903 wait_for_completion_timeout(&xfer->completed,
904 msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
905 if (xfer->result == -ETIMEDOUT) {
906 exynos_dsi_remove_transfer(dsi, xfer);
907 dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data,
908 xfer->tx_len, xfer->tx_payload);
909 return -ETIMEDOUT;
910 }
911
912 /* Also covers hardware timeout condition */
913 return xfer->result;
914}
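
For reference, a sketch of how a caller drives this queue (illustrative only; the my_* function is hypothetical, the MIPI_* constants come from video/mipi_display.h and drm_mipi_dsi.h). A stack-allocated transfer is safe here because exynos_dsi_transfer() does not return until the transfer has completed or has been removed from the list on timeout:

static int my_dcs_set_display_on(struct exynos_dsi *dsi)
{
	struct exynos_dsi_transfer xfer = {
		.data_id = MIPI_DSI_DCS_SHORT_WRITE,	/* 0x05, vc 0 */
		.data = { MIPI_DCS_SET_DISPLAY_ON, 0 },
		.flags = MIPI_DSI_MSG_USE_LPM,
	};

	/* tx_len stays 0: a short packet carries its payload in the header */
	return exynos_dsi_transfer(dsi, &xfer);
}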
915
916static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
917{
918 struct exynos_dsi *dsi = dev_id;
919 u32 status;
920
921 status = readl(dsi->reg_base + DSIM_INTSRC_REG);
922 if (!status) {
923 static unsigned long j;
924 if (printk_timed_ratelimit(&j, 500))
925 dev_warn(dsi->dev, "spurious interrupt\n");
926 return IRQ_HANDLED;
927 }
928 writel(status, dsi->reg_base + DSIM_INTSRC_REG);
929
930 if (status & DSIM_INT_SW_RST_RELEASE) {
931 u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY);
932 writel(mask, dsi->reg_base + DSIM_INTMSK_REG);
933 complete(&dsi->completed);
934 return IRQ_HANDLED;
935 }
936
937 if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY)))
938 return IRQ_HANDLED;
939
940 if (exynos_dsi_transfer_finish(dsi))
941 exynos_dsi_transfer_start(dsi);
942
943 return IRQ_HANDLED;
944}
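
The mask written on reset release enables exactly the two sources the transfer path needs; worked out (illustrative):

	~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY)
	  = ~((1 << 18) | (1 << 29))
	  = 0xdffbffff	/* everything masked except RX done and TX FIFO empty */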
945
946static int exynos_dsi_init(struct exynos_dsi *dsi)
947{
948 exynos_dsi_enable_clock(dsi);
949 exynos_dsi_reset(dsi);
950 enable_irq(dsi->irq);
951 exynos_dsi_wait_for_reset(dsi);
952 exynos_dsi_init_link(dsi);
953
954 return 0;
955}
956
957static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
958 struct mipi_dsi_device *device)
959{
960 struct exynos_dsi *dsi = host_to_dsi(host);
961
962 dsi->lanes = device->lanes;
963 dsi->format = device->format;
964 dsi->mode_flags = device->mode_flags;
965 dsi->panel_node = device->dev.of_node;
966
967 if (dsi->connector.dev)
968 drm_helper_hpd_irq_event(dsi->connector.dev);
969
970 return 0;
971}
972
973static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
974 struct mipi_dsi_device *device)
975{
976 struct exynos_dsi *dsi = host_to_dsi(host);
977
978 dsi->panel_node = NULL;
979
980 if (dsi->connector.dev)
981 drm_helper_hpd_irq_event(dsi->connector.dev);
982
983 return 0;
984}
985
986/* distinguish between short and long DSI packet types */
987static bool exynos_dsi_is_short_dsi_type(u8 type)
988{
989 return (type & 0x0f) <= 8;
990}
991
992static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
993 struct mipi_dsi_msg *msg)
994{
995 struct exynos_dsi *dsi = host_to_dsi(host);
996 struct exynos_dsi_transfer xfer;
997 int ret;
998
999 if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
1000 ret = exynos_dsi_init(dsi);
1001 if (ret)
1002 return ret;
1003 dsi->state |= DSIM_STATE_INITIALIZED;
1004 }
1005
1006 if (msg->tx_len == 0)
1007 return -EINVAL;
1008
1009 xfer.data_id = msg->type | (msg->channel << 6);
1010
1011 if (exynos_dsi_is_short_dsi_type(msg->type)) {
1012 const char *tx_buf = msg->tx_buf;
1013
1014 if (msg->tx_len > 2)
1015 return -EINVAL;
1016 xfer.tx_len = 0;
1017 xfer.data[0] = tx_buf[0];
1018 xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
1019 } else {
1020 xfer.tx_len = msg->tx_len;
1021 xfer.data[0] = msg->tx_len & 0xff;
1022 xfer.data[1] = msg->tx_len >> 8;
1023 xfer.tx_payload = msg->tx_buf;
1024 }
1025
1026 xfer.rx_len = msg->rx_len;
1027 xfer.rx_payload = msg->rx_buf;
1028 xfer.flags = msg->flags;
1029
1030 ret = exynos_dsi_transfer(dsi, &xfer);
1031 return (ret < 0) ? ret : xfer.rx_done;
1032}
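
For illustration, a message as a panel driver might hand it to this hook, assuming the struct mipi_dsi_msg layout from drm_mipi_dsi.h in this series (status_buf is a hypothetical 1-byte buffer). Type 0x06 is a short packet, so tx_buf[0] travels in the packet header and xfer.tx_len stays 0, while rx_len routes the transfer through the RX path:

struct mipi_dsi_msg msg = {
	.type = MIPI_DSI_DCS_READ,			/* 0x06 */
	.channel = 0,
	.tx_buf = (u8[]){ MIPI_DCS_GET_POWER_MODE },	/* DCS 0x0a */
	.tx_len = 1,
	.rx_buf = status_buf,
	.rx_len = 1,
	.flags = MIPI_DSI_MSG_USE_LPM | MIPI_DSI_MSG_REQ_ACK,
};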
1033
1034static const struct mipi_dsi_host_ops exynos_dsi_ops = {
1035 .attach = exynos_dsi_host_attach,
1036 .detach = exynos_dsi_host_detach,
1037 .transfer = exynos_dsi_host_transfer,
1038};
1039
1040static int exynos_dsi_poweron(struct exynos_dsi *dsi)
1041{
1042 int ret;
1043
1044 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1045 if (ret < 0) {
1046 dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
1047 return ret;
1048 }
1049
1050 ret = clk_prepare_enable(dsi->bus_clk);
1051 if (ret < 0) {
1052 dev_err(dsi->dev, "cannot enable bus clock %d\n", ret);
1053 goto err_bus_clk;
1054 }
1055
1056 ret = clk_prepare_enable(dsi->pll_clk);
1057 if (ret < 0) {
1058 dev_err(dsi->dev, "cannot enable pll clock %d\n", ret);
1059 goto err_pll_clk;
1060 }
1061
1062 ret = phy_power_on(dsi->phy);
1063 if (ret < 0) {
1064 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1065 goto err_phy;
1066 }
1067
1068 return 0;
1069
1070err_phy:
1071 clk_disable_unprepare(dsi->pll_clk);
1072err_pll_clk:
1073 clk_disable_unprepare(dsi->bus_clk);
1074err_bus_clk:
1075 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1076
1077 return ret;
1078}
1079
1080static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1081{
1082 int ret;
1083
1084 usleep_range(10000, 20000);
1085
1086 if (dsi->state & DSIM_STATE_INITIALIZED) {
1087 dsi->state &= ~DSIM_STATE_INITIALIZED;
1088
1089 exynos_dsi_disable_clock(dsi);
1090
1091 disable_irq(dsi->irq);
1092 }
1093
1094 dsi->state &= ~DSIM_STATE_CMD_LPM;
1095
1096 phy_power_off(dsi->phy);
1097
1098 clk_disable_unprepare(dsi->pll_clk);
1099 clk_disable_unprepare(dsi->bus_clk);
1100
1101 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1102 if (ret < 0)
1103 dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
1104}
1105
1106static int exynos_dsi_enable(struct exynos_dsi *dsi)
1107{
1108 int ret;
1109
1110 if (dsi->state & DSIM_STATE_ENABLED)
1111 return 0;
1112
1113 ret = exynos_dsi_poweron(dsi);
1114 if (ret < 0)
1115 return ret;
1116
1117 ret = drm_panel_enable(dsi->panel);
1118 if (ret < 0) {
1119 exynos_dsi_poweroff(dsi);
1120 return ret;
1121 }
1122
1123 exynos_dsi_set_display_mode(dsi);
1124 exynos_dsi_set_display_enable(dsi, true);
1125
1126 dsi->state |= DSIM_STATE_ENABLED;
1127
1128 return 0;
1129}
1130
1131static void exynos_dsi_disable(struct exynos_dsi *dsi)
1132{
1133 if (!(dsi->state & DSIM_STATE_ENABLED))
1134 return;
1135
1136 exynos_dsi_set_display_enable(dsi, false);
1137 drm_panel_disable(dsi->panel);
1138 exynos_dsi_poweroff(dsi);
1139
1140 dsi->state &= ~DSIM_STATE_ENABLED;
1141}
1142
1143static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
1144{
1145 struct exynos_dsi *dsi = display->ctx;
1146
1147 if (dsi->panel) {
1148 switch (mode) {
1149 case DRM_MODE_DPMS_ON:
1150 exynos_dsi_enable(dsi);
1151 break;
1152 case DRM_MODE_DPMS_STANDBY:
1153 case DRM_MODE_DPMS_SUSPEND:
1154 case DRM_MODE_DPMS_OFF:
1155 exynos_dsi_disable(dsi);
1156 break;
1157 default:
1158 break;
1159 }
1160 }
1161}
1162
1163static enum drm_connector_status
1164exynos_dsi_detect(struct drm_connector *connector, bool force)
1165{
1166 struct exynos_dsi *dsi = connector_to_dsi(connector);
1167
1168 if (!dsi->panel) {
1169 dsi->panel = of_drm_find_panel(dsi->panel_node);
1170 if (dsi->panel)
1171 drm_panel_attach(dsi->panel, &dsi->connector);
1172 } else if (!dsi->panel_node) {
1173 struct exynos_drm_display *display;
1174
1175 display = platform_get_drvdata(to_platform_device(dsi->dev));
1176 exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
1177 drm_panel_detach(dsi->panel);
1178 dsi->panel = NULL;
1179 }
1180
1181 if (dsi->panel)
1182 return connector_status_connected;
1183
1184 return connector_status_disconnected;
1185}
1186
1187static void exynos_dsi_connector_destroy(struct drm_connector *connector)
1188{
1189}
1190
1191static struct drm_connector_funcs exynos_dsi_connector_funcs = {
1192 .dpms = drm_helper_connector_dpms,
1193 .detect = exynos_dsi_detect,
1194 .fill_modes = drm_helper_probe_single_connector_modes,
1195 .destroy = exynos_dsi_connector_destroy,
1196};
1197
1198static int exynos_dsi_get_modes(struct drm_connector *connector)
1199{
1200 struct exynos_dsi *dsi = connector_to_dsi(connector);
1201
1202 if (dsi->panel)
1203 return dsi->panel->funcs->get_modes(dsi->panel);
1204
1205 return 0;
1206}
1207
1208static int exynos_dsi_mode_valid(struct drm_connector *connector,
1209 struct drm_display_mode *mode)
1210{
1211 return MODE_OK;
1212}
1213
1214static struct drm_encoder *
1215exynos_dsi_best_encoder(struct drm_connector *connector)
1216{
1217 struct exynos_dsi *dsi = connector_to_dsi(connector);
1218
1219 return dsi->encoder;
1220}
1221
1222static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1223 .get_modes = exynos_dsi_get_modes,
1224 .mode_valid = exynos_dsi_mode_valid,
1225 .best_encoder = exynos_dsi_best_encoder,
1226};
1227
1228static int exynos_dsi_create_connector(struct exynos_drm_display *display,
1229 struct drm_encoder *encoder)
1230{
1231 struct exynos_dsi *dsi = display->ctx;
1232 struct drm_connector *connector = &dsi->connector;
1233 int ret;
1234
1235 dsi->encoder = encoder;
1236
1237 connector->polled = DRM_CONNECTOR_POLL_HPD;
1238
1239 ret = drm_connector_init(encoder->dev, connector,
1240 &exynos_dsi_connector_funcs,
1241 DRM_MODE_CONNECTOR_DSI);
1242 if (ret) {
1243 DRM_ERROR("Failed to initialize connector with drm\n");
1244 return ret;
1245 }
1246
1247 drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
1248 drm_sysfs_connector_add(connector);
1249 drm_mode_connector_attach_encoder(connector, encoder);
1250
1251 return 0;
1252}
1253
1254static void exynos_dsi_mode_set(struct exynos_drm_display *display,
1255 struct drm_display_mode *mode)
1256{
1257 struct exynos_dsi *dsi = display->ctx;
1258 struct videomode *vm = &dsi->vm;
1259
1260 vm->hactive = mode->hdisplay;
1261 vm->vactive = mode->vdisplay;
1262 vm->vfront_porch = mode->vsync_start - mode->vdisplay;
1263 vm->vback_porch = mode->vtotal - mode->vsync_end;
1264 vm->vsync_len = mode->vsync_end - mode->vsync_start;
1265 vm->hfront_porch = mode->hsync_start - mode->hdisplay;
1266 vm->hback_porch = mode->htotal - mode->hsync_end;
1267 vm->hsync_len = mode->hsync_end - mode->hsync_start;
1268}
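
A worked example of the conversion (illustrative), using the standard 720p60 timing (hdisplay 1280, hsync_start 1390, hsync_end 1430, htotal 1650; vdisplay 720, vsync_start 725, vsync_end 730, vtotal 750):

	hfront_porch = 1390 - 1280 = 110
	hsync_len    = 1430 - 1390 =  40
	hback_porch  = 1650 - 1430 = 220
	vfront_porch =  725 -  720 =   5
	vsync_len    =  730 -  725 =   5
	vback_porch  =  750 -  730 =  20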
1269
1270static struct exynos_drm_display_ops exynos_dsi_display_ops = {
1271 .create_connector = exynos_dsi_create_connector,
1272 .mode_set = exynos_dsi_mode_set,
1273 .dpms = exynos_dsi_dpms
1274};
1275
1276static struct exynos_drm_display exynos_dsi_display = {
1277 .type = EXYNOS_DISPLAY_TYPE_LCD,
1278 .ops = &exynos_dsi_display_ops,
1279};
1280
1281/* of_* functions will be removed after merge of of_graph patches */
1282static struct device_node *
1283of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
1284{
1285 struct device_node *np;
1286
1287 for_each_child_of_node(parent, np) {
1288 u32 r;
1289
1290 if (!np->name || of_node_cmp(np->name, name))
1291 continue;
1292
1293 if (of_property_read_u32(np, "reg", &r) < 0)
1294 r = 0;
1295
1296 if (reg == r)
1297 break;
1298 }
1299
1300 return np;
1301}
1302
1303static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
1304 u32 reg)
1305{
1306 struct device_node *ports, *port;
1307
1308 ports = of_get_child_by_name(parent, "ports");
1309 if (ports)
1310 parent = ports;
1311
1312 port = of_get_child_by_name_reg(parent, "port", reg);
1313
1314 of_node_put(ports);
1315
1316 return port;
1317}
1318
1319static struct device_node *
1320of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
1321{
1322 return of_get_child_by_name_reg(port, "endpoint", reg);
1323}
1324
1325static int exynos_dsi_of_read_u32(const struct device_node *np,
1326 const char *propname, u32 *out_value)
1327{
1328 int ret = of_property_read_u32(np, propname, out_value);
1329
1330 if (ret < 0)
1331 pr_err("%s: failed to get '%s' property\n", np->full_name,
1332 propname);
1333
1334 return ret;
1335}
1336
1337enum {
1338 DSI_PORT_IN,
1339 DSI_PORT_OUT
1340};
1341
1342static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1343{
1344 struct device *dev = dsi->dev;
1345 struct device_node *node = dev->of_node;
1346 struct device_node *port, *ep;
1347 int ret;
1348
1349 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
1350 &dsi->pll_clk_rate);
1351 if (ret < 0)
1352 return ret;
1353
1354 port = of_graph_get_port_by_reg(node, DSI_PORT_OUT);
1355 if (!port) {
1356 dev_err(dev, "no output port specified\n");
1357 return -EINVAL;
1358 }
1359
1360 ep = of_graph_get_endpoint_by_reg(port, 0);
1361 of_node_put(port);
1362 if (!ep) {
1363 dev_err(dev, "no endpoint specified in output port\n");
1364 return -EINVAL;
1365 }
1366
1367 ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
1368 &dsi->burst_clk_rate);
1369 if (ret < 0)
1370 goto end;
1371
1372 ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
1373 &dsi->esc_clk_rate);
1374
1375end:
1376 of_node_put(ep);
1377
1378 return ret;
1379}
1380
1381static int exynos_dsi_probe(struct platform_device *pdev)
1382{
1383 struct resource *res;
1384 struct exynos_dsi *dsi;
1385 int ret;
1386
1387 dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
1388 if (!dsi) {
1389 dev_err(&pdev->dev, "failed to allocate dsi object.\n");
1390 return -ENOMEM;
1391 }
1392
1393 init_completion(&dsi->completed);
1394 spin_lock_init(&dsi->transfer_lock);
1395 INIT_LIST_HEAD(&dsi->transfer_list);
1396
1397 dsi->dsi_host.ops = &exynos_dsi_ops;
1398 dsi->dsi_host.dev = &pdev->dev;
1399
1400 dsi->dev = &pdev->dev;
1401
1402 ret = exynos_dsi_parse_dt(dsi);
1403 if (ret)
1404 return ret;
1405
1406 dsi->supplies[0].supply = "vddcore";
1407 dsi->supplies[1].supply = "vddio";
1408 ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
1409 dsi->supplies);
1410 if (ret) {
1411 dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
1412 return -EPROBE_DEFER;
1413 }
1414
1415 dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
1416 if (IS_ERR(dsi->pll_clk)) {
1417 dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
1418 return -EPROBE_DEFER;
1419 }
1420
1421 dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
1422 if (IS_ERR(dsi->bus_clk)) {
1423 dev_info(&pdev->dev, "failed to get dsi bus clock\n");
1424 return -EPROBE_DEFER;
1425 }
1426
1427 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1428 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
1429 if (IS_ERR(dsi->reg_base)) {
1430 dev_err(&pdev->dev, "failed to remap io region\n");
1431 return PTR_ERR(dsi->reg_base);
1432 }
1433
1434 dsi->phy = devm_phy_get(&pdev->dev, "dsim");
1435 if (IS_ERR(dsi->phy)) {
1436 dev_info(&pdev->dev, "failed to get dsim phy\n");
1437 return -EPROBE_DEFER;
1438 }
1439
1440 dsi->irq = platform_get_irq(pdev, 0);
1441 if (dsi->irq < 0) {
1442 dev_err(&pdev->dev, "failed to request dsi irq resource\n");
1443 return dsi->irq;
1444 }
1445
1446 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
1447 ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
1448 exynos_dsi_irq, IRQF_ONESHOT,
1449 dev_name(&pdev->dev), dsi);
1450 if (ret) {
1451 dev_err(&pdev->dev, "failed to request dsi irq\n");
1452 return ret;
1453 }
1454
1455 exynos_dsi_display.ctx = dsi;
1456
1457 platform_set_drvdata(pdev, &exynos_dsi_display);
1458 exynos_drm_display_register(&exynos_dsi_display);
1459
1460 return mipi_dsi_host_register(&dsi->dsi_host);
1461}
1462
1463static int exynos_dsi_remove(struct platform_device *pdev)
1464{
1465 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1466
1467 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1468
1469 exynos_drm_display_unregister(&exynos_dsi_display);
1470 mipi_dsi_host_unregister(&dsi->dsi_host);
1471
1472 return 0;
1473}
1474
1475#ifdef CONFIG_PM_SLEEP
1476static int exynos_dsi_resume(struct device *dev)
1477{
1478 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1479
1480 if (dsi->state & DSIM_STATE_ENABLED) {
1481 dsi->state &= ~DSIM_STATE_ENABLED;
1482 exynos_dsi_enable(dsi);
1483 }
1484
1485 return 0;
1486}
1487
1488static int exynos_dsi_suspend(struct device *dev)
1489{
1490 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1491
1492 if (dsi->state & DSIM_STATE_ENABLED) {
1493 exynos_dsi_disable(dsi);
1494 dsi->state |= DSIM_STATE_ENABLED;
1495 }
1496
1497 return 0;
1498}
1499#endif
1500
1501static const struct dev_pm_ops exynos_dsi_pm_ops = {
1502 SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
1503};
1504
1505static const struct of_device_id exynos_dsi_of_match[] = {
1506 { .compatible = "samsung,exynos4210-mipi-dsi" },
1507 { }
1508};
1509
1510struct platform_driver dsi_driver = {
1511 .probe = exynos_dsi_probe,
1512 .remove = exynos_dsi_remove,
1513 .driver = {
1514 .name = "exynos-dsi",
1515 .owner = THIS_MODULE,
1516 .pm = &exynos_dsi_pm_ops,
1517 .of_match_table = exynos_dsi_of_match,
1518 },
1519};
1520
1521MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
1522MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
1523MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master");
1524MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 06f1b2a09da7..7e282e3d6038 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -17,7 +17,6 @@
17 17
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
21 20
22#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ 21#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
23 drm_encoder) 22 drm_encoder)
@@ -26,72 +25,22 @@
26 * exynos specific encoder structure. 25 * exynos specific encoder structure.
27 * 26 *
28 * @drm_encoder: encoder object. 27 * @drm_encoder: encoder object.
29 * @manager: specific encoder has its own manager to control a hardware 28 * @display: the display structure that maps to this encoder
30 * appropriately and we can access a hardware drawing on this manager.
31 * @dpms: store the encoder dpms value.
32 * @updated: indicate whether overlay data updating is needed or not.
33 */ 29 */
34struct exynos_drm_encoder { 30struct exynos_drm_encoder {
35 struct drm_crtc *old_crtc;
36 struct drm_encoder drm_encoder; 31 struct drm_encoder drm_encoder;
37 struct exynos_drm_manager *manager; 32 struct exynos_drm_display *display;
38 int dpms;
39 bool updated;
40}; 33};
41 34
42static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
43{
44 struct drm_device *dev = encoder->dev;
45 struct drm_connector *connector;
46
47 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
48 if (exynos_drm_best_encoder(connector) == encoder) {
49 DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
50 connector->base.id, mode);
51
52 exynos_drm_display_power(connector, mode);
53 }
54 }
55}
56
57static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) 35static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
58{ 36{
59 struct drm_device *dev = encoder->dev;
60 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
61 struct exynos_drm_manager_ops *manager_ops = manager->ops;
62 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 37 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
38 struct exynos_drm_display *display = exynos_encoder->display;
63 39
64 DRM_DEBUG_KMS("encoder dpms: %d\n", mode); 40 DRM_DEBUG_KMS("encoder dpms: %d\n", mode);
65 41
66 if (exynos_encoder->dpms == mode) { 42 if (display->ops->dpms)
67 DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); 43 display->ops->dpms(display, mode);
68 return;
69 }
70
71 mutex_lock(&dev->struct_mutex);
72
73 switch (mode) {
74 case DRM_MODE_DPMS_ON:
75 if (manager_ops && manager_ops->apply)
76 if (!exynos_encoder->updated)
77 manager_ops->apply(manager->dev);
78
79 exynos_drm_connector_power(encoder, mode);
80 exynos_encoder->dpms = mode;
81 break;
82 case DRM_MODE_DPMS_STANDBY:
83 case DRM_MODE_DPMS_SUSPEND:
84 case DRM_MODE_DPMS_OFF:
85 exynos_drm_connector_power(encoder, mode);
86 exynos_encoder->dpms = mode;
87 exynos_encoder->updated = false;
88 break;
89 default:
90 DRM_ERROR("unspecified mode %d\n", mode);
91 break;
92 }
93
94 mutex_unlock(&dev->struct_mutex);
95} 44}
96 45
97static bool 46static bool
@@ -100,87 +49,31 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
100 struct drm_display_mode *adjusted_mode) 49 struct drm_display_mode *adjusted_mode)
101{ 50{
102 struct drm_device *dev = encoder->dev; 51 struct drm_device *dev = encoder->dev;
52 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
53 struct exynos_drm_display *display = exynos_encoder->display;
103 struct drm_connector *connector; 54 struct drm_connector *connector;
104 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
105 struct exynos_drm_manager_ops *manager_ops = manager->ops;
106 55
107 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 56 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
108 if (connector->encoder == encoder) 57 if (connector->encoder != encoder)
109 if (manager_ops && manager_ops->mode_fixup) 58 continue;
110 manager_ops->mode_fixup(manager->dev, connector, 59
111 mode, adjusted_mode); 60 if (display->ops->mode_fixup)
61 display->ops->mode_fixup(display, connector, mode,
62 adjusted_mode);
112 } 63 }
113 64
114 return true; 65 return true;
115} 66}
116 67
117static void disable_plane_to_crtc(struct drm_device *dev,
118 struct drm_crtc *old_crtc,
119 struct drm_crtc *new_crtc)
120{
121 struct drm_plane *plane;
122
123 /*
124 * if old_crtc isn't the same as encoder->crtc, the user has changed
125 * the crtc id to another one, so the plane on old_crtc should be
126 * disabled and plane->crtc should be set to new_crtc
127 * (encoder->crtc)
128 */
129 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
130 if (plane->crtc == old_crtc) {
131 /*
132 * do not change the call order below.
133 *
134 * the plane->funcs->disable_plane call checks
135 * whether encoder->crtc is the same as plane->crtc; only if
136 * they match is the overlay_ops->disable callback invoked
137 * to disable the current hw overlay, so plane->crtc must
138 * already hold new_crtc, because new_crtc was assigned to
139 * encoder->crtc in advance.
140 */
141 plane->crtc = new_crtc;
142 plane->funcs->disable_plane(plane);
143 }
144 }
145}
146
147static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, 68static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
148 struct drm_display_mode *mode, 69 struct drm_display_mode *mode,
149 struct drm_display_mode *adjusted_mode) 70 struct drm_display_mode *adjusted_mode)
150{ 71{
151 struct drm_device *dev = encoder->dev; 72 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
152 struct drm_connector *connector; 73 struct exynos_drm_display *display = exynos_encoder->display;
153 struct exynos_drm_manager *manager;
154 struct exynos_drm_manager_ops *manager_ops;
155
156 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
157 if (connector->encoder == encoder) {
158 struct exynos_drm_encoder *exynos_encoder;
159
160 exynos_encoder = to_exynos_encoder(encoder);
161
162 if (exynos_encoder->old_crtc != encoder->crtc &&
163 exynos_encoder->old_crtc) {
164
165 /*
166 * disable a plane to old crtc and change
167 * crtc of the plane to new one.
168 */
169 disable_plane_to_crtc(dev,
170 exynos_encoder->old_crtc,
171 encoder->crtc);
172 }
173
174 manager = exynos_drm_get_manager(encoder);
175 manager_ops = manager->ops;
176
177 if (manager_ops && manager_ops->mode_set)
178 manager_ops->mode_set(manager->dev,
179 adjusted_mode);
180 74
181 exynos_encoder->old_crtc = encoder->crtc; 75 if (display->ops->mode_set)
182 } 76 display->ops->mode_set(display, adjusted_mode);
183 }
184} 77}
185 78
186static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) 79static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
@@ -191,53 +84,15 @@ static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
191static void exynos_drm_encoder_commit(struct drm_encoder *encoder) 84static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
192{ 85{
193 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 86 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
194 struct exynos_drm_manager *manager = exynos_encoder->manager; 87 struct exynos_drm_display *display = exynos_encoder->display;
195 struct exynos_drm_manager_ops *manager_ops = manager->ops;
196
197 if (manager_ops && manager_ops->commit)
198 manager_ops->commit(manager->dev);
199
200 /*
201 * this avoids the overlay data being updated to the
202 * real hardware twice.
203 * The variable is also checked by the exynos_drm_encoder_dpms
204 * function to see whether the data has already been updated.
205 */
206 exynos_encoder->updated = true;
207
208 /*
209 * In the setcrtc path there is no way to update the encoder's dpms
210 * state, so update it here.
211 */
212 exynos_encoder->dpms = DRM_MODE_DPMS_ON;
213}
214 88
215void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb) 89 if (display->ops->dpms)
216{ 90 display->ops->dpms(display, DRM_MODE_DPMS_ON);
217 struct exynos_drm_encoder *exynos_encoder;
218 struct exynos_drm_manager_ops *ops;
219 struct drm_device *dev = fb->dev;
220 struct drm_encoder *encoder;
221 91
222 /* 92 if (display->ops->commit)
223 * make sure that overlay data are updated to real hardware 93 display->ops->commit(display);
224 * for all encoders.
225 */
226 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
227 exynos_encoder = to_exynos_encoder(encoder);
228 ops = exynos_encoder->manager->ops;
229
230 /*
231 * wait for vblank interrupt
232 * - this makes sure that overlay data are updated to
233 * real hardware.
234 */
235 if (ops->wait_for_vblank)
236 ops->wait_for_vblank(exynos_encoder->manager->dev);
237 }
238} 94}
239 95
240
241static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 96static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
242{ 97{
243 struct drm_plane *plane; 98 struct drm_plane *plane;
@@ -246,7 +101,7 @@ static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
246 exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 101 exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
247 102
248 /* all planes connected to this encoder should also be disabled. */ 103
249 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 104 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
250 if (plane->crtc == encoder->crtc) 105 if (plane->crtc == encoder->crtc)
251 plane->funcs->disable_plane(plane); 106 plane->funcs->disable_plane(plane);
252 } 107 }
@@ -263,10 +118,7 @@ static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
263 118
264static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) 119static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
265{ 120{
266 struct exynos_drm_encoder *exynos_encoder = 121 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
267 to_exynos_encoder(encoder);
268
269 exynos_encoder->manager->pipe = -1;
270 122
271 drm_encoder_cleanup(encoder); 123 drm_encoder_cleanup(encoder);
272 kfree(exynos_encoder); 124 kfree(exynos_encoder);
@@ -281,13 +133,12 @@ static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
281 struct drm_encoder *clone; 133 struct drm_encoder *clone;
282 struct drm_device *dev = encoder->dev; 134 struct drm_device *dev = encoder->dev;
283 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 135 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
284 struct exynos_drm_display_ops *display_ops = 136 struct exynos_drm_display *display = exynos_encoder->display;
285 exynos_encoder->manager->display_ops;
286 unsigned int clone_mask = 0; 137 unsigned int clone_mask = 0;
287 int cnt = 0; 138 int cnt = 0;
288 139
289 list_for_each_entry(clone, &dev->mode_config.encoder_list, head) { 140 list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
290 switch (display_ops->type) { 141 switch (display->type) {
291 case EXYNOS_DISPLAY_TYPE_LCD: 142 case EXYNOS_DISPLAY_TYPE_LCD:
292 case EXYNOS_DISPLAY_TYPE_HDMI: 143 case EXYNOS_DISPLAY_TYPE_HDMI:
293 case EXYNOS_DISPLAY_TYPE_VIDI: 144 case EXYNOS_DISPLAY_TYPE_VIDI:
@@ -311,24 +162,20 @@ void exynos_drm_encoder_setup(struct drm_device *dev)
311 162
312struct drm_encoder * 163struct drm_encoder *
313exynos_drm_encoder_create(struct drm_device *dev, 164exynos_drm_encoder_create(struct drm_device *dev,
314 struct exynos_drm_manager *manager, 165 struct exynos_drm_display *display,
315 unsigned int possible_crtcs) 166 unsigned long possible_crtcs)
316{ 167{
317 struct drm_encoder *encoder; 168 struct drm_encoder *encoder;
318 struct exynos_drm_encoder *exynos_encoder; 169 struct exynos_drm_encoder *exynos_encoder;
319 170
320 if (!manager || !possible_crtcs) 171 if (!possible_crtcs)
321 return NULL;
322
323 if (!manager->dev)
324 return NULL; 172 return NULL;
325 173
326 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); 174 exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
327 if (!exynos_encoder) 175 if (!exynos_encoder)
328 return NULL; 176 return NULL;
329 177
330 exynos_encoder->dpms = DRM_MODE_DPMS_OFF; 178 exynos_encoder->display = display;
331 exynos_encoder->manager = manager;
332 encoder = &exynos_encoder->drm_encoder; 179 encoder = &exynos_encoder->drm_encoder;
333 encoder->possible_crtcs = possible_crtcs; 180 encoder->possible_crtcs = possible_crtcs;
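 /*
  * possible_crtcs is a bitmask, not an index: bit n set means this
  * encoder may be routed to CRTC n.
  */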
334 181
@@ -344,149 +191,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
344 return encoder; 191 return encoder;
345} 192}
346 193
347struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder) 194struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
348{
349 return to_exynos_encoder(encoder)->manager;
350}
351
352void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
353 void (*fn)(struct drm_encoder *, void *))
354{
355 struct drm_device *dev = crtc->dev;
356 struct drm_encoder *encoder;
357 struct exynos_drm_private *private = dev->dev_private;
358 struct exynos_drm_manager *manager;
359
360 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
361 /*
362 * if crtc is detached from encoder, check pipe,
363 * otherwise check crtc attached to encoder
364 */
365 if (!encoder->crtc) {
366 manager = to_exynos_encoder(encoder)->manager;
367 if (manager->pipe < 0 ||
368 private->crtc[manager->pipe] != crtc)
369 continue;
370 } else {
371 if (encoder->crtc != crtc)
372 continue;
373 }
374
375 fn(encoder, data);
376 }
377}
378
379void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
380{
381 struct exynos_drm_manager *manager =
382 to_exynos_encoder(encoder)->manager;
383 struct exynos_drm_manager_ops *manager_ops = manager->ops;
384 int crtc = *(int *)data;
385
386 if (manager->pipe != crtc)
387 return;
388
389 if (manager_ops->enable_vblank)
390 manager_ops->enable_vblank(manager->dev);
391}
392
393void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
394{
395 struct exynos_drm_manager *manager =
396 to_exynos_encoder(encoder)->manager;
397 struct exynos_drm_manager_ops *manager_ops = manager->ops;
398 int crtc = *(int *)data;
399
400 if (manager->pipe != crtc)
401 return;
402
403 if (manager_ops->disable_vblank)
404 manager_ops->disable_vblank(manager->dev);
405}
406
407void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
408{
409 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
410 struct exynos_drm_manager *manager = exynos_encoder->manager;
411 struct exynos_drm_manager_ops *manager_ops = manager->ops;
412 int mode = *(int *)data;
413
414 if (manager_ops && manager_ops->dpms)
415 manager_ops->dpms(manager->dev, mode);
416
417 /*
418 * if this condition holds, the crtc has already been
419 * detached from the encoder and the final detach step has
420 * completed, so clear the manager's pipe to prevent repeated calls.
421 */
422 if (mode > DRM_MODE_DPMS_ON) {
423 if (!encoder->crtc)
424 manager->pipe = -1;
425 }
426}
427
428void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
429{
430 struct exynos_drm_manager *manager =
431 to_exynos_encoder(encoder)->manager;
432 int pipe = *(int *)data;
433
434 /*
435 * when the crtc is detached from the encoder, this pipe is used
436 * to select the manager operation
437 */
438 manager->pipe = pipe;
439}
440
441void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
442{
443 struct exynos_drm_manager *manager =
444 to_exynos_encoder(encoder)->manager;
445 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
446 struct exynos_drm_overlay *overlay = data;
447
448 if (overlay_ops && overlay_ops->mode_set)
449 overlay_ops->mode_set(manager->dev, overlay);
450}
451
452void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
453{ 195{
454 struct exynos_drm_manager *manager = 196 return to_exynos_encoder(encoder)->display;
455 to_exynos_encoder(encoder)->manager;
456 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
457 int zpos = DEFAULT_ZPOS;
458
459 if (data)
460 zpos = *(int *)data;
461
462 if (overlay_ops && overlay_ops->commit)
463 overlay_ops->commit(manager->dev, zpos);
464}
465
466void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
467{
468 struct exynos_drm_manager *manager =
469 to_exynos_encoder(encoder)->manager;
470 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
471 int zpos = DEFAULT_ZPOS;
472
473 if (data)
474 zpos = *(int *)data;
475
476 if (overlay_ops && overlay_ops->enable)
477 overlay_ops->enable(manager->dev, zpos);
478}
479
480void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
481{
482 struct exynos_drm_manager *manager =
483 to_exynos_encoder(encoder)->manager;
484 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
485 int zpos = DEFAULT_ZPOS;
486
487 if (data)
488 zpos = *(int *)data;
489
490 if (overlay_ops && overlay_ops->disable)
491 overlay_ops->disable(manager->dev, zpos);
492} 197}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 89e2fb0770af..b7a1620a7e79 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -18,20 +18,8 @@ struct exynos_drm_manager;
18 18
19void exynos_drm_encoder_setup(struct drm_device *dev); 19void exynos_drm_encoder_setup(struct drm_device *dev);
20struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev, 20struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
21 struct exynos_drm_manager *mgr, 21 struct exynos_drm_display *mgr,
22 unsigned int possible_crtcs); 22 unsigned long possible_crtcs);
23struct exynos_drm_manager * 23struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
24exynos_drm_get_manager(struct drm_encoder *encoder);
25void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
26 void (*fn)(struct drm_encoder *, void *));
27void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
28void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
29void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
30void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
31void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
32void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
33void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
34void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
35void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
36 24
37#endif 25#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index ea39e0ef2ae4..65a22cad7b36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -20,9 +20,10 @@
20 20
21#include "exynos_drm_drv.h" 21#include "exynos_drm_drv.h"
22#include "exynos_drm_fb.h" 22#include "exynos_drm_fb.h"
23#include "exynos_drm_fbdev.h"
23#include "exynos_drm_gem.h" 24#include "exynos_drm_gem.h"
24#include "exynos_drm_iommu.h" 25#include "exynos_drm_iommu.h"
25#include "exynos_drm_encoder.h" 26#include "exynos_drm_crtc.h"
26 27
27#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) 28#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
28 29
@@ -71,7 +72,7 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
71 unsigned int i; 72 unsigned int i;
72 73
73 /* make sure that overlay data are updated before releasing fb. */ 74
74 exynos_drm_encoder_complete_scanout(fb); 75 exynos_drm_crtc_complete_scanout(fb);
75 76
76 drm_framebuffer_cleanup(fb); 77 drm_framebuffer_cleanup(fb);
77 78
@@ -300,6 +301,8 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
300 301
301 if (fb_helper) 302 if (fb_helper)
302 drm_fb_helper_hotplug_event(fb_helper); 303 drm_fb_helper_hotplug_event(fb_helper);
304 else
305 exynos_drm_fbdev_init(dev);
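 /*
  * If nothing was connected at load time, fbdev creation was skipped
  * (see the exynos_drm_fbdev_init() change below), so it is retried
  * here on the first hotplug event.
  */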
303} 306}
304 307
305static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 308static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7c2f2d07f19..addbf7536da4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -90,7 +90,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
90 /* RGB formats use only one buffer */ 90 /* RGB formats use only one buffer */
91 buffer = exynos_drm_fb_buffer(fb, 0); 91 buffer = exynos_drm_fb_buffer(fb, 0);
92 if (!buffer) { 92 if (!buffer) {
93 DRM_LOG_KMS("buffer is null.\n"); 93 DRM_DEBUG_KMS("buffer is null.\n");
94 return -EFAULT; 94 return -EFAULT;
95 } 95 }
96 96
@@ -237,6 +237,24 @@ static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
237 .fb_probe = exynos_drm_fbdev_create, 237 .fb_probe = exynos_drm_fbdev_create,
238}; 238};
239 239
240bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
241{
242 struct drm_connector *connector;
243 bool ret = false;
244
245 mutex_lock(&dev->mode_config.mutex);
246 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
247 if (connector->status != connector_status_connected)
248 continue;
249
250 ret = true;
251 break;
252 }
253 mutex_unlock(&dev->mode_config.mutex);
254
255 return ret;
256}
257
240int exynos_drm_fbdev_init(struct drm_device *dev) 258int exynos_drm_fbdev_init(struct drm_device *dev)
241{ 259{
242 struct exynos_drm_fbdev *fbdev; 260 struct exynos_drm_fbdev *fbdev;
@@ -248,6 +266,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
248 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) 266 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
249 return 0; 267 return 0;
250 268
269 if (!exynos_drm_fbdev_is_anything_connected(dev))
270 return 0;
271
251 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 272 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
252 if (!fbdev) 273 if (!fbdev)
253 return -ENOMEM; 274 return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a20440ce32e6..40fd6ccfcd6f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -62,7 +62,7 @@
62/* FIMD has five hardware windows in total. */ 62/* FIMD has five hardware windows in total. */
63#define WINDOWS_NR 5 63#define WINDOWS_NR 5
64 64
65#define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev)) 65#define get_fimd_manager(dev) platform_get_drvdata(to_platform_device(dev))
66 66
67struct fimd_driver_data { 67struct fimd_driver_data {
68 unsigned int timing_base; 68 unsigned int timing_base;
@@ -105,20 +105,18 @@ struct fimd_win_data {
105}; 105};
106 106
107struct fimd_context { 107struct fimd_context {
108 struct exynos_drm_subdrv subdrv; 108 struct device *dev;
109 int irq; 109 struct drm_device *drm_dev;
110 struct drm_crtc *crtc;
111 struct clk *bus_clk; 110 struct clk *bus_clk;
112 struct clk *lcd_clk; 111 struct clk *lcd_clk;
113 void __iomem *regs; 112 void __iomem *regs;
113 struct drm_display_mode mode;
114 struct fimd_win_data win_data[WINDOWS_NR]; 114 struct fimd_win_data win_data[WINDOWS_NR];
115 unsigned int clkdiv;
116 unsigned int default_win; 115 unsigned int default_win;
117 unsigned long irq_flags; 116 unsigned long irq_flags;
118 u32 vidcon0;
119 u32 vidcon1; 117 u32 vidcon1;
120 bool suspended; 118 bool suspended;
121 struct mutex lock; 119 int pipe;
122 wait_queue_head_t wait_vsync_queue; 120 wait_queue_head_t wait_vsync_queue;
123 atomic_t wait_vsync_event; 121 atomic_t wait_vsync_event;
124 122
@@ -145,153 +143,147 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
145 return (struct fimd_driver_data *)of_id->data; 143 return (struct fimd_driver_data *)of_id->data;
146} 144}
147 145
148static bool fimd_display_is_connected(struct device *dev) 146static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
147 struct drm_device *drm_dev, int pipe)
149{ 148{
150 /* TODO. */ 149 struct fimd_context *ctx = mgr->ctx;
151 150
152 return true; 151 ctx->drm_dev = drm_dev;
153} 152 ctx->pipe = pipe;
154 153
155static void *fimd_get_panel(struct device *dev) 154 /*
156{ 155 * enable drm irq mode.
157 struct fimd_context *ctx = get_fimd_context(dev); 156 * - with irq_enabled = true, we can use the vblank feature.
157 *
158 * P.S. note that we do not use the drm irq handler, but
159 * the driver's own one instead, because the
160 * drm framework supports only one irq handler.
161 */
162 drm_dev->irq_enabled = true;
158 163
159 return &ctx->panel; 164 /*
160} 165 * with vblank_disable_allowed = true, the vblank interrupt will be
166 * disabled by the drm timer once the current process gives up
167 * ownership of the vblank event (after drm_vblank_put() is called)
168 */
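 /*
  * A sketch of the resulting flow (core DRM behaviour, not code from
  * this driver): drm_vblank_get() enables the interrupt through the
  * manager's enable_vblank hook, and after the last drm_vblank_put()
  * the core's disable timer may call the disable_vblank hook.
  */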
169 drm_dev->vblank_disable_allowed = true;
161 170
162static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode) 171 /* attach this sub driver to iommu mapping if supported. */
163{ 172 if (is_drm_iommu_supported(ctx->drm_dev))
164 /* TODO. */ 173 drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
165 174
166 return 0; 175 return 0;
167} 176}
168 177
169static int fimd_display_power_on(struct device *dev, int mode) 178static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
170{ 179{
171 /* TODO */ 180 struct fimd_context *ctx = mgr->ctx;
172 181
173 return 0; 182 /* detach this sub driver from iommu mapping if supported. */
183 if (is_drm_iommu_supported(ctx->drm_dev))
184 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
174} 185}
175 186
176static struct exynos_drm_display_ops fimd_display_ops = { 187static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
177 .type = EXYNOS_DISPLAY_TYPE_LCD, 188 const struct drm_display_mode *mode)
178 .is_connected = fimd_display_is_connected,
179 .get_panel = fimd_get_panel,
180 .check_mode = fimd_check_mode,
181 .power_on = fimd_display_power_on,
182};
183
184static void fimd_dpms(struct device *subdrv_dev, int mode)
185{ 189{
186 struct fimd_context *ctx = get_fimd_context(subdrv_dev); 190 unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
191 u32 clkdiv;
187 192
188 DRM_DEBUG_KMS("%d\n", mode); 193 /* Find the clock divider value that gets us closest to ideal_clk */
194 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
189 195
190 mutex_lock(&ctx->lock); 196 return (clkdiv < 0x100) ? clkdiv : 0xff;
197}
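/*
 * Worked example for fimd_calc_clkdiv() (illustrative numbers, not
 * from a real board): a 1024x768@60 mode with htotal = 1344 and
 * vtotal = 806 gives ideal_clk = 1344 * 806 * 60 = 64995840 Hz; with
 * sclk_fimd at 133 MHz, DIV_ROUND_UP(133000000, 64995840) = 3.
 * Rounding the divider up guarantees the resulting pixel clock never
 * exceeds the rate the mode asked for.
 */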
191 198
192 switch (mode) { 199static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
193 case DRM_MODE_DPMS_ON: 200 const struct drm_display_mode *mode,
194 /* 201 struct drm_display_mode *adjusted_mode)
195 * enable the fimd hardware only if it is currently suspended. 202 {
196 * 203 if (adjusted_mode->vrefresh == 0)
197 * P.S. the fimd_dpms function may be called at boot time, so 204 adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
198 * clk_enable could end up being called twice.
199 */
200 if (ctx->suspended)
201 pm_runtime_get_sync(subdrv_dev);
202 break;
203 case DRM_MODE_DPMS_STANDBY:
204 case DRM_MODE_DPMS_SUSPEND:
205 case DRM_MODE_DPMS_OFF:
206 if (!ctx->suspended)
207 pm_runtime_put_sync(subdrv_dev);
208 break;
209 default:
210 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
211 break;
212 }
213 205
214 mutex_unlock(&ctx->lock); 206 return true;
215} 207}
216 208
217static void fimd_apply(struct device *subdrv_dev) 209static void fimd_mode_set(struct exynos_drm_manager *mgr,
210 const struct drm_display_mode *in_mode)
218{ 211{
219 struct fimd_context *ctx = get_fimd_context(subdrv_dev); 212 struct fimd_context *ctx = mgr->ctx;
220 struct exynos_drm_manager *mgr = ctx->subdrv.manager;
221 struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
222 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
223 struct fimd_win_data *win_data;
224 int i;
225
226 for (i = 0; i < WINDOWS_NR; i++) {
227 win_data = &ctx->win_data[i];
228 if (win_data->enabled && (ovl_ops && ovl_ops->commit))
229 ovl_ops->commit(subdrv_dev, i);
230 }
231 213
232 if (mgr_ops && mgr_ops->commit) 214 drm_mode_copy(&ctx->mode, in_mode);
233 mgr_ops->commit(subdrv_dev);
234} 215}
235 216
236static void fimd_commit(struct device *dev) 217static void fimd_commit(struct exynos_drm_manager *mgr)
237{ 218{
238 struct fimd_context *ctx = get_fimd_context(dev); 219 struct fimd_context *ctx = mgr->ctx;
239 struct exynos_drm_panel_info *panel = &ctx->panel; 220 struct drm_display_mode *mode = &ctx->mode;
240 struct videomode *vm = &panel->vm;
241 struct fimd_driver_data *driver_data; 221 struct fimd_driver_data *driver_data;
242 u32 val; 222 u32 val, clkdiv, vidcon1;
223 int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
243 224
244 driver_data = ctx->driver_data; 225 driver_data = ctx->driver_data;
245 if (ctx->suspended) 226 if (ctx->suspended)
246 return; 227 return;
247 228
248 /* setup polarity values from machine-specific data. */ 229 /* nothing to do if we haven't set the mode yet */
249 writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 230 if (mode->htotal == 0 || mode->vtotal == 0)
231 return;
232
233 /* setup polarity values */
234 vidcon1 = ctx->vidcon1;
235 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
236 vidcon1 |= VIDCON1_INV_VSYNC;
237 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
238 vidcon1 |= VIDCON1_INV_HSYNC;
239 writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
250 240
251 /* setup vertical timing values. */ 241 /* setup vertical timing values. */
252 val = VIDTCON0_VBPD(vm->vback_porch - 1) | 242 vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
253 VIDTCON0_VFPD(vm->vfront_porch - 1) | 243 vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
254 VIDTCON0_VSPW(vm->vsync_len - 1); 244 vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
245
246 val = VIDTCON0_VBPD(vbpd - 1) |
247 VIDTCON0_VFPD(vfpd - 1) |
248 VIDTCON0_VSPW(vsync_len - 1);
255 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); 249 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
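 /*
  * Example of the mapping above, using standard 1024x768@60 numbers
  * (and assuming the crtc_* timings equal the base ones): vdisplay =
  * 768, vsync_start = 771, vsync_end = 777, vtotal = 806 yields
  * vfpd = 3, vsync_len = 6 and vbpd = 29, i.e. the usual front porch /
  * sync / back porch split of the vertical blanking interval.
  */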
256 250
257 /* setup horizontal timing values. */ 251 /* setup horizontal timing values. */
258 val = VIDTCON1_HBPD(vm->hback_porch - 1) | 252 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
259 VIDTCON1_HFPD(vm->hfront_porch - 1) | 253 hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
260 VIDTCON1_HSPW(vm->hsync_len - 1); 254 hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
255
256 val = VIDTCON1_HBPD(hbpd - 1) |
257 VIDTCON1_HFPD(hfpd - 1) |
258 VIDTCON1_HSPW(hsync_len - 1);
261 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); 259 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
262 260
263 /* setup horizontal and vertical display size. */ 261 /* setup horizontal and vertical display size. */
264 val = VIDTCON2_LINEVAL(vm->vactive - 1) | 262 val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
265 VIDTCON2_HOZVAL(vm->hactive - 1) | 263 VIDTCON2_HOZVAL(mode->hdisplay - 1) |
266 VIDTCON2_LINEVAL_E(vm->vactive - 1) | 264 VIDTCON2_LINEVAL_E(mode->vdisplay - 1) |
267 VIDTCON2_HOZVAL_E(vm->hactive - 1); 265 VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
268 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 266 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
269 267
270 /* setup clock source, clock divider, enable dma. */
271 val = ctx->vidcon0;
272 val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
273
274 if (ctx->driver_data->has_clksel) {
275 val &= ~VIDCON0_CLKSEL_MASK;
276 val |= VIDCON0_CLKSEL_LCD;
277 }
278
279 if (ctx->clkdiv > 1)
280 val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
281 else
282 val &= ~VIDCON0_CLKDIR; /* 1:1 clock */
283
284 /* 268 /*
285 * register fields with the '_F' suffix are updated 269 * register fields with the '_F' suffix are updated
286 * at vsync (same as dma start) 270 * at vsync (same as dma start)
287 */ 271 */
288 val |= VIDCON0_ENVID | VIDCON0_ENVID_F; 272 val = VIDCON0_ENVID | VIDCON0_ENVID_F;
273
274 if (ctx->driver_data->has_clksel)
275 val |= VIDCON0_CLKSEL_LCD;
276
277 clkdiv = fimd_calc_clkdiv(ctx, mode);
278 if (clkdiv > 1)
279 val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR;
280
289 writel(val, ctx->regs + VIDCON0); 281 writel(val, ctx->regs + VIDCON0);
290} 282}
291 283
292static int fimd_enable_vblank(struct device *dev) 284static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
293{ 285{
294 struct fimd_context *ctx = get_fimd_context(dev); 286 struct fimd_context *ctx = mgr->ctx;
295 u32 val; 287 u32 val;
296 288
297 if (ctx->suspended) 289 if (ctx->suspended)
@@ -314,9 +306,9 @@ static int fimd_enable_vblank(struct device *dev)
314 return 0; 306 return 0;
315} 307}
316 308
317static void fimd_disable_vblank(struct device *dev) 309static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
318{ 310{
319 struct fimd_context *ctx = get_fimd_context(dev); 311 struct fimd_context *ctx = mgr->ctx;
320 u32 val; 312 u32 val;
321 313
322 if (ctx->suspended) 314 if (ctx->suspended)
@@ -332,9 +324,9 @@ static void fimd_disable_vblank(struct device *dev)
332 } 324 }
333} 325}
334 326
335static void fimd_wait_for_vblank(struct device *dev) 327static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
336{ 328{
337 struct fimd_context *ctx = get_fimd_context(dev); 329 struct fimd_context *ctx = mgr->ctx;
338 330
339 if (ctx->suspended) 331 if (ctx->suspended)
340 return; 332 return;
@@ -351,25 +343,16 @@ static void fimd_wait_for_vblank(struct device *dev)
351 DRM_DEBUG_KMS("vblank wait timed out.\n"); 343 DRM_DEBUG_KMS("vblank wait timed out.\n");
352} 344}
353 345
354static struct exynos_drm_manager_ops fimd_manager_ops = { 346static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
355 .dpms = fimd_dpms, 347 struct exynos_drm_overlay *overlay)
356 .apply = fimd_apply,
357 .commit = fimd_commit,
358 .enable_vblank = fimd_enable_vblank,
359 .disable_vblank = fimd_disable_vblank,
360 .wait_for_vblank = fimd_wait_for_vblank,
361};
362
363static void fimd_win_mode_set(struct device *dev,
364 struct exynos_drm_overlay *overlay)
365{ 348{
366 struct fimd_context *ctx = get_fimd_context(dev); 349 struct fimd_context *ctx = mgr->ctx;
367 struct fimd_win_data *win_data; 350 struct fimd_win_data *win_data;
368 int win; 351 int win;
369 unsigned long offset; 352 unsigned long offset;
370 353
371 if (!overlay) { 354 if (!overlay) {
372 dev_err(dev, "overlay is NULL\n"); 355 DRM_ERROR("overlay is NULL\n");
373 return; 356 return;
374 } 357 }
375 358
@@ -409,9 +392,8 @@ static void fimd_win_mode_set(struct device *dev,
409 overlay->fb_width, overlay->crtc_width); 392 overlay->fb_width, overlay->crtc_width);
410} 393}
411 394
412static void fimd_win_set_pixfmt(struct device *dev, unsigned int win) 395static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
413{ 396{
414 struct fimd_context *ctx = get_fimd_context(dev);
415 struct fimd_win_data *win_data = &ctx->win_data[win]; 397 struct fimd_win_data *win_data = &ctx->win_data[win];
416 unsigned long val; 398 unsigned long val;
417 399
@@ -467,9 +449,8 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
467 writel(val, ctx->regs + WINCON(win)); 449 writel(val, ctx->regs + WINCON(win));
468} 450}
469 451
470static void fimd_win_set_colkey(struct device *dev, unsigned int win) 452static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
471{ 453{
472 struct fimd_context *ctx = get_fimd_context(dev);
473 unsigned int keycon0 = 0, keycon1 = 0; 454 unsigned int keycon0 = 0, keycon1 = 0;
474 455
475 keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | 456 keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
@@ -508,9 +489,9 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
508 writel(val, ctx->regs + reg); 489 writel(val, ctx->regs + reg);
509} 490}
510 491
511static void fimd_win_commit(struct device *dev, int zpos) 492static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
512{ 493{
513 struct fimd_context *ctx = get_fimd_context(dev); 494 struct fimd_context *ctx = mgr->ctx;
514 struct fimd_win_data *win_data; 495 struct fimd_win_data *win_data;
515 int win = zpos; 496 int win = zpos;
516 unsigned long val, alpha, size; 497 unsigned long val, alpha, size;
@@ -528,6 +509,12 @@ static void fimd_win_commit(struct device *dev, int zpos)
528 509
529 win_data = &ctx->win_data[win]; 510 win_data = &ctx->win_data[win];
530 511
512 /* If suspended, enable this on resume */
513 if (ctx->suspended) {
514 win_data->resume = true;
515 return;
516 }
517
531 /* 518 /*
532 * SHADOWCON/PRTCON register is used for enabling timing. 519 * SHADOWCON/PRTCON register is used for enabling timing.
533 * 520 *
@@ -605,11 +592,11 @@ static void fimd_win_commit(struct device *dev, int zpos)
605 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); 592 DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
606 } 593 }
607 594
608 fimd_win_set_pixfmt(dev, win); 595 fimd_win_set_pixfmt(ctx, win);
609 596
610 /* hardware window 0 doesn't support color key. */ 597 /* hardware window 0 doesn't support color key. */
611 if (win != 0) 598 if (win != 0)
612 fimd_win_set_colkey(dev, win); 599 fimd_win_set_colkey(ctx, win);
613 600
614 /* wincon */ 601 /* wincon */
615 val = readl(ctx->regs + WINCON(win)); 602 val = readl(ctx->regs + WINCON(win));
@@ -628,9 +615,9 @@ static void fimd_win_commit(struct device *dev, int zpos)
628 win_data->enabled = true; 615 win_data->enabled = true;
629} 616}
630 617
631static void fimd_win_disable(struct device *dev, int zpos) 618static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
632{ 619{
633 struct fimd_context *ctx = get_fimd_context(dev); 620 struct fimd_context *ctx = mgr->ctx;
634 struct fimd_win_data *win_data; 621 struct fimd_win_data *win_data;
635 int win = zpos; 622 int win = zpos;
636 u32 val; 623 u32 val;
@@ -669,132 +656,6 @@ static void fimd_win_disable(struct device *dev, int zpos)
669 win_data->enabled = false; 656 win_data->enabled = false;
670} 657}
671 658
672static struct exynos_drm_overlay_ops fimd_overlay_ops = {
673 .mode_set = fimd_win_mode_set,
674 .commit = fimd_win_commit,
675 .disable = fimd_win_disable,
676};
677
678static struct exynos_drm_manager fimd_manager = {
679 .pipe = -1,
680 .ops = &fimd_manager_ops,
681 .overlay_ops = &fimd_overlay_ops,
682 .display_ops = &fimd_display_ops,
683};
684
685static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
686{
687 struct fimd_context *ctx = (struct fimd_context *)dev_id;
688 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
689 struct drm_device *drm_dev = subdrv->drm_dev;
690 struct exynos_drm_manager *manager = subdrv->manager;
691 u32 val;
692
693 val = readl(ctx->regs + VIDINTCON1);
694
695 if (val & VIDINTCON1_INT_FRAME)
696 /* VSYNC interrupt */
697 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
698
699 /* check whether the crtc is already detached from the encoder */
700 if (manager->pipe < 0)
701 goto out;
702
703 drm_handle_vblank(drm_dev, manager->pipe);
704 exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
705
706 /* set wait vsync event to zero and wake up queue. */
707 if (atomic_read(&ctx->wait_vsync_event)) {
708 atomic_set(&ctx->wait_vsync_event, 0);
709 wake_up(&ctx->wait_vsync_queue);
710 }
711out:
712 return IRQ_HANDLED;
713}
714
715static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
716{
717 /*
718 * enable drm irq mode.
719 * - with irq_enabled = true, we can use the vblank feature.
720 *
721 * P.S. note that we do not use the drm irq handler, but
722 * the driver's own one instead, because the
723 * drm framework supports only one irq handler.
724 */
725 drm_dev->irq_enabled = true;
726
727 /*
728 * with vblank_disable_allowed = true, the vblank interrupt will be
729 * disabled by the drm timer once the current process gives up
730 * ownership of the vblank event (after drm_vblank_put() is called)
731 */
732 drm_dev->vblank_disable_allowed = true;
733
734 /* attach this sub driver to iommu mapping if supported. */
735 if (is_drm_iommu_supported(drm_dev))
736 drm_iommu_attach_device(drm_dev, dev);
737
738 return 0;
739}
740
741static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
742{
743 /* detach this sub driver from iommu mapping if supported. */
744 if (is_drm_iommu_supported(drm_dev))
745 drm_iommu_detach_device(drm_dev, dev);
746}
747
748static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
749{
750 struct videomode *vm = &ctx->panel.vm;
751 unsigned long clk;
752
753 ctx->bus_clk = devm_clk_get(dev, "fimd");
754 if (IS_ERR(ctx->bus_clk)) {
755 dev_err(dev, "failed to get bus clock\n");
756 return PTR_ERR(ctx->bus_clk);
757 }
758
759 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
760 if (IS_ERR(ctx->lcd_clk)) {
761 dev_err(dev, "failed to get lcd clock\n");
762 return PTR_ERR(ctx->lcd_clk);
763 }
764
765 clk = clk_get_rate(ctx->lcd_clk);
766 if (clk == 0) {
767 dev_err(dev, "error getting sclk_fimd clock rate\n");
768 return -EINVAL;
769 }
770
771 if (vm->pixelclock == 0) {
772 unsigned long c;
773 c = vm->hactive + vm->hback_porch + vm->hfront_porch +
774 vm->hsync_len;
775 c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
776 vm->vsync_len;
777 vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
778 if (vm->pixelclock == 0) {
779 dev_err(dev, "incorrect display timings\n");
780 return -EINVAL;
781 }
782 dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
783 vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
784 }
785 ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
786 if (ctx->clkdiv > 256) {
787 dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
788 ctx->clkdiv);
789 ctx->clkdiv = 256;
790 }
791 vm->pixelclock = clk / ctx->clkdiv;
792 DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
793 ctx->clkdiv);
794
795 return 0;
796}
797
798static void fimd_clear_win(struct fimd_context *ctx, int win) 659static void fimd_clear_win(struct fimd_context *ctx, int win)
799{ 660{
800 writel(0, ctx->regs + WINCON(win)); 661 writel(0, ctx->regs + WINCON(win));
@@ -808,111 +669,190 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
808 fimd_shadow_protect_win(ctx, win, false); 669 fimd_shadow_protect_win(ctx, win, false);
809} 670}
810 671
811static int fimd_clock(struct fimd_context *ctx, bool enable) 672static void fimd_window_suspend(struct exynos_drm_manager *mgr)
812{ 673{
813 if (enable) { 674 struct fimd_context *ctx = mgr->ctx;
814 int ret; 675 struct fimd_win_data *win_data;
815 676 int i;
816 ret = clk_prepare_enable(ctx->bus_clk);
817 if (ret < 0)
818 return ret;
819 677
820 ret = clk_prepare_enable(ctx->lcd_clk); 678 for (i = 0; i < WINDOWS_NR; i++) {
821 if (ret < 0) { 679 win_data = &ctx->win_data[i];
822 clk_disable_unprepare(ctx->bus_clk); 680 win_data->resume = win_data->enabled;
823 return ret; 681 if (win_data->enabled)
824 } 682 fimd_win_disable(mgr, i);
825 } else {
826 clk_disable_unprepare(ctx->lcd_clk);
827 clk_disable_unprepare(ctx->bus_clk);
828 } 683 }
829 684 fimd_wait_for_vblank(mgr);
830 return 0;
831} 685}
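/*
 * The fimd_wait_for_vblank() at the end of fimd_window_suspend() makes
 * sure the window-disable writes have been latched by the hardware
 * before callers such as fimd_poweroff() go on to gate the clocks.
 */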
832 686
833static void fimd_window_suspend(struct device *dev) 687static void fimd_window_resume(struct exynos_drm_manager *mgr)
834{ 688{
835 struct fimd_context *ctx = get_fimd_context(dev); 689 struct fimd_context *ctx = mgr->ctx;
836 struct fimd_win_data *win_data; 690 struct fimd_win_data *win_data;
837 int i; 691 int i;
838 692
839 for (i = 0; i < WINDOWS_NR; i++) { 693 for (i = 0; i < WINDOWS_NR; i++) {
840 win_data = &ctx->win_data[i]; 694 win_data = &ctx->win_data[i];
841 win_data->resume = win_data->enabled; 695 win_data->enabled = win_data->resume;
842 fimd_win_disable(dev, i); 696 win_data->resume = false;
843 } 697 }
844 fimd_wait_for_vblank(dev);
845} 698}
846 699
847static void fimd_window_resume(struct device *dev) 700static void fimd_apply(struct exynos_drm_manager *mgr)
848{ 701{
849 struct fimd_context *ctx = get_fimd_context(dev); 702 struct fimd_context *ctx = mgr->ctx;
850 struct fimd_win_data *win_data; 703 struct fimd_win_data *win_data;
851 int i; 704 int i;
852 705
853 for (i = 0; i < WINDOWS_NR; i++) { 706 for (i = 0; i < WINDOWS_NR; i++) {
854 win_data = &ctx->win_data[i]; 707 win_data = &ctx->win_data[i];
855 win_data->enabled = win_data->resume; 708 if (win_data->enabled)
856 win_data->resume = false; 709 fimd_win_commit(mgr, i);
857 } 710 }
711
712 fimd_commit(mgr);
858} 713}
859 714
860static int fimd_activate(struct fimd_context *ctx, bool enable) 715static int fimd_poweron(struct exynos_drm_manager *mgr)
861{ 716{
862 struct device *dev = ctx->subdrv.dev; 717 struct fimd_context *ctx = mgr->ctx;
863 if (enable) { 718 int ret;
864 int ret;
865 719
866 ret = fimd_clock(ctx, true); 720 if (!ctx->suspended)
867 if (ret < 0) 721 return 0;
868 return ret;
869 722
870 ctx->suspended = false; 723 ctx->suspended = false;
871 724
872 /* if vblank was enabled status, enable it again. */ 725 pm_runtime_get_sync(ctx->dev);
873 if (test_and_clear_bit(0, &ctx->irq_flags))
874 fimd_enable_vblank(dev);
875 726
876 fimd_window_resume(dev); 727 ret = clk_prepare_enable(ctx->bus_clk);
877 } else { 728 if (ret < 0) {
878 fimd_window_suspend(dev); 729 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
730 goto bus_clk_err;
731 }
732
733 ret = clk_prepare_enable(ctx->lcd_clk);
734 if (ret < 0) {
735 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
736 goto lcd_clk_err;
737 }
879 738
880 fimd_clock(ctx, false); 739 /* if vblank was enabled status, enable it again. */
881 ctx->suspended = true; 740 if (test_and_clear_bit(0, &ctx->irq_flags)) {
741 ret = fimd_enable_vblank(mgr);
742 if (ret) {
743 DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
744 goto enable_vblank_err;
745 }
882 } 746 }
883 747
748 fimd_window_resume(mgr);
749
750 fimd_apply(mgr);
751
884 return 0; 752 return 0;
753
754enable_vblank_err:
755 clk_disable_unprepare(ctx->lcd_clk);
756lcd_clk_err:
757 clk_disable_unprepare(ctx->bus_clk);
758bus_clk_err:
759 ctx->suspended = true;
760 return ret;
885} 761}
886 762
887static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev) 763static int fimd_poweroff(struct exynos_drm_manager *mgr)
888{ 764{
889 struct videomode *vm; 765 struct fimd_context *ctx = mgr->ctx;
890 int ret;
891 766
892 vm = &ctx->panel.vm; 767 if (ctx->suspended)
893 ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE); 768 return 0;
894 if (ret) {
895 DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
896 return ret;
897 }
898 769
899 if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW) 770 /*
900 ctx->vidcon1 |= VIDCON1_INV_VSYNC; 771 * We need to make sure that all windows are disabled before we
901 if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW) 772 * suspend that connector. Otherwise we might try to scan from
902 ctx->vidcon1 |= VIDCON1_INV_HSYNC; 773 * a destroyed buffer later.
903 if (vm->flags & DISPLAY_FLAGS_DE_LOW) 774 */
904 ctx->vidcon1 |= VIDCON1_INV_VDEN; 775 fimd_window_suspend(mgr);
905 if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
906 ctx->vidcon1 |= VIDCON1_INV_VCLK;
907 776
777 clk_disable_unprepare(ctx->lcd_clk);
778 clk_disable_unprepare(ctx->bus_clk);
779
780 pm_runtime_put_sync(ctx->dev);
781
782 ctx->suspended = true;
908 return 0; 783 return 0;
909} 784}
910 785
786static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
787{
788 DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
789
790 switch (mode) {
791 case DRM_MODE_DPMS_ON:
792 fimd_poweron(mgr);
793 break;
794 case DRM_MODE_DPMS_STANDBY:
795 case DRM_MODE_DPMS_SUSPEND:
796 case DRM_MODE_DPMS_OFF:
797 fimd_poweroff(mgr);
798 break;
799 default:
800 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
801 break;
802 }
803}
804
805static struct exynos_drm_manager_ops fimd_manager_ops = {
806 .initialize = fimd_mgr_initialize,
807 .remove = fimd_mgr_remove,
808 .dpms = fimd_dpms,
809 .mode_fixup = fimd_mode_fixup,
810 .mode_set = fimd_mode_set,
811 .commit = fimd_commit,
812 .enable_vblank = fimd_enable_vblank,
813 .disable_vblank = fimd_disable_vblank,
814 .wait_for_vblank = fimd_wait_for_vblank,
815 .win_mode_set = fimd_win_mode_set,
816 .win_commit = fimd_win_commit,
817 .win_disable = fimd_win_disable,
818};
819
820static struct exynos_drm_manager fimd_manager = {
821 .type = EXYNOS_DISPLAY_TYPE_LCD,
822 .ops = &fimd_manager_ops,
823};
824
825static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
826{
827 struct fimd_context *ctx = (struct fimd_context *)dev_id;
828 u32 val;
829
830 val = readl(ctx->regs + VIDINTCON1);
831
832 if (val & VIDINTCON1_INT_FRAME)
833 /* VSYNC interrupt */
834 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
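 /*
  * The pending bit in VIDINTCON1 is acknowledged by writing the same
  * bit back, i.e. the register is write-one-to-clear (as is typical
  * for this class of interrupt status register).
  */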
835
836 /* check whether the crtc is already detached from the encoder */
837 if (ctx->pipe < 0 || !ctx->drm_dev)
838 goto out;
839
840 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
841 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
842
843 /* set wait vsync event to zero and wake up queue. */
844 if (atomic_read(&ctx->wait_vsync_event)) {
845 atomic_set(&ctx->wait_vsync_event, 0);
846 wake_up(&ctx->wait_vsync_queue);
847 }
848out:
849 return IRQ_HANDLED;
850}
851
911static int fimd_probe(struct platform_device *pdev) 852static int fimd_probe(struct platform_device *pdev)
912{ 853{
913 struct device *dev = &pdev->dev; 854 struct device *dev = &pdev->dev;
914 struct fimd_context *ctx; 855 struct fimd_context *ctx;
915 struct exynos_drm_subdrv *subdrv;
916 struct resource *res; 856 struct resource *res;
917 int win; 857 int win;
918 int ret = -EINVAL; 858 int ret = -EINVAL;
@@ -924,13 +864,25 @@ static int fimd_probe(struct platform_device *pdev)
924 if (!ctx) 864 if (!ctx)
925 return -ENOMEM; 865 return -ENOMEM;
926 866
927 ret = fimd_get_platform_data(ctx, dev); 867 ctx->dev = dev;
928 if (ret) 868 ctx->suspended = true;
929 return ret;
930 869
931 ret = fimd_configure_clocks(ctx, dev); 870 if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
932 if (ret) 871 ctx->vidcon1 |= VIDCON1_INV_VDEN;
933 return ret; 872 if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
873 ctx->vidcon1 |= VIDCON1_INV_VCLK;
874
875 ctx->bus_clk = devm_clk_get(dev, "fimd");
876 if (IS_ERR(ctx->bus_clk)) {
877 dev_err(dev, "failed to get bus clock\n");
878 return PTR_ERR(ctx->bus_clk);
879 }
880
881 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
882 if (IS_ERR(ctx->lcd_clk)) {
883 dev_err(dev, "failed to get lcd clock\n");
884 return PTR_ERR(ctx->lcd_clk);
885 }
934 886
935 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
936 888
@@ -944,9 +896,7 @@ static int fimd_probe(struct platform_device *pdev)
944 return -ENXIO; 896 return -ENXIO;
945 } 897 }
946 898
947 ctx->irq = res->start; 899 ret = devm_request_irq(dev, res->start, fimd_irq_handler,
948
949 ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
950 0, "drm_fimd", ctx); 900 0, "drm_fimd", ctx);
951 if (ret) { 901 if (ret) {
952 dev_err(dev, "irq request failed.\n"); 902 dev_err(dev, "irq request failed.\n");
@@ -957,112 +907,35 @@ static int fimd_probe(struct platform_device *pdev)
957 init_waitqueue_head(&ctx->wait_vsync_queue); 907 init_waitqueue_head(&ctx->wait_vsync_queue);
958 atomic_set(&ctx->wait_vsync_event, 0); 908 atomic_set(&ctx->wait_vsync_event, 0);
959 909
960 subdrv = &ctx->subdrv; 910 platform_set_drvdata(pdev, &fimd_manager);
961 911
962 subdrv->dev = dev; 912 fimd_manager.ctx = ctx;
963 subdrv->manager = &fimd_manager; 913 exynos_drm_manager_register(&fimd_manager);
964 subdrv->probe = fimd_subdrv_probe;
965 subdrv->remove = fimd_subdrv_remove;
966 914
967 mutex_init(&ctx->lock); 915 exynos_dpi_probe(ctx->dev);
968
969 platform_set_drvdata(pdev, ctx);
970 916
971 pm_runtime_enable(dev); 917 pm_runtime_enable(dev);
972 pm_runtime_get_sync(dev);
973 918
974 for (win = 0; win < WINDOWS_NR; win++) 919 for (win = 0; win < WINDOWS_NR; win++)
975 fimd_clear_win(ctx, win); 920 fimd_clear_win(ctx, win);
976 921
977 exynos_drm_subdrv_register(subdrv);
978
979 return 0; 922 return 0;
980} 923}
981 924
982static int fimd_remove(struct platform_device *pdev) 925static int fimd_remove(struct platform_device *pdev)
983{ 926{
984 struct device *dev = &pdev->dev; 927 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
985 struct fimd_context *ctx = platform_get_drvdata(pdev);
986
987 exynos_drm_subdrv_unregister(&ctx->subdrv);
988
989 if (ctx->suspended)
990 goto out;
991
992 pm_runtime_set_suspended(dev);
993 pm_runtime_put_sync(dev);
994
995out:
996 pm_runtime_disable(dev);
997
998 return 0;
999}
1000
1001#ifdef CONFIG_PM_SLEEP
1002static int fimd_suspend(struct device *dev)
1003{
1004 struct fimd_context *ctx = get_fimd_context(dev);
1005 928
1006 /* 929 exynos_dpi_remove(&pdev->dev);
1007 * do not use pm_runtime_suspend(); if pm_runtime_suspend() were
1008 * called here, that interface would return an error
1009 * because the pm runtime usage_count is greater than 1.
1010 */
1011 if (!pm_runtime_suspended(dev))
1012 return fimd_activate(ctx, false);
1013 930
1014 return 0; 931 exynos_drm_manager_unregister(&fimd_manager);
1015}
1016 932
1017static int fimd_resume(struct device *dev) 933 fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
1018{
1019 struct fimd_context *ctx = get_fimd_context(dev);
1020 934
1021 /* 935 pm_runtime_disable(&pdev->dev);
1022 * if the system entered sleep while the lcd panel was on, the pm
1023 * runtime usage_count would still be 1, so in this case the fimd
1024 * driver must be powered on directly, not via the pm runtime interface.
1025 */
1026 if (!pm_runtime_suspended(dev)) {
1027 int ret;
1028
1029 ret = fimd_activate(ctx, true);
1030 if (ret < 0)
1031 return ret;
1032
1033 /*
1034 * in the dpms on (standby) case, the fimd_apply function is
1035 * called from the encoder's dpms callback to update fimd's
1036 * registers, but on wakeup from sleep it is not,
1037 * so fimd_apply must be called here.
1038 */
1039 fimd_apply(dev);
1040 }
1041 936
1042 return 0; 937 return 0;
1043} 938}
1044#endif
1045
1046#ifdef CONFIG_PM_RUNTIME
1047static int fimd_runtime_suspend(struct device *dev)
1048{
1049 struct fimd_context *ctx = get_fimd_context(dev);
1050
1051 return fimd_activate(ctx, false);
1052}
1053
1054static int fimd_runtime_resume(struct device *dev)
1055{
1056 struct fimd_context *ctx = get_fimd_context(dev);
1057
1058 return fimd_activate(ctx, true);
1059}
1060#endif
1061
1062static const struct dev_pm_ops fimd_pm_ops = {
1063 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
1064 SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
1065};
1066 939
1067struct platform_driver fimd_driver = { 940struct platform_driver fimd_driver = {
1068 .probe = fimd_probe, 941 .probe = fimd_probe,
@@ -1070,7 +943,6 @@ struct platform_driver fimd_driver = {
1070 .driver = { 943 .driver = {
1071 .name = "exynos4-fb", 944 .name = "exynos4-fb",
1072 .owner = THIS_MODULE, 945 .owner = THIS_MODULE,
1073 .pm = &fimd_pm_ops,
1074 .of_match_table = fimd_driver_dt_match, 946 .of_match_table = fimd_driver_dt_match,
1075 }, 947 },
1076}; 948};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
deleted file mode 100644
index 8548b974bd59..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ /dev/null
@@ -1,439 +0,0 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Seung-Woo Kim <sw0312.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include <drm/drmP.h>
15
16#include <linux/kernel.h>
17#include <linux/wait.h>
18#include <linux/platform_device.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/exynos_drm.h>
22
23#include "exynos_drm_drv.h"
24#include "exynos_drm_hdmi.h"
25
26#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
27#define to_subdrv(dev) to_context(dev)
28#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
29 struct drm_hdmi_context, subdrv);
30
31/* platform device pointer for common drm hdmi device. */
32static struct platform_device *exynos_drm_hdmi_pdev;
33
34/* Common hdmi subdrv needs to access the hdmi and mixer through context.
35* These should be initialized by the respective drivers */
36static struct exynos_drm_hdmi_context *hdmi_ctx;
37static struct exynos_drm_hdmi_context *mixer_ctx;
38
39/* these callback points should be set by specific drivers. */
40static struct exynos_hdmi_ops *hdmi_ops;
41static struct exynos_mixer_ops *mixer_ops;
42
43struct drm_hdmi_context {
44 struct exynos_drm_subdrv subdrv;
45 struct exynos_drm_hdmi_context *hdmi_ctx;
46 struct exynos_drm_hdmi_context *mixer_ctx;
47
48 bool enabled[MIXER_WIN_NR];
49};
50
51int exynos_platform_device_hdmi_register(void)
52{
53 struct platform_device *pdev;
54
55 if (exynos_drm_hdmi_pdev)
56 return -EEXIST;
57
58 pdev = platform_device_register_simple(
59 "exynos-drm-hdmi", -1, NULL, 0);
60 if (IS_ERR(pdev))
61 return PTR_ERR(pdev);
62
63 exynos_drm_hdmi_pdev = pdev;
64
65 return 0;
66}
67
68void exynos_platform_device_hdmi_unregister(void)
69{
70 if (exynos_drm_hdmi_pdev) {
71 platform_device_unregister(exynos_drm_hdmi_pdev);
72 exynos_drm_hdmi_pdev = NULL;
73 }
74}
75
76void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
77{
78 if (ctx)
79 hdmi_ctx = ctx;
80}
81
82void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx)
83{
84 if (ctx)
85 mixer_ctx = ctx;
86}
87
88void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
89{
90 if (ops)
91 hdmi_ops = ops;
92}
93
94void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
95{
96 if (ops)
97 mixer_ops = ops;
98}
99
100static bool drm_hdmi_is_connected(struct device *dev)
101{
102 struct drm_hdmi_context *ctx = to_context(dev);
103
104 if (hdmi_ops && hdmi_ops->is_connected)
105 return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
106
107 return false;
108}
109
110static struct edid *drm_hdmi_get_edid(struct device *dev,
111 struct drm_connector *connector)
112{
113 struct drm_hdmi_context *ctx = to_context(dev);
114
115 if (hdmi_ops && hdmi_ops->get_edid)
116 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
117
118 return NULL;
119}
120
121static int drm_hdmi_check_mode(struct device *dev,
122 struct drm_display_mode *mode)
123{
124 struct drm_hdmi_context *ctx = to_context(dev);
125 int ret = 0;
126
127 /*
128 * Both mixer and hdmi should be able to handle the requested mode.
129 * If either of the two fails, report the mode as BAD.
130 */
131
132 if (mixer_ops && mixer_ops->check_mode)
133 ret = mixer_ops->check_mode(ctx->mixer_ctx->ctx, mode);
134
135 if (ret)
136 return ret;
137
138 if (hdmi_ops && hdmi_ops->check_mode)
139 return hdmi_ops->check_mode(ctx->hdmi_ctx->ctx, mode);
140
141 return 0;
142}
143
144static int drm_hdmi_power_on(struct device *dev, int mode)
145{
146 struct drm_hdmi_context *ctx = to_context(dev);
147
148 if (hdmi_ops && hdmi_ops->power_on)
149 return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
150
151 return 0;
152}
153
154static struct exynos_drm_display_ops drm_hdmi_display_ops = {
155 .type = EXYNOS_DISPLAY_TYPE_HDMI,
156 .is_connected = drm_hdmi_is_connected,
157 .get_edid = drm_hdmi_get_edid,
158 .check_mode = drm_hdmi_check_mode,
159 .power_on = drm_hdmi_power_on,
160};
161
162static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
163{
164 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
165 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
166 struct exynos_drm_manager *manager = subdrv->manager;
167
168 if (mixer_ops && mixer_ops->enable_vblank)
169 return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
170 manager->pipe);
171
172 return 0;
173}
174
175static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
176{
177 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
178
179 if (mixer_ops && mixer_ops->disable_vblank)
180 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
181}
182
183static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
184{
185 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
186
187 if (mixer_ops && mixer_ops->wait_for_vblank)
188 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
189}
190
191static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
192 struct drm_connector *connector,
193 const struct drm_display_mode *mode,
194 struct drm_display_mode *adjusted_mode)
195{
196 struct drm_display_mode *m;
197 int mode_ok;
198
199 drm_mode_set_crtcinfo(adjusted_mode, 0);
200
201 mode_ok = drm_hdmi_check_mode(subdrv_dev, adjusted_mode);
202
203 /* just return if the user's desired mode can be used as-is. */
204 if (mode_ok == 0)
205 return;
206
207 /*
 208	 * otherwise, find the most suitable of the probed modes and copy
 209	 * it into adjusted_mode.
210 */
211 list_for_each_entry(m, &connector->modes, head) {
212 mode_ok = drm_hdmi_check_mode(subdrv_dev, m);
213
214 if (mode_ok == 0) {
215 struct drm_mode_object base;
216 struct list_head head;
217
218 DRM_INFO("desired mode doesn't exist so\n");
219 DRM_INFO("use the most suitable mode among modes.\n");
220
221 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
222 m->hdisplay, m->vdisplay, m->vrefresh);
223
224 /* preserve display mode header while copying. */
225 head = adjusted_mode->head;
226 base = adjusted_mode->base;
227 memcpy(adjusted_mode, m, sizeof(*m));
228 adjusted_mode->head = head;
229 adjusted_mode->base = base;
230 break;
231 }
232 }
233}
234
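The head/base save-and-restore above is exactly what the core helper drm_mode_copy() encapsulates: it copies all timing fields of a mode while preserving the destination's identity and its position in the connector's mode list. The reworked hdmi_mode_fixup() later in this diff uses it directly:

    /* equivalent to the memcpy() dance above: */
    drm_mode_copy(adjusted_mode, m);
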
235static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
236{
237 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
238
239 if (hdmi_ops && hdmi_ops->mode_set)
240 hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
241}
242
243static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
244 unsigned int *width, unsigned int *height)
245{
246 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
247
248 if (hdmi_ops && hdmi_ops->get_max_resol)
249 hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
250}
251
252static void drm_hdmi_commit(struct device *subdrv_dev)
253{
254 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
255
256 if (hdmi_ops && hdmi_ops->commit)
257 hdmi_ops->commit(ctx->hdmi_ctx->ctx);
258}
259
260static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
261{
262 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
263
264 if (mixer_ops && mixer_ops->dpms)
265 mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
266
267 if (hdmi_ops && hdmi_ops->dpms)
268 hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
269}
270
271static void drm_hdmi_apply(struct device *subdrv_dev)
272{
273 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
274 int i;
275
276 for (i = 0; i < MIXER_WIN_NR; i++) {
277 if (!ctx->enabled[i])
278 continue;
279 if (mixer_ops && mixer_ops->win_commit)
280 mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
281 }
282
283 if (hdmi_ops && hdmi_ops->commit)
284 hdmi_ops->commit(ctx->hdmi_ctx->ctx);
285}
286
287static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
288 .dpms = drm_hdmi_dpms,
289 .apply = drm_hdmi_apply,
290 .enable_vblank = drm_hdmi_enable_vblank,
291 .disable_vblank = drm_hdmi_disable_vblank,
292 .wait_for_vblank = drm_hdmi_wait_for_vblank,
293 .mode_fixup = drm_hdmi_mode_fixup,
294 .mode_set = drm_hdmi_mode_set,
295 .get_max_resol = drm_hdmi_get_max_resol,
296 .commit = drm_hdmi_commit,
297};
298
299static void drm_mixer_mode_set(struct device *subdrv_dev,
300 struct exynos_drm_overlay *overlay)
301{
302 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
303
304 if (mixer_ops && mixer_ops->win_mode_set)
305 mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
306}
307
308static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
309{
310 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
311 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
312
313 if (win < 0 || win >= MIXER_WIN_NR) {
314 DRM_ERROR("mixer window[%d] is wrong\n", win);
315 return;
316 }
317
318 if (mixer_ops && mixer_ops->win_commit)
319 mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
320
321 ctx->enabled[win] = true;
322}
323
324static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
325{
326 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
327 int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
328
329 if (win < 0 || win >= MIXER_WIN_NR) {
330 DRM_ERROR("mixer window[%d] is wrong\n", win);
331 return;
332 }
333
334 if (mixer_ops && mixer_ops->win_disable)
335 mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
336
337 ctx->enabled[win] = false;
338}
339
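Both window helpers above share one zpos-to-hardware-window mapping. DEFAULT_ZPOS (-1 in exynos_drm_drv.h, meaning no explicit zpos was assigned) selects the default mixer window, and anything outside the three mixer windows is rejected:

    /*
     * zpos -> mixer window (MIXER_WIN_NR == 3):
     *   DEFAULT_ZPOS (-1) -> MIXER_DEFAULT_WIN (0)
     *   0, 1, 2           -> windows 0, 1, 2
     *   anything else     -> rejected with DRM_ERROR
     */
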
340static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
341 .mode_set = drm_mixer_mode_set,
342 .commit = drm_mixer_commit,
343 .disable = drm_mixer_disable,
344};
345
346static struct exynos_drm_manager hdmi_manager = {
347 .pipe = -1,
348 .ops = &drm_hdmi_manager_ops,
349 .overlay_ops = &drm_hdmi_overlay_ops,
350 .display_ops = &drm_hdmi_display_ops,
351};
352
353static int hdmi_subdrv_probe(struct drm_device *drm_dev,
354 struct device *dev)
355{
356 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
357 struct drm_hdmi_context *ctx;
358
359 if (!hdmi_ctx) {
360 DRM_ERROR("hdmi context not initialized.\n");
361 return -EFAULT;
362 }
363
364 if (!mixer_ctx) {
365 DRM_ERROR("mixer context not initialized.\n");
366 return -EFAULT;
367 }
368
369 ctx = get_ctx_from_subdrv(subdrv);
370
371 if (!ctx) {
372 DRM_ERROR("no drm hdmi context.\n");
373 return -EFAULT;
374 }
375
376 ctx->hdmi_ctx = hdmi_ctx;
377 ctx->mixer_ctx = mixer_ctx;
378
379 ctx->hdmi_ctx->drm_dev = drm_dev;
380 ctx->mixer_ctx->drm_dev = drm_dev;
381
382 if (mixer_ops->iommu_on)
383 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
384
385 return 0;
386}
387
388static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
389{
390 struct drm_hdmi_context *ctx;
391 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
392
393 ctx = get_ctx_from_subdrv(subdrv);
394
395 if (mixer_ops->iommu_on)
396 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
397}
398
399static int exynos_drm_hdmi_probe(struct platform_device *pdev)
400{
401 struct device *dev = &pdev->dev;
402 struct exynos_drm_subdrv *subdrv;
403 struct drm_hdmi_context *ctx;
404
405 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
406 if (!ctx)
407 return -ENOMEM;
408
409 subdrv = &ctx->subdrv;
410
411 subdrv->dev = dev;
412 subdrv->manager = &hdmi_manager;
413 subdrv->probe = hdmi_subdrv_probe;
414 subdrv->remove = hdmi_subdrv_remove;
415
416 platform_set_drvdata(pdev, subdrv);
417
418 exynos_drm_subdrv_register(subdrv);
419
420 return 0;
421}
422
423static int exynos_drm_hdmi_remove(struct platform_device *pdev)
424{
425 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
426
427 exynos_drm_subdrv_unregister(&ctx->subdrv);
428
429 return 0;
430}
431
432struct platform_driver exynos_drm_common_hdmi_driver = {
433 .probe = exynos_drm_hdmi_probe,
434 .remove = exynos_drm_hdmi_remove,
435 .driver = {
436 .name = "exynos-drm-hdmi",
437 .owner = THIS_MODULE,
438 },
439};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
deleted file mode 100644
index 724cab181976..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/* exynos_drm_hdmi.h
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef _EXYNOS_DRM_HDMI_H_
13#define _EXYNOS_DRM_HDMI_H_
14
15#define MIXER_WIN_NR 3
16#define MIXER_DEFAULT_WIN 0
17
18/*
19 * exynos hdmi common context structure.
20 *
21 * @drm_dev: pointer to drm_device.
22 * @ctx: pointer to the context of specific device driver.
23 * this context should be hdmi_context or mixer_context.
24 */
25struct exynos_drm_hdmi_context {
26 struct drm_device *drm_dev;
27 void *ctx;
28};
29
30struct exynos_hdmi_ops {
31 /* display */
32 bool (*is_connected)(void *ctx);
33 struct edid *(*get_edid)(void *ctx,
34 struct drm_connector *connector);
35 int (*check_mode)(void *ctx, struct drm_display_mode *mode);
36 int (*power_on)(void *ctx, int mode);
37
38 /* manager */
39 void (*mode_set)(void *ctx, struct drm_display_mode *mode);
40 void (*get_max_resol)(void *ctx, unsigned int *width,
41 unsigned int *height);
42 void (*commit)(void *ctx);
43 void (*dpms)(void *ctx, int mode);
44};
45
46struct exynos_mixer_ops {
47 /* manager */
48 int (*iommu_on)(void *ctx, bool enable);
49 int (*enable_vblank)(void *ctx, int pipe);
50 void (*disable_vblank)(void *ctx);
51 void (*wait_for_vblank)(void *ctx);
52 void (*dpms)(void *ctx, int mode);
53
54 /* overlay */
55 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
56 void (*win_commit)(void *ctx, int zpos);
57 void (*win_disable)(void *ctx, int zpos);
58
59 /* display */
60 int (*check_mode)(void *ctx, struct drm_display_mode *mode);
61};
62
63void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
64void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx);
65void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
66void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
67#endif
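
This header existed so three separately built drivers could meet at run time: mixer and hdmi each filled in one ops table, and exynos_drm_hdmi.c fanned the manager callbacks out to both. The price of the void *ctx indirection is type safety, which is why every dispatch site in the common driver repeats the same guarded shape:

    /* canonical dispatch pattern from exynos_drm_hdmi.c: */
    if (mixer_ops && mixer_ops->wait_for_vblank)
        mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
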
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index fcb0652e77d0..8371cbd7631d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -13,7 +13,7 @@
13 13
14#include <drm/exynos_drm.h> 14#include <drm/exynos_drm.h>
15#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
16#include "exynos_drm_encoder.h" 16#include "exynos_drm_crtc.h"
17#include "exynos_drm_fb.h" 17#include "exynos_drm_fb.h"
18#include "exynos_drm_gem.h" 18#include "exynos_drm_gem.h"
19#include "exynos_drm_plane.h" 19#include "exynos_drm_plane.h"
@@ -87,7 +87,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
87 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); 87 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
88 88
89 if (!buffer) { 89 if (!buffer) {
90 DRM_LOG_KMS("buffer is null\n"); 90 DRM_DEBUG_KMS("buffer is null\n");
91 return -EFAULT; 91 return -EFAULT;
92 } 92 }
93 93
@@ -139,7 +139,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
139 overlay->crtc_x, overlay->crtc_y, 139 overlay->crtc_x, overlay->crtc_y,
140 overlay->crtc_width, overlay->crtc_height); 140 overlay->crtc_width, overlay->crtc_height);
141 141
142 exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set); 142 exynos_drm_crtc_plane_mode_set(crtc, overlay);
143 143
144 return 0; 144 return 0;
145} 145}
@@ -149,8 +149,7 @@ void exynos_plane_commit(struct drm_plane *plane)
149 struct exynos_plane *exynos_plane = to_exynos_plane(plane); 149 struct exynos_plane *exynos_plane = to_exynos_plane(plane);
150 struct exynos_drm_overlay *overlay = &exynos_plane->overlay; 150 struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
151 151
152 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, 152 exynos_drm_crtc_plane_commit(plane->crtc, overlay->zpos);
153 exynos_drm_encoder_plane_commit);
154} 153}
155 154
156void exynos_plane_dpms(struct drm_plane *plane, int mode) 155void exynos_plane_dpms(struct drm_plane *plane, int mode)
@@ -162,17 +161,13 @@ void exynos_plane_dpms(struct drm_plane *plane, int mode)
162 if (exynos_plane->enabled) 161 if (exynos_plane->enabled)
163 return; 162 return;
164 163
165 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, 164 exynos_drm_crtc_plane_enable(plane->crtc, overlay->zpos);
166 exynos_drm_encoder_plane_enable);
167
168 exynos_plane->enabled = true; 165 exynos_plane->enabled = true;
169 } else { 166 } else {
170 if (!exynos_plane->enabled) 167 if (!exynos_plane->enabled)
171 return; 168 return;
172 169
173 exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, 170 exynos_drm_crtc_plane_disable(plane->crtc, overlay->zpos);
174 exynos_drm_encoder_plane_disable);
175
176 exynos_plane->enabled = false; 171 exynos_plane->enabled = false;
177 } 172 }
178} 173}
@@ -259,7 +254,7 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
259} 254}
260 255
261struct drm_plane *exynos_plane_init(struct drm_device *dev, 256struct drm_plane *exynos_plane_init(struct drm_device *dev,
262 unsigned int possible_crtcs, bool priv) 257 unsigned long possible_crtcs, bool priv)
263{ 258{
264 struct exynos_plane *exynos_plane; 259 struct exynos_plane *exynos_plane;
265 int err; 260 int err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 88312458580d..84d464c90d3d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -17,4 +17,4 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
17void exynos_plane_commit(struct drm_plane *plane); 17void exynos_plane_commit(struct drm_plane *plane);
18void exynos_plane_dpms(struct drm_plane *plane, int mode); 18void exynos_plane_dpms(struct drm_plane *plane, int mode);
19struct drm_plane *exynos_plane_init(struct drm_device *dev, 19struct drm_plane *exynos_plane_init(struct drm_device *dev,
20 unsigned int possible_crtcs, bool priv); 20 unsigned long possible_crtcs, bool priv);
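
For reference, possible_crtcs is a bitmask with one bit per CRTC index, and the switch from unsigned int to unsigned long matches the type drm_plane_init() takes for the same parameter. For example:

    /* a plane usable on CRTCs 0 and 2: */
    unsigned long possible_crtcs = (1UL << 0) | (1UL << 2);
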
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index ddaaedde173d..7afead9c3f30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -28,7 +28,9 @@
28/* vidi has totally three virtual windows. */ 28/* vidi has totally three virtual windows. */
29#define WINDOWS_NR 3 29#define WINDOWS_NR 3
30 30
31#define get_vidi_context(dev) platform_get_drvdata(to_platform_device(dev)) 31#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
32#define ctx_from_connector(c) container_of(c, struct vidi_context, \
33 connector)
32 34
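The new ctx_from_connector() macro works because struct drm_connector is embedded directly in struct vidi_context (added a few lines below); container_of() subtracts the member offset to recover the enclosing context from a connector pointer:

    /* what the macro expands to for a given connector: */
    struct vidi_context *ctx =
        container_of(connector, struct vidi_context, connector);
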
33struct vidi_win_data { 35struct vidi_win_data {
34 unsigned int offset_x; 36 unsigned int offset_x;
@@ -45,8 +47,10 @@ struct vidi_win_data {
45}; 47};
46 48
47struct vidi_context { 49struct vidi_context {
48 struct exynos_drm_subdrv subdrv; 50 struct drm_device *drm_dev;
49 struct drm_crtc *crtc; 51 struct drm_crtc *crtc;
52 struct drm_encoder *encoder;
53 struct drm_connector connector;
50 struct vidi_win_data win_data[WINDOWS_NR]; 54 struct vidi_win_data win_data[WINDOWS_NR];
51 struct edid *raw_edid; 55 struct edid *raw_edid;
52 unsigned int clkdiv; 56 unsigned int clkdiv;
@@ -58,6 +62,7 @@ struct vidi_context {
58 bool direct_vblank; 62 bool direct_vblank;
59 struct work_struct work; 63 struct work_struct work;
60 struct mutex lock; 64 struct mutex lock;
65 int pipe;
61}; 66};
62 67
63static const char fake_edid_info[] = { 68static const char fake_edid_info[] = {
@@ -85,126 +90,34 @@ static const char fake_edid_info[] = {
85 0x00, 0x00, 0x00, 0x06 90 0x00, 0x00, 0x00, 0x06
86}; 91};
87 92
88static bool vidi_display_is_connected(struct device *dev) 93static void vidi_apply(struct exynos_drm_manager *mgr)
89{ 94{
90 struct vidi_context *ctx = get_vidi_context(dev); 95 struct vidi_context *ctx = mgr->ctx;
91
92 /*
93 * connection request would come from user side
94 * to do hotplug through specific ioctl.
95 */
96 return ctx->connected ? true : false;
97}
98
99static struct edid *vidi_get_edid(struct device *dev,
100 struct drm_connector *connector)
101{
102 struct vidi_context *ctx = get_vidi_context(dev);
103 struct edid *edid;
104
105 /*
106 * the edid data comes from user side and it would be set
107 * to ctx->raw_edid through specific ioctl.
108 */
109 if (!ctx->raw_edid) {
110 DRM_DEBUG_KMS("raw_edid is null.\n");
111 return ERR_PTR(-EFAULT);
112 }
113
114 edid = drm_edid_duplicate(ctx->raw_edid);
115 if (!edid) {
116 DRM_DEBUG_KMS("failed to allocate edid\n");
117 return ERR_PTR(-ENOMEM);
118 }
119
120 return edid;
121}
122
123static void *vidi_get_panel(struct device *dev)
124{
125 /* TODO. */
126
127 return NULL;
128}
129
130static int vidi_check_mode(struct device *dev, struct drm_display_mode *mode)
131{
132 /* TODO. */
133
134 return 0;
135}
136
137static int vidi_display_power_on(struct device *dev, int mode)
138{
139 /* TODO */
140
141 return 0;
142}
143
144static struct exynos_drm_display_ops vidi_display_ops = {
145 .type = EXYNOS_DISPLAY_TYPE_VIDI,
146 .is_connected = vidi_display_is_connected,
147 .get_edid = vidi_get_edid,
148 .get_panel = vidi_get_panel,
149 .check_mode = vidi_check_mode,
150 .power_on = vidi_display_power_on,
151};
152
153static void vidi_dpms(struct device *subdrv_dev, int mode)
154{
155 struct vidi_context *ctx = get_vidi_context(subdrv_dev);
156
157 DRM_DEBUG_KMS("%d\n", mode);
158
159 mutex_lock(&ctx->lock);
160
161 switch (mode) {
162 case DRM_MODE_DPMS_ON:
163 /* TODO. */
164 break;
165 case DRM_MODE_DPMS_STANDBY:
166 case DRM_MODE_DPMS_SUSPEND:
167 case DRM_MODE_DPMS_OFF:
168 /* TODO. */
169 break;
170 default:
171 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
172 break;
173 }
174
175 mutex_unlock(&ctx->lock);
176}
177
178static void vidi_apply(struct device *subdrv_dev)
179{
180 struct vidi_context *ctx = get_vidi_context(subdrv_dev);
181 struct exynos_drm_manager *mgr = ctx->subdrv.manager;
182 struct exynos_drm_manager_ops *mgr_ops = mgr->ops; 96 struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
183 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
184 struct vidi_win_data *win_data; 97 struct vidi_win_data *win_data;
185 int i; 98 int i;
186 99
187 for (i = 0; i < WINDOWS_NR; i++) { 100 for (i = 0; i < WINDOWS_NR; i++) {
188 win_data = &ctx->win_data[i]; 101 win_data = &ctx->win_data[i];
189 if (win_data->enabled && (ovl_ops && ovl_ops->commit)) 102 if (win_data->enabled && (mgr_ops && mgr_ops->win_commit))
190 ovl_ops->commit(subdrv_dev, i); 103 mgr_ops->win_commit(mgr, i);
191 } 104 }
192 105
193 if (mgr_ops && mgr_ops->commit) 106 if (mgr_ops && mgr_ops->commit)
194 mgr_ops->commit(subdrv_dev); 107 mgr_ops->commit(mgr);
195} 108}
196 109
197static void vidi_commit(struct device *dev) 110static void vidi_commit(struct exynos_drm_manager *mgr)
198{ 111{
199 struct vidi_context *ctx = get_vidi_context(dev); 112 struct vidi_context *ctx = mgr->ctx;
200 113
201 if (ctx->suspended) 114 if (ctx->suspended)
202 return; 115 return;
203} 116}
204 117
205static int vidi_enable_vblank(struct device *dev) 118static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
206{ 119{
207 struct vidi_context *ctx = get_vidi_context(dev); 120 struct vidi_context *ctx = mgr->ctx;
208 121
209 if (ctx->suspended) 122 if (ctx->suspended)
210 return -EPERM; 123 return -EPERM;
@@ -217,16 +130,16 @@ static int vidi_enable_vblank(struct device *dev)
217 /* 130 /*
218 * in case of page flip request, vidi_finish_pageflip function 131 * in case of page flip request, vidi_finish_pageflip function
219 * will not be called because direct_vblank is true and then 132 * will not be called because direct_vblank is true and then
220 * that function will be called by overlay_ops->commit callback 133 * that function will be called by manager_ops->win_commit callback
221 */ 134 */
222 schedule_work(&ctx->work); 135 schedule_work(&ctx->work);
223 136
224 return 0; 137 return 0;
225} 138}
226 139
227static void vidi_disable_vblank(struct device *dev) 140static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
228{ 141{
229 struct vidi_context *ctx = get_vidi_context(dev); 142 struct vidi_context *ctx = mgr->ctx;
230 143
231 if (ctx->suspended) 144 if (ctx->suspended)
232 return; 145 return;
@@ -235,24 +148,16 @@ static void vidi_disable_vblank(struct device *dev)
235 ctx->vblank_on = false; 148 ctx->vblank_on = false;
236} 149}
237 150
238static struct exynos_drm_manager_ops vidi_manager_ops = { 151static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
239 .dpms = vidi_dpms, 152 struct exynos_drm_overlay *overlay)
240 .apply = vidi_apply,
241 .commit = vidi_commit,
242 .enable_vblank = vidi_enable_vblank,
243 .disable_vblank = vidi_disable_vblank,
244};
245
246static void vidi_win_mode_set(struct device *dev,
247 struct exynos_drm_overlay *overlay)
248{ 153{
249 struct vidi_context *ctx = get_vidi_context(dev); 154 struct vidi_context *ctx = mgr->ctx;
250 struct vidi_win_data *win_data; 155 struct vidi_win_data *win_data;
251 int win; 156 int win;
252 unsigned long offset; 157 unsigned long offset;
253 158
254 if (!overlay) { 159 if (!overlay) {
255 dev_err(dev, "overlay is NULL\n"); 160 DRM_ERROR("overlay is NULL\n");
256 return; 161 return;
257 } 162 }
258 163
@@ -296,9 +201,9 @@ static void vidi_win_mode_set(struct device *dev,
296 overlay->fb_width, overlay->crtc_width); 201 overlay->fb_width, overlay->crtc_width);
297} 202}
298 203
299static void vidi_win_commit(struct device *dev, int zpos) 204static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
300{ 205{
301 struct vidi_context *ctx = get_vidi_context(dev); 206 struct vidi_context *ctx = mgr->ctx;
302 struct vidi_win_data *win_data; 207 struct vidi_win_data *win_data;
303 int win = zpos; 208 int win = zpos;
304 209
@@ -321,9 +226,9 @@ static void vidi_win_commit(struct device *dev, int zpos)
321 schedule_work(&ctx->work); 226 schedule_work(&ctx->work);
322} 227}
323 228
324static void vidi_win_disable(struct device *dev, int zpos) 229static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
325{ 230{
326 struct vidi_context *ctx = get_vidi_context(dev); 231 struct vidi_context *ctx = mgr->ctx;
327 struct vidi_win_data *win_data; 232 struct vidi_win_data *win_data;
328 int win = zpos; 233 int win = zpos;
329 234
@@ -339,98 +244,132 @@ static void vidi_win_disable(struct device *dev, int zpos)
339 /* TODO. */ 244 /* TODO. */
340} 245}
341 246
342static struct exynos_drm_overlay_ops vidi_overlay_ops = { 247static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
343 .mode_set = vidi_win_mode_set, 248{
344 .commit = vidi_win_commit, 249 struct vidi_context *ctx = mgr->ctx;
345 .disable = vidi_win_disable,
346};
347 250
348static struct exynos_drm_manager vidi_manager = { 251 DRM_DEBUG_KMS("%s\n", __FILE__);
349 .pipe = -1,
350 .ops = &vidi_manager_ops,
351 .overlay_ops = &vidi_overlay_ops,
352 .display_ops = &vidi_display_ops,
353};
354 252
355static void vidi_fake_vblank_handler(struct work_struct *work) 253 if (enable != false && enable != true)
356{ 254 return -EINVAL;
357 struct vidi_context *ctx = container_of(work, struct vidi_context,
358 work);
359 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
360 struct exynos_drm_manager *manager = subdrv->manager;
361 255
362 if (manager->pipe < 0) 256 if (enable) {
363 return; 257 ctx->suspended = false;
364 258
365 /* refresh rate is about 50Hz. */ 259 /* if vblank was enabled status, enable it again. */
366 usleep_range(16000, 20000); 260 if (test_and_clear_bit(0, &ctx->irq_flags))
261 vidi_enable_vblank(mgr);
262
263 vidi_apply(mgr);
264 } else {
265 ctx->suspended = true;
266 }
267
268 return 0;
269}
270
271static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
272{
273 struct vidi_context *ctx = mgr->ctx;
274
275 DRM_DEBUG_KMS("%d\n", mode);
367 276
368 mutex_lock(&ctx->lock); 277 mutex_lock(&ctx->lock);
369 278
370 if (ctx->direct_vblank) { 279 switch (mode) {
371 drm_handle_vblank(subdrv->drm_dev, manager->pipe); 280 case DRM_MODE_DPMS_ON:
372 ctx->direct_vblank = false; 281 vidi_power_on(mgr, true);
373 mutex_unlock(&ctx->lock); 282 break;
374 return; 283 case DRM_MODE_DPMS_STANDBY:
284 case DRM_MODE_DPMS_SUSPEND:
285 case DRM_MODE_DPMS_OFF:
286 vidi_power_on(mgr, false);
287 break;
288 default:
289 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
290 break;
375 } 291 }
376 292
377 mutex_unlock(&ctx->lock); 293 mutex_unlock(&ctx->lock);
378
379 exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
380} 294}
381 295
382static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 296static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
297 struct drm_device *drm_dev, int pipe)
383{ 298{
299 struct vidi_context *ctx = mgr->ctx;
300
301 DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe);
302
303 ctx->drm_dev = drm_dev;
304 ctx->pipe = pipe;
305
384 /* 306 /*
385 * enable drm irq mode. 307 * enable drm irq mode.
386 * - with irq_enabled = true, we can use the vblank feature. 308 * - with irq_enabled = 1, we can use the vblank feature.
387 * 309 *
388 * P.S. note that we wouldn't use drm irq handler but 310 * P.S. note that we wouldn't use drm irq handler but
389 * just specific driver own one instead because 311 * just specific driver own one instead because
390 * drm framework supports only one irq handler. 312 * drm framework supports only one irq handler.
391 */ 313 */
392 drm_dev->irq_enabled = true; 314 drm_dev->irq_enabled = 1;
393 315
394 /* 316 /*
395 * with vblank_disable_allowed = true, vblank interrupt will be disabled 317 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
396 * by drm timer once a current process gives up ownership of 318 * by drm timer once a current process gives up ownership of
397 * vblank event.(after drm_vblank_put function is called) 319 * vblank event.(after drm_vblank_put function is called)
398 */ 320 */
399 drm_dev->vblank_disable_allowed = true; 321 drm_dev->vblank_disable_allowed = 1;
400 322
401 return 0; 323 return 0;
402} 324}
403 325
404static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 326static struct exynos_drm_manager_ops vidi_manager_ops = {
405{ 327 .initialize = vidi_mgr_initialize,
406 /* TODO. */ 328 .dpms = vidi_dpms,
407} 329 .commit = vidi_commit,
330 .enable_vblank = vidi_enable_vblank,
331 .disable_vblank = vidi_disable_vblank,
332 .win_mode_set = vidi_win_mode_set,
333 .win_commit = vidi_win_commit,
334 .win_disable = vidi_win_disable,
335};
408 336
409static int vidi_power_on(struct vidi_context *ctx, bool enable) 337static struct exynos_drm_manager vidi_manager = {
338 .type = EXYNOS_DISPLAY_TYPE_VIDI,
339 .ops = &vidi_manager_ops,
340};
341
342static void vidi_fake_vblank_handler(struct work_struct *work)
410{ 343{
411 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 344 struct vidi_context *ctx = container_of(work, struct vidi_context,
412 struct device *dev = subdrv->dev; 345 work);
413 346
414 if (enable) { 347 if (ctx->pipe < 0)
415 ctx->suspended = false; 348 return;
416 349
417 /* if vblank was enabled status, enable it again. */ 350 /* refresh rate is about 50Hz. */
418 if (test_and_clear_bit(0, &ctx->irq_flags)) 351 usleep_range(16000, 20000);
419 vidi_enable_vblank(dev);
420 352
421 vidi_apply(dev); 353 mutex_lock(&ctx->lock);
422 } else { 354
423 ctx->suspended = true; 355 if (ctx->direct_vblank) {
356 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
357 ctx->direct_vblank = false;
358 mutex_unlock(&ctx->lock);
359 return;
424 } 360 }
425 361
426 return 0; 362 mutex_unlock(&ctx->lock);
363
364 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
427} 365}
428 366
429static int vidi_show_connection(struct device *dev, 367static int vidi_show_connection(struct device *dev,
430 struct device_attribute *attr, char *buf) 368 struct device_attribute *attr, char *buf)
431{ 369{
432 int rc; 370 int rc;
433 struct vidi_context *ctx = get_vidi_context(dev); 371 struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
372 struct vidi_context *ctx = mgr->ctx;
434 373
435 mutex_lock(&ctx->lock); 374 mutex_lock(&ctx->lock);
436 375
@@ -445,7 +384,8 @@ static int vidi_store_connection(struct device *dev,
445 struct device_attribute *attr, 384 struct device_attribute *attr,
446 const char *buf, size_t len) 385 const char *buf, size_t len)
447{ 386{
448 struct vidi_context *ctx = get_vidi_context(dev); 387 struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
388 struct vidi_context *ctx = mgr->ctx;
449 int ret; 389 int ret;
450 390
451 ret = kstrtoint(buf, 0, &ctx->connected); 391 ret = kstrtoint(buf, 0, &ctx->connected);
@@ -467,7 +407,7 @@ static int vidi_store_connection(struct device *dev,
467 407
468 DRM_DEBUG_KMS("requested connection.\n"); 408 DRM_DEBUG_KMS("requested connection.\n");
469 409
470 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); 410 drm_helper_hpd_irq_event(ctx->drm_dev);
471 411
472 return len; 412 return len;
473} 413}
@@ -480,8 +420,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
480{ 420{
481 struct vidi_context *ctx = NULL; 421 struct vidi_context *ctx = NULL;
482 struct drm_encoder *encoder; 422 struct drm_encoder *encoder;
483 struct exynos_drm_manager *manager; 423 struct exynos_drm_display *display;
484 struct exynos_drm_display_ops *display_ops;
485 struct drm_exynos_vidi_connection *vidi = data; 424 struct drm_exynos_vidi_connection *vidi = data;
486 425
487 if (!vidi) { 426 if (!vidi) {
@@ -496,11 +435,10 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
496 435
497 list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, 436 list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
498 head) { 437 head) {
499 manager = exynos_drm_get_manager(encoder); 438 display = exynos_drm_get_display(encoder);
500 display_ops = manager->display_ops;
501 439
502 if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) { 440 if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
503 ctx = get_vidi_context(manager->dev); 441 ctx = display->ctx;
504 break; 442 break;
505 } 443 }
506 } 444 }
@@ -539,16 +477,119 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
539 } 477 }
540 478
541 ctx->connected = vidi->connection; 479 ctx->connected = vidi->connection;
542 drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); 480 drm_helper_hpd_irq_event(ctx->drm_dev);
481
482 return 0;
483}
484
485static enum drm_connector_status vidi_detect(struct drm_connector *connector,
486 bool force)
487{
488 struct vidi_context *ctx = ctx_from_connector(connector);
489
490 /*
 491 * the connection request comes from userspace, which triggers
 492 * hotplug through a dedicated ioctl.
493 */
494 return ctx->connected ? connector_status_connected :
495 connector_status_disconnected;
496}
497
498static void vidi_connector_destroy(struct drm_connector *connector)
499{
500}
501
502static struct drm_connector_funcs vidi_connector_funcs = {
503 .dpms = drm_helper_connector_dpms,
504 .fill_modes = drm_helper_probe_single_connector_modes,
505 .detect = vidi_detect,
506 .destroy = vidi_connector_destroy,
507};
508
509static int vidi_get_modes(struct drm_connector *connector)
510{
511 struct vidi_context *ctx = ctx_from_connector(connector);
512 struct edid *edid;
513 int edid_len;
514
515 /*
 516 * the edid data comes from userspace and is stored in
 517 * ctx->raw_edid through a dedicated ioctl.
518 */
519 if (!ctx->raw_edid) {
520 DRM_DEBUG_KMS("raw_edid is null.\n");
521 return -EFAULT;
522 }
523
524 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
525 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
526 if (!edid) {
527 DRM_DEBUG_KMS("failed to allocate edid\n");
528 return -ENOMEM;
529 }
530
531 drm_mode_connector_update_edid_property(connector, edid);
532
533 return drm_add_edid_modes(connector, edid);
534}
535
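An EDID is a 128-byte base block (EDID_LENGTH) plus `extensions` further 128-byte blocks, which is where the (1 + extensions) * EDID_LENGTH size above comes from:

    /* e.g. a user-supplied EDID with one CEA-861 extension block: */
    /* edid_len = (1 + 1) * EDID_LENGTH = 2 * 128 = 256 bytes      */
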
536static int vidi_mode_valid(struct drm_connector *connector,
537 struct drm_display_mode *mode)
538{
539 return MODE_OK;
540}
541
542static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
543{
544 struct vidi_context *ctx = ctx_from_connector(connector);
545
546 return ctx->encoder;
547}
548
549static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
550 .get_modes = vidi_get_modes,
551 .mode_valid = vidi_mode_valid,
552 .best_encoder = vidi_best_encoder,
553};
554
555static int vidi_create_connector(struct exynos_drm_display *display,
556 struct drm_encoder *encoder)
557{
558 struct vidi_context *ctx = display->ctx;
559 struct drm_connector *connector = &ctx->connector;
560 int ret;
561
562 ctx->encoder = encoder;
563 connector->polled = DRM_CONNECTOR_POLL_HPD;
564
565 ret = drm_connector_init(ctx->drm_dev, connector,
566 &vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
567 if (ret) {
568 DRM_ERROR("Failed to initialize connector with drm\n");
569 return ret;
570 }
571
572 drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
573 drm_sysfs_connector_add(connector);
574 drm_mode_connector_attach_encoder(connector, encoder);
543 575
544 return 0; 576 return 0;
545} 577}
546 578
579
580static struct exynos_drm_display_ops vidi_display_ops = {
581 .create_connector = vidi_create_connector,
582};
583
584static struct exynos_drm_display vidi_display = {
585 .type = EXYNOS_DISPLAY_TYPE_VIDI,
586 .ops = &vidi_display_ops,
587};
588
547static int vidi_probe(struct platform_device *pdev) 589static int vidi_probe(struct platform_device *pdev)
548{ 590{
549 struct device *dev = &pdev->dev; 591 struct device *dev = &pdev->dev;
550 struct vidi_context *ctx; 592 struct vidi_context *ctx;
551 struct exynos_drm_subdrv *subdrv;
552 int ret; 593 int ret;
553 594
554 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 595 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -559,21 +600,19 @@ static int vidi_probe(struct platform_device *pdev)
559 600
560 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 601 INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
561 602
562 subdrv = &ctx->subdrv; 603 vidi_manager.ctx = ctx;
563 subdrv->dev = dev; 604 vidi_display.ctx = ctx;
564 subdrv->manager = &vidi_manager;
565 subdrv->probe = vidi_subdrv_probe;
566 subdrv->remove = vidi_subdrv_remove;
567 605
568 mutex_init(&ctx->lock); 606 mutex_init(&ctx->lock);
569 607
570 platform_set_drvdata(pdev, ctx); 608 platform_set_drvdata(pdev, &vidi_manager);
571 609
572 ret = device_create_file(dev, &dev_attr_connection); 610 ret = device_create_file(dev, &dev_attr_connection);
573 if (ret < 0) 611 if (ret < 0)
574 DRM_INFO("failed to create connection sysfs.\n"); 612 DRM_INFO("failed to create connection sysfs.\n");
575 613
576 exynos_drm_subdrv_register(subdrv); 614 exynos_drm_manager_register(&vidi_manager);
615 exynos_drm_display_register(&vidi_display);
577 616
578 return 0; 617 return 0;
579} 618}
@@ -582,7 +621,8 @@ static int vidi_remove(struct platform_device *pdev)
582{ 621{
583 struct vidi_context *ctx = platform_get_drvdata(pdev); 622 struct vidi_context *ctx = platform_get_drvdata(pdev);
584 623
585 exynos_drm_subdrv_unregister(&ctx->subdrv); 624 exynos_drm_display_unregister(&vidi_display);
625 exynos_drm_manager_unregister(&vidi_manager);
586 626
587 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 627 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
588 kfree(ctx->raw_edid); 628 kfree(ctx->raw_edid);
@@ -592,32 +632,11 @@ static int vidi_remove(struct platform_device *pdev)
592 return 0; 632 return 0;
593} 633}
594 634
595#ifdef CONFIG_PM_SLEEP
596static int vidi_suspend(struct device *dev)
597{
598 struct vidi_context *ctx = get_vidi_context(dev);
599
600 return vidi_power_on(ctx, false);
601}
602
603static int vidi_resume(struct device *dev)
604{
605 struct vidi_context *ctx = get_vidi_context(dev);
606
607 return vidi_power_on(ctx, true);
608}
609#endif
610
611static const struct dev_pm_ops vidi_pm_ops = {
612 SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
613};
614
615struct platform_driver vidi_driver = { 635struct platform_driver vidi_driver = {
616 .probe = vidi_probe, 636 .probe = vidi_probe,
617 .remove = vidi_remove, 637 .remove = vidi_remove,
618 .driver = { 638 .driver = {
619 .name = "exynos-drm-vidi", 639 .name = "exynos-drm-vidi",
620 .owner = THIS_MODULE, 640 .owner = THIS_MODULE,
621 .pm = &vidi_pm_ops,
622 }, 641 },
623}; 642};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c021ddc1ffb4..9a6d652a3ef2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -33,38 +33,42 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/of.h> 35#include <linux/of.h>
36#include <linux/i2c.h>
36#include <linux/of_gpio.h> 37#include <linux/of_gpio.h>
37#include <linux/hdmi.h> 38#include <linux/hdmi.h>
38 39
39#include <drm/exynos_drm.h> 40#include <drm/exynos_drm.h>
40 41
41#include "exynos_drm_drv.h" 42#include "exynos_drm_drv.h"
42#include "exynos_drm_hdmi.h" 43#include "exynos_mixer.h"
43
44#include "exynos_hdmi.h"
45 44
46#include <linux/gpio.h> 45#include <linux/gpio.h>
47#include <media/s5p_hdmi.h> 46#include <media/s5p_hdmi.h>
48 47
49#define MAX_WIDTH 1920 48#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
50#define MAX_HEIGHT 1080 49#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
52 50
53/* AVI header and aspect ratio */ 51/* AVI header and aspect ratio */
54#define HDMI_AVI_VERSION 0x02 52#define HDMI_AVI_VERSION 0x02
55#define HDMI_AVI_LENGTH 0x0D 53#define HDMI_AVI_LENGTH 0x0D
56#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
57#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
58 54
59/* AUI header info */ 55/* AUI header info */
60#define HDMI_AUI_VERSION 0x01 56#define HDMI_AUI_VERSION 0x01
61#define HDMI_AUI_LENGTH 0x0A 57#define HDMI_AUI_LENGTH 0x0A
58#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8
59#define AVI_4_3_CENTER_RATIO 0x9
60#define AVI_16_9_CENTER_RATIO 0xa
62 61
63enum hdmi_type { 62enum hdmi_type {
64 HDMI_TYPE13, 63 HDMI_TYPE13,
65 HDMI_TYPE14, 64 HDMI_TYPE14,
66}; 65};
67 66
67struct hdmi_driver_data {
68 unsigned int type;
69 unsigned int is_apb_phy:1;
70};
71
68struct hdmi_resources { 72struct hdmi_resources {
69 struct clk *hdmi; 73 struct clk *hdmi;
70 struct clk *sclk_hdmi; 74 struct clk *sclk_hdmi;
@@ -162,6 +166,7 @@ struct hdmi_v14_conf {
162struct hdmi_conf_regs { 166struct hdmi_conf_regs {
163 int pixel_clock; 167 int pixel_clock;
164 int cea_video_id; 168 int cea_video_id;
169 enum hdmi_picture_aspect aspect_ratio;
165 union { 170 union {
166 struct hdmi_v13_conf v13_conf; 171 struct hdmi_v13_conf v13_conf;
167 struct hdmi_v14_conf v14_conf; 172 struct hdmi_v14_conf v14_conf;
@@ -171,16 +176,17 @@ struct hdmi_conf_regs {
171struct hdmi_context { 176struct hdmi_context {
172 struct device *dev; 177 struct device *dev;
173 struct drm_device *drm_dev; 178 struct drm_device *drm_dev;
179 struct drm_connector connector;
180 struct drm_encoder *encoder;
174 bool hpd; 181 bool hpd;
175 bool powered; 182 bool powered;
176 bool dvi_mode; 183 bool dvi_mode;
177 struct mutex hdmi_mutex; 184 struct mutex hdmi_mutex;
178 185
179 void __iomem *regs; 186 void __iomem *regs;
180 void *parent_ctx;
181 int irq; 187 int irq;
182 188
183 struct i2c_client *ddc_port; 189 struct i2c_adapter *ddc_adpt;
184 struct i2c_client *hdmiphy_port; 190 struct i2c_client *hdmiphy_port;
185 191
186 /* current hdmiphy conf regs */ 192 /* current hdmiphy conf regs */
@@ -198,6 +204,14 @@ struct hdmiphy_config {
198 u8 conf[32]; 204 u8 conf[32];
199}; 205};
200 206
207struct hdmi_driver_data exynos4212_hdmi_driver_data = {
208 .type = HDMI_TYPE14,
209};
210
211struct hdmi_driver_data exynos5_hdmi_driver_data = {
212 .type = HDMI_TYPE14,
213};
214
201/* list of phy config settings */ 215/* list of phy config settings */
202static const struct hdmiphy_config hdmiphy_v13_configs[] = { 216static const struct hdmiphy_config hdmiphy_v13_configs[] = {
203 { 217 {
@@ -303,6 +317,24 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
303 }, 317 },
304 }, 318 },
305 { 319 {
320 .pixel_clock = 71000000,
321 .conf = {
322 0x01, 0x91, 0x1e, 0x15, 0x40, 0x3c, 0xce, 0x08,
323 0x04, 0x20, 0xb2, 0xd8, 0x45, 0xa0, 0xac, 0x80,
324 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
325 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
326 },
327 },
328 {
329 .pixel_clock = 73250000,
330 .conf = {
331 0x01, 0xd1, 0x1f, 0x15, 0x40, 0x18, 0xe9, 0x08,
332 0x02, 0xa0, 0xb7, 0xd8, 0x45, 0xa0, 0xac, 0x80,
333 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
334 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
335 },
336 },
337 {
306 .pixel_clock = 74176000, 338 .pixel_clock = 74176000,
307 .conf = { 339 .conf = {
308 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08, 340 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
@@ -330,6 +362,15 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
330 }, 362 },
331 }, 363 },
332 { 364 {
365 .pixel_clock = 88750000,
366 .conf = {
367 0x01, 0x91, 0x25, 0x17, 0x40, 0x30, 0xfe, 0x08,
368 0x06, 0x20, 0xde, 0xd8, 0x45, 0xa0, 0xac, 0x80,
369 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
370 0x54, 0x8a, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
371 },
372 },
373 {
333 .pixel_clock = 106500000, 374 .pixel_clock = 106500000,
334 .conf = { 375 .conf = {
335 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, 376 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -348,6 +389,24 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
348 }, 389 },
349 }, 390 },
350 { 391 {
392 .pixel_clock = 115500000,
393 .conf = {
394 0x01, 0xd1, 0x30, 0x1a, 0x40, 0x40, 0x10, 0x04,
395 0x04, 0xa0, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
396 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
397 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
398 },
399 },
400 {
401 .pixel_clock = 119000000,
402 .conf = {
403 0x01, 0x91, 0x32, 0x14, 0x40, 0x60, 0xd8, 0x08,
404 0x06, 0x20, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
405 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
406 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
407 },
408 },
409 {
351 .pixel_clock = 146250000, 410 .pixel_clock = 146250000,
352 .conf = { 411 .conf = {
353 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08, 412 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
@@ -668,7 +727,6 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
668{ 727{
669 u32 hdr_sum; 728 u32 hdr_sum;
670 u8 chksum; 729 u8 chksum;
671 u32 aspect_ratio;
672 u32 mod; 730 u32 mod;
673 u32 vic; 731 u32 vic;
674 732
@@ -697,10 +755,28 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
697 AVI_ACTIVE_FORMAT_VALID | 755 AVI_ACTIVE_FORMAT_VALID |
698 AVI_UNDERSCANNED_DISPLAY_VALID); 756 AVI_UNDERSCANNED_DISPLAY_VALID);
699 757
700 aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9; 758 /*
701 759 * Set the aspect ratio as per the mode, mentioned in
702 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | 760 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
703 AVI_SAME_AS_PIC_ASPECT_RATIO); 761 */
762 switch (hdata->mode_conf.aspect_ratio) {
763 case HDMI_PICTURE_ASPECT_4_3:
764 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
765 hdata->mode_conf.aspect_ratio |
766 AVI_4_3_CENTER_RATIO);
767 break;
768 case HDMI_PICTURE_ASPECT_16_9:
769 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
770 hdata->mode_conf.aspect_ratio |
771 AVI_16_9_CENTER_RATIO);
772 break;
773 case HDMI_PICTURE_ASPECT_NONE:
774 default:
775 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
776 hdata->mode_conf.aspect_ratio |
777 AVI_SAME_AS_PIC_ASPECT_RATIO);
778 break;
779 }
704 780
705 vic = hdata->mode_conf.cea_video_id; 781 vic = hdata->mode_conf.cea_video_id;
706 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); 782 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
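The constants combined in the switch above come from CEA-861-D Table 9 (AVI InfoFrame Data Byte 2); an active-format code is OR'ed with the picture aspect taken from the mode:

    /*
     * CEA-861-D Table 9 active-format codes used above:
     *   0x8 = same as picture aspect ratio
     *   0x9 = 4:3 (center)
     *   0xA = 16:9 (center)
     */
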
@@ -728,31 +804,46 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
728 } 804 }
729} 805}
730 806
731static bool hdmi_is_connected(void *ctx) 807static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
808 bool force)
732{ 809{
733 struct hdmi_context *hdata = ctx; 810 struct hdmi_context *hdata = ctx_from_connector(connector);
811
812 return hdata->hpd ? connector_status_connected :
813 connector_status_disconnected;
814}
734 815
735 return hdata->hpd; 816static void hdmi_connector_destroy(struct drm_connector *connector)
817{
736} 818}
737 819
738static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector) 820static struct drm_connector_funcs hdmi_connector_funcs = {
821 .dpms = drm_helper_connector_dpms,
822 .fill_modes = drm_helper_probe_single_connector_modes,
823 .detect = hdmi_detect,
824 .destroy = hdmi_connector_destroy,
825};
826
827static int hdmi_get_modes(struct drm_connector *connector)
739{ 828{
740 struct edid *raw_edid; 829 struct hdmi_context *hdata = ctx_from_connector(connector);
741 struct hdmi_context *hdata = ctx; 830 struct edid *edid;
742 831
743 if (!hdata->ddc_port) 832 if (!hdata->ddc_adpt)
744 return ERR_PTR(-ENODEV); 833 return -ENODEV;
745 834
746 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); 835 edid = drm_get_edid(connector, hdata->ddc_adpt);
747 if (!raw_edid) 836 if (!edid)
748 return ERR_PTR(-ENODEV); 837 return -ENODEV;
749 838
750 hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); 839 hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
751 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 840 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
752 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 841 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
753 raw_edid->width_cm, raw_edid->height_cm); 842 edid->width_cm, edid->height_cm);
754 843
755 return raw_edid; 844 drm_mode_connector_update_edid_property(connector, edid);
845
846 return drm_add_edid_modes(connector, edid);
756} 847}
757 848
758static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 849static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
@@ -777,9 +868,10 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
777 return -EINVAL; 868 return -EINVAL;
778} 869}
779 870
780static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode) 871static int hdmi_mode_valid(struct drm_connector *connector,
872 struct drm_display_mode *mode)
781{ 873{
782 struct hdmi_context *hdata = ctx; 874 struct hdmi_context *hdata = ctx_from_connector(connector);
783 int ret; 875 int ret;
784 876
785 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", 877 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
@@ -787,12 +879,103 @@ static int hdmi_check_mode(void *ctx, struct drm_display_mode *mode)
787 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true : 879 (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
788 false, mode->clock * 1000); 880 false, mode->clock * 1000);
789 881
882 ret = mixer_check_mode(mode);
883 if (ret)
884 return MODE_BAD;
885
790 ret = hdmi_find_phy_conf(hdata, mode->clock * 1000); 886 ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
791 if (ret < 0) 887 if (ret < 0)
888 return MODE_BAD;
889
890 return MODE_OK;
891}
892
893static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
894{
895 struct hdmi_context *hdata = ctx_from_connector(connector);
896
897 return hdata->encoder;
898}
899
900static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
901 .get_modes = hdmi_get_modes,
902 .mode_valid = hdmi_mode_valid,
903 .best_encoder = hdmi_best_encoder,
904};
905
906static int hdmi_create_connector(struct exynos_drm_display *display,
907 struct drm_encoder *encoder)
908{
909 struct hdmi_context *hdata = display->ctx;
910 struct drm_connector *connector = &hdata->connector;
911 int ret;
912
913 hdata->encoder = encoder;
914 connector->interlace_allowed = true;
915 connector->polled = DRM_CONNECTOR_POLL_HPD;
916
917 ret = drm_connector_init(hdata->drm_dev, connector,
918 &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
919 if (ret) {
920 DRM_ERROR("Failed to initialize connector with drm\n");
792 return ret; 921 return ret;
922 }
923
924 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
925 drm_sysfs_connector_add(connector);
926 drm_mode_connector_attach_encoder(connector, encoder);
927
928 return 0;
929}
930
931static int hdmi_initialize(struct exynos_drm_display *display,
932 struct drm_device *drm_dev)
933{
934 struct hdmi_context *hdata = display->ctx;
935
936 hdata->drm_dev = drm_dev;
937
793 return 0; 938 return 0;
794} 939}
795 940
941static void hdmi_mode_fixup(struct exynos_drm_display *display,
942 struct drm_connector *connector,
943 const struct drm_display_mode *mode,
944 struct drm_display_mode *adjusted_mode)
945{
946 struct drm_display_mode *m;
947 int mode_ok;
948
949 DRM_DEBUG_KMS("%s\n", __FILE__);
950
951 drm_mode_set_crtcinfo(adjusted_mode, 0);
952
953 mode_ok = hdmi_mode_valid(connector, adjusted_mode);
954
 955 /* just return if the user-desired mode is supported. */
956 if (mode_ok == MODE_OK)
957 return;
958
959 /*
 960 * otherwise, find the most suitable of the probed modes and copy
 961 * it into adjusted_mode.
962 */
963 list_for_each_entry(m, &connector->modes, head) {
964 mode_ok = hdmi_mode_valid(connector, m);
965
966 if (mode_ok == MODE_OK) {
967 DRM_INFO("desired mode doesn't exist so\n");
968 DRM_INFO("use the most suitable mode among modes.\n");
969
970 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
971 m->hdisplay, m->vdisplay, m->vrefresh);
972
973 drm_mode_copy(adjusted_mode, m);
974 break;
975 }
976 }
977}
978
796static void hdmi_set_acr(u32 freq, u8 *acr) 979static void hdmi_set_acr(u32 freq, u8 *acr)
797{ 980{
798 u32 n, cts; 981 u32 n, cts;
@@ -1421,6 +1604,7 @@ static void hdmi_v13_mode_set(struct hdmi_context *hdata,
1421 hdata->mode_conf.cea_video_id = 1604 hdata->mode_conf.cea_video_id =
1422 drm_match_cea_mode((struct drm_display_mode *)m); 1605 drm_match_cea_mode((struct drm_display_mode *)m);
1423 hdata->mode_conf.pixel_clock = m->clock * 1000; 1606 hdata->mode_conf.pixel_clock = m->clock * 1000;
1607 hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
1424 1608
1425 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); 1609 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1426 hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal); 1610 hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
@@ -1517,6 +1701,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1517 hdata->mode_conf.cea_video_id = 1701 hdata->mode_conf.cea_video_id =
1518 drm_match_cea_mode((struct drm_display_mode *)m); 1702 drm_match_cea_mode((struct drm_display_mode *)m);
1519 hdata->mode_conf.pixel_clock = m->clock * 1000; 1703 hdata->mode_conf.pixel_clock = m->clock * 1000;
1704 hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
1520 1705
1521 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); 1706 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1522 hdmi_set_reg(core->v_line, 2, m->vtotal); 1707 hdmi_set_reg(core->v_line, 2, m->vtotal);
@@ -1618,9 +1803,10 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1618 hdmi_set_reg(tg->tg_3d, 1, 0x0); 1803 hdmi_set_reg(tg->tg_3d, 1, 0x0);
1619} 1804}
1620 1805
1621static void hdmi_mode_set(void *ctx, struct drm_display_mode *mode) 1806static void hdmi_mode_set(struct exynos_drm_display *display,
1807 struct drm_display_mode *mode)
1622{ 1808{
1623 struct hdmi_context *hdata = ctx; 1809 struct hdmi_context *hdata = display->ctx;
1624 struct drm_display_mode *m = mode; 1810 struct drm_display_mode *m = mode;
1625 1811
1626 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", 1812 DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -1634,16 +1820,9 @@ static void hdmi_mode_set(void *ctx, struct drm_display_mode *mode)
1634 hdmi_v14_mode_set(hdata, mode); 1820 hdmi_v14_mode_set(hdata, mode);
1635} 1821}
1636 1822
1637static void hdmi_get_max_resol(void *ctx, unsigned int *width, 1823static void hdmi_commit(struct exynos_drm_display *display)
1638 unsigned int *height)
1639{
1640 *width = MAX_WIDTH;
1641 *height = MAX_HEIGHT;
1642}
1643
1644static void hdmi_commit(void *ctx)
1645{ 1824{
1646 struct hdmi_context *hdata = ctx; 1825 struct hdmi_context *hdata = display->ctx;
1647 1826
1648 mutex_lock(&hdata->hdmi_mutex); 1827 mutex_lock(&hdata->hdmi_mutex);
1649 if (!hdata->powered) { 1828 if (!hdata->powered) {
@@ -1655,8 +1834,9 @@ static void hdmi_commit(void *ctx)
1655 hdmi_conf_apply(hdata); 1834 hdmi_conf_apply(hdata);
1656} 1835}
1657 1836
1658static void hdmi_poweron(struct hdmi_context *hdata) 1837static void hdmi_poweron(struct exynos_drm_display *display)
1659{ 1838{
1839 struct hdmi_context *hdata = display->ctx;
1660 struct hdmi_resources *res = &hdata->res; 1840 struct hdmi_resources *res = &hdata->res;
1661 1841
1662 mutex_lock(&hdata->hdmi_mutex); 1842 mutex_lock(&hdata->hdmi_mutex);
@@ -1669,6 +1849,8 @@ static void hdmi_poweron(struct hdmi_context *hdata)
1669 1849
1670 mutex_unlock(&hdata->hdmi_mutex); 1850 mutex_unlock(&hdata->hdmi_mutex);
1671 1851
1852 pm_runtime_get_sync(hdata->dev);
1853
1672 if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) 1854 if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
1673 DRM_DEBUG_KMS("failed to enable regulator bulk\n"); 1855 DRM_DEBUG_KMS("failed to enable regulator bulk\n");
1674 1856
@@ -1677,10 +1859,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
1677 clk_prepare_enable(res->sclk_hdmi); 1859 clk_prepare_enable(res->sclk_hdmi);
1678 1860
1679 hdmiphy_poweron(hdata); 1861 hdmiphy_poweron(hdata);
1862 hdmi_commit(display);
1680} 1863}
1681 1864
1682static void hdmi_poweroff(struct hdmi_context *hdata) 1865static void hdmi_poweroff(struct exynos_drm_display *display)
1683{ 1866{
1867 struct hdmi_context *hdata = display->ctx;
1684 struct hdmi_resources *res = &hdata->res; 1868 struct hdmi_resources *res = &hdata->res;
1685 1869
1686 mutex_lock(&hdata->hdmi_mutex); 1870 mutex_lock(&hdata->hdmi_mutex);
@@ -1700,30 +1884,27 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
1700 clk_disable_unprepare(res->hdmiphy); 1884 clk_disable_unprepare(res->hdmiphy);
1701 regulator_bulk_disable(res->regul_count, res->regul_bulk); 1885 regulator_bulk_disable(res->regul_count, res->regul_bulk);
1702 1886
1703 mutex_lock(&hdata->hdmi_mutex); 1887 pm_runtime_put_sync(hdata->dev);
1704 1888
1889 mutex_lock(&hdata->hdmi_mutex);
1705 hdata->powered = false; 1890 hdata->powered = false;
1706 1891
 out:
 	mutex_unlock(&hdata->hdmi_mutex);
 }
 
-static void hdmi_dpms(void *ctx, int mode)
+static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
-	struct hdmi_context *hdata = ctx;
-
 	DRM_DEBUG_KMS("mode %d\n", mode);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		if (pm_runtime_suspended(hdata->dev))
-			pm_runtime_get_sync(hdata->dev);
+		hdmi_poweron(display);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (!pm_runtime_suspended(hdata->dev))
-			pm_runtime_put_sync(hdata->dev);
+		hdmi_poweroff(display);
 		break;
 	default:
 		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -1731,30 +1912,30 @@ static void hdmi_dpms(void *ctx, int mode)
 	}
 }
 
-static struct exynos_hdmi_ops hdmi_ops = {
-	/* display */
-	.is_connected	= hdmi_is_connected,
-	.get_edid	= hdmi_get_edid,
-	.check_mode	= hdmi_check_mode,
-
-	/* manager */
+static struct exynos_drm_display_ops hdmi_display_ops = {
+	.initialize	= hdmi_initialize,
+	.create_connector = hdmi_create_connector,
+	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
-	.get_max_resol	= hdmi_get_max_resol,
-	.commit		= hdmi_commit,
 	.dpms		= hdmi_dpms,
+	.commit		= hdmi_commit,
+};
+
+static struct exynos_drm_display hdmi_display = {
+	.type = EXYNOS_DISPLAY_TYPE_HDMI,
+	.ops = &hdmi_display_ops,
 };
 
 static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 {
-	struct exynos_drm_hdmi_context *ctx = arg;
-	struct hdmi_context *hdata = ctx->ctx;
+	struct hdmi_context *hdata = arg;
 
 	mutex_lock(&hdata->hdmi_mutex);
 	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 	mutex_unlock(&hdata->hdmi_mutex);
 
-	if (ctx->drm_dev)
-		drm_helper_hpd_irq_event(ctx->drm_dev);
+	if (hdata->drm_dev)
+		drm_helper_hpd_irq_event(hdata->drm_dev);
 
 	return IRQ_HANDLED;
 }
@@ -1830,20 +2011,6 @@ fail:
 	return -ENODEV;
 }
 
-static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
-
-void hdmi_attach_ddc_client(struct i2c_client *ddc)
-{
-	if (ddc)
-		hdmi_ddc = ddc;
-}
-
-void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
-{
-	if (hdmiphy)
-		hdmi_hdmiphy = hdmiphy;
-}
-
 static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
 					(struct device *dev)
 {
@@ -1871,10 +2038,10 @@ err_data:
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
-		.data = (void *)HDMI_TYPE14,
+		.data = &exynos5_hdmi_driver_data,
 	}, {
 		.compatible = "samsung,exynos4212-hdmi",
-		.data = (void *)HDMI_TYPE14,
+		.data = &exynos4212_hdmi_driver_data,
 	}, {
 		/* end node */
 	}
@@ -1883,11 +2050,12 @@ static struct of_device_id hdmi_match_types[] = {
 static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
 	struct hdmi_context *hdata;
 	struct s5p_hdmi_platform_data *pdata;
 	struct resource *res;
 	const struct of_device_id *match;
+	struct device_node *ddc_node, *phy_node;
+	struct hdmi_driver_data *drv_data;
 	int ret;
 
 	if (!dev->of_node)
@@ -1897,25 +2065,20 @@ static int hdmi_probe(struct platform_device *pdev)
 	if (!pdata)
 		return -EINVAL;
 
-	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
-	if (!drm_hdmi_ctx)
-		return -ENOMEM;
-
 	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
 	if (!hdata)
 		return -ENOMEM;
 
 	mutex_init(&hdata->hdmi_mutex);
 
-	drm_hdmi_ctx->ctx = (void *)hdata;
-	hdata->parent_ctx = (void *)drm_hdmi_ctx;
-
-	platform_set_drvdata(pdev, drm_hdmi_ctx);
+	platform_set_drvdata(pdev, &hdmi_display);
 
 	match = of_match_node(hdmi_match_types, dev->of_node);
 	if (!match)
 		return -ENODEV;
-	hdata->type = (enum hdmi_type)match->data;
+
+	drv_data = (struct hdmi_driver_data *)match->data;
+	hdata->type = drv_data->type;
 
 	hdata->hpd_gpio = pdata->hpd_gpio;
 	hdata->dev = dev;
@@ -1938,21 +2101,34 @@ static int hdmi_probe(struct platform_device *pdev)
 	}
 
 	/* DDC i2c driver */
-	if (i2c_add_driver(&ddc_driver)) {
-		DRM_ERROR("failed to register ddc i2c driver\n");
-		return -ENOENT;
+	ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+	if (!ddc_node) {
+		DRM_ERROR("Failed to find ddc node in device tree\n");
+		return -ENODEV;
+	}
+	hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
+	if (!hdata->ddc_adpt) {
+		DRM_ERROR("Failed to get ddc i2c adapter by node\n");
+		return -ENODEV;
 	}
 
-	hdata->ddc_port = hdmi_ddc;
+	/* Not support APB PHY yet. */
+	if (drv_data->is_apb_phy)
+		return -EPERM;
 
 	/* hdmiphy i2c driver */
-	if (i2c_add_driver(&hdmiphy_driver)) {
-		DRM_ERROR("failed to register hdmiphy i2c driver\n");
-		ret = -ENOENT;
+	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
+	if (!phy_node) {
+		DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+		ret = -ENODEV;
		goto err_ddc;
+	}
+	hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
+	if (!hdata->hdmiphy_port) {
+		DRM_ERROR("Failed to get hdmi phy i2c client from node\n");
+		ret = -ENODEV;
 		goto err_ddc;
 	}
-
-	hdata->hdmiphy_port = hdmi_hdmiphy;
 
 	hdata->irq = gpio_to_irq(hdata->hpd_gpio);
 	if (hdata->irq < 0) {
@@ -1966,119 +2142,45 @@ static int hdmi_probe(struct platform_device *pdev)
 	ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
 			hdmi_irq_thread, IRQF_TRIGGER_RISING |
 			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-			"hdmi", drm_hdmi_ctx);
+			"hdmi", hdata);
 	if (ret) {
 		DRM_ERROR("failed to register hdmi interrupt\n");
 		goto err_hdmiphy;
 	}
 
-	/* Attach HDMI Driver to common hdmi. */
-	exynos_hdmi_drv_attach(drm_hdmi_ctx);
-
-	/* register specific callbacks to common hdmi. */
-	exynos_hdmi_ops_register(&hdmi_ops);
-
 	pm_runtime_enable(dev);
 
+	hdmi_display.ctx = hdata;
+	exynos_drm_display_register(&hdmi_display);
+
 	return 0;
 
 err_hdmiphy:
-	i2c_del_driver(&hdmiphy_driver);
+	put_device(&hdata->hdmiphy_port->dev);
 err_ddc:
-	i2c_del_driver(&ddc_driver);
+	put_device(&hdata->ddc_adpt->dev);
 	return ret;
 }
 
 static int hdmi_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct exynos_drm_display *display = get_hdmi_display(dev);
+	struct hdmi_context *hdata = display->ctx;
 
-	pm_runtime_disable(dev);
-
-	/* hdmiphy i2c driver */
-	i2c_del_driver(&hdmiphy_driver);
-	/* DDC i2c driver */
-	i2c_del_driver(&ddc_driver);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int hdmi_suspend(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-	struct hdmi_context *hdata = ctx->ctx;
-
-	disable_irq(hdata->irq);
-
-	hdata->hpd = false;
-	if (ctx->drm_dev)
-		drm_helper_hpd_irq_event(ctx->drm_dev);
-
-	if (pm_runtime_suspended(dev)) {
-		DRM_DEBUG_KMS("Already suspended\n");
-		return 0;
-	}
-
-	hdmi_poweroff(hdata);
+	put_device(&hdata->hdmiphy_port->dev);
+	put_device(&hdata->ddc_adpt->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
 
-static int hdmi_resume(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-	struct hdmi_context *hdata = ctx->ctx;
-
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
-	enable_irq(hdata->irq);
-
-	if (!pm_runtime_suspended(dev)) {
-		DRM_DEBUG_KMS("Already resumed\n");
-		return 0;
-	}
-
-	hdmi_poweron(hdata);
-
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int hdmi_runtime_suspend(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-	struct hdmi_context *hdata = ctx->ctx;
-
-	hdmi_poweroff(hdata);
-
-	return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-	struct hdmi_context *hdata = ctx->ctx;
-
-	hdmi_poweron(hdata);
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
-	SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
-};
-
 struct platform_driver hdmi_driver = {
 	.probe		= hdmi_probe,
 	.remove		= hdmi_remove,
 	.driver		= {
 		.name	= "exynos-hdmi",
 		.owner	= THIS_MODULE,
-		.pm	= &hdmi_pm_ops,
 		.of_match_table = hdmi_match_types,
 	},
 };
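The reworked probe above replaces module-wide i2c_add_driver() registration with devicetree lookups: the HDMI node carries "ddc" and "phy" phandles that resolve to an i2c adapter and an i2c client at probe time. A minimal sketch of that lookup pattern, assuming the same phandle names (the helper itself is illustrative, not part of the patch; assumes <linux/of.h> and <linux/i2c.h>):

static int example_hdmi_dt_lookup(struct device *dev,
				  struct i2c_adapter **ddc_adpt,
				  struct i2c_client **phy_port)
{
	struct device_node *np;

	/* follow the "ddc" phandle to the DDC bus node */
	np = of_parse_phandle(dev->of_node, "ddc", 0);
	if (!np)
		return -ENODEV;
	*ddc_adpt = of_find_i2c_adapter_by_node(np);	/* takes a reference */
	of_node_put(np);
	if (!*ddc_adpt)
		return -ENODEV;

	/* follow the "phy" phandle to the PHY's i2c client */
	np = of_parse_phandle(dev->of_node, "phy", 0);
	if (np) {
		*phy_port = of_find_i2c_device_by_node(np);
		of_node_put(np);
		if (*phy_port)
			return 0;
	}

	put_device(&(*ddc_adpt)->dev);	/* drop the DDC reference on failure */
	return -ENODEV;
}

Both of_find_* helpers take a device reference, which is why the new error and remove paths in the patch drop it again with put_device().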
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2dfa48c76f54..ce288818d2c0 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,10 +36,13 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_hdmi.h"
 #include "exynos_drm_iommu.h"
+#include "exynos_mixer.h"
 
-#define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_mixer_manager(dev)	platform_get_drvdata(to_platform_device(dev))
+
+#define MIXER_WIN_NR		3
+#define MIXER_DEFAULT_WIN	0
 
 struct hdmi_win_data {
 	dma_addr_t		dma_addr;
@@ -82,6 +85,7 @@ enum mixer_version_id {
 };
 
 struct mixer_context {
+	struct platform_device *pdev;
 	struct device		*dev;
 	struct drm_device	*drm_dev;
 	int			pipe;
@@ -94,7 +98,6 @@ struct mixer_context {
 	struct mixer_resources	mixer_res;
 	struct hdmi_win_data	win_data[MIXER_WIN_NR];
 	enum mixer_version_id	mxr_ver;
-	void			*parent_ctx;
 	wait_queue_head_t	wait_vsync_queue;
 	atomic_t		wait_vsync_event;
 };
@@ -685,31 +688,196 @@ static void mixer_win_reset(struct mixer_context *ctx)
 	spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static int mixer_iommu_on(void *ctx, bool enable)
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+	struct mixer_context *ctx = arg;
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val, base, shadow;
+
+	spin_lock(&res->reg_slock);
+
+	/* read interrupt status for handling and clearing flags for VSYNC */
+	val = mixer_reg_read(res, MXR_INT_STATUS);
+
+	/* handling VSYNC */
+	if (val & MXR_INT_STATUS_VSYNC) {
+		/* interlace scan need to check shadow register */
+		if (ctx->interlace) {
+			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+			if (base != shadow)
+				goto out;
+
+			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+			if (base != shadow)
+				goto out;
+		}
+
+		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+		/* set wait vsync event to zero and wake up queue. */
+		if (atomic_read(&ctx->wait_vsync_event)) {
+			atomic_set(&ctx->wait_vsync_event, 0);
+			wake_up(&ctx->wait_vsync_queue);
+		}
+	}
+
+out:
+	/* clear interrupts */
+	if (~val & MXR_INT_EN_VSYNC) {
+		/* vsync interrupt use different bit for read and clear */
+		val &= ~MXR_INT_EN_VSYNC;
+		val |= MXR_INT_CLEAR_VSYNC;
+	}
+	mixer_reg_write(res, MXR_INT_STATUS, val);
+
+	spin_unlock(&res->reg_slock);
+
+	return IRQ_HANDLED;
+}
+
+static int mixer_resources_init(struct mixer_context *mixer_ctx)
 {
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
-	struct mixer_context *mdata = ctx;
-	struct drm_device *drm_dev;
+	struct device *dev = &mixer_ctx->pdev->dev;
+	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+	struct resource *res;
+	int ret;
 
-	drm_hdmi_ctx = mdata->parent_ctx;
-	drm_dev = drm_hdmi_ctx->drm_dev;
+	spin_lock_init(&mixer_res->reg_slock);
 
-	if (is_drm_iommu_supported(drm_dev)) {
-		if (enable)
-			return drm_iommu_attach_device(drm_dev, mdata->dev);
+	mixer_res->mixer = devm_clk_get(dev, "mixer");
+	if (IS_ERR(mixer_res->mixer)) {
+		dev_err(dev, "failed to get clock 'mixer'\n");
+		return -ENODEV;
+	}
 
-		drm_iommu_detach_device(drm_dev, mdata->dev);
+	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+	if (IS_ERR(mixer_res->sclk_hdmi)) {
+		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+		return -ENODEV;
+	}
+	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		return -ENXIO;
 	}
+
+	mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+							resource_size(res));
+	if (mixer_res->mixer_regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		return -ENXIO;
+	}
+
+	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(dev, "get interrupt resource failed.\n");
+		return -ENXIO;
+	}
+
+	ret = devm_request_irq(dev, res->start, mixer_irq_handler,
+						0, "drm_mixer", mixer_ctx);
+	if (ret) {
+		dev_err(dev, "request interrupt failed.\n");
+		return ret;
+	}
+	mixer_res->irq = res->start;
+
 	return 0;
 }
 
-static int mixer_enable_vblank(void *ctx, int pipe)
+static int vp_resources_init(struct mixer_context *mixer_ctx)
 {
-	struct mixer_context *mixer_ctx = ctx;
-	struct mixer_resources *res = &mixer_ctx->mixer_res;
+	struct device *dev = &mixer_ctx->pdev->dev;
+	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+	struct resource *res;
+
+	mixer_res->vp = devm_clk_get(dev, "vp");
+	if (IS_ERR(mixer_res->vp)) {
+		dev_err(dev, "failed to get clock 'vp'\n");
+		return -ENODEV;
+	}
+	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+	if (IS_ERR(mixer_res->sclk_mixer)) {
+		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+		return -ENODEV;
+	}
+	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
+	if (IS_ERR(mixer_res->sclk_dac)) {
+		dev_err(dev, "failed to get clock 'sclk_dac'\n");
+		return -ENODEV;
+	}
+
+	if (mixer_res->sclk_hdmi)
+		clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		return -ENXIO;
+	}
 
+	mixer_res->vp_regs = devm_ioremap(dev, res->start,
+							resource_size(res));
+	if (mixer_res->vp_regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int mixer_initialize(struct exynos_drm_manager *mgr,
+			struct drm_device *drm_dev, int pipe)
+{
+	int ret;
+	struct mixer_context *mixer_ctx = mgr->ctx;
+
+	mixer_ctx->drm_dev = drm_dev;
 	mixer_ctx->pipe = pipe;
 
+	/* acquire resources: regs, irqs, clocks */
+	ret = mixer_resources_init(mixer_ctx);
+	if (ret) {
+		DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+		return ret;
+	}
+
+	if (mixer_ctx->vp_enabled) {
+		/* acquire vp resources: regs, irqs, clocks */
+		ret = vp_resources_init(mixer_ctx);
+		if (ret) {
+			DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+			return ret;
+		}
+	}
+
+	if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
+		return 0;
+
+	return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
+{
+	struct mixer_context *mixer_ctx = mgr->ctx;
+
+	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
+		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
+{
+	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+	if (!mixer_ctx->powered) {
+		mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+		return 0;
+	}
+
 	/* enable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
 			MXR_INT_EN_VSYNC);
@@ -717,19 +885,19 @@ static int mixer_enable_vblank(void *ctx, int pipe)
 	return 0;
 }
 
-static void mixer_disable_vblank(void *ctx)
+static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_context *mixer_ctx = mgr->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
 	/* disable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_win_mode_set(void *ctx,
+static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
 			struct exynos_drm_overlay *overlay)
 {
-	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_context *mixer_ctx = mgr->ctx;
 	struct hdmi_win_data *win_data;
 	int win;
 
@@ -778,9 +946,10 @@ static void mixer_win_mode_set(void *ctx,
 	win_data->scan_flags = overlay->scan_flag;
 }
 
-static void mixer_win_commit(void *ctx, int win)
+static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
 {
-	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_context *mixer_ctx = mgr->ctx;
+	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
 
 	DRM_DEBUG_KMS("win: %d\n", win);
 
@@ -799,10 +968,11 @@ static void mixer_win_commit(void *ctx, int win)
 	mixer_ctx->win_data[win].enabled = true;
 }
 
-static void mixer_win_disable(void *ctx, int win)
+static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 {
-	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_context *mixer_ctx = mgr->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
+	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
 	unsigned long flags;
 
 	DRM_DEBUG_KMS("win: %d\n", win);
@@ -826,32 +996,9 @@ static void mixer_win_disable(void *ctx, int win)
 	mixer_ctx->win_data[win].enabled = false;
 }
 
-static int mixer_check_mode(void *ctx, struct drm_display_mode *mode)
+static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = ctx;
-	u32 w, h;
-
-	w = mode->hdisplay;
-	h = mode->vdisplay;
-
-	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
-		mode->hdisplay, mode->vdisplay, mode->vrefresh,
-		(mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
-	if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16 ||
-		mixer_ctx->mxr_ver == MXR_VER_128_0_0_184)
-		return 0;
-
-	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
-		(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
-		(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
-		return 0;
-
-	return -EINVAL;
-}
-static void mixer_wait_for_vblank(void *ctx)
-{
-	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_context *mixer_ctx = mgr->ctx;
 
 	mutex_lock(&mixer_ctx->mixer_mutex);
 	if (!mixer_ctx->powered) {
@@ -872,21 +1019,23 @@ static void mixer_wait_for_vblank(void *ctx)
 	DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-static void mixer_window_suspend(struct mixer_context *ctx)
+static void mixer_window_suspend(struct exynos_drm_manager *mgr)
 {
+	struct mixer_context *ctx = mgr->ctx;
 	struct hdmi_win_data *win_data;
 	int i;
 
 	for (i = 0; i < MIXER_WIN_NR; i++) {
 		win_data = &ctx->win_data[i];
 		win_data->resume = win_data->enabled;
-		mixer_win_disable(ctx, i);
+		mixer_win_disable(mgr, i);
 	}
-	mixer_wait_for_vblank(ctx);
+	mixer_wait_for_vblank(mgr);
 }
 
-static void mixer_window_resume(struct mixer_context *ctx)
+static void mixer_window_resume(struct exynos_drm_manager *mgr)
 {
+	struct mixer_context *ctx = mgr->ctx;
 	struct hdmi_win_data *win_data;
 	int i;
 
@@ -894,11 +1043,14 @@ static void mixer_window_resume(struct mixer_context *ctx)
 		win_data = &ctx->win_data[i];
 		win_data->enabled = win_data->resume;
 		win_data->resume = false;
+		if (win_data->enabled)
+			mixer_win_commit(mgr, i);
 	}
 }
 
-static void mixer_poweron(struct mixer_context *ctx)
+static void mixer_poweron(struct exynos_drm_manager *mgr)
 {
+	struct mixer_context *ctx = mgr->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -909,6 +1061,8 @@ static void mixer_poweron(struct mixer_context *ctx)
 	ctx->powered = true;
 	mutex_unlock(&ctx->mixer_mutex);
 
+	pm_runtime_get_sync(ctx->dev);
+
 	clk_prepare_enable(res->mixer);
 	if (ctx->vp_enabled) {
 		clk_prepare_enable(res->vp);
@@ -918,11 +1072,12 @@ static void mixer_poweron(struct mixer_context *ctx)
 	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
 	mixer_win_reset(ctx);
 
-	mixer_window_resume(ctx);
+	mixer_window_resume(mgr);
 }
 
-static void mixer_poweroff(struct mixer_context *ctx)
+static void mixer_poweroff(struct exynos_drm_manager *mgr)
 {
+	struct mixer_context *ctx = mgr->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -930,7 +1085,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
 		goto out;
 	mutex_unlock(&ctx->mixer_mutex);
 
-	mixer_window_suspend(ctx);
+	mixer_window_suspend(mgr);
 
 	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
 
@@ -940,6 +1095,8 @@ static void mixer_poweroff(struct mixer_context *ctx)
 		clk_disable_unprepare(res->sclk_mixer);
 	}
 
+	pm_runtime_put_sync(ctx->dev);
+
 	mutex_lock(&ctx->mixer_mutex);
 	ctx->powered = false;
 
@@ -947,20 +1104,16 @@ out:
 	mutex_unlock(&ctx->mixer_mutex);
 }
 
-static void mixer_dpms(void *ctx, int mode)
+static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
 {
-	struct mixer_context *mixer_ctx = ctx;
-
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		if (pm_runtime_suspended(mixer_ctx->dev))
-			pm_runtime_get_sync(mixer_ctx->dev);
+		mixer_poweron(mgr);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (!pm_runtime_suspended(mixer_ctx->dev))
-			pm_runtime_put_sync(mixer_ctx->dev);
+		mixer_poweroff(mgr);
 		break;
 	default:
 		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -968,169 +1121,42 @@ static void mixer_dpms(void *ctx, int mode)
 	}
 }
 
-static struct exynos_mixer_ops mixer_ops = {
-	/* manager */
-	.iommu_on		= mixer_iommu_on,
-	.enable_vblank		= mixer_enable_vblank,
-	.disable_vblank		= mixer_disable_vblank,
-	.wait_for_vblank	= mixer_wait_for_vblank,
-	.dpms			= mixer_dpms,
-
-	/* overlay */
-	.win_mode_set		= mixer_win_mode_set,
-	.win_commit		= mixer_win_commit,
-	.win_disable		= mixer_win_disable,
-
-	/* display */
-	.check_mode		= mixer_check_mode,
-};
-
-static irqreturn_t mixer_irq_handler(int irq, void *arg)
-{
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
-	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-	struct mixer_resources *res = &ctx->mixer_res;
-	u32 val, base, shadow;
-
-	spin_lock(&res->reg_slock);
-
-	/* read interrupt status for handling and clearing flags for VSYNC */
-	val = mixer_reg_read(res, MXR_INT_STATUS);
-
-	/* handling VSYNC */
-	if (val & MXR_INT_STATUS_VSYNC) {
-		/* interlace scan need to check shadow register */
-		if (ctx->interlace) {
-			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
-			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
-			if (base != shadow)
-				goto out;
-
-			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
-			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
-			if (base != shadow)
-				goto out;
-		}
-
-		drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
-				ctx->pipe);
-
-		/* set wait vsync event to zero and wake up queue. */
-		if (atomic_read(&ctx->wait_vsync_event)) {
-			atomic_set(&ctx->wait_vsync_event, 0);
-			wake_up(&ctx->wait_vsync_queue);
-		}
-	}
-
-out:
-	/* clear interrupts */
-	if (~val & MXR_INT_EN_VSYNC) {
-		/* vsync interrupt use different bit for read and clear */
-		val &= ~MXR_INT_EN_VSYNC;
-		val |= MXR_INT_CLEAR_VSYNC;
-	}
-	mixer_reg_write(res, MXR_INT_STATUS, val);
-
-	spin_unlock(&res->reg_slock);
-
-	return IRQ_HANDLED;
-}
-
-static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
-				struct platform_device *pdev)
+/* Only valid for Mixer version 16.0.33.0 */
+int mixer_check_mode(struct drm_display_mode *mode)
 {
-	struct mixer_context *mixer_ctx = ctx->ctx;
-	struct device *dev = &pdev->dev;
-	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
-	struct resource *res;
-	int ret;
-
-	spin_lock_init(&mixer_res->reg_slock);
-
-	mixer_res->mixer = devm_clk_get(dev, "mixer");
-	if (IS_ERR(mixer_res->mixer)) {
-		dev_err(dev, "failed to get clock 'mixer'\n");
-		return -ENODEV;
-	}
-
-	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
-	if (IS_ERR(mixer_res->sclk_hdmi)) {
-		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-		return -ENODEV;
-	}
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
-		dev_err(dev, "get memory resource failed.\n");
-		return -ENXIO;
-	}
+	u32 w, h;
 
-	mixer_res->mixer_regs = devm_ioremap(dev, res->start,
-						resource_size(res));
-	if (mixer_res->mixer_regs == NULL) {
-		dev_err(dev, "register mapping failed.\n");
-		return -ENXIO;
-	}
+	w = mode->hdisplay;
+	h = mode->vdisplay;
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL) {
-		dev_err(dev, "get interrupt resource failed.\n");
-		return -ENXIO;
-	}
+	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
		mode->hdisplay, mode->vdisplay, mode->vrefresh,
		(mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
 
-	ret = devm_request_irq(dev, res->start, mixer_irq_handler,
-			0, "drm_mixer", ctx);
-	if (ret) {
-		dev_err(dev, "request interrupt failed.\n");
-		return ret;
-	}
-	mixer_res->irq = res->start;
+	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
		(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
		(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
		return 0;
 
-	return 0;
+	return -EINVAL;
 }
 
-static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
-			     struct platform_device *pdev)
-{
-	struct mixer_context *mixer_ctx = ctx->ctx;
-	struct device *dev = &pdev->dev;
-	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
-	struct resource *res;
-
-	mixer_res->vp = devm_clk_get(dev, "vp");
-	if (IS_ERR(mixer_res->vp)) {
-		dev_err(dev, "failed to get clock 'vp'\n");
-		return -ENODEV;
-	}
-	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
-	if (IS_ERR(mixer_res->sclk_mixer)) {
-		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-		return -ENODEV;
-	}
-	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
-	if (IS_ERR(mixer_res->sclk_dac)) {
-		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		return -ENODEV;
-	}
-
-	if (mixer_res->sclk_hdmi)
-		clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res == NULL) {
-		dev_err(dev, "get memory resource failed.\n");
-		return -ENXIO;
-	}
-
-	mixer_res->vp_regs = devm_ioremap(dev, res->start,
-						resource_size(res));
-	if (mixer_res->vp_regs == NULL) {
-		dev_err(dev, "register mapping failed.\n");
-		return -ENXIO;
-	}
+static struct exynos_drm_manager_ops mixer_manager_ops = {
+	.initialize		= mixer_initialize,
+	.remove			= mixer_mgr_remove,
+	.dpms			= mixer_dpms,
+	.enable_vblank		= mixer_enable_vblank,
+	.disable_vblank		= mixer_disable_vblank,
+	.wait_for_vblank	= mixer_wait_for_vblank,
+	.win_mode_set		= mixer_win_mode_set,
+	.win_commit		= mixer_win_commit,
+	.win_disable		= mixer_win_disable,
+};
 
-	return 0;
-}
+static struct exynos_drm_manager mixer_manager = {
+	.type			= EXYNOS_DISPLAY_TYPE_HDMI,
+	.ops			= &mixer_manager_ops,
+};
 
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
 	.version = MXR_VER_128_0_0_184,
@@ -1177,21 +1203,16 @@ static struct of_device_id mixer_match_types[] = {
 static int mixer_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
 	struct mixer_context *ctx;
 	struct mixer_drv_data *drv;
-	int ret;
 
 	dev_info(dev, "probe start\n");
 
-	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
-								GFP_KERNEL);
-	if (!drm_hdmi_ctx)
-		return -ENOMEM;
-
-	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		DRM_ERROR("failed to alloc mixer context.\n");
 		return -ENOMEM;
+	}
 
 	mutex_init(&ctx->mixer_mutex);
 
@@ -1204,46 +1225,20 @@ static int mixer_probe(struct platform_device *pdev)
 			platform_get_device_id(pdev)->driver_data;
 	}
 
+	ctx->pdev = pdev;
 	ctx->dev = dev;
-	ctx->parent_ctx = (void *)drm_hdmi_ctx;
-	drm_hdmi_ctx->ctx = (void *)ctx;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->mxr_ver = drv->version;
 	init_waitqueue_head(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
-	platform_set_drvdata(pdev, drm_hdmi_ctx);
-
-	/* acquire resources: regs, irqs, clocks */
-	ret = mixer_resources_init(drm_hdmi_ctx, pdev);
-	if (ret) {
-		DRM_ERROR("mixer_resources_init failed\n");
-		goto fail;
-	}
-
-	if (ctx->vp_enabled) {
-		/* acquire vp resources: regs, irqs, clocks */
-		ret = vp_resources_init(drm_hdmi_ctx, pdev);
-		if (ret) {
-			DRM_ERROR("vp_resources_init failed\n");
-			goto fail;
-		}
-	}
-
-	/* attach mixer driver to common hdmi. */
-	exynos_mixer_drv_attach(drm_hdmi_ctx);
-
-	/* register specific callback point to common hdmi. */
-	exynos_mixer_ops_register(&mixer_ops);
+	mixer_manager.ctx = ctx;
+	platform_set_drvdata(pdev, &mixer_manager);
+	exynos_drm_manager_register(&mixer_manager);
 
 	pm_runtime_enable(dev);
 
 	return 0;
-
-
-fail:
-	dev_info(dev, "probe failed\n");
-	return ret;
 }
 
 static int mixer_remove(struct platform_device *pdev)
@@ -1255,70 +1250,10 @@ static int mixer_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int mixer_suspend(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
-	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
-	if (pm_runtime_suspended(dev)) {
-		DRM_DEBUG_KMS("Already suspended\n");
-		return 0;
-	}
-
-	mixer_poweroff(ctx);
-
-	return 0;
-}
-
-static int mixer_resume(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
-	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
-	if (!pm_runtime_suspended(dev)) {
-		DRM_DEBUG_KMS("Already resumed\n");
-		return 0;
-	}
-
-	mixer_poweron(ctx);
-
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int mixer_runtime_suspend(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
-	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
-	mixer_poweroff(ctx);
-
-	return 0;
-}
-
-static int mixer_runtime_resume(struct device *dev)
-{
-	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
-	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
-	mixer_poweron(ctx);
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops mixer_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
-	SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
-};
-
 struct platform_driver mixer_driver = {
 	.driver		= {
 		.name	= "exynos-mixer",
 		.owner	= THIS_MODULE,
-		.pm	= &mixer_pm_ops,
 		.of_match_table = mixer_match_types,
 	},
 	.probe = mixer_probe,
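The mixer now hands the core a single exynos_drm_manager whose ops table carries every callback, with mgr->ctx pointing back at the driver's private state. Reduced to its essentials, the registration pattern looks like this (a sketch with illustrative names; only the type/ops/ctx shape is taken from the diff):

#include <linux/platform_device.h>

struct drm_device;
struct example_manager;

struct example_manager_ops {
	int (*initialize)(struct example_manager *mgr,
			  struct drm_device *drm_dev, int pipe);
	void (*dpms)(struct example_manager *mgr, int mode);
	/* ...vblank and win_* hooks, as in exynos_drm_manager_ops... */
};

struct example_manager {
	int type;			/* e.g. EXYNOS_DISPLAY_TYPE_HDMI */
	struct example_manager_ops *ops;
	void *ctx;			/* driver-private state */
};

/* probe-time wiring: callbacks later recover the context via mgr->ctx */
static int example_register(struct platform_device *pdev,
			    struct example_manager *mgr, void *ctx)
{
	mgr->ctx = ctx;
	platform_set_drvdata(pdev, mgr);
	/* the real driver then calls exynos_drm_manager_register(mgr) */
	return 0;
}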
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 000000000000..3811e417f0e9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+/* This function returns 0 if the given timing is valid for the mixer */
+int mixer_check_mode(struct drm_display_mode *mode);
+
+#endif
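With the helper exported here, the HDMI side can reject timings the mixer cannot scan out during mode validation. A hypothetical caller, not part of this patch:

#include <drm/drmP.h>
#include "exynos_mixer.h"

static enum drm_mode_status example_mode_valid(struct drm_display_mode *mode)
{
	/* mixer_check_mode() returns 0 when the mixer can scan out the timing */
	if (mixer_check_mode(mode))
		return MODE_BAD;

	return MODE_OK;
}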
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index e9064dd9045d..b15315576376 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -13,9 +13,11 @@ gma500_gfx-y += \
 	  intel_i2c.o \
 	  intel_gmbus.o \
 	  mmu.o \
+	  blitter.o \
 	  power.o \
 	  psb_drv.o \
 	  gma_display.o \
+	  gma_device.o \
 	  psb_intel_display.o \
 	  psb_intel_lvds.o \
 	  psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c
new file mode 100644
index 000000000000..9cd54a6fb899
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2014, Patrik Jakobsson
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
15 */
16
17#include "psb_drv.h"
18
19#include "blitter.h"
20#include "psb_reg.h"
21
22/* Wait for the blitter to be completely idle */
23int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
24{
25 unsigned long stop = jiffies + HZ;
26 int busy = 1;
27
28 /* NOP for Cedarview */
29 if (IS_CDV(dev_priv->dev))
30 return 0;
31
32 /* First do a quick check */
33 if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
34 ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
35 return 0;
36
37 do {
38 busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
39 } while (busy && !time_after_eq(jiffies, stop));
40
41 if (busy)
42 return -EBUSY;
43
44 do {
45 busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
46 _PSB_C2B_STATUS_BUSY) != 0);
47 } while (busy && !time_after_eq(jiffies, stop));
48
49 /* If still busy, we probably have a hang */
50 return (busy) ? -EBUSY : 0;
51}
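gma_blt_wait_idle() is the classic two-stage poll: a cheap fast-path check, then bounded spinning against a jiffies deadline so a hung blitter cannot wedge the caller forever. The same idiom generalized, as a sketch (names illustrative):

#include <linux/types.h>
#include <linux/jiffies.h>

static int example_poll_idle(bool (*is_busy)(void *), void *arg,
			     unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	if (!is_busy(arg))		/* fast path: already idle */
		return 0;

	do {
		if (!is_busy(arg))
			return 0;
		cpu_relax();		/* be polite while busy-waiting */
	} while (!time_after_eq(jiffies, deadline));

	return -EBUSY;			/* still busy: probably a hang */
}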
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
new file mode 100644
index 000000000000..b83648df590d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef __BLITTER_H
+#define __BLITTER_H
+
+extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 5a9a6a3063a8..3531f90e53d0 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -26,6 +26,7 @@
 #include "psb_intel_reg.h"
 #include "intel_bios.h"
 #include "cdv_device.h"
+#include "gma_device.h"
 
 #define VGA_SR_INDEX		0x3c4
 #define VGA_SR_DATA		0x3c5
@@ -426,43 +427,6 @@ static int cdv_power_up(struct drm_device *dev)
 	return 0;
 }
 
-/* FIXME ? - shared with Poulsbo */
-static void cdv_get_core_freq(struct drm_device *dev)
-{
-	uint32_t clock;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
-	struct drm_psb_private *dev_priv = dev->dev_private;
-
-	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
-	pci_read_config_dword(pci_root, 0xD4, &clock);
-	pci_dev_put(pci_root);
-
-	switch (clock & 0x07) {
-	case 0:
-		dev_priv->core_freq = 100;
-		break;
-	case 1:
-		dev_priv->core_freq = 133;
-		break;
-	case 2:
-		dev_priv->core_freq = 150;
-		break;
-	case 3:
-		dev_priv->core_freq = 178;
-		break;
-	case 4:
-		dev_priv->core_freq = 200;
-		break;
-	case 5:
-	case 6:
-	case 7:
-		dev_priv->core_freq = 266;
-		break;
-	default:
-		dev_priv->core_freq = 0;
-	}
-}
-
 static void cdv_hotplug_work_func(struct work_struct *work)
 {
 	struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
@@ -618,7 +582,7 @@ static int cdv_chip_setup(struct drm_device *dev)
 	if (pci_enable_msi(dev->pdev))
 		dev_warn(dev->dev, "Enabling MSI failed!\n");
 	dev_priv->regmap = cdv_regmap;
-	cdv_get_core_freq(dev);
+	gma_get_core_freq(dev);
 	psb_intel_opregion_init(dev);
 	psb_intel_init_bios(dev);
 	cdv_hotplug_enable(dev, false);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 661af492173d..c18268cd516e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -81,13 +81,6 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
-				     const struct drm_display_mode *mode,
-				     struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
 			struct drm_display_mode *mode,
 			struct drm_display_mode *adjusted_mode)
@@ -224,7 +217,7 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
 
 static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
 	.dpms = cdv_intel_crt_dpms,
-	.mode_fixup = cdv_intel_crt_mode_fixup,
+	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.commit = gma_encoder_commit,
 	.mode_set = cdv_intel_crt_mode_set,
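The no-op callback removed here (and its twin in cdv_intel_hdmi.c below) is folded into a shared helper. Judging by the functions it replaces, gma_encoder_mode_fixup() presumably reduces to accepting the mode unchanged:

/* presumed shape of the shared helper; name from the diff, body inferred */
bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
			    const struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	return true;	/* accept the requested mode unmodified */
}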
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 8fbfa06da62d..66727328832d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -412,8 +412,11 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
 			      int refclk,
 			      struct gma_clock_t *best_clock)
 {
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct gma_clock_t clock;
-	if (refclk == 27000) {
+
+	switch (refclk) {
+	case 27000:
 		if (target < 200000) {
 			clock.p1 = 2;
 			clock.p2 = 10;
@@ -427,7 +430,9 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
 			clock.m1 = 0;
 			clock.m2 = 98;
 		}
-	} else if (refclk == 100000) {
+		break;
+
+	case 100000:
 		if (target < 200000) {
 			clock.p1 = 2;
 			clock.p2 = 10;
@@ -441,12 +446,13 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
 			clock.m1 = 0;
 			clock.m2 = 133;
 		}
-	} else
+		break;
+
+	default:
 		return false;
-	clock.m = clock.m2 + 2;
-	clock.p = clock.p1 * clock.p2;
-	clock.vco = (refclk * clock.m) / clock.n;
-	clock.dot = clock.vco / clock.p;
+	}
+
+	gma_crtc->clock_funcs->clock(refclk, &clock);
 	memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
 	return true;
 }
@@ -463,54 +469,11 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
 	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	gma_crtc = to_gma_crtc(crtc);
 
-	if (crtc->fb == NULL || !gma_crtc->active)
+	if (crtc->primary->fb == NULL || !gma_crtc->active)
 		return false;
 	return true;
 }
 
-static bool cdv_intel_single_pipe_active (struct drm_device *dev)
-{
-	uint32_t pipe_enabled = 0;
-
-	if (cdv_intel_pipe_enabled(dev, 0))
-		pipe_enabled |= FIFO_PIPEA;
-
-	if (cdv_intel_pipe_enabled(dev, 1))
-		pipe_enabled |= FIFO_PIPEB;
-
-
-	DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
-
-	if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
-		return true;
-	else
-		return false;
-}
-
-static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
-{
-	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
-
-	if (gma_crtc->pipe != 1)
-		return false;
-
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct gma_encoder *gma_encoder =
-			gma_attached_encoder(connector);
-
-		if (!connector->encoder
-		    || connector->encoder->crtc != crtc)
-			continue;
-
-		if (gma_encoder->type == INTEL_OUTPUT_LVDS)
-			return true;
-	}
-
-	return false;
-}
-
 void cdv_disable_sr(struct drm_device *dev)
 {
 	if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -535,8 +498,10 @@ void cdv_disable_sr(struct drm_device *dev)
 void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 
-	if (cdv_intel_single_pipe_active(dev)) {
+	/* Is only one pipe enabled? */
+	if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
 		u32 fw;
 
 		fw = REG_READ(DSPFW1);
@@ -557,7 +522,9 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
 
 	/* ignore FW4 */
 
-	if (is_pipeb_lvds(dev, crtc)) {
+	/* Is pipe b lvds ? */
+	if (gma_crtc->pipe == 1 &&
+	    gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 		REG_WRITE(DSPFW5, 0x00040330);
 	} else {
 		fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
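The rewritten watermark check leans on XOR: for two pipe states, a ^ b is true exactly when one pipe is enabled and the other is not, which is what the deleted cdv_intel_single_pipe_active() computed via a bitmask. In isolation:

static inline bool exactly_one_pipe_enabled(bool pipe_a, bool pipe_b)
{
	return pipe_a ^ pipe_b;	/* true iff exactly one pipe is active */
}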
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 0490ce36b53f..9ff30c2efadb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1693,7 +1693,7 @@ done:
 		struct drm_crtc *crtc = encoder->base.crtc;
 		drm_crtc_helper_set_mode(crtc, &crtc->mode,
 					 crtc->x, crtc->y,
-					 crtc->fb);
+					 crtc->primary->fb);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1c0d723b8d24..b99084b3f706 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -89,13 +89,6 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
 	REG_READ(hdmi_priv->hdmi_reg);
 }
 
-static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -199,7 +192,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
 		    crtc->saved_mode.vdisplay != 0) {
 			if (centre) {
 				if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
-					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
 					return -1;
 			} else {
 				struct drm_encoder_helper_funcs *helpers
@@ -262,7 +255,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
 
 static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
 	.dpms = cdv_hdmi_dpms,
-	.mode_fixup = cdv_hdmi_mode_fixup,
+	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.mode_set = cdv_hdmi_mode_set,
 	.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 20e08e65d46c..8ecc920fc26d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -494,7 +494,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 						  &crtc->saved_mode,
 						  encoder->crtc->x,
 						  encoder->crtc->y,
-						  encoder->crtc->fb))
+						  encoder->crtc->primary->fb))
 				return -1;
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {
@@ -712,6 +712,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
 	 * preferred mode is the right one.
 	 */
+	mutex_lock(&dev->mode_config.mutex);
 	psb_intel_ddc_get_modes(connector,
 				&gma_encoder->ddc_bus->adapter);
 	list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -772,10 +773,12 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	}
 
 out:
+	mutex_unlock(&dev->mode_config.mutex);
 	drm_sysfs_connector_add(connector);
 	return;
 
 failed_find:
+	mutex_unlock(&dev->mode_config.mutex);
 	printk(KERN_ERR "Failed find\n");
 	if (gma_encoder->ddc_bus)
 		psb_intel_i2c_destroy(gma_encoder->ddc_bus);
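The two hunks above bracket the DDC probe and the probed_modes walk with dev->mode_config.mutex and add the matching unlock on the failure path. The invariant, reduced to a skeleton with illustrative names:

static void example_probe_fixed_mode(struct drm_device *dev,
				     struct drm_connector *connector)
{
	struct drm_display_mode *scan;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		/* pick a preferred fixed-panel mode */
	}
	mutex_unlock(&dev->mode_config.mutex);	/* every exit path must unlock */
}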
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 94b3fec22c28..e7fcc148f333 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -319,7 +319,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
 {
 	struct gtt_range *backing;
 	/* Begin by trying to use stolen memory backing */
-	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
 	if (backing) {
 		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
 		return backing;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index e2db48a81ed0..c707fa6fca85 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,9 +62,6 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
62 int ret = 0; 62 int ret = 0;
63 struct drm_gem_object *obj; 63 struct drm_gem_object *obj;
64 64
65 if (!(dev->driver->driver_features & DRIVER_GEM))
66 return -ENODEV;
67
68 mutex_lock(&dev->struct_mutex); 65 mutex_lock(&dev->struct_mutex);
69 66
70 /* GEM does all our handle to object mapping */ 67 /* GEM does all our handle to object mapping */
@@ -98,8 +95,8 @@ unlock:
98 * it so that userspace can speak about it. This does the core work 95 * it so that userspace can speak about it. This does the core work
99 * for the various methods that do/will create GEM objects for things 96 * for the various methods that do/will create GEM objects for things
100 */ 97 */
101static int psb_gem_create(struct drm_file *file, 98int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
102 struct drm_device *dev, uint64_t size, uint32_t *handlep) 99 u32 *handlep, int stolen, u32 align)
103{ 100{
104 struct gtt_range *r; 101 struct gtt_range *r;
105 int ret; 102 int ret;
@@ -109,7 +106,7 @@ static int psb_gem_create(struct drm_file *file,
109 106
110 /* Allocate our object - for now a direct gtt range which is not 107 /* Allocate our object - for now a direct gtt range which is not
111 stolen memory backed */ 108 stolen memory backed */
112 r = psb_gtt_alloc_range(dev, size, "gem", 0); 109 r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
113 if (r == NULL) { 110 if (r == NULL) {
114 dev_err(dev->dev, "no memory for %lld byte GEM object\n", size); 111 dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
115 return -ENOSPC; 112 return -ENOSPC;
@@ -153,7 +150,8 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
153{ 150{
154 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); 151 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
155 args->size = args->pitch * args->height; 152 args->size = args->pitch * args->height;
156 return psb_gem_create(file, dev, args->size, &args->handle); 153 return psb_gem_create(file, dev, args->size, &args->handle, 0,
154 PAGE_SIZE);
157} 155}
158 156
159/** 157/**
@@ -229,47 +227,3 @@ fail:
229 return VM_FAULT_SIGBUS; 227 return VM_FAULT_SIGBUS;
230 } 228 }
231} 229}
232
233static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
234 int size, u32 *handle)
235{
236 struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
237 if (gtt == NULL)
238 return -ENOMEM;
239
240 drm_gem_private_object_init(dev, &gtt->gem, size);
241 if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
242 return 0;
243
244 drm_gem_object_release(&gtt->gem);
245 psb_gtt_free_range(dev, gtt);
246 return -ENOMEM;
247}
248
249/*
250 * GEM interfaces for our specific client
251 */
252int psb_gem_create_ioctl(struct drm_device *dev, void *data,
253 struct drm_file *file)
254{
255 struct drm_psb_gem_create *args = data;
256 int ret;
257 if (args->flags & GMA_GEM_CREATE_STOLEN) {
258 ret = psb_gem_create_stolen(file, dev, args->size,
259 &args->handle);
260 if (ret == 0)
261 return 0;
262 /* Fall throguh */
263 args->flags &= ~GMA_GEM_CREATE_STOLEN;
264 }
265 return psb_gem_create(file, dev, args->size, &args->handle);
266}
267
268int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
269 struct drm_file *file)
270{
271 struct drm_psb_gem_mmap *args = data;
272 return dev->driver->dumb_map_offset(file, dev,
273 args->handle, &args->offset);
274}
275
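
Note on the gem.c hunk above: psb_gem_create() loses its static and gains explicit stolen/align parameters so the stolen-backed framebuffer path can share the same allocator, while the driver-private GEM create/mmap ioctls and psb_gem_create_stolen() are dropped from this file. The dumb-buffer sizing itself is unchanged: round bpp up to whole bytes, align the pitch to 64 bytes, multiply by height. A minimal standalone model of that math, where ALIGN_UP and dumb_size are illustrative names, not kernel API:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    /* Mirrors the psb_gem_dumb_create() arithmetic shown above */
    static uint64_t dumb_size(uint32_t width, uint32_t height, uint32_t bpp,
                              uint32_t *pitch)
    {
            *pitch = (uint32_t)ALIGN_UP(width * ((bpp + 7) / 8), 64);
            return (uint64_t)*pitch * height;
    }

    int main(void)
    {
            uint32_t pitch;
            uint64_t size = dumb_size(1366, 768, 32, &pitch);
            printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
            return 0;
    }
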
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
new file mode 100644
index 000000000000..1381c5190f46
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -0,0 +1,21 @@
1/**************************************************************************
2 * Copyright (c) 2014 Patrik Jakobsson
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 **************************************************************************/
15
16#ifndef _GEM_H
17#define _GEM_H
18
19extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
20 u64 size, u32 *handlep, int stolen, u32 align);
21#endif
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
new file mode 100644
index 000000000000..4a295f9ba067
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -0,0 +1,56 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 **************************************************************************/
15
16#include <drm/drmP.h>
17#include "psb_drv.h"
18
19void gma_get_core_freq(struct drm_device *dev)
20{
21 uint32_t clock;
22 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
23 struct drm_psb_private *dev_priv = dev->dev_private;
24
25 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
26 /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
27
28 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
29 pci_read_config_dword(pci_root, 0xD4, &clock);
30 pci_dev_put(pci_root);
31
32 switch (clock & 0x07) {
33 case 0:
34 dev_priv->core_freq = 100;
35 break;
36 case 1:
37 dev_priv->core_freq = 133;
38 break;
39 case 2:
40 dev_priv->core_freq = 150;
41 break;
42 case 3:
43 dev_priv->core_freq = 178;
44 break;
45 case 4:
46 dev_priv->core_freq = 200;
47 break;
48 case 5:
49 case 6:
50 case 7:
51 dev_priv->core_freq = 266;
52 break;
53 default:
54 dev_priv->core_freq = 0;
55 }
56}
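
gma_device.c is new: it receives gma_get_core_freq() verbatim from psb_device.c (removed further down) so Poulsbo and Cedarview code can share it. The helper pokes a magic dword into host-bridge config offset 0xD0, reads the strap value back at 0xD4, and maps bits 2:0 to the render core clock. Because the value is masked with 0x07, the switch default (core_freq = 0) can never trigger, so the decode can equally be modelled as a plain table; a sketch with the values copied from the switch:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int decode_core_freq(uint32_t clock)
    {
            /* strap bits 2:0 -> MHz; cases 5, 6 and 7 all mean 266 */
            static const unsigned int mhz[8] = {
                    100, 133, 150, 178, 200, 266, 266, 266
            };
            return mhz[clock & 0x07];
    }

    int main(void)
    {
            printf("%u MHz\n", decode_core_freq(0x3)); /* prints 178 */
            return 0;
    }
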
diff --git a/drivers/gpu/drm/gma500/gma_device.h b/drivers/gpu/drm/gma500/gma_device.h
new file mode 100644
index 000000000000..e1dbb007b820
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.h
@@ -0,0 +1,21 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 **************************************************************************/
15
16#ifndef _GMA_DEVICE_H
17#define _GMA_DEVICE_H
18
19extern void gma_get_core_freq(struct drm_device *dev);
20
21#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 386de2c9dc86..9bb9bddd881a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -59,7 +59,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
59 struct drm_device *dev = crtc->dev; 59 struct drm_device *dev = crtc->dev;
60 struct drm_psb_private *dev_priv = dev->dev_private; 60 struct drm_psb_private *dev_priv = dev->dev_private;
61 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 61 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
62 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 62 struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
63 int pipe = gma_crtc->pipe; 63 int pipe = gma_crtc->pipe;
64 const struct psb_offset *map = &dev_priv->regmap[pipe]; 64 const struct psb_offset *map = &dev_priv->regmap[pipe];
65 unsigned long start, offset; 65 unsigned long start, offset;
@@ -70,7 +70,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
70 return 0; 70 return 0;
71 71
72 /* no fb bound */ 72 /* no fb bound */
73 if (!crtc->fb) { 73 if (!crtc->primary->fb) {
74 dev_err(dev->dev, "No FB bound\n"); 74 dev_err(dev->dev, "No FB bound\n");
75 goto gma_pipe_cleaner; 75 goto gma_pipe_cleaner;
76 } 76 }
@@ -81,19 +81,19 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
81 if (ret < 0) 81 if (ret < 0)
82 goto gma_pipe_set_base_exit; 82 goto gma_pipe_set_base_exit;
83 start = psbfb->gtt->offset; 83 start = psbfb->gtt->offset;
84 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 84 offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
85 85
86 REG_WRITE(map->stride, crtc->fb->pitches[0]); 86 REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
87 87
88 dspcntr = REG_READ(map->cntr); 88 dspcntr = REG_READ(map->cntr);
89 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 89 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
90 90
91 switch (crtc->fb->bits_per_pixel) { 91 switch (crtc->primary->fb->bits_per_pixel) {
92 case 8: 92 case 8:
93 dspcntr |= DISPPLANE_8BPP; 93 dspcntr |= DISPPLANE_8BPP;
94 break; 94 break;
95 case 16: 95 case 16:
96 if (crtc->fb->depth == 15) 96 if (crtc->primary->fb->depth == 15)
97 dspcntr |= DISPPLANE_15_16BPP; 97 dspcntr |= DISPPLANE_15_16BPP;
98 else 98 else
99 dspcntr |= DISPPLANE_16BPP; 99 dspcntr |= DISPPLANE_16BPP;
@@ -485,6 +485,13 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
485 return 0; 485 return 0;
486} 486}
487 487
488bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
489 const struct drm_display_mode *mode,
490 struct drm_display_mode *adjusted_mode)
491{
492 return true;
493}
494
488bool gma_crtc_mode_fixup(struct drm_crtc *crtc, 495bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
489 const struct drm_display_mode *mode, 496 const struct drm_display_mode *mode,
490 struct drm_display_mode *adjusted_mode) 497 struct drm_display_mode *adjusted_mode)
@@ -511,8 +518,8 @@ void gma_crtc_disable(struct drm_crtc *crtc)
511 518
512 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 519 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
513 520
514 if (crtc->fb) { 521 if (crtc->primary->fb) {
515 gt = to_psb_fb(crtc->fb)->gtt; 522 gt = to_psb_fb(crtc->primary->fb)->gtt;
516 psb_gtt_unpin(gt); 523 psb_gtt_unpin(gt);
517 } 524 }
518} 525}
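
Two things happen in gma_display.c above: every crtc->fb reference becomes crtc->primary->fb, following the primary-plane rework elsewhere in this pull (the framebuffer now lives on the CRTC's primary plane object), and gma_encoder_mode_fixup() is introduced as a shared pass-through so per-driver no-op copies can be deleted, as the oaktrail_hdmi.c hunk further down shows. The base-address programming is otherwise untouched: the scan-out start is the GTT offset plus y * pitch + x * bytes-per-pixel. A standalone model of that offset math (fb_view is a stand-in, not struct drm_framebuffer):

    #include <stdint.h>
    #include <stdio.h>

    struct fb_view {
            uint32_t pitch;            /* bytes per scanline */
            uint32_t bits_per_pixel;
    };

    static unsigned long scanout_offset(const struct fb_view *fb, int x, int y)
    {
            return (unsigned long)y * fb->pitch + x * (fb->bits_per_pixel / 8);
    }

    int main(void)
    {
            struct fb_view fb = { .pitch = 5504, .bits_per_pixel = 32 };
            printf("offset=%lu\n", scanout_offset(&fb, 10, 3)); /* 16552 */
            return 0;
    }
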
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 78b9f986a6e5..ed569d8a6af3 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -90,6 +90,9 @@ extern void gma_crtc_restore(struct drm_crtc *crtc);
90extern void gma_encoder_prepare(struct drm_encoder *encoder); 90extern void gma_encoder_prepare(struct drm_encoder *encoder);
91extern void gma_encoder_commit(struct drm_encoder *encoder); 91extern void gma_encoder_commit(struct drm_encoder *encoder);
92extern void gma_encoder_destroy(struct drm_encoder *encoder); 92extern void gma_encoder_destroy(struct drm_encoder *encoder);
93extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
94 const struct drm_display_mode *mode,
95 struct drm_display_mode *adjusted_mode);
93 96
94/* Common clock related functions */ 97/* Common clock related functions */
95extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk); 98extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 2db731f00930..592d205a0089 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -22,6 +22,7 @@
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <linux/shmem_fs.h> 23#include <linux/shmem_fs.h>
24#include "psb_drv.h" 24#include "psb_drv.h"
25#include "blitter.h"
25 26
26 27
27/* 28/*
@@ -105,11 +106,13 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
105 106
106 /* Write our page entries into the GTT itself */ 107 /* Write our page entries into the GTT itself */
107 for (i = r->roll; i < r->npage; i++) { 108 for (i = r->roll; i < r->npage; i++) {
108 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
110 PSB_MMU_CACHED_MEMORY);
109 iowrite32(pte, gtt_slot++); 111 iowrite32(pte, gtt_slot++);
110 } 112 }
111 for (i = 0; i < r->roll; i++) { 113 for (i = 0; i < r->roll; i++) {
112 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
115 PSB_MMU_CACHED_MEMORY);
113 iowrite32(pte, gtt_slot++); 116 iowrite32(pte, gtt_slot++);
114 } 117 }
115 /* Make sure all the entries are set before we return */ 118 /* Make sure all the entries are set before we return */
@@ -127,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
127 * page table entries with the dummy page. This is protected via the gtt 130 * page table entries with the dummy page. This is protected via the gtt
128 * mutex which the caller must hold. 131 * mutex which the caller must hold.
129 */ 132 */
130static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 133void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
131{ 134{
132 struct drm_psb_private *dev_priv = dev->dev_private; 135 struct drm_psb_private *dev_priv = dev->dev_private;
133 u32 __iomem *gtt_slot; 136 u32 __iomem *gtt_slot;
@@ -137,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
137 WARN_ON(r->stolen); 140 WARN_ON(r->stolen);
138 141
139 gtt_slot = psb_gtt_entry(dev, r); 142 gtt_slot = psb_gtt_entry(dev, r);
140 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0); 143 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
144 PSB_MMU_CACHED_MEMORY);
141 145
142 for (i = 0; i < r->npage; i++) 146 for (i = 0; i < r->npage; i++)
143 iowrite32(pte, gtt_slot++); 147 iowrite32(pte, gtt_slot++);
@@ -176,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
176 gtt_slot = psb_gtt_entry(dev, r); 180 gtt_slot = psb_gtt_entry(dev, r);
177 181
178 for (i = r->roll; i < r->npage; i++) { 182 for (i = r->roll; i < r->npage; i++) {
179 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
184 PSB_MMU_CACHED_MEMORY);
180 iowrite32(pte, gtt_slot++); 185 iowrite32(pte, gtt_slot++);
181 } 186 }
182 for (i = 0; i < r->roll; i++) { 187 for (i = 0; i < r->roll; i++) {
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
189 PSB_MMU_CACHED_MEMORY);
184 iowrite32(pte, gtt_slot++); 190 iowrite32(pte, gtt_slot++);
185 } 191 }
186 ioread32(gtt_slot - 1); 192 ioread32(gtt_slot - 1);
@@ -240,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
240 int ret = 0; 246 int ret = 0;
241 struct drm_device *dev = gt->gem.dev; 247 struct drm_device *dev = gt->gem.dev;
242 struct drm_psb_private *dev_priv = dev->dev_private; 248 struct drm_psb_private *dev_priv = dev->dev_private;
249 u32 gpu_base = dev_priv->gtt.gatt_start;
243 250
244 mutex_lock(&dev_priv->gtt_mutex); 251 mutex_lock(&dev_priv->gtt_mutex);
245 252
@@ -252,6 +259,9 @@ int psb_gtt_pin(struct gtt_range *gt)
252 psb_gtt_detach_pages(gt); 259 psb_gtt_detach_pages(gt);
253 goto out; 260 goto out;
254 } 261 }
262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
263 gt->pages, (gpu_base + gt->offset),
264 gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
255 } 265 }
256 gt->in_gart++; 266 gt->in_gart++;
257out: 267out:
@@ -274,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
274{ 284{
275 struct drm_device *dev = gt->gem.dev; 285 struct drm_device *dev = gt->gem.dev;
276 struct drm_psb_private *dev_priv = dev->dev_private; 286 struct drm_psb_private *dev_priv = dev->dev_private;
287 u32 gpu_base = dev_priv->gtt.gatt_start;
288 int ret;
277 289
290 /* While holding the gtt_mutex no new blits can be initiated */
278 mutex_lock(&dev_priv->gtt_mutex); 291 mutex_lock(&dev_priv->gtt_mutex);
279 292
293 /* Wait for any possible usage of the memory to be finished */
294 ret = gma_blt_wait_idle(dev_priv);
295 if (ret) {
296 DRM_ERROR("Failed to idle the blitter, unpin failed!");
297 goto out;
298 }
299
280 WARN_ON(!gt->in_gart); 300 WARN_ON(!gt->in_gart);
281 301
282 gt->in_gart--; 302 gt->in_gart--;
283 if (gt->in_gart == 0 && gt->stolen == 0) { 303 if (gt->in_gart == 0 && gt->stolen == 0) {
304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
305 (gpu_base + gt->offset), gt->npage, 0, 0);
284 psb_gtt_remove(dev, gt); 306 psb_gtt_remove(dev, gt);
285 psb_gtt_detach_pages(gt); 307 psb_gtt_detach_pages(gt);
286 } 308 }
309
310out:
287 mutex_unlock(&dev_priv->gtt_mutex); 311 mutex_unlock(&dev_priv->gtt_mutex);
288} 312}
289 313
@@ -306,7 +330,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
306 * as in use. 330 * as in use.
307 */ 331 */
308struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, 332struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
309 const char *name, int backed) 333 const char *name, int backed, u32 align)
310{ 334{
311 struct drm_psb_private *dev_priv = dev->dev_private; 335 struct drm_psb_private *dev_priv = dev->dev_private;
312 struct gtt_range *gt; 336 struct gtt_range *gt;
@@ -334,7 +358,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
334 /* Ensure this is set for non GEM objects */ 358 /* Ensure this is set for non GEM objects */
335 gt->gem.dev = dev; 359 gt->gem.dev = dev;
336 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource, 360 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
337 len, start, end, PAGE_SIZE, NULL, NULL); 361 len, start, end, align, NULL, NULL);
338 if (ret == 0) { 362 if (ret == 0) {
339 gt->offset = gt->resource.start - r->start; 363 gt->offset = gt->resource.start - r->start;
340 return gt; 364 return gt;
@@ -497,6 +521,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
497 if (!resume) 521 if (!resume)
498 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, 522 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
499 stolen_size); 523 stolen_size);
524
500 if (!dev_priv->vram_addr) { 525 if (!dev_priv->vram_addr) {
501 dev_err(dev->dev, "Failure to map stolen base.\n"); 526 dev_err(dev->dev, "Failure to map stolen base.\n");
502 ret = -ENOMEM; 527 ret = -ENOMEM;
@@ -512,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
512 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 537 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
513 num_pages, pfn_base << PAGE_SHIFT, 0); 538 num_pages, pfn_base << PAGE_SHIFT, 0);
514 for (i = 0; i < num_pages; ++i) { 539 for (i = 0; i < num_pages; ++i) {
515 pte = psb_gtt_mask_pte(pfn_base + i, 0); 540 pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
516 iowrite32(pte, dev_priv->gtt_map + i); 541 iowrite32(pte, dev_priv->gtt_map + i);
517 } 542 }
518 543
@@ -521,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
521 */ 546 */
522 547
523 pfn_base = page_to_pfn(dev_priv->scratch_page); 548 pfn_base = page_to_pfn(dev_priv->scratch_page);
524 pte = psb_gtt_mask_pte(pfn_base, 0); 549 pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
525 for (; i < gtt_pages; ++i) 550 for (; i < gtt_pages; ++i)
526 iowrite32(pte, dev_priv->gtt_map + i); 551 iowrite32(pte, dev_priv->gtt_map + i);
527 552
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index 6191d10acf33..f5860a739bd8 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,7 +53,8 @@ struct gtt_range {
53}; 53};
54 54
55extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, 55extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
56 const char *name, int backed); 56 const char *name, int backed,
57 u32 align);
57extern void psb_gtt_kref_put(struct gtt_range *gt); 58extern void psb_gtt_kref_put(struct gtt_range *gt);
58extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt); 59extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
59extern int psb_gtt_pin(struct gtt_range *gt); 60extern int psb_gtt_pin(struct gtt_range *gt);
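
The gtt.c changes wire the GTT into the now-functional SGX MMU: every PTE is stamped PSB_MMU_CACHED_MEMORY instead of 0, psb_gtt_pin() mirrors the pages into the default page directory at gatt_start + gt->offset, psb_gtt_unpin() idles the blitter before tearing the mapping down, and psb_gtt_alloc_range() takes the alignment that used to be hardcoded to PAGE_SIZE. The mapping's lifetime follows a pin count held under gtt_mutex: build on the first pin, tear down on the last unpin. A toy model of that pairing (locking and the stolen-memory special case omitted; map_range/unmap_range stand in for the insert/remove calls):

    #include <assert.h>
    #include <stdio.h>

    struct range { int in_gart; };

    static void map_range(struct range *r)   { (void)r; printf("map\n"); }
    static void unmap_range(struct range *r) { (void)r; printf("unmap\n"); }

    static void pin(struct range *r)
    {
            if (r->in_gart++ == 0)          /* first pin builds the mapping */
                    map_range(r);
    }

    static void unpin(struct range *r)
    {
            assert(r->in_gart > 0);
            if (--r->in_gart == 0)          /* last unpin tears it down */
                    unmap_range(r);
    }

    int main(void)
    {
            struct range r = { 0 };
            pin(&r); pin(&r); unpin(&r); unpin(&r); /* one map, one unmap */
            return 0;
    }
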
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 860a4ee9baaf..6e91b20ce2e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -287,7 +287,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
287 &gma_crtc->saved_mode, 287 &gma_crtc->saved_mode,
288 encoder->crtc->x, 288 encoder->crtc->x,
289 encoder->crtc->y, 289 encoder->crtc->y,
290 encoder->crtc->fb)) 290 encoder->crtc->primary->fb))
291 goto set_prop_error; 291 goto set_prop_error;
292 } else { 292 } else {
293 struct drm_encoder_helper_funcs *funcs = 293 struct drm_encoder_helper_funcs *funcs =
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 321c00a944e9..8cc8a5abbc7b 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -166,7 +166,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
166 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
167 struct drm_psb_private *dev_priv = dev->dev_private; 167 struct drm_psb_private *dev_priv = dev->dev_private;
168 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 168 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
169 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 169 struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
170 int pipe = gma_crtc->pipe; 170 int pipe = gma_crtc->pipe;
171 const struct psb_offset *map = &dev_priv->regmap[pipe]; 171 const struct psb_offset *map = &dev_priv->regmap[pipe];
172 unsigned long start, offset; 172 unsigned long start, offset;
@@ -178,12 +178,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
178 dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe); 178 dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
179 179
180 /* no fb bound */ 180 /* no fb bound */
181 if (!crtc->fb) { 181 if (!crtc->primary->fb) {
182 dev_dbg(dev->dev, "No FB bound\n"); 182 dev_dbg(dev->dev, "No FB bound\n");
183 return 0; 183 return 0;
184 } 184 }
185 185
186 ret = check_fb(crtc->fb); 186 ret = check_fb(crtc->primary->fb);
187 if (ret) 187 if (ret)
188 return ret; 188 return ret;
189 189
@@ -196,18 +196,18 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
196 return 0; 196 return 0;
197 197
198 start = psbfb->gtt->offset; 198 start = psbfb->gtt->offset;
199 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 199 offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
200 200
201 REG_WRITE(map->stride, crtc->fb->pitches[0]); 201 REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
202 dspcntr = REG_READ(map->cntr); 202 dspcntr = REG_READ(map->cntr);
203 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 203 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
204 204
205 switch (crtc->fb->bits_per_pixel) { 205 switch (crtc->primary->fb->bits_per_pixel) {
206 case 8: 206 case 8:
207 dspcntr |= DISPPLANE_8BPP; 207 dspcntr |= DISPPLANE_8BPP;
208 break; 208 break;
209 case 16: 209 case 16:
210 if (crtc->fb->depth == 15) 210 if (crtc->primary->fb->depth == 15)
211 dspcntr |= DISPPLANE_15_16BPP; 211 dspcntr |= DISPPLANE_15_16BPP;
212 else 212 else
213 dspcntr |= DISPPLANE_16BPP; 213 dspcntr |= DISPPLANE_16BPP;
@@ -700,7 +700,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
700 } 700 }
701#endif 701#endif
702 702
703 ret = check_fb(crtc->fb); 703 ret = check_fb(crtc->primary->fb);
704 if (ret) 704 if (ret)
705 return ret; 705 return ret;
706 706
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c3e67ba94446..0eaf11c19939 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -18,6 +18,7 @@
18#include <drm/drmP.h> 18#include <drm/drmP.h>
19#include "psb_drv.h" 19#include "psb_drv.h"
20#include "psb_reg.h" 20#include "psb_reg.h"
21#include "mmu.h"
21 22
22/* 23/*
23 * Code for the SGX MMU: 24 * Code for the SGX MMU:
@@ -47,51 +48,6 @@
47 * but on average it should be fast. 48 * but on average it should be fast.
48 */ 49 */
49 50
50struct psb_mmu_driver {
51 /* protects driver- and pd structures. Always take in read mode
52 * before taking the page table spinlock.
53 */
54 struct rw_semaphore sem;
55
56 /* protects page tables, directory tables and pt tables.
57 * and pt structures.
58 */
59 spinlock_t lock;
60
61 atomic_t needs_tlbflush;
62
63 uint8_t __iomem *register_map;
64 struct psb_mmu_pd *default_pd;
65 /*uint32_t bif_ctrl;*/
66 int has_clflush;
67 int clflush_add;
68 unsigned long clflush_mask;
69
70 struct drm_psb_private *dev_priv;
71};
72
73struct psb_mmu_pd;
74
75struct psb_mmu_pt {
76 struct psb_mmu_pd *pd;
77 uint32_t index;
78 uint32_t count;
79 struct page *p;
80 uint32_t *v;
81};
82
83struct psb_mmu_pd {
84 struct psb_mmu_driver *driver;
85 int hw_context;
86 struct psb_mmu_pt **tables;
87 struct page *p;
88 struct page *dummy_pt;
89 struct page *dummy_page;
90 uint32_t pd_mask;
91 uint32_t invalid_pde;
92 uint32_t invalid_pte;
93};
94
95static inline uint32_t psb_mmu_pt_index(uint32_t offset) 51static inline uint32_t psb_mmu_pt_index(uint32_t offset)
96{ 52{
97 return (offset >> PSB_PTE_SHIFT) & 0x3FF; 53 return (offset >> PSB_PTE_SHIFT) & 0x3FF;
@@ -102,13 +58,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
102 return offset >> PSB_PDE_SHIFT; 58 return offset >> PSB_PDE_SHIFT;
103} 59}
104 60
61#if defined(CONFIG_X86)
105static inline void psb_clflush(void *addr) 62static inline void psb_clflush(void *addr)
106{ 63{
107 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); 64 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
108} 65}
109 66
110static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, 67static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
111 void *addr)
112{ 68{
113 if (!driver->has_clflush) 69 if (!driver->has_clflush)
114 return; 70 return;
@@ -117,62 +73,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
117 psb_clflush(addr); 73 psb_clflush(addr);
118 mb(); 74 mb();
119} 75}
76#else
120 77
121static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page) 78static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
122{ 79{;
123 uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
124 uint32_t clflush_count = PAGE_SIZE / clflush_add;
125 int i;
126 uint8_t *clf;
127
128 clf = kmap_atomic(page);
129 mb();
130 for (i = 0; i < clflush_count; ++i) {
131 psb_clflush(clf);
132 clf += clflush_add;
133 }
134 mb();
135 kunmap_atomic(clf);
136} 80}
137 81
138static void psb_pages_clflush(struct psb_mmu_driver *driver, 82#endif
139 struct page *page[], unsigned long num_pages)
140{
141 int i;
142
143 if (!driver->has_clflush)
144 return ;
145 83
146 for (i = 0; i < num_pages; i++) 84static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
147 psb_page_clflush(driver, *page++);
148}
149
150static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
151 int force)
152{ 85{
86 struct drm_device *dev = driver->dev;
87 struct drm_psb_private *dev_priv = dev->dev_private;
88
89 if (atomic_read(&driver->needs_tlbflush) || force) {
90 uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
91 PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
92
93 /* Make sure data cache is turned off before enabling it */
94 wmb();
95 PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
96 (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
97 if (driver->msvdx_mmu_invaldc)
98 atomic_set(driver->msvdx_mmu_invaldc, 1);
99 }
153 atomic_set(&driver->needs_tlbflush, 0); 100 atomic_set(&driver->needs_tlbflush, 0);
154} 101}
155 102
103#if 0
156static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) 104static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
157{ 105{
158 down_write(&driver->sem); 106 down_write(&driver->sem);
159 psb_mmu_flush_pd_locked(driver, force); 107 psb_mmu_flush_pd_locked(driver, force);
160 up_write(&driver->sem); 108 up_write(&driver->sem);
161} 109}
110#endif
162 111
163void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) 112void psb_mmu_flush(struct psb_mmu_driver *driver)
164{ 113{
165 if (rc_prot) 114 struct drm_device *dev = driver->dev;
166 down_write(&driver->sem); 115 struct drm_psb_private *dev_priv = dev->dev_private;
167 if (rc_prot) 116 uint32_t val;
168 up_write(&driver->sem); 117
118 down_write(&driver->sem);
119 val = PSB_RSGX32(PSB_CR_BIF_CTRL);
120 if (atomic_read(&driver->needs_tlbflush))
121 PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
122 else
123 PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
124
125 /* Make sure data cache is turned off and MMU is flushed before
126 restoring bank interface control register */
127 wmb();
128 PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
129 PSB_CR_BIF_CTRL);
130 (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
131
132 atomic_set(&driver->needs_tlbflush, 0);
133 if (driver->msvdx_mmu_invaldc)
134 atomic_set(driver->msvdx_mmu_invaldc, 1);
135 up_write(&driver->sem);
169} 136}
170 137
171void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) 138void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
172{ 139{
173 /*ttm_tt_cache_flush(&pd->p, 1);*/ 140 struct drm_device *dev = pd->driver->dev;
174 psb_pages_clflush(pd->driver, &pd->p, 1); 141 struct drm_psb_private *dev_priv = dev->dev_private;
142 uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
143 PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
144
175 down_write(&pd->driver->sem); 145 down_write(&pd->driver->sem);
146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
176 wmb(); 147 wmb();
177 psb_mmu_flush_pd_locked(pd->driver, 1); 148 psb_mmu_flush_pd_locked(pd->driver, 1);
178 pd->hw_context = hw_context; 149 pd->hw_context = hw_context;
@@ -183,7 +154,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
183static inline unsigned long psb_pd_addr_end(unsigned long addr, 154static inline unsigned long psb_pd_addr_end(unsigned long addr,
184 unsigned long end) 155 unsigned long end)
185{ 156{
186
187 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; 157 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
188 return (addr < end) ? addr : end; 158 return (addr < end) ? addr : end;
189} 159}
@@ -223,12 +193,10 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
223 goto out_err3; 193 goto out_err3;
224 194
225 if (!trap_pagefaults) { 195 if (!trap_pagefaults) {
226 pd->invalid_pde = 196 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
227 psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), 197 invalid_type);
228 invalid_type); 198 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
229 pd->invalid_pte = 199 invalid_type);
230 psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
231 invalid_type);
232 } else { 200 } else {
233 pd->invalid_pde = 0; 201 pd->invalid_pde = 0;
234 pd->invalid_pte = 0; 202 pd->invalid_pte = 0;
@@ -279,12 +247,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
279void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) 247void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
280{ 248{
281 struct psb_mmu_driver *driver = pd->driver; 249 struct psb_mmu_driver *driver = pd->driver;
250 struct drm_device *dev = driver->dev;
251 struct drm_psb_private *dev_priv = dev->dev_private;
282 struct psb_mmu_pt *pt; 252 struct psb_mmu_pt *pt;
283 int i; 253 int i;
284 254
285 down_write(&driver->sem); 255 down_write(&driver->sem);
286 if (pd->hw_context != -1) 256 if (pd->hw_context != -1) {
257 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
287 psb_mmu_flush_pd_locked(driver, 1); 258 psb_mmu_flush_pd_locked(driver, 1);
259 }
288 260
289 /* Should take the spinlock here, but we don't need to do that 261 /* Should take the spinlock here, but we don't need to do that
290 since we have the semaphore in write mode. */ 262 since we have the semaphore in write mode. */
@@ -331,7 +303,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
331 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) 303 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
332 *ptes++ = pd->invalid_pte; 304 *ptes++ = pd->invalid_pte;
333 305
334 306#if defined(CONFIG_X86)
335 if (pd->driver->has_clflush && pd->hw_context != -1) { 307 if (pd->driver->has_clflush && pd->hw_context != -1) {
336 mb(); 308 mb();
337 for (i = 0; i < clflush_count; ++i) { 309 for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +312,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
340 } 312 }
341 mb(); 313 mb();
342 } 314 }
343 315#endif
344 kunmap_atomic(v); 316 kunmap_atomic(v);
345 spin_unlock(lock); 317 spin_unlock(lock);
346 318
@@ -351,7 +323,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
351 return pt; 323 return pt;
352} 324}
353 325
354static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, 326struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
355 unsigned long addr) 327 unsigned long addr)
356{ 328{
357 uint32_t index = psb_mmu_pd_index(addr); 329 uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
383 kunmap_atomic((void *) v); 355 kunmap_atomic((void *) v);
384 356
385 if (pd->hw_context != -1) { 357 if (pd->hw_context != -1) {
386 psb_mmu_clflush(pd->driver, (void *) &v[index]); 358 psb_mmu_clflush(pd->driver, (void *)&v[index]);
387 atomic_set(&pd->driver->needs_tlbflush, 1); 359 atomic_set(&pd->driver->needs_tlbflush, 1);
388 } 360 }
389 } 361 }
@@ -420,8 +392,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
420 pd->tables[pt->index] = NULL; 392 pd->tables[pt->index] = NULL;
421 393
422 if (pd->hw_context != -1) { 394 if (pd->hw_context != -1) {
423 psb_mmu_clflush(pd->driver, 395 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
424 (void *) &v[pt->index]);
425 atomic_set(&pd->driver->needs_tlbflush, 1); 396 atomic_set(&pd->driver->needs_tlbflush, 1);
426 } 397 }
427 kunmap_atomic(pt->v); 398 kunmap_atomic(pt->v);
@@ -432,8 +403,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
432 spin_unlock(&pd->driver->lock); 403 spin_unlock(&pd->driver->lock);
433} 404}
434 405
435static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, 406static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
436 unsigned long addr, uint32_t pte) 407 uint32_t pte)
437{ 408{
438 pt->v[psb_mmu_pt_index(addr)] = pte; 409 pt->v[psb_mmu_pt_index(addr)] = pte;
439} 410}
@@ -444,69 +415,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; 415 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
445} 416}
446 417
447 418struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
448void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
449 uint32_t mmu_offset, uint32_t gtt_start,
450 uint32_t gtt_pages)
451{ 419{
452 uint32_t *v; 420 struct psb_mmu_pd *pd;
453 uint32_t start = psb_mmu_pd_index(mmu_offset);
454 struct psb_mmu_driver *driver = pd->driver;
455 int num_pages = gtt_pages;
456 421
457 down_read(&driver->sem); 422 down_read(&driver->sem);
458 spin_lock(&driver->lock); 423 pd = driver->default_pd;
459 424 up_read(&driver->sem);
460 v = kmap_atomic(pd->p);
461 v += start;
462
463 while (gtt_pages--) {
464 *v++ = gtt_start | pd->pd_mask;
465 gtt_start += PAGE_SIZE;
466 }
467
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
470 kunmap_atomic(v);
471 spin_unlock(&driver->lock);
472
473 if (pd->hw_context != -1)
474 atomic_set(&pd->driver->needs_tlbflush, 1);
475 425
476 up_read(&pd->driver->sem); 426 return pd;
477 psb_mmu_flush_pd(pd->driver, 0);
478} 427}
479 428
480struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) 429/* Returns the physical address of the PD shared by sgx/msvdx */
430uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
481{ 431{
482 struct psb_mmu_pd *pd; 432 struct psb_mmu_pd *pd;
483 433
484 /* down_read(&driver->sem); */ 434 pd = psb_mmu_get_default_pd(driver);
485 pd = driver->default_pd; 435 return page_to_pfn(pd->p) << PAGE_SHIFT;
486 /* up_read(&driver->sem); */
487
488 return pd;
489} 436}
490 437
491void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) 438void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
492{ 439{
440 struct drm_device *dev = driver->dev;
441 struct drm_psb_private *dev_priv = dev->dev_private;
442
443 PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
493 psb_mmu_free_pagedir(driver->default_pd); 444 psb_mmu_free_pagedir(driver->default_pd);
494 kfree(driver); 445 kfree(driver);
495} 446}
496 447
497struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, 448struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
498 int trap_pagefaults, 449 int trap_pagefaults,
499 int invalid_type, 450 int invalid_type,
500 struct drm_psb_private *dev_priv) 451 atomic_t *msvdx_mmu_invaldc)
501{ 452{
502 struct psb_mmu_driver *driver; 453 struct psb_mmu_driver *driver;
454 struct drm_psb_private *dev_priv = dev->dev_private;
503 455
504 driver = kmalloc(sizeof(*driver), GFP_KERNEL); 456 driver = kmalloc(sizeof(*driver), GFP_KERNEL);
505 457
506 if (!driver) 458 if (!driver)
507 return NULL; 459 return NULL;
508 driver->dev_priv = dev_priv;
509 460
461 driver->dev = dev;
510 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, 462 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
511 invalid_type); 463 invalid_type);
512 if (!driver->default_pd) 464 if (!driver->default_pd)
@@ -515,17 +467,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
515 spin_lock_init(&driver->lock); 467 spin_lock_init(&driver->lock);
516 init_rwsem(&driver->sem); 468 init_rwsem(&driver->sem);
517 down_write(&driver->sem); 469 down_write(&driver->sem);
518 driver->register_map = registers;
519 atomic_set(&driver->needs_tlbflush, 1); 470 atomic_set(&driver->needs_tlbflush, 1);
471 driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
472
473 driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
474 PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
475 PSB_CR_BIF_CTRL);
476 PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
477 PSB_CR_BIF_CTRL);
520 478
521 driver->has_clflush = 0; 479 driver->has_clflush = 0;
522 480
481#if defined(CONFIG_X86)
523 if (boot_cpu_has(X86_FEATURE_CLFLUSH)) { 482 if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
524 uint32_t tfms, misc, cap0, cap4, clflush_size; 483 uint32_t tfms, misc, cap0, cap4, clflush_size;
525 484
526 /* 485 /*
527 * clflush size is determined at kernel setup for x86_64 486 * clflush size is determined at kernel setup for x86_64 but not
528 * but not for i386. We have to do it here. 487 * for i386. We have to do it here.
529 */ 488 */
530 489
531 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); 490 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +495,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
536 driver->clflush_mask = driver->clflush_add - 1; 495 driver->clflush_mask = driver->clflush_add - 1;
537 driver->clflush_mask = ~driver->clflush_mask; 496 driver->clflush_mask = ~driver->clflush_mask;
538 } 497 }
498#endif
539 499
540 up_write(&driver->sem); 500 up_write(&driver->sem);
541 return driver; 501 return driver;
@@ -545,9 +505,9 @@ out_err1:
545 return NULL; 505 return NULL;
546} 506}
547 507
548static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, 508#if defined(CONFIG_X86)
549 unsigned long address, uint32_t num_pages, 509static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
550 uint32_t desired_tile_stride, 510 uint32_t num_pages, uint32_t desired_tile_stride,
551 uint32_t hw_tile_stride) 511 uint32_t hw_tile_stride)
552{ 512{
553 struct psb_mmu_pt *pt; 513 struct psb_mmu_pt *pt;
@@ -561,11 +521,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
561 unsigned long clflush_add = pd->driver->clflush_add; 521 unsigned long clflush_add = pd->driver->clflush_add;
562 unsigned long clflush_mask = pd->driver->clflush_mask; 522 unsigned long clflush_mask = pd->driver->clflush_mask;
563 523
564 if (!pd->driver->has_clflush) { 524 if (!pd->driver->has_clflush)
565 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
566 psb_pages_clflush(pd->driver, &pd->p, num_pages);
567 return; 525 return;
568 }
569 526
570 if (hw_tile_stride) 527 if (hw_tile_stride)
571 rows = num_pages / desired_tile_stride; 528 rows = num_pages / desired_tile_stride;
@@ -586,10 +543,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
586 if (!pt) 543 if (!pt)
587 continue; 544 continue;
588 do { 545 do {
589 psb_clflush(&pt->v 546 psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
590 [psb_mmu_pt_index(addr)]); 547 } while (addr += clflush_add,
591 } while (addr +=
592 clflush_add,
593 (addr & clflush_mask) < next); 548 (addr & clflush_mask) < next);
594 549
595 psb_mmu_pt_unmap_unlock(pt); 550 psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +553,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
598 } 553 }
599 mb(); 554 mb();
600} 555}
556#else
557static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
558 uint32_t num_pages, uint32_t desired_tile_stride,
559 uint32_t hw_tile_stride)
560{
561 drm_ttm_cache_flush();
562}
563#endif
601 564
602void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, 565void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
603 unsigned long address, uint32_t num_pages) 566 unsigned long address, uint32_t num_pages)
@@ -633,7 +596,7 @@ out:
633 up_read(&pd->driver->sem); 596 up_read(&pd->driver->sem);
634 597
635 if (pd->hw_context != -1) 598 if (pd->hw_context != -1)
636 psb_mmu_flush(pd->driver, 0); 599 psb_mmu_flush(pd->driver);
637 600
638 return; 601 return;
639} 602}
@@ -660,7 +623,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
660 add = desired_tile_stride << PAGE_SHIFT; 623 add = desired_tile_stride << PAGE_SHIFT;
661 row_add = hw_tile_stride << PAGE_SHIFT; 624 row_add = hw_tile_stride << PAGE_SHIFT;
662 625
663 /* down_read(&pd->driver->sem); */ 626 down_read(&pd->driver->sem);
664 627
665 /* Make sure we only need to flush this processor's cache */ 628 /* Make sure we only need to flush this processor's cache */
666 629
@@ -688,10 +651,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
688 psb_mmu_flush_ptes(pd, f_address, num_pages, 651 psb_mmu_flush_ptes(pd, f_address, num_pages,
689 desired_tile_stride, hw_tile_stride); 652 desired_tile_stride, hw_tile_stride);
690 653
691 /* up_read(&pd->driver->sem); */ 654 up_read(&pd->driver->sem);
692 655
693 if (pd->hw_context != -1) 656 if (pd->hw_context != -1)
694 psb_mmu_flush(pd->driver, 0); 657 psb_mmu_flush(pd->driver);
695} 658}
696 659
697int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, 660int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +667,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
704 unsigned long end; 667 unsigned long end;
705 unsigned long next; 668 unsigned long next;
706 unsigned long f_address = address; 669 unsigned long f_address = address;
707 int ret = 0; 670 int ret = -ENOMEM;
708 671
709 down_read(&pd->driver->sem); 672 down_read(&pd->driver->sem);
710 673
@@ -726,6 +689,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
726 psb_mmu_pt_unmap_unlock(pt); 689 psb_mmu_pt_unmap_unlock(pt);
727 690
728 } while (addr = next, next != end); 691 } while (addr = next, next != end);
692 ret = 0;
729 693
730out: 694out:
731 if (pd->hw_context != -1) 695 if (pd->hw_context != -1)
@@ -734,15 +698,15 @@ out:
734 up_read(&pd->driver->sem); 698 up_read(&pd->driver->sem);
735 699
736 if (pd->hw_context != -1) 700 if (pd->hw_context != -1)
737 psb_mmu_flush(pd->driver, 1); 701 psb_mmu_flush(pd->driver);
738 702
739 return ret; 703 return 0;
740} 704}
741 705
742int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, 706int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
743 unsigned long address, uint32_t num_pages, 707 unsigned long address, uint32_t num_pages,
744 uint32_t desired_tile_stride, 708 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
745 uint32_t hw_tile_stride, int type) 709 int type)
746{ 710{
747 struct psb_mmu_pt *pt; 711 struct psb_mmu_pt *pt;
748 uint32_t rows = 1; 712 uint32_t rows = 1;
@@ -754,7 +718,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
754 unsigned long add; 718 unsigned long add;
755 unsigned long row_add; 719 unsigned long row_add;
756 unsigned long f_address = address; 720 unsigned long f_address = address;
757 int ret = 0; 721 int ret = -ENOMEM;
758 722
759 if (hw_tile_stride) { 723 if (hw_tile_stride) {
760 if (num_pages % desired_tile_stride != 0) 724 if (num_pages % desired_tile_stride != 0)
@@ -777,14 +741,11 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
777 do { 741 do {
778 next = psb_pd_addr_end(addr, end); 742 next = psb_pd_addr_end(addr, end);
779 pt = psb_mmu_pt_alloc_map_lock(pd, addr); 743 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
780 if (!pt) { 744 if (!pt)
781 ret = -ENOMEM;
782 goto out; 745 goto out;
783 }
784 do { 746 do {
785 pte = 747 pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
786 psb_mmu_mask_pte(page_to_pfn(*pages++), 748 type);
787 type);
788 psb_mmu_set_pte(pt, addr, pte); 749 psb_mmu_set_pte(pt, addr, pte);
789 pt->count++; 750 pt->count++;
790 } while (addr += PAGE_SIZE, addr < next); 751 } while (addr += PAGE_SIZE, addr < next);
@@ -794,6 +755,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
794 755
795 address += row_add; 756 address += row_add;
796 } 757 }
758
759 ret = 0;
797out: 760out:
798 if (pd->hw_context != -1) 761 if (pd->hw_context != -1)
799 psb_mmu_flush_ptes(pd, f_address, num_pages, 762 psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +765,7 @@ out:
802 up_read(&pd->driver->sem); 765 up_read(&pd->driver->sem);
803 766
804 if (pd->hw_context != -1) 767 if (pd->hw_context != -1)
805 psb_mmu_flush(pd->driver, 1); 768 psb_mmu_flush(pd->driver);
806 769
807 return ret; 770 return ret;
808} 771}
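
The mmu.c rework makes the SGX MMU driver actually program hardware: the driver/pd/pt structures move into the new mmu.h below, cache-line flushing is compiled only under CONFIG_X86 (other architectures stub psb_mmu_clflush() and fall back to drm_ttm_cache_flush()), and the flush paths now toggle the INVALDC/FLUSH bits in PSB_CR_BIF_CTRL and write the page-directory pfn to PSB_CR_BIF_DIR_LIST_BASEn. The untouched index helpers imply the usual 10/10/12 split of a 32-bit device address; a sketch of that split, where the shift values are inferred from psb_mmu_pd_index()/psb_mmu_pt_index() rather than taken from hardware documentation:

    #include <assert.h>
    #include <stdint.h>

    #define PTE_SHIFT 12  /* assumed PSB_PTE_SHIFT: 4 KiB pages */
    #define PDE_SHIFT 22  /* assumed PSB_PDE_SHIFT: 1024 PTEs per table */

    static uint32_t pd_index(uint32_t addr) { return addr >> PDE_SHIFT; }
    static uint32_t pt_index(uint32_t addr) { return (addr >> PTE_SHIFT) & 0x3FF; }

    int main(void)
    {
            uint32_t addr = (3u << PDE_SHIFT) | (7u << PTE_SHIFT) | 0x123;
            assert(pd_index(addr) == 3 && pt_index(addr) == 7);
            return 0;
    }
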
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
new file mode 100644
index 000000000000..e89abec6209d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -0,0 +1,93 @@
1/**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 **************************************************************************/
14
15#ifndef __MMU_H
16#define __MMU_H
17
18struct psb_mmu_driver {
19 /* protects driver- and pd structures. Always take in read mode
20 * before taking the page table spinlock.
21 */
22 struct rw_semaphore sem;
23
24 /* protects page tables, directory tables and pt tables.
25 * and pt structures.
26 */
27 spinlock_t lock;
28
29 atomic_t needs_tlbflush;
30 atomic_t *msvdx_mmu_invaldc;
31 struct psb_mmu_pd *default_pd;
32 uint32_t bif_ctrl;
33 int has_clflush;
34 int clflush_add;
35 unsigned long clflush_mask;
36
37 struct drm_device *dev;
38};
39
40struct psb_mmu_pd;
41
42struct psb_mmu_pt {
43 struct psb_mmu_pd *pd;
44 uint32_t index;
45 uint32_t count;
46 struct page *p;
47 uint32_t *v;
48};
49
50struct psb_mmu_pd {
51 struct psb_mmu_driver *driver;
52 int hw_context;
53 struct psb_mmu_pt **tables;
54 struct page *p;
55 struct page *dummy_pt;
56 struct page *dummy_page;
57 uint32_t pd_mask;
58 uint32_t invalid_pde;
59 uint32_t invalid_pte;
60};
61
62extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
63 int trap_pagefaults,
64 int invalid_type,
65 atomic_t *msvdx_mmu_invaldc);
66extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
67extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
68 *driver);
69extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
70 int trap_pagefaults,
71 int invalid_type);
72extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
73extern void psb_mmu_flush(struct psb_mmu_driver *driver);
74extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
75 unsigned long address,
76 uint32_t num_pages);
77extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
78 uint32_t start_pfn,
79 unsigned long address,
80 uint32_t num_pages, int type);
81extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
82 unsigned long *pfn);
83extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
84extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
85 unsigned long address, uint32_t num_pages,
86 uint32_t desired_tile_stride,
87 uint32_t hw_tile_stride, int type);
88extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
89 unsigned long address, uint32_t num_pages,
90 uint32_t desired_tile_stride,
91 uint32_t hw_tile_stride);
92
93#endif
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 8195e8592107..2de216c2374f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -599,7 +599,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
599 struct drm_device *dev = crtc->dev; 599 struct drm_device *dev = crtc->dev;
600 struct drm_psb_private *dev_priv = dev->dev_private; 600 struct drm_psb_private *dev_priv = dev->dev_private;
601 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 601 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
602 struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 602 struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
603 int pipe = gma_crtc->pipe; 603 int pipe = gma_crtc->pipe;
604 const struct psb_offset *map = &dev_priv->regmap[pipe]; 604 const struct psb_offset *map = &dev_priv->regmap[pipe];
605 unsigned long start, offset; 605 unsigned long start, offset;
@@ -608,7 +608,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
608 int ret = 0; 608 int ret = 0;
609 609
610 /* no fb bound */ 610 /* no fb bound */
611 if (!crtc->fb) { 611 if (!crtc->primary->fb) {
612 dev_dbg(dev->dev, "No FB bound\n"); 612 dev_dbg(dev->dev, "No FB bound\n");
613 return 0; 613 return 0;
614 } 614 }
@@ -617,19 +617,19 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
617 return 0; 617 return 0;
618 618
619 start = psbfb->gtt->offset; 619 start = psbfb->gtt->offset;
620 offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); 620 offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
621 621
622 REG_WRITE(map->stride, crtc->fb->pitches[0]); 622 REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
623 623
624 dspcntr = REG_READ(map->cntr); 624 dspcntr = REG_READ(map->cntr);
625 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 625 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
626 626
627 switch (crtc->fb->bits_per_pixel) { 627 switch (crtc->primary->fb->bits_per_pixel) {
628 case 8: 628 case 8:
629 dspcntr |= DISPPLANE_8BPP; 629 dspcntr |= DISPPLANE_8BPP;
630 break; 630 break;
631 case 16: 631 case 16:
632 if (crtc->fb->depth == 15) 632 if (crtc->primary->fb->depth == 15)
633 dspcntr |= DISPPLANE_15_16BPP; 633 dspcntr |= DISPPLANE_15_16BPP;
634 else 634 else
635 dspcntr |= DISPPLANE_16BPP; 635 dspcntr |= DISPPLANE_16BPP;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 38153143ed8c..cf018ddcc5a6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -523,13 +523,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
523 return MODE_OK; 523 return MODE_OK;
524} 524}
525 525
526static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
527 const struct drm_display_mode *mode,
528 struct drm_display_mode *adjusted_mode)
529{
530 return true;
531}
532
533static enum drm_connector_status 526static enum drm_connector_status
534oaktrail_hdmi_detect(struct drm_connector *connector, bool force) 527oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
535{ 528{
@@ -608,7 +601,7 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
608 601
609static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { 602static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
610 .dpms = oaktrail_hdmi_dpms, 603 .dpms = oaktrail_hdmi_dpms,
611 .mode_fixup = oaktrail_hdmi_mode_fixup, 604 .mode_fixup = gma_encoder_mode_fixup,
612 .prepare = gma_encoder_prepare, 605 .prepare = gma_encoder_prepare,
613 .mode_set = oaktrail_hdmi_mode_set, 606 .mode_set = oaktrail_hdmi_mode_set,
614 .commit = gma_encoder_commit, 607 .commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 5e0697862736..9b099468a5db 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -359,6 +359,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
359 * if closed, act like it's not there for now 359 * if closed, act like it's not there for now
360 */ 360 */
361 361
362 mutex_lock(&dev->mode_config.mutex);
362 i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus); 363 i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
363 if (i2c_adap == NULL) 364 if (i2c_adap == NULL)
364 dev_err(dev->dev, "No ddc adapter available!\n"); 365 dev_err(dev->dev, "No ddc adapter available!\n");
@@ -401,10 +402,14 @@ void oaktrail_lvds_init(struct drm_device *dev,
401 } 402 }
402 403
403out: 404out:
405 mutex_unlock(&dev->mode_config.mutex);
406
404 drm_sysfs_connector_add(connector); 407 drm_sysfs_connector_add(connector);
405 return; 408 return;
406 409
407failed_find: 410failed_find:
411 mutex_unlock(&dev->mode_config.mutex);
412
408 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); 413 dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
409 if (gma_encoder->ddc_bus) 414 if (gma_encoder->ddc_bus)
410 psb_intel_i2c_destroy(gma_encoder->ddc_bus); 415 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
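
The oaktrail_lvds.c hunk takes dev->mode_config.mutex around the DDC/mode probing and, crucially, releases it on both ways out: the normal "out:" path and the "failed_find:" error path. The shape of that fix, sketched with a pthread mutex standing in for the kernel one:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;

    static bool probe_modes(void) { return false; } /* pretend probing fails */

    static int lvds_init_shape(void)
    {
            pthread_mutex_lock(&mode_config_mutex);

            if (!probe_modes())
                    goto failed_find;

            pthread_mutex_unlock(&mode_config_mutex); /* out: */
            return 0;

    failed_find:
            pthread_mutex_unlock(&mode_config_mutex); /* error path unlocks too */
            return -1;
    }

    int main(void)
    {
            lvds_init_shape();
            return 0;
    }
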
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 13ec6283bf59..ab696ca7eeec 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -173,10 +173,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
173 return 0; 173 return 0;
174} 174}
175 175
176void psb_intel_opregion_asle_intr(struct drm_device *dev) 176static void psb_intel_opregion_asle_work(struct work_struct *work)
177{ 177{
178 struct drm_psb_private *dev_priv = dev->dev_private; 178 struct psb_intel_opregion *opregion =
179 struct opregion_asle *asle = dev_priv->opregion.asle; 179 container_of(work, struct psb_intel_opregion, asle_work);
180 struct drm_psb_private *dev_priv =
181 container_of(opregion, struct drm_psb_private, opregion);
182 struct opregion_asle *asle = opregion->asle;
180 u32 asle_stat = 0; 183 u32 asle_stat = 0;
181 u32 asle_req; 184 u32 asle_req;
182 185
@@ -190,9 +193,18 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
190 } 193 }
191 194
192 if (asle_req & ASLE_SET_BACKLIGHT) 195 if (asle_req & ASLE_SET_BACKLIGHT)
193 asle_stat |= asle_set_backlight(dev, asle->bclp); 196 asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
194 197
195 asle->aslc = asle_stat; 198 asle->aslc = asle_stat;
199
200}
201
202void psb_intel_opregion_asle_intr(struct drm_device *dev)
203{
204 struct drm_psb_private *dev_priv = dev->dev_private;
205
206 if (dev_priv->opregion.asle)
207 schedule_work(&dev_priv->opregion.asle_work);
196} 208}
197 209
198#define ASLE_ALS_EN (1<<0) 210#define ASLE_ALS_EN (1<<0)
@@ -282,6 +294,8 @@ void psb_intel_opregion_fini(struct drm_device *dev)
282 unregister_acpi_notifier(&psb_intel_opregion_notifier); 294 unregister_acpi_notifier(&psb_intel_opregion_notifier);
283 } 295 }
284 296
297 cancel_work_sync(&opregion->asle_work);
298
285 /* just clear all opregion memory pointers now */ 299 /* just clear all opregion memory pointers now */
286 iounmap(opregion->header); 300 iounmap(opregion->header);
287 opregion->header = NULL; 301 opregion->header = NULL;
@@ -304,6 +318,9 @@ int psb_intel_opregion_setup(struct drm_device *dev)
304 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n"); 318 DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
305 return -ENOTSUPP; 319 return -ENOTSUPP;
306 } 320 }
321
322 INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
323
307 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy); 324 DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
308 base = acpi_os_ioremap(opregion_phy, 8*1024); 325 base = acpi_os_ioremap(opregion_phy, 8*1024);
309 if (!base) 326 if (!base)
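
opregion.c stops doing ASLE work in interrupt context: the IRQ entry point now only schedules opregion->asle_work, the work callback climbs back to the device data with two container_of() steps (work -> opregion -> dev_priv), setup INIT_WORKs the item, and fini cancel_work_sync()s it before the mappings go away. A user-space model of the double container_of() recovery, with a toy struct replacing the kernel's work_struct:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { void (*fn)(struct work *); };
    struct opregion { struct work asle_work; };
    struct priv { int id; struct opregion opregion; };

    static void asle_work_fn(struct work *w)
    {
            struct opregion *o = container_of(w, struct opregion, asle_work);
            struct priv *p = container_of(o, struct priv, opregion);
            printf("handling ASLE request for device %d\n", p->id);
    }

    int main(void)
    {
            struct priv p = { .id = 42,
                              .opregion = { .asle_work = { .fn = asle_work_fn } } };
            p.opregion.asle_work.fn(&p.opregion.asle_work); /* ~ schedule_work() */
            return 0;
    }
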
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 23fb33f1471b..07df7d4eea72 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -26,6 +26,7 @@
26#include "psb_intel_reg.h" 26#include "psb_intel_reg.h"
27#include "intel_bios.h" 27#include "intel_bios.h"
28#include "psb_device.h" 28#include "psb_device.h"
29#include "gma_device.h"
29 30
30static int psb_output_init(struct drm_device *dev) 31static int psb_output_init(struct drm_device *dev)
31{ 32{
@@ -257,45 +258,6 @@ static int psb_power_up(struct drm_device *dev)
257 return 0; 258 return 0;
258} 259}
259 260
260static void psb_get_core_freq(struct drm_device *dev)
261{
262 uint32_t clock;
263 struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
264 struct drm_psb_private *dev_priv = dev->dev_private;
265
266 /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
267 /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
268
269 pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
270 pci_read_config_dword(pci_root, 0xD4, &clock);
271 pci_dev_put(pci_root);
272
273 switch (clock & 0x07) {
274 case 0:
275 dev_priv->core_freq = 100;
276 break;
277 case 1:
278 dev_priv->core_freq = 133;
279 break;
280 case 2:
281 dev_priv->core_freq = 150;
282 break;
283 case 3:
284 dev_priv->core_freq = 178;
285 break;
286 case 4:
287 dev_priv->core_freq = 200;
288 break;
289 case 5:
290 case 6:
291 case 7:
292 dev_priv->core_freq = 266;
293 break;
294 default:
295 dev_priv->core_freq = 0;
296 }
297}
298
299/* Poulsbo */ 261/* Poulsbo */
300static const struct psb_offset psb_regmap[2] = { 262static const struct psb_offset psb_regmap[2] = {
301 { 263 {
@@ -352,7 +314,7 @@ static int psb_chip_setup(struct drm_device *dev)
352{ 314{
353 struct drm_psb_private *dev_priv = dev->dev_private; 315 struct drm_psb_private *dev_priv = dev->dev_private;
354 dev_priv->regmap = psb_regmap; 316 dev_priv->regmap = psb_regmap;
355 psb_get_core_freq(dev); 317 gma_get_core_freq(dev);
356 gma_intel_setup_gmbus(dev); 318 gma_intel_setup_gmbus(dev);
357 psb_intel_opregion_init(dev); 319 psb_intel_opregion_init(dev);
358 psb_intel_init_bios(dev); 320 psb_intel_init_bios(dev);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1199180667c9..b686e56646eb 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -21,7 +21,6 @@
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <drm/drm.h> 23#include <drm/drm.h>
24#include <drm/gma_drm.h>
25#include "psb_drv.h" 24#include "psb_drv.h"
26#include "framebuffer.h" 25#include "framebuffer.h"
27#include "psb_reg.h" 26#include "psb_reg.h"
@@ -37,56 +36,65 @@
37#include <acpi/video.h> 36#include <acpi/video.h>
38#include <linux/module.h> 37#include <linux/module.h>
39 38
40static int drm_psb_trap_pagefaults; 39static struct drm_driver driver;
41 40static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
42static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
43
44MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
45module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
46
47 41
42/*
43 * The table below contains a mapping of the PCI vendor ID and the PCI Device ID
44 * to the different groups of PowerVR 5-series chip designs
45 *
46 * 0x8086 = Intel Corporation
47 *
48 * PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
49 * PowerVR SGX535 - Moorestown - Intel GMA 600
50 * PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
51 * PowerVR SGX540 - Medfield - Intel Atom Z2460
52 * PowerVR SGX544MP2 - Medfield -
53 * PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
54 * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
55 * N2800
56 */
48static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { 57static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
49 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, 58 { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
50 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, 59 { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
51#if defined(CONFIG_DRM_GMA600) 60#if defined(CONFIG_DRM_GMA600)
52 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 61 { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
53 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 62 { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
54 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 63 { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
55 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 64 { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
56 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 65 { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
57 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 66 { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
58 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 67 { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
59 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, 68 { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
60 /* Atom E620 */ 69 { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
61 { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
62#endif 70#endif
63#if defined(CONFIG_DRM_MEDFIELD) 71#if defined(CONFIG_DRM_MEDFIELD)
64 {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 72 { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
65 {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 73 { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
66 {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 74 { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
67 {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 75 { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
68 {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 76 { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
69 {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 77 { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
70 {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 78 { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
71 {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, 79 { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
72#endif 80#endif
73#if defined(CONFIG_DRM_GMA3600) 81#if defined(CONFIG_DRM_GMA3600)
74 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 82 { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
75 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 83 { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
76 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 84 { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
77 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 85 { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
78 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 86 { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
79 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 87 { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
80 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 88 { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
81 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 89 { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
82 { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 90 { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
83 { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 91 { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
84 { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 92 { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
85 { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 93 { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
86 { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 94 { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
87 { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 95 { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
88 { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 96 { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
89 { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, 97 { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
90#endif 98#endif
91 { 0, } 99 { 0, }
92}; 100};
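
The last field of each entry is driver_data, which this table uses to carry a pointer to the matching chip's psb_ops. A sketch of the mechanism, with the chip_ops/poulsbo_ops names hypothetical and only the PCI core API assumed:

#include <linux/pci.h>

struct chip_ops {
	const char *name;
};

static const struct chip_ops poulsbo_ops = { .name = "Poulsbo" };

static const struct pci_device_id sketch_ids[] = {
	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  (kernel_ulong_t)&poulsbo_ops },
	{ 0, }
};

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct chip_ops *ops = (const struct chip_ops *)ent->driver_data;

	dev_info(&pdev->dev, "binding %s variant\n", ops->name);
	return 0;
}

In this driver the value travels one step further: drm_get_pci_dev() hands ent->driver_data to the .load() hook as its flags argument, which is why psb_driver_load() later casts flags straight back to a struct psb_ops pointer.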
@@ -95,59 +103,10 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
95/* 103/*
96 * Standard IOCTLs. 104 * Standard IOCTLs.
97 */ 105 */
98
99#define DRM_IOCTL_GMA_ADB \
100 DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
101#define DRM_IOCTL_GMA_MODE_OPERATION \
102 DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
103 struct drm_psb_mode_operation_arg)
104#define DRM_IOCTL_GMA_STOLEN_MEMORY \
105 DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
106 struct drm_psb_stolen_memory_arg)
107#define DRM_IOCTL_GMA_GAMMA \
108 DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
109 struct drm_psb_dpst_lut_arg)
110#define DRM_IOCTL_GMA_DPST_BL \
111 DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
112 uint32_t)
113#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \
114 DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
115 struct drm_psb_get_pipe_from_crtc_id_arg)
116#define DRM_IOCTL_GMA_GEM_CREATE \
117 DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
118 struct drm_psb_gem_create)
119#define DRM_IOCTL_GMA_GEM_MMAP \
120 DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
121 struct drm_psb_gem_mmap)
122
123static int psb_adb_ioctl(struct drm_device *dev, void *data,
124 struct drm_file *file_priv);
125static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
126 struct drm_file *file_priv);
127static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
128 struct drm_file *file_priv);
129static int psb_gamma_ioctl(struct drm_device *dev, void *data,
130 struct drm_file *file_priv);
131static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
132 struct drm_file *file_priv);
133
134static const struct drm_ioctl_desc psb_ioctls[] = { 106static const struct drm_ioctl_desc psb_ioctls[] = {
135 DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
136 DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
137 DRM_AUTH),
138 DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
139 DRM_AUTH),
140 DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
141 DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
142 DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
143 psb_intel_get_pipe_from_crtc_id, 0),
144 DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
145 DRM_UNLOCKED | DRM_AUTH),
146 DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
147 DRM_UNLOCKED | DRM_AUTH),
148}; 107};
149 108
150static void psb_lastclose(struct drm_device *dev) 109static void psb_driver_lastclose(struct drm_device *dev)
151{ 110{
152 int ret; 111 int ret;
153 struct drm_psb_private *dev_priv = dev->dev_private; 112 struct drm_psb_private *dev_priv = dev->dev_private;
@@ -169,19 +128,14 @@ static int psb_do_init(struct drm_device *dev)
169 128
170 uint32_t stolen_gtt; 129 uint32_t stolen_gtt;
171 130
172 int ret = -ENOMEM;
173
174 if (pg->mmu_gatt_start & 0x0FFFFFFF) { 131 if (pg->mmu_gatt_start & 0x0FFFFFFF) {
175 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n"); 132 dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
176 ret = -EINVAL; 133 return -EINVAL;
177 goto out_err;
178 } 134 }
179 135
180
181 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; 136 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
182 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; 137 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
183 stolen_gtt = 138 stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
184 (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
185 139
186 dev_priv->gatt_free_offset = pg->mmu_gatt_start + 140 dev_priv->gatt_free_offset = pg->mmu_gatt_start +
187 (stolen_gtt << PAGE_SHIFT) * 1024; 141 (stolen_gtt << PAGE_SHIFT) * 1024;
@@ -192,23 +146,26 @@ static int psb_do_init(struct drm_device *dev)
192 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); 146 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
193 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); 147 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
194 PSB_RSGX32(PSB_CR_BIF_BANK1); 148 PSB_RSGX32(PSB_CR_BIF_BANK1);
195 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK, 149
196 PSB_CR_BIF_CTRL); 150 /* Do not bypass any MMU access, let them pagefault instead */
151 PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
152 PSB_CR_BIF_CTRL);
153 PSB_RSGX32(PSB_CR_BIF_CTRL);
154
197 psb_spank(dev_priv); 155 psb_spank(dev_priv);
198 156
199 /* mmu_gatt ?? */ 157 /* mmu_gatt ?? */
200 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); 158 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
159 PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
160
201 return 0; 161 return 0;
202out_err:
203 return ret;
204} 162}
205 163
206static int psb_driver_unload(struct drm_device *dev) 164static int psb_driver_unload(struct drm_device *dev)
207{ 165{
208 struct drm_psb_private *dev_priv = dev->dev_private; 166 struct drm_psb_private *dev_priv = dev->dev_private;
209 167
210 /* Kill vblank etc here */ 168 /* TODO: Kill vblank etc here */
211
212 169
213 if (dev_priv) { 170 if (dev_priv) {
214 if (dev_priv->backlight_device) 171 if (dev_priv->backlight_device)
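
Two behavioural changes sit in the psb_do_init() hunk above: the MMU error-bypass bit is now cleared instead of set, so bad accesses fault and get reported, and each register write gains a read-back. The read-back is the generic MMIO posting idiom, sketched here with iowrite32/ioread32 standing in for the driver's PSB_WSGX32/PSB_RSGX32:

#include <linux/io.h>

/*
 * Reading the register back forces the posted write out to the device
 * before the CPU continues.
 */
static inline void write_posted(void __iomem *reg, u32 val)
{
	iowrite32(val, reg);
	(void)ioread32(reg);
}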
@@ -268,8 +225,7 @@ static int psb_driver_unload(struct drm_device *dev)
268 return 0; 225 return 0;
269} 226}
270 227
271 228static int psb_driver_load(struct drm_device *dev, unsigned long flags)
272static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
273{ 229{
274 struct drm_psb_private *dev_priv; 230 struct drm_psb_private *dev_priv;
275 unsigned long resource_start, resource_len; 231 unsigned long resource_start, resource_len;
@@ -277,15 +233,19 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
277 int ret = -ENOMEM; 233 int ret = -ENOMEM;
278 struct drm_connector *connector; 234 struct drm_connector *connector;
279 struct gma_encoder *gma_encoder; 235 struct gma_encoder *gma_encoder;
236 struct psb_gtt *pg;
280 237
238 /* allocating and initializing driver private data */
281 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 239 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
282 if (dev_priv == NULL) 240 if (dev_priv == NULL)
283 return -ENOMEM; 241 return -ENOMEM;
284 242
285 dev_priv->ops = (struct psb_ops *)chipset; 243 dev_priv->ops = (struct psb_ops *)flags;
286 dev_priv->dev = dev; 244 dev_priv->dev = dev;
287 dev->dev_private = (void *) dev_priv; 245 dev->dev_private = (void *) dev_priv;
288 246
247 pg = &dev_priv->gtt;
248
289 pci_set_master(dev->pdev); 249 pci_set_master(dev->pdev);
290 250
291 dev_priv->num_pipe = dev_priv->ops->pipes; 251 dev_priv->num_pipe = dev_priv->ops->pipes;
@@ -347,9 +307,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
347 if (ret) 307 if (ret)
348 goto out_err; 308 goto out_err;
349 309
350 dev_priv->mmu = psb_mmu_driver_init((void *)0, 310 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
351 drm_psb_trap_pagefaults, 0,
352 dev_priv);
353 if (!dev_priv->mmu) 311 if (!dev_priv->mmu)
354 goto out_err; 312 goto out_err;
355 313
@@ -357,18 +315,27 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
357 if (!dev_priv->pf_pd) 315 if (!dev_priv->pf_pd)
358 goto out_err; 316 goto out_err;
359 317
360 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
361 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
362
363 ret = psb_do_init(dev); 318 ret = psb_do_init(dev);
364 if (ret) 319 if (ret)
365 return ret; 320 return ret;
366 321
322 /* Add stolen memory to SGX MMU */
323 down_read(&pg->sem);
324 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
325 dev_priv->stolen_base >> PAGE_SHIFT,
326 pg->gatt_start,
327 pg->stolen_size >> PAGE_SHIFT, 0);
328 up_read(&pg->sem);
329
330 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
331 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
332
367 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); 333 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
368 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); 334 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
369 335
370 acpi_video_register(); 336 acpi_video_register();
371 337
338 /* Setup vertical blanking handling */
372 ret = drm_vblank_init(dev, dev_priv->num_pipe); 339 ret = drm_vblank_init(dev, dev_priv->num_pipe);
373 if (ret) 340 if (ret)
374 goto out_err; 341 goto out_err;
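
The new block above maps stolen memory into the SGX MMU's default page directory before the PD contexts are programmed, holding the GTT semaphore for read since the walk does not modify GTT bookkeeping. A sketch of that locking shape, with everything except the rwsem API and PAGE_SHIFT treated as illustrative:

#include <linux/mm.h>
#include <linux/rwsem.h>

struct gtt_info {
	struct rw_semaphore sem;
	unsigned long stolen_base, stolen_size, gatt_start;
};

static int map_stolen(struct gtt_info *pg,
		      int (*insert)(unsigned long pfn, unsigned long addr,
				    unsigned long npages))
{
	int ret;

	down_read(&pg->sem);	/* walking, not resizing, the GTT */
	ret = insert(pg->stolen_base >> PAGE_SHIFT, pg->gatt_start,
		     pg->stolen_size >> PAGE_SHIFT);
	up_read(&pg->sem);
	return ret;
}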
@@ -390,9 +357,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
390 drm_irq_install(dev); 357 drm_irq_install(dev);
391 358
392 dev->vblank_disable_allowed = true; 359 dev->vblank_disable_allowed = true;
393
394 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 360 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
395
396 dev->driver->get_vblank_counter = psb_get_vblank_counter; 361 dev->driver->get_vblank_counter = psb_get_vblank_counter;
397 362
398 psb_modeset_init(dev); 363 psb_modeset_init(dev);
@@ -416,11 +381,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
416 return ret; 381 return ret;
417 psb_intel_opregion_enable_asle(dev); 382 psb_intel_opregion_enable_asle(dev);
418#if 0 383#if 0
419 /*enable runtime pm at last*/ 384 /* Enable runtime pm at last */
420 pm_runtime_enable(&dev->pdev->dev); 385 pm_runtime_enable(&dev->pdev->dev);
421 pm_runtime_set_active(&dev->pdev->dev); 386 pm_runtime_set_active(&dev->pdev->dev);
422#endif 387#endif
423 /*Intel drm driver load is done, continue doing pvr load*/ 388 /* Intel drm driver load is done, continue doing pvr load */
424 return 0; 389 return 0;
425out_err: 390out_err:
426 psb_driver_unload(dev); 391 psb_driver_unload(dev);
@@ -442,161 +407,6 @@ static inline void get_brightness(struct backlight_device *bd)
442#endif 407#endif
443} 408}
444 409
445static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
446 struct drm_file *file_priv)
447{
448 struct drm_psb_private *dev_priv = psb_priv(dev);
449 uint32_t *arg = data;
450
451 dev_priv->blc_adj2 = *arg;
452 get_brightness(dev_priv->backlight_device);
453 return 0;
454}
455
456static int psb_adb_ioctl(struct drm_device *dev, void *data,
457 struct drm_file *file_priv)
458{
459 struct drm_psb_private *dev_priv = psb_priv(dev);
460 uint32_t *arg = data;
461
462 dev_priv->blc_adj1 = *arg;
463 get_brightness(dev_priv->backlight_device);
464 return 0;
465}
466
467static int psb_gamma_ioctl(struct drm_device *dev, void *data,
468 struct drm_file *file_priv)
469{
470 struct drm_psb_dpst_lut_arg *lut_arg = data;
471 struct drm_mode_object *obj;
472 struct drm_crtc *crtc;
473 struct drm_connector *connector;
474 struct gma_crtc *gma_crtc;
475 int i = 0;
476 int32_t obj_id;
477
478 obj_id = lut_arg->output_id;
479 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
480 if (!obj) {
481 dev_dbg(dev->dev, "Invalid Connector object.\n");
482 return -ENOENT;
483 }
484
485 connector = obj_to_connector(obj);
486 crtc = connector->encoder->crtc;
487 gma_crtc = to_gma_crtc(crtc);
488
489 for (i = 0; i < 256; i++)
490 gma_crtc->lut_adj[i] = lut_arg->lut[i];
491
492 gma_crtc_load_lut(crtc);
493
494 return 0;
495}
496
497static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
498 struct drm_file *file_priv)
499{
500 uint32_t obj_id;
501 uint16_t op;
502 struct drm_mode_modeinfo *umode;
503 struct drm_display_mode *mode = NULL;
504 struct drm_psb_mode_operation_arg *arg;
505 struct drm_mode_object *obj;
506 struct drm_connector *connector;
507 struct drm_connector_helper_funcs *connector_funcs;
508 int ret = 0;
509 int resp = MODE_OK;
510
511 arg = (struct drm_psb_mode_operation_arg *)data;
512 obj_id = arg->obj_id;
513 op = arg->operation;
514
515 switch (op) {
516 case PSB_MODE_OPERATION_MODE_VALID:
517 umode = &arg->mode;
518
519 drm_modeset_lock_all(dev);
520
521 obj = drm_mode_object_find(dev, obj_id,
522 DRM_MODE_OBJECT_CONNECTOR);
523 if (!obj) {
524 ret = -ENOENT;
525 goto mode_op_out;
526 }
527
528 connector = obj_to_connector(obj);
529
530 mode = drm_mode_create(dev);
531 if (!mode) {
532 ret = -ENOMEM;
533 goto mode_op_out;
534 }
535
536 /* drm_crtc_convert_umode(mode, umode); */
537 {
538 mode->clock = umode->clock;
539 mode->hdisplay = umode->hdisplay;
540 mode->hsync_start = umode->hsync_start;
541 mode->hsync_end = umode->hsync_end;
542 mode->htotal = umode->htotal;
543 mode->hskew = umode->hskew;
544 mode->vdisplay = umode->vdisplay;
545 mode->vsync_start = umode->vsync_start;
546 mode->vsync_end = umode->vsync_end;
547 mode->vtotal = umode->vtotal;
548 mode->vscan = umode->vscan;
549 mode->vrefresh = umode->vrefresh;
550 mode->flags = umode->flags;
551 mode->type = umode->type;
552 strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
553 mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
554 }
555
556 connector_funcs = (struct drm_connector_helper_funcs *)
557 connector->helper_private;
558
559 if (connector_funcs->mode_valid) {
560 resp = connector_funcs->mode_valid(connector, mode);
561 arg->data = resp;
562 }
563
564 /*do some clean up work*/
565 if (mode)
566 drm_mode_destroy(dev, mode);
567mode_op_out:
568 drm_modeset_unlock_all(dev);
569 return ret;
570
571 default:
572 dev_dbg(dev->dev, "Unsupported psb mode operation\n");
573 return -EOPNOTSUPP;
574 }
575
576 return 0;
577}
578
579static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
580 struct drm_file *file_priv)
581{
582 struct drm_psb_private *dev_priv = psb_priv(dev);
583 struct drm_psb_stolen_memory_arg *arg = data;
584
585 arg->base = dev_priv->stolen_base;
586 arg->size = dev_priv->vram_stolen_size;
587
588 return 0;
589}
590
591static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
592{
593 return 0;
594}
595
596static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
597{
598}
599
600static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, 410static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
601 unsigned long arg) 411 unsigned long arg)
602{ 412{
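
The roughly 150 lines removed above are the bodies of the dropped GMA-private ioctls. Their mode-validation path remains a useful template for the lock/lookup/use/unlock discipline around mode objects (the removed code even open-coded drm_crtc_convert_umode(), citing it in a comment). A trimmed sketch of that discipline, with sketch_validate as a hypothetical name:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int sketch_validate(struct drm_device *dev, u32 connector_id)
{
	struct drm_mode_object *obj;
	int ret = 0;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, connector_id,
				   DRM_MODE_OBJECT_CONNECTOR);
	if (!obj)
		ret = -ENOENT;
	/* obj_to_connector(obj) is only safe to use while locked */
	drm_modeset_unlock_all(dev);
	return ret;
}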
@@ -614,15 +424,21 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
614 /* FIXME: do we need to wrap the other side of this */ 424 /* FIXME: do we need to wrap the other side of this */
615} 425}
616 426
617 427/*
618/* When a client dies: 428 * When a client dies:
619 * - Check for and clean up flipped page state 429 * - Check for and clean up flipped page state
620 */ 430 */
621static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) 431static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
622{ 432{
623} 433}
624 434
625static void psb_remove(struct pci_dev *pdev) 435static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
436{
437 return drm_get_pci_dev(pdev, ent, &driver);
438}
439
440
441static void psb_pci_remove(struct pci_dev *pdev)
626{ 442{
627 struct drm_device *dev = pci_get_drvdata(pdev); 443 struct drm_device *dev = pci_get_drvdata(pdev);
628 drm_put_dev(dev); 444 drm_put_dev(dev);
@@ -657,11 +473,12 @@ static const struct file_operations psb_gem_fops = {
657 473
658static struct drm_driver driver = { 474static struct drm_driver driver = {
659 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ 475 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
660 DRIVER_MODESET | DRIVER_GEM , 476 DRIVER_MODESET | DRIVER_GEM,
661 .load = psb_driver_load, 477 .load = psb_driver_load,
662 .unload = psb_driver_unload, 478 .unload = psb_driver_unload,
479 .lastclose = psb_driver_lastclose,
480 .preclose = psb_driver_preclose,
663 481
664 .ioctls = psb_ioctls,
665 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls), 482 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
666 .device_is_agp = psb_driver_device_is_agp, 483 .device_is_agp = psb_driver_device_is_agp,
667 .irq_preinstall = psb_irq_preinstall, 484 .irq_preinstall = psb_irq_preinstall,
@@ -671,40 +488,31 @@ static struct drm_driver driver = {
671 .enable_vblank = psb_enable_vblank, 488 .enable_vblank = psb_enable_vblank,
672 .disable_vblank = psb_disable_vblank, 489 .disable_vblank = psb_disable_vblank,
673 .get_vblank_counter = psb_get_vblank_counter, 490 .get_vblank_counter = psb_get_vblank_counter,
674 .lastclose = psb_lastclose,
675 .open = psb_driver_open,
676 .preclose = psb_driver_preclose,
677 .postclose = psb_driver_close,
678 491
679 .gem_free_object = psb_gem_free_object, 492 .gem_free_object = psb_gem_free_object,
680 .gem_vm_ops = &psb_gem_vm_ops, 493 .gem_vm_ops = &psb_gem_vm_ops,
494
681 .dumb_create = psb_gem_dumb_create, 495 .dumb_create = psb_gem_dumb_create,
682 .dumb_map_offset = psb_gem_dumb_map_gtt, 496 .dumb_map_offset = psb_gem_dumb_map_gtt,
683 .dumb_destroy = drm_gem_dumb_destroy, 497 .dumb_destroy = drm_gem_dumb_destroy,
498 .ioctls = psb_ioctls,
684 .fops = &psb_gem_fops, 499 .fops = &psb_gem_fops,
685 .name = DRIVER_NAME, 500 .name = DRIVER_NAME,
686 .desc = DRIVER_DESC, 501 .desc = DRIVER_DESC,
687 .date = PSB_DRM_DRIVER_DATE, 502 .date = DRIVER_DATE,
688 .major = PSB_DRM_DRIVER_MAJOR, 503 .major = DRIVER_MAJOR,
689 .minor = PSB_DRM_DRIVER_MINOR, 504 .minor = DRIVER_MINOR,
690 .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL 505 .patchlevel = DRIVER_PATCHLEVEL
691}; 506};
692 507
693static struct pci_driver psb_pci_driver = { 508static struct pci_driver psb_pci_driver = {
694 .name = DRIVER_NAME, 509 .name = DRIVER_NAME,
695 .id_table = pciidlist, 510 .id_table = pciidlist,
696 .probe = psb_probe, 511 .probe = psb_pci_probe,
697 .remove = psb_remove, 512 .remove = psb_pci_remove,
698 .driver = { 513 .driver.pm = &psb_pm_ops,
699 .pm = &psb_pm_ops,
700 }
701}; 514};
702 515
703static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
704{
705 return drm_get_pci_dev(pdev, ent, &driver);
706}
707
708static int __init psb_init(void) 516static int __init psb_init(void)
709{ 517{
710 return drm_pci_init(&driver, &psb_pci_driver); 518 return drm_pci_init(&driver, &psb_pci_driver);
@@ -718,6 +526,6 @@ static void __exit psb_exit(void)
718late_initcall(psb_init); 526late_initcall(psb_init);
719module_exit(psb_exit); 527module_exit(psb_exit);
720 528
721MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others"); 529MODULE_AUTHOR(DRIVER_AUTHOR);
722MODULE_DESCRIPTION(DRIVER_DESC); 530MODULE_DESCRIPTION(DRIVER_DESC);
723MODULE_LICENSE("GPL"); 531MODULE_LICENSE(DRIVER_LICENSE);
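
With probe and remove renamed to the psb_pci_* pair and the pm ops collapsed into a designated initializer, the registration boilerplate reduces to the drm_pci_init()/drm_pci_exit() pair of this era. A sketch with stub structures standing in for the file's driver and psb_pci_driver:

#include <drm/drmP.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct drm_driver sketch_drm;	/* stands in for 'driver' */
static struct pci_driver sketch_pci;	/* stands in for 'psb_pci_driver' */

static int __init sketch_init(void)
{
	/* Registers the PCI driver; probed devices bind to the DRM driver. */
	return drm_pci_init(&sketch_drm, &sketch_pci);
}

static void __exit sketch_exit(void)
{
	drm_pci_exit(&sketch_drm, &sketch_pci);
}

late_initcall(sketch_init);	/* mirrors the file's late_initcall(psb_init) */
module_exit(sketch_exit);
MODULE_LICENSE("GPL");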
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5ad6a03e477e..55ebe2bd88dd 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -33,6 +33,18 @@
33#include "power.h" 33#include "power.h"
34#include "opregion.h" 34#include "opregion.h"
35#include "oaktrail.h" 35#include "oaktrail.h"
36#include "mmu.h"
37
38#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
39#define DRIVER_LICENSE "GPL"
40
41#define DRIVER_NAME "gma500"
42#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
43#define DRIVER_DATE "20140314"
44
45#define DRIVER_MAJOR 1
46#define DRIVER_MINOR 0
47#define DRIVER_PATCHLEVEL 0
36 48
37/* Append new drm mode definition here, align with libdrm definition */ 49/* Append new drm mode definition here, align with libdrm definition */
38#define DRM_MODE_SCALE_NO_SCALE 2 50#define DRM_MODE_SCALE_NO_SCALE 2
@@ -49,21 +61,7 @@ enum {
49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) 61#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) 62#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 63
52/* 64/* Hardware offsets */
53 * Driver definitions
54 */
55
56#define DRIVER_NAME "gma500"
57#define DRIVER_DESC "DRM driver for the Intel GMA500"
58
59#define PSB_DRM_DRIVER_DATE "2011-06-06"
60#define PSB_DRM_DRIVER_MAJOR 1
61#define PSB_DRM_DRIVER_MINOR 0
62#define PSB_DRM_DRIVER_PATCHLEVEL 0
63
64/*
65 * Hardware offsets
66 */
67#define PSB_VDC_OFFSET 0x00000000 65#define PSB_VDC_OFFSET 0x00000000
68#define PSB_VDC_SIZE 0x000080000 66#define PSB_VDC_SIZE 0x000080000
69#define MRST_MMIO_SIZE 0x0000C0000 67#define MRST_MMIO_SIZE 0x0000C0000
@@ -71,16 +69,14 @@ enum {
71#define PSB_SGX_SIZE 0x8000 69#define PSB_SGX_SIZE 0x8000
72#define PSB_SGX_OFFSET 0x00040000 70#define PSB_SGX_OFFSET 0x00040000
73#define MRST_SGX_OFFSET 0x00080000 71#define MRST_SGX_OFFSET 0x00080000
74/* 72
75 * PCI resource identifiers 73/* PCI resource identifiers */
76 */
77#define PSB_MMIO_RESOURCE 0 74#define PSB_MMIO_RESOURCE 0
78#define PSB_AUX_RESOURCE 0 75#define PSB_AUX_RESOURCE 0
79#define PSB_GATT_RESOURCE 2 76#define PSB_GATT_RESOURCE 2
80#define PSB_GTT_RESOURCE 3 77#define PSB_GTT_RESOURCE 3
81/* 78
82 * PCI configuration 79/* PCI configuration */
83 */
84#define PSB_GMCH_CTRL 0x52 80#define PSB_GMCH_CTRL 0x52
85#define PSB_BSM 0x5C 81#define PSB_BSM 0x5C
86#define _PSB_GMCH_ENABLED 0x4 82#define _PSB_GMCH_ENABLED 0x4
@@ -88,37 +84,29 @@ enum {
88#define _PSB_PGETBL_ENABLED 0x00000001 84#define _PSB_PGETBL_ENABLED 0x00000001
89#define PSB_SGX_2D_SLAVE_PORT 0x4000 85#define PSB_SGX_2D_SLAVE_PORT 0x4000
90 86
91/* To get rid of */ 87/* TODO: To get rid of */
92#define PSB_TT_PRIV0_LIMIT (256*1024*1024) 88#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
93#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) 89#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
94 90
95/* 91/* SGX side MMU definitions (these can probably go) */
96 * SGX side MMU definitions (these can probably go)
97 */
98 92
99/* 93/* Flags for external memory type field */
100 * Flags for external memory type field.
101 */
102#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ 94#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
103#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ 95#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
104#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ 96#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
105/* 97
106 * PTE's and PDE's 98/* PTE's and PDE's */
107 */
108#define PSB_PDE_MASK 0x003FFFFF 99#define PSB_PDE_MASK 0x003FFFFF
109#define PSB_PDE_SHIFT 22 100#define PSB_PDE_SHIFT 22
110#define PSB_PTE_SHIFT 12 101#define PSB_PTE_SHIFT 12
111/* 102
112 * Cache control 103/* Cache control */
113 */
114#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ 104#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
115#define PSB_PTE_WO 0x0002 /* Write only */ 105#define PSB_PTE_WO 0x0002 /* Write only */
116#define PSB_PTE_RO 0x0004 /* Read only */ 106#define PSB_PTE_RO 0x0004 /* Read only */
117#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ 107#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
118 108
119/* 109/* VDC registers and bits */
120 * VDC registers and bits
121 */
122#define PSB_MSVDX_CLOCKGATING 0x2064 110#define PSB_MSVDX_CLOCKGATING 0x2064
123#define PSB_TOPAZ_CLOCKGATING 0x2068 111#define PSB_TOPAZ_CLOCKGATING 0x2068
124#define PSB_HWSTAM 0x2098 112#define PSB_HWSTAM 0x2098
@@ -265,6 +253,7 @@ struct psb_intel_opregion {
265 struct opregion_asle *asle; 253 struct opregion_asle *asle;
266 void *vbt; 254 void *vbt;
267 u32 __iomem *lid_state; 255 u32 __iomem *lid_state;
256 struct work_struct asle_work;
268}; 257};
269 258
270struct sdvo_device_mapping { 259struct sdvo_device_mapping {
@@ -283,10 +272,7 @@ struct intel_gmbus {
283 u32 reg0; 272 u32 reg0;
284}; 273};
285 274
286/* 275/* Register offset maps */
287 * Register offset maps
288 */
289
290struct psb_offset { 276struct psb_offset {
291 u32 fp0; 277 u32 fp0;
292 u32 fp1; 278 u32 fp1;
@@ -320,9 +306,7 @@ struct psb_offset {
320 * update the register cache instead. 306 * update the register cache instead.
321 */ 307 */
322 308
323/* 309/* Common status for pipes */
324 * Common status for pipes.
325 */
326struct psb_pipe { 310struct psb_pipe {
327 u32 fp0; 311 u32 fp0;
328 u32 fp1; 312 u32 fp1;
@@ -482,35 +466,24 @@ struct drm_psb_private {
482 struct psb_mmu_driver *mmu; 466 struct psb_mmu_driver *mmu;
483 struct psb_mmu_pd *pf_pd; 467 struct psb_mmu_pd *pf_pd;
484 468
485 /* 469 /* Register base */
486 * Register base
487 */
488
489 uint8_t __iomem *sgx_reg; 470 uint8_t __iomem *sgx_reg;
490 uint8_t __iomem *vdc_reg; 471 uint8_t __iomem *vdc_reg;
491 uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */ 472 uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
492 uint32_t gatt_free_offset; 473 uint32_t gatt_free_offset;
493 474
494 /* 475 /* Fencing / irq */
495 * Fencing / irq.
496 */
497
498 uint32_t vdc_irq_mask; 476 uint32_t vdc_irq_mask;
499 uint32_t pipestat[PSB_NUM_PIPE]; 477 uint32_t pipestat[PSB_NUM_PIPE];
500 478
501 spinlock_t irqmask_lock; 479 spinlock_t irqmask_lock;
502 480
503 /* 481 /* Power */
504 * Power
505 */
506
507 bool suspended; 482 bool suspended;
508 bool display_power; 483 bool display_power;
509 int display_count; 484 int display_count;
510 485
511 /* 486 /* Modesetting */
512 * Modesetting
513 */
514 struct psb_intel_mode_device mode_dev; 487 struct psb_intel_mode_device mode_dev;
515 bool modeset; /* true if we have done the mode_device setup */ 488 bool modeset; /* true if we have done the mode_device setup */
516 489
@@ -518,15 +491,10 @@ struct drm_psb_private {
518 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE]; 491 struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
519 uint32_t num_pipe; 492 uint32_t num_pipe;
520 493
521 /* 494 /* OSPM info (Power management base) (TODO: can go ?) */
522 * OSPM info (Power management base) (can go ?)
523 */
524 uint32_t ospm_base; 495 uint32_t ospm_base;
525 496
526 /* 497 /* Sizes info */
527 * Sizes info
528 */
529
530 u32 fuse_reg_value; 498 u32 fuse_reg_value;
531 u32 video_device_fuse; 499 u32 video_device_fuse;
532 500
@@ -546,9 +514,7 @@ struct drm_psb_private {
546 struct drm_property *broadcast_rgb_property; 514 struct drm_property *broadcast_rgb_property;
547 struct drm_property *force_audio_property; 515 struct drm_property *force_audio_property;
548 516
549 /* 517 /* LVDS info */
550 * LVDS info
551 */
552 int backlight_duty_cycle; /* restore backlight to this value */ 518 int backlight_duty_cycle; /* restore backlight to this value */
553 bool panel_wants_dither; 519 bool panel_wants_dither;
554 struct drm_display_mode *panel_fixed_mode; 520 struct drm_display_mode *panel_fixed_mode;
@@ -582,34 +548,23 @@ struct drm_psb_private {
582 /* Oaktrail HDMI state */ 548 /* Oaktrail HDMI state */
583 struct oaktrail_hdmi_dev *hdmi_priv; 549 struct oaktrail_hdmi_dev *hdmi_priv;
584 550
585 /* 551 /* Register state */
586 * Register state
587 */
588
589 struct psb_save_area regs; 552 struct psb_save_area regs;
590 553
591 /* MSI reg save */ 554 /* MSI reg save */
592 uint32_t msi_addr; 555 uint32_t msi_addr;
593 uint32_t msi_data; 556 uint32_t msi_data;
594 557
595 /* 558 /* Hotplug handling */
596 * Hotplug handling
597 */
598
599 struct work_struct hotplug_work; 559 struct work_struct hotplug_work;
600 560
601 /* 561 /* LID-Switch */
602 * LID-Switch
603 */
604 spinlock_t lid_lock; 562 spinlock_t lid_lock;
605 struct timer_list lid_timer; 563 struct timer_list lid_timer;
606 struct psb_intel_opregion opregion; 564 struct psb_intel_opregion opregion;
607 u32 lid_last_state; 565 u32 lid_last_state;
608 566
609 /* 567 /* Watchdog */
610 * Watchdog
611 */
612
613 uint32_t apm_reg; 568 uint32_t apm_reg;
614 uint16_t apm_base; 569 uint16_t apm_base;
615 570
@@ -629,9 +584,7 @@ struct drm_psb_private {
629 /* 2D acceleration */ 584 /* 2D acceleration */
630 spinlock_t lock_2d; 585 spinlock_t lock_2d;
631 586
632 /* 587 /* Panel brightness */
633 * Panel brightness
634 */
635 int brightness; 588 int brightness;
636 int brightness_adjusted; 589 int brightness_adjusted;
637 590
@@ -664,10 +617,7 @@ struct drm_psb_private {
664}; 617};
665 618
666 619
667/* 620/* Operations for each board type */
668 * Operations for each board type
669 */
670
671struct psb_ops { 621struct psb_ops {
672 const char *name; 622 const char *name;
673 unsigned int accel_2d:1; 623 unsigned int accel_2d:1;
@@ -713,8 +663,6 @@ struct psb_ops {
713 663
714 664
715 665
716struct psb_mmu_driver;
717
718extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int); 666extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
719extern int drm_pick_crtcs(struct drm_device *dev); 667extern int drm_pick_crtcs(struct drm_device *dev);
720 668
@@ -723,52 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
723 return (struct drm_psb_private *) dev->dev_private; 671 return (struct drm_psb_private *) dev->dev_private;
724} 672}
725 673
726/* 674/* psb_irq.c */
727 * MMU stuff.
728 */
729
730extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
731 int trap_pagefaults,
732 int invalid_type,
733 struct drm_psb_private *dev_priv);
734extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
735extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
736 *driver);
737extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
738 uint32_t gtt_start, uint32_t gtt_pages);
739extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
740 int trap_pagefaults,
741 int invalid_type);
742extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
743extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
744extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
745 unsigned long address,
746 uint32_t num_pages);
747extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
748 uint32_t start_pfn,
749 unsigned long address,
750 uint32_t num_pages, int type);
751extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
752 unsigned long *pfn);
753
754/*
755 * Enable / disable MMU for different requestors.
756 */
757
758
759extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
760extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
761 unsigned long address, uint32_t num_pages,
762 uint32_t desired_tile_stride,
763 uint32_t hw_tile_stride, int type);
764extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
765 unsigned long address, uint32_t num_pages,
766 uint32_t desired_tile_stride,
767 uint32_t hw_tile_stride);
768/*
769 *psb_irq.c
770 */
771
772extern irqreturn_t psb_irq_handler(int irq, void *arg); 675extern irqreturn_t psb_irq_handler(int irq, void *arg);
773extern int psb_irq_enable_dpst(struct drm_device *dev); 676extern int psb_irq_enable_dpst(struct drm_device *dev);
774extern int psb_irq_disable_dpst(struct drm_device *dev); 677extern int psb_irq_disable_dpst(struct drm_device *dev);
@@ -791,24 +694,17 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
791 694
792extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); 695extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
793 696
794/* 697/* framebuffer.c */
795 * framebuffer.c
796 */
797extern int psbfb_probed(struct drm_device *dev); 698extern int psbfb_probed(struct drm_device *dev);
798extern int psbfb_remove(struct drm_device *dev, 699extern int psbfb_remove(struct drm_device *dev,
799 struct drm_framebuffer *fb); 700 struct drm_framebuffer *fb);
800/* 701/* accel_2d.c */
801 * accel_2d.c
802 */
803extern void psbfb_copyarea(struct fb_info *info, 702extern void psbfb_copyarea(struct fb_info *info,
804 const struct fb_copyarea *region); 703 const struct fb_copyarea *region);
805extern int psbfb_sync(struct fb_info *info); 704extern int psbfb_sync(struct fb_info *info);
806extern void psb_spank(struct drm_psb_private *dev_priv); 705extern void psb_spank(struct drm_psb_private *dev_priv);
807 706
808/* 707/* psb_reset.c */
809 * psb_reset.c
810 */
811
812extern void psb_lid_timer_init(struct drm_psb_private *dev_priv); 708extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
813extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); 709extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
814extern void psb_print_pagefault(struct drm_psb_private *dev_priv); 710extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
@@ -867,9 +763,7 @@ extern const struct psb_ops mdfld_chip_ops;
867/* cdv_device.c */ 763/* cdv_device.c */
868extern const struct psb_ops cdv_chip_ops; 764extern const struct psb_ops cdv_chip_ops;
869 765
870/* 766/* Debug print bits setting */
871 * Debug print bits setting
872 */
873#define PSB_D_GENERAL (1 << 0) 767#define PSB_D_GENERAL (1 << 0)
874#define PSB_D_INIT (1 << 1) 768#define PSB_D_INIT (1 << 1)
875#define PSB_D_IRQ (1 << 2) 769#define PSB_D_IRQ (1 << 2)
@@ -885,10 +779,7 @@ extern const struct psb_ops cdv_chip_ops;
885 779
886extern int drm_idle_check_interval; 780extern int drm_idle_check_interval;
887 781
888/* 782/* Utilities */
889 * Utilities
890 */
891
892static inline u32 MRST_MSG_READ32(uint port, uint offset) 783static inline u32 MRST_MSG_READ32(uint port, uint offset)
893{ 784{
894 int mcr = (0xD0<<24) | (port << 16) | (offset << 8); 785 int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
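
MRST_MSG_READ32() is cut off above mid-body; judging from the mcr layout shown and the 0xD0/0xD4 command/data pair used by the deleted psb_get_core_freq(), the completed shape is presumably the following (a sketch, not the header's verbatim body):

#include <linux/pci.h>

static inline u32 sketch_msg_read32(unsigned int port, unsigned int offset)
{
	int mcr = (0xD0 << 24) | (port << 16) | (offset << 8);
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	u32 ret_val = 0;

	pci_write_config_dword(pci_root, 0xD0, mcr);	/* issue command */
	pci_read_config_dword(pci_root, 0xD4, &ret_val);	/* fetch response */
	pci_dev_put(pci_root);
	return ret_val;
}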
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index c8841ac6c8f1..87b50ba64ed4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -120,7 +120,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
120 const struct gma_limit_t *limit; 120 const struct gma_limit_t *limit;
121 121
122 /* No scan out no play */ 122 /* No scan out no play */
123 if (crtc->fb == NULL) { 123 if (crtc->primary->fb == NULL) {
124 crtc_funcs->mode_set_base(crtc, x, y, old_fb); 124 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
125 return 0; 125 return 0;
126 } 126 }
@@ -469,7 +469,8 @@ static void psb_intel_cursor_init(struct drm_device *dev,
469 /* Allocate 4 pages of stolen mem for a hardware cursor. That 469 /* Allocate 4 pages of stolen mem for a hardware cursor. That
470 * is enough for the 64 x 64 ARGB cursors we support. 470 * is enough for the 64 x 64 ARGB cursors we support.
471 */ 471 */
472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); 472 cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
473 PAGE_SIZE);
473 if (!cursor_gt) { 474 if (!cursor_gt) {
474 gma_crtc->cursor_gt = NULL; 475 gma_crtc->cursor_gt = NULL;
475 goto out; 476 goto out;
@@ -554,33 +555,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
554 gma_crtc->active = true; 555 gma_crtc->active = true;
555} 556}
556 557
557int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
558 struct drm_file *file_priv)
559{
560 struct drm_psb_private *dev_priv = dev->dev_private;
561 struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
562 struct drm_mode_object *drmmode_obj;
563 struct gma_crtc *crtc;
564
565 if (!dev_priv) {
566 dev_err(dev->dev, "called with no initialization\n");
567 return -EINVAL;
568 }
569
570 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
571 DRM_MODE_OBJECT_CRTC);
572
573 if (!drmmode_obj) {
574 dev_err(dev->dev, "no such CRTC id\n");
575 return -ENOENT;
576 }
577
578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
579 pipe_from_crtc_id->pipe = crtc->pipe;
580
581 return 0;
582}
583
584struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) 558struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
585{ 559{
586 struct drm_crtc *crtc = NULL; 560 struct drm_crtc *crtc = NULL;
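
The crtc->fb to crtc->primary->fb substitutions in this file, and in the LVDS and SDVO files below, follow from the primary-plane work in this pull: the scanout framebuffer now hangs off the CRTC's primary plane. As a one-function sketch:

#include <drm/drm_crtc.h>

static inline struct drm_framebuffer *scanout_fb(struct drm_crtc *crtc)
{
	return crtc->primary->fb;	/* was crtc->fb before the rework */
}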
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index dc2c8eb030fa..336bd3aa1a06 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -238,8 +238,6 @@ static inline struct gma_encoder *gma_attached_encoder(
238 238
239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 239extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
240 struct drm_crtc *crtc); 240 struct drm_crtc *crtc);
241extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
242 struct drm_file *file_priv);
243extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 241extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
244 int pipe); 242 int pipe);
245extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, 243extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 32342f6990d9..d7778d0472c1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -614,7 +614,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
614 &crtc->saved_mode, 614 &crtc->saved_mode,
615 encoder->crtc->x, 615 encoder->crtc->x,
616 encoder->crtc->y, 616 encoder->crtc->y,
617 encoder->crtc->fb)) 617 encoder->crtc->primary->fb))
618 goto set_prop_error; 618 goto set_prop_error;
619 } 619 }
620 } else if (!strcmp(property->name, "backlight")) { 620 } else if (!strcmp(property->name, "backlight")) {
@@ -777,6 +777,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
777 * Attempt to get the fixed panel mode from DDC. Assume that the 777 * Attempt to get the fixed panel mode from DDC. Assume that the
778 * preferred mode is the right one. 778 * preferred mode is the right one.
779 */ 779 */
780 mutex_lock(&dev->mode_config.mutex);
780 psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter); 781 psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
781 list_for_each_entry(scan, &connector->probed_modes, head) { 782 list_for_each_entry(scan, &connector->probed_modes, head) {
782 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 783 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -827,10 +828,12 @@ void psb_intel_lvds_init(struct drm_device *dev,
827 * actually having one. 828 * actually having one.
828 */ 829 */
829out: 830out:
831 mutex_unlock(&dev->mode_config.mutex);
830 drm_sysfs_connector_add(connector); 832 drm_sysfs_connector_add(connector);
831 return; 833 return;
832 834
833failed_find: 835failed_find:
836 mutex_unlock(&dev->mode_config.mutex);
834 if (lvds_priv->ddc_bus) 837 if (lvds_priv->ddc_bus)
835 psb_intel_i2c_destroy(lvds_priv->ddc_bus); 838 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
836failed_ddc: 839failed_ddc:
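
The new mutex_lock/mutex_unlock pair above exists because connector->probed_modes may only be walked under dev->mode_config.mutex; both exit paths (out and failed_find) now drop it. The same discipline as a standalone sketch, with sketch_find_preferred as an illustrative name:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static struct drm_display_mode *
sketch_find_preferred(struct drm_device *dev, struct drm_connector *connector)
{
	struct drm_display_mode *scan, *preferred = NULL;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
			preferred = scan;
			break;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);
	return preferred;	/* init path: the connector is still private here */
}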
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 07d3a9e6d79b..deeb0829b129 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -406,18 +406,18 @@ static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8
406 DRM_DEBUG_KMS("%s: W: %02X ", 406 DRM_DEBUG_KMS("%s: W: %02X ",
407 SDVO_NAME(psb_intel_sdvo), cmd); 407 SDVO_NAME(psb_intel_sdvo), cmd);
408 for (i = 0; i < args_len; i++) 408 for (i = 0; i < args_len; i++)
409 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); 409 DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
410 for (; i < 8; i++) 410 for (; i < 8; i++)
411 DRM_LOG_KMS(" "); 411 DRM_DEBUG_KMS(" ");
412 for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { 412 for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
413 if (cmd == sdvo_cmd_names[i].cmd) { 413 if (cmd == sdvo_cmd_names[i].cmd) {
414 DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); 414 DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
415 break; 415 break;
416 } 416 }
417 } 417 }
418 if (i == ARRAY_SIZE(sdvo_cmd_names)) 418 if (i == ARRAY_SIZE(sdvo_cmd_names))
419 DRM_LOG_KMS("(%02X)", cmd); 419 DRM_DEBUG_KMS("(%02X)", cmd);
420 DRM_LOG_KMS("\n"); 420 DRM_DEBUG_KMS("\n");
421} 421}
422 422
423static const char *cmd_status_names[] = { 423static const char *cmd_status_names[] = {
@@ -512,9 +512,9 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
512 } 512 }
513 513
514 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 514 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
515 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 515 DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
516 else 516 else
517 DRM_LOG_KMS("(??? %d)", status); 517 DRM_DEBUG_KMS("(??? %d)", status);
518 518
519 if (status != SDVO_CMD_STATUS_SUCCESS) 519 if (status != SDVO_CMD_STATUS_SUCCESS)
520 goto log_fail; 520 goto log_fail;
@@ -525,13 +525,13 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
525 SDVO_I2C_RETURN_0 + i, 525 SDVO_I2C_RETURN_0 + i,
526 &((u8 *)response)[i])) 526 &((u8 *)response)[i]))
527 goto log_fail; 527 goto log_fail;
528 DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); 528 DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
529 } 529 }
530 DRM_LOG_KMS("\n"); 530 DRM_DEBUG_KMS("\n");
531 return true; 531 return true;
532 532
533log_fail: 533log_fail:
534 DRM_LOG_KMS("... failed\n"); 534 DRM_DEBUG_KMS("... failed\n");
535 return false; 535 return false;
536} 536}
537 537
@@ -1844,7 +1844,7 @@ done:
1844 if (psb_intel_sdvo->base.base.crtc) { 1844 if (psb_intel_sdvo->base.base.crtc) {
1845 struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc; 1845 struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
1846 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1846 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1847 crtc->y, crtc->fb); 1847 crtc->y, crtc->primary->fb);
1848 } 1848 }
1849 1849
1850 return 0; 1850 return 0;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index f883f9e4c524..624eb36511c5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,11 +200,64 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
200 mid_pipe_event_handler(dev, 1); 200 mid_pipe_event_handler(dev, 1);
201} 201}
202 202
203/*
204 * SGX interrupt handler
205 */
206static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
207{
208 struct drm_psb_private *dev_priv = dev->dev_private;
209 u32 val, addr;
210 int error = false;
211
212 if (stat_1 & _PSB_CE_TWOD_COMPLETE)
213 val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
214
215 if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
216 val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
217 addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
218 if (val) {
219 if (val & _PSB_CBI_STAT_PF_N_RW)
220 DRM_ERROR("SGX MMU page fault:");
221 else
222 DRM_ERROR("SGX MMU read / write protection fault:");
223
224 if (val & _PSB_CBI_STAT_FAULT_CACHE)
225 DRM_ERROR("\tCache requestor");
226 if (val & _PSB_CBI_STAT_FAULT_TA)
227 DRM_ERROR("\tTA requestor");
228 if (val & _PSB_CBI_STAT_FAULT_VDM)
229 DRM_ERROR("\tVDM requestor");
230 if (val & _PSB_CBI_STAT_FAULT_2D)
231 DRM_ERROR("\t2D requestor");
232 if (val & _PSB_CBI_STAT_FAULT_PBE)
233 DRM_ERROR("\tPBE requestor");
234 if (val & _PSB_CBI_STAT_FAULT_TSP)
235 DRM_ERROR("\tTSP requestor");
236 if (val & _PSB_CBI_STAT_FAULT_ISP)
237 DRM_ERROR("\tISP requestor");
238 if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
239 DRM_ERROR("\tUSSEPDS requestor");
240 if (val & _PSB_CBI_STAT_FAULT_HOST)
241 DRM_ERROR("\tHost requestor");
242
243 DRM_ERROR("\tMMU failing address is 0x%08x.\n",
244 (unsigned int)addr);
245 error = true;
246 }
247 }
248
249 /* Clear bits */
250 PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
251 PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
252 PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
253}
254
203irqreturn_t psb_irq_handler(int irq, void *arg) 255irqreturn_t psb_irq_handler(int irq, void *arg)
204{ 256{
205 struct drm_device *dev = arg; 257 struct drm_device *dev = arg;
206 struct drm_psb_private *dev_priv = dev->dev_private; 258 struct drm_psb_private *dev_priv = dev->dev_private;
207 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0; 259 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
260 u32 sgx_stat_1, sgx_stat_2;
208 int handled = 0; 261 int handled = 0;
209 262
210 spin_lock(&dev_priv->irqmask_lock); 263 spin_lock(&dev_priv->irqmask_lock);
@@ -233,14 +286,9 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
233 } 286 }
234 287
235 if (sgx_int) { 288 if (sgx_int) {
236 /* Not expected - we have it masked, shut it up */ 289 sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
237 u32 s, s2; 290 sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
238 s = PSB_RSGX32(PSB_CR_EVENT_STATUS); 291 psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
239 s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
240 PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
241 PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
242 /* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
243 we may as well poll even if we add that ! */
244 handled = 1; 292 handled = 1;
245 } 293 }
246 294
@@ -269,8 +317,13 @@ void psb_irq_preinstall(struct drm_device *dev)
269 317
270 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 318 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
271 319
272 if (gma_power_is_on(dev)) 320 if (gma_power_is_on(dev)) {
273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 321 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
322 PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
323 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
324 PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
325 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
326 }
274 if (dev->vblank[0].enabled) 327 if (dev->vblank[0].enabled)
275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; 328 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
276 if (dev->vblank[1].enabled) 329 if (dev->vblank[1].enabled)
@@ -286,7 +339,7 @@ void psb_irq_preinstall(struct drm_device *dev)
286 /* Revisit this area - want per device masks ? */ 339 /* Revisit this area - want per device masks ? */
287 if (dev_priv->ops->hotplug) 340 if (dev_priv->ops->hotplug)
288 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC; 341 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
289 dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE; 342 dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
290 343
291 /* This register is safe even if display island is off */ 344 /* This register is safe even if display island is off */
292 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 345 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
@@ -295,12 +348,16 @@ void psb_irq_preinstall(struct drm_device *dev)
295 348
296int psb_irq_postinstall(struct drm_device *dev) 349int psb_irq_postinstall(struct drm_device *dev)
297{ 350{
298 struct drm_psb_private *dev_priv = 351 struct drm_psb_private *dev_priv = dev->dev_private;
299 (struct drm_psb_private *) dev->dev_private;
300 unsigned long irqflags; 352 unsigned long irqflags;
301 353
302 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 354 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
303 355
356 /* Enable 2D and MMU fault interrupts */
357 PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
358 PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
359 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
360
304 /* This register is safe even if display island is off */ 361 /* This register is safe even if display island is off */
305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 362 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 363 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
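
psb_sgx_interrupt() above reports the faulting requestor with a chain of bit tests; the same decode can be written as a table walk, shown here as a sketch that reuses the patch's own _PSB_CBI_STAT_FAULT_* macros:

#include <drm/drmP.h>

static void sketch_decode_requestors(u32 val)
{
	static const struct {
		u32 bit;
		const char *name;
	} req[] = {
		{ _PSB_CBI_STAT_FAULT_CACHE,   "Cache" },
		{ _PSB_CBI_STAT_FAULT_TA,      "TA" },
		{ _PSB_CBI_STAT_FAULT_VDM,     "VDM" },
		{ _PSB_CBI_STAT_FAULT_2D,      "2D" },
		{ _PSB_CBI_STAT_FAULT_PBE,     "PBE" },
		{ _PSB_CBI_STAT_FAULT_TSP,     "TSP" },
		{ _PSB_CBI_STAT_FAULT_ISP,     "ISP" },
		{ _PSB_CBI_STAT_FAULT_USSEPDS, "USSEPDS" },
		{ _PSB_CBI_STAT_FAULT_HOST,    "Host" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(req); i++)
		if (val & req[i].bit)
			DRM_ERROR("\t%s requestor\n", req[i].name);
}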
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index faa77f543a07..48af5cac1902 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -19,6 +19,8 @@
 
 #include <linux/hdmi.h>
 #include <linux/module.h>
+#include <linux/irq.h>
+#include <sound/asoundef.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -30,6 +32,7 @@
 
 struct tda998x_priv {
 	struct i2c_client *cec;
+	struct i2c_client *hdmi;
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
@@ -38,6 +41,10 @@ struct tda998x_priv {
 	u8 vip_cntrl_1;
 	u8 vip_cntrl_2;
 	struct tda998x_encoder_params params;
+
+	wait_queue_head_t wq_edid;
+	volatile int wq_edid_wait;
+	struct drm_encoder *encoder;
 };
 
 #define to_tda998x_priv(x)  ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -120,6 +127,8 @@ struct tda998x_priv {
 # define VIP_CNTRL_5_CKCASE       (1 << 0)
 # define VIP_CNTRL_5_SP_CNT(x)    (((x) & 3) << 1)
 #define REG_MUX_AP                REG(0x00, 0x26)     /* read/write */
+# define MUX_AP_SELECT_I2S        0x64
+# define MUX_AP_SELECT_SPDIF      0x40
 #define REG_MUX_VP_VIP_OUT        REG(0x00, 0x27)     /* read/write */
 #define REG_MAT_CONTRL            REG(0x00, 0x80)     /* write */
 # define MAT_CONTRL_MAT_SC(x)     (((x) & 3) << 0)
@@ -197,10 +206,11 @@ struct tda998x_priv {
 #define REG_I2S_FORMAT            REG(0x00, 0xfc)     /* read/write */
 # define I2S_FORMAT(x)            (((x) & 3) << 0)
 #define REG_AIP_CLKSEL            REG(0x00, 0xfd)     /* write */
-# define AIP_CLKSEL_FS(x)         (((x) & 3) << 0)
-# define AIP_CLKSEL_CLK_POL(x)    (((x) & 1) << 2)
-# define AIP_CLKSEL_AIP(x)        (((x) & 7) << 3)
-
+# define AIP_CLKSEL_AIP_SPDIF     (0 << 3)
+# define AIP_CLKSEL_AIP_I2S       (1 << 3)
+# define AIP_CLKSEL_FS_ACLK       (0 << 0)
+# define AIP_CLKSEL_FS_MCLK       (1 << 0)
+# define AIP_CLKSEL_FS_FS64SPDIF  (2 << 0)
 
 /* Page 02h: PLL settings */
 #define REG_PLL_SERIAL_1          REG(0x02, 0x00)     /* read/write */
@@ -304,11 +314,16 @@ struct tda998x_priv {
 
 /* CEC registers: (not paged)
  */
+#define REG_CEC_INTSTATUS         0xee                /* read */
+# define CEC_INTSTATUS_CEC        (1 << 0)
+# define CEC_INTSTATUS_HDMI       (1 << 1)
 #define REG_CEC_FRO_IM_CLK_CTRL   0xfb                /* read/write */
 # define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
 # define CEC_FRO_IM_CLK_CTRL_ENA_OTP   (1 << 6)
 # define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
 # define CEC_FRO_IM_CLK_CTRL_FRO_DIV   (1 << 0)
+#define REG_CEC_RXSHPDINTENA      0xfc                /* read/write */
+#define REG_CEC_RXSHPDINT         0xfd                /* read */
 #define REG_CEC_RXSHPDLEV         0xfe                /* read */
 # define CEC_RXSHPDLEV_RXSENS     (1 << 0)
 # define CEC_RXSHPDLEV_HPD        (1 << 1)
@@ -328,21 +343,21 @@ struct tda998x_priv {
 #define TDA19988 0x0301
 
 static void
-cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
 {
-	struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+	struct i2c_client *client = priv->cec;
 	uint8_t buf[] = {addr, val};
 	int ret;
 
-	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
 }
 
 static uint8_t
-cec_read(struct drm_encoder *encoder, uint8_t addr)
+cec_read(struct tda998x_priv *priv, uint8_t addr)
 {
-	struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+	struct i2c_client *client = priv->cec;
 	uint8_t val;
 	int ret;
 
@@ -361,32 +376,36 @@ fail:
 	return 0;
 }
 
-static void
-set_page(struct drm_encoder *encoder, uint16_t reg)
+static int
+set_page(struct tda998x_priv *priv, uint16_t reg)
 {
-	struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
 	if (REG2PAGE(reg) != priv->current_page) {
-		struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+		struct i2c_client *client = priv->hdmi;
 		uint8_t buf[] = {
 			REG_CURPAGE, REG2PAGE(reg)
 		};
 		int ret = i2c_master_send(client, buf, sizeof(buf));
-		if (ret < 0)
-			dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+		if (ret < 0) {
+			dev_err(&client->dev, "setpage %04x err %d\n",
+					reg, ret);
+			return ret;
+		}
 
 		priv->current_page = REG2PAGE(reg);
 	}
+	return 0;
 }
 
 static int
-reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 {
-	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct i2c_client *client = priv->hdmi;
 	uint8_t addr = REG2ADDR(reg);
 	int ret;
 
-	set_page(encoder, reg);
+	ret = set_page(priv, reg);
+	if (ret < 0)
+		return ret;
 
 	ret = i2c_master_send(client, &addr, sizeof(addr));
 	if (ret < 0)
@@ -404,100 +423,147 @@ fail:
 }
 
 static void
-reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
 {
-	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct i2c_client *client = priv->hdmi;
 	uint8_t buf[cnt+1];
 	int ret;
 
 	buf[0] = REG2ADDR(reg);
 	memcpy(&buf[1], p, cnt);
 
-	set_page(encoder, reg);
+	ret = set_page(priv, reg);
+	if (ret < 0)
+		return;
 
 	ret = i2c_master_send(client, buf, cnt + 1);
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
 }
 
-static uint8_t
-reg_read(struct drm_encoder *encoder, uint16_t reg)
+static int
+reg_read(struct tda998x_priv *priv, uint16_t reg)
 {
 	uint8_t val = 0;
-	reg_read_range(encoder, reg, &val, sizeof(val));
+	int ret;
+
+	ret = reg_read_range(priv, reg, &val, sizeof(val));
+	if (ret < 0)
+		return ret;
 	return val;
 }
 
 static void
-reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 {
-	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct i2c_client *client = priv->hdmi;
 	uint8_t buf[] = {REG2ADDR(reg), val};
 	int ret;
 
-	set_page(encoder, reg);
+	ret = set_page(priv, reg);
+	if (ret < 0)
+		return;
 
-	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
 }
 
 static void
-reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
 {
-	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct i2c_client *client = priv->hdmi;
 	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
 	int ret;
 
-	set_page(encoder, reg);
+	ret = set_page(priv, reg);
+	if (ret < 0)
+		return;
 
-	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
 }
 
 static void
-reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 {
-	reg_write(encoder, reg, reg_read(encoder, reg) | val);
+	int old_val;
+
+	old_val = reg_read(priv, reg);
+	if (old_val >= 0)
+		reg_write(priv, reg, old_val | val);
 }
 
 static void
-reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 {
-	reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+	int old_val;
+
+	old_val = reg_read(priv, reg);
+	if (old_val >= 0)
+		reg_write(priv, reg, old_val & ~val);
 }
 
 static void
-tda998x_reset(struct drm_encoder *encoder)
+tda998x_reset(struct tda998x_priv *priv)
 {
 	/* reset audio and i2c master: */
-	reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+	reg_write(priv, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
 	msleep(50);
-	reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+	reg_write(priv, REG_SOFTRESET, 0);
 	msleep(50);
 
 	/* reset transmitter: */
-	reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
-	reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+	reg_set(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+	reg_clear(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
 
 	/* PLL registers common configuration */
-	reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
-	reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
-	reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
-	reg_write(encoder, REG_SERIALIZER, 0x00);
-	reg_write(encoder, REG_BUFFER_OUT, 0x00);
-	reg_write(encoder, REG_PLL_SCG1, 0x00);
-	reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
-	reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
-	reg_write(encoder, REG_PLL_SCGN1, 0xfa);
-	reg_write(encoder, REG_PLL_SCGN2, 0x00);
-	reg_write(encoder, REG_PLL_SCGR1, 0x5b);
-	reg_write(encoder, REG_PLL_SCGR2, 0x00);
-	reg_write(encoder, REG_PLL_SCG2, 0x10);
+	reg_write(priv, REG_PLL_SERIAL_1, 0x00);
+	reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+	reg_write(priv, REG_PLL_SERIAL_3, 0x00);
+	reg_write(priv, REG_SERIALIZER, 0x00);
+	reg_write(priv, REG_BUFFER_OUT, 0x00);
+	reg_write(priv, REG_PLL_SCG1, 0x00);
+	reg_write(priv, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
+	reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+	reg_write(priv, REG_PLL_SCGN1, 0xfa);
+	reg_write(priv, REG_PLL_SCGN2, 0x00);
+	reg_write(priv, REG_PLL_SCGR1, 0x5b);
+	reg_write(priv, REG_PLL_SCGR2, 0x00);
+	reg_write(priv, REG_PLL_SCG2, 0x10);
 
 	/* Write the default value MUX register */
-	reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+/*
+ * only 2 interrupts may occur: screen plug/unplug and EDID read
+ */
+static irqreturn_t tda998x_irq_thread(int irq, void *data)
+{
+	struct tda998x_priv *priv = data;
+	u8 sta, cec, lvl, flag0, flag1, flag2;
+
+	if (!priv)
+		return IRQ_HANDLED;
+	sta = cec_read(priv, REG_CEC_INTSTATUS);
+	cec = cec_read(priv, REG_CEC_RXSHPDINT);
+	lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+	flag0 = reg_read(priv, REG_INT_FLAGS_0);
+	flag1 = reg_read(priv, REG_INT_FLAGS_1);
+	flag2 = reg_read(priv, REG_INT_FLAGS_2);
+	DRM_DEBUG_DRIVER(
+		"tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+		sta, cec, lvl, flag0, flag1, flag2);
+	if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+		priv->wq_edid_wait = 0;
+		wake_up(&priv->wq_edid);
+	} else if (cec != 0) {	/* HPD change */
+		if (priv->encoder && priv->encoder->dev)
+			drm_helper_hpd_irq_event(priv->encoder->dev);
+	}
+	return IRQ_HANDLED;
 }
 
 static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
@@ -513,91 +579,88 @@ static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
 #define PB(x) (HB(2) + 1 + (x))
 
 static void
-tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
 		 uint8_t *buf, size_t size)
 {
 	buf[PB(0)] = tda998x_cksum(buf, size);
 
-	reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
-	reg_write_range(encoder, addr, buf, size);
-	reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+	reg_clear(priv, REG_DIP_IF_FLAGS, bit);
+	reg_write_range(priv, addr, buf, size);
+	reg_set(priv, REG_DIP_IF_FLAGS, bit);
 }
 
 static void
-tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
 {
-	uint8_t buf[PB(5) + 1];
+	u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1];
 
 	memset(buf, 0, sizeof(buf));
-	buf[HB(0)] = 0x84;
+	buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO;
 	buf[HB(1)] = 0x01;
-	buf[HB(2)] = 10;
+	buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE;
 	buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
 	buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
 	buf[PB(4)] = p->audio_frame[4];
 	buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
 
-	tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
 			 sizeof(buf));
 }
 
 static void
-tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
 {
-	uint8_t buf[PB(13) + 1];
+	u8 buf[PB(HDMI_AVI_INFOFRAME_SIZE) + 1];
 
 	memset(buf, 0, sizeof(buf));
-	buf[HB(0)] = 0x82;
+	buf[HB(0)] = HDMI_INFOFRAME_TYPE_AVI;
 	buf[HB(1)] = 0x02;
-	buf[HB(2)] = 13;
+	buf[HB(2)] = HDMI_AVI_INFOFRAME_SIZE;
 	buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+	buf[PB(2)] = HDMI_ACTIVE_ASPECT_PICTURE;
 	buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
 	buf[PB(4)] = drm_match_cea_mode(mode);
 
-	tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+	tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
 			 sizeof(buf));
 }
 
-static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
 {
 	if (on) {
-		reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
-		reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
-		reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+		reg_set(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+		reg_clear(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+		reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
 	} else {
-		reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+		reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
 	}
 }
 
 static void
-tda998x_configure_audio(struct drm_encoder *encoder,
+tda998x_configure_audio(struct tda998x_priv *priv,
 		struct drm_display_mode *mode, struct tda998x_encoder_params *p)
 {
-	uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+	uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv;
 	uint32_t n;
 
 	/* Enable audio ports */
-	reg_write(encoder, REG_ENA_AP, p->audio_cfg);
-	reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+	reg_write(priv, REG_ENA_AP, p->audio_cfg);
+	reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
 
 	/* Set audio input source */
 	switch (p->audio_format) {
 	case AFMT_SPDIF:
-		reg_write(encoder, REG_MUX_AP, 0x40);
-		clksel_aip = AIP_CLKSEL_AIP(0);
-		/* FS64SPDIF */
-		clksel_fs = AIP_CLKSEL_FS(2);
+		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
+		clksel_aip = AIP_CLKSEL_AIP_SPDIF;
+		clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
 		cts_n = CTS_N_M(3) | CTS_N_K(3);
-		ca_i2s = 0;
 		break;
 
 	case AFMT_I2S:
-		reg_write(encoder, REG_MUX_AP, 0x64);
-		clksel_aip = AIP_CLKSEL_AIP(1);
-		/* ACLK */
-		clksel_fs = AIP_CLKSEL_FS(0);
+		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
+		clksel_aip = AIP_CLKSEL_AIP_I2S;
+		clksel_fs = AIP_CLKSEL_FS_ACLK;
 		cts_n = CTS_N_M(3) | CTS_N_K(3);
-		ca_i2s = CA_I2S_CA_I2S(0);
 		break;
 
 	default:
@@ -605,12 +668,10 @@ tda998x_configure_audio(struct drm_encoder *encoder,
 		return;
 	}
 
-	reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
-	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
-
-	/* Enable automatic CTS generation */
-	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
-	reg_write(encoder, REG_CTS_N, cts_n);
+	reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
+	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT |
+			AIP_CNTRL_0_ACR_MAN);	/* auto CTS */
+	reg_write(priv, REG_CTS_N, cts_n);
 
 	/*
 	 * Audio input somehow depends on HDMI line rate which is
@@ -619,11 +680,15 @@ tda998x_configure_audio(struct drm_encoder *encoder,
 	 * There is no detailed info in the datasheet, so we just
 	 * assume 100MHz requires larger divider.
 	 */
+	adiv = AUDIO_DIV_SERCLK_8;
 	if (mode->clock > 100000)
-		adiv = AUDIO_DIV_SERCLK_16;
-	else
-		adiv = AUDIO_DIV_SERCLK_8;
-	reg_write(encoder, REG_AUDIO_DIV, adiv);
+		adiv++;			/* AUDIO_DIV_SERCLK_16 */
+
+	/* S/PDIF asks for a larger divider */
+	if (p->audio_format == AFMT_SPDIF)
+		adiv++;			/* AUDIO_DIV_SERCLK_16 or _32 */
+
+	reg_write(priv, REG_AUDIO_DIV, adiv);
 
 	/*
 	 * This is the approximate value of N, which happens to be
@@ -638,28 +703,29 @@ tda998x_configure_audio(struct drm_encoder *encoder,
 	buf[3] = n;
 	buf[4] = n >> 8;
 	buf[5] = n >> 16;
-	reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+	reg_write_range(priv, REG_ACR_CTS_0, buf, 6);
 
 	/* Set CTS clock reference */
-	reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+	reg_write(priv, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
 
 	/* Reset CTS generator */
-	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
-	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 
 	/* Write the channel status */
-	buf[0] = 0x04;
+	buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
 	buf[1] = 0x00;
-	buf[2] = 0x00;
-	buf[3] = 0xf1;
-	reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+	buf[2] = IEC958_AES3_CON_FS_NOTID;
+	buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
+			IEC958_AES4_CON_MAX_WORDLEN_24;
+	reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
 
-	tda998x_audio_mute(encoder, true);
-	mdelay(20);
-	tda998x_audio_mute(encoder, false);
+	tda998x_audio_mute(priv, true);
+	msleep(20);
+	tda998x_audio_mute(priv, false);
 
 	/* Write the audio information packet */
-	tda998x_write_aif(encoder, p);
+	tda998x_write_aif(priv, p);
 }
 
 /* DRM encoder functions */
@@ -701,19 +767,19 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		/* enable video ports, audio will be enabled later */
-		reg_write(encoder, REG_ENA_VP_0, 0xff);
-		reg_write(encoder, REG_ENA_VP_1, 0xff);
-		reg_write(encoder, REG_ENA_VP_2, 0xff);
+		reg_write(priv, REG_ENA_VP_0, 0xff);
+		reg_write(priv, REG_ENA_VP_1, 0xff);
+		reg_write(priv, REG_ENA_VP_2, 0xff);
 		/* set muxing after enabling ports: */
-		reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
-		reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
-		reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+		reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+		reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+		reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		/* disable video ports */
-		reg_write(encoder, REG_ENA_VP_0, 0x00);
-		reg_write(encoder, REG_ENA_VP_1, 0x00);
-		reg_write(encoder, REG_ENA_VP_2, 0x00);
+		reg_write(priv, REG_ENA_VP_0, 0x00);
+		reg_write(priv, REG_ENA_VP_1, 0x00);
+		reg_write(priv, REG_ENA_VP_2, 0x00);
 		break;
 	}
 
@@ -831,110 +897,110 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 	}
 
 	/* mute the audio FIFO: */
-	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
 
 	/* set HDMI HDCP mode off: */
-	reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
-	reg_clear(encoder, REG_TX33, TX33_HDMI);
+	reg_write(priv, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+	reg_clear(priv, REG_TX33, TX33_HDMI);
+	reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
 
-	reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
 	/* no pre-filter or interpolator: */
-	reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+	reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
 			HVF_CNTRL_0_INTPOL(0));
-	reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
-	reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+	reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+	reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
 			VIP_CNTRL_4_BLC(0));
-	reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
 
-	reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
-	reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
-	reg_write(encoder, REG_SERIALIZER, 0);
-	reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+	reg_clear(priv, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+	reg_clear(priv, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR |
+			PLL_SERIAL_3_SRL_DE);
+	reg_write(priv, REG_SERIALIZER, 0);
+	reg_write(priv, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
 
 	/* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
 	rep = 0;
-	reg_write(encoder, REG_RPT_CNTRL, 0);
-	reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+	reg_write(priv, REG_RPT_CNTRL, 0);
+	reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
 			SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
 
-	reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+	reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
 			PLL_SERIAL_2_SRL_PR(rep));
 
 	/* set color matrix bypass flag: */
-	reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+	reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
+			MAT_CONTRL_MAT_SC(1));
 
 	/* set BIAS tmds value: */
-	reg_write(encoder, REG_ANA_GENERAL, 0x09);
-
-	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+	reg_write(priv, REG_ANA_GENERAL, 0x09);
 
 	/*
 	 * Sync on rising HSYNC/VSYNC
 	 */
-	reg_write(encoder, REG_VIP_CNTRL_3, 0);
-	reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+	reg = VIP_CNTRL_3_SYNC_HS;
 
 	/*
 	 * TDA19988 requires high-active sync at input stage,
 	 * so invert low-active sync provided by master encoder here
 	 */
 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+		reg |= VIP_CNTRL_3_H_TGL;
 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+		reg |= VIP_CNTRL_3_V_TGL;
+	reg_write(priv, REG_VIP_CNTRL_3, reg);
+
+	reg_write(priv, REG_VIDFORMAT, 0x00);
+	reg_write16(priv, REG_REFPIX_MSB, ref_pix);
+	reg_write16(priv, REG_REFLINE_MSB, ref_line);
+	reg_write16(priv, REG_NPIX_MSB, n_pix);
+	reg_write16(priv, REG_NLINE_MSB, n_line);
+	reg_write16(priv, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+	reg_write16(priv, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+	reg_write16(priv, REG_VS_LINE_END_1_MSB, vs1_line_e);
+	reg_write16(priv, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+	reg_write16(priv, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+	reg_write16(priv, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+	reg_write16(priv, REG_VS_LINE_END_2_MSB, vs2_line_e);
+	reg_write16(priv, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+	reg_write16(priv, REG_HS_PIX_START_MSB, hs_pix_s);
+	reg_write16(priv, REG_HS_PIX_STOP_MSB, hs_pix_e);
+	reg_write16(priv, REG_VWIN_START_1_MSB, vwin1_line_s);
+	reg_write16(priv, REG_VWIN_END_1_MSB, vwin1_line_e);
+	reg_write16(priv, REG_VWIN_START_2_MSB, vwin2_line_s);
+	reg_write16(priv, REG_VWIN_END_2_MSB, vwin2_line_e);
+	reg_write16(priv, REG_DE_START_MSB, de_pix_s);
+	reg_write16(priv, REG_DE_STOP_MSB, de_pix_e);
+
+	if (priv->rev == TDA19988) {
+		/* let incoming pixels fill the active space (if any) */
+		reg_write(priv, REG_ENABLE_SPACE, 0x00);
+	}
 
 	/*
 	 * Always generate sync polarity relative to input sync and
 	 * revert input stage toggled sync at output stage
 	 */
-	reg = TBG_CNTRL_1_TGL_EN;
+	reg = TBG_CNTRL_1_DWIN_DIS | TBG_CNTRL_1_TGL_EN;
 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 		reg |= TBG_CNTRL_1_H_TGL;
 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
 		reg |= TBG_CNTRL_1_V_TGL;
-	reg_write(encoder, REG_TBG_CNTRL_1, reg);
-
-	reg_write(encoder, REG_VIDFORMAT, 0x00);
-	reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
-	reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-	reg_write16(encoder, REG_NPIX_MSB, n_pix);
-	reg_write16(encoder, REG_NLINE_MSB, n_line);
-	reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
-	reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
-	reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
-	reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
-	reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
-	reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
-	reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
-	reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
-	reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
-	reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
-	reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
-	reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
-	reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
-	reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
-	reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
-	reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
-
-	if (priv->rev == TDA19988) {
-		/* let incoming pixels fill the active space (if any) */
-		reg_write(encoder, REG_ENABLE_SPACE, 0x00);
-	}
+	reg_write(priv, REG_TBG_CNTRL_1, reg);
 
 	/* must be last register set: */
-	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+	reg_write(priv, REG_TBG_CNTRL_0, 0);
 
 	/* Only setup the info frames if the sink is HDMI */
 	if (priv->is_hdmi_sink) {
 		/* We need to turn HDMI HDCP stuff on to get audio through */
-		reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
-		reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
-		reg_set(encoder, REG_TX33, TX33_HDMI);
+		reg &= ~TBG_CNTRL_1_DWIN_DIS;
+		reg_write(priv, REG_TBG_CNTRL_1, reg);
+		reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+		reg_set(priv, REG_TX33, TX33_HDMI);
 
-		tda998x_write_avi(encoder, adjusted_mode);
+		tda998x_write_avi(priv, adjusted_mode);
 
 		if (priv->params.audio_cfg)
-			tda998x_configure_audio(encoder, adjusted_mode,
+			tda998x_configure_audio(priv, adjusted_mode,
 						&priv->params);
 	}
 }
@@ -943,7 +1009,9 @@ static enum drm_connector_status
 tda998x_encoder_detect(struct drm_encoder *encoder,
 		      struct drm_connector *connector)
 {
-	uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+	uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
 	return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
 			connector_status_disconnected;
 }
@@ -951,46 +1019,57 @@ tda998x_encoder_detect(struct drm_encoder *encoder,
 static int
 read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
 {
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	uint8_t offset, segptr;
 	int ret, i;
 
-	/* enable EDID read irq: */
-	reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
 	offset = (blk & 1) ? 128 : 0;
 	segptr = blk / 2;
 
-	reg_write(encoder, REG_DDC_ADDR, 0xa0);
-	reg_write(encoder, REG_DDC_OFFS, offset);
-	reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
-	reg_write(encoder, REG_DDC_SEGM, segptr);
+	reg_write(priv, REG_DDC_ADDR, 0xa0);
+	reg_write(priv, REG_DDC_OFFS, offset);
+	reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+	reg_write(priv, REG_DDC_SEGM, segptr);
 
 	/* enable reading EDID: */
-	reg_write(encoder, REG_EDID_CTRL, 0x1);
+	priv->wq_edid_wait = 1;
+	reg_write(priv, REG_EDID_CTRL, 0x1);
 
 	/* flag must be cleared by sw: */
-	reg_write(encoder, REG_EDID_CTRL, 0x0);
+	reg_write(priv, REG_EDID_CTRL, 0x0);
 
 	/* wait for block read to complete: */
-	for (i = 100; i > 0; i--) {
-		uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
-		if (val & INT_FLAGS_2_EDID_BLK_RD)
-			break;
-		msleep(1);
+	if (priv->hdmi->irq) {
+		i = wait_event_timeout(priv->wq_edid,
+					!priv->wq_edid_wait,
+					msecs_to_jiffies(100));
+		if (i < 0) {
+			dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+			return i;
+		}
+	} else {
+		for (i = 10; i > 0; i--) {
+			msleep(10);
+			ret = reg_read(priv, REG_INT_FLAGS_2);
+			if (ret < 0)
+				return ret;
+			if (ret & INT_FLAGS_2_EDID_BLK_RD)
+				break;
+		}
 	}
 
-	if (i == 0)
+	if (i == 0) {
+		dev_err(&priv->hdmi->dev, "read edid timeout\n");
 		return -ETIMEDOUT;
+	}
 
-	ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+	ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH);
 	if (ret != EDID_LENGTH) {
-		dev_err(encoder->dev->dev, "failed to read edid block %d: %d",
+		dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
 			blk, ret);
 		return ret;
 	}
 
-	reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
 	return 0;
 }
 
@@ -998,7 +1077,7 @@ static uint8_t *
 do_get_edid(struct drm_encoder *encoder)
 {
 	struct tda998x_priv *priv = to_tda998x_priv(encoder);
-	int j = 0, valid_extensions = 0;
+	int j, valid_extensions = 0;
 	uint8_t *block, *new;
 	bool print_bad_edid = drm_debug & DRM_UT_KMS;
 
@@ -1006,7 +1085,7 @@ do_get_edid(struct drm_encoder *encoder)
 		return NULL;
 
 	if (priv->rev == TDA19988)
-		reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+		reg_clear(priv, REG_TX4, TX4_PD_RAM);
 
 	/* base block fetch */
 	if (read_edid_block(encoder, block, 0))
@@ -1046,14 +1125,14 @@ do_get_edid(struct drm_encoder *encoder)
 
 done:
 	if (priv->rev == TDA19988)
-		reg_set(encoder, REG_TX4, TX4_PD_RAM);
+		reg_set(priv, REG_TX4, TX4_PD_RAM);
 
 	return block;
 
 fail:
 	if (priv->rev == TDA19988)
-		reg_set(encoder, REG_TX4, TX4_PD_RAM);
-	dev_warn(encoder->dev->dev, "failed to read EDID\n");
+		reg_set(priv, REG_TX4, TX4_PD_RAM);
+	dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
 	kfree(block);
 	return NULL;
 }
@@ -1080,7 +1159,13 @@ static int
 tda998x_encoder_create_resources(struct drm_encoder *encoder,
 				struct drm_connector *connector)
 {
-	DBG("");
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+	if (priv->hdmi->irq)
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	else
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			DRM_CONNECTOR_POLL_DISCONNECT;
 	return 0;
 }
 
@@ -1099,6 +1184,13 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	drm_i2c_encoder_destroy(encoder);
+
+	/* disable all IRQs and free the IRQ handler */
+	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+	if (priv->hdmi->irq)
+		free_irq(priv->hdmi->irq, priv);
+
 	if (priv->cec)
 		i2c_unregister_device(priv->cec);
 	kfree(priv);
@@ -1138,8 +1230,10 @@ tda998x_encoder_init(struct i2c_client *client,
 		     struct drm_device *dev,
 		     struct drm_encoder_slave *encoder_slave)
 {
-	struct drm_encoder *encoder = &encoder_slave->base;
 	struct tda998x_priv *priv;
+	struct device_node *np = client->dev.of_node;
+	u32 video;
+	int rev_lo, rev_hi, ret;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -1150,52 +1244,113 @@ tda998x_encoder_init(struct i2c_client *client,
 	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
 
 	priv->current_page = 0xff;
+	priv->hdmi = client;
 	priv->cec = i2c_new_dummy(client->adapter, 0x34);
 	if (!priv->cec) {
 		kfree(priv);
 		return -ENODEV;
 	}
+
+	priv->encoder = &encoder_slave->base;
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
 	encoder_slave->slave_priv = priv;
 	encoder_slave->slave_funcs = &tda998x_encoder_funcs;
 
 	/* wake up the device: */
-	cec_write(encoder, REG_CEC_ENAMODS,
+	cec_write(priv, REG_CEC_ENAMODS,
 			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
 
-	tda998x_reset(encoder);
+	tda998x_reset(priv);
 
 	/* read version: */
-	priv->rev = reg_read(encoder, REG_VERSION_LSB) |
-			reg_read(encoder, REG_VERSION_MSB) << 8;
+	rev_lo = reg_read(priv, REG_VERSION_LSB);
+	rev_hi = reg_read(priv, REG_VERSION_MSB);
+	if (rev_lo < 0 || rev_hi < 0) {
+		ret = rev_lo < 0 ? rev_lo : rev_hi;
+		goto fail;
+	}
+
+	priv->rev = rev_lo | rev_hi << 8;
 
 	/* mask off feature bits: */
 	priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */
 
 	switch (priv->rev) {
-	case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
-	case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
-	case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
-	case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
+	case TDA9989N2:
+		dev_info(&client->dev, "found TDA9989 n2");
+		break;
+	case TDA19989:
+		dev_info(&client->dev, "found TDA19989");
+		break;
+	case TDA19989N2:
+		dev_info(&client->dev, "found TDA19989 n2");
+		break;
+	case TDA19988:
+		dev_info(&client->dev, "found TDA19988");
+		break;
 	default:
-		DBG("found unsupported device: %04x", priv->rev);
+		dev_err(&client->dev, "found unsupported device: %04x\n",
+			priv->rev);
 		goto fail;
 	}
 
 	/* after reset, enable DDC: */
-	reg_write(encoder, REG_DDC_DISABLE, 0x00);
+	reg_write(priv, REG_DDC_DISABLE, 0x00);
 
 	/* set clock on DDC channel: */
-	reg_write(encoder, REG_TX3, 39);
+	reg_write(priv, REG_TX3, 39);
 
 	/* if necessary, disable multi-master: */
 	if (priv->rev == TDA19989)
-		reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+		reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
 
-	cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+	cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
 			CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
 
+	/* initialize the optional IRQ */
+	if (client->irq) {
+		int irqf_trigger;
+
+		/* init read EDID waitqueue */
+		init_waitqueue_head(&priv->wq_edid);
+
+		/* clear pending interrupts */
+		reg_read(priv, REG_INT_FLAGS_0);
+		reg_read(priv, REG_INT_FLAGS_1);
+		reg_read(priv, REG_INT_FLAGS_2);
+
+		irqf_trigger =
+			irqd_get_trigger_type(irq_get_irq_data(client->irq));
+		ret = request_threaded_irq(client->irq, NULL,
+					   tda998x_irq_thread,
+					   irqf_trigger | IRQF_ONESHOT,
+					   "tda998x", priv);
+		if (ret) {
+			dev_err(&client->dev,
+				"failed to request IRQ#%u: %d\n",
+				client->irq, ret);
+			goto fail;
+		}
+
+		/* enable HPD irq */
+		cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
+	}
+
+	/* enable EDID read irq: */
+	reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+	if (!np)
+		return 0;	/* non-DT */
+
+	/* get the optional video properties */
+	ret = of_property_read_u32(np, "video-ports", &video);
+	if (ret == 0) {
+		priv->vip_cntrl_0 = video >> 16;
+		priv->vip_cntrl_1 = video >> 8;
+		priv->vip_cntrl_2 = video;
+	}
+
 	return 0;
 
 fail:
@@ -1210,6 +1365,14 @@ fail:
 	return -ENXIO;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id tda998x_dt_ids[] = {
+	{ .compatible = "nxp,tda998x", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
+#endif
+
 static struct i2c_device_id tda998x_ids[] = {
 	{ "tda998x", 0 },
 	{ }
@@ -1222,6 +1385,7 @@ static struct drm_i2c_encoder_driver tda998x_driver = {
 	.remove = tda998x_remove,
 	.driver = {
 		.name = "tda998x",
+		.of_match_table = of_match_ptr(tda998x_dt_ids),
 	},
 	.id_table = tda998x_ids,
 },
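
The tda998x changes above replace the EDID busy-wait loop with an interrupt-driven wait: probe registers a threaded IRQ handler, read_edid_block() arms wq_edid_wait and sleeps on a waitqueue, and the handler clears the flag and wakes the sleeper. A minimal sketch of that pattern, using a hypothetical foo device rather than the tda998x registers:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct foo_dev {
	wait_queue_head_t wq;	/* waiter sleeps here */
	int wait;		/* 1 while a transfer is outstanding */
};

/* threaded handler: runs in process context, so it may sleep (e.g. I2C) */
static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_dev *foo = data;

	foo->wait = 0;		/* event arrived ... */
	wake_up(&foo->wq);	/* ... release the waiter */
	return IRQ_HANDLED;
}

static int foo_start_and_wait(struct foo_dev *foo)
{
	long remaining;

	foo->wait = 1;		/* arm the flag before starting the transfer */
	/* ... kick off the hardware transfer here ... */

	remaining = wait_event_timeout(foo->wq, !foo->wait,
				       msecs_to_jiffies(100));
	return remaining ? 0 : -ETIMEDOUT;
}

static int foo_probe(struct foo_dev *foo, int irq)
{
	init_waitqueue_head(&foo->wq);
	/* no hard handler, so IRQF_ONESHOT is mandatory: the core
	 * rejects a threaded-only registration without it */
	return request_threaded_irq(irq, NULL, foo_irq_thread,
				    IRQF_ONESHOT, "foo", foo);
}

Arming the flag before starting the transfer matters: if the interrupt fires before wait_event_timeout() is reached, the condition is already true and the waiter returns immediately instead of sleeping out the full timeout.
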
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9fd44f5f3b3b..b1445b73465b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,57 +3,69 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
-	  i915_gpu_error.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+	  i915_params.o \
 	  i915_suspend.o \
-	  i915_gem.o \
+	  i915_sysfs.o \
+	  intel_pm.o
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
 	  i915_gem_context.o \
 	  i915_gem_debug.o \
+	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
+	  i915_gem.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
-	  i915_sysfs.o \
+	  i915_gpu_error.o \
+	  i915_irq.o \
 	  i915_trace_points.o \
-	  i915_ums.o \
+	  intel_ringbuffer.o \
+	  intel_uncore.o
+
+# modesetting core code
+i915-y += intel_bios.o \
 	  intel_display.o \
-	  intel_crt.o \
-	  intel_lvds.o \
-	  intel_dsi.o \
-	  intel_dsi_cmd.o \
-	  intel_dsi_pll.o \
-	  intel_bios.o \
-	  intel_ddi.o \
-	  intel_dp.o \
-	  intel_hdmi.o \
-	  intel_sdvo.o \
 	  intel_modes.o \
-	  intel_panel.o \
-	  intel_pm.o \
-	  intel_i2c.o \
-	  intel_tv.o \
-	  intel_dvo.o \
-	  intel_ringbuffer.o \
 	  intel_overlay.o \
-	  intel_sprite.o \
 	  intel_sideband.o \
-	  intel_uncore.o \
+	  intel_sprite.o
+i915-$(CONFIG_ACPI)	+= intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
 	  dvo_ch7xxx.o \
-	  dvo_ch7017.o \
 	  dvo_ivch.o \
-	  dvo_tfp410.o \
-	  dvo_sil164.o \
 	  dvo_ns2501.o \
-	  i915_gem_dmabuf.o
-
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+	  dvo_sil164.o \
+	  dvo_tfp410.o \
+	  intel_crt.o \
+	  intel_ddi.o \
+	  intel_dp.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi.o \
+	  intel_dsi_pll.o \
+	  intel_dvo.o \
+	  intel_hdmi.o \
+	  intel_i2c.o \
+	  intel_lvds.o \
+	  intel_panel.o \
+	  intel_sdvo.o \
+	  intel_tv.o
 
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+# legacy horrors
+i915-y += i915_dma.o \
+	  i915_ums.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
 
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index af42e94f6846..a0f5bdd69491 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -340,9 +340,9 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		uint8_t val;
 		if ((i % 8) == 0)
-			DRM_LOG_KMS("\n %02X: ", i);
+			DRM_DEBUG_KMS("\n %02X: ", i);
 		ch7xxx_readb(dvo, i, &val);
-		DRM_LOG_KMS("%02X ", val);
+		DRM_DEBUG_KMS("%02X ", val);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index baaf65bf0bdd..0f1865d7d4d8 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -377,41 +377,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_LOG_KMS("VR00: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_LOG_KMS("VR01: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_LOG_KMS("VR30: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_LOG_KMS("VR40: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_LOG_KMS("VR80: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_LOG_KMS("VR81: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_LOG_KMS("VR82: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_LOG_KMS("VR83: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_LOG_KMS("VR84: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_LOG_KMS("VR85: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_LOG_KMS("VR86: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_LOG_KMS("VR87: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_LOG_KMS("VR88: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 954acb2c7021..8155ded79079 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -490,15 +490,15 @@ static void ns2501_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
-	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
-	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REG8, &val);
-	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REG9, &val);
-	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REGC, &val);
-	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
 }
 
 static void ns2501_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 4debd32e3e4c..7b3e9e936200 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -246,15 +246,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index e17f1b07e915..12ea4b164692 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -267,33 +267,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 000000000000..4cf6d020d513
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,485 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Brad Volkin <bradley.d.volkin@intel.com>
25 *
26 */
27
28#include "i915_drv.h"
29
30/**
31 * DOC: i915 batch buffer command parser
32 *
33 * Motivation:
34 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
35 * require userspace code to submit batches containing commands such as
36 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
37 * generations of the hardware will noop these commands in "unsecure" batches
38 * (which includes all userspace batches submitted via i915) even though the
39 * commands may be safe and represent the intended programming model of the
40 * device.
41 *
42 * The software command parser is similar in operation to the command parsing
43 * done in hardware for unsecure batches. However, the software parser allows
44 * some operations that the hardware would otherwise noop, provided the
45 * parser determines they are safe, and then submits the batch as "secure"
46 * to bypass the hardware parsing.
47 *
48 * Threats:
49 * At a high level, the hardware (and software) checks attempt to prevent
50 * granting userspace undue privileges. There are three categories of privilege.
51 *
52 * First, commands which are explicitly defined as privileged or which should
53 * only be used by the kernel driver. The parser generally rejects such
54 * commands, though it may allow some from the drm master process.
55 *
56 * Second, commands which access registers. To support correct/enhanced
57 * userspace functionality, particularly certain OpenGL extensions, the parser
58 * provides a whitelist of registers which userspace may safely access (for both
59 * normal and drm master processes).
60 *
61 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
62 * The parser always rejects such commands.
63 *
64 * The majority of the problematic commands fall in the MI_* range, with only a
65 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
66 *
67 * Implementation:
68 * Each ring maintains tables of commands and registers which the parser uses in
69 * scanning batch buffers submitted to that ring.
70 *
71 * Since the set of commands that the parser must check for is significantly
72 * smaller than the number of commands supported, the parser tables contain only
73 * those commands required by the parser. This generally works because command
74 * opcode ranges have standard command length encodings. So for commands that
75 * the parser does not need to check, it can easily skip them. This is
76 * implemented via a per-ring length decoding vfunc.
77 *
78 * Unfortunately, there are a number of commands that do not follow the standard
79 * length encoding for their opcode range, primarily amongst the MI_* commands.
80 * To handle this, the parser provides a way to define explicit "skip" entries
81 * in the per-ring command tables.
82 *
83 * Other command table entries map fairly directly to high level categories
84 * mentioned above: rejected, master-only, register whitelist. The parser
85 * implements a number of checks, including the privileged memory checks, via a
86 * general bitmasking mechanism.
87 */
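To make the table layout concrete, here is an illustrative entry. This is a sketch only: the initializer shape is inferred from the field accesses later in this file (the real struct definitions live in a header not shown here), and the opcode value 0x23 is invented for the example.

static const struct drm_i915_cmd_descriptor sample_cmds[] = {
	/*
	 * Reject a hypothetical privileged MI command: cmd.value/cmd.mask
	 * identify the opcode bits, length.mask tells the parser how to
	 * decode the command's dword count, and flags selects the checks
	 * that i915_parse_cmds() applies when the opcode matches.
	 */
	{
		.flags = CMD_DESC_REJECT,
		.cmd = { .value = 0x23 << 23, .mask = 0x3f << 23 },
		.length = { .mask = 0x3f },
	},
};

static const struct drm_i915_cmd_table sample_cmd_table = {
	.table = sample_cmds,
	.count = ARRAY_SIZE(sample_cmds),
};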
88
89static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
90{
91 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
92 u32 subclient =
93 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
94
95 if (client == INSTR_MI_CLIENT)
96 return 0x3F;
97 else if (client == INSTR_RC_CLIENT) {
98 if (subclient == INSTR_MEDIA_SUBCLIENT)
99 return 0xFFFF;
100 else
101 return 0xFF;
102 }
103
104 DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
105 return 0;
106}
107
108static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
109{
110 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
111 u32 subclient =
112 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
113
114 if (client == INSTR_MI_CLIENT)
115 return 0x3F;
116 else if (client == INSTR_RC_CLIENT) {
117 if (subclient == INSTR_MEDIA_SUBCLIENT)
118 return 0xFFF;
119 else
120 return 0xFF;
121 }
122
123 DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
124 return 0;
125}
126
127static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
128{
129 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
130
131 if (client == INSTR_MI_CLIENT)
132 return 0x3F;
133 else if (client == INSTR_BC_CLIENT)
134 return 0xFF;
135
136 DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
137 return 0;
138}
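A quick worked example of how these masks are consumed (the header value is made up for illustration; LENGTH_BIAS is the +2 dword bias defined further down in this file):

	/* Hypothetical MI command header: client bits (31:29) are zero, i.e. MI. */
	u32 header = 0x11000001;
	u32 mask = gen7_render_get_cmd_length_mask(header);	/* MI client -> 0x3F */
	u32 len = (header & mask) + LENGTH_BIAS;		/* (1 & 0x3F) + 2 = 3 dwords */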
139
140static void validate_cmds_sorted(struct intel_ring_buffer *ring)
141{
142 int i;
143
144 if (!ring->cmd_tables || ring->cmd_table_count == 0)
145 return;
146
147 for (i = 0; i < ring->cmd_table_count; i++) {
148 const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
149 u32 previous = 0;
150 int j;
151
152 for (j = 0; j < table->count; j++) {
153 const struct drm_i915_cmd_descriptor *desc =
154 &table->table[j];
155 u32 curr = desc->cmd.value & desc->cmd.mask;
156
157 if (curr < previous)
158 DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
159 ring->id, i, j, curr, previous);
160
161 previous = curr;
162 }
163 }
164}
165
166static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
167{
168 int i;
169 u32 previous = 0;
170
171 for (i = 0; i < reg_count; i++) {
172 u32 curr = reg_table[i];
173
174 if (curr < previous)
175 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
176 ring_id, i, curr, previous);
177
178 previous = curr;
179 }
180}
181
182static void validate_regs_sorted(struct intel_ring_buffer *ring)
183{
184 check_sorted(ring->id, ring->reg_table, ring->reg_count);
185 check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
186}
187
188/**
189 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
190 * @ring: the ringbuffer to initialize
191 *
192 * Optionally initializes fields related to batch buffer command parsing in the
193 * struct intel_ring_buffer based on whether the platform requires software
194 * command parsing.
195 */
196void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
197{
198 if (!IS_GEN7(ring->dev))
199 return;
200
201 switch (ring->id) {
202 case RCS:
203 ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
204 break;
205 case VCS:
206 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
207 break;
208 case BCS:
209 ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
210 break;
211 case VECS:
212 /* VECS can use the same length_mask function as VCS */
213 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
214 break;
215 default:
216 DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
217 ring->id);
218 BUG();
219 }
220
221 validate_cmds_sorted(ring);
222 validate_regs_sorted(ring);
223}
224
225static const struct drm_i915_cmd_descriptor*
226find_cmd_in_table(const struct drm_i915_cmd_table *table,
227 u32 cmd_header)
228{
229 int i;
230
231 for (i = 0; i < table->count; i++) {
232 const struct drm_i915_cmd_descriptor *desc = &table->table[i];
233 u32 masked_cmd = desc->cmd.mask & cmd_header;
234 u32 masked_value = desc->cmd.value & desc->cmd.mask;
235
236 if (masked_cmd == masked_value)
237 return desc;
238 }
239
240 return NULL;
241}
242
243/*
244 * Returns a pointer to a descriptor for the command specified by cmd_header.
245 *
246 * The caller must supply space for a default descriptor via the default_desc
247 * parameter. If no descriptor for the specified command exists in the ring's
248 * command parser tables, this function fills in default_desc based on the
249 * ring's default length encoding and returns default_desc.
250 */
251static const struct drm_i915_cmd_descriptor*
252find_cmd(struct intel_ring_buffer *ring,
253 u32 cmd_header,
254 struct drm_i915_cmd_descriptor *default_desc)
255{
256 u32 mask;
257 int i;
258
259 for (i = 0; i < ring->cmd_table_count; i++) {
260 const struct drm_i915_cmd_descriptor *desc;
261
262 desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
263 if (desc)
264 return desc;
265 }
266
267 mask = ring->get_cmd_length_mask(cmd_header);
268 if (!mask)
269 return NULL;
270
271 BUG_ON(!default_desc);
272 default_desc->flags = CMD_DESC_SKIP;
273 default_desc->length.mask = mask;
274
275 return default_desc;
276}
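The calling convention — the caller provides stack space for the fallback descriptor — looks like this in use (mirroring the loop in i915_parse_cmds() below):

	struct drm_i915_cmd_descriptor default_desc = { 0 };
	const struct drm_i915_cmd_descriptor *desc;

	desc = find_cmd(ring, cmd_header, &default_desc);
	if (!desc)
		return -EINVAL;	/* no descriptor and no usable length encoding */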
277
278static bool valid_reg(const u32 *table, int count, u32 addr)
279{
280 if (table && count != 0) {
281 int i;
282
283 for (i = 0; i < count; i++) {
284 if (table[i] == addr)
285 return true;
286 }
287 }
288
289 return false;
290}
291
292static u32 *vmap_batch(struct drm_i915_gem_object *obj)
293{
294 int i;
295 void *addr = NULL;
296 struct sg_page_iter sg_iter;
297 struct page **pages;
298
299 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
300 if (pages == NULL) {
301 DRM_DEBUG_DRIVER("Failed to get space for pages\n");
302 goto finish;
303 }
304
305 i = 0;
306 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
307 pages[i] = sg_page_iter_page(&sg_iter);
308 i++;
309 }
310
311 addr = vmap(pages, i, 0, PAGE_KERNEL);
312 if (addr == NULL) {
313 DRM_DEBUG_DRIVER("Failed to vmap pages\n");
314 goto finish;
315 }
316
317finish:
318 if (pages)
319 drm_free_large(pages);
320 return (u32 *)addr;
321}
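The mapping returned here is an ordinary vmap() mapping, so the caller owns its teardown; i915_parse_cmds() below pairs it with vunmap(). A minimal usage sketch:

	u32 *batch_base = vmap_batch(batch_obj);
	if (!batch_base)
		return -ENOMEM;
	/* ... scan the batch contents ... */
	vunmap(batch_base);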
322
323/**
324 * i915_needs_cmd_parser() - should a given ring use software command parsing?
325 * @ring: the ring in question
326 *
327 * Only certain platforms require software batch buffer command parsing, and
328 * only when enabled via module parameter.
329 *
330 * Return: true if the ring requires software command parsing
331 */
332bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
333{
334 /* No command tables indicates a platform without parsing */
335 if (!ring->cmd_tables)
336 return false;
337
338 return (i915.enable_cmd_parser == 1);
339}
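The i915.enable_cmd_parser read above refers to the driver's module parameter of that name, so the parser can be toggled at load time; for example, booting with i915.enable_cmd_parser=1 on the kernel command line (or loading the module with enable_cmd_parser=1) turns it on for rings that have command tables. The parameter's default is set elsewhere and is not shown in this hunk.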
340
341#define LENGTH_BIAS 2
342
343/**
344 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
345 * @ring: the ring on which the batch is to execute
346 * @batch_obj: the batch buffer in question
347 * @batch_start_offset: byte offset in the batch at which execution starts
348 * @is_master: is the submitting process the drm master?
349 *
350 * Parses the specified batch buffer looking for privilege violations as
351 * described in the overview.
352 *
353 * Return: non-zero if the parser finds violations or otherwise fails
354 */
355int i915_parse_cmds(struct intel_ring_buffer *ring,
356 struct drm_i915_gem_object *batch_obj,
357 u32 batch_start_offset,
358 bool is_master)
359{
360 int ret = 0;
361 u32 *cmd, *batch_base, *batch_end;
362 struct drm_i915_cmd_descriptor default_desc = { 0 };
363 int needs_clflush = 0;
364
365 ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
366 if (ret) {
367 DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
368 return ret;
369 }
370
371 batch_base = vmap_batch(batch_obj);
372 if (!batch_base) {
373 DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
374 i915_gem_object_unpin_pages(batch_obj);
375 return -ENOMEM;
376 }
377
378 if (needs_clflush)
379 drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
380
381 cmd = batch_base + (batch_start_offset / sizeof(*cmd));
382 batch_end = batch_base + (batch_obj->base.size / sizeof(*batch_end));
383
384 while (cmd < batch_end) {
385 const struct drm_i915_cmd_descriptor *desc;
386 u32 length;
387
388 if (*cmd == MI_BATCH_BUFFER_END)
389 break;
390
391 desc = find_cmd(ring, *cmd, &default_desc);
392 if (!desc) {
393 DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
394 *cmd);
395 ret = -EINVAL;
396 break;
397 }
398
399 if (desc->flags & CMD_DESC_FIXED)
400 length = desc->length.fixed;
401 else
402 length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
403
404 if ((batch_end - cmd) < length) {
405 DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
406 *cmd,
407 length,
408 batch_end - cmd);
409 ret = -EINVAL;
410 break;
411 }
412
413 if (desc->flags & CMD_DESC_REJECT) {
414 DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
415 ret = -EINVAL;
416 break;
417 }
418
419 if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
420 DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
421 *cmd);
422 ret = -EINVAL;
423 break;
424 }
425
426 if (desc->flags & CMD_DESC_REGISTER) {
427 u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
428
429 if (!valid_reg(ring->reg_table,
430 ring->reg_count, reg_addr)) {
431 if (!is_master ||
432 !valid_reg(ring->master_reg_table,
433 ring->master_reg_count,
434 reg_addr)) {
435 DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
436 reg_addr,
437 *cmd,
438 ring->id);
439 ret = -EINVAL;
440 break;
441 }
442 }
443 }
444
445 if (desc->flags & CMD_DESC_BITMASK) {
446 int i;
447
448 for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
449 u32 dword;
450
451 if (desc->bits[i].mask == 0)
452 break;
453
454 dword = cmd[desc->bits[i].offset] &
455 desc->bits[i].mask;
456
457 if (dword != desc->bits[i].expected) {
458 DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
459 *cmd,
460 desc->bits[i].mask,
461 desc->bits[i].expected,
462 dword, ring->id);
463 ret = -EINVAL;
464 break;
465 }
466 }
467
468 if (ret)
469 break;
470 }
471
472 cmd += length;
473 }
474
475 if (cmd >= batch_end) {
476 DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
477 ret = -EINVAL;
478 }
479
480 vunmap(batch_base);
481
482 i915_gem_object_unpin_pages(batch_obj);
483
484 return ret;
485}
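For context, the intended integration point is the execbuffer path (the actual call site lives in i915_gem_execbuffer.c and is not part of this file; this is only a sketch of the expected usage):

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring, batch_obj, batch_start_offset, is_master);
		if (ret)
			return ret;	/* reject the whole execbuffer on a violation */
	}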
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b2b46c52294c..195fe5bc0aac 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj->pin_count > 0)
+	else if (i915_gem_obj_is_pinned(obj))
 		return "p";
 	else
 		return " ";
@@ -123,6 +123,8 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	int pin_count = 0;
+
 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
@@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	if (obj->pin_count)
-		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			pin_count++;
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -295,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 } while (0)
 
 struct file_stats {
+	struct drm_i915_file_private *file_priv;
 	int count;
-	size_t total, active, inactive, unbound;
+	size_t total, unbound;
+	size_t global, shared;
+	size_t active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
 {
 	struct drm_i915_gem_object *obj = ptr;
 	struct file_stats *stats = data;
+	struct i915_vma *vma;
 
 	stats->count++;
 	stats->total += obj->base.size;
 
-	if (i915_gem_obj_ggtt_bound(obj)) {
-		if (!list_empty(&obj->ring_list))
-			stats->active += obj->base.size;
-		else
-			stats->inactive += obj->base.size;
+	if (obj->base.name || obj->base.dma_buf)
+		stats->shared += obj->base.size;
+
+	if (USES_FULL_PPGTT(obj->base.dev)) {
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			struct i915_hw_ppgtt *ppgtt;
+
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
+
+			if (i915_is_ggtt(vma->vm)) {
+				stats->global += obj->base.size;
+				continue;
+			}
+
+			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+				continue;
+
+			if (obj->ring) /* XXX per-vma statistic */
+				stats->active += obj->base.size;
+			else
+				stats->inactive += obj->base.size;
+
+			return 0;
+		}
 	} else {
-		if (!list_empty(&obj->global_list))
-			stats->unbound += obj->base.size;
+		if (i915_gem_obj_ggtt_bound(obj)) {
+			stats->global += obj->base.size;
+			if (obj->ring)
+				stats->active += obj->base.size;
+			else
+				stats->inactive += obj->base.size;
+			return 0;
+		}
 	}
 
+	if (!list_empty(&obj->global_list))
+		stats->unbound += obj->base.size;
+
 	return 0;
 }
 
@@ -407,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
+		stats.file_priv = file->driver_priv;
 		idr_for_each(&file->object_idr, per_file_stats, &stats);
 		/*
 		 * Although we have a valid reference on file->pid, that does
@@ -416,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		 */
 		rcu_read_lock();
 		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
+		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
 			   task ? task->comm : "<unknown>",
 			   stats.count,
 			   stats.total,
 			   stats.active,
 			   stats.inactive,
+			   stats.global,
+			   stats.shared,
 			   stats.unbound);
 		rcu_read_unlock();
 	}
@@ -447,7 +488,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && obj->pin_count == 0)
+		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 			continue;
 
 		seq_puts(m, " ");
@@ -520,7 +561,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *gem_request;
 	int ret, count, i;
@@ -565,7 +606,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	int ret, i;
 
@@ -588,7 +629,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	int ret, i, pipe;
 
@@ -598,7 +639,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	intel_runtime_pm_get(dev_priv);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		int i;
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -611,16 +651,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 				   i, I915_READ(GEN8_GT_IER(i)));
 		}
 
-		for_each_pipe(i) {
+		for_each_pipe(pipe) {
 			seq_printf(m, "Pipe %c IMR:\t%08x\n",
-				   pipe_name(i),
-				   I915_READ(GEN8_DE_PIPE_IMR(i)));
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 			seq_printf(m, "Pipe %c IIR:\t%08x\n",
-				   pipe_name(i),
-				   I915_READ(GEN8_DE_PIPE_IIR(i)));
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 			seq_printf(m, "Pipe %c IER:\t%08x\n",
-				   pipe_name(i),
-				   I915_READ(GEN8_DE_PIPE_IER(i)));
+				   pipe_name(pipe),
+				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 		}
 
 		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -712,8 +752,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	seq_printf(m, "Interrupts received: %d\n",
-		   atomic_read(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
@@ -732,7 +770,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -761,7 +799,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	const u32 *hws;
 	int i;
@@ -872,7 +910,7 @@ static int
 i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -909,7 +947,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 crstanddelay;
 	int ret;
 
@@ -932,7 +970,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1025,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
 		u32 freq_sts, val;
 
@@ -1058,7 +1096,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 delayfreq;
 	int ret, i;
 
@@ -1089,7 +1127,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 inttoext;
 	int ret, i;
 
@@ -1113,7 +1151,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
 	int ret;
@@ -1339,13 +1377,15 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!HAS_FBC(dev)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	if (intel_fbc_enabled(dev)) {
 		seq_puts(m, "FBC enabled\n");
 	} else {
@@ -1389,6 +1429,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		}
 		seq_putc(m, '\n');
 	}
+
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
@@ -1403,11 +1446,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
 		seq_puts(m, "enabled\n");
 	else
 		seq_puts(m, "disabled\n");
 
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
@@ -1415,9 +1462,11 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
+	intel_runtime_pm_get(dev_priv);
+
 	if (HAS_PCH_SPLIT(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
@@ -1427,6 +1476,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	else if (IS_PINEVIEW(dev))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "self-refresh: %s\n",
 		   sr_enabled ? "enabled" : "disabled");
 
@@ -1437,7 +1488,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long temp, chipset, gfx;
 	int ret;
 
@@ -1465,8 +1516,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
 	int gpu_freq, ia_freq;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
@@ -1474,17 +1525,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
+		goto out;
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_delay;
-	     gpu_freq <= dev_priv->rps.max_delay;
+	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
 	     gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
@@ -1496,17 +1548,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
 
-	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	return 0;
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1526,7 +1579,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
 	int ret;
@@ -1600,7 +1653,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	struct i915_hw_context *ctx;
 	int ret, i;
@@ -1733,6 +1786,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+	struct i915_hw_context *ctx = ptr;
+	struct seq_file *m = data;
+	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+	ppgtt->debug_dump(ppgtt, m);
+
+	return 0;
+}
+
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1744,7 +1808,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		return;
 
 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
-	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
 	for_each_ring(ring, dev_priv, unused) {
 		seq_printf(m, "%s\n", ring->name);
 		for (i = 0; i < 4; i++) {
@@ -1762,6 +1826,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct drm_file *file;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
@@ -1780,6 +1845,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 		seq_puts(m, "aliasing PPGTT:\n");
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+		ppgtt->debug_dump(ppgtt, m);
+	} else
+		return;
+
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct i915_hw_ppgtt *pvt_ppgtt;
+
+		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+		seq_printf(m, "proc: %s\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
+		seq_puts(m, " default context:\n");
+		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }
@@ -1892,6 +1971,47 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct intel_dp *intel_dp = NULL;
+	int ret;
+	u8 crc[6];
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+
+		if (connector->base.dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		if (!connector->base.encoder)
+			continue;
+
+		encoder = to_intel_encoder(connector->base.encoder);
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		ret = intel_dp_sink_crc(intel_dp, crc);
+		if (ret)
+			goto out;
+
+		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+			   crc[0], crc[1], crc[2],
+			   crc[3], crc[4], crc[5]);
+		goto out;
+	}
+	ret = -ENODEV;
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -1903,12 +2023,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
+	intel_runtime_pm_get(dev_priv);
+
 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
 	power = (power & 0x1f00) >> 8;
 	units = 1000000 / (1 << power); /* convert to uJ */
 	power = I915_READ(MCH_SECP_NRG_STTS);
 	power *= units;
 
+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "%llu", (long long unsigned)power);
 
 	return 0;
@@ -1925,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 		return 0;
 	}
 
-	mutex_lock(&dev_priv->pc8.lock);
-	seq_printf(m, "Requirements met: %s\n",
-		   yesno(dev_priv->pc8.requirements_met));
-	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
-	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
-		   yesno(dev_priv->pc8.irqs_disabled));
-	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
-	mutex_unlock(&dev_priv->pc8.lock);
+		   yesno(dev_priv->pm.irqs_disabled));
 
 	return 0;
 }
@@ -1961,6 +2079,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "TRANSCODER_C";
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+		return "PORT_DDI_A_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+		return "PORT_DDI_A_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+		return "PORT_DDI_B_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+		return "PORT_DDI_B_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+		return "PORT_DDI_C_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+		return "PORT_DDI_C_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+		return "PORT_DDI_D_2_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+		return "PORT_DDI_D_4_LANES";
+	case POWER_DOMAIN_PORT_DSI:
+		return "PORT_DSI";
+	case POWER_DOMAIN_PORT_CRT:
+		return "PORT_CRT";
+	case POWER_DOMAIN_PORT_OTHER:
+		return "PORT_OTHER";
 	case POWER_DOMAIN_VGA:
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
@@ -2008,6 +2148,215 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+				 struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < tabs; i++)
+		seq_putc(m, '\t');
+
+	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
+		   mode->base.id, mode->name,
+		   mode->vrefresh, mode->clock,
+		   mode->hdisplay, mode->hsync_start,
+		   mode->hsync_end, mode->htotal,
+		   mode->vdisplay, mode->vsync_start,
+		   mode->vsync_end, mode->vtotal,
+		   mode->type, mode->flags);
+}
+
+static void intel_encoder_info(struct seq_file *m,
+			       struct intel_crtc *intel_crtc,
+			       struct intel_encoder *intel_encoder)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct intel_connector *intel_connector;
+	struct drm_encoder *encoder;
+
+	encoder = &intel_encoder->base;
+	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
+		   encoder->base.id, drm_get_encoder_name(encoder));
+	for_each_connector_on_encoder(dev, encoder, intel_connector) {
+		struct drm_connector *connector = &intel_connector->base;
+		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
+			   connector->base.id,
+			   drm_get_connector_name(connector),
+			   drm_get_connector_status_name(connector->status));
+		if (connector->status == connector_status_connected) {
+			struct drm_display_mode *mode = &crtc->mode;
+			seq_printf(m, ", mode:\n");
+			intel_seq_print_mode(m, 2, mode);
+		} else {
+			seq_putc(m, '\n');
+		}
+	}
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct intel_encoder *intel_encoder;
+
+	seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+		   crtc->primary->fb->base.id, crtc->x, crtc->y,
+		   crtc->primary->fb->width, crtc->primary->fb->height);
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		intel_encoder_info(m, intel_crtc, intel_encoder);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+	struct drm_display_mode *mode = panel->fixed_mode;
+
+	seq_printf(m, "\tfixed mode:\n");
+	intel_seq_print_mode(m, 2, mode);
+}
+
+static void intel_dp_info(struct seq_file *m,
+			  struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
+		   "no");
+	if (intel_encoder->type == INTEL_OUTPUT_EDP)
+		intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+
+	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
+		   "no");
+}
+
+static void intel_lvds_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+				 struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct drm_display_mode *mode;
+
+	seq_printf(m, "connector %d: type %s, status: %s\n",
+		   connector->base.id, drm_get_connector_name(connector),
+		   drm_get_connector_status_name(connector->status));
+	if (connector->status == connector_status_connected) {
+		seq_printf(m, "\tname: %s\n", connector->display_info.name);
+		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+			   connector->display_info.width_mm,
+			   connector->display_info.height_mm);
+		seq_printf(m, "\tsubpixel order: %s\n",
+			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+		seq_printf(m, "\tCEA rev: %d\n",
+			   connector->display_info.cea_rev);
+	}
+	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+	    intel_encoder->type == INTEL_OUTPUT_EDP)
+		intel_dp_info(m, intel_connector);
+	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+		intel_hdmi_info(m, intel_connector);
+	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+		intel_lvds_info(m, intel_connector);
+
+	seq_printf(m, "\tmodes:\n");
+	list_for_each_entry(mode, &connector->modes, head)
+		intel_seq_print_mode(m, 2, mode);
+}
+
+static bool cursor_active(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 state;
+
+	if (IS_845G(dev) || IS_I865G(dev))
+		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
+		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+	else
+		state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+
+	return state;
+}
+
+static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pos;
+
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
+		pos = I915_READ(CURPOS_IVB(pipe));
+	else
+		pos = I915_READ(CURPOS(pipe));
+
+	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
+	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
+		*x = -*x;
+
+	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
+	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
+		*y = -*y;
+
+	return cursor_active(dev, pipe);
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	struct drm_connector *connector;
+
+	intel_runtime_pm_get(dev_priv);
+	drm_modeset_lock_all(dev);
+	seq_printf(m, "CRTC info\n");
+	seq_printf(m, "---------\n");
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		bool active;
+		int x, y;
+
+		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+			   crtc->base.base.id, pipe_name(crtc->pipe),
+			   yesno(crtc->active));
+		if (crtc->active) {
+			intel_crtc_info(m, crtc);
+
+			active = cursor_position(dev, crtc->pipe, &x, &y);
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+				   yesno(crtc->cursor_visible),
+				   x, y, crtc->cursor_addr,
+				   yesno(active));
+		}
+	}
+
+	seq_printf(m, "\n");
+	seq_printf(m, "Connector info\n");
+	seq_printf(m, "--------------\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		intel_connector_info(m, connector);
+	}
+	drm_modeset_unlock_all(dev);
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -2332,8 +2681,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
 	if (need_stable_symbols) {
 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-		WARN_ON(!IS_G4X(dev));
-
 		tmp |= DC_BALANCE_RESET_VLV;
 		if (pipe == PIPE_A)
 			tmp |= PIPE_A_SCRAMBLE_RESET;
@@ -2756,11 +3103,179 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 	.write = display_crc_ctl_write
 };
 
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+	struct drm_device *dev = m->private;
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++) {
+		unsigned int latency = wm[level];
+
+		/* WM1+ latency values in 0.5us units */
+		if (level > 0)
+			latency *= 5;
+
+		seq_printf(m, "WM%d %u (%u.%u usec)\n",
+			   level, wm[level],
+			   latency / 10, latency % 10);
+	}
+
+	drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+	return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+	return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+	return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+				size_t len, loff_t *offp, uint16_t wm[5])
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	uint16_t new[5] = { 0 };
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+	int ret;
+	char tmp[32];
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+	if (ret != num_levels)
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++)
+		wm[level] = new[level];
+
+	drm_modeset_unlock_all(dev);
+
+	return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = pri_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = spr_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = cur_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = cur_wm_latency_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
 
@@ -2772,9 +3287,8 @@ i915_wedged_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 
-	DRM_INFO("Manually setting wedged to %llu\n", val);
-	i915_handle_error(dev, val);
-
+	i915_handle_error(dev, val,
+			  "Manually setting wedged to %llu", val);
 	return 0;
 }
 
@@ -2786,7 +3300,7 @@ static int
 i915_ring_stop_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	*val = dev_priv->gpu_error.stop_rings;
 
@@ -2929,7 +3443,7 @@ i915_drop_caches_set(void *data, u64 val)
 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 			list_for_each_entry_safe(vma, x, &vm->inactive_list,
 						 mm_list) {
-				if (vma->obj->pin_count)
+				if (vma->pin_count)
 					continue;
 
 				ret = i915_vma_unbind(vma);
@@ -2963,7 +3477,7 @@ static int
 i915_max_freq_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -2976,9 +3490,9 @@ i915_max_freq_get(void *data, u64 *val)
2976 return ret; 3490 return ret;
2977 3491
2978 if (IS_VALLEYVIEW(dev)) 3492 if (IS_VALLEYVIEW(dev))
2979 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); 3493 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
2980 else 3494 else
2981 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 3495 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
2982 mutex_unlock(&dev_priv->rps.hw_lock); 3496 mutex_unlock(&dev_priv->rps.hw_lock);
2983 3497
2984 return 0; 3498 return 0;
@@ -2989,6 +3503,7 @@ i915_max_freq_set(void *data, u64 val)
2989{ 3503{
2990 struct drm_device *dev = data; 3504 struct drm_device *dev = data;
2991 struct drm_i915_private *dev_priv = dev->dev_private; 3505 struct drm_i915_private *dev_priv = dev->dev_private;
3506 u32 rp_state_cap, hw_max, hw_min;
2992 int ret; 3507 int ret;
2993 3508
2994 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3509 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3007,14 +3522,29 @@ i915_max_freq_set(void *data, u64 val)
3007 */ 3522 */
3008 if (IS_VALLEYVIEW(dev)) { 3523 if (IS_VALLEYVIEW(dev)) {
3009 val = vlv_freq_opcode(dev_priv, val); 3524 val = vlv_freq_opcode(dev_priv, val);
3010 dev_priv->rps.max_delay = val; 3525
3011 valleyview_set_rps(dev, val); 3526 hw_max = valleyview_rps_max_freq(dev_priv);
3527 hw_min = valleyview_rps_min_freq(dev_priv);
3012 } else { 3528 } else {
3013 do_div(val, GT_FREQUENCY_MULTIPLIER); 3529 do_div(val, GT_FREQUENCY_MULTIPLIER);
3014 dev_priv->rps.max_delay = val; 3530
3015 gen6_set_rps(dev, val); 3531 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3532 hw_max = dev_priv->rps.max_freq;
3533 hw_min = (rp_state_cap >> 16) & 0xff;
3534 }
3535
3536 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3537 mutex_unlock(&dev_priv->rps.hw_lock);
3538 return -EINVAL;
3016 } 3539 }
3017 3540
3541 dev_priv->rps.max_freq_softlimit = val;
3542
3543 if (IS_VALLEYVIEW(dev))
3544 valleyview_set_rps(dev, val);
3545 else
3546 gen6_set_rps(dev, val);
3547
3018 mutex_unlock(&dev_priv->rps.hw_lock); 3548 mutex_unlock(&dev_priv->rps.hw_lock);
3019 3549
3020 return 0; 3550 return 0;
@@ -3028,7 +3558,7 @@ static int
3028i915_min_freq_get(void *data, u64 *val) 3558i915_min_freq_get(void *data, u64 *val)
3029{ 3559{
3030 struct drm_device *dev = data; 3560 struct drm_device *dev = data;
3031 drm_i915_private_t *dev_priv = dev->dev_private; 3561 struct drm_i915_private *dev_priv = dev->dev_private;
3032 int ret; 3562 int ret;
3033 3563
3034 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3564 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3041,9 +3571,9 @@ i915_min_freq_get(void *data, u64 *val)
3041 return ret; 3571 return ret;
3042 3572
3043 if (IS_VALLEYVIEW(dev)) 3573 if (IS_VALLEYVIEW(dev))
3044 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); 3574 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3045 else 3575 else
3046 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 3576 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3047 mutex_unlock(&dev_priv->rps.hw_lock); 3577 mutex_unlock(&dev_priv->rps.hw_lock);
3048 3578
3049 return 0; 3579 return 0;
@@ -3054,6 +3584,7 @@ i915_min_freq_set(void *data, u64 val)
3054{ 3584{
3055 struct drm_device *dev = data; 3585 struct drm_device *dev = data;
3056 struct drm_i915_private *dev_priv = dev->dev_private; 3586 struct drm_i915_private *dev_priv = dev->dev_private;
3587 u32 rp_state_cap, hw_max, hw_min;
3057 int ret; 3588 int ret;
3058 3589
3059 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3590 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3072,13 +3603,29 @@ i915_min_freq_set(void *data, u64 val)
3072 */ 3603 */
3073 if (IS_VALLEYVIEW(dev)) { 3604 if (IS_VALLEYVIEW(dev)) {
3074 val = vlv_freq_opcode(dev_priv, val); 3605 val = vlv_freq_opcode(dev_priv, val);
3075 dev_priv->rps.min_delay = val; 3606
3076 valleyview_set_rps(dev, val); 3607 hw_max = valleyview_rps_max_freq(dev_priv);
3608 hw_min = valleyview_rps_min_freq(dev_priv);
3077 } else { 3609 } else {
3078 do_div(val, GT_FREQUENCY_MULTIPLIER); 3610 do_div(val, GT_FREQUENCY_MULTIPLIER);
3079 dev_priv->rps.min_delay = val; 3611
3080 gen6_set_rps(dev, val); 3612 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3613 hw_max = dev_priv->rps.max_freq;
3614 hw_min = (rp_state_cap >> 16) & 0xff;
3615 }
3616
3617 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
3618 mutex_unlock(&dev_priv->rps.hw_lock);
3619 return -EINVAL;
3081 } 3620 }
3621
3622 dev_priv->rps.min_freq_softlimit = val;
3623
3624 if (IS_VALLEYVIEW(dev))
3625 valleyview_set_rps(dev, val);
3626 else
3627 gen6_set_rps(dev, val);
3628
3082 mutex_unlock(&dev_priv->rps.hw_lock); 3629 mutex_unlock(&dev_priv->rps.hw_lock);
3083 3630
3084 return 0; 3631 return 0;
@@ -3092,7 +3639,7 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 snpcr;
 	int ret;
 
@@ -3152,7 +3699,6 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	intel_runtime_pm_get(dev_priv);
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	return 0;
@@ -3167,7 +3713,6 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 		return 0;
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
 
 	return 0;
 }
@@ -3248,9 +3793,11 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
+	{"i915_display_info", i915_display_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -3269,6 +3816,9 @@ static const struct i915_debugfs_files {
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
 };
 
 void intel_display_crc_init(struct drm_device *dev)
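The three watermark-latency files registered above share one debugfs plumbing: seq_file supplies read/llseek, and each write handler simply forwards to the common wm_latency_write() with its plane's latency table (pri/spr/cur). The open half is outside this hunk; it presumably pairs single_open() with a per-plane show routine, roughly:

	/* Sketch of the open half; the real pri_wm_latency_open() is not in
	 * this hunk, so the show body here is illustrative. */
	static int pri_wm_latency_show(struct seq_file *m, void *unused)
	{
		struct drm_device *dev = m->private;

		/* dump to_i915(dev)->wm.pri_latency[], one level per line */
		return 0;
	}

	static int pri_wm_latency_open(struct inode *inode, struct file *file)
	{
		return single_open(file, pri_wm_latency_show, inode->i_private);
	}

With single_open(), file->private_data is the seq_file, which is why the write handlers added above recover the drm_device via m->private.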
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15a74f979b4b..96177eec0a0e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -82,7 +82,7 @@ intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
 
 void i915_update_dri1_breadcrumb(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
 	/*
@@ -103,7 +103,7 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 
 static void i915_write_hws_pga(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
@@ -118,7 +118,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
  */
 static void i915_free_hws(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (dev_priv->status_page_dmah) {
@@ -137,7 +137,7 @@ static void i915_free_hws(struct drm_device *dev)
 
 void i915_kernel_lost_context(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
@@ -164,7 +164,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 
 static int i915_dma_cleanup(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -188,7 +188,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 
 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret;
 
@@ -233,7 +233,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 
 static int i915_dma_resume(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -357,7 +357,7 @@ static int validate_cmd(int cmd)
 
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
 	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
@@ -431,7 +431,7 @@ i915_emit_box(struct drm_device *dev,
 
 static void i915_emit_breadcrumb(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
 	dev_priv->dri1.counter++;
@@ -547,7 +547,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
 static int i915_dispatch_flip(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
 	int ret;
@@ -625,10 +625,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
 static int i915_batchbuffer(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-	    master_priv->sarea_priv;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
+	drm_i915_sarea_t *sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
 	int ret;
 	struct drm_clip_rect *cliprects = NULL;
@@ -636,6 +635,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
+	master_priv = dev->primary->master->driver_priv;
+	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
 	if (!dev_priv->dri1.allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
 		return -EINVAL;
@@ -681,10 +683,9 @@ fail_free:
 static int i915_cmdbuffer(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-	    master_priv->sarea_priv;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
+	drm_i915_sarea_t *sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
 	struct drm_clip_rect *cliprects = NULL;
 	void *batch_data;
@@ -696,6 +697,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
+	master_priv = dev->primary->master->driver_priv;
+	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (cmdbuf->num_cliprects < 0)
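Note that the two ioctl hunks above fix an ordering hazard as well as dropping the typedef: the old declarations dereferenced dev->primary->master->driver_priv before the DRIVER_MODESET check could reject the call, so a KMS configuration with no legacy master state walked those pointers for nothing. The general shape of the fix:

	/* Illustrative shape only, not the driver's exact code. */
	static int legacy_ioctl(struct drm_device *dev, void *data)
	{
		struct drm_i915_master_private *master_priv;

		if (drm_core_check_feature(dev, DRIVER_MODESET))
			return -ENODEV;		/* bail before touching legacy state */

		/* Only now is it safe to chase the legacy master pointers. */
		master_priv = dev->primary->master->driver_priv;
		/* ... legacy path continues ... */
		return 0;
	}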
@@ -749,7 +753,7 @@ fail_batch_free:
 
 static int i915_emit_irq(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
 	i915_kernel_lost_context(dev);
@@ -775,7 +779,7 @@ static int i915_emit_irq(struct drm_device * dev)
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
@@ -812,7 +816,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 static int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
@@ -843,7 +847,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
 static int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_irq_wait_t *irqwait = data;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -860,7 +864,7 @@ static int i915_irq_wait(struct drm_device *dev, void *data,
 static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_vblank_pipe_t *pipe = data;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -921,7 +925,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
 static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_getparam_t *param = data;
 	int value;
 
@@ -990,7 +994,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = HAS_WT(dev);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+		value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
 		break;
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
@@ -1029,7 +1033,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 static int i915_setparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_setparam_t *param = data;
 
 	if (!dev_priv) {
@@ -1064,7 +1068,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
 static int i915_set_status_page(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
 	struct intel_ring_buffer *ring;
 
@@ -1132,7 +1136,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
@@ -1178,11 +1182,14 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 static void
 intel_setup_mchbar(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool enabled;
 
+	if (IS_VALLEYVIEW(dev))
+		return;
+
 	dev_priv->mchbar_need_disable = false;
 
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
@@ -1215,7 +1222,7 @@ intel_setup_mchbar(struct drm_device *dev)
 static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 
@@ -1317,12 +1324,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
+	intel_power_domains_init_hw(dev_priv);
+
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	intel_power_domains_init_hw(dev);
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -1339,7 +1346,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = true;
 	if (INTEL_INFO(dev)->num_pipes == 0) {
-		intel_display_power_put(dev, POWER_DOMAIN_VGA);
+		intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 		return 0;
 	}
 
@@ -1374,10 +1381,10 @@ cleanup_gem:
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_power:
-	intel_display_power_put(dev, POWER_DOMAIN_VGA);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
@@ -1442,7 +1449,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
-	const struct intel_device_info *info = dev_priv->info;
+	const struct intel_device_info *info = &dev_priv->info;
 
 #define PRINT_S(name) "%s"
 #define SEP_EMPTY
@@ -1459,6 +1466,62 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (eg read fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been setup as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	enum pipe pipe;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+
+	if (IS_VALLEYVIEW(dev))
+		for_each_pipe(pipe)
+			info->num_sprites[pipe] = 2;
+	else
+		for_each_pipe(pipe)
+			info->num_sprites[pipe] = 1;
+
+	if (i915.disable_display) {
+		DRM_INFO("Display disabled (module parameter)\n");
+		info->num_pipes = 0;
+	} else if (info->num_pipes > 0 &&
+		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+		   !IS_VALLEYVIEW(dev)) {
+		u32 fuse_strap = I915_READ(FUSE_STRAP);
+		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+		/*
+		 * SFUSE_STRAP is supposed to have a bit signalling the display
+		 * is fused off. Unfortunately it seems that, at least in
+		 * certain cases, fused off display means that PCH display
+		 * reads don't land anywhere. In that case, we read 0s.
+		 *
+		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+		 * should be set when taking over after the firmware.
+		 */
+		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+		    (dev_priv->pch_type == PCH_CPT &&
+		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+			DRM_INFO("Display fused off, disabling\n");
+			info->num_pipes = 0;
+		}
+	}
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
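intel_device_info_runtime_init() folds what used to be ad-hoc fields (the dev_priv->num_plane assignment removed in a later hunk) into the device-info struct itself: two sprite planes per pipe on Valleyview, one elsewhere, plus fuse/strap-based pipe disabling on gen7/8. A consumer would then size its plane setup from that table, along the lines of the sketch below, where the init call is illustrative rather than quoted from this patch:

	enum pipe pipe;
	int sprite;

	for_each_pipe(pipe)
		for (sprite = 0; sprite < INTEL_INFO(dev)->num_sprites[pipe]; sprite++)
			intel_plane_init(dev, pipe, sprite);	/* illustrative */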
@@ -1473,7 +1536,7 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_device_info *info;
+	struct intel_device_info *info, *device_info;
 	int ret = 0, mmio_bar, mmio_size;
 	uint32_t aperture_size;
 
@@ -1496,7 +1559,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
-	dev_priv->info = info;
+
+	/* copy initial configuration to dev_priv->info */
+	device_info = (struct intel_device_info *)&dev_priv->info;
+	*device_info = *info;
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1545,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_bridge;
 	}
 
-	intel_uncore_early_sanitize(dev);
-
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
 
@@ -1635,9 +1699,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	dev_priv->num_plane = 1;
-	if (IS_VALLEYVIEW(dev))
-		dev_priv->num_plane = 2;
+	intel_device_info_runtime_init(dev);
 
 	if (INTEL_INFO(dev)->num_pipes) {
 		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
@@ -1645,7 +1707,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			goto out_gem_unload;
 	}
 
-	intel_power_domains_init(dev);
+	intel_power_domains_init(dev_priv);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
@@ -1674,7 +1736,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	return 0;
 
 out_power_well:
-	intel_power_domains_remove(dev);
+	intel_power_domains_remove(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1724,8 +1786,8 @@ int i915_driver_unload(struct drm_device *dev)
 	/* The i915.ko module is still not prepared to be loaded when
 	 * the power well is not enabled, so just enable it in case
 	 * we're going to unload/reload. */
-	intel_display_set_init_power(dev, true);
-	intel_power_domains_remove(dev);
+	intel_display_set_init_power(dev_priv, true);
+	intel_power_domains_remove(dev_priv);
 
 	i915_teardown_sysfs(dev);
 
@@ -1761,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 
-	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
-
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
@@ -1776,8 +1836,8 @@ int i915_driver_unload(struct drm_device *dev)
 	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_stolen(dev);
 
 	if (!I915_NEED_GFX_HWS(dev))
@@ -1835,7 +1895,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  */
 void i915_driver_lastclose(struct drm_device * dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* On gen6+ we refuse to init without kms enabled, but then the drm core
 	 * goes right around and calls lastclose. Check for this and don't clean
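The drm_i915_private_t conversions throughout this file are mechanical: the typedef hides a struct, and the (drm_i915_private_t *) casts were never needed because dev_private is a void *. The debugfs hunks earlier already go through a to_i915() accessor instead, which (modulo the exact definition in i915_drv.h) amounts to:

	/* Shape of the accessor used earlier; see i915_drv.h for the real one. */
	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		return dev->dev_private;
	}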
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec7bb0fc71bc..82f4d1f47d3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,134 +38,30 @@
 #include <linux/module.h>
 #include <drm/drm_crtc_helper.h>
 
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
-		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
-		"1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
-		"-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
-		"Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6. "
-		"Different stages can be selected via bitmask values "
-		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
-		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
-		"default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
-		"Enable frame buffer compression for power savings "
-		"(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-		"Use panel (LVDS/eDP) downclocking for power savings "
-		"(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
-		 "Specify LVDS channel mode "
-		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
-		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
-		"(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
-		"Override/Ignore selection of SDVO panel mode in the VBT "
-		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
-		"Periodically check GPU activity for detecting hangs. "
-		"WARNING: Disabling this can cause system wide hangs. "
-		"(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
-		"Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
-		 "Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
-		 "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
-		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
 static struct drm_driver driver;
 
+#define GEN_DEFAULT_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i85x_info = {
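GEN_DEFAULT_PIPEOFFSETS is a bundle of designated initializers, so dropping it at the end of each intel_device_info initializer fills every per-pipe register-offset table in one token; C99 designated initialization keeps the expansion order-independent. The i830 entry above, for example, effectively compiles as:

	/* Effective post-expansion form of the entry above. */
	static const struct intel_device_info intel_i830_info = {
		.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
		.has_overlay = 1, .overlay_needs_physical = 1,
		.ring_mask = RENDER_RING,
		.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET,
				  PIPE_C_OFFSET, PIPE_EDP_OFFSET },
		.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET,
				   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET },
		.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET },
		.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET },
		.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET },
	};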
@@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = {
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965g_info = {
@@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = {
 	.has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = {
 	.has_overlay = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g33_info = {
@@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = {
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = {
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
 	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
@@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
@@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 #define GEN7_FEATURES \
@@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
@@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
@@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
@@ -346,6 +265,8 @@ static const struct intel_device_info intel_broadwell_d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
@@ -354,6 +275,8 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 /*
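The block of module_param() boilerplate deleted at the top of this file is not lost: later hunks read i915.semaphores, i915.reset, i915.preliminary_hw_support and i915.disable_display, so the knobs have moved behind a single shared parameters object (declared elsewhere in this series; the sketch below is inferred from the fields this diff uses, not copied from it):

	/* Inferred shape; the real struct and its module_param_named() lines
	 * live in a separate file added by this series. */
	struct i915_params {
		int semaphores;			/* -1 = per-chip default */
		bool reset;			/* replaces i915_try_reset */
		int preliminary_hw_support;
		bool disable_display;
		/* ... one member per former i915_* parameter ... */
	};

	extern struct i915_params i915;	/* read as i915.<param> */

This keeps every tunable greppable in one place and removes a screenful of globals from i915_drv.c.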
@@ -475,14 +398,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6)
 		return false;
 
+	if (i915.semaphores >= 0)
+		return i915.semaphores;
+
 	/* Until we get further testing... */
-	if (IS_GEN8(dev)) {
-		WARN_ON(!i915_preliminary_hw_support);
+	if (IS_GEN8(dev))
 		return false;
-	}
-
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
@@ -507,8 +428,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/* We do a lot of poking in a lot of registers, make sure they work
 	 * properly. */
-	hsw_disable_package_c8(dev_priv);
-	intel_display_set_init_power(dev, true);
+	intel_display_set_init_power(dev_priv, true);
 
 	drm_kms_helper_poll_disable(dev);
 
@@ -546,11 +466,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 	i915_save_state(dev);
 
 	intel_opregion_fini(dev);
+	intel_uncore_fini(dev);
 
 	console_lock();
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
 	console_unlock();
 
+	dev_priv->suspend_count++;
+
 	return 0;
 }
 
@@ -614,14 +537,21 @@ static void intel_resume_hotplug(struct drm_device *dev)
 	drm_helper_hpd_irq_event(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
+static int i915_drm_thaw_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int error = 0;
 
 	intel_uncore_early_sanitize(dev);
-
 	intel_uncore_sanitize(dev);
+	intel_power_domains_init_hw(dev_priv);
+
+	return 0;
+}
+
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int error = 0;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
 	    restore_gtt_mappings) {
@@ -630,14 +560,13 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		mutex_unlock(&dev->struct_mutex);
 	}
 
-	intel_power_domains_init_hw(dev);
-
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_init_pch_refclk(dev);
+		drm_mode_config_reset(dev);
 
 		mutex_lock(&dev->struct_mutex);
 
@@ -650,7 +579,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
-		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
@@ -680,10 +608,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		schedule_work(&dev_priv->console_resume_work);
 	}
 
-	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
-	 * expected level. */
-	hsw_enable_package_c8(dev_priv);
-
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -700,19 +624,33 @@ static int i915_drm_thaw(struct drm_device *dev)
 	return __i915_drm_thaw(dev, true);
 }
 
-int i915_resume(struct drm_device *dev)
+static int i915_resume_early(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
+	/*
+	 * We have a resume ordering issue with the snd-hda driver also
+	 * requiring our device to be power up. Due to the lack of a
+	 * parent/child relationship we currently solve this with an early
+	 * resume hook.
+	 *
+	 * FIXME: This should be solved with a special hdmi sink device or
+	 * similar so that power domains can be employed.
+	 */
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
 	pci_set_master(dev->pdev);
 
+	return i915_drm_thaw_early(dev);
+}
+
+int i915_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
 	 * earlier) need to restore the GTT mappings since the BIOS might clear
@@ -726,6 +664,14 @@ int i915_resume(struct drm_device *dev)
 	return 0;
 }
 
+static int i915_resume_legacy(struct drm_device *dev)
+{
+	i915_resume_early(dev);
+	i915_resume(dev);
+
+	return 0;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -743,11 +689,11 @@ int i915_resume(struct drm_device *dev)
743 */ 689 */
744int i915_reset(struct drm_device *dev) 690int i915_reset(struct drm_device *dev)
745{ 691{
746 drm_i915_private_t *dev_priv = dev->dev_private; 692 struct drm_i915_private *dev_priv = dev->dev_private;
747 bool simulated; 693 bool simulated;
748 int ret; 694 int ret;
749 695
750 if (!i915_try_reset) 696 if (!i915.reset)
751 return 0; 697 return 0;
752 698
753 mutex_lock(&dev->struct_mutex); 699 mutex_lock(&dev->struct_mutex);
@@ -802,6 +748,17 @@ int i915_reset(struct drm_device *dev)
802 748
803 drm_irq_uninstall(dev); 749 drm_irq_uninstall(dev);
804 drm_irq_install(dev); 750 drm_irq_install(dev);
751
752 /* rps/rc6 re-init is necessary to restore state lost after the
753 * reset and the re-install of drm irq. Skip for ironlake per
754 * previous concerns that it doesn't respond well to some forms
755 * of re-init after reset. */
756 if (INTEL_INFO(dev)->gen > 5) {
757 mutex_lock(&dev->struct_mutex);
758 intel_enable_gt_powersave(dev);
759 mutex_unlock(&dev->struct_mutex);
760 }
761
805 intel_hpd_init(dev); 762 intel_hpd_init(dev);
806 } else { 763 } else {
807 mutex_unlock(&dev->struct_mutex); 764 mutex_unlock(&dev->struct_mutex);
@@ -815,7 +772,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
815 struct intel_device_info *intel_info = 772 struct intel_device_info *intel_info =
816 (struct intel_device_info *) ent->driver_data; 773 (struct intel_device_info *) ent->driver_data;
817 774
818 if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) { 775 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
819 DRM_INFO("This hardware requires preliminary hardware support.\n" 776 DRM_INFO("This hardware requires preliminary hardware support.\n"
820 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); 777 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
821 return -ENODEV; 778 return -ENODEV;
@@ -846,7 +803,6 @@ static int i915_pm_suspend(struct device *dev)
846{ 803{
847 struct pci_dev *pdev = to_pci_dev(dev); 804 struct pci_dev *pdev = to_pci_dev(dev);
848 struct drm_device *drm_dev = pci_get_drvdata(pdev); 805 struct drm_device *drm_dev = pci_get_drvdata(pdev);
849 int error;
850 806
851 if (!drm_dev || !drm_dev->dev_private) { 807 if (!drm_dev || !drm_dev->dev_private) {
852 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 808 dev_err(dev, "DRM not initialized, aborting suspend.\n");
@@ -856,9 +812,25 @@ static int i915_pm_suspend(struct device *dev)
856 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 812 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
857 return 0; 813 return 0;
858 814
859 error = i915_drm_freeze(drm_dev); 815 return i915_drm_freeze(drm_dev);
860 if (error) 816}
861 return error; 817
818static int i915_pm_suspend_late(struct device *dev)
819{
820 struct pci_dev *pdev = to_pci_dev(dev);
821 struct drm_device *drm_dev = pci_get_drvdata(pdev);
822
823 /*
 824 * We have a suspend ordering issue with the snd-hda driver also
 825 * requiring our device to be powered up. Due to the lack of a
 826 * parent/child relationship we currently solve this with a late
827 * suspend hook.
828 *
829 * FIXME: This should be solved with a special hdmi sink device or
830 * similar so that power domains can be employed.
831 */
832 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
833 return 0;
862 834
863 pci_disable_device(pdev); 835 pci_disable_device(pdev);
864 pci_set_power_state(pdev, PCI_D3hot); 836 pci_set_power_state(pdev, PCI_D3hot);
@@ -866,6 +838,14 @@ static int i915_pm_suspend(struct device *dev)
866 return 0; 838 return 0;
867} 839}
868 840
841static int i915_pm_resume_early(struct device *dev)
842{
843 struct pci_dev *pdev = to_pci_dev(dev);
844 struct drm_device *drm_dev = pci_get_drvdata(pdev);
845
846 return i915_resume_early(drm_dev);
847}
848
869static int i915_pm_resume(struct device *dev) 849static int i915_pm_resume(struct device *dev)
870{ 850{
871 struct pci_dev *pdev = to_pci_dev(dev); 851 struct pci_dev *pdev = to_pci_dev(dev);
@@ -887,6 +867,14 @@ static int i915_pm_freeze(struct device *dev)
887 return i915_drm_freeze(drm_dev); 867 return i915_drm_freeze(drm_dev);
888} 868}
889 869
870static int i915_pm_thaw_early(struct device *dev)
871{
872 struct pci_dev *pdev = to_pci_dev(dev);
873 struct drm_device *drm_dev = pci_get_drvdata(pdev);
874
875 return i915_drm_thaw_early(drm_dev);
876}
877
890static int i915_pm_thaw(struct device *dev) 878static int i915_pm_thaw(struct device *dev)
891{ 879{
892 struct pci_dev *pdev = to_pci_dev(dev); 880 struct pci_dev *pdev = to_pci_dev(dev);
@@ -910,9 +898,13 @@ static int i915_runtime_suspend(struct device *device)
910 struct drm_i915_private *dev_priv = dev->dev_private; 898 struct drm_i915_private *dev_priv = dev->dev_private;
911 899
912 WARN_ON(!HAS_RUNTIME_PM(dev)); 900 WARN_ON(!HAS_RUNTIME_PM(dev));
901 assert_force_wake_inactive(dev_priv);
913 902
914 DRM_DEBUG_KMS("Suspending device\n"); 903 DRM_DEBUG_KMS("Suspending device\n");
915 904
905 if (HAS_PC8(dev))
906 hsw_enable_pc8(dev_priv);
907
916 i915_gem_release_all_mmaps(dev_priv); 908 i915_gem_release_all_mmaps(dev_priv);
917 909
918 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 910 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -927,6 +919,7 @@ static int i915_runtime_suspend(struct device *device)
927 */ 919 */
928 intel_opregion_notify_adapter(dev, PCI_D1); 920 intel_opregion_notify_adapter(dev, PCI_D1);
929 921
922 DRM_DEBUG_KMS("Device suspended\n");
930 return 0; 923 return 0;
931} 924}
932 925
@@ -943,15 +936,23 @@ static int i915_runtime_resume(struct device *device)
943 intel_opregion_notify_adapter(dev, PCI_D0); 936 intel_opregion_notify_adapter(dev, PCI_D0);
944 dev_priv->pm.suspended = false; 937 dev_priv->pm.suspended = false;
945 938
939 if (HAS_PC8(dev))
940 hsw_disable_pc8(dev_priv);
941
942 DRM_DEBUG_KMS("Device resumed\n");
946 return 0; 943 return 0;
947} 944}
948 945
949static const struct dev_pm_ops i915_pm_ops = { 946static const struct dev_pm_ops i915_pm_ops = {
950 .suspend = i915_pm_suspend, 947 .suspend = i915_pm_suspend,
948 .suspend_late = i915_pm_suspend_late,
949 .resume_early = i915_pm_resume_early,
951 .resume = i915_pm_resume, 950 .resume = i915_pm_resume,
952 .freeze = i915_pm_freeze, 951 .freeze = i915_pm_freeze,
952 .thaw_early = i915_pm_thaw_early,
953 .thaw = i915_pm_thaw, 953 .thaw = i915_pm_thaw,
954 .poweroff = i915_pm_poweroff, 954 .poweroff = i915_pm_poweroff,
955 .restore_early = i915_pm_resume_early,
955 .restore = i915_pm_resume, 956 .restore = i915_pm_resume,
956 .runtime_suspend = i915_runtime_suspend, 957 .runtime_suspend = i915_runtime_suspend,
957 .runtime_resume = i915_runtime_resume, 958 .runtime_resume = i915_runtime_resume,
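
The new .suspend_late/.resume_early/.thaw_early entries above rely on the PM core running every *_early callback across all devices before any regular .resume callback, which is what lets i915 power the GPU back up before snd-hda's resume touches it. A minimal standalone C sketch of that two-phase ordering, with invented names rather than the real driver core:

	#include <stdio.h>

	struct pm_ops {
		void (*resume_early)(void);	/* may be NULL */
		void (*resume)(void);
	};

	static void i915_early(void)  { printf("i915: enable PCI device, thaw uncore\n"); }
	static void i915_resume(void) { printf("i915: restore GTT, modeset\n"); }
	static void hda_resume(void)  { printf("snd-hda: touch codec (needs GPU powered)\n"); }

	int main(void)
	{
		struct pm_ops devs[] = {
			{ NULL,       hda_resume },	/* snd-hda: no early hook */
			{ i915_early, i915_resume },	/* i915 */
		};
		int i, n = sizeof(devs) / sizeof(devs[0]);

		/* Phase 1: every resume_early hook, regardless of device order. */
		for (i = 0; i < n; i++)
			if (devs[i].resume_early)
				devs[i].resume_early();
		/* Phase 2: regular resume hooks; snd-hda now finds i915 awake. */
		for (i = 0; i < n; i++)
			devs[i].resume();
		return 0;
	}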
@@ -994,7 +995,7 @@ static struct drm_driver driver = {
994 995
995 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 996 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
996 .suspend = i915_suspend, 997 .suspend = i915_suspend,
997 .resume = i915_resume, 998 .resume = i915_resume_legacy,
998 999
999 .device_is_agp = i915_driver_device_is_agp, 1000 .device_is_agp = i915_driver_device_is_agp,
1000 .master_create = i915_master_create, 1001 .master_create = i915_master_create,
@@ -1046,14 +1047,14 @@ static int __init i915_init(void)
1046 * the default behavior. 1047 * the default behavior.
1047 */ 1048 */
1048#if defined(CONFIG_DRM_I915_KMS) 1049#if defined(CONFIG_DRM_I915_KMS)
1049 if (i915_modeset != 0) 1050 if (i915.modeset != 0)
1050 driver.driver_features |= DRIVER_MODESET; 1051 driver.driver_features |= DRIVER_MODESET;
1051#endif 1052#endif
1052 if (i915_modeset == 1) 1053 if (i915.modeset == 1)
1053 driver.driver_features |= DRIVER_MODESET; 1054 driver.driver_features |= DRIVER_MODESET;
1054 1055
1055#ifdef CONFIG_VGA_CONSOLE 1056#ifdef CONFIG_VGA_CONSOLE
1056 if (vgacon_text_force() && i915_modeset == -1) 1057 if (vgacon_text_force() && i915.modeset == -1)
1057 driver.driver_features &= ~DRIVER_MODESET; 1058 driver.driver_features &= ~DRIVER_MODESET;
1058#endif 1059#endif
1059 1060
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..0905cd915589 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,7 +58,8 @@ enum pipe {
58 PIPE_A = 0, 58 PIPE_A = 0,
59 PIPE_B, 59 PIPE_B,
60 PIPE_C, 60 PIPE_C,
61 I915_MAX_PIPES 61 _PIPE_EDP,
62 I915_MAX_PIPES = _PIPE_EDP
62}; 63};
63#define pipe_name(p) ((p) + 'A') 64#define pipe_name(p) ((p) + 'A')
64 65
@@ -66,7 +67,8 @@ enum transcoder {
66 TRANSCODER_A = 0, 67 TRANSCODER_A = 0,
67 TRANSCODER_B, 68 TRANSCODER_B,
68 TRANSCODER_C, 69 TRANSCODER_C,
69 TRANSCODER_EDP = 0xF, 70 TRANSCODER_EDP,
71 I915_MAX_TRANSCODERS
70}; 72};
71#define transcoder_name(t) ((t) + 'A') 73#define transcoder_name(t) ((t) + 'A')
72 74
@@ -77,7 +79,7 @@ enum plane {
77}; 79};
78#define plane_name(p) ((p) + 'A') 80#define plane_name(p) ((p) + 'A')
79 81
80#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A') 82#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
81 83
82enum port { 84enum port {
83 PORT_A = 0, 85 PORT_A = 0,
@@ -112,6 +114,17 @@ enum intel_display_power_domain {
112 POWER_DOMAIN_TRANSCODER_B, 114 POWER_DOMAIN_TRANSCODER_B,
113 POWER_DOMAIN_TRANSCODER_C, 115 POWER_DOMAIN_TRANSCODER_C,
114 POWER_DOMAIN_TRANSCODER_EDP, 116 POWER_DOMAIN_TRANSCODER_EDP,
117 POWER_DOMAIN_PORT_DDI_A_2_LANES,
118 POWER_DOMAIN_PORT_DDI_A_4_LANES,
119 POWER_DOMAIN_PORT_DDI_B_2_LANES,
120 POWER_DOMAIN_PORT_DDI_B_4_LANES,
121 POWER_DOMAIN_PORT_DDI_C_2_LANES,
122 POWER_DOMAIN_PORT_DDI_C_4_LANES,
123 POWER_DOMAIN_PORT_DDI_D_2_LANES,
124 POWER_DOMAIN_PORT_DDI_D_4_LANES,
125 POWER_DOMAIN_PORT_DSI,
126 POWER_DOMAIN_PORT_CRT,
127 POWER_DOMAIN_PORT_OTHER,
115 POWER_DOMAIN_VGA, 128 POWER_DOMAIN_VGA,
116 POWER_DOMAIN_AUDIO, 129 POWER_DOMAIN_AUDIO,
117 POWER_DOMAIN_INIT, 130 POWER_DOMAIN_INIT,
@@ -119,8 +132,6 @@ enum intel_display_power_domain {
119 POWER_DOMAIN_NUM, 132 POWER_DOMAIN_NUM,
120}; 133};
121 134
122#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
123
124#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 135#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
125#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 136#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
126 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 137 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -128,14 +139,6 @@ enum intel_display_power_domain {
128 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 139 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
129 (tran) + POWER_DOMAIN_TRANSCODER_A) 140 (tran) + POWER_DOMAIN_TRANSCODER_A)
130 141
131#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
132 BIT(POWER_DOMAIN_PIPE_A) | \
133 BIT(POWER_DOMAIN_TRANSCODER_EDP))
134#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
135 BIT(POWER_DOMAIN_PIPE_A) | \
136 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
137 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
138
139enum hpd_pin { 142enum hpd_pin {
140 HPD_NONE = 0, 143 HPD_NONE = 0,
141 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ 144 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -157,11 +160,16 @@ enum hpd_pin {
157 I915_GEM_DOMAIN_VERTEX) 160 I915_GEM_DOMAIN_VERTEX)
158 161
159#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 162#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
163#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
160 164
161#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 165#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
162 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 166 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
163 if ((intel_encoder)->base.crtc == (__crtc)) 167 if ((intel_encoder)->base.crtc == (__crtc))
164 168
169#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
170 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
171 if ((intel_connector)->base.encoder == (__encoder))
172
165struct drm_i915_private; 173struct drm_i915_private;
166 174
167enum intel_dpll_id { 175enum intel_dpll_id {
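
Because num_sprites[] is now indexed by pipe, sprite iteration has to consult the pipe it is on; a self-contained sketch of how for_each_pipe() and the new for_each_sprite() compose (simplified macros over a toy device_info that take the info pointer explicitly, unlike the driver's, which pick up dev from scope):

	#include <stdio.h>

	#define I915_MAX_PIPES 3

	struct device_info {
		int num_pipes;
		int num_sprites[I915_MAX_PIPES];	/* per-pipe sprite count */
	};

	#define for_each_pipe(info, p) \
		for ((p) = 0; (p) < (info)->num_pipes; (p)++)
	#define for_each_sprite(info, p, s) \
		for ((s) = 0; (s) < (info)->num_sprites[(p)]; (s)++)

	int main(void)
	{
		/* e.g. a VLV-like part: two sprites on each of two pipes */
		struct device_info info = { .num_pipes = 2, .num_sprites = { 2, 2 } };
		int p, s;

		for_each_pipe(&info, p)
			for_each_sprite(&info, p, s)
				printf("pipe %c sprite %d\n", 'A' + p, s);
		return 0;
	}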
@@ -295,53 +303,87 @@ struct intel_display_error_state;
295 303
296struct drm_i915_error_state { 304struct drm_i915_error_state {
297 struct kref ref; 305 struct kref ref;
306 struct timeval time;
307
308 char error_msg[128];
309 u32 reset_count;
310 u32 suspend_count;
311
312 /* Generic register state */
298 u32 eir; 313 u32 eir;
299 u32 pgtbl_er; 314 u32 pgtbl_er;
300 u32 ier; 315 u32 ier;
301 u32 ccid; 316 u32 ccid;
302 u32 derrmr; 317 u32 derrmr;
303 u32 forcewake; 318 u32 forcewake;
304 bool waiting[I915_NUM_RINGS];
305 u32 pipestat[I915_MAX_PIPES];
306 u32 tail[I915_NUM_RINGS];
307 u32 head[I915_NUM_RINGS];
308 u32 ctl[I915_NUM_RINGS];
309 u32 ipeir[I915_NUM_RINGS];
310 u32 ipehr[I915_NUM_RINGS];
311 u32 instdone[I915_NUM_RINGS];
312 u32 acthd[I915_NUM_RINGS];
313 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
314 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
315 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
316 /* our own tracking of ring head and tail */
317 u32 cpu_ring_head[I915_NUM_RINGS];
318 u32 cpu_ring_tail[I915_NUM_RINGS];
319 u32 error; /* gen6+ */ 319 u32 error; /* gen6+ */
320 u32 err_int; /* gen7 */ 320 u32 err_int; /* gen7 */
321 u32 bbstate[I915_NUM_RINGS];
322 u32 instpm[I915_NUM_RINGS];
323 u32 instps[I915_NUM_RINGS];
324 u32 extra_instdone[I915_NUM_INSTDONE_REG];
325 u32 seqno[I915_NUM_RINGS];
326 u64 bbaddr[I915_NUM_RINGS];
327 u32 fault_reg[I915_NUM_RINGS];
328 u32 done_reg; 321 u32 done_reg;
329 u32 faddr[I915_NUM_RINGS]; 322 u32 gac_eco;
323 u32 gam_ecochk;
324 u32 gab_ctl;
325 u32 gfx_mode;
326 u32 extra_instdone[I915_NUM_INSTDONE_REG];
327 u32 pipestat[I915_MAX_PIPES];
330 u64 fence[I915_MAX_NUM_FENCES]; 328 u64 fence[I915_MAX_NUM_FENCES];
331 struct timeval time; 329 struct intel_overlay_error_state *overlay;
330 struct intel_display_error_state *display;
331
332 struct drm_i915_error_ring { 332 struct drm_i915_error_ring {
333 bool valid; 333 bool valid;
334 /* Software tracked state */
335 bool waiting;
336 int hangcheck_score;
337 enum intel_ring_hangcheck_action hangcheck_action;
338 int num_requests;
339
340 /* our own tracking of ring head and tail */
341 u32 cpu_ring_head;
342 u32 cpu_ring_tail;
343
344 u32 semaphore_seqno[I915_NUM_RINGS - 1];
345
346 /* Register state */
347 u32 tail;
348 u32 head;
349 u32 ctl;
350 u32 hws;
351 u32 ipeir;
352 u32 ipehr;
353 u32 instdone;
354 u32 bbstate;
355 u32 instpm;
356 u32 instps;
357 u32 seqno;
358 u64 bbaddr;
359 u64 acthd;
360 u32 fault_reg;
361 u32 faddr;
362 u32 rc_psmi; /* sleep state */
363 u32 semaphore_mboxes[I915_NUM_RINGS - 1];
364
334 struct drm_i915_error_object { 365 struct drm_i915_error_object {
335 int page_count; 366 int page_count;
336 u32 gtt_offset; 367 u32 gtt_offset;
337 u32 *pages[0]; 368 u32 *pages[0];
338 } *ringbuffer, *batchbuffer, *ctx; 369 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
370
339 struct drm_i915_error_request { 371 struct drm_i915_error_request {
340 long jiffies; 372 long jiffies;
341 u32 seqno; 373 u32 seqno;
342 u32 tail; 374 u32 tail;
343 } *requests; 375 } *requests;
344 int num_requests; 376
377 struct {
378 u32 gfx_mode;
379 union {
380 u64 pdp[4];
381 u32 pp_dir_base;
382 };
383 } vm_info;
384
385 pid_t pid;
386 char comm[TASK_COMM_LEN];
345 } ring[I915_NUM_RINGS]; 387 } ring[I915_NUM_RINGS];
346 struct drm_i915_error_buffer { 388 struct drm_i915_error_buffer {
347 u32 size; 389 u32 size;
@@ -358,15 +400,13 @@ struct drm_i915_error_state {
358 s32 ring:4; 400 s32 ring:4;
359 u32 cache_level:3; 401 u32 cache_level:3;
360 } **active_bo, **pinned_bo; 402 } **active_bo, **pinned_bo;
403
361 u32 *active_bo_count, *pinned_bo_count; 404 u32 *active_bo_count, *pinned_bo_count;
362 struct intel_overlay_error_state *overlay;
363 struct intel_display_error_state *display;
364 int hangcheck_score[I915_NUM_RINGS];
365 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
366}; 405};
367 406
368struct intel_connector; 407struct intel_connector;
369struct intel_crtc_config; 408struct intel_crtc_config;
409struct intel_plane_config;
370struct intel_crtc; 410struct intel_crtc;
371struct intel_limit; 411struct intel_limit;
372struct dpll; 412struct dpll;
@@ -405,6 +445,8 @@ struct drm_i915_display_funcs {
405 * fills out the pipe-config with the hw state. */ 445 * fills out the pipe-config with the hw state. */
406 bool (*get_pipe_config)(struct intel_crtc *, 446 bool (*get_pipe_config)(struct intel_crtc *,
407 struct intel_crtc_config *); 447 struct intel_crtc_config *);
448 void (*get_plane_config)(struct intel_crtc *,
449 struct intel_plane_config *);
408 int (*crtc_mode_set)(struct drm_crtc *crtc, 450 int (*crtc_mode_set)(struct drm_crtc *crtc,
409 int x, int y, 451 int x, int y,
410 struct drm_framebuffer *old_fb); 452 struct drm_framebuffer *old_fb);
@@ -420,8 +462,9 @@ struct drm_i915_display_funcs {
420 struct drm_framebuffer *fb, 462 struct drm_framebuffer *fb,
421 struct drm_i915_gem_object *obj, 463 struct drm_i915_gem_object *obj,
422 uint32_t flags); 464 uint32_t flags);
423 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 465 int (*update_primary_plane)(struct drm_crtc *crtc,
424 int x, int y); 466 struct drm_framebuffer *fb,
467 int x, int y);
425 void (*hpd_irq_setup)(struct drm_device *dev); 468 void (*hpd_irq_setup)(struct drm_device *dev);
426 /* clock updates for mode set */ 469 /* clock updates for mode set */
427 /* cursor updates */ 470 /* cursor updates */
@@ -469,7 +512,7 @@ struct intel_uncore {
469 unsigned fw_rendercount; 512 unsigned fw_rendercount;
470 unsigned fw_mediacount; 513 unsigned fw_mediacount;
471 514
472 struct delayed_work force_wake_work; 515 struct timer_list force_wake_timer;
473}; 516};
474 517
475#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 518#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -504,9 +547,16 @@ struct intel_uncore {
504struct intel_device_info { 547struct intel_device_info {
505 u32 display_mmio_offset; 548 u32 display_mmio_offset;
506 u8 num_pipes:3; 549 u8 num_pipes:3;
550 u8 num_sprites[I915_MAX_PIPES];
507 u8 gen; 551 u8 gen;
508 u8 ring_mask; /* Rings supported by the HW */ 552 u8 ring_mask; /* Rings supported by the HW */
509 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 553 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
554 /* Register offsets for the various display pipes and transcoders */
555 int pipe_offsets[I915_MAX_TRANSCODERS];
556 int trans_offsets[I915_MAX_TRANSCODERS];
557 int dpll_offsets[I915_MAX_PIPES];
558 int dpll_md_offsets[I915_MAX_PIPES];
559 int palette_offsets[I915_MAX_PIPES];
510}; 560};
511 561
512#undef DEFINE_FLAG 562#undef DEFINE_FLAG
@@ -524,6 +574,57 @@ enum i915_cache_level {
524 574
525typedef uint32_t gen6_gtt_pte_t; 575typedef uint32_t gen6_gtt_pte_t;
526 576
577/**
578 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
579 * VMA's presence cannot be guaranteed before binding, or after unbinding the
580 * object into/from the address space.
581 *
 582 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 583 * will always be <= an object's lifetime. So object refcounting should cover us.
584 */
585struct i915_vma {
586 struct drm_mm_node node;
587 struct drm_i915_gem_object *obj;
588 struct i915_address_space *vm;
589
590 /** This object's place on the active/inactive lists */
591 struct list_head mm_list;
592
593 struct list_head vma_link; /* Link in the object's VMA list */
594
595 /** This vma's place in the batchbuffer or on the eviction list */
596 struct list_head exec_list;
597
598 /**
599 * Used for performing relocations during execbuffer insertion.
600 */
601 struct hlist_node exec_node;
602 unsigned long exec_handle;
603 struct drm_i915_gem_exec_object2 *exec_entry;
604
605 /**
606 * How many users have pinned this object in GTT space. The following
607 * users can each hold at most one reference: pwrite/pread, pin_ioctl
608 * (via user_pin_count), execbuffer (objects are not allowed multiple
609 * times for the same batchbuffer), and the framebuffer code. When
610 * switching/pageflipping, the framebuffer code has at most two buffers
611 * pinned per crtc.
612 *
613 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
614 * bits with absolutely no headroom. So use 4 bits. */
615 unsigned int pin_count:4;
616#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
617
618 /** Unmap an object from an address space. This usually consists of
619 * setting the valid PTE entries to a reserved scratch page. */
620 void (*unbind_vma)(struct i915_vma *vma);
621 /* Map an object into an address space with the given cache flags. */
622#define GLOBAL_BIND (1<<0)
623 void (*bind_vma)(struct i915_vma *vma,
624 enum i915_cache_level cache_level,
625 u32 flags);
626};
627
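
A toy model (userspace C, hypothetical names) of the object-to-VMA relationship described above, now that pin_count lives on the VMA rather than the object: one GEM object, several VMAs linked off it, pinned if any VMA holds a pin. The 4-bit budget follows from the worst case in the comment: 1 (pread/pwrite) + 1 (pin_ioctl) + 1 (execbuffer) + 2*2 (two framebuffers per crtc) = 7. The walk mirrors the i915_gem_obj_is_pinned() helper added later in this diff.

	#include <stdbool.h>
	#include <stdio.h>

	struct vma {
		unsigned int pin_count;
		struct vma *next;	/* stand-in for the object's vma_list */
	};

	struct gem_object {
		struct vma *vma_list;
	};

	static bool obj_is_pinned(const struct gem_object *obj)
	{
		const struct vma *v;

		for (v = obj->vma_list; v; v = v->next)
			if (v->pin_count > 0)
				return true;
		return false;
	}

	int main(void)
	{
		struct vma ggtt = { .pin_count = 2, .next = NULL };	/* e.g. scanout */
		struct vma ppgtt = { .pin_count = 0, .next = &ggtt };
		struct gem_object obj = { .vma_list = &ppgtt };

		printf("pinned: %s\n", obj_is_pinned(&obj) ? "yes" : "no");
		return 0;
	}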
527struct i915_address_space { 628struct i915_address_space {
528 struct drm_mm mm; 629 struct drm_mm mm;
529 struct drm_device *dev; 630 struct drm_device *dev;
@@ -564,12 +665,12 @@ struct i915_address_space {
564 enum i915_cache_level level, 665 enum i915_cache_level level,
565 bool valid); /* Create a valid PTE */ 666 bool valid); /* Create a valid PTE */
566 void (*clear_range)(struct i915_address_space *vm, 667 void (*clear_range)(struct i915_address_space *vm,
567 unsigned int first_entry, 668 uint64_t start,
568 unsigned int num_entries, 669 uint64_t length,
569 bool use_scratch); 670 bool use_scratch);
570 void (*insert_entries)(struct i915_address_space *vm, 671 void (*insert_entries)(struct i915_address_space *vm,
571 struct sg_table *st, 672 struct sg_table *st,
572 unsigned int first_entry, 673 uint64_t start,
573 enum i915_cache_level cache_level); 674 enum i915_cache_level cache_level);
574 void (*cleanup)(struct i915_address_space *vm); 675 void (*cleanup)(struct i915_address_space *vm);
575}; 676};
@@ -603,55 +704,34 @@ struct i915_gtt {
603}; 704};
604#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) 705#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
605 706
707#define GEN8_LEGACY_PDPS 4
606struct i915_hw_ppgtt { 708struct i915_hw_ppgtt {
607 struct i915_address_space base; 709 struct i915_address_space base;
710 struct kref ref;
711 struct drm_mm_node node;
608 unsigned num_pd_entries; 712 unsigned num_pd_entries;
713 unsigned num_pd_pages; /* gen8+ */
609 union { 714 union {
610 struct page **pt_pages; 715 struct page **pt_pages;
611 struct page *gen8_pt_pages; 716 struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
612 }; 717 };
613 struct page *pd_pages; 718 struct page *pd_pages;
614 int num_pd_pages;
615 int num_pt_pages;
616 union { 719 union {
617 uint32_t pd_offset; 720 uint32_t pd_offset;
618 dma_addr_t pd_dma_addr[4]; 721 dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
619 }; 722 };
620 union { 723 union {
621 dma_addr_t *pt_dma_addr; 724 dma_addr_t *pt_dma_addr;
622 dma_addr_t *gen8_pt_dma_addr[4]; 725 dma_addr_t *gen8_pt_dma_addr[4];
623 }; 726 };
624 int (*enable)(struct drm_device *dev);
625};
626
627/**
628 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
629 * VMA's presence cannot be guaranteed before binding, or after unbinding the
630 * object into/from the address space.
631 *
632 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
633 * will always be <= an objects lifetime. So object refcounting should cover us.
634 */
635struct i915_vma {
636 struct drm_mm_node node;
637 struct drm_i915_gem_object *obj;
638 struct i915_address_space *vm;
639
640 /** This object's place on the active/inactive lists */
641 struct list_head mm_list;
642 727
643 struct list_head vma_link; /* Link in the object's VMA list */ 728 struct i915_hw_context *ctx;
644
645 /** This vma's place in the batchbuffer or on the eviction list */
646 struct list_head exec_list;
647
648 /**
649 * Used for performing relocations during execbuffer insertion.
650 */
651 struct hlist_node exec_node;
652 unsigned long exec_handle;
653 struct drm_i915_gem_exec_object2 *exec_entry;
654 729
730 int (*enable)(struct i915_hw_ppgtt *ppgtt);
731 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
732 struct intel_ring_buffer *ring,
733 bool synchronous);
734 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
655}; 735};
656 736
657struct i915_ctx_hang_stats { 737struct i915_ctx_hang_stats {
@@ -676,9 +756,10 @@ struct i915_hw_context {
676 bool is_initialized; 756 bool is_initialized;
677 uint8_t remap_slice; 757 uint8_t remap_slice;
678 struct drm_i915_file_private *file_priv; 758 struct drm_i915_file_private *file_priv;
679 struct intel_ring_buffer *ring; 759 struct intel_ring_buffer *last_ring;
680 struct drm_i915_gem_object *obj; 760 struct drm_i915_gem_object *obj;
681 struct i915_ctx_hang_stats hang_stats; 761 struct i915_ctx_hang_stats hang_stats;
762 struct i915_address_space *vm;
682 763
683 struct list_head link; 764 struct list_head link;
684}; 765};
@@ -831,11 +912,7 @@ struct i915_suspend_saved_registers {
831 u32 savePFIT_CONTROL; 912 u32 savePFIT_CONTROL;
832 u32 save_palette_a[256]; 913 u32 save_palette_a[256];
833 u32 save_palette_b[256]; 914 u32 save_palette_b[256];
834 u32 saveDPFC_CB_BASE;
835 u32 saveFBC_CFB_BASE;
836 u32 saveFBC_LL_BASE;
837 u32 saveFBC_CONTROL; 915 u32 saveFBC_CONTROL;
838 u32 saveFBC_CONTROL2;
839 u32 saveIER; 916 u32 saveIER;
840 u32 saveIIR; 917 u32 saveIIR;
841 u32 saveIMR; 918 u32 saveIMR;
@@ -905,15 +982,24 @@ struct intel_gen6_power_mgmt {
905 struct work_struct work; 982 struct work_struct work;
906 u32 pm_iir; 983 u32 pm_iir;
907 984
908 /* The below variables an all the rps hw state are protected by 985 /* Frequencies are stored in potentially platform dependent multiples.
909 * dev->struct mutext. */ 986 * In other words, *_freq needs to be multiplied by X to be interesting.
910 u8 cur_delay; 987 * Soft limits are those which are used for the dynamic reclocking done
911 u8 min_delay; 988 * by the driver (raise frequencies under heavy loads, and lower for
912 u8 max_delay; 989 * lighter loads). Hard limits are those imposed by the hardware.
913 u8 rpe_delay; 990 *
914 u8 rp1_delay; 991 * A distinction is made for overclocking, which is never enabled by
915 u8 rp0_delay; 992 * default, and is considered to be above the hard limit if it's
916 u8 hw_max; 993 * possible at all.
994 */
995 u8 cur_freq; /* Current frequency (cached, may not == HW) */
996 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
997 u8 max_freq_softlimit; /* Max frequency permitted by the driver */
998 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
999 u8 min_freq; /* AKA RPn. Minimum frequency */
1000 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
1001 u8 rp1_freq; /* "less than" RP0 power/frequency */
1002 u8 rp0_freq; /* Non-overclocked max frequency. */
917 1003
918 int last_adj; 1004 int last_adj;
919 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1005 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
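
A sketch of the soft-versus-hard limit scheme in the comment above (standalone C, illustrative only): the driver reclocks within [min_freq_softlimit, max_freq_softlimit], a range that must itself sit inside the hardware range [min_freq (RPn), max_freq (RP0)]. The frequency values are the platform-dependent multiples the comment mentions, not MHz.

	#include <stdio.h>

	struct rps {
		unsigned char min_freq, max_freq;			/* hard, from hw */
		unsigned char min_freq_softlimit, max_freq_softlimit;	/* driver policy */
	};

	static unsigned char clamp_freq(const struct rps *rps, int want)
	{
		if (want < rps->min_freq_softlimit)
			want = rps->min_freq_softlimit;
		if (want > rps->max_freq_softlimit)
			want = rps->max_freq_softlimit;
		return (unsigned char)want;
	}

	int main(void)
	{
		struct rps rps = {
			.min_freq = 3, .max_freq = 22,		/* RPn .. RP0 */
			.min_freq_softlimit = 5, .max_freq_softlimit = 18,
		};

		printf("heavy load -> %u\n", clamp_freq(&rps, 25));	/* 18 */
		printf("idle       -> %u\n", clamp_freq(&rps, 0));	/* 5  */
		return 0;
	}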
@@ -953,6 +1039,36 @@ struct intel_ilk_power_mgmt {
953 struct drm_i915_gem_object *renderctx; 1039 struct drm_i915_gem_object *renderctx;
954}; 1040};
955 1041
1042struct drm_i915_private;
1043struct i915_power_well;
1044
1045struct i915_power_well_ops {
1046 /*
1047 * Synchronize the well's hw state to match the current sw state, for
1048 * example enable/disable it based on the current refcount. Called
1049 * during driver init and resume time, possibly after first calling
1050 * the enable/disable handlers.
1051 */
1052 void (*sync_hw)(struct drm_i915_private *dev_priv,
1053 struct i915_power_well *power_well);
1054 /*
1055 * Enable the well and resources that depend on it (for example
1056 * interrupts located on the well). Called after the 0->1 refcount
1057 * transition.
1058 */
1059 void (*enable)(struct drm_i915_private *dev_priv,
1060 struct i915_power_well *power_well);
1061 /*
1062 * Disable the well and resources that depend on it. Called after
1063 * the 1->0 refcount transition.
1064 */
1065 void (*disable)(struct drm_i915_private *dev_priv,
1066 struct i915_power_well *power_well);
1067 /* Returns the hw enabled state. */
1068 bool (*is_enabled)(struct drm_i915_private *dev_priv,
1069 struct i915_power_well *power_well);
1070};
1071
956/* Power well structure for haswell */ 1072/* Power well structure for haswell */
957struct i915_power_well { 1073struct i915_power_well {
958 const char *name; 1074 const char *name;
@@ -960,11 +1076,8 @@ struct i915_power_well {
960 /* power well enable/disable usage count */ 1076 /* power well enable/disable usage count */
961 int count; 1077 int count;
962 unsigned long domains; 1078 unsigned long domains;
963 void *data; 1079 unsigned long data;
964 void (*set)(struct drm_device *dev, struct i915_power_well *power_well, 1080 const struct i915_power_well_ops *ops;
965 bool enable);
966 bool (*is_enabled)(struct drm_device *dev,
967 struct i915_power_well *power_well);
968}; 1081};
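
A minimal sketch (userspace C, hypothetical helpers) of the refcount discipline the i915_power_well_ops comment describes: ->enable runs on the 0->1 count transition, ->disable on the 1->0 transition, and ->sync_hw (omitted here) would reconcile hardware state with the current count at init or resume time.

	#include <stdio.h>

	struct power_well;

	struct power_well_ops {
		void (*enable)(struct power_well *well);
		void (*disable)(struct power_well *well);
	};

	struct power_well {
		const char *name;
		int count;	/* enable/disable usage count */
		const struct power_well_ops *ops;
	};

	static void well_get(struct power_well *well)
	{
		if (well->count++ == 0)
			well->ops->enable(well);	/* 0 -> 1 */
	}

	static void well_put(struct power_well *well)
	{
		if (--well->count == 0)
			well->ops->disable(well);	/* 1 -> 0 */
	}

	static void do_enable(struct power_well *w)  { printf("%s: on\n", w->name); }
	static void do_disable(struct power_well *w) { printf("%s: off\n", w->name); }

	static const struct power_well_ops ops = { do_enable, do_disable };

	int main(void)
	{
		struct power_well well = { "display", 0, &ops };

		well_get(&well);	/* enables the well */
		well_get(&well);	/* no-op, count now 2 */
		well_put(&well);	/* no-op, count back to 1 */
		well_put(&well);	/* disables the well */
		return 0;
	}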
969 1082
970struct i915_power_domains { 1083struct i915_power_domains {
@@ -1061,6 +1174,14 @@ struct i915_gem_mm {
1061 */ 1174 */
1062 bool interruptible; 1175 bool interruptible;
1063 1176
1177 /**
1178 * Is the GPU currently considered idle, or busy executing userspace
1179 * requests? Whilst idle, we attempt to power down the hardware and
1180 * display clocks. In order to reduce the effect on performance, there
1181 * is a slight delay before we do so.
1182 */
1183 bool busy;
1184
1064 /** Bit 6 swizzling required for X tiling */ 1185 /** Bit 6 swizzling required for X tiling */
1065 uint32_t bit_6_swizzle_x; 1186 uint32_t bit_6_swizzle_x;
1066 /** Bit 6 swizzling required for Y tiling */ 1187 /** Bit 6 swizzling required for Y tiling */
@@ -1226,44 +1347,19 @@ struct ilk_wm_values {
1226}; 1347};
1227 1348
1228/* 1349/*
1229 * This struct tracks the state needed for the Package C8+ feature. 1350 * This struct helps tracking the state needed for runtime PM, which puts the
1230 * 1351 * device in PCI D3 state. Notice that when this happens, nothing on the
1231 * Package states C8 and deeper are really deep PC states that can only be 1352 * graphics device works, even register access, so we don't get interrupts nor
1232 * reached when all the devices on the system allow it, so even if the graphics 1353 * anything else.
1233 * device allows PC8+, it doesn't mean the system will actually get to these
1234 * states.
1235 *
1236 * Our driver only allows PC8+ when all the outputs are disabled, the power well
1237 * is disabled and the GPU is idle. When these conditions are met, we manually
1238 * do the other conditions: disable the interrupts, clocks and switch LCPLL
1239 * refclk to Fclk.
1240 *
1241 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1242 * the state of some registers, so when we come back from PC8+ we need to
1243 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1244 * need to take care of the registers kept by RC6.
1245 * 1354 *
1246 * The interrupt disabling is part of the requirements. We can only leave the 1355 * Every piece of our code that needs to actually touch the hardware needs to
1247 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we 1356 * either call intel_runtime_pm_get or call intel_display_power_get with the
1248 * can lock the machine. 1357 * appropriate power domain.
1249 * 1358 *
1250 * Ideally every piece of our code that needs PC8+ disabled would call 1359 * Our driver uses the autosuspend delay feature, which means we'll only really
1251 * hsw_disable_package_c8, which would increment disable_count and prevent the 1360 * suspend if we stay with zero refcount for a certain amount of time. The
1252 * system from reaching PC8+. But we don't have a symmetric way to do this for 1361 * default value is currently very conservative (see intel_init_runtime_pm), but
1253 * everything, so we have the requirements_met and gpu_idle variables. When we 1362 * it can be changed with the standard runtime PM files from sysfs.
1254 * switch requirements_met or gpu_idle to true we decrease disable_count, and
1255 * increase it in the opposite case. The requirements_met variable is true when
1256 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
1257 * variable is true when the GPU is idle.
1258 *
1259 * In addition to everything, we only actually enable PC8+ if disable_count
1260 * stays at zero for at least some seconds. This is implemented with the
1261 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1262 * consecutive times when all screens are disabled and some background app
1263 * queries the state of our connectors, or we have some application constantly
1264 * waking up to use the GPU. Only after the enable_work function actually
1265 * enables PC8+ the "enable" variable will become true, which means that it can
1266 * be false even if disable_count is 0.
1267 * 1363 *
1268 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1364 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1269 * goes back to false exactly before we reenable the IRQs. We use this variable 1365 * goes back to false exactly before we reenable the IRQs. We use this variable
@@ -1273,17 +1369,11 @@ struct ilk_wm_values {
1273 * inside struct regsave so when we restore the IRQs they will contain the 1369 * inside struct regsave so when we restore the IRQs they will contain the
1274 * latest expected values. 1370 * latest expected values.
1275 * 1371 *
1276 * For more, read "Display Sequences for Package C8" on our documentation. 1372 * For more, read the Documentation/power/runtime_pm.txt.
1277 */ 1373 */
1278struct i915_package_c8 { 1374struct i915_runtime_pm {
1279 bool requirements_met; 1375 bool suspended;
1280 bool gpu_idle;
1281 bool irqs_disabled; 1376 bool irqs_disabled;
1282 /* Only true after the delayed work task actually enables it. */
1283 bool enabled;
1284 int disable_count;
1285 struct mutex lock;
1286 struct delayed_work enable_work;
1287 1377
1288 struct { 1378 struct {
1289 uint32_t deimr; 1379 uint32_t deimr;
@@ -1294,10 +1384,6 @@ struct i915_package_c8 {
1294 } regsave; 1384 } regsave;
1295}; 1385};
1296 1386
1297struct i915_runtime_pm {
1298 bool suspended;
1299};
1300
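
The usage pattern implied by the rewritten comment, sketched with hypothetical wrappers: every hardware access is bracketed by a runtime PM reference so the device cannot drop into D3 mid-access. The real helpers are intel_runtime_pm_get()/intel_runtime_pm_put(); the toy below only models the refcount, and suspends immediately on the last put where the real code arms an autosuspend timer.

	#include <stdbool.h>
	#include <stdio.h>

	struct dev_pm {
		int usage;	/* runtime PM usage count */
		bool suspended;
	};

	static void rpm_get(struct dev_pm *pm)
	{
		if (pm->usage++ == 0 && pm->suspended) {
			pm->suspended = false;
			printf("runtime resume (leave D3)\n");
		}
	}

	static void rpm_put(struct dev_pm *pm)
	{
		if (--pm->usage == 0) {
			pm->suspended = true;
			printf("runtime suspend (enter D3)\n");
		}
	}

	int main(void)
	{
		struct dev_pm pm = { .usage = 0, .suspended = true };

		rpm_get(&pm);
		printf("touch registers safely\n");
		rpm_put(&pm);
		return 0;
	}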
1301enum intel_pipe_crc_source { 1387enum intel_pipe_crc_source {
1302 INTEL_PIPE_CRC_SOURCE_NONE, 1388 INTEL_PIPE_CRC_SOURCE_NONE,
1303 INTEL_PIPE_CRC_SOURCE_PLANE1, 1389 INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1332,7 +1418,7 @@ typedef struct drm_i915_private {
1332 struct drm_device *dev; 1418 struct drm_device *dev;
1333 struct kmem_cache *slab; 1419 struct kmem_cache *slab;
1334 1420
1335 const struct intel_device_info *info; 1421 const struct intel_device_info info;
1336 1422
1337 int relative_constants_mode; 1423 int relative_constants_mode;
1338 1424
@@ -1361,11 +1447,11 @@ typedef struct drm_i915_private {
1361 drm_dma_handle_t *status_page_dmah; 1447 drm_dma_handle_t *status_page_dmah;
1362 struct resource mch_res; 1448 struct resource mch_res;
1363 1449
1364 atomic_t irq_received;
1365
1366 /* protects the irq masks */ 1450 /* protects the irq masks */
1367 spinlock_t irq_lock; 1451 spinlock_t irq_lock;
1368 1452
1453 bool display_irqs_enabled;
1454
1369 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1455 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1370 struct pm_qos_request pm_qos; 1456 struct pm_qos_request pm_qos;
1371 1457
@@ -1379,6 +1465,8 @@ typedef struct drm_i915_private {
1379 }; 1465 };
1380 u32 gt_irq_mask; 1466 u32 gt_irq_mask;
1381 u32 pm_irq_mask; 1467 u32 pm_irq_mask;
1468 u32 pm_rps_events;
1469 u32 pipestat_irq_mask[I915_MAX_PIPES];
1382 1470
1383 struct work_struct hotplug_work; 1471 struct work_struct hotplug_work;
1384 bool enable_hotplug_processing; 1472 bool enable_hotplug_processing;
@@ -1394,8 +1482,6 @@ typedef struct drm_i915_private {
1394 u32 hpd_event_bits; 1482 u32 hpd_event_bits;
1395 struct timer_list hotplug_reenable_timer; 1483 struct timer_list hotplug_reenable_timer;
1396 1484
1397 int num_plane;
1398
1399 struct i915_fbc fbc; 1485 struct i915_fbc fbc;
1400 struct intel_opregion opregion; 1486 struct intel_opregion opregion;
1401 struct intel_vbt_data vbt; 1487 struct intel_vbt_data vbt;
@@ -1445,8 +1531,8 @@ typedef struct drm_i915_private {
1445 1531
1446 struct sdvo_device_mapping sdvo_mappings[2]; 1532 struct sdvo_device_mapping sdvo_mappings[2];
1447 1533
1448 struct drm_crtc *plane_to_crtc_mapping[3]; 1534 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1449 struct drm_crtc *pipe_to_crtc_mapping[3]; 1535 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1450 wait_queue_head_t pending_flip_queue; 1536 wait_queue_head_t pending_flip_queue;
1451 1537
1452#ifdef CONFIG_DEBUG_FS 1538#ifdef CONFIG_DEBUG_FS
@@ -1506,6 +1592,7 @@ typedef struct drm_i915_private {
1506 1592
1507 u32 fdi_rx_config; 1593 u32 fdi_rx_config;
1508 1594
1595 u32 suspend_count;
1509 struct i915_suspend_saved_registers regfile; 1596 struct i915_suspend_saved_registers regfile;
1510 1597
1511 struct { 1598 struct {
@@ -1525,8 +1612,6 @@ typedef struct drm_i915_private {
1525 struct ilk_wm_values hw; 1612 struct ilk_wm_values hw;
1526 } wm; 1613 } wm;
1527 1614
1528 struct i915_package_c8 pc8;
1529
1530 struct i915_runtime_pm pm; 1615 struct i915_runtime_pm pm;
1531 1616
1532 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1617 /* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1627,18 +1712,6 @@ struct drm_i915_gem_object {
1627 */ 1712 */
1628 unsigned int fence_dirty:1; 1713 unsigned int fence_dirty:1;
1629 1714
1630 /** How many users have pinned this object in GTT space. The following
1631 * users can each hold at most one reference: pwrite/pread, pin_ioctl
1632 * (via user_pin_count), execbuffer (objects are not allowed multiple
1633 * times for the same batchbuffer), and the framebuffer code. When
1634 * switching/pageflipping, the framebuffer code has at most two buffers
1635 * pinned per crtc.
1636 *
1637 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1638 * bits with absolutely no headroom. So use 4 bits. */
1639 unsigned int pin_count:4;
1640#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
1641
1642 /** 1715 /**
1643 * Is the object at the current location in the gtt mappable and 1716 * Is the object at the current location in the gtt mappable and
1644 * fenceable? Used to avoid costly recalculations. 1717 * fenceable? Used to avoid costly recalculations.
@@ -1697,7 +1770,6 @@ struct drm_i915_gem_object {
1697 /** for phy allocated objects */ 1770 /** for phy allocated objects */
1698 struct drm_i915_gem_phys_object *phys_obj; 1771 struct drm_i915_gem_phys_object *phys_obj;
1699}; 1772};
1700#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1701 1773
1702#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1774#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1703 1775
@@ -1743,6 +1815,7 @@ struct drm_i915_gem_request {
1743 1815
1744struct drm_i915_file_private { 1816struct drm_i915_file_private {
1745 struct drm_i915_private *dev_priv; 1817 struct drm_i915_private *dev_priv;
1818 struct drm_file *file;
1746 1819
1747 struct { 1820 struct {
1748 spinlock_t lock; 1821 spinlock_t lock;
@@ -1751,11 +1824,95 @@ struct drm_i915_file_private {
1751 } mm; 1824 } mm;
1752 struct idr context_idr; 1825 struct idr context_idr;
1753 1826
1754 struct i915_ctx_hang_stats hang_stats; 1827 struct i915_hw_context *private_default_ctx;
1755 atomic_t rps_wait_boost; 1828 atomic_t rps_wait_boost;
1756}; 1829};
1757 1830
1758#define INTEL_INFO(dev) (to_i915(dev)->info) 1831/*
1832 * A command that requires special handling by the command parser.
1833 */
1834struct drm_i915_cmd_descriptor {
1835 /*
1836 * Flags describing how the command parser processes the command.
1837 *
1838 * CMD_DESC_FIXED: The command has a fixed length if this is set,
1839 * a length mask if not set
1840 * CMD_DESC_SKIP: The command is allowed but does not follow the
1841 * standard length encoding for the opcode range in
1842 * which it falls
1843 * CMD_DESC_REJECT: The command is never allowed
1844 * CMD_DESC_REGISTER: The command should be checked against the
1845 * register whitelist for the appropriate ring
1846 * CMD_DESC_MASTER: The command is allowed if the submitting process
1847 * is the DRM master
1848 */
1849 u32 flags;
1850#define CMD_DESC_FIXED (1<<0)
1851#define CMD_DESC_SKIP (1<<1)
1852#define CMD_DESC_REJECT (1<<2)
1853#define CMD_DESC_REGISTER (1<<3)
1854#define CMD_DESC_BITMASK (1<<4)
1855#define CMD_DESC_MASTER (1<<5)
1856
1857 /*
1858 * The command's unique identification bits and the bitmask to get them.
1859 * This isn't strictly the opcode field as defined in the spec and may
1860 * also include type, subtype, and/or subop fields.
1861 */
1862 struct {
1863 u32 value;
1864 u32 mask;
1865 } cmd;
1866
1867 /*
1868 * The command's length. The command is either fixed length (i.e. does
1869 * not include a length field) or has a length field mask. The flag
1870 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
1871 * a length mask. All command entries in a command table must include
1872 * length information.
1873 */
1874 union {
1875 u32 fixed;
1876 u32 mask;
1877 } length;
1878
1879 /*
1880 * Describes where to find a register address in the command to check
1881 * against the ring's register whitelist. Only valid if flags has the
1882 * CMD_DESC_REGISTER bit set.
1883 */
1884 struct {
1885 u32 offset;
1886 u32 mask;
1887 } reg;
1888
1889#define MAX_CMD_DESC_BITMASKS 3
1890 /*
1891 * Describes command checks where a particular dword is masked and
1892 * compared against an expected value. If the command does not match
1893 * the expected value, the parser rejects it. Only valid if flags has
1894 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
1895 * are valid.
1896 */
1897 struct {
1898 u32 offset;
1899 u32 mask;
1900 u32 expected;
1901 } bits[MAX_CMD_DESC_BITMASKS];
1902};
1903
1904/*
1905 * A table of commands requiring special handling by the command parser.
1906 *
1907 * Each ring has an array of tables. Each table consists of an array of command
1908 * descriptors, which must be sorted with command opcodes in ascending order.
1909 */
1910struct drm_i915_cmd_table {
1911 const struct drm_i915_cmd_descriptor *table;
1912 int count;
1913};
1914
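
An illustrative use of the descriptor layout above (standalone C with invented opcode values, not the real parser tables): a command dword is matched against cmd.value under cmd.mask, and then each populated bits[] entry is checked as the CMD_DESC_BITMASK comment describes, rejecting the command on any mismatch.

	#include <stdint.h>
	#include <stdio.h>

	#define CMD_DESC_FIXED   (1 << 0)
	#define CMD_DESC_BITMASK (1 << 4)
	#define MAX_BITMASKS     3

	struct cmd_desc {
		uint32_t flags;
		struct { uint32_t value, mask; } cmd;
		union { uint32_t fixed, mask; } length;
		struct { uint32_t offset, mask, expected; } bits[MAX_BITMASKS];
	};

	/* 0 = descriptor doesn't match, 1 = matched and allowed, -1 = rejected */
	static int check_cmd(const struct cmd_desc *d, const uint32_t *cmd)
	{
		int i;

		if ((cmd[0] & d->cmd.mask) != d->cmd.value)
			return 0;

		if (d->flags & CMD_DESC_BITMASK) {
			for (i = 0; i < MAX_BITMASKS; i++) {
				if (!d->bits[i].mask)
					continue;	/* unused entry */
				if ((cmd[d->bits[i].offset] & d->bits[i].mask) !=
				    d->bits[i].expected)
					return -1;
			}
		}
		return 1;
	}

	int main(void)
	{
		/* Invented 2-dword command: opcode in bits 31:24 of dword 0,
		 * and bit 0 of dword 1 must be clear for the command to pass. */
		const struct cmd_desc desc = {
			.flags = CMD_DESC_FIXED | CMD_DESC_BITMASK,
			.cmd = { .value = 0x18u << 24, .mask = 0xffu << 24 },
			.length = { .fixed = 2 },
			.bits = { { .offset = 1, .mask = 0x1, .expected = 0 } },
		};
		uint32_t ok[2]  = { 0x18u << 24, 0x0 };
		uint32_t bad[2] = { 0x18u << 24, 0x1 };

		printf("ok:  %d\n", check_cmd(&desc, ok));	/* 1  */
		printf("bad: %d\n", check_cmd(&desc, bad));	/* -1 */
		return 0;
	}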
1915#define INTEL_INFO(dev) (&to_i915(dev)->info)
1759 1916
1760#define IS_I830(dev) ((dev)->pdev->device == 0x3577) 1917#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
1761#define IS_845G(dev) ((dev)->pdev->device == 0x2562) 1918#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
@@ -1824,7 +1981,11 @@ struct drm_i915_file_private {
1824#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1981#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1825 1982
1826#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 1983#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1827#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) 1984#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1985#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
1986 && !IS_BROADWELL(dev))
1987#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
1988#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
1828 1989
1829#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1990#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1830#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1991#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +2048,40 @@ struct drm_i915_file_private {
1887 2048
1888extern const struct drm_ioctl_desc i915_ioctls[]; 2049extern const struct drm_ioctl_desc i915_ioctls[];
1889extern int i915_max_ioctl; 2050extern int i915_max_ioctl;
1890extern unsigned int i915_fbpercrtc __always_unused;
1891extern int i915_panel_ignore_lid __read_mostly;
1892extern unsigned int i915_powersave __read_mostly;
1893extern int i915_semaphores __read_mostly;
1894extern unsigned int i915_lvds_downclock __read_mostly;
1895extern int i915_lvds_channel_mode __read_mostly;
1896extern int i915_panel_use_ssc __read_mostly;
1897extern int i915_vbt_sdvo_panel_type __read_mostly;
1898extern int i915_enable_rc6 __read_mostly;
1899extern int i915_enable_fbc __read_mostly;
1900extern bool i915_enable_hangcheck __read_mostly;
1901extern int i915_enable_ppgtt __read_mostly;
1902extern int i915_enable_psr __read_mostly;
1903extern unsigned int i915_preliminary_hw_support __read_mostly;
1904extern int i915_disable_power_well __read_mostly;
1905extern int i915_enable_ips __read_mostly;
1906extern bool i915_fastboot __read_mostly;
1907extern int i915_enable_pc8 __read_mostly;
1908extern int i915_pc8_timeout __read_mostly;
1909extern bool i915_prefault_disable __read_mostly;
1910 2051
1911extern int i915_suspend(struct drm_device *dev, pm_message_t state); 2052extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1912extern int i915_resume(struct drm_device *dev); 2053extern int i915_resume(struct drm_device *dev);
1913extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 2054extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1914extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 2055extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1915 2056
2057/* i915_params.c */
2058struct i915_params {
2059 int modeset;
2060 int panel_ignore_lid;
2061 unsigned int powersave;
2062 int semaphores;
2063 unsigned int lvds_downclock;
2064 int lvds_channel_mode;
2065 int panel_use_ssc;
2066 int vbt_sdvo_panel_type;
2067 int enable_rc6;
2068 int enable_fbc;
2069 int enable_ppgtt;
2070 int enable_psr;
2071 unsigned int preliminary_hw_support;
2072 int disable_power_well;
2073 int enable_ips;
2074 int invert_brightness;
2075 int enable_cmd_parser;
2076 /* leave bools at the end to not create holes */
2077 bool enable_hangcheck;
2078 bool fastboot;
2079 bool prefault_disable;
2080 bool reset;
2081 bool disable_display;
2082};
2083extern struct i915_params i915 __read_mostly;
2084
1916 /* i915_dma.c */ 2085 /* i915_dma.c */
1917void i915_update_dri1_breadcrumb(struct drm_device *dev); 2086void i915_update_dri1_breadcrumb(struct drm_device *dev);
1918extern void i915_kernel_lost_context(struct drm_device * dev); 2087extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1943,8 +2112,12 @@ extern void intel_console_resume(struct work_struct *work);
1943 2112
1944/* i915_irq.c */ 2113/* i915_irq.c */
1945void i915_queue_hangcheck(struct drm_device *dev); 2114void i915_queue_hangcheck(struct drm_device *dev);
1946void i915_handle_error(struct drm_device *dev, bool wedged); 2115__printf(3, 4)
2116void i915_handle_error(struct drm_device *dev, bool wedged,
2117 const char *fmt, ...);
1947 2118
2119void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
2120 int new_delay);
1948extern void intel_irq_init(struct drm_device *dev); 2121extern void intel_irq_init(struct drm_device *dev);
1949extern void intel_hpd_init(struct drm_device *dev); 2122extern void intel_hpd_init(struct drm_device *dev);
1950 2123
@@ -1955,10 +2128,15 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
1955extern void intel_uncore_fini(struct drm_device *dev); 2128extern void intel_uncore_fini(struct drm_device *dev);
1956 2129
1957void 2130void
1958i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); 2131i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2132 u32 status_mask);
1959 2133
1960void 2134void
1961i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); 2135i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2136 u32 status_mask);
2137
2138void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2139void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
1962 2140
1963/* i915_gem.c */ 2141/* i915_gem.c */
1964int i915_gem_init_ioctl(struct drm_device *dev, void *data, 2142int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -2014,22 +2192,27 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
2014 const struct drm_i915_gem_object_ops *ops); 2192 const struct drm_i915_gem_object_ops *ops);
2015struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2193struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2016 size_t size); 2194 size_t size);
2195void i915_init_vm(struct drm_i915_private *dev_priv,
2196 struct i915_address_space *vm);
2017void i915_gem_free_object(struct drm_gem_object *obj); 2197void i915_gem_free_object(struct drm_gem_object *obj);
2018void i915_gem_vma_destroy(struct i915_vma *vma); 2198void i915_gem_vma_destroy(struct i915_vma *vma);
2019 2199
2200#define PIN_MAPPABLE 0x1
2201#define PIN_NONBLOCK 0x2
2202#define PIN_GLOBAL 0x4
2020int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2203int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2021 struct i915_address_space *vm, 2204 struct i915_address_space *vm,
2022 uint32_t alignment, 2205 uint32_t alignment,
2023 bool map_and_fenceable, 2206 unsigned flags);
2024 bool nonblocking);
2025void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
2026int __must_check i915_vma_unbind(struct i915_vma *vma); 2207int __must_check i915_vma_unbind(struct i915_vma *vma);
2027int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
2028int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2208int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2029void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2209void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2030void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2210void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2031void i915_gem_lastclose(struct drm_device *dev); 2211void i915_gem_lastclose(struct drm_device *dev);
2032 2212
2213int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
2214 int *needs_clflush);
2215
2033int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2216int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
2034static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 2217static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2035{ 2218{
@@ -2096,8 +2279,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
2096 } 2279 }
2097} 2280}
2098 2281
2282struct drm_i915_gem_request *
2283i915_gem_find_active_request(struct intel_ring_buffer *ring);
2284
2099bool i915_gem_retire_requests(struct drm_device *dev); 2285bool i915_gem_retire_requests(struct drm_device *dev);
2100void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
2101int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 2286int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2102 bool interruptible); 2287 bool interruptible);
2103static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 2288static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2186,6 +2371,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2186 struct i915_address_space *vm); 2371 struct i915_address_space *vm);
2187 2372
2188struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); 2373struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2374static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
2375 struct i915_vma *vma;
2376 list_for_each_entry(vma, &obj->vma_list, vma_link)
2377 if (vma->pin_count > 0)
2378 return true;
2379 return false;
2380}
2189 2381
2190/* Some GGTT VM helpers */ 2382/* Some GGTT VM helpers */
2191#define obj_to_ggtt(obj) \ 2383#define obj_to_ggtt(obj) \
@@ -2217,54 +2409,69 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2217static inline int __must_check 2409static inline int __must_check
2218i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 2410i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2219 uint32_t alignment, 2411 uint32_t alignment,
2220 bool map_and_fenceable, 2412 unsigned flags)
2221 bool nonblocking)
2222{ 2413{
2223 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, 2414 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
2224 map_and_fenceable, nonblocking);
2225} 2415}
2226 2416
2417static inline int
2418i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2419{
2420 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
2421}
2422
2423void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
2424
2227/* i915_gem_context.c */ 2425/* i915_gem_context.c */
2426#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
2228int __must_check i915_gem_context_init(struct drm_device *dev); 2427int __must_check i915_gem_context_init(struct drm_device *dev);
2229void i915_gem_context_fini(struct drm_device *dev); 2428void i915_gem_context_fini(struct drm_device *dev);
2429void i915_gem_context_reset(struct drm_device *dev);
2430int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2431int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2230void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 2432void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2231int i915_switch_context(struct intel_ring_buffer *ring, 2433int i915_switch_context(struct intel_ring_buffer *ring,
2232 struct drm_file *file, int to_id); 2434 struct drm_file *file, struct i915_hw_context *to);
2435struct i915_hw_context *
2436i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2233void i915_gem_context_free(struct kref *ctx_ref); 2437void i915_gem_context_free(struct kref *ctx_ref);
2234static inline void i915_gem_context_reference(struct i915_hw_context *ctx) 2438static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2235{ 2439{
2236 kref_get(&ctx->ref); 2440 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2441 kref_get(&ctx->ref);
2237} 2442}
2238 2443
2239static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) 2444static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2240{ 2445{
2241 kref_put(&ctx->ref, i915_gem_context_free); 2446 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2447 kref_put(&ctx->ref, i915_gem_context_free);
2448}
2449
2450static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
2451{
2452 return c->id == DEFAULT_CONTEXT_ID;
2242} 2453}
2243 2454
2244struct i915_ctx_hang_stats * __must_check
2245i915_gem_context_get_hang_stats(struct drm_device *dev,
2246 struct drm_file *file,
2247 u32 id);
2248int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2455int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2249 struct drm_file *file); 2456 struct drm_file *file);
2250int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2457int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2251 struct drm_file *file); 2458 struct drm_file *file);
2252 2459
2253/* i915_gem_gtt.c */ 2460/* i915_gem_evict.c */
2254void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); 2461int __must_check i915_gem_evict_something(struct drm_device *dev,
2255void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 2462 struct i915_address_space *vm,
2256 struct drm_i915_gem_object *obj, 2463 int min_size,
2257 enum i915_cache_level cache_level); 2464 unsigned alignment,
2258void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2465 unsigned cache_level,
2259 struct drm_i915_gem_object *obj); 2466 unsigned flags);
2467int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2468int i915_gem_evict_everything(struct drm_device *dev);
2260 2469
2470/* i915_gem_gtt.c */
2261void i915_check_and_clear_faults(struct drm_device *dev); 2471void i915_check_and_clear_faults(struct drm_device *dev);
2262void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 2472void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2263void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2473void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2264int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2474int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2265void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
2266 enum i915_cache_level cache_level);
2267void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
2268void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 2475void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
2269void i915_gem_init_global_gtt(struct drm_device *dev); 2476void i915_gem_init_global_gtt(struct drm_device *dev);
2270void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, 2477void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2275,18 +2482,8 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
2275 if (INTEL_INFO(dev)->gen < 6) 2482 if (INTEL_INFO(dev)->gen < 6)
2276 intel_gtt_chipset_flush(); 2483 intel_gtt_chipset_flush();
2277} 2484}
2278 2485int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
2279 2486bool intel_enable_ppgtt(struct drm_device *dev, bool full);
2280/* i915_gem_evict.c */
2281int __must_check i915_gem_evict_something(struct drm_device *dev,
2282 struct i915_address_space *vm,
2283 int min_size,
2284 unsigned alignment,
2285 unsigned cache_level,
2286 bool mappable,
2287 bool nonblock);
2288int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2289int i915_gem_evict_everything(struct drm_device *dev);
2290 2487
2291/* i915_gem_stolen.c */ 2488/* i915_gem_stolen.c */
2292int i915_gem_init_stolen(struct drm_device *dev); 2489int i915_gem_init_stolen(struct drm_device *dev);
@@ -2305,7 +2502,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
2305/* i915_gem_tiling.c */ 2502/* i915_gem_tiling.c */
2306static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2503static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2307{ 2504{
2308 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2505 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2309 2506
2310 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 2507 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2311 obj->tiling_mode != I915_TILING_NONE; 2508 obj->tiling_mode != I915_TILING_NONE;
@@ -2343,7 +2540,8 @@ static inline void i915_error_state_buf_release(
2343{ 2540{
2344 kfree(eb->buf); 2541 kfree(eb->buf);
2345} 2542}
2346void i915_capture_error_state(struct drm_device *dev); 2543void i915_capture_error_state(struct drm_device *dev, bool wedge,
2544 const char *error_msg);
2347void i915_error_state_get(struct drm_device *dev, 2545void i915_error_state_get(struct drm_device *dev,
2348 struct i915_error_state_file_priv *error_priv); 2546 struct i915_error_state_file_priv *error_priv);
2349void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 2547void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@ -2352,6 +2550,14 @@ void i915_destroy_error_state(struct drm_device *dev);
2352void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 2550void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2353const char *i915_cache_level_str(int type); 2551const char *i915_cache_level_str(int type);
2354 2552
2553/* i915_cmd_parser.c */
2554void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
2555bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
2556int i915_parse_cmds(struct intel_ring_buffer *ring,
2557 struct drm_i915_gem_object *batch_obj,
2558 u32 batch_start_offset,
2559 bool is_master);
2560
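These declarations imply a parse-before-submit flow; the actual wiring lives in i915_cmd_parser.c and the execbuffer path, neither of which is in this hunk, so the following is only a sketch under that assumption (batch_start and is_master stand in for whatever the real caller passes):

/* Hedged sketch: validate an untrusted batch before submission. */
if (i915_needs_cmd_parser(ring)) {
        ret = i915_parse_cmds(ring, batch_obj, batch_start, is_master);
        if (ret)
                return ret;     /* reject the batch */
}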
2355/* i915_suspend.c */ 2561/* i915_suspend.c */
2356extern int i915_save_state(struct drm_device *dev); 2562extern int i915_save_state(struct drm_device *dev);
2357extern int i915_restore_state(struct drm_device *dev); 2563extern int i915_restore_state(struct drm_device *dev);
@@ -2425,10 +2631,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev);
2425extern void intel_modeset_init(struct drm_device *dev); 2631extern void intel_modeset_init(struct drm_device *dev);
2426extern void intel_modeset_gem_init(struct drm_device *dev); 2632extern void intel_modeset_gem_init(struct drm_device *dev);
2427extern void intel_modeset_cleanup(struct drm_device *dev); 2633extern void intel_modeset_cleanup(struct drm_device *dev);
2634extern void intel_connector_unregister(struct intel_connector *);
2428extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 2635extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
2429extern void intel_modeset_setup_hw_state(struct drm_device *dev, 2636extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2430 bool force_restore); 2637 bool force_restore);
2431extern void i915_redisable_vga(struct drm_device *dev); 2638extern void i915_redisable_vga(struct drm_device *dev);
2639extern void i915_redisable_vga_power_on(struct drm_device *dev);
2432extern bool intel_fbc_enabled(struct drm_device *dev); 2640extern bool intel_fbc_enabled(struct drm_device *dev);
2433extern void intel_disable_fbc(struct drm_device *dev); 2641extern void intel_disable_fbc(struct drm_device *dev);
2434extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 2642extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -2463,6 +2671,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2463 */ 2671 */
2464void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); 2672void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2465void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); 2673void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2674void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
2466 2675
2467int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2676int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2468int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2677int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2525,9 +2734,26 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2525#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 2734#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2526#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 2735#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
2527 2736
2737/* Be very careful with read/write 64-bit values. On 32-bit machines, they
2738 * will be implemented using 2 32-bit writes in an arbitrary order with
2739 * an arbitrary delay between them. This can cause the hardware to
2740 * act upon the intermediate value, possibly leading to corruption and
2741 * machine death. You have been warned.
2742 */
2528#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 2743#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2529#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 2744#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
2530 2745
2746#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
2747 u32 upper = I915_READ(upper_reg); \
2748 u32 lower = I915_READ(lower_reg); \
2749 u32 tmp = I915_READ(upper_reg); \
2750 if (upper != tmp) { \
2751 upper = tmp; \
2752 lower = I915_READ(lower_reg); \
2753 WARN_ON(I915_READ(upper_reg) != upper); \
2754 } \
2755 (u64)upper << 32 | lower; })
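The 2x32 macro is the usual torn-read guard for a 64-bit value exposed as two 32-bit registers: sample upper, lower, upper again; if the two upper samples disagree, the lower read may have straddled a carry, so take the new upper and re-read lower. A worked trace with hypothetical values and placeholder register names:

/* counter is 0x00000001ffffffff and ticks mid-sequence:
 *   upper = 0x00000001
 *   lower = 0x00000003   <- already wrapped, counter now 0x2_00000003
 *   tmp   = 0x00000002   <- mismatch: the first lower read was torn
 *   retry: upper = 0x00000002, lower = 0x00000005 -> 0x0000000200000005
 */
u64 v = I915_READ64_2x32(LOWER_REG, UPPER_REG); /* placeholder registers */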
2756
2531#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 2757#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2532#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 2758#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
2533 2759
@@ -2566,4 +2792,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
2566 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 2792 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2567} 2793}
2568 2794
2795/*
2796 * If you need to wait X milliseconds between events A and B, but event B
2797 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
2798 * when event A happened, then just before event B you call this function and
2799 * pass the timestamp as the first argument, and X as the second argument.
2800 */
2801static inline void
2802wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
2803{
2804 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
2805
2806 /*
2807 * Don't re-read the value of "jiffies" every time since it may change
2808 * behind our back and break the math.
2809 */
2810 tmp_jiffies = jiffies;
2811 target_jiffies = timestamp_jiffies +
2812 msecs_to_jiffies_timeout(to_wait_ms);
2813
2814 if (time_after(target_jiffies, tmp_jiffies)) {
2815 remaining_jiffies = target_jiffies - tmp_jiffies;
2816 while (remaining_jiffies)
2817 remaining_jiffies =
2818 schedule_timeout_uninterruptible(remaining_jiffies);
2819 }
2820}
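Typical use: record jiffies when event A happens, then call this just before event B to sleep only for whatever remains of the required gap. A sketch with illustrative names (panel power sequencing is the kind of consumer this is aimed at, but none of these names come from this patch):

unsigned long panel_off_jiffies;

panel_off_jiffies = jiffies;    /* event A: panel powered off */
/* ... unrelated work of unknown duration ... */
wait_remaining_ms_from_jiffies(panel_off_jiffies, 300);
/* event B: at least 300 ms have now elapsed since power-off */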
2821
2569#endif 2822#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00c836154725..6370a761d137 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
43static __must_check int 43static __must_check int
44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly); 45 bool readonly);
46static __must_check int
47i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
48 struct i915_address_space *vm,
49 unsigned alignment,
50 bool map_and_fenceable,
51 bool nonblocking);
52static int i915_gem_phys_pwrite(struct drm_device *dev, 46static int i915_gem_phys_pwrite(struct drm_device *dev,
53 struct drm_i915_gem_object *obj, 47 struct drm_i915_gem_object *obj,
54 struct drm_i915_gem_pwrite *args, 48 struct drm_i915_gem_pwrite *args,
@@ -67,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
67static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); 61static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
68static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 62static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
69static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 63static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
64static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
70 65
71static bool cpu_cache_is_coherent(struct drm_device *dev, 66static bool cpu_cache_is_coherent(struct drm_device *dev,
72 enum i915_cache_level level) 67 enum i915_cache_level level)
@@ -204,7 +199,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
204 pinned = 0; 199 pinned = 0;
205 mutex_lock(&dev->struct_mutex); 200 mutex_lock(&dev->struct_mutex);
206 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 201 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
207 if (obj->pin_count) 202 if (i915_gem_obj_is_pinned(obj))
208 pinned += i915_gem_obj_ggtt_size(obj); 203 pinned += i915_gem_obj_ggtt_size(obj);
209 mutex_unlock(&dev->struct_mutex); 204 mutex_unlock(&dev->struct_mutex);
210 205
@@ -332,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
332 return 0; 327 return 0;
333} 328}
334 329
330/*
331 * Pins the specified object's pages and synchronizes the object with
332 * GPU accesses. Sets needs_clflush to non-zero if the caller should
333 * flush the object from the CPU cache.
334 */
335int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
336 int *needs_clflush)
337{
338 int ret;
339
340 *needs_clflush = 0;
341
342 if (!obj->base.filp)
343 return -EINVAL;
344
345 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
346 /* If we're not in the cpu read domain, set ourself into the gtt
347 * read domain and manually flush cachelines (if required). This
348 * optimizes for the case when the gpu will dirty the data
349 * anyway again before the next pread happens. */
350 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
351 obj->cache_level);
352 ret = i915_gem_object_wait_rendering(obj, true);
353 if (ret)
354 return ret;
355 }
356
357 ret = i915_gem_object_get_pages(obj);
358 if (ret)
359 return ret;
360
361 i915_gem_object_pin_pages(obj);
362
363 return ret;
364}
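Note the caller contract: a successful call returns with the object's pages pinned, so every success path must balance it. The pread conversion just below follows this shape:

ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
        return ret;
/* ... copy pages out, clflushing first when needs_clflush ... */
i915_gem_object_unpin_pages(obj);       /* balance the implicit pin */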
365
335/* Per-page copy function for the shmem pread fastpath. 366/* Per-page copy function for the shmem pread fastpath.
336 * Flushes invalid cachelines before reading the target if 367 * Flushes invalid cachelines before reading the target if
337 * needs_clflush is set. */ 368 * needs_clflush is set. */
@@ -429,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
429 460
430 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 461 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
431 462
432 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { 463 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
433 /* If we're not in the cpu read domain, set ourself into the gtt
434 * read domain and manually flush cachelines (if required). This
435 * optimizes for the case when the gpu will dirty the data
436 * anyway again before the next pread happens. */
437 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
438 ret = i915_gem_object_wait_rendering(obj, true);
439 if (ret)
440 return ret;
441 }
442
443 ret = i915_gem_object_get_pages(obj);
444 if (ret) 464 if (ret)
445 return ret; 465 return ret;
446 466
447 i915_gem_object_pin_pages(obj);
448
449 offset = args->offset; 467 offset = args->offset;
450 468
451 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 469 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
@@ -476,7 +494,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
476 494
477 mutex_unlock(&dev->struct_mutex); 495 mutex_unlock(&dev->struct_mutex);
478 496
479 if (likely(!i915_prefault_disable) && !prefaulted) { 497 if (likely(!i915.prefault_disable) && !prefaulted) {
480 ret = fault_in_multipages_writeable(user_data, remain); 498 ret = fault_in_multipages_writeable(user_data, remain);
481 /* Userspace is tricking us, but we've already clobbered 499 /* Userspace is tricking us, but we've already clobbered
482 * its pages with the prefault and promised to write the 500 * its pages with the prefault and promised to write the
@@ -492,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
492 510
493 mutex_lock(&dev->struct_mutex); 511 mutex_lock(&dev->struct_mutex);
494 512
495next_page:
496 mark_page_accessed(page);
497
498 if (ret) 513 if (ret)
499 goto out; 514 goto out;
500 515
516next_page:
501 remain -= page_length; 517 remain -= page_length;
502 user_data += page_length; 518 user_data += page_length;
503 offset += page_length; 519 offset += page_length;
@@ -599,13 +615,13 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
599 struct drm_i915_gem_pwrite *args, 615 struct drm_i915_gem_pwrite *args,
600 struct drm_file *file) 616 struct drm_file *file)
601{ 617{
602 drm_i915_private_t *dev_priv = dev->dev_private; 618 struct drm_i915_private *dev_priv = dev->dev_private;
603 ssize_t remain; 619 ssize_t remain;
604 loff_t offset, page_base; 620 loff_t offset, page_base;
605 char __user *user_data; 621 char __user *user_data;
606 int page_offset, page_length, ret; 622 int page_offset, page_length, ret;
607 623
608 ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); 624 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
609 if (ret) 625 if (ret)
610 goto out; 626 goto out;
611 627
@@ -651,7 +667,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
651 } 667 }
652 668
653out_unpin: 669out_unpin:
654 i915_gem_object_unpin(obj); 670 i915_gem_object_ggtt_unpin(obj);
655out: 671out:
656 return ret; 672 return ret;
657} 673}
@@ -677,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
677 if (needs_clflush_before) 693 if (needs_clflush_before)
678 drm_clflush_virt_range(vaddr + shmem_page_offset, 694 drm_clflush_virt_range(vaddr + shmem_page_offset,
679 page_length); 695 page_length);
680 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, 696 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
681 user_data, 697 user_data, page_length);
682 page_length);
683 if (needs_clflush_after) 698 if (needs_clflush_after)
684 drm_clflush_virt_range(vaddr + shmem_page_offset, 699 drm_clflush_virt_range(vaddr + shmem_page_offset,
685 page_length); 700 page_length);
@@ -813,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
813 828
814 mutex_lock(&dev->struct_mutex); 829 mutex_lock(&dev->struct_mutex);
815 830
816next_page:
817 set_page_dirty(page);
818 mark_page_accessed(page);
819
820 if (ret) 831 if (ret)
821 goto out; 832 goto out;
822 833
834next_page:
823 remain -= page_length; 835 remain -= page_length;
824 user_data += page_length; 836 user_data += page_length;
825 offset += page_length; 837 offset += page_length;
@@ -868,7 +880,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
868 args->size)) 880 args->size))
869 return -EFAULT; 881 return -EFAULT;
870 882
871 if (likely(!i915_prefault_disable)) { 883 if (likely(!i915.prefault_disable)) {
872 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 884 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
873 args->size); 885 args->size);
874 if (ret) 886 if (ret)
@@ -1014,7 +1026,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1014 struct timespec *timeout, 1026 struct timespec *timeout,
1015 struct drm_i915_file_private *file_priv) 1027 struct drm_i915_file_private *file_priv)
1016{ 1028{
1017 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1029 struct drm_device *dev = ring->dev;
1030 struct drm_i915_private *dev_priv = dev->dev_private;
1018 const bool irq_test_in_progress = 1031 const bool irq_test_in_progress =
1019 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1032 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1020 struct timespec before, now; 1033 struct timespec before, now;
@@ -1022,14 +1035,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1022 unsigned long timeout_expire; 1035 unsigned long timeout_expire;
1023 int ret; 1036 int ret;
1024 1037
1025 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); 1038 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
1026 1039
1027 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1040 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1028 return 0; 1041 return 0;
1029 1042
1030 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0; 1043 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
1031 1044
1032 if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) { 1045 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1033 gen6_rps_boost(dev_priv); 1046 gen6_rps_boost(dev_priv);
1034 if (file_priv) 1047 if (file_priv)
1035 mod_delayed_work(dev_priv->wq, 1048 mod_delayed_work(dev_priv->wq,
@@ -1184,7 +1197,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1184 */ 1197 */
1185static __must_check int 1198static __must_check int
1186i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, 1199i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1187 struct drm_file *file, 1200 struct drm_i915_file_private *file_priv,
1188 bool readonly) 1201 bool readonly)
1189{ 1202{
1190 struct drm_device *dev = obj->base.dev; 1203 struct drm_device *dev = obj->base.dev;
@@ -1211,7 +1224,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1211 1224
1212 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 1225 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1213 mutex_unlock(&dev->struct_mutex); 1226 mutex_unlock(&dev->struct_mutex);
1214 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv); 1227 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1215 mutex_lock(&dev->struct_mutex); 1228 mutex_lock(&dev->struct_mutex);
1216 if (ret) 1229 if (ret)
1217 return ret; 1230 return ret;
@@ -1260,7 +1273,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1260 * We will repeat the flush holding the lock in the normal manner 1273 * We will repeat the flush holding the lock in the normal manner
1261 * to catch cases where we are gazumped. 1274 * to catch cases where we are gazumped.
1262 */ 1275 */
1263 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain); 1276 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1277 file->driver_priv,
1278 !write_domain);
1264 if (ret) 1279 if (ret)
1265 goto unref; 1280 goto unref;
1266 1281
@@ -1374,7 +1389,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1374{ 1389{
1375 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); 1390 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1376 struct drm_device *dev = obj->base.dev; 1391 struct drm_device *dev = obj->base.dev;
1377 drm_i915_private_t *dev_priv = dev->dev_private; 1392 struct drm_i915_private *dev_priv = dev->dev_private;
1378 pgoff_t page_offset; 1393 pgoff_t page_offset;
1379 unsigned long pfn; 1394 unsigned long pfn;
1380 int ret = 0; 1395 int ret = 0;
@@ -1392,6 +1407,15 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1392 1407
1393 trace_i915_gem_object_fault(obj, page_offset, true, write); 1408 trace_i915_gem_object_fault(obj, page_offset, true, write);
1394 1409
1410 /* Try to flush the object off the GPU first without holding the lock.
1411 * Upon reacquiring the lock, we will perform our sanity checks and then
1412 * repeat the flush holding the lock in the normal manner to catch cases
1413 * where we are gazumped.
1414 */
1415 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1416 if (ret)
1417 goto unlock;
1418
1395 /* Access to snoopable pages through the GTT is incoherent. */ 1419 /* Access to snoopable pages through the GTT is incoherent. */
1396 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { 1420 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1397 ret = -EINVAL; 1421 ret = -EINVAL;
@@ -1399,7 +1423,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1399 } 1423 }
1400 1424
1401 /* Now bind it into the GTT if needed */ 1425 /* Now bind it into the GTT if needed */
1402 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); 1426 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1403 if (ret) 1427 if (ret)
1404 goto unlock; 1428 goto unlock;
1405 1429
@@ -1420,7 +1444,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1420 /* Finally, remap it using the new GTT offset */ 1444 /* Finally, remap it using the new GTT offset */
1421 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1445 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1422unpin: 1446unpin:
1423 i915_gem_object_unpin(obj); 1447 i915_gem_object_ggtt_unpin(obj);
1424unlock: 1448unlock:
1425 mutex_unlock(&dev->struct_mutex); 1449 mutex_unlock(&dev->struct_mutex);
1426out: 1450out:
@@ -1453,6 +1477,7 @@ out:
1453 ret = VM_FAULT_OOM; 1477 ret = VM_FAULT_OOM;
1454 break; 1478 break;
1455 case -ENOSPC: 1479 case -ENOSPC:
1480 case -EFAULT:
1456 ret = VM_FAULT_SIGBUS; 1481 ret = VM_FAULT_SIGBUS;
1457 break; 1482 break;
1458 default: 1483 default:
@@ -1501,7 +1526,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1501 if (!obj->fault_mappable) 1526 if (!obj->fault_mappable)
1502 return; 1527 return;
1503 1528
1504 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); 1529 drm_vma_node_unmap(&obj->base.vma_node,
1530 obj->base.dev->anon_inode->i_mapping);
1505 obj->fault_mappable = false; 1531 obj->fault_mappable = false;
1506} 1532}
1507 1533
@@ -1617,8 +1643,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
1617 } 1643 }
1618 1644
1619 if (obj->madv != I915_MADV_WILLNEED) { 1645 if (obj->madv != I915_MADV_WILLNEED) {
1620 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1646 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1621 ret = -EINVAL; 1647 ret = -EFAULT;
1622 goto out; 1648 goto out;
1623 } 1649 }
1624 1650
@@ -1971,8 +1997,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1971 return 0; 1997 return 0;
1972 1998
1973 if (obj->madv != I915_MADV_WILLNEED) { 1999 if (obj->madv != I915_MADV_WILLNEED) {
1974 DRM_ERROR("Attempting to obtain a purgeable object\n"); 2000 DRM_DEBUG("Attempting to obtain a purgeable object\n");
1975 return -EINVAL; 2001 return -EFAULT;
1976 } 2002 }
1977 2003
1978 BUG_ON(obj->pages_pin_count); 2004 BUG_ON(obj->pages_pin_count);
@@ -2035,13 +2061,17 @@ static void
2035i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2061i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2036{ 2062{
2037 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2063 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2038 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; 2064 struct i915_address_space *vm;
2039 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 2065 struct i915_vma *vma;
2040 2066
2041 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 2067 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2042 BUG_ON(!obj->active); 2068 BUG_ON(!obj->active);
2043 2069
2044 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2070 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2071 vma = i915_gem_obj_to_vma(obj, vm);
2072 if (vma && !list_empty(&vma->mm_list))
2073 list_move_tail(&vma->mm_list, &vm->inactive_list);
2074 }
2045 2075
2046 list_del_init(&obj->ring_list); 2076 list_del_init(&obj->ring_list);
2047 obj->ring = NULL; 2077 obj->ring = NULL;
@@ -2134,10 +2164,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2134 struct drm_i915_gem_object *obj, 2164 struct drm_i915_gem_object *obj,
2135 u32 *out_seqno) 2165 u32 *out_seqno)
2136{ 2166{
2137 drm_i915_private_t *dev_priv = ring->dev->dev_private; 2167 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2138 struct drm_i915_gem_request *request; 2168 struct drm_i915_gem_request *request;
2139 u32 request_ring_position, request_start; 2169 u32 request_ring_position, request_start;
2140 int was_empty;
2141 int ret; 2170 int ret;
2142 2171
2143 request_start = intel_ring_get_tail(ring); 2172 request_start = intel_ring_get_tail(ring);
@@ -2188,7 +2217,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2188 i915_gem_context_reference(request->ctx); 2217 i915_gem_context_reference(request->ctx);
2189 2218
2190 request->emitted_jiffies = jiffies; 2219 request->emitted_jiffies = jiffies;
2191 was_empty = list_empty(&ring->request_list);
2192 list_add_tail(&request->list, &ring->request_list); 2220 list_add_tail(&request->list, &ring->request_list);
2193 request->file_priv = NULL; 2221 request->file_priv = NULL;
2194 2222
@@ -2209,13 +2237,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2209 if (!dev_priv->ums.mm_suspended) { 2237 if (!dev_priv->ums.mm_suspended) {
2210 i915_queue_hangcheck(ring->dev); 2238 i915_queue_hangcheck(ring->dev);
2211 2239
2212 if (was_empty) { 2240 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2213 cancel_delayed_work_sync(&dev_priv->mm.idle_work); 2241 queue_delayed_work(dev_priv->wq,
2214 queue_delayed_work(dev_priv->wq, 2242 &dev_priv->mm.retire_work,
2215 &dev_priv->mm.retire_work, 2243 round_jiffies_up_relative(HZ));
2216 round_jiffies_up_relative(HZ)); 2244 intel_mark_busy(dev_priv->dev);
2217 intel_mark_busy(dev_priv->dev);
2218 }
2219 } 2245 }
2220 2246
2221 if (out_seqno) 2247 if (out_seqno)
@@ -2237,125 +2263,46 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2237 spin_unlock(&file_priv->mm.lock); 2263 spin_unlock(&file_priv->mm.lock);
2238} 2264}
2239 2265
2240static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, 2266static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2241 struct i915_address_space *vm) 2267 const struct i915_hw_context *ctx)
2242{ 2268{
2243 if (acthd >= i915_gem_obj_offset(obj, vm) && 2269 unsigned long elapsed;
2244 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2245 return true;
2246 2270
2247 return false; 2271 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2248}
2249 2272
2250static bool i915_head_inside_request(const u32 acthd_unmasked, 2273 if (ctx->hang_stats.banned)
2251 const u32 request_start, 2274 return true;
2252 const u32 request_end)
2253{
2254 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2255 2275
2256 if (request_start < request_end) { 2276 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2257 if (acthd >= request_start && acthd < request_end) 2277 if (!i915_gem_context_is_default(ctx)) {
2258 return true; 2278 DRM_DEBUG("context hanging too fast, banning!\n");
2259 } else if (request_start > request_end) {
2260 if (acthd >= request_start || acthd < request_end)
2261 return true; 2279 return true;
2262 } 2280 } else if (dev_priv->gpu_error.stop_rings == 0) {
2263 2281 DRM_ERROR("gpu hanging too fast, banning!\n");
2264 return false;
2265}
2266
2267static struct i915_address_space *
2268request_to_vm(struct drm_i915_gem_request *request)
2269{
2270 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2271 struct i915_address_space *vm;
2272
2273 vm = &dev_priv->gtt.base;
2274
2275 return vm;
2276}
2277
2278static bool i915_request_guilty(struct drm_i915_gem_request *request,
2279 const u32 acthd, bool *inside)
2280{
2281 /* There is a possibility that unmasked head address
2282 * pointing inside the ring, matches the batch_obj address range.
2283 * However this is extremely unlikely.
2284 */
2285 if (request->batch_obj) {
2286 if (i915_head_inside_object(acthd, request->batch_obj,
2287 request_to_vm(request))) {
2288 *inside = true;
2289 return true; 2282 return true;
2290 } 2283 }
2291 } 2284 }
2292 2285
2293 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2294 *inside = false;
2295 return true;
2296 }
2297
2298 return false; 2286 return false;
2299} 2287}
2300 2288
2301static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs) 2289static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2290 struct i915_hw_context *ctx,
2291 const bool guilty)
2302{ 2292{
2303 const unsigned long elapsed = get_seconds() - hs->guilty_ts; 2293 struct i915_ctx_hang_stats *hs;
2304
2305 if (hs->banned)
2306 return true;
2307
2308 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2309 DRM_ERROR("context hanging too fast, declaring banned!\n");
2310 return true;
2311 }
2312
2313 return false;
2314}
2315 2294
2316static void i915_set_reset_status(struct intel_ring_buffer *ring, 2295 if (WARN_ON(!ctx))
2317 struct drm_i915_gem_request *request, 2296 return;
2318 u32 acthd)
2319{
2320 struct i915_ctx_hang_stats *hs = NULL;
2321 bool inside, guilty;
2322 unsigned long offset = 0;
2323
2324 /* Innocent until proven guilty */
2325 guilty = false;
2326
2327 if (request->batch_obj)
2328 offset = i915_gem_obj_offset(request->batch_obj,
2329 request_to_vm(request));
2330 2297
2331 if (ring->hangcheck.action != HANGCHECK_WAIT && 2298 hs = &ctx->hang_stats;
2332 i915_request_guilty(request, acthd, &inside)) {
2333 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2334 ring->name,
2335 inside ? "inside" : "flushing",
2336 offset,
2337 request->ctx ? request->ctx->id : 0,
2338 acthd);
2339 2299
2340 guilty = true; 2300 if (guilty) {
2341 } 2301 hs->banned = i915_context_is_banned(dev_priv, ctx);
2342 2302 hs->batch_active++;
2343 /* If contexts are disabled or this is the default context, use 2303 hs->guilty_ts = get_seconds();
2344 * file_priv->reset_state 2304 } else {
2345 */ 2305 hs->batch_pending++;
2346 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2347 hs = &request->ctx->hang_stats;
2348 else if (request->file_priv)
2349 hs = &request->file_priv->hang_stats;
2350
2351 if (hs) {
2352 if (guilty) {
2353 hs->banned = i915_context_is_banned(hs);
2354 hs->batch_active++;
2355 hs->guilty_ts = get_seconds();
2356 } else {
2357 hs->batch_pending++;
2358 }
2359 } 2306 }
2360} 2307}
2361 2308
@@ -2370,19 +2317,41 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2370 kfree(request); 2317 kfree(request);
2371} 2318}
2372 2319
2373static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, 2320struct drm_i915_gem_request *
2374 struct intel_ring_buffer *ring) 2321i915_gem_find_active_request(struct intel_ring_buffer *ring)
2375{ 2322{
2376 u32 completed_seqno = ring->get_seqno(ring, false);
2377 u32 acthd = intel_ring_get_active_head(ring);
2378 struct drm_i915_gem_request *request; 2323 struct drm_i915_gem_request *request;
2324 u32 completed_seqno;
2325
2326 completed_seqno = ring->get_seqno(ring, false);
2379 2327
2380 list_for_each_entry(request, &ring->request_list, list) { 2328 list_for_each_entry(request, &ring->request_list, list) {
2381 if (i915_seqno_passed(completed_seqno, request->seqno)) 2329 if (i915_seqno_passed(completed_seqno, request->seqno))
2382 continue; 2330 continue;
2383 2331
2384 i915_set_reset_status(ring, request, acthd); 2332 return request;
2385 } 2333 }
2334
2335 return NULL;
2336}
2337
2338static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2339 struct intel_ring_buffer *ring)
2340{
2341 struct drm_i915_gem_request *request;
2342 bool ring_hung;
2343
2344 request = i915_gem_find_active_request(ring);
2345
2346 if (request == NULL)
2347 return;
2348
2349 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2350
2351 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2352
2353 list_for_each_entry_continue(request, &ring->request_list, list)
2354 i915_set_reset_status(dev_priv, request->ctx, false);
2386} 2355}
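Guilt assignment after a reset now keys off the first unretired request, i.e. whatever the ring was chewing on when hangcheck fired. A timeline with hypothetical seqnos and completed_seqno = 12:

/*  seqno:   10   11   12 | 13        14   15
 *           --completed--| active    --pending--
 *
 * i915_gem_find_active_request() returns request 13. Its context is
 * guilty iff hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; requests
 * 14 and 15 are innocent and only bump hang_stats.batch_pending.
 */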
2387 2356
2388static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2357static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
@@ -2456,13 +2425,15 @@ void i915_gem_reset(struct drm_device *dev)
2456 2425
2457 i915_gem_cleanup_ringbuffer(dev); 2426 i915_gem_cleanup_ringbuffer(dev);
2458 2427
2428 i915_gem_context_reset(dev);
2429
2459 i915_gem_restore_fences(dev); 2430 i915_gem_restore_fences(dev);
2460} 2431}
2461 2432
2462/** 2433/**
2463 * This function clears the request list as sequence numbers are passed. 2434 * This function clears the request list as sequence numbers are passed.
2464 */ 2435 */
2465void 2436static void
2466i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 2437i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2467{ 2438{
2468 uint32_t seqno; 2439 uint32_t seqno;
@@ -2474,6 +2445,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2474 2445
2475 seqno = ring->get_seqno(ring, true); 2446 seqno = ring->get_seqno(ring, true);
2476 2447
2448 /* Move any buffers on the active list that are no longer referenced
2449 * by the ringbuffer to the flushing/inactive lists as appropriate,
2450 * before we free the context associated with the requests.
2451 */
2452 while (!list_empty(&ring->active_list)) {
2453 struct drm_i915_gem_object *obj;
2454
2455 obj = list_first_entry(&ring->active_list,
2456 struct drm_i915_gem_object,
2457 ring_list);
2458
2459 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2460 break;
2461
2462 i915_gem_object_move_to_inactive(obj);
2463 }
2464
2465
2477 while (!list_empty(&ring->request_list)) { 2466 while (!list_empty(&ring->request_list)) {
2478 struct drm_i915_gem_request *request; 2467 struct drm_i915_gem_request *request;
2479 2468
@@ -2495,22 +2484,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2495 i915_gem_free_request(request); 2484 i915_gem_free_request(request);
2496 } 2485 }
2497 2486
2498 /* Move any buffers on the active list that are no longer referenced
2499 * by the ringbuffer to the flushing/inactive lists as appropriate.
2500 */
2501 while (!list_empty(&ring->active_list)) {
2502 struct drm_i915_gem_object *obj;
2503
2504 obj = list_first_entry(&ring->active_list,
2505 struct drm_i915_gem_object,
2506 ring_list);
2507
2508 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2509 break;
2510
2511 i915_gem_object_move_to_inactive(obj);
2512 }
2513
2514 if (unlikely(ring->trace_irq_seqno && 2487 if (unlikely(ring->trace_irq_seqno &&
2515 i915_seqno_passed(seqno, ring->trace_irq_seqno))) { 2488 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2516 ring->irq_put(ring); 2489 ring->irq_put(ring);
@@ -2523,7 +2496,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2523bool 2496bool
2524i915_gem_retire_requests(struct drm_device *dev) 2497i915_gem_retire_requests(struct drm_device *dev)
2525{ 2498{
2526 drm_i915_private_t *dev_priv = dev->dev_private; 2499 struct drm_i915_private *dev_priv = dev->dev_private;
2527 struct intel_ring_buffer *ring; 2500 struct intel_ring_buffer *ring;
2528 bool idle = true; 2501 bool idle = true;
2529 int i; 2502 int i;
@@ -2615,7 +2588,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2615int 2588int
2616i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2589i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2617{ 2590{
2618 drm_i915_private_t *dev_priv = dev->dev_private; 2591 struct drm_i915_private *dev_priv = dev->dev_private;
2619 struct drm_i915_gem_wait *args = data; 2592 struct drm_i915_gem_wait *args = data;
2620 struct drm_i915_gem_object *obj; 2593 struct drm_i915_gem_object *obj;
2621 struct intel_ring_buffer *ring = NULL; 2594 struct intel_ring_buffer *ring = NULL;
@@ -2750,22 +2723,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2750int i915_vma_unbind(struct i915_vma *vma) 2723int i915_vma_unbind(struct i915_vma *vma)
2751{ 2724{
2752 struct drm_i915_gem_object *obj = vma->obj; 2725 struct drm_i915_gem_object *obj = vma->obj;
2753 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2726 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2754 int ret; 2727 int ret;
2755 2728
2756 /* For now we only ever use 1 vma per object */
2757 WARN_ON(!list_is_singular(&obj->vma_list));
2758
2759 if (list_empty(&vma->vma_link)) 2729 if (list_empty(&vma->vma_link))
2760 return 0; 2730 return 0;
2761 2731
2762 if (!drm_mm_node_allocated(&vma->node)) { 2732 if (!drm_mm_node_allocated(&vma->node)) {
2763 i915_gem_vma_destroy(vma); 2733 i915_gem_vma_destroy(vma);
2764
2765 return 0; 2734 return 0;
2766 } 2735 }
2767 2736
2768 if (obj->pin_count) 2737 if (vma->pin_count)
2769 return -EBUSY; 2738 return -EBUSY;
2770 2739
2771 BUG_ON(obj->pages == NULL); 2740 BUG_ON(obj->pages == NULL);
@@ -2787,15 +2756,11 @@ int i915_vma_unbind(struct i915_vma *vma)
2787 2756
2788 trace_i915_vma_unbind(vma); 2757 trace_i915_vma_unbind(vma);
2789 2758
2790 if (obj->has_global_gtt_mapping) 2759 vma->unbind_vma(vma);
2791 i915_gem_gtt_unbind_object(obj); 2760
2792 if (obj->has_aliasing_ppgtt_mapping) {
2793 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2794 obj->has_aliasing_ppgtt_mapping = 0;
2795 }
2796 i915_gem_gtt_finish_object(obj); 2761 i915_gem_gtt_finish_object(obj);
2797 2762
2798 list_del(&vma->mm_list); 2763 list_del_init(&vma->mm_list);
2799 /* Avoid an unnecessary call to unbind on rebind. */ 2764 /* Avoid an unnecessary call to unbind on rebind. */
2800 if (i915_is_ggtt(vma->vm)) 2765 if (i915_is_ggtt(vma->vm))
2801 obj->map_and_fenceable = true; 2766 obj->map_and_fenceable = true;
@@ -2817,35 +2782,15 @@ int i915_vma_unbind(struct i915_vma *vma)
2817 return 0; 2782 return 0;
2818} 2783}
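Unbinding no longer special-cases global-GTT vs. aliasing-PPGTT mappings; each VMA carries its own bind/unbind ops. The interface implied by the call sites in this patch (the definitions live in i915_gem_gtt.c, not in this hunk, so the member types here are an inference):

struct i915_vma {
        /* ... */
        void (*bind_vma)(struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags);            /* GLOBAL_BIND or 0 */
        void (*unbind_vma)(struct i915_vma *vma);
};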
2819 2784
2820/**
2821 * Unbinds an object from the global GTT aperture.
2822 */
2823int
2824i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2825{
2826 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2827 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2828
2829 if (!i915_gem_obj_ggtt_bound(obj))
2830 return 0;
2831
2832 if (obj->pin_count)
2833 return -EBUSY;
2834
2835 BUG_ON(obj->pages == NULL);
2836
2837 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2838}
2839
2840int i915_gpu_idle(struct drm_device *dev) 2785int i915_gpu_idle(struct drm_device *dev)
2841{ 2786{
2842 drm_i915_private_t *dev_priv = dev->dev_private; 2787 struct drm_i915_private *dev_priv = dev->dev_private;
2843 struct intel_ring_buffer *ring; 2788 struct intel_ring_buffer *ring;
2844 int ret, i; 2789 int ret, i;
2845 2790
2846 /* Flush everything onto the inactive list. */ 2791 /* Flush everything onto the inactive list. */
2847 for_each_ring(ring, dev_priv, i) { 2792 for_each_ring(ring, dev_priv, i) {
2848 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); 2793 ret = i915_switch_context(ring, NULL, ring->default_context);
2849 if (ret) 2794 if (ret)
2850 return ret; 2795 return ret;
2851 2796
@@ -2860,7 +2805,7 @@ int i915_gpu_idle(struct drm_device *dev)
2860static void i965_write_fence_reg(struct drm_device *dev, int reg, 2805static void i965_write_fence_reg(struct drm_device *dev, int reg,
2861 struct drm_i915_gem_object *obj) 2806 struct drm_i915_gem_object *obj)
2862{ 2807{
2863 drm_i915_private_t *dev_priv = dev->dev_private; 2808 struct drm_i915_private *dev_priv = dev->dev_private;
2864 int fence_reg; 2809 int fence_reg;
2865 int fence_pitch_shift; 2810 int fence_pitch_shift;
2866 2811
@@ -2912,7 +2857,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2912static void i915_write_fence_reg(struct drm_device *dev, int reg, 2857static void i915_write_fence_reg(struct drm_device *dev, int reg,
2913 struct drm_i915_gem_object *obj) 2858 struct drm_i915_gem_object *obj)
2914{ 2859{
2915 drm_i915_private_t *dev_priv = dev->dev_private; 2860 struct drm_i915_private *dev_priv = dev->dev_private;
2916 u32 val; 2861 u32 val;
2917 2862
2918 if (obj) { 2863 if (obj) {
@@ -2956,7 +2901,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2956static void i830_write_fence_reg(struct drm_device *dev, int reg, 2901static void i830_write_fence_reg(struct drm_device *dev, int reg,
2957 struct drm_i915_gem_object *obj) 2902 struct drm_i915_gem_object *obj)
2958{ 2903{
2959 drm_i915_private_t *dev_priv = dev->dev_private; 2904 struct drm_i915_private *dev_priv = dev->dev_private;
2960 uint32_t val; 2905 uint32_t val;
2961 2906
2962 if (obj) { 2907 if (obj) {
@@ -3259,18 +3204,17 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3259/** 3204/**
3260 * Finds free space in the GTT aperture and binds the object there. 3205 * Finds free space in the GTT aperture and binds the object there.
3261 */ 3206 */
3262static int 3207static struct i915_vma *
3263i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3208i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3264 struct i915_address_space *vm, 3209 struct i915_address_space *vm,
3265 unsigned alignment, 3210 unsigned alignment,
3266 bool map_and_fenceable, 3211 unsigned flags)
3267 bool nonblocking)
3268{ 3212{
3269 struct drm_device *dev = obj->base.dev; 3213 struct drm_device *dev = obj->base.dev;
3270 drm_i915_private_t *dev_priv = dev->dev_private; 3214 struct drm_i915_private *dev_priv = dev->dev_private;
3271 u32 size, fence_size, fence_alignment, unfenced_alignment; 3215 u32 size, fence_size, fence_alignment, unfenced_alignment;
3272 size_t gtt_max = 3216 size_t gtt_max =
3273 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; 3217 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3274 struct i915_vma *vma; 3218 struct i915_vma *vma;
3275 int ret; 3219 int ret;
3276 3220
@@ -3282,57 +3226,49 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3282 obj->tiling_mode, true); 3226 obj->tiling_mode, true);
3283 unfenced_alignment = 3227 unfenced_alignment =
3284 i915_gem_get_gtt_alignment(dev, 3228 i915_gem_get_gtt_alignment(dev,
3285 obj->base.size, 3229 obj->base.size,
3286 obj->tiling_mode, false); 3230 obj->tiling_mode, false);
3287 3231
3288 if (alignment == 0) 3232 if (alignment == 0)
3289 alignment = map_and_fenceable ? fence_alignment : 3233 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3290 unfenced_alignment; 3234 unfenced_alignment;
3291 if (map_and_fenceable && alignment & (fence_alignment - 1)) { 3235 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3292 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 3236 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3293 return -EINVAL; 3237 return ERR_PTR(-EINVAL);
3294 } 3238 }
3295 3239
3296 size = map_and_fenceable ? fence_size : obj->base.size; 3240 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3297 3241
3298 /* If the object is bigger than the entire aperture, reject it early 3242 /* If the object is bigger than the entire aperture, reject it early
3299 * before evicting everything in a vain attempt to find space. 3243 * before evicting everything in a vain attempt to find space.
3300 */ 3244 */
3301 if (obj->base.size > gtt_max) { 3245 if (obj->base.size > gtt_max) {
3302 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3246 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3303 obj->base.size, 3247 obj->base.size,
3304 map_and_fenceable ? "mappable" : "total", 3248 flags & PIN_MAPPABLE ? "mappable" : "total",
3305 gtt_max); 3249 gtt_max);
3306 return -E2BIG; 3250 return ERR_PTR(-E2BIG);
3307 } 3251 }
3308 3252
3309 ret = i915_gem_object_get_pages(obj); 3253 ret = i915_gem_object_get_pages(obj);
3310 if (ret) 3254 if (ret)
3311 return ret; 3255 return ERR_PTR(ret);
3312 3256
3313 i915_gem_object_pin_pages(obj); 3257 i915_gem_object_pin_pages(obj);
3314 3258
3315 BUG_ON(!i915_is_ggtt(vm));
3316
3317 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 3259 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3318 if (IS_ERR(vma)) { 3260 if (IS_ERR(vma))
3319 ret = PTR_ERR(vma);
3320 goto err_unpin; 3261 goto err_unpin;
3321 }
3322
3323 /* For now we only ever use 1 vma per object */
3324 WARN_ON(!list_is_singular(&obj->vma_list));
3325 3262
3326search_free: 3263search_free:
3327 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3264 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3328 size, alignment, 3265 size, alignment,
3329 obj->cache_level, 0, gtt_max, 3266 obj->cache_level, 0, gtt_max,
3330 DRM_MM_SEARCH_DEFAULT); 3267 DRM_MM_SEARCH_DEFAULT,
3268 DRM_MM_CREATE_DEFAULT);
3331 if (ret) { 3269 if (ret) {
3332 ret = i915_gem_evict_something(dev, vm, size, alignment, 3270 ret = i915_gem_evict_something(dev, vm, size, alignment,
3333 obj->cache_level, 3271 obj->cache_level, flags);
3334 map_and_fenceable,
3335 nonblocking);
3336 if (ret == 0) 3272 if (ret == 0)
3337 goto search_free; 3273 goto search_free;
3338 3274
@@ -3363,19 +3299,23 @@ search_free:
3363 obj->map_and_fenceable = mappable && fenceable; 3299 obj->map_and_fenceable = mappable && fenceable;
3364 } 3300 }
3365 3301
3366 WARN_ON(map_and_fenceable && !obj->map_and_fenceable); 3302 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3303
3304 trace_i915_vma_bind(vma, flags);
3305 vma->bind_vma(vma, obj->cache_level,
3306 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3367 3307
3368 trace_i915_vma_bind(vma, map_and_fenceable);
3369 i915_gem_verify_gtt(dev); 3308 i915_gem_verify_gtt(dev);
3370 return 0; 3309 return vma;
3371 3310
3372err_remove_node: 3311err_remove_node:
3373 drm_mm_remove_node(&vma->node); 3312 drm_mm_remove_node(&vma->node);
3374err_free_vma: 3313err_free_vma:
3375 i915_gem_vma_destroy(vma); 3314 i915_gem_vma_destroy(vma);
3315 vma = ERR_PTR(ret);
3376err_unpin: 3316err_unpin:
3377 i915_gem_object_unpin_pages(obj); 3317 i915_gem_object_unpin_pages(obj);
3378 return ret; 3318 return vma;
3379} 3319}
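bind_to_vm now hands back the VMA itself rather than an int, using the standard kernel ERR_PTR encoding so callers get a valid pointer or an errno in one return value, exactly as i915_gem_object_pin() consumes it below:

vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
        return PTR_ERR(vma);    /* -EINVAL, -E2BIG, -ENOSPC, ... */
/* vma->node is allocated and the VMA is bound from here on */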
3380 3320
3381bool 3321bool
@@ -3470,7 +3410,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3470int 3410int
3471i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3411i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3472{ 3412{
3473 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 3413 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3474 uint32_t old_write_domain, old_read_domains; 3414 uint32_t old_write_domain, old_read_domains;
3475 int ret; 3415 int ret;
3476 3416
@@ -3528,25 +3468,22 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3528 enum i915_cache_level cache_level) 3468 enum i915_cache_level cache_level)
3529{ 3469{
3530 struct drm_device *dev = obj->base.dev; 3470 struct drm_device *dev = obj->base.dev;
3531 drm_i915_private_t *dev_priv = dev->dev_private; 3471 struct i915_vma *vma, *next;
3532 struct i915_vma *vma;
3533 int ret; 3472 int ret;
3534 3473
3535 if (obj->cache_level == cache_level) 3474 if (obj->cache_level == cache_level)
3536 return 0; 3475 return 0;
3537 3476
3538 if (obj->pin_count) { 3477 if (i915_gem_obj_is_pinned(obj)) {
3539 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3478 DRM_DEBUG("can not change the cache level of pinned objects\n");
3540 return -EBUSY; 3479 return -EBUSY;
3541 } 3480 }
3542 3481
3543 list_for_each_entry(vma, &obj->vma_list, vma_link) { 3482 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3544 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { 3483 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3545 ret = i915_vma_unbind(vma); 3484 ret = i915_vma_unbind(vma);
3546 if (ret) 3485 if (ret)
3547 return ret; 3486 return ret;
3548
3549 break;
3550 } 3487 }
3551 } 3488 }
3552 3489
@@ -3567,11 +3504,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3567 return ret; 3504 return ret;
3568 } 3505 }
3569 3506
3570 if (obj->has_global_gtt_mapping) 3507 list_for_each_entry(vma, &obj->vma_list, vma_link)
3571 i915_gem_gtt_bind_object(obj, cache_level); 3508 if (drm_mm_node_allocated(&vma->node))
3572 if (obj->has_aliasing_ppgtt_mapping) 3509 vma->bind_vma(vma, cache_level,
3573 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3510 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3574 obj, cache_level);
3575 } 3511 }
3576 3512
3577 list_for_each_entry(vma, &obj->vma_list, vma_link) 3513 list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3695,7 +3631,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
3695 * subtracting the potential reference by the user, any pin_count 3631 * subtracting the potential reference by the user, any pin_count
3696 * remains, it must be due to another use by the display engine. 3632 * remains, it must be due to another use by the display engine.
3697 */ 3633 */
3698 return obj->pin_count - !!obj->user_pin_count; 3634 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3699} 3635}
3700 3636
3701/* 3637/*
@@ -3740,7 +3676,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3740 * (e.g. libkms for the bootup splash), we have to ensure that we 3676 * (e.g. libkms for the bootup splash), we have to ensure that we
3741 * always use map_and_fenceable for all scanout buffers. 3677 * always use map_and_fenceable for all scanout buffers.
3742 */ 3678 */
3743 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); 3679 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3744 if (ret) 3680 if (ret)
3745 goto err_unpin_display; 3681 goto err_unpin_display;
3746 3682
@@ -3769,7 +3705,7 @@ err_unpin_display:
3769void 3705void
3770i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) 3706i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3771{ 3707{
3772 i915_gem_object_unpin(obj); 3708 i915_gem_object_ggtt_unpin(obj);
3773 obj->pin_display = is_pin_display(obj); 3709 obj->pin_display = is_pin_display(obj);
3774} 3710}
3775 3711
@@ -3896,65 +3832,63 @@ int
3896i915_gem_object_pin(struct drm_i915_gem_object *obj, 3832i915_gem_object_pin(struct drm_i915_gem_object *obj,
3897 struct i915_address_space *vm, 3833 struct i915_address_space *vm,
3898 uint32_t alignment, 3834 uint32_t alignment,
3899 bool map_and_fenceable, 3835 unsigned flags)
3900 bool nonblocking)
3901{ 3836{
3902 struct i915_vma *vma; 3837 struct i915_vma *vma;
3903 int ret; 3838 int ret;
3904 3839
3905 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3840 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3906 return -EBUSY; 3841 return -EINVAL;
3907
3908 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3909 3842
3910 vma = i915_gem_obj_to_vma(obj, vm); 3843 vma = i915_gem_obj_to_vma(obj, vm);
3911
3912 if (vma) { 3844 if (vma) {
3845 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3846 return -EBUSY;
3847
3913 if ((alignment && 3848 if ((alignment &&
3914 vma->node.start & (alignment - 1)) || 3849 vma->node.start & (alignment - 1)) ||
3915 (map_and_fenceable && !obj->map_and_fenceable)) { 3850 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3916 WARN(obj->pin_count, 3851 WARN(vma->pin_count,
3917 "bo is already pinned with incorrect alignment:" 3852 "bo is already pinned with incorrect alignment:"
3918 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 3853 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3919 " obj->map_and_fenceable=%d\n", 3854 " obj->map_and_fenceable=%d\n",
3920 i915_gem_obj_offset(obj, vm), alignment, 3855 i915_gem_obj_offset(obj, vm), alignment,
3921 map_and_fenceable, 3856 flags & PIN_MAPPABLE,
3922 obj->map_and_fenceable); 3857 obj->map_and_fenceable);
3923 ret = i915_vma_unbind(vma); 3858 ret = i915_vma_unbind(vma);
3924 if (ret) 3859 if (ret)
3925 return ret; 3860 return ret;
3861
3862 vma = NULL;
3926 } 3863 }
3927 } 3864 }
3928 3865
3929 if (!i915_gem_obj_bound(obj, vm)) { 3866 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3930 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3867 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3931 3868 if (IS_ERR(vma))
3932 ret = i915_gem_object_bind_to_vm(obj, vm, alignment, 3869 return PTR_ERR(vma);
3933 map_and_fenceable,
3934 nonblocking);
3935 if (ret)
3936 return ret;
3937
3938 if (!dev_priv->mm.aliasing_ppgtt)
3939 i915_gem_gtt_bind_object(obj, obj->cache_level);
3940 } 3870 }
3941 3871
3942 if (!obj->has_global_gtt_mapping && map_and_fenceable) 3872 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3943 i915_gem_gtt_bind_object(obj, obj->cache_level); 3873 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
3944 3874
3945 obj->pin_count++; 3875 vma->pin_count++;
3946 obj->pin_mappable |= map_and_fenceable; 3876 if (flags & PIN_MAPPABLE)
3877 obj->pin_mappable |= true;
3947 3878
3948 return 0; 3879 return 0;
3949} 3880}
3950 3881
3951void 3882void
3952i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3883i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3953{ 3884{
3954 BUG_ON(obj->pin_count == 0); 3885 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3955 BUG_ON(!i915_gem_obj_bound_any(obj));
3956 3886
3957 if (--obj->pin_count == 0) 3887 BUG_ON(!vma);
3888 BUG_ON(vma->pin_count == 0);
3889 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3890
3891 if (--vma->pin_count == 0)
3958 obj->pin_mappable = false; 3892 obj->pin_mappable = false;
3959} 3893}
3960 3894
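
The pin interface above now takes a single flags bitmask instead of the old (map_and_fenceable, nonblocking) bool pair, and pin counting moves from the object onto the vma. A minimal standalone sketch of that bool-pair-to-bitmask pattern; the PIN_* values here mirror the i915 names but are redefined locally as stand-ins, not taken from the kernel headers:

	#include <stdio.h>

	#define PIN_MAPPABLE (1 << 0)
	#define PIN_NONBLOCK (1 << 1)
	#define PIN_GLOBAL   (1 << 2)

	static int object_pin(unsigned int flags)
	{
		/* One argument carries what used to be two bools, and new
		 * requirements (PIN_GLOBAL) slot in without touching every
		 * call site. */
		if (flags & PIN_MAPPABLE)
			printf("bind within the mappable aperture\n");
		if (flags & PIN_GLOBAL)
			printf("also create a global GTT binding\n");
		if (flags & PIN_NONBLOCK)
			printf("fail rather than evict and wait\n");
		return 0;
	}

	int main(void)
	{
		/* the old style was object_pin(obj, alignment, true, false) */
		return object_pin(PIN_MAPPABLE | PIN_GLOBAL);
	}

The gain shows at the call sites: i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE) reads unambiguously, whereas the old pin(obj, alignment, true, false) required checking the prototype.
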
@@ -3966,6 +3900,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3966 struct drm_i915_gem_object *obj; 3900 struct drm_i915_gem_object *obj;
3967 int ret; 3901 int ret;
3968 3902
3903 if (INTEL_INFO(dev)->gen >= 6)
3904 return -ENODEV;
3905
3969 ret = i915_mutex_lock_interruptible(dev); 3906 ret = i915_mutex_lock_interruptible(dev);
3970 if (ret) 3907 if (ret)
3971 return ret; 3908 return ret;
@@ -3977,13 +3914,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3977 } 3914 }
3978 3915
3979 if (obj->madv != I915_MADV_WILLNEED) { 3916 if (obj->madv != I915_MADV_WILLNEED) {
3980 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3917 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
3981 ret = -EINVAL; 3918 ret = -EFAULT;
3982 goto out; 3919 goto out;
3983 } 3920 }
3984 3921
3985 if (obj->pin_filp != NULL && obj->pin_filp != file) { 3922 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3986 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3923 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
3987 args->handle); 3924 args->handle);
3988 ret = -EINVAL; 3925 ret = -EINVAL;
3989 goto out; 3926 goto out;
@@ -3995,7 +3932,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3995 } 3932 }
3996 3933
3997 if (obj->user_pin_count == 0) { 3934 if (obj->user_pin_count == 0) {
3998 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); 3935 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
3999 if (ret) 3936 if (ret)
4000 goto out; 3937 goto out;
4001 } 3938 }
@@ -4030,7 +3967,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4030 } 3967 }
4031 3968
4032 if (obj->pin_filp != file) { 3969 if (obj->pin_filp != file) {
4033 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 3970 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4034 args->handle); 3971 args->handle);
4035 ret = -EINVAL; 3972 ret = -EINVAL;
4036 goto out; 3973 goto out;
@@ -4038,7 +3975,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4038 obj->user_pin_count--; 3975 obj->user_pin_count--;
4039 if (obj->user_pin_count == 0) { 3976 if (obj->user_pin_count == 0) {
4040 obj->pin_filp = NULL; 3977 obj->pin_filp = NULL;
4041 i915_gem_object_unpin(obj); 3978 i915_gem_object_ggtt_unpin(obj);
4042 } 3979 }
4043 3980
4044out: 3981out:
@@ -4118,7 +4055,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4118 goto unlock; 4055 goto unlock;
4119 } 4056 }
4120 4057
4121 if (obj->pin_count) { 4058 if (i915_gem_obj_is_pinned(obj)) {
4122 ret = -EINVAL; 4059 ret = -EINVAL;
4123 goto out; 4060 goto out;
4124 } 4061 }
@@ -4219,7 +4156,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4219{ 4156{
4220 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4157 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4221 struct drm_device *dev = obj->base.dev; 4158 struct drm_device *dev = obj->base.dev;
4222 drm_i915_private_t *dev_priv = dev->dev_private; 4159 struct drm_i915_private *dev_priv = dev->dev_private;
4223 struct i915_vma *vma, *next; 4160 struct i915_vma *vma, *next;
4224 4161
4225 intel_runtime_pm_get(dev_priv); 4162 intel_runtime_pm_get(dev_priv);
@@ -4229,12 +4166,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4229 if (obj->phys_obj) 4166 if (obj->phys_obj)
4230 i915_gem_detach_phys_object(dev, obj); 4167 i915_gem_detach_phys_object(dev, obj);
4231 4168
4232 obj->pin_count = 0;
4233 /* NB: 0 or 1 elements */
4234 WARN_ON(!list_empty(&obj->vma_list) &&
4235 !list_is_singular(&obj->vma_list));
4236 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4169 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4237 int ret = i915_vma_unbind(vma); 4170 int ret;
4171
4172 vma->pin_count = 0;
4173 ret = i915_vma_unbind(vma);
4238 if (WARN_ON(ret == -ERESTARTSYS)) { 4174 if (WARN_ON(ret == -ERESTARTSYS)) {
4239 bool was_interruptible; 4175 bool was_interruptible;
4240 4176
@@ -4283,41 +4219,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4283 return NULL; 4219 return NULL;
4284} 4220}
4285 4221
4286static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4287 struct i915_address_space *vm)
4288{
4289 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4290 if (vma == NULL)
4291 return ERR_PTR(-ENOMEM);
4292
4293 INIT_LIST_HEAD(&vma->vma_link);
4294 INIT_LIST_HEAD(&vma->mm_list);
4295 INIT_LIST_HEAD(&vma->exec_list);
4296 vma->vm = vm;
4297 vma->obj = obj;
4298
4299 /* Keep GGTT vmas first to make debug easier */
4300 if (i915_is_ggtt(vm))
4301 list_add(&vma->vma_link, &obj->vma_list);
4302 else
4303 list_add_tail(&vma->vma_link, &obj->vma_list);
4304
4305 return vma;
4306}
4307
4308struct i915_vma *
4309i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4310 struct i915_address_space *vm)
4311{
4312 struct i915_vma *vma;
4313
4314 vma = i915_gem_obj_to_vma(obj, vm);
4315 if (!vma)
4316 vma = __i915_gem_vma_create(obj, vm);
4317
4318 return vma;
4319}
4320
4321void i915_gem_vma_destroy(struct i915_vma *vma) 4222void i915_gem_vma_destroy(struct i915_vma *vma)
4322{ 4223{
4323 WARN_ON(vma->node.allocated); 4224 WARN_ON(vma->node.allocated);
@@ -4334,7 +4235,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
4334int 4235int
4335i915_gem_suspend(struct drm_device *dev) 4236i915_gem_suspend(struct drm_device *dev)
4336{ 4237{
4337 drm_i915_private_t *dev_priv = dev->dev_private; 4238 struct drm_i915_private *dev_priv = dev->dev_private;
4338 int ret = 0; 4239 int ret = 0;
4339 4240
4340 mutex_lock(&dev->struct_mutex); 4241 mutex_lock(&dev->struct_mutex);
@@ -4376,7 +4277,7 @@ err:
4376int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) 4277int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4377{ 4278{
4378 struct drm_device *dev = ring->dev; 4279 struct drm_device *dev = ring->dev;
4379 drm_i915_private_t *dev_priv = dev->dev_private; 4280 struct drm_i915_private *dev_priv = dev->dev_private;
4380 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); 4281 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4381 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; 4282 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4382 int i, ret; 4283 int i, ret;
@@ -4406,7 +4307,7 @@ int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4406 4307
4407void i915_gem_init_swizzling(struct drm_device *dev) 4308void i915_gem_init_swizzling(struct drm_device *dev)
4408{ 4309{
4409 drm_i915_private_t *dev_priv = dev->dev_private; 4310 struct drm_i915_private *dev_priv = dev->dev_private;
4410 4311
4411 if (INTEL_INFO(dev)->gen < 5 || 4312 if (INTEL_INFO(dev)->gen < 5 ||
4412 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4313 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4494,7 +4395,7 @@ cleanup_render_ring:
4494int 4395int
4495i915_gem_init_hw(struct drm_device *dev) 4396i915_gem_init_hw(struct drm_device *dev)
4496{ 4397{
4497 drm_i915_private_t *dev_priv = dev->dev_private; 4398 struct drm_i915_private *dev_priv = dev->dev_private;
4498 int ret, i; 4399 int ret, i;
4499 4400
4500 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4401 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4508,9 +4409,15 @@ i915_gem_init_hw(struct drm_device *dev)
4508 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4409 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4509 4410
4510 if (HAS_PCH_NOP(dev)) { 4411 if (HAS_PCH_NOP(dev)) {
4511 u32 temp = I915_READ(GEN7_MSG_CTL); 4412 if (IS_IVYBRIDGE(dev)) {
4512 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4413 u32 temp = I915_READ(GEN7_MSG_CTL);
4513 I915_WRITE(GEN7_MSG_CTL, temp); 4414 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4415 I915_WRITE(GEN7_MSG_CTL, temp);
4416 } else if (INTEL_INFO(dev)->gen >= 7) {
4417 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4418 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4419 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4420 }
4514 } 4421 }
4515 4422
4516 i915_gem_init_swizzling(dev); 4423 i915_gem_init_swizzling(dev);
@@ -4523,25 +4430,23 @@ i915_gem_init_hw(struct drm_device *dev)
4523 i915_gem_l3_remap(&dev_priv->ring[RCS], i); 4430 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4524 4431
4525 /* 4432 /*
4526 * XXX: There was some w/a described somewhere suggesting loading 4433 * XXX: Contexts should only be initialized once. Doing a switch to the
4527 * contexts before PPGTT. 4434 * default context, however, is something we'd like to do after
4435 * reset or thaw (the latter may not actually be necessary for HW, but
4436 * goes with our code better). Context switching requires rings (for
4437 * the do_switch), but must happen before enabling PPGTT. So don't move this.
4528 */ 4438 */
4529 ret = i915_gem_context_init(dev); 4439 ret = i915_gem_context_enable(dev_priv);
4530 if (ret) { 4440 if (ret) {
4531 i915_gem_cleanup_ringbuffer(dev); 4441 DRM_ERROR("Context enable failed %d\n", ret);
4532 DRM_ERROR("Context initialization failed %d\n", ret); 4442 goto err_out;
4533 return ret;
4534 }
4535
4536 if (dev_priv->mm.aliasing_ppgtt) {
4537 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4538 if (ret) {
4539 i915_gem_cleanup_aliasing_ppgtt(dev);
4540 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4541 }
4542 } 4443 }
4543 4444
4544 return 0; 4445 return 0;
4446
4447err_out:
4448 i915_gem_cleanup_ringbuffer(dev);
4449 return ret;
4545} 4450}
4546 4451
4547int i915_gem_init(struct drm_device *dev) 4452int i915_gem_init(struct drm_device *dev)
@@ -4560,10 +4465,18 @@ int i915_gem_init(struct drm_device *dev)
4560 4465
4561 i915_gem_init_global_gtt(dev); 4466 i915_gem_init_global_gtt(dev);
4562 4467
4468 ret = i915_gem_context_init(dev);
4469 if (ret) {
4470 mutex_unlock(&dev->struct_mutex);
4471 return ret;
4472 }
4473
4563 ret = i915_gem_init_hw(dev); 4474 ret = i915_gem_init_hw(dev);
4564 mutex_unlock(&dev->struct_mutex); 4475 mutex_unlock(&dev->struct_mutex);
4565 if (ret) { 4476 if (ret) {
4566 i915_gem_cleanup_aliasing_ppgtt(dev); 4477 WARN_ON(dev_priv->mm.aliasing_ppgtt);
4478 i915_gem_context_fini(dev);
4479 drm_mm_takedown(&dev_priv->gtt.base.mm);
4567 return ret; 4480 return ret;
4568 } 4481 }
4569 4482
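
With the reordering above, context initialization happens once in i915_gem_init() and only the context switch is redone in i915_gem_init_hw(), so a failure in the later step has to unwind the earlier one. A generic, compilable sketch of that init-then-unwind idiom, with hypothetical function names:

	#include <errno.h>

	static int context_init(void) { return 0; }
	static void context_fini(void) { }
	static int hw_init(void) { return -EIO; }	/* pretend the later step fails */

	static int gem_init(void)
	{
		int ret = context_init();	/* software state first */
		if (ret)
			return ret;

		ret = hw_init();		/* then touch the hardware */
		if (ret) {
			context_fini();		/* unwind in reverse order */
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		return gem_init() ? 1 : 0;
	}
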
@@ -4576,7 +4489,7 @@ int i915_gem_init(struct drm_device *dev)
4576void 4489void
4577i915_gem_cleanup_ringbuffer(struct drm_device *dev) 4490i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4578{ 4491{
4579 drm_i915_private_t *dev_priv = dev->dev_private; 4492 struct drm_i915_private *dev_priv = dev->dev_private;
4580 struct intel_ring_buffer *ring; 4493 struct intel_ring_buffer *ring;
4581 int i; 4494 int i;
4582 4495
@@ -4658,20 +4571,22 @@ init_ring_lists(struct intel_ring_buffer *ring)
4658 INIT_LIST_HEAD(&ring->request_list); 4571 INIT_LIST_HEAD(&ring->request_list);
4659} 4572}
4660 4573
4661static void i915_init_vm(struct drm_i915_private *dev_priv, 4574void i915_init_vm(struct drm_i915_private *dev_priv,
4662 struct i915_address_space *vm) 4575 struct i915_address_space *vm)
4663{ 4576{
4577 if (!i915_is_ggtt(vm))
4578 drm_mm_init(&vm->mm, vm->start, vm->total);
4664 vm->dev = dev_priv->dev; 4579 vm->dev = dev_priv->dev;
4665 INIT_LIST_HEAD(&vm->active_list); 4580 INIT_LIST_HEAD(&vm->active_list);
4666 INIT_LIST_HEAD(&vm->inactive_list); 4581 INIT_LIST_HEAD(&vm->inactive_list);
4667 INIT_LIST_HEAD(&vm->global_link); 4582 INIT_LIST_HEAD(&vm->global_link);
4668 list_add(&vm->global_link, &dev_priv->vm_list); 4583 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4669} 4584}
4670 4585
4671void 4586void
4672i915_gem_load(struct drm_device *dev) 4587i915_gem_load(struct drm_device *dev)
4673{ 4588{
4674 drm_i915_private_t *dev_priv = dev->dev_private; 4589 struct drm_i915_private *dev_priv = dev->dev_private;
4675 int i; 4590 int i;
4676 4591
4677 dev_priv->slab = 4592 dev_priv->slab =
@@ -4738,7 +4653,7 @@ i915_gem_load(struct drm_device *dev)
4738static int i915_gem_init_phys_object(struct drm_device *dev, 4653static int i915_gem_init_phys_object(struct drm_device *dev,
4739 int id, int size, int align) 4654 int id, int size, int align)
4740{ 4655{
4741 drm_i915_private_t *dev_priv = dev->dev_private; 4656 struct drm_i915_private *dev_priv = dev->dev_private;
4742 struct drm_i915_gem_phys_object *phys_obj; 4657 struct drm_i915_gem_phys_object *phys_obj;
4743 int ret; 4658 int ret;
4744 4659
@@ -4770,7 +4685,7 @@ kfree_obj:
4770 4685
4771static void i915_gem_free_phys_object(struct drm_device *dev, int id) 4686static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4772{ 4687{
4773 drm_i915_private_t *dev_priv = dev->dev_private; 4688 struct drm_i915_private *dev_priv = dev->dev_private;
4774 struct drm_i915_gem_phys_object *phys_obj; 4689 struct drm_i915_gem_phys_object *phys_obj;
4775 4690
4776 if (!dev_priv->mm.phys_objs[id - 1]) 4691 if (!dev_priv->mm.phys_objs[id - 1])
@@ -4837,7 +4752,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4837 int align) 4752 int align)
4838{ 4753{
4839 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; 4754 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4840 drm_i915_private_t *dev_priv = dev->dev_private; 4755 struct drm_i915_private *dev_priv = dev->dev_private;
4841 int ret = 0; 4756 int ret = 0;
4842 int page_count; 4757 int page_count;
4843 int i; 4758 int i;
@@ -4950,6 +4865,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
4950int i915_gem_open(struct drm_device *dev, struct drm_file *file) 4865int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4951{ 4866{
4952 struct drm_i915_file_private *file_priv; 4867 struct drm_i915_file_private *file_priv;
4868 int ret;
4953 4869
4954 DRM_DEBUG_DRIVER("\n"); 4870 DRM_DEBUG_DRIVER("\n");
4955 4871
@@ -4959,15 +4875,18 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4959 4875
4960 file->driver_priv = file_priv; 4876 file->driver_priv = file_priv;
4961 file_priv->dev_priv = dev->dev_private; 4877 file_priv->dev_priv = dev->dev_private;
4878 file_priv->file = file;
4962 4879
4963 spin_lock_init(&file_priv->mm.lock); 4880 spin_lock_init(&file_priv->mm.lock);
4964 INIT_LIST_HEAD(&file_priv->mm.request_list); 4881 INIT_LIST_HEAD(&file_priv->mm.request_list);
4965 INIT_DELAYED_WORK(&file_priv->mm.idle_work, 4882 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4966 i915_gem_file_idle_work_handler); 4883 i915_gem_file_idle_work_handler);
4967 4884
4968 idr_init(&file_priv->context_idr); 4885 ret = i915_gem_context_open(dev, file);
4886 if (ret)
4887 kfree(file_priv);
4969 4888
4970 return 0; 4889 return ret;
4971} 4890}
4972 4891
4973static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4892static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -5014,7 +4933,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
5014 if (obj->active) 4933 if (obj->active)
5015 continue; 4934 continue;
5016 4935
5017 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4936 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
5018 count += obj->base.size >> PAGE_SHIFT; 4937 count += obj->base.size >> PAGE_SHIFT;
5019 } 4938 }
5020 4939
@@ -5031,7 +4950,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5031 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4950 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5032 struct i915_vma *vma; 4951 struct i915_vma *vma;
5033 4952
5034 if (vm == &dev_priv->mm.aliasing_ppgtt->base) 4953 if (!dev_priv->mm.aliasing_ppgtt ||
4954 vm == &dev_priv->mm.aliasing_ppgtt->base)
5035 vm = &dev_priv->gtt.base; 4955 vm = &dev_priv->gtt.base;
5036 4956
5037 BUG_ON(list_empty(&o->vma_list)); 4957 BUG_ON(list_empty(&o->vma_list));
@@ -5072,7 +4992,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5072 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4992 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5073 struct i915_vma *vma; 4993 struct i915_vma *vma;
5074 4994
5075 if (vm == &dev_priv->mm.aliasing_ppgtt->base) 4995 if (!dev_priv->mm.aliasing_ppgtt ||
4996 vm == &dev_priv->mm.aliasing_ppgtt->base)
5076 vm = &dev_priv->gtt.base; 4997 vm = &dev_priv->gtt.base;
5077 4998
5078 BUG_ON(list_empty(&o->vma_list)); 4999 BUG_ON(list_empty(&o->vma_list));
@@ -5127,7 +5048,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5127 return NULL; 5048 return NULL;
5128 5049
5129 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); 5050 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5130 if (WARN_ON(vma->vm != obj_to_ggtt(obj))) 5051 if (vma->vm != obj_to_ggtt(obj))
5131 return NULL; 5052 return NULL;
5132 5053
5133 return vma; 5054 return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e08acaba5402..6043062ffce7 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,11 +93,63 @@
93 * I've seen in a spec to date, and that was a workaround for a non-shipping 93 * I've seen in a spec to date, and that was a workaround for a non-shipping
94 * part. It should be safe to decrease this, but it's more future proof as is. 94 * part. It should be safe to decrease this, but it's more future proof as is.
95 */ 95 */
96#define CONTEXT_ALIGN (64<<10) 96#define GEN6_CONTEXT_ALIGN (64<<10)
97#define GEN7_CONTEXT_ALIGN 4096
97 98
98static struct i915_hw_context * 99static int do_switch(struct intel_ring_buffer *ring,
99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 100 struct i915_hw_context *to);
100static int do_switch(struct i915_hw_context *to); 101
102static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
103{
104 struct drm_device *dev = ppgtt->base.dev;
105 struct drm_i915_private *dev_priv = dev->dev_private;
106 struct i915_address_space *vm = &ppgtt->base;
107
108 if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
109 (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
110 ppgtt->base.cleanup(&ppgtt->base);
111 return;
112 }
113
114 /*
115 * Make sure vmas are unbound before we take down the drm_mm
116 *
117 * FIXME: Proper refcounting should take care of this, this shouldn't be
118 * needed at all.
119 */
120 if (!list_empty(&vm->active_list)) {
121 struct i915_vma *vma;
122
123 list_for_each_entry(vma, &vm->active_list, mm_list)
124 if (WARN_ON(list_empty(&vma->vma_link) ||
125 list_is_singular(&vma->vma_link)))
126 break;
127
128 i915_gem_evict_vm(&ppgtt->base, true);
129 } else {
130 i915_gem_retire_requests(dev);
131 i915_gem_evict_vm(&ppgtt->base, false);
132 }
133
134 ppgtt->base.cleanup(&ppgtt->base);
135}
136
137static void ppgtt_release(struct kref *kref)
138{
139 struct i915_hw_ppgtt *ppgtt =
140 container_of(kref, struct i915_hw_ppgtt, ref);
141
142 do_ppgtt_cleanup(ppgtt);
143 kfree(ppgtt);
144}
145
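	/*
	 * The new ppgtt_release() uses the kernel's kref pattern: the final
	 * kref_put() invokes the release callback, which recovers the
	 * embedding structure via container_of() and frees it. Below is a
	 * simplified, single-threaded userspace sketch of that pattern,
	 * not the kernel's kref (which uses atomics).
	 *
	 *	#include <stdlib.h>
	 *	#include <stddef.h>
	 *
	 *	#define container_of(ptr, type, member) \
	 *		((type *)((char *)(ptr) - offsetof(type, member)))
	 *
	 *	struct kref { int refcount; };
	 *
	 *	struct ppgtt {
	 *		struct kref ref;
	 *		// ... page tables, address space ...
	 *	};
	 *
	 *	static void kref_put(struct kref *ref,
	 *			     void (*release)(struct kref *))
	 *	{
	 *		if (--ref->refcount == 0)
	 *			release(ref);
	 *	}
	 *
	 *	static void release_ppgtt(struct kref *kref)
	 *	{
	 *		struct ppgtt *p = container_of(kref, struct ppgtt, ref);
	 *		// address-space cleanup would happen here
	 *		free(p);
	 *	}
	 *
	 *	int main(void)
	 *	{
	 *		struct ppgtt *p = calloc(1, sizeof(*p));
	 *		p->ref.refcount = 2;	// e.g. context + aliasing user
	 *		kref_put(&p->ref, release_ppgtt);	// still alive
	 *		kref_put(&p->ref, release_ppgtt);	// last ref: freed
	 *		return 0;
	 *	}
	 */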
146static size_t get_context_alignment(struct drm_device *dev)
147{
148 if (IS_GEN6(dev))
149 return GEN6_CONTEXT_ALIGN;
150
151 return GEN7_CONTEXT_ALIGN;
152}
101 153
102static int get_context_size(struct drm_device *dev) 154static int get_context_size(struct drm_device *dev)
103{ 155{
@@ -131,14 +183,44 @@ void i915_gem_context_free(struct kref *ctx_ref)
131{ 183{
132 struct i915_hw_context *ctx = container_of(ctx_ref, 184 struct i915_hw_context *ctx = container_of(ctx_ref,
133 typeof(*ctx), ref); 185 typeof(*ctx), ref);
186 struct i915_hw_ppgtt *ppgtt = NULL;
134 187
135 list_del(&ctx->link); 188 /* We refcount even the aliasing PPGTT to keep the code symmetric */
189 if (USES_PPGTT(ctx->obj->base.dev))
190 ppgtt = ctx_to_ppgtt(ctx);
191
192 /* XXX: Free up the object before tearing down the address space, in
193 * case we're bound in the PPGTT */
136 drm_gem_object_unreference(&ctx->obj->base); 194 drm_gem_object_unreference(&ctx->obj->base);
195
196 if (ppgtt)
197 kref_put(&ppgtt->ref, ppgtt_release);
198 list_del(&ctx->link);
137 kfree(ctx); 199 kfree(ctx);
138} 200}
139 201
202static struct i915_hw_ppgtt *
203create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
204{
205 struct i915_hw_ppgtt *ppgtt;
206 int ret;
207
208 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
209 if (!ppgtt)
210 return ERR_PTR(-ENOMEM);
211
212 ret = i915_gem_init_ppgtt(dev, ppgtt);
213 if (ret) {
214 kfree(ppgtt);
215 return ERR_PTR(ret);
216 }
217
218 ppgtt->ctx = ctx;
219 return ppgtt;
220}
221
140static struct i915_hw_context * 222static struct i915_hw_context *
141create_hw_context(struct drm_device *dev, 223__create_hw_context(struct drm_device *dev,
142 struct drm_i915_file_private *file_priv) 224 struct drm_i915_file_private *file_priv)
143{ 225{
144 struct drm_i915_private *dev_priv = dev->dev_private; 226 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +248,13 @@ create_hw_context(struct drm_device *dev,
166 goto err_out; 248 goto err_out;
167 } 249 }
168 250
169 /* The ring associated with the context object is handled by the normal
170 * object tracking code. We give an initial ring value simple to pass an
171 * assertion in the context switch code.
172 */
173 ctx->ring = &dev_priv->ring[RCS];
174 list_add_tail(&ctx->link, &dev_priv->context_list); 251 list_add_tail(&ctx->link, &dev_priv->context_list);
175 252
176 /* Default context will never have a file_priv */ 253 /* Default context will never have a file_priv */
177 if (file_priv == NULL) 254 if (file_priv == NULL)
178 return ctx; 255 return ctx;
179 256
180 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, 257 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
181 GFP_KERNEL); 258 GFP_KERNEL);
182 if (ret < 0) 259 if (ret < 0)
183 goto err_out; 260 goto err_out;
@@ -196,67 +273,136 @@ err_out:
196 return ERR_PTR(ret); 273 return ERR_PTR(ret);
197} 274}
198 275
199static inline bool is_default_context(struct i915_hw_context *ctx)
200{
201 return (ctx == ctx->ring->default_context);
202}
203
204/** 276/**
205 * The default context needs to exist per ring that uses contexts. It stores the 277 * The default context needs to exist per ring that uses contexts. It stores the
206 * context state of the GPU for applications that don't utilize HW contexts, as 278 * context state of the GPU for applications that don't utilize HW contexts, as
207 * well as an idle case. 279 * well as an idle case.
208 */ 280 */
209static int create_default_context(struct drm_i915_private *dev_priv) 281static struct i915_hw_context *
282i915_gem_create_context(struct drm_device *dev,
283 struct drm_i915_file_private *file_priv,
284 bool create_vm)
210{ 285{
286 const bool is_global_default_ctx = file_priv == NULL;
287 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct i915_hw_context *ctx; 288 struct i915_hw_context *ctx;
212 int ret; 289 int ret = 0;
213 290
214 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 291 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
215 292
216 ctx = create_hw_context(dev_priv->dev, NULL); 293 ctx = __create_hw_context(dev, file_priv);
217 if (IS_ERR(ctx)) 294 if (IS_ERR(ctx))
218 return PTR_ERR(ctx); 295 return ctx;
219
220 /* We may need to do things with the shrinker which require us to
221 * immediately switch back to the default context. This can cause a
222 * problem as pinning the default context also requires GTT space which
223 * may not be available. To avoid this we always pin the
224 * default context.
225 */
226 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
227 if (ret) {
228 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
229 goto err_destroy;
230 }
231 296
232 ret = do_switch(ctx); 297 if (is_global_default_ctx) {
233 if (ret) { 298 /* We may need to do things with the shrinker which
234 DRM_DEBUG_DRIVER("Switch failed %d\n", ret); 299 * require us to immediately switch back to the default
235 goto err_unpin; 300 * context. This can cause a problem as pinning the
301 * default context also requires GTT space which may not
302 * be available. To avoid this we always pin the default
303 * context.
304 */
305 ret = i915_gem_obj_ggtt_pin(ctx->obj,
306 get_context_alignment(dev), 0);
307 if (ret) {
308 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
309 goto err_destroy;
310 }
236 } 311 }
237 312
238 dev_priv->ring[RCS].default_context = ctx; 313 if (create_vm) {
314 struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
315
316 if (IS_ERR_OR_NULL(ppgtt)) {
317 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
318 PTR_ERR(ppgtt));
319 ret = PTR_ERR(ppgtt);
320 goto err_unpin;
321 } else
322 ctx->vm = &ppgtt->base;
323
324 /* This case is reserved for the global default context and
325 * should only happen once. */
326 if (is_global_default_ctx) {
327 if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
328 ret = -EEXIST;
329 goto err_unpin;
330 }
331
332 dev_priv->mm.aliasing_ppgtt = ppgtt;
333 }
334 } else if (USES_PPGTT(dev)) {
335 /* For platforms which only have aliasing PPGTT, we fake the
336 * address space and refcounting. */
337 ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
338 kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
339 } else
340 ctx->vm = &dev_priv->gtt.base;
239 341
240 DRM_DEBUG_DRIVER("Default HW context loaded\n"); 342 return ctx;
241 return 0;
242 343
243err_unpin: 344err_unpin:
244 i915_gem_object_unpin(ctx->obj); 345 if (is_global_default_ctx)
346 i915_gem_object_ggtt_unpin(ctx->obj);
245err_destroy: 347err_destroy:
246 i915_gem_context_unreference(ctx); 348 i915_gem_context_unreference(ctx);
247 return ret; 349 return ERR_PTR(ret);
350}
351
352void i915_gem_context_reset(struct drm_device *dev)
353{
354 struct drm_i915_private *dev_priv = dev->dev_private;
355 struct intel_ring_buffer *ring;
356 int i;
357
358 if (!HAS_HW_CONTEXTS(dev))
359 return;
360
361 /* Prevent the hardware from restoring the last context (which hung) on
362 * the next switch */
363 for (i = 0; i < I915_NUM_RINGS; i++) {
364 struct i915_hw_context *dctx;
365 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
366 continue;
367
368 /* Do a fake switch to the default context */
369 ring = &dev_priv->ring[i];
370 dctx = ring->default_context;
371 if (WARN_ON(!dctx))
372 continue;
373
374 if (!ring->last_context)
375 continue;
376
377 if (ring->last_context == dctx)
378 continue;
379
380 if (i == RCS) {
381 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
382 get_context_alignment(dev), 0));
383 /* Fake a finish/inactive */
384 dctx->obj->base.write_domain = 0;
385 dctx->obj->active = 0;
386 }
387
388 i915_gem_context_unreference(ring->last_context);
389 i915_gem_context_reference(dctx);
390 ring->last_context = dctx;
391 }
248} 392}
249 393
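
At the end of the reset loop above, the reference held via ring->last_context is moved over to the default context. Unreferencing the old context before referencing dctx is safe only because the ring's default_context slot already holds its own reference on dctx. A compressed sketch with simplified, non-atomic refcounting:

	struct ctx {
		int refcount;
	};

	static void swap_last_context(struct ctx **last, struct ctx *dctx)
	{
		/* dctx stays alive across the unref because the ring's
		 * default_context slot holds an independent reference. */
		if (*last && --(*last)->refcount == 0) {
			/* the old (hung) context would be freed here */
		}
		dctx->refcount++;
		*last = dctx;
	}
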
250int i915_gem_context_init(struct drm_device *dev) 394int i915_gem_context_init(struct drm_device *dev)
251{ 395{
252 struct drm_i915_private *dev_priv = dev->dev_private; 396 struct drm_i915_private *dev_priv = dev->dev_private;
253 int ret; 397 struct intel_ring_buffer *ring;
398 int i;
254 399
255 if (!HAS_HW_CONTEXTS(dev)) 400 if (!HAS_HW_CONTEXTS(dev))
256 return 0; 401 return 0;
257 402
258 /* If called from reset, or thaw... we've been here already */ 403 /* Init should only be called once per module load. Eventually the
259 if (dev_priv->ring[RCS].default_context) 404 * restriction on the context_disabled check can be loosened. */
405 if (WARN_ON(dev_priv->ring[RCS].default_context))
260 return 0; 406 return 0;
261 407
262 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 408 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
@@ -266,11 +412,23 @@ int i915_gem_context_init(struct drm_device *dev)
266 return -E2BIG; 412 return -E2BIG;
267 } 413 }
268 414
269 ret = create_default_context(dev_priv); 415 dev_priv->ring[RCS].default_context =
270 if (ret) { 416 i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
271 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n", 417
272 ret); 418 if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
273 return ret; 419 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
420 PTR_ERR(dev_priv->ring[RCS].default_context));
421 return PTR_ERR(dev_priv->ring[RCS].default_context);
422 }
423
424 for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
425 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
426 continue;
427
428 ring = &dev_priv->ring[i];
429
430 /* NB: RCS will hold a ref for all rings */
431 ring->default_context = dev_priv->ring[RCS].default_context;
274 } 432 }
275 433
276 DRM_DEBUG_DRIVER("HW context support initialized\n"); 434 DRM_DEBUG_DRIVER("HW context support initialized\n");
@@ -281,6 +439,7 @@ void i915_gem_context_fini(struct drm_device *dev)
281{ 439{
282 struct drm_i915_private *dev_priv = dev->dev_private; 440 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; 441 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
442 int i;
284 443
285 if (!HAS_HW_CONTEXTS(dev)) 444 if (!HAS_HW_CONTEXTS(dev))
286 return; 445 return;
@@ -300,59 +459,129 @@ void i915_gem_context_fini(struct drm_device *dev)
300 if (dev_priv->ring[RCS].last_context == dctx) { 459 if (dev_priv->ring[RCS].last_context == dctx) {
301 /* Fake switch to NULL context */ 460 /* Fake switch to NULL context */
302 WARN_ON(dctx->obj->active); 461 WARN_ON(dctx->obj->active);
303 i915_gem_object_unpin(dctx->obj); 462 i915_gem_object_ggtt_unpin(dctx->obj);
304 i915_gem_context_unreference(dctx); 463 i915_gem_context_unreference(dctx);
464 dev_priv->ring[RCS].last_context = NULL;
305 } 465 }
306 466
307 i915_gem_object_unpin(dctx->obj); 467 for (i = 0; i < I915_NUM_RINGS; i++) {
468 struct intel_ring_buffer *ring = &dev_priv->ring[i];
469 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
470 continue;
471
472 if (ring->last_context)
473 i915_gem_context_unreference(ring->last_context);
474
475 ring->default_context = NULL;
476 ring->last_context = NULL;
477 }
478
479 i915_gem_object_ggtt_unpin(dctx->obj);
308 i915_gem_context_unreference(dctx); 480 i915_gem_context_unreference(dctx);
309 dev_priv->ring[RCS].default_context = NULL; 481 dev_priv->mm.aliasing_ppgtt = NULL;
310 dev_priv->ring[RCS].last_context = NULL; 482}
483
484int i915_gem_context_enable(struct drm_i915_private *dev_priv)
485{
486 struct intel_ring_buffer *ring;
487 int ret, i;
488
489 if (!HAS_HW_CONTEXTS(dev_priv->dev))
490 return 0;
491
492 /* This is the only place the aliasing PPGTT gets enabled, which means
493 * it has to happen before we bail on reset */
494 if (dev_priv->mm.aliasing_ppgtt) {
495 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
496 ppgtt->enable(ppgtt);
497 }
498
499 /* FIXME: We should make this work, even in reset */
500 if (i915_reset_in_progress(&dev_priv->gpu_error))
501 return 0;
502
503 BUG_ON(!dev_priv->ring[RCS].default_context);
504
505 for_each_ring(ring, dev_priv, i) {
506 ret = do_switch(ring, ring->default_context);
507 if (ret)
508 return ret;
509 }
510
511 return 0;
311} 512}
312 513
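
i915_gem_context_enable() now does the post-init work that create_default_context() used to: it enables the aliasing PPGTT and switches every ring to its default context once. Roughly, with a plain loop standing in for for_each_ring() and illustrative names throughout:

	#define NUM_RINGS 3

	struct ring {
		int id;
		void *default_context;
	};

	static int do_switch(struct ring *r, void *ctx)
	{
		(void)r; (void)ctx;
		return 0;	/* the real switch emits MI_SET_CONTEXT */
	}

	static int context_enable(struct ring *rings)
	{
		/* enabling the aliasing PPGTT would run first, then: */
		for (int i = 0; i < NUM_RINGS; i++) {
			int ret = do_switch(&rings[i], rings[i].default_context);
			if (ret)
				return ret;	/* propagate the first failure */
		}
		return 0;
	}
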
313static int context_idr_cleanup(int id, void *p, void *data) 514static int context_idr_cleanup(int id, void *p, void *data)
314{ 515{
315 struct i915_hw_context *ctx = p; 516 struct i915_hw_context *ctx = p;
316 517
317 BUG_ON(id == DEFAULT_CONTEXT_ID); 518 /* Ignore the default context because close will handle it */
519 if (i915_gem_context_is_default(ctx))
520 return 0;
318 521
319 i915_gem_context_unreference(ctx); 522 i915_gem_context_unreference(ctx);
320 return 0; 523 return 0;
321} 524}
322 525
323struct i915_ctx_hang_stats * 526int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
324i915_gem_context_get_hang_stats(struct drm_device *dev,
325 struct drm_file *file,
326 u32 id)
327{ 527{
328 struct drm_i915_file_private *file_priv = file->driver_priv; 528 struct drm_i915_file_private *file_priv = file->driver_priv;
329 struct i915_hw_context *ctx; 529 struct drm_i915_private *dev_priv = dev->dev_private;
330 530
331 if (id == DEFAULT_CONTEXT_ID) 531 if (!HAS_HW_CONTEXTS(dev)) {
332 return &file_priv->hang_stats; 532 /* Cheat for hang stats */
533 file_priv->private_default_ctx =
534 kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
333 535
334 if (!HAS_HW_CONTEXTS(dev)) 536 if (file_priv->private_default_ctx == NULL)
335 return ERR_PTR(-ENOENT); 537 return -ENOMEM;
336 538
337 ctx = i915_gem_context_get(file->driver_priv, id); 539 file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
338 if (ctx == NULL) 540 return 0;
339 return ERR_PTR(-ENOENT); 541 }
542
543 idr_init(&file_priv->context_idr);
340 544
341 return &ctx->hang_stats; 545 mutex_lock(&dev->struct_mutex);
546 file_priv->private_default_ctx =
547 i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
548 mutex_unlock(&dev->struct_mutex);
549
550 if (IS_ERR(file_priv->private_default_ctx)) {
551 idr_destroy(&file_priv->context_idr);
552 return PTR_ERR(file_priv->private_default_ctx);
553 }
554
555 return 0;
342} 556}
343 557
344void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 558void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
345{ 559{
346 struct drm_i915_file_private *file_priv = file->driver_priv; 560 struct drm_i915_file_private *file_priv = file->driver_priv;
347 561
562 if (!HAS_HW_CONTEXTS(dev)) {
563 kfree(file_priv->private_default_ctx);
564 return;
565 }
566
348 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 567 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
568 i915_gem_context_unreference(file_priv->private_default_ctx);
349 idr_destroy(&file_priv->context_idr); 569 idr_destroy(&file_priv->context_idr);
350} 570}
351 571
352static struct i915_hw_context * 572struct i915_hw_context *
353i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) 573i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
354{ 574{
355 return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); 575 struct i915_hw_context *ctx;
576
577 if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
578 return file_priv->private_default_ctx;
579
580 ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
581 if (!ctx)
582 return ERR_PTR(-ENOENT);
583
584 return ctx;
356} 585}
357 586
358static inline int 587static inline int
@@ -390,7 +619,10 @@ mi_set_context(struct intel_ring_buffer *ring,
390 MI_SAVE_EXT_STATE_EN | 619 MI_SAVE_EXT_STATE_EN |
391 MI_RESTORE_EXT_STATE_EN | 620 MI_RESTORE_EXT_STATE_EN |
392 hw_flags); 621 hw_flags);
393 /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ 622 /*
623 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
624 * WaMiSetContext_Hang:snb,ivb,vlv
625 */
394 intel_ring_emit(ring, MI_NOOP); 626 intel_ring_emit(ring, MI_NOOP);
395 627
396 if (IS_GEN7(ring->dev)) 628 if (IS_GEN7(ring->dev))
@@ -403,21 +635,30 @@ mi_set_context(struct intel_ring_buffer *ring,
403 return ret; 635 return ret;
404} 636}
405 637
406static int do_switch(struct i915_hw_context *to) 638static int do_switch(struct intel_ring_buffer *ring,
639 struct i915_hw_context *to)
407{ 640{
408 struct intel_ring_buffer *ring = to->ring; 641 struct drm_i915_private *dev_priv = ring->dev->dev_private;
409 struct i915_hw_context *from = ring->last_context; 642 struct i915_hw_context *from = ring->last_context;
643 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
410 u32 hw_flags = 0; 644 u32 hw_flags = 0;
411 int ret, i; 645 int ret, i;
412 646
413 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); 647 if (from != NULL && ring == &dev_priv->ring[RCS]) {
648 BUG_ON(from->obj == NULL);
649 BUG_ON(!i915_gem_obj_is_pinned(from->obj));
650 }
414 651
415 if (from == to && !to->remap_slice) 652 if (from == to && from->last_ring == ring && !to->remap_slice)
416 return 0; 653 return 0;
417 654
418 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); 655 /* Trying to pin first makes error handling easier. */
419 if (ret) 656 if (ring == &dev_priv->ring[RCS]) {
420 return ret; 657 ret = i915_gem_obj_ggtt_pin(to->obj,
658 get_context_alignment(ring->dev), 0);
659 if (ret)
660 return ret;
661 }
421 662
422 /* 663 /*
423 * Pin can switch back to the default context if we end up calling into 664 * Pin can switch back to the default context if we end up calling into
@@ -426,6 +667,18 @@ static int do_switch(struct i915_hw_context *to)
426 */ 667 */
427 from = ring->last_context; 668 from = ring->last_context;
428 669
670 if (USES_FULL_PPGTT(ring->dev)) {
671 ret = ppgtt->switch_mm(ppgtt, ring, false);
672 if (ret)
673 goto unpin_out;
674 }
675
676 if (ring != &dev_priv->ring[RCS]) {
677 if (from)
678 i915_gem_context_unreference(from);
679 goto done;
680 }
681
429 /* 682 /*
430 * Clear this page out of any CPU caches for coherent swap-in/out. Note 683 * Clear this page out of any CPU caches for coherent swap-in/out. Note
431 * that thanks to write = false in this call and us not setting any gpu 684 * that thanks to write = false in this call and us not setting any gpu
@@ -435,22 +688,21 @@ static int do_switch(struct i915_hw_context *to)
435 * XXX: We need a real interface to do this instead of trickery. 688 * XXX: We need a real interface to do this instead of trickery.
436 */ 689 */
437 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 690 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
438 if (ret) { 691 if (ret)
439 i915_gem_object_unpin(to->obj); 692 goto unpin_out;
440 return ret;
441 }
442 693
443 if (!to->obj->has_global_gtt_mapping) 694 if (!to->obj->has_global_gtt_mapping) {
444 i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); 695 struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
696 &dev_priv->gtt.base);
697 vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
698 }
445 699
446 if (!to->is_initialized || is_default_context(to)) 700 if (!to->is_initialized || i915_gem_context_is_default(to))
447 hw_flags |= MI_RESTORE_INHIBIT; 701 hw_flags |= MI_RESTORE_INHIBIT;
448 702
449 ret = mi_set_context(ring, to, hw_flags); 703 ret = mi_set_context(ring, to, hw_flags);
450 if (ret) { 704 if (ret)
451 i915_gem_object_unpin(to->obj); 705 goto unpin_out;
452 return ret;
453 }
454 706
455 for (i = 0; i < MAX_L3_SLICES; i++) { 707 for (i = 0; i < MAX_L3_SLICES; i++) {
456 if (!(to->remap_slice & (1<<i))) 708 if (!(to->remap_slice & (1<<i)))
@@ -484,22 +736,30 @@ static int do_switch(struct i915_hw_context *to)
484 BUG_ON(from->obj->ring != ring); 736 BUG_ON(from->obj->ring != ring);
485 737
486 /* obj is kept alive until the next request by its active ref */ 738 /* obj is kept alive until the next request by its active ref */
487 i915_gem_object_unpin(from->obj); 739 i915_gem_object_ggtt_unpin(from->obj);
488 i915_gem_context_unreference(from); 740 i915_gem_context_unreference(from);
489 } 741 }
490 742
743 to->is_initialized = true;
744
745done:
491 i915_gem_context_reference(to); 746 i915_gem_context_reference(to);
492 ring->last_context = to; 747 ring->last_context = to;
493 to->is_initialized = true; 748 to->last_ring = ring;
494 749
495 return 0; 750 return 0;
751
752unpin_out:
753 if (ring->id == RCS)
754 i915_gem_object_ggtt_unpin(to->obj);
755 return ret;
496} 756}
497 757
498/** 758/**
499 * i915_switch_context() - perform a GPU context switch. 759 * i915_switch_context() - perform a GPU context switch.
500 * @ring: ring for which we'll execute the context switch 760 * @ring: ring for which we'll execute the context switch
501 * @file_priv: file_priv associated with the context, may be NULL 761 * @file_priv: file_priv associated with the context, may be NULL
502 * @id: context id number 762 * @to: the context to switch to
503 * 763 *
504 * The context life cycle is simple. The context refcount is incremented and 764 * The context life cycle is simple. The context refcount is incremented and
505 * decremented by 1 and create and destroy. If the context is in use by the GPU, 765 * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -508,31 +768,21 @@ static int do_switch(struct i915_hw_context *to)
508 */ 768 */
509int i915_switch_context(struct intel_ring_buffer *ring, 769int i915_switch_context(struct intel_ring_buffer *ring,
510 struct drm_file *file, 770 struct drm_file *file,
511 int to_id) 771 struct i915_hw_context *to)
512{ 772{
513 struct drm_i915_private *dev_priv = ring->dev->dev_private; 773 struct drm_i915_private *dev_priv = ring->dev->dev_private;
514 struct i915_hw_context *to;
515
516 if (!HAS_HW_CONTEXTS(ring->dev))
517 return 0;
518 774
519 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 775 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
520 776
521 if (ring != &dev_priv->ring[RCS]) 777 BUG_ON(file && to == NULL);
522 return 0;
523
524 if (to_id == DEFAULT_CONTEXT_ID) {
525 to = ring->default_context;
526 } else {
527 if (file == NULL)
528 return -EINVAL;
529 778
530 to = i915_gem_context_get(file->driver_priv, to_id); 779 /* We have the fake context */
531 if (to == NULL) 780 if (!HAS_HW_CONTEXTS(ring->dev)) {
532 return -ENOENT; 781 ring->last_context = to;
782 return 0;
533 } 783 }
534 784
535 return do_switch(to); 785 return do_switch(ring, to);
536} 786}
537 787
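
i915_switch_context() now takes the context pointer directly instead of an id, and on hardware without contexts it merely records the fake per-file context so hang statistics still have something to attach to. The contract, condensed into an illustrative stand-in:

	struct ring {
		void *last_context;
	};

	static int switch_context(struct ring *ring, void *to, int has_hw_contexts)
	{
		if (!has_hw_contexts) {
			ring->last_context = to;	/* bookkeeping only */
			return 0;
		}
		/* do_switch(ring, to) would run the real MI_SET_CONTEXT dance */
		return 0;
	}
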
538int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 788int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -543,9 +793,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
543 struct i915_hw_context *ctx; 793 struct i915_hw_context *ctx;
544 int ret; 794 int ret;
545 795
546 if (!(dev->driver->driver_features & DRIVER_GEM))
547 return -ENODEV;
548
549 if (!HAS_HW_CONTEXTS(dev)) 796 if (!HAS_HW_CONTEXTS(dev))
550 return -ENODEV; 797 return -ENODEV;
551 798
@@ -553,7 +800,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
553 if (ret) 800 if (ret)
554 return ret; 801 return ret;
555 802
556 ctx = create_hw_context(dev, file_priv); 803 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
557 mutex_unlock(&dev->struct_mutex); 804 mutex_unlock(&dev->struct_mutex);
558 if (IS_ERR(ctx)) 805 if (IS_ERR(ctx))
559 return PTR_ERR(ctx); 806 return PTR_ERR(ctx);
@@ -572,17 +819,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
572 struct i915_hw_context *ctx; 819 struct i915_hw_context *ctx;
573 int ret; 820 int ret;
574 821
575 if (!(dev->driver->driver_features & DRIVER_GEM)) 822 if (args->ctx_id == DEFAULT_CONTEXT_ID)
576 return -ENODEV; 823 return -ENOENT;
577 824
578 ret = i915_mutex_lock_interruptible(dev); 825 ret = i915_mutex_lock_interruptible(dev);
579 if (ret) 826 if (ret)
580 return ret; 827 return ret;
581 828
582 ctx = i915_gem_context_get(file_priv, args->ctx_id); 829 ctx = i915_gem_context_get(file_priv, args->ctx_id);
583 if (!ctx) { 830 if (IS_ERR(ctx)) {
584 mutex_unlock(&dev->struct_mutex); 831 mutex_unlock(&dev->struct_mutex);
585 return -ENOENT; 832 return PTR_ERR(ctx);
586 } 833 }
587 834
588 idr_remove(&ctx->file_priv->context_idr, ctx->id); 835 idr_remove(&ctx->file_priv->context_idr, ctx->id);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 775d506b3208..f462d1b51d97 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -34,7 +34,7 @@ int
34i915_verify_lists(struct drm_device *dev) 34i915_verify_lists(struct drm_device *dev)
35{ 35{
36 static int warned; 36 static int warned;
37 drm_i915_private_t *dev_priv = dev->dev_private; 37 struct drm_i915_private *dev_priv = dev->dev_private;
38 struct drm_i915_gem_object *obj; 38 struct drm_i915_gem_object *obj;
39 int err = 0; 39 int err = 0;
40 40
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2ca280f9ee53..75fca63dc8c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,7 @@
36static bool 36static bool
37mark_free(struct i915_vma *vma, struct list_head *unwind) 37mark_free(struct i915_vma *vma, struct list_head *unwind)
38{ 38{
39 if (vma->obj->pin_count) 39 if (vma->pin_count)
40 return false; 40 return false;
41 41
42 if (WARN_ON(!list_empty(&vma->exec_list))) 42 if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -46,18 +46,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
46 return drm_mm_scan_add_block(&vma->node); 46 return drm_mm_scan_add_block(&vma->node);
47} 47}
48 48
49/**
50 * i915_gem_evict_something - Evict vmas to make room for binding a new one
51 * @dev: drm_device
52 * @vm: address space to evict from
 53 * @min_size: size of the desired free space
 54 * @alignment: alignment constraint of the desired free space
 55 * @cache_level: cache_level for the desired space
 56 * @flags: PIN_MAPPABLE requires the free space to be mappable,
 57 * PIN_NONBLOCK forbids waiting on or evicting active objects
58 *
 59 * This function will try to evict vmas until free space satisfying the
60 * requirements is found. Callers must check first whether any such hole exists
61 * already before calling this function.
62 *
63 * This function is used by the object/vma binding code.
64 *
65 * To clarify: This is for freeing up virtual address space, not for freeing
66 * memory in e.g. the shrinker.
67 */
49int 68int
50i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, 69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
51 int min_size, unsigned alignment, unsigned cache_level, 70 int min_size, unsigned alignment, unsigned cache_level,
52 bool mappable, bool nonblocking) 71 unsigned flags)
53{ 72{
54 drm_i915_private_t *dev_priv = dev->dev_private; 73 struct drm_i915_private *dev_priv = dev->dev_private;
55 struct list_head eviction_list, unwind_list; 74 struct list_head eviction_list, unwind_list;
56 struct i915_vma *vma; 75 struct i915_vma *vma;
57 int ret = 0; 76 int ret = 0;
58 int pass = 0; 77 int pass = 0;
59 78
60 trace_i915_gem_evict(dev, min_size, alignment, mappable); 79 trace_i915_gem_evict(dev, min_size, alignment, flags);
61 80
62 /* 81 /*
63 * The goal is to evict objects and amalgamate space in LRU order. 82 * The goal is to evict objects and amalgamate space in LRU order.
@@ -83,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
83 */ 102 */
84 103
85 INIT_LIST_HEAD(&unwind_list); 104 INIT_LIST_HEAD(&unwind_list);
86 if (mappable) { 105 if (flags & PIN_MAPPABLE) {
87 BUG_ON(!i915_is_ggtt(vm)); 106 BUG_ON(!i915_is_ggtt(vm));
88 drm_mm_init_scan_with_range(&vm->mm, min_size, 107 drm_mm_init_scan_with_range(&vm->mm, min_size,
89 alignment, cache_level, 0, 108 alignment, cache_level, 0,
@@ -98,7 +117,7 @@ search_again:
98 goto found; 117 goto found;
99 } 118 }
100 119
101 if (nonblocking) 120 if (flags & PIN_NONBLOCK)
102 goto none; 121 goto none;
103 122
104 /* Now merge in the soon-to-be-expired objects... */ 123 /* Now merge in the soon-to-be-expired objects... */
@@ -122,7 +141,7 @@ none:
122 /* Can we unpin some objects such as idle hw contents, 141 /* Can we unpin some objects such as idle hw contents,
123 * or pending flips? 142 * or pending flips?
124 */ 143 */
125 if (nonblocking) 144 if (flags & PIN_NONBLOCK)
126 return -ENOSPC; 145 return -ENOSPC;
127 146
128 /* Only idle the GPU and repeat the search once */ 147 /* Only idle the GPU and repeat the search once */
@@ -177,19 +196,19 @@ found:
177} 196}
178 197
179/** 198/**
180 * i915_gem_evict_vm - Try to free up VM space 199 * i915_gem_evict_vm - Evict all idle vmas from a vm
181 * 200 *
182 * @vm: Address space to evict from 201 * @vm: Address space to cleanse
183 * @do_idle: Boolean directing whether to idle first. 202 * @do_idle: Boolean directing whether to idle first.
184 * 203 *
185 * VM eviction is about freeing up virtual address space. If one wants fine 204 * This function evicts all idles vmas from a vm. If all unpinned vmas should be
186 * grained eviction, they should see evict something for more details. In terms 205 * evicted the @do_idle needs to be set to true.
187 * of freeing up actual system memory, this function may not accomplish the
188 * desired result. An object may be shared in multiple address space, and this
189 * function will not assert those objects be freed.
190 * 206 *
191 * Using do_idle will result in a more complete eviction because it retires, and 207 * This is used by the execbuf code as a last-ditch effort to defragment the
192 * inactivates current BOs. 208 * address space.
209 *
210 * To clarify: This is for freeing up virtual address space, not for freeing
211 * memory in e.g. the shrinker.
193 */ 212 */
194int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) 213int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
195{ 214{
@@ -207,16 +226,24 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
207 } 226 }
208 227
209 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) 228 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
210 if (vma->obj->pin_count == 0) 229 if (vma->pin_count == 0)
211 WARN_ON(i915_vma_unbind(vma)); 230 WARN_ON(i915_vma_unbind(vma));
212 231
213 return 0; 232 return 0;
214} 233}
215 234
235/**
236 * i915_gem_evict_everything - Try to evict all objects
237 * @dev: Device to evict objects for
238 *
239 * This function tries to evict all gem objects from all address spaces. Used
240 * by the shrinker as a last-ditch effort and for suspend, before releasing the
241 * backing storage of all unbound objects.
242 */
216int 243int
217i915_gem_evict_everything(struct drm_device *dev) 244i915_gem_evict_everything(struct drm_device *dev)
218{ 245{
219 drm_i915_private_t *dev_priv = dev->dev_private; 246 struct drm_i915_private *dev_priv = dev->dev_private;
220 struct i915_address_space *vm; 247 struct i915_address_space *vm;
221 bool lists_empty = true; 248 bool lists_empty = true;
222 int ret; 249 int ret;
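
The kerneldoc added above describes eviction as LRU-ordered unbinding until the requested hole exists. A toy sketch of that strategy follows; the real code drives drm_mm's scan helpers to find a contiguous, aligned hole rather than just summing sizes, so treat this purely as an illustration:

	#include <stdio.h>

	struct vma {
		unsigned long size;
		int pin_count;
	};

	/* lru[] is ordered least-recently-used first */
	static int evict_something(struct vma *lru, int n, unsigned long need)
	{
		unsigned long freed = 0;

		for (int i = 0; i < n && freed < need; i++) {
			if (lru[i].pin_count)	/* pinned vmas cannot be evicted */
				continue;
			freed += lru[i].size;	/* i915_vma_unbind() would run here */
		}
		return freed >= need ? 0 : -1;	/* -ENOSPC in the kernel */
	}

	int main(void)
	{
		struct vma lru[] = { { 4096, 1 }, { 8192, 0 }, { 4096, 0 } };

		printf("%d\n", evict_something(lru, 3, 8192));
		return 0;
	}
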
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d269ecf46e26..7447160155a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
91 struct i915_address_space *vm, 91 struct i915_address_space *vm,
92 struct drm_file *file) 92 struct drm_file *file)
93{ 93{
94 struct drm_i915_private *dev_priv = vm->dev->dev_private;
94 struct drm_i915_gem_object *obj; 95 struct drm_i915_gem_object *obj;
95 struct list_head objects; 96 struct list_head objects;
96 int i, ret; 97 int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
125 i = 0; 126 i = 0;
126 while (!list_empty(&objects)) { 127 while (!list_empty(&objects)) {
127 struct i915_vma *vma; 128 struct i915_vma *vma;
129 struct i915_address_space *bind_vm = vm;
130
131 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
132 USES_FULL_PPGTT(vm->dev)) {
133 ret = -EINVAL;
134 goto err;
135 }
136
137 /* If we have secure dispatch, or the userspace assures us that
138 * they know what they're doing, use the GGTT VM.
139 */
140 if (((args->flags & I915_EXEC_SECURE) &&
141 (i == (args->buffer_count - 1))))
142 bind_vm = &dev_priv->gtt.base;
128 143
129 obj = list_first_entry(&objects, 144 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object, 145 struct drm_i915_gem_object,
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
138 * from the (obj, vm) we don't run the risk of creating 153 * from the (obj, vm) we don't run the risk of creating
139 * duplicated vmas for the same vm. 154 * duplicated vmas for the same vm.
140 */ 155 */
141 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 156 vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
142 if (IS_ERR(vma)) { 157 if (IS_ERR(vma)) {
143 DRM_DEBUG("Failed to lookup VMA\n"); 158 DRM_DEBUG("Failed to lookup VMA\n");
144 ret = PTR_ERR(vma); 159 ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
217 i915_gem_object_unpin_fence(obj); 232 i915_gem_object_unpin_fence(obj);
218 233
219 if (entry->flags & __EXEC_OBJECT_HAS_PIN) 234 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
220 i915_gem_object_unpin(obj); 235 vma->pin_count--;
221 236
222 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); 237 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
223} 238}
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
327static int 342static int
328i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 343i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
329 struct eb_vmas *eb, 344 struct eb_vmas *eb,
330 struct drm_i915_gem_relocation_entry *reloc, 345 struct drm_i915_gem_relocation_entry *reloc)
331 struct i915_address_space *vm)
332{ 346{
333 struct drm_device *dev = obj->base.dev; 347 struct drm_device *dev = obj->base.dev;
334 struct drm_gem_object *target_obj; 348 struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
352 if (unlikely(IS_GEN6(dev) && 366 if (unlikely(IS_GEN6(dev) &&
353 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 367 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
354 !target_i915_obj->has_global_gtt_mapping)) { 368 !target_i915_obj->has_global_gtt_mapping)) {
355 i915_gem_gtt_bind_object(target_i915_obj, 369 struct i915_vma *vma =
356 target_i915_obj->cache_level); 370 list_first_entry(&target_i915_obj->vma_list,
371 typeof(*vma), vma_link);
372 vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
357 } 373 }
358 374
359 /* Validate that the target is in a valid r/w GPU domain */ 375 /* Validate that the target is in a valid r/w GPU domain */
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
451 do { 467 do {
452 u64 offset = r->presumed_offset; 468 u64 offset = r->presumed_offset;
453 469
454 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, 470 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
455 vma->vm);
456 if (ret) 471 if (ret)
457 return ret; 472 return ret;
458 473
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
481 int i, ret; 496 int i, ret;
482 497
483 for (i = 0; i < entry->relocation_count; i++) { 498 for (i = 0; i < entry->relocation_count; i++) {
484 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], 499 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
485 vma->vm);
486 if (ret) 500 if (ret)
487 return ret; 501 return ret;
488 } 502 }
@@ -527,21 +541,26 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
527 struct intel_ring_buffer *ring, 541 struct intel_ring_buffer *ring,
528 bool *need_reloc) 542 bool *need_reloc)
529{ 543{
530 struct drm_i915_private *dev_priv = ring->dev->dev_private; 544 struct drm_i915_gem_object *obj = vma->obj;
531 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 545 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
532 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 546 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
533 bool need_fence, need_mappable; 547 bool need_fence;
534 struct drm_i915_gem_object *obj = vma->obj; 548 unsigned flags;
535 int ret; 549 int ret;
536 550
551 flags = 0;
552
537 need_fence = 553 need_fence =
538 has_fenced_gpu_access && 554 has_fenced_gpu_access &&
539 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 555 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
540 obj->tiling_mode != I915_TILING_NONE; 556 obj->tiling_mode != I915_TILING_NONE;
541 need_mappable = need_fence || need_reloc_mappable(vma); 557 if (need_fence || need_reloc_mappable(vma))
558 flags |= PIN_MAPPABLE;
542 559
543 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable, 560 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
544 false); 561 flags |= PIN_GLOBAL;
562
563 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
545 if (ret) 564 if (ret)
546 return ret; 565 return ret;
547 566
@@ -560,14 +579,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
560 } 579 }
561 } 580 }
562 581
563 /* Ensure ppgtt mapping exists if needed */
564 if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
565 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
566 obj, obj->cache_level);
567
568 obj->has_aliasing_ppgtt_mapping = 1;
569 }
570
571 if (entry->offset != vma->node.start) { 582 if (entry->offset != vma->node.start) {
572 entry->offset = vma->node.start; 583 entry->offset = vma->node.start;
573 *need_reloc = true; 584 *need_reloc = true;
@@ -578,10 +589,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
578 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; 589 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
579 } 590 }
580 591
581 if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
582 !obj->has_global_gtt_mapping)
583 i915_gem_gtt_bind_object(obj, obj->cache_level);
584
585 return 0; 592 return 0;
586} 593}
587 594
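The rewritten reserve path above folds the old need_mappable handling and the EXEC_OBJECT_NEEDS_GTT bind into a single pin-flags word passed to i915_gem_object_pin(). A minimal standalone sketch of that composition, assuming illustrative PIN_* bit values (the driver's real definitions live in i915_drv.h and may differ):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values only, not the driver's actual definitions. */
#define PIN_MAPPABLE 0x1
#define PIN_GLOBAL   0x2

static unsigned pin_flags(bool need_fence, bool need_reloc_mappable,
                          bool needs_gtt)
{
        unsigned flags = 0;

        if (need_fence || need_reloc_mappable)
                flags |= PIN_MAPPABLE; /* must sit in the CPU-visible aperture */
        if (needs_gtt)
                flags |= PIN_GLOBAL;   /* must also get a global GTT binding */

        return flags;
}

int main(void)
{
        printf("fenced object:    %#x\n", pin_flags(true, false, false));
        printf("NEEDS_GTT object: %#x\n", pin_flags(false, false, true));
        return 0;
}

The payoff is visible further down the hunk: the open-coded aliasing-ppgtt and global-GTT binds are deleted, because the pin call can now act on the flags itself.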
@@ -891,7 +898,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
891 if (!access_ok(VERIFY_WRITE, ptr, length)) 898 if (!access_ok(VERIFY_WRITE, ptr, length))
892 return -EFAULT; 899 return -EFAULT;
893 900
894 if (likely(!i915_prefault_disable)) { 901 if (likely(!i915.prefault_disable)) {
895 if (fault_in_multipages_readable(ptr, length)) 902 if (fault_in_multipages_readable(ptr, length))
896 return -EFAULT; 903 return -EFAULT;
897 } 904 }
@@ -900,22 +907,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
900 return 0; 907 return 0;
901} 908}
902 909
903static int 910static struct i915_hw_context *
904i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 911i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
905 const u32 ctx_id) 912 struct intel_ring_buffer *ring, const u32 ctx_id)
906{ 913{
914 struct i915_hw_context *ctx = NULL;
907 struct i915_ctx_hang_stats *hs; 915 struct i915_ctx_hang_stats *hs;
908 916
909 hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); 917 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
910 if (IS_ERR(hs)) 918 return ERR_PTR(-EINVAL);
911 return PTR_ERR(hs); 919
920 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
921 if (IS_ERR(ctx))
922 return ctx;
912 923
924 hs = &ctx->hang_stats;
913 if (hs->banned) { 925 if (hs->banned) {
914 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); 926 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
915 return -EIO; 927 return ERR_PTR(-EIO);
916 } 928 }
917 929
918 return 0; 930 return ctx;
919} 931}
920 932
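i915_gem_validate_context() now hands back the context itself and encodes failure in the pointer, the kernel's ERR_PTR convention, instead of returning an int and forcing callers to look the context up a second time. A self-contained userspace sketch of that convention, simplified from linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified versions of the linux/err.h helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct context { int id; };

static struct context *lookup_context(int id)
{
        static struct context the_ctx = { .id = 1 };

        if (id != 1)
                return ERR_PTR(-EINVAL); /* errno smuggled in the pointer */
        return &the_ctx;
}

int main(void)
{
        struct context *ctx = lookup_context(2);

        if (IS_ERR(ctx))
                printf("lookup failed: %ld\n", PTR_ERR(ctx));
        return 0;
}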
921static void 933static void
@@ -939,7 +951,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
939 if (obj->base.write_domain) { 951 if (obj->base.write_domain) {
940 obj->dirty = 1; 952 obj->dirty = 1;
941 obj->last_write_seqno = intel_ring_get_seqno(ring); 953 obj->last_write_seqno = intel_ring_get_seqno(ring);
942 if (obj->pin_count) /* check for potential scanout */ 954 /* check for potential scanout */
955 if (i915_gem_obj_ggtt_bound(obj) &&
956 i915_gem_obj_to_ggtt(obj)->pin_count)
943 intel_mark_fb_busy(obj, ring); 957 intel_mark_fb_busy(obj, ring);
944 } 958 }
945 959
@@ -964,7 +978,7 @@ static int
964i915_reset_gen7_sol_offsets(struct drm_device *dev, 978i915_reset_gen7_sol_offsets(struct drm_device *dev,
965 struct intel_ring_buffer *ring) 979 struct intel_ring_buffer *ring)
966{ 980{
967 drm_i915_private_t *dev_priv = dev->dev_private; 981 struct drm_i915_private *dev_priv = dev->dev_private;
968 int ret, i; 982 int ret, i;
969 983
970 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) 984 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
@@ -989,16 +1003,17 @@ static int
989i915_gem_do_execbuffer(struct drm_device *dev, void *data, 1003i915_gem_do_execbuffer(struct drm_device *dev, void *data,
990 struct drm_file *file, 1004 struct drm_file *file,
991 struct drm_i915_gem_execbuffer2 *args, 1005 struct drm_i915_gem_execbuffer2 *args,
992 struct drm_i915_gem_exec_object2 *exec, 1006 struct drm_i915_gem_exec_object2 *exec)
993 struct i915_address_space *vm)
994{ 1007{
995 drm_i915_private_t *dev_priv = dev->dev_private; 1008 struct drm_i915_private *dev_priv = dev->dev_private;
996 struct eb_vmas *eb; 1009 struct eb_vmas *eb;
997 struct drm_i915_gem_object *batch_obj; 1010 struct drm_i915_gem_object *batch_obj;
998 struct drm_clip_rect *cliprects = NULL; 1011 struct drm_clip_rect *cliprects = NULL;
999 struct intel_ring_buffer *ring; 1012 struct intel_ring_buffer *ring;
1013 struct i915_hw_context *ctx;
1014 struct i915_address_space *vm;
1000 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1015 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1001 u32 exec_start, exec_len; 1016 u32 exec_start = args->batch_start_offset, exec_len;
1002 u32 mask, flags; 1017 u32 mask, flags;
1003 int ret, mode, i; 1018 int ret, mode, i;
1004 bool need_relocs; 1019 bool need_relocs;
@@ -1020,41 +1035,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1020 if (args->flags & I915_EXEC_IS_PINNED) 1035 if (args->flags & I915_EXEC_IS_PINNED)
1021 flags |= I915_DISPATCH_PINNED; 1036 flags |= I915_DISPATCH_PINNED;
1022 1037
1023 switch (args->flags & I915_EXEC_RING_MASK) { 1038 if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
1024 case I915_EXEC_DEFAULT:
1025 case I915_EXEC_RENDER:
1026 ring = &dev_priv->ring[RCS];
1027 break;
1028 case I915_EXEC_BSD:
1029 ring = &dev_priv->ring[VCS];
1030 if (ctx_id != DEFAULT_CONTEXT_ID) {
1031 DRM_DEBUG("Ring %s doesn't support contexts\n",
1032 ring->name);
1033 return -EPERM;
1034 }
1035 break;
1036 case I915_EXEC_BLT:
1037 ring = &dev_priv->ring[BCS];
1038 if (ctx_id != DEFAULT_CONTEXT_ID) {
1039 DRM_DEBUG("Ring %s doesn't support contexts\n",
1040 ring->name);
1041 return -EPERM;
1042 }
1043 break;
1044 case I915_EXEC_VEBOX:
1045 ring = &dev_priv->ring[VECS];
1046 if (ctx_id != DEFAULT_CONTEXT_ID) {
1047 DRM_DEBUG("Ring %s doesn't support contexts\n",
1048 ring->name);
1049 return -EPERM;
1050 }
1051 break;
1052
1053 default:
1054 DRM_DEBUG("execbuf with unknown ring: %d\n", 1039 DRM_DEBUG("execbuf with unknown ring: %d\n",
1055 (int)(args->flags & I915_EXEC_RING_MASK)); 1040 (int)(args->flags & I915_EXEC_RING_MASK));
1056 return -EINVAL; 1041 return -EINVAL;
1057 } 1042 }
1043
1044 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1045 ring = &dev_priv->ring[RCS];
1046 else
1047 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1048
1058 if (!intel_ring_initialized(ring)) { 1049 if (!intel_ring_initialized(ring)) {
1059 DRM_DEBUG("execbuf with invalid ring: %d\n", 1050 DRM_DEBUG("execbuf with invalid ring: %d\n",
1060 (int)(args->flags & I915_EXEC_RING_MASK)); 1051 (int)(args->flags & I915_EXEC_RING_MASK));
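The big switch deleted above collapses into a bounds check plus array indexing because the uapi selectors are contiguous (I915_EXEC_DEFAULT=0, RENDER=1, BSD=2, BLT=3, VEBOX=4) and, apart from DEFAULT, sit exactly one above the driver's ring ids (RCS=0, VCS=1, BCS=2, VECS=3). A standalone sketch of the mapping:

#include <stdio.h>

/* uapi execbuffer ring selectors, as in include/uapi/drm/i915_drm.h */
enum { I915_EXEC_DEFAULT, I915_EXEC_RENDER, I915_EXEC_BSD,
       I915_EXEC_BLT, I915_EXEC_VEBOX };

/* driver-side ring ids */
enum { RCS, VCS, BCS, VECS };

static int ring_for_selector(unsigned sel)
{
        if (sel == I915_EXEC_DEFAULT)
                return RCS;  /* default means the render ring */
        return sel - 1;      /* RENDER..VEBOX map straight onto RCS..VECS */
}

int main(void)
{
        for (unsigned sel = I915_EXEC_DEFAULT; sel <= I915_EXEC_VEBOX; sel++)
                printf("selector %u -> ring %d\n", sel, ring_for_selector(sel));
        return 0;
}

The per-ring "doesn't support contexts" checks did not disappear; they moved into i915_gem_validate_context(), which now rejects a non-default ctx_id on any ring other than RCS.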
@@ -1136,11 +1127,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1136 goto pre_mutex_err; 1127 goto pre_mutex_err;
1137 } 1128 }
1138 1129
1139 ret = i915_gem_validate_context(dev, file, ctx_id); 1130 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1140 if (ret) { 1131 if (IS_ERR(ctx)) {
1141 mutex_unlock(&dev->struct_mutex); 1132 mutex_unlock(&dev->struct_mutex);
1133 ret = PTR_ERR(ctx);
1142 goto pre_mutex_err; 1134 goto pre_mutex_err;
1143 } 1135 }
1136
1137 i915_gem_context_reference(ctx);
1138
1139 vm = ctx->vm;
1140 if (!USES_FULL_PPGTT(dev))
1141 vm = &dev_priv->gtt.base;
1144 1142
1145 eb = eb_create(args); 1143 eb = eb_create(args);
1146 if (eb == NULL) { 1144 if (eb == NULL) {
@@ -1184,17 +1182,46 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1184 } 1182 }
1185 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 1183 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1186 1184
1185 if (i915_needs_cmd_parser(ring)) {
1186 ret = i915_parse_cmds(ring,
1187 batch_obj,
1188 args->batch_start_offset,
1189 file->is_master);
1190 if (ret)
1191 goto err;
1192
1193 /*
1194 * XXX: Actually do this when enabling batch copy...
1195 *
1196 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
1197 * from MI_BATCH_BUFFER_START commands issued in the
1198 * dispatch_execbuffer implementations. We specifically don't
1199 * want that set when the command parser is enabled.
1200 */
1201 }
1202
1187 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1203 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1188 * batch" bit. Hence we need to pin secure batches into the global gtt. 1204 * batch" bit. Hence we need to pin secure batches into the global gtt.
1189 * hsw should have this fixed, but bdw mucks it up again. */ 1205 * hsw should have this fixed, but bdw mucks it up again. */
1190 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1206 if (flags & I915_DISPATCH_SECURE &&
1191 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1207 !batch_obj->has_global_gtt_mapping) {
1208 /* When we have multiple VMs, we'll need to make sure that we
1209 * allocate space first */
1210 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
1211 BUG_ON(!vma);
1212 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
1213 }
1214
1215 if (flags & I915_DISPATCH_SECURE)
1216 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1217 else
1218 exec_start += i915_gem_obj_offset(batch_obj, vm);
1192 1219
1193 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); 1220 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1194 if (ret) 1221 if (ret)
1195 goto err; 1222 goto err;
1196 1223
1197 ret = i915_switch_context(ring, file, ctx_id); 1224 ret = i915_switch_context(ring, file, ctx);
1198 if (ret) 1225 if (ret)
1199 goto err; 1226 goto err;
1200 1227
@@ -1219,8 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1219 goto err; 1246 goto err;
1220 } 1247 }
1221 1248
1222 exec_start = i915_gem_obj_offset(batch_obj, vm) + 1249
1223 args->batch_start_offset;
1224 exec_len = args->batch_len; 1250 exec_len = args->batch_len;
1225 if (cliprects) { 1251 if (cliprects) {
1226 for (i = 0; i < args->num_cliprects; i++) { 1252 for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1275,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1249 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1275 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1250 1276
1251err: 1277err:
1278 /* the request owns the ref now */
1279 i915_gem_context_unreference(ctx);
1252 eb_destroy(eb); 1280 eb_destroy(eb);
1253 1281
1254 mutex_unlock(&dev->struct_mutex); 1282 mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1298,6 @@ int
1270i915_gem_execbuffer(struct drm_device *dev, void *data, 1298i915_gem_execbuffer(struct drm_device *dev, void *data,
1271 struct drm_file *file) 1299 struct drm_file *file)
1272{ 1300{
1273 struct drm_i915_private *dev_priv = dev->dev_private;
1274 struct drm_i915_gem_execbuffer *args = data; 1301 struct drm_i915_gem_execbuffer *args = data;
1275 struct drm_i915_gem_execbuffer2 exec2; 1302 struct drm_i915_gem_execbuffer2 exec2;
1276 struct drm_i915_gem_exec_object *exec_list = NULL; 1303 struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1353,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1326 exec2.flags = I915_EXEC_RENDER; 1353 exec2.flags = I915_EXEC_RENDER;
1327 i915_execbuffer2_set_context_id(exec2, 0); 1354 i915_execbuffer2_set_context_id(exec2, 0);
1328 1355
1329 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, 1356 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1330 &dev_priv->gtt.base);
1331 if (!ret) { 1357 if (!ret) {
1332 /* Copy the new buffer offsets back to the user's exec list. */ 1358 /* Copy the new buffer offsets back to the user's exec list. */
1333 for (i = 0; i < args->buffer_count; i++) 1359 for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1379,6 @@ int
1353i915_gem_execbuffer2(struct drm_device *dev, void *data, 1379i915_gem_execbuffer2(struct drm_device *dev, void *data,
1354 struct drm_file *file) 1380 struct drm_file *file)
1355{ 1381{
1356 struct drm_i915_private *dev_priv = dev->dev_private;
1357 struct drm_i915_gem_execbuffer2 *args = data; 1382 struct drm_i915_gem_execbuffer2 *args = data;
1358 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1383 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1359 int ret; 1384 int ret;
@@ -1384,8 +1409,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1384 return -EFAULT; 1409 return -EFAULT;
1385 } 1410 }
1386 1411
1387 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list, 1412 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1388 &dev_priv->gtt.base);
1389 if (!ret) { 1413 if (!ret) {
1390 /* Copy the new buffer offsets back to the user's exec list. */ 1414 /* Copy the new buffer offsets back to the user's exec list. */
1391 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1415 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d278be110805..ab5e93c30aa2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright © 2010 Daniel Vetter 2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
3 * 4 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
@@ -22,12 +23,38 @@
22 * 23 *
23 */ 24 */
24 25
26#include <linux/seq_file.h>
25#include <drm/drmP.h> 27#include <drm/drmP.h>
26#include <drm/i915_drm.h> 28#include <drm/i915_drm.h>
27#include "i915_drv.h" 29#include "i915_drv.h"
28#include "i915_trace.h" 30#include "i915_trace.h"
29#include "intel_drv.h" 31#include "intel_drv.h"
30 32
33static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
34
35bool intel_enable_ppgtt(struct drm_device *dev, bool full)
36{
37 if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
38 return false;
39
40 if (i915.enable_ppgtt == 1 && full)
41 return false;
42
43#ifdef CONFIG_INTEL_IOMMU
44 /* Disable ppgtt on SNB if VT-d is on. */
45 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
46 DRM_INFO("Disabling PPGTT because VT-d is on\n");
47 return false;
48 }
49#endif
50
51 /* Full ppgtt disabled by default for now due to issues. */
52 if (full)
53 return false; /* HAS_PPGTT(dev) */
54 else
55 return HAS_ALIASING_PPGTT(dev);
56}
57
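intel_enable_ppgtt() reads as a decision table over the i915.enable_ppgtt module parameter: 0 disables PPGTT entirely, 1 allows aliasing PPGTT only, and for now even higher values still refuse full PPGTT. A sketch mirroring the logic, with the hardware and VT-d checks assumed to pass:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors intel_enable_ppgtt() above, minus HAS_*() and the IOMMU check. */
static bool enable_ppgtt(int param, bool full)
{
        if (param == 0)
                return false;   /* ppgtt off altogether */
        if (param == 1 && full)
                return false;   /* 1 means aliasing only */
        if (full)
                return false;   /* full ppgtt still disabled by default */
        return true;            /* aliasing ppgtt */
}

int main(void)
{
        for (int p = 0; p <= 2; p++)
                printf("enable_ppgtt=%d: aliasing=%d full=%d\n",
                       p, enable_ppgtt(p, false), enable_ppgtt(p, true));
        return 0;
}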
31#define GEN6_PPGTT_PD_ENTRIES 512 58#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) 59#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33typedef uint64_t gen8_gtt_pte_t; 60typedef uint64_t gen8_gtt_pte_t;
@@ -63,13 +90,31 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
63 90
64#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 91#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
65#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) 92#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
66#define GEN8_LEGACY_PDPS 4 93
 94/* GEN8 legacy style address is defined as a 3 level page table:
95 * 31:30 | 29:21 | 20:12 | 11:0
96 * PDPE | PDE | PTE | offset
97 * The difference as compared to normal x86 3 level page table is the PDPEs are
98 * programmed via register.
99 */
100#define GEN8_PDPE_SHIFT 30
101#define GEN8_PDPE_MASK 0x3
102#define GEN8_PDE_SHIFT 21
103#define GEN8_PDE_MASK 0x1ff
104#define GEN8_PTE_SHIFT 12
105#define GEN8_PTE_MASK 0x1ff
67 106
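The new shift and mask constants slice a GEN8 GPU virtual address into its three table indices plus page offset, exactly as gen8_ppgtt_clear_range() and gen8_ppgtt_insert_entries() below compute pdpe/pde/pte. A runnable userspace sketch of the decode:

#include <stdint.h>
#include <stdio.h>

#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK  0x3
#define GEN8_PDE_SHIFT  21
#define GEN8_PDE_MASK   0x1ff
#define GEN8_PTE_SHIFT  12
#define GEN8_PTE_MASK   0x1ff

int main(void)
{
        uint64_t addr = 0x12345678; /* arbitrary GPU virtual address */

        /* Same decomposition the clear/insert paths below use. */
        unsigned pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
        unsigned pde  = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;
        unsigned pte  = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;
        unsigned off  = addr & 0xfff;

        printf("pdpe=%u pde=%u pte=%u offset=0x%x\n", pdpe, pde, pte, off);
        return 0;
}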
68#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) 107#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
69#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ 108#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
70#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ 109#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
71#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ 110#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
72 111
112static void ppgtt_bind_vma(struct i915_vma *vma,
113 enum i915_cache_level cache_level,
114 u32 flags);
115static void ppgtt_unbind_vma(struct i915_vma *vma);
116static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
117
73static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, 118static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
74 enum i915_cache_level level, 119 enum i915_cache_level level,
75 bool valid) 120 bool valid)
@@ -199,12 +244,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
199 244
200/* Broadwell Page Directory Pointer Descriptors */ 245/* Broadwell Page Directory Pointer Descriptors */
201static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, 246static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
202 uint64_t val) 247 uint64_t val, bool synchronous)
203{ 248{
249 struct drm_i915_private *dev_priv = ring->dev->dev_private;
204 int ret; 250 int ret;
205 251
206 BUG_ON(entry >= 4); 252 BUG_ON(entry >= 4);
207 253
254 if (synchronous) {
255 I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
256 I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
257 return 0;
258 }
259
208 ret = intel_ring_begin(ring, 6); 260 ret = intel_ring_begin(ring, 6);
209 if (ret) 261 if (ret)
210 return ret; 262 return ret;
@@ -220,216 +272,357 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
220 return 0; 272 return 0;
221} 273}
222 274
223static int gen8_ppgtt_enable(struct drm_device *dev) 275static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
276 struct intel_ring_buffer *ring,
277 bool synchronous)
224{ 278{
225 struct drm_i915_private *dev_priv = dev->dev_private; 279 int i, ret;
226 struct intel_ring_buffer *ring;
227 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
228 int i, j, ret;
229 280
230 /* bit of a hack to find the actual last used pd */ 281 /* bit of a hack to find the actual last used pd */
231 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; 282 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
232 283
233 for_each_ring(ring, dev_priv, j) {
234 I915_WRITE(RING_MODE_GEN7(ring),
235 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
236 }
237
238 for (i = used_pd - 1; i >= 0; i--) { 284 for (i = used_pd - 1; i >= 0; i--) {
239 dma_addr_t addr = ppgtt->pd_dma_addr[i]; 285 dma_addr_t addr = ppgtt->pd_dma_addr[i];
240 for_each_ring(ring, dev_priv, j) { 286 ret = gen8_write_pdp(ring, i, addr, synchronous);
241 ret = gen8_write_pdp(ring, i, addr); 287 if (ret)
242 if (ret) 288 return ret;
243 goto err_out;
244 }
245 } 289 }
246 return 0;
247 290
248err_out: 291 return 0;
249 for_each_ring(ring, dev_priv, j)
250 I915_WRITE(RING_MODE_GEN7(ring),
251 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
252 return ret;
253} 292}
254 293
255static void gen8_ppgtt_clear_range(struct i915_address_space *vm, 294static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
256 unsigned first_entry, 295 uint64_t start,
257 unsigned num_entries, 296 uint64_t length,
258 bool use_scratch) 297 bool use_scratch)
259{ 298{
260 struct i915_hw_ppgtt *ppgtt = 299 struct i915_hw_ppgtt *ppgtt =
261 container_of(vm, struct i915_hw_ppgtt, base); 300 container_of(vm, struct i915_hw_ppgtt, base);
262 gen8_gtt_pte_t *pt_vaddr, scratch_pte; 301 gen8_gtt_pte_t *pt_vaddr, scratch_pte;
263 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; 302 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
264 unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE; 303 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
304 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
305 unsigned num_entries = length >> PAGE_SHIFT;
265 unsigned last_pte, i; 306 unsigned last_pte, i;
266 307
267 scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, 308 scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
268 I915_CACHE_LLC, use_scratch); 309 I915_CACHE_LLC, use_scratch);
269 310
270 while (num_entries) { 311 while (num_entries) {
271 struct page *page_table = &ppgtt->gen8_pt_pages[act_pt]; 312 struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
272 313
273 last_pte = first_pte + num_entries; 314 last_pte = pte + num_entries;
274 if (last_pte > GEN8_PTES_PER_PAGE) 315 if (last_pte > GEN8_PTES_PER_PAGE)
275 last_pte = GEN8_PTES_PER_PAGE; 316 last_pte = GEN8_PTES_PER_PAGE;
276 317
277 pt_vaddr = kmap_atomic(page_table); 318 pt_vaddr = kmap_atomic(page_table);
278 319
279 for (i = first_pte; i < last_pte; i++) 320 for (i = pte; i < last_pte; i++) {
280 pt_vaddr[i] = scratch_pte; 321 pt_vaddr[i] = scratch_pte;
322 num_entries--;
323 }
281 324
282 kunmap_atomic(pt_vaddr); 325 kunmap_atomic(pt_vaddr);
283 326
284 num_entries -= last_pte - first_pte; 327 pte = 0;
285 first_pte = 0; 328 if (++pde == GEN8_PDES_PER_PAGE) {
286 act_pt++; 329 pdpe++;
330 pde = 0;
331 }
287 } 332 }
288} 333}
289 334
290static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, 335static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
291 struct sg_table *pages, 336 struct sg_table *pages,
292 unsigned first_entry, 337 uint64_t start,
293 enum i915_cache_level cache_level) 338 enum i915_cache_level cache_level)
294{ 339{
295 struct i915_hw_ppgtt *ppgtt = 340 struct i915_hw_ppgtt *ppgtt =
296 container_of(vm, struct i915_hw_ppgtt, base); 341 container_of(vm, struct i915_hw_ppgtt, base);
297 gen8_gtt_pte_t *pt_vaddr; 342 gen8_gtt_pte_t *pt_vaddr;
298 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; 343 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
299 unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; 344 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
345 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
300 struct sg_page_iter sg_iter; 346 struct sg_page_iter sg_iter;
301 347
302 pt_vaddr = NULL; 348 pt_vaddr = NULL;
349
303 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 350 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
351 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
352 break;
353
304 if (pt_vaddr == NULL) 354 if (pt_vaddr == NULL)
305 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); 355 pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
306 356
307 pt_vaddr[act_pte] = 357 pt_vaddr[pte] =
308 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), 358 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
309 cache_level, true); 359 cache_level, true);
310 if (++act_pte == GEN8_PTES_PER_PAGE) { 360 if (++pte == GEN8_PTES_PER_PAGE) {
311 kunmap_atomic(pt_vaddr); 361 kunmap_atomic(pt_vaddr);
312 pt_vaddr = NULL; 362 pt_vaddr = NULL;
313 act_pt++; 363 if (++pde == GEN8_PDES_PER_PAGE) {
314 act_pte = 0; 364 pdpe++;
365 pde = 0;
366 }
367 pte = 0;
315 } 368 }
316 } 369 }
317 if (pt_vaddr) 370 if (pt_vaddr)
318 kunmap_atomic(pt_vaddr); 371 kunmap_atomic(pt_vaddr);
319} 372}
320 373
374static void gen8_free_page_tables(struct page **pt_pages)
375{
376 int i;
377
378 if (pt_pages == NULL)
379 return;
380
381 for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
382 if (pt_pages[i])
383 __free_pages(pt_pages[i], 0);
384}
385
386static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
387{
388 int i;
389
390 for (i = 0; i < ppgtt->num_pd_pages; i++) {
391 gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
392 kfree(ppgtt->gen8_pt_pages[i]);
393 kfree(ppgtt->gen8_pt_dma_addr[i]);
394 }
395
396 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
397}
398
399static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
400{
401 struct pci_dev *hwdev = ppgtt->base.dev->pdev;
402 int i, j;
403
404 for (i = 0; i < ppgtt->num_pd_pages; i++) {
405 /* TODO: In the future we'll support sparse mappings, so this
406 * will have to change. */
407 if (!ppgtt->pd_dma_addr[i])
408 continue;
409
410 pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
411 PCI_DMA_BIDIRECTIONAL);
412
413 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
414 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
415 if (addr)
416 pci_unmap_page(hwdev, addr, PAGE_SIZE,
417 PCI_DMA_BIDIRECTIONAL);
418 }
419 }
420}
421
321static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 422static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
322{ 423{
323 struct i915_hw_ppgtt *ppgtt = 424 struct i915_hw_ppgtt *ppgtt =
324 container_of(vm, struct i915_hw_ppgtt, base); 425 container_of(vm, struct i915_hw_ppgtt, base);
325 int i, j;
326 426
427 list_del(&vm->global_link);
327 drm_mm_takedown(&vm->mm); 428 drm_mm_takedown(&vm->mm);
328 429
329 for (i = 0; i < ppgtt->num_pd_pages ; i++) { 430 gen8_ppgtt_unmap_pages(ppgtt);
330 if (ppgtt->pd_dma_addr[i]) { 431 gen8_ppgtt_free(ppgtt);
331 pci_unmap_page(ppgtt->base.dev->pdev, 432}
332 ppgtt->pd_dma_addr[i],
333 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
334 433
335 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 434static struct page **__gen8_alloc_page_tables(void)
336 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; 435{
337 if (addr) 436 struct page **pt_pages;
338 pci_unmap_page(ppgtt->base.dev->pdev, 437 int i;
339 addr,
340 PAGE_SIZE,
341 PCI_DMA_BIDIRECTIONAL);
342 438
343 } 439 pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
344 } 440 if (!pt_pages)
345 kfree(ppgtt->gen8_pt_dma_addr[i]); 441 return ERR_PTR(-ENOMEM);
442
443 for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
444 pt_pages[i] = alloc_page(GFP_KERNEL);
445 if (!pt_pages[i])
446 goto bail;
346 } 447 }
347 448
348 __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); 449 return pt_pages;
349 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); 450
451bail:
452 gen8_free_page_tables(pt_pages);
453 kfree(pt_pages);
454 return ERR_PTR(-ENOMEM);
350} 455}
351 456
352/** 457static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
353 * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a 458 const int max_pdp)
354 * net effect resembling a 2-level page table in normal x86 terms. Each PDP
355 * represents 1GB of memory
356 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
357 *
358 * TODO: Do something with the size parameter
359 **/
360static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
361{ 459{
362 struct page *pt_pages; 460 struct page **pt_pages[GEN8_LEGACY_PDPS];
363 int i, j, ret = -ENOMEM; 461 int i, ret;
364 const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
365 const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
366 462
367 if (size % (1<<30)) 463 for (i = 0; i < max_pdp; i++) {
368 DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); 464 pt_pages[i] = __gen8_alloc_page_tables();
465 if (IS_ERR(pt_pages[i])) {
466 ret = PTR_ERR(pt_pages[i]);
467 goto unwind_out;
468 }
469 }
369 470
370 /* FIXME: split allocation into smaller pieces. For now we only ever do 471 /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
371 * this once, but with full PPGTT, the multiple contiguous allocations 472 * "atomic" - for cleanup purposes.
372 * will be bad.
373 */ 473 */
474 for (i = 0; i < max_pdp; i++)
475 ppgtt->gen8_pt_pages[i] = pt_pages[i];
476
477 return 0;
478
479unwind_out:
480 while (i--) {
481 gen8_free_page_tables(pt_pages[i]);
482 kfree(pt_pages[i]);
483 }
484
485 return ret;
486}
487
488static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
489{
490 int i;
491
492 for (i = 0; i < ppgtt->num_pd_pages; i++) {
493 ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
494 sizeof(dma_addr_t),
495 GFP_KERNEL);
496 if (!ppgtt->gen8_pt_dma_addr[i])
497 return -ENOMEM;
498 }
499
500 return 0;
501}
502
503static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
504 const int max_pdp)
505{
374 ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); 506 ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
375 if (!ppgtt->pd_pages) 507 if (!ppgtt->pd_pages)
376 return -ENOMEM; 508 return -ENOMEM;
377 509
378 pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT)); 510 ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
379 if (!pt_pages) { 511 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
512
513 return 0;
514}
515
516static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
517 const int max_pdp)
518{
519 int ret;
520
521 ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
522 if (ret)
523 return ret;
524
525 ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
526 if (ret) {
380 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); 527 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
381 return -ENOMEM; 528 return ret;
382 } 529 }
383 530
384 ppgtt->gen8_pt_pages = pt_pages;
385 ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
386 ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
387 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; 531 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
388 ppgtt->enable = gen8_ppgtt_enable;
389 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
390 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
391 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
392 ppgtt->base.start = 0;
393 ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
394 532
395 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); 533 ret = gen8_ppgtt_allocate_dma(ppgtt);
534 if (ret)
535 gen8_ppgtt_free(ppgtt);
396 536
397 /* 537 return ret;
398 * - Create a mapping for the page directories. 538}
399 * - For each page directory:
400 * allocate space for page table mappings.
401 * map each page table
402 */
403 for (i = 0; i < max_pdp; i++) {
404 dma_addr_t temp;
405 temp = pci_map_page(ppgtt->base.dev->pdev,
406 &ppgtt->pd_pages[i], 0,
407 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
408 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
409 goto err_out;
410 539
411 ppgtt->pd_dma_addr[i] = temp; 540static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
541 const int pd)
542{
543 dma_addr_t pd_addr;
544 int ret;
412 545
413 ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL); 546 pd_addr = pci_map_page(ppgtt->base.dev->pdev,
414 if (!ppgtt->gen8_pt_dma_addr[i]) 547 &ppgtt->pd_pages[pd], 0,
415 goto err_out; 548 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
416 549
417 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 550 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
418 struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j]; 551 if (ret)
419 temp = pci_map_page(ppgtt->base.dev->pdev, 552 return ret;
420 p, 0, PAGE_SIZE,
421 PCI_DMA_BIDIRECTIONAL);
422 553
423 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) 554 ppgtt->pd_dma_addr[pd] = pd_addr;
424 goto err_out;
425 555
426 ppgtt->gen8_pt_dma_addr[i][j] = temp; 556 return 0;
557}
558
559static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
560 const int pd,
561 const int pt)
562{
563 dma_addr_t pt_addr;
564 struct page *p;
565 int ret;
566
567 p = ppgtt->gen8_pt_pages[pd][pt];
568 pt_addr = pci_map_page(ppgtt->base.dev->pdev,
569 p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
570 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
571 if (ret)
572 return ret;
573
574 ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
575
576 return 0;
577}
578
579/**
580 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
581 * with a net effect resembling a 2-level page table in normal x86 terms. Each
582 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
583 * space.
584 *
585 * FIXME: split allocation into smaller pieces. For now we only ever do this
586 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
587 * TODO: Do something with the size parameter
588 */
589static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
590{
591 const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
592 const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
593 int i, j, ret;
594
595 if (size % (1<<30))
596 DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
597
598 /* 1. Do all our allocations for page directories and page tables. */
599 ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
600 if (ret)
601 return ret;
602
603 /*
604 * 2. Create DMA mappings for the page directories and page tables.
605 */
606 for (i = 0; i < max_pdp; i++) {
607 ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
608 if (ret)
609 goto bail;
610
611 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
612 ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
613 if (ret)
614 goto bail;
427 } 615 }
428 } 616 }
429 617
430 /* For now, the PPGTT helper functions all require that the PDEs are 618 /*
 619 * 3. Map all the page directory entries to point to the page tables
620 * we've allocated.
621 *
622 * For now, the PPGTT helper functions all require that the PDEs are
431 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we 623 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
432 * will never need to touch the PDEs again */ 624 * will never need to touch the PDEs again.
625 */
433 for (i = 0; i < max_pdp; i++) { 626 for (i = 0; i < max_pdp; i++) {
434 gen8_ppgtt_pde_t *pd_vaddr; 627 gen8_ppgtt_pde_t *pd_vaddr;
435 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); 628 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
@@ -441,23 +634,85 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
441 kunmap_atomic(pd_vaddr); 634 kunmap_atomic(pd_vaddr);
442 } 635 }
443 636
444 ppgtt->base.clear_range(&ppgtt->base, 0, 637 ppgtt->enable = gen8_ppgtt_enable;
445 ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE, 638 ppgtt->switch_mm = gen8_mm_switch;
446 true); 639 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
640 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
641 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
642 ppgtt->base.start = 0;
643 ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
644
645 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
447 646
448 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", 647 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
449 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); 648 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
450 DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", 649 DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
451 ppgtt->num_pt_pages, 650 ppgtt->num_pd_entries,
452 (ppgtt->num_pt_pages - num_pt_pages) + 651 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
453 size % (1<<30));
454 return 0; 652 return 0;
455 653
456err_out: 654bail:
457 ppgtt->base.cleanup(&ppgtt->base); 655 gen8_ppgtt_unmap_pages(ppgtt);
656 gen8_ppgtt_free(ppgtt);
458 return ret; 657 return ret;
459} 658}
460 659
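The 4 GiB figure in the gen8_ppgtt_init() comment falls straight out of the geometry: 4 PDPs x 512 PDEs x 512 PTEs x 4096 bytes per page. A quick standalone check, including the same DIV_ROUND_UP sizing the function uses for max_pdp:

#include <stdint.h>
#include <stdio.h>

#define GEN8_LEGACY_PDPS   4
#define GEN8_PDES_PER_PAGE 512  /* PAGE_SIZE / sizeof(gen8_ppgtt_pde_t) */
#define GEN8_PTES_PER_PAGE 512  /* PAGE_SIZE / sizeof(gen8_gtt_pte_t) */
#define PAGE_BYTES         4096

int main(void)
{
        uint64_t per_pdp = (uint64_t)GEN8_PDES_PER_PAGE *
                           GEN8_PTES_PER_PAGE * PAGE_BYTES;
        uint64_t total = GEN8_LEGACY_PDPS * per_pdp;
        uint64_t size = 1ULL << 32;  /* a full 32-bit address space */
        unsigned max_pdp = (size + (1 << 30) - 1) / (1 << 30); /* DIV_ROUND_UP */

        printf("per PDP: %llu MiB\n", (unsigned long long)(per_pdp >> 20)); /* 1024 */
        printf("total:   %llu GiB\n", (unsigned long long)(total >> 30));   /* 4 */
        printf("max_pdp: %u\n", max_pdp);                                   /* 4 */
        return 0;
}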
660static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
661{
662 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
663 struct i915_address_space *vm = &ppgtt->base;
664 gen6_gtt_pte_t __iomem *pd_addr;
665 gen6_gtt_pte_t scratch_pte;
666 uint32_t pd_entry;
667 int pte, pde;
668
669 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
670
671 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
672 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
673
674 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
675 ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
676 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
677 u32 expected;
678 gen6_gtt_pte_t *pt_vaddr;
679 dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
680 pd_entry = readl(pd_addr + pde);
681 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
682
683 if (pd_entry != expected)
684 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
685 pde,
686 pd_entry,
687 expected);
688 seq_printf(m, "\tPDE: %x\n", pd_entry);
689
690 pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
691 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
692 unsigned long va =
693 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
694 (pte * PAGE_SIZE);
695 int i;
696 bool found = false;
697 for (i = 0; i < 4; i++)
698 if (pt_vaddr[pte + i] != scratch_pte)
699 found = true;
700 if (!found)
701 continue;
702
703 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
704 for (i = 0; i < 4; i++) {
705 if (pt_vaddr[pte + i] != scratch_pte)
706 seq_printf(m, " %08x", pt_vaddr[pte + i]);
707 else
708 seq_puts(m, " SCRATCH ");
709 }
710 seq_puts(m, "\n");
711 }
712 kunmap_atomic(pt_vaddr);
713 }
714}
715
461static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 716static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
462{ 717{
463 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 718 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -480,73 +735,235 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
480 readl(pd_addr); 735 readl(pd_addr);
481} 736}
482 737
483static int gen6_ppgtt_enable(struct drm_device *dev) 738static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
739{
740 BUG_ON(ppgtt->pd_offset & 0x3f);
741
742 return (ppgtt->pd_offset / 64) << 16;
743}
744
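get_pd_offset() packs the page-directory location the same way the old gen6_ppgtt_enable() did inline (pd_offset /= 64; pd_offset <<= 16): the offset is expressed in 64-byte cachelines and shifted into the upper 16 bits for the PP_DIR_BASE register. A sketch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as get_pd_offset(): offset in cachelines, at bit 16. */
static uint32_t pack_pd_offset(uint32_t pd_offset_bytes)
{
        assert((pd_offset_bytes & 0x3f) == 0); /* the BUG_ON above: cacheline aligned */
        return (pd_offset_bytes / 64) << 16;
}

int main(void)
{
        /* e.g. a page directory placed 1 MiB into the GGTT */
        printf("PP_DIR_BASE value: %#x\n", (unsigned)pack_pd_offset(1 << 20));
        return 0;
}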
745static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
746 struct intel_ring_buffer *ring,
747 bool synchronous)
748{
749 struct drm_device *dev = ppgtt->base.dev;
750 struct drm_i915_private *dev_priv = dev->dev_private;
751 int ret;
752
753 /* If we're in reset, we can assume the GPU is sufficiently idle to
754 * manually frob these bits. Ideally we could use the ring functions,
755 * except our error handling makes it quite difficult (can't use
756 * intel_ring_begin, ring->flush, or intel_ring_advance)
757 *
758 * FIXME: We should try not to special case reset
759 */
760 if (synchronous ||
761 i915_reset_in_progress(&dev_priv->gpu_error)) {
762 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
763 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
764 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
765 POSTING_READ(RING_PP_DIR_BASE(ring));
766 return 0;
767 }
768
769 /* NB: TLBs must be flushed and invalidated before a switch */
770 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
771 if (ret)
772 return ret;
773
774 ret = intel_ring_begin(ring, 6);
775 if (ret)
776 return ret;
777
778 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
779 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
780 intel_ring_emit(ring, PP_DIR_DCLV_2G);
781 intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
782 intel_ring_emit(ring, get_pd_offset(ppgtt));
783 intel_ring_emit(ring, MI_NOOP);
784 intel_ring_advance(ring);
785
786 return 0;
787}
788
789static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
790 struct intel_ring_buffer *ring,
791 bool synchronous)
792{
793 struct drm_device *dev = ppgtt->base.dev;
794 struct drm_i915_private *dev_priv = dev->dev_private;
795 int ret;
796
797 /* If we're in reset, we can assume the GPU is sufficiently idle to
798 * manually frob these bits. Ideally we could use the ring functions,
799 * except our error handling makes it quite difficult (can't use
800 * intel_ring_begin, ring->flush, or intel_ring_advance)
801 *
802 * FIXME: We should try not to special case reset
803 */
804 if (synchronous ||
805 i915_reset_in_progress(&dev_priv->gpu_error)) {
806 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
807 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
808 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
809 POSTING_READ(RING_PP_DIR_BASE(ring));
810 return 0;
811 }
812
813 /* NB: TLBs must be flushed and invalidated before a switch */
814 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
815 if (ret)
816 return ret;
817
818 ret = intel_ring_begin(ring, 6);
819 if (ret)
820 return ret;
821
822 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
823 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
824 intel_ring_emit(ring, PP_DIR_DCLV_2G);
825 intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
826 intel_ring_emit(ring, get_pd_offset(ppgtt));
827 intel_ring_emit(ring, MI_NOOP);
828 intel_ring_advance(ring);
829
830 /* XXX: RCS is the only one to auto invalidate the TLBs? */
831 if (ring->id != RCS) {
832 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
833 if (ret)
834 return ret;
835 }
836
837 return 0;
838}
839
840static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
841 struct intel_ring_buffer *ring,
842 bool synchronous)
843{
844 struct drm_device *dev = ppgtt->base.dev;
845 struct drm_i915_private *dev_priv = dev->dev_private;
846
847 if (!synchronous)
848 return 0;
849
850 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
851 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
852
853 POSTING_READ(RING_PP_DIR_DCLV(ring));
854
855 return 0;
856}
857
858static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
484{ 859{
485 drm_i915_private_t *dev_priv = dev->dev_private; 860 struct drm_device *dev = ppgtt->base.dev;
486 uint32_t pd_offset; 861 struct drm_i915_private *dev_priv = dev->dev_private;
487 struct intel_ring_buffer *ring; 862 struct intel_ring_buffer *ring;
488 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 863 int j, ret;
489 int i;
490 864
491 BUG_ON(ppgtt->pd_offset & 0x3f); 865 for_each_ring(ring, dev_priv, j) {
866 I915_WRITE(RING_MODE_GEN7(ring),
867 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
492 868
493 gen6_write_pdes(ppgtt); 869 /* We promise to do a switch later with FULL PPGTT. If this is
870 * aliasing, this is the one and only switch we'll do */
871 if (USES_FULL_PPGTT(dev))
872 continue;
494 873
495 pd_offset = ppgtt->pd_offset; 874 ret = ppgtt->switch_mm(ppgtt, ring, true);
496 pd_offset /= 64; /* in cachelines, */ 875 if (ret)
497 pd_offset <<= 16; 876 goto err_out;
877 }
498 878
499 if (INTEL_INFO(dev)->gen == 6) { 879 return 0;
500 uint32_t ecochk, gab_ctl, ecobits;
501 880
502 ecobits = I915_READ(GAC_ECO_BITS); 881err_out:
503 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 882 for_each_ring(ring, dev_priv, j)
504 ECOBITS_PPGTT_CACHE64B); 883 I915_WRITE(RING_MODE_GEN7(ring),
884 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
885 return ret;
886}
505 887
506 gab_ctl = I915_READ(GAB_CTL); 888static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
507 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 889{
890 struct drm_device *dev = ppgtt->base.dev;
891 struct drm_i915_private *dev_priv = dev->dev_private;
892 struct intel_ring_buffer *ring;
893 uint32_t ecochk, ecobits;
894 int i;
508 895
509 ecochk = I915_READ(GAM_ECOCHK); 896 ecobits = I915_READ(GAC_ECO_BITS);
510 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | 897 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
511 ECOCHK_PPGTT_CACHE64B);
512 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
513 } else if (INTEL_INFO(dev)->gen >= 7) {
514 uint32_t ecochk, ecobits;
515 898
516 ecobits = I915_READ(GAC_ECO_BITS); 899 ecochk = I915_READ(GAM_ECOCHK);
517 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 900 if (IS_HASWELL(dev)) {
901 ecochk |= ECOCHK_PPGTT_WB_HSW;
902 } else {
903 ecochk |= ECOCHK_PPGTT_LLC_IVB;
904 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
905 }
906 I915_WRITE(GAM_ECOCHK, ecochk);
518 907
519 ecochk = I915_READ(GAM_ECOCHK); 908 for_each_ring(ring, dev_priv, i) {
520 if (IS_HASWELL(dev)) { 909 int ret;
521 ecochk |= ECOCHK_PPGTT_WB_HSW;
522 } else {
523 ecochk |= ECOCHK_PPGTT_LLC_IVB;
524 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
525 }
526 I915_WRITE(GAM_ECOCHK, ecochk);
527 /* GFX_MODE is per-ring on gen7+ */ 910 /* GFX_MODE is per-ring on gen7+ */
911 I915_WRITE(RING_MODE_GEN7(ring),
912 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
913
914 /* We promise to do a switch later with FULL PPGTT. If this is
915 * aliasing, this is the one and only switch we'll do */
916 if (USES_FULL_PPGTT(dev))
917 continue;
918
919 ret = ppgtt->switch_mm(ppgtt, ring, true);
920 if (ret)
921 return ret;
528 } 922 }
529 923
530 for_each_ring(ring, dev_priv, i) { 924 return 0;
531 if (INTEL_INFO(dev)->gen >= 7) 925}
532 I915_WRITE(RING_MODE_GEN7(ring),
533 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
534 926
535 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 927static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
536 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); 928{
929 struct drm_device *dev = ppgtt->base.dev;
930 struct drm_i915_private *dev_priv = dev->dev_private;
931 struct intel_ring_buffer *ring;
932 uint32_t ecochk, gab_ctl, ecobits;
933 int i;
934
935 ecobits = I915_READ(GAC_ECO_BITS);
936 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
937 ECOBITS_PPGTT_CACHE64B);
938
939 gab_ctl = I915_READ(GAB_CTL);
940 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
941
942 ecochk = I915_READ(GAM_ECOCHK);
943 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
944
945 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
946
947 for_each_ring(ring, dev_priv, i) {
948 int ret = ppgtt->switch_mm(ppgtt, ring, true);
949 if (ret)
950 return ret;
537 } 951 }
952
538 return 0; 953 return 0;
539} 954}
540 955
 541/* PPGTT support for Sandybridge/Gen6 and later */ 956/* PPGTT support for Sandybridge/Gen6 and later */
542static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 957static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
543 unsigned first_entry, 958 uint64_t start,
544 unsigned num_entries, 959 uint64_t length,
545 bool use_scratch) 960 bool use_scratch)
546{ 961{
547 struct i915_hw_ppgtt *ppgtt = 962 struct i915_hw_ppgtt *ppgtt =
548 container_of(vm, struct i915_hw_ppgtt, base); 963 container_of(vm, struct i915_hw_ppgtt, base);
549 gen6_gtt_pte_t *pt_vaddr, scratch_pte; 964 gen6_gtt_pte_t *pt_vaddr, scratch_pte;
965 unsigned first_entry = start >> PAGE_SHIFT;
966 unsigned num_entries = length >> PAGE_SHIFT;
550 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 967 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
551 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 968 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
552 unsigned last_pte, i; 969 unsigned last_pte, i;
@@ -573,12 +990,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
573 990
574static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, 991static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
575 struct sg_table *pages, 992 struct sg_table *pages,
576 unsigned first_entry, 993 uint64_t start,
577 enum i915_cache_level cache_level) 994 enum i915_cache_level cache_level)
578{ 995{
579 struct i915_hw_ppgtt *ppgtt = 996 struct i915_hw_ppgtt *ppgtt =
580 container_of(vm, struct i915_hw_ppgtt, base); 997 container_of(vm, struct i915_hw_ppgtt, base);
581 gen6_gtt_pte_t *pt_vaddr; 998 gen6_gtt_pte_t *pt_vaddr;
999 unsigned first_entry = start >> PAGE_SHIFT;
582 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 1000 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
583 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 1001 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
584 struct sg_page_iter sg_iter; 1002 struct sg_page_iter sg_iter;
@@ -602,65 +1020,130 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
602 kunmap_atomic(pt_vaddr); 1020 kunmap_atomic(pt_vaddr);
603} 1021}
604 1022
605static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1023static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
606{ 1024{
607 struct i915_hw_ppgtt *ppgtt =
608 container_of(vm, struct i915_hw_ppgtt, base);
609 int i; 1025 int i;
610 1026
611 drm_mm_takedown(&ppgtt->base.mm);
612
613 if (ppgtt->pt_dma_addr) { 1027 if (ppgtt->pt_dma_addr) {
614 for (i = 0; i < ppgtt->num_pd_entries; i++) 1028 for (i = 0; i < ppgtt->num_pd_entries; i++)
615 pci_unmap_page(ppgtt->base.dev->pdev, 1029 pci_unmap_page(ppgtt->base.dev->pdev,
616 ppgtt->pt_dma_addr[i], 1030 ppgtt->pt_dma_addr[i],
617 4096, PCI_DMA_BIDIRECTIONAL); 1031 4096, PCI_DMA_BIDIRECTIONAL);
618 } 1032 }
1033}
1034
1035static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
1036{
1037 int i;
619 1038
620 kfree(ppgtt->pt_dma_addr); 1039 kfree(ppgtt->pt_dma_addr);
621 for (i = 0; i < ppgtt->num_pd_entries; i++) 1040 for (i = 0; i < ppgtt->num_pd_entries; i++)
622 __free_page(ppgtt->pt_pages[i]); 1041 __free_page(ppgtt->pt_pages[i]);
623 kfree(ppgtt->pt_pages); 1042 kfree(ppgtt->pt_pages);
624 kfree(ppgtt);
625} 1043}
626 1044
627static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 1045static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
628{ 1046{
1047 struct i915_hw_ppgtt *ppgtt =
1048 container_of(vm, struct i915_hw_ppgtt, base);
1049
1050 list_del(&vm->global_link);
1051 drm_mm_takedown(&ppgtt->base.mm);
1052 drm_mm_remove_node(&ppgtt->node);
1053
1054 gen6_ppgtt_unmap_pages(ppgtt);
1055 gen6_ppgtt_free(ppgtt);
1056}
1057
1058static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1059{
1060#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
1061#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
629 struct drm_device *dev = ppgtt->base.dev; 1062 struct drm_device *dev = ppgtt->base.dev;
630 struct drm_i915_private *dev_priv = dev->dev_private; 1063 struct drm_i915_private *dev_priv = dev->dev_private;
631 unsigned first_pd_entry_in_global_pt; 1064 bool retried = false;
632 int i; 1065 int ret;
633 int ret = -ENOMEM;
634 1066
 635 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 1067 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
636 * entries. For aliasing ppgtt support we just steal them at the end for 1068 * allocator works in address space sizes, so it's multiplied by page
637 * now. */ 1069 * size. We allocate at the top of the GTT to avoid fragmentation.
638 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 1070 */
1071 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
1072alloc:
1073 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
1074 &ppgtt->node, GEN6_PD_SIZE,
1075 GEN6_PD_ALIGN, 0,
1076 0, dev_priv->gtt.base.total,
1077 DRM_MM_SEARCH_DEFAULT,
1078 DRM_MM_CREATE_DEFAULT);
1079 if (ret == -ENOSPC && !retried) {
1080 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1081 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1082 I915_CACHE_NONE, 0);
1083 if (ret)
1084 return ret;
1085
1086 retried = true;
1087 goto alloc;
1088 }
1089
1090 if (ppgtt->node.start < dev_priv->gtt.mappable_end)
1091 DRM_DEBUG("Forced to use aperture for PDEs\n");
639 1092
640 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
641 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; 1093 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
642 ppgtt->enable = gen6_ppgtt_enable; 1094 return ret;
643 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 1095}
644 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 1096
645 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 1097static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
646 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 1098{
647 ppgtt->base.start = 0; 1099 int i;
648 ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; 1100
649 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), 1101 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
650 GFP_KERNEL); 1102 GFP_KERNEL);
1103
651 if (!ppgtt->pt_pages) 1104 if (!ppgtt->pt_pages)
652 return -ENOMEM; 1105 return -ENOMEM;
653 1106
654 for (i = 0; i < ppgtt->num_pd_entries; i++) { 1107 for (i = 0; i < ppgtt->num_pd_entries; i++) {
655 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); 1108 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
656 if (!ppgtt->pt_pages[i]) 1109 if (!ppgtt->pt_pages[i]) {
657 goto err_pt_alloc; 1110 gen6_ppgtt_free(ppgtt);
1111 return -ENOMEM;
1112 }
1113 }
1114
1115 return 0;
1116}
1117
1118static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1119{
1120 int ret;
1121
1122 ret = gen6_ppgtt_allocate_page_directories(ppgtt);
1123 if (ret)
1124 return ret;
1125
1126 ret = gen6_ppgtt_allocate_page_tables(ppgtt);
1127 if (ret) {
1128 drm_mm_remove_node(&ppgtt->node);
1129 return ret;
658 } 1130 }
659 1131
660 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t), 1132 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
661 GFP_KERNEL); 1133 GFP_KERNEL);
662 if (!ppgtt->pt_dma_addr) 1134 if (!ppgtt->pt_dma_addr) {
663 goto err_pt_alloc; 1135 drm_mm_remove_node(&ppgtt->node);
1136 gen6_ppgtt_free(ppgtt);
1137 return -ENOMEM;
1138 }
1139
1140 return 0;
1141}
1142
1143static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
1144{
1145 struct drm_device *dev = ppgtt->base.dev;
1146 int i;
664 1147
665 for (i = 0; i < ppgtt->num_pd_entries; i++) { 1148 for (i = 0; i < ppgtt->num_pd_entries; i++) {
666 dma_addr_t pt_addr; 1149 dma_addr_t pt_addr;
@@ -669,48 +1152,71 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
669 PCI_DMA_BIDIRECTIONAL); 1152 PCI_DMA_BIDIRECTIONAL);
670 1153
671 if (pci_dma_mapping_error(dev->pdev, pt_addr)) { 1154 if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
672 ret = -EIO; 1155 gen6_ppgtt_unmap_pages(ppgtt);
673 goto err_pd_pin; 1156 return -EIO;
674
675 } 1157 }
1158
676 ppgtt->pt_dma_addr[i] = pt_addr; 1159 ppgtt->pt_dma_addr[i] = pt_addr;
677 } 1160 }
678 1161
679 ppgtt->base.clear_range(&ppgtt->base, 0, 1162 return 0;
680 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); 1163}
681 1164
682 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 1165static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1166{
1167 struct drm_device *dev = ppgtt->base.dev;
1168 struct drm_i915_private *dev_priv = dev->dev_private;
1169 int ret;
683 1170
684 return 0; 1171 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1172 if (IS_GEN6(dev)) {
1173 ppgtt->enable = gen6_ppgtt_enable;
1174 ppgtt->switch_mm = gen6_mm_switch;
1175 } else if (IS_HASWELL(dev)) {
1176 ppgtt->enable = gen7_ppgtt_enable;
1177 ppgtt->switch_mm = hsw_mm_switch;
1178 } else if (IS_GEN7(dev)) {
1179 ppgtt->enable = gen7_ppgtt_enable;
1180 ppgtt->switch_mm = gen7_mm_switch;
1181 } else
1182 BUG();
685 1183
686err_pd_pin: 1184 ret = gen6_ppgtt_alloc(ppgtt);
687 if (ppgtt->pt_dma_addr) { 1185 if (ret)
688 for (i--; i >= 0; i--) 1186 return ret;
689 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], 1187
690 4096, PCI_DMA_BIDIRECTIONAL); 1188 ret = gen6_ppgtt_setup_page_tables(ppgtt);
691 } 1189 if (ret) {
692err_pt_alloc: 1190 gen6_ppgtt_free(ppgtt);
693 kfree(ppgtt->pt_dma_addr); 1191 return ret;
694 for (i = 0; i < ppgtt->num_pd_entries; i++) {
695 if (ppgtt->pt_pages[i])
696 __free_page(ppgtt->pt_pages[i]);
697 } 1192 }
698 kfree(ppgtt->pt_pages);
699 1193
700 return ret; 1194 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1195 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1196 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1197 ppgtt->base.start = 0;
1198 ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
1199 ppgtt->debug_dump = gen6_dump_ppgtt;
1200
1201 ppgtt->pd_offset =
1202 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
1203
1204 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
1205
1206 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
1207 ppgtt->node.size >> 20,
1208 ppgtt->node.start / PAGE_SIZE);
1209
1210 return 0;
701} 1211}
702 1212
703static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 1213int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
704{ 1214{
705 struct drm_i915_private *dev_priv = dev->dev_private; 1215 struct drm_i915_private *dev_priv = dev->dev_private;
706 struct i915_hw_ppgtt *ppgtt; 1216 int ret = 0;
707 int ret;
708
709 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
710 if (!ppgtt)
711 return -ENOMEM;
712 1217
713 ppgtt->base.dev = dev; 1218 ppgtt->base.dev = dev;
1219 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
714 1220
715 if (INTEL_INFO(dev)->gen < 8) 1221 if (INTEL_INFO(dev)->gen < 8)
716 ret = gen6_ppgtt_init(ppgtt); 1222 ret = gen6_ppgtt_init(ppgtt);
@@ -719,45 +1225,37 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
719 else 1225 else
720 BUG(); 1226 BUG();
721 1227
722 if (ret) 1228 if (!ret) {
723 kfree(ppgtt); 1229 struct drm_i915_private *dev_priv = dev->dev_private;
724 else { 1230 kref_init(&ppgtt->ref);
725 dev_priv->mm.aliasing_ppgtt = ppgtt;
726 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 1231 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
727 ppgtt->base.total); 1232 ppgtt->base.total);
1233 i915_init_vm(dev_priv, &ppgtt->base);
1234 if (INTEL_INFO(dev)->gen < 8) {
1235 gen6_write_pdes(ppgtt);
1236 DRM_DEBUG("Adding PPGTT at offset %x\n",
1237 ppgtt->pd_offset << 10);
1238 }
728 } 1239 }
729 1240
730 return ret; 1241 return ret;
731} 1242}
732 1243
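i915_gem_init_ppgtt is now a generic constructor: it picks the per-generation enable/switch_mm callbacks, refcounts the PPGTT with a kref, registers the VM, and (for gen < 8) writes the PDEs into the GGTT. A standalone sketch of the function-pointer dispatch, using simplified stand-ins for the driver structures rather than the real i915 types:

#include <stdio.h>
#include <stdlib.h>

struct ppgtt;
typedef int (*mm_switch_fn)(struct ppgtt *);

struct ppgtt {
	int gen;		/* 6 or 7 in this model */
	int is_haswell;
	mm_switch_fn switch_mm;
};

static int gen6_mm_switch(struct ppgtt *p) { (void)p; puts("gen6 switch"); return 0; }
static int gen7_mm_switch(struct ppgtt *p) { (void)p; puts("gen7 switch"); return 0; }
static int hsw_mm_switch(struct ppgtt *p)  { (void)p; puts("hsw switch");  return 0; }

static void pick_mm_switch(struct ppgtt *p)
{
	if (p->gen == 6)
		p->switch_mm = gen6_mm_switch;
	else if (p->is_haswell)
		p->switch_mm = hsw_mm_switch;
	else if (p->gen == 7)
		p->switch_mm = gen7_mm_switch;
	else
		abort();	/* mirrors the BUG() fallback in the patch */
}

int main(void)
{
	struct ppgtt p = { .gen = 7, .is_haswell = 1, .switch_mm = NULL };

	pick_mm_switch(&p);
	return p.switch_mm(&p);
}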
733void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) 1244static void
1245ppgtt_bind_vma(struct i915_vma *vma,
1246 enum i915_cache_level cache_level,
1247 u32 flags)
734{ 1248{
735 struct drm_i915_private *dev_priv = dev->dev_private; 1249 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
736 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1250 cache_level);
737
738 if (!ppgtt)
739 return;
740
741 ppgtt->base.cleanup(&ppgtt->base);
742 dev_priv->mm.aliasing_ppgtt = NULL;
743} 1251}
744 1252
745void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 1253static void ppgtt_unbind_vma(struct i915_vma *vma)
746 struct drm_i915_gem_object *obj,
747 enum i915_cache_level cache_level)
748{ 1254{
749 ppgtt->base.insert_entries(&ppgtt->base, obj->pages, 1255 vma->vm->clear_range(vma->vm,
750 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 1256 vma->node.start,
751 cache_level); 1257 vma->obj->base.size,
752} 1258 true);
753
754void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
755 struct drm_i915_gem_object *obj)
756{
757 ppgtt->base.clear_range(&ppgtt->base,
758 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
759 obj->base.size >> PAGE_SHIFT,
760 true);
761} 1259}
762 1260
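ppgtt_bind_vma and ppgtt_unbind_vma replace the object-based i915_ppgtt_bind_object/unbind helpers, and they hand clear_range and insert_entries byte addresses (vma->node.start, obj->base.size) rather than pre-shifted PTE indices; as the signature changes further down show, the backends now do the PAGE_SHIFT conversion themselves. A small self-contained sketch of that API shape:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

static void clear_range(uint64_t start, uint64_t length)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;

	printf("scrub PTEs [%u, %u)\n", first_entry, first_entry + num_entries);
}

int main(void)
{
	/* Old API: clear_range(vm, offset >> PAGE_SHIFT, size >> PAGE_SHIFT, ...)
	 * New API: pass byte offsets; the backend shifts internally. */
	clear_range(3ull * PAGE_SIZE, 2ull * PAGE_SIZE);
	return 0;
}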
763extern int intel_iommu_gfx_mapped; 1261extern int intel_iommu_gfx_mapped;
@@ -840,8 +1338,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
840 i915_check_and_clear_faults(dev); 1338 i915_check_and_clear_faults(dev);
841 1339
842 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1340 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
843 dev_priv->gtt.base.start / PAGE_SIZE, 1341 dev_priv->gtt.base.start,
844 dev_priv->gtt.base.total / PAGE_SIZE, 1342 dev_priv->gtt.base.total,
845 true); 1343 true);
846} 1344}
847 1345
@@ -849,18 +1347,46 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
849{ 1347{
850 struct drm_i915_private *dev_priv = dev->dev_private; 1348 struct drm_i915_private *dev_priv = dev->dev_private;
851 struct drm_i915_gem_object *obj; 1349 struct drm_i915_gem_object *obj;
1350 struct i915_address_space *vm;
852 1351
853 i915_check_and_clear_faults(dev); 1352 i915_check_and_clear_faults(dev);
854 1353
855 /* First fill our portion of the GTT with scratch pages */ 1354 /* First fill our portion of the GTT with scratch pages */
856 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1355 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
857 dev_priv->gtt.base.start / PAGE_SIZE, 1356 dev_priv->gtt.base.start,
858 dev_priv->gtt.base.total / PAGE_SIZE, 1357 dev_priv->gtt.base.total,
859 true); 1358 true);
860 1359
861 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1360 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1361 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
1362 &dev_priv->gtt.base);
1363 if (!vma)
1364 continue;
1365
862 i915_gem_clflush_object(obj, obj->pin_display); 1366 i915_gem_clflush_object(obj, obj->pin_display);
863 i915_gem_gtt_bind_object(obj, obj->cache_level); 1367 /* The bind_vma code tries to be smart about tracking mappings.
 1368 * Unfortunately, we've just wiped out the mappings above
1369 * without telling our object about it. So we need to fake it.
1370 */
1371 obj->has_global_gtt_mapping = 0;
1372 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1373 }
1374
1375
1376 if (INTEL_INFO(dev)->gen >= 8) {
1377 gen8_setup_private_ppat(dev_priv);
1378 return;
1379 }
1380
1381 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1382 /* TODO: Perhaps it shouldn't be gen6 specific */
1383 if (i915_is_ggtt(vm)) {
1384 if (dev_priv->mm.aliasing_ppgtt)
1385 gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1386 continue;
1387 }
1388
1389 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
864 } 1390 }
865 1391
866 i915_gem_chipset_flush(dev); 1392 i915_gem_chipset_flush(dev);
@@ -891,10 +1417,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
891 1417
892static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 1418static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
893 struct sg_table *st, 1419 struct sg_table *st,
894 unsigned int first_entry, 1420 uint64_t start,
895 enum i915_cache_level level) 1421 enum i915_cache_level level)
896{ 1422{
897 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1423 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1424 unsigned first_entry = start >> PAGE_SHIFT;
898 gen8_gtt_pte_t __iomem *gtt_entries = 1425 gen8_gtt_pte_t __iomem *gtt_entries =
899 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 1426 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
900 int i = 0; 1427 int i = 0;
@@ -936,10 +1463,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
936 */ 1463 */
937static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 1464static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
938 struct sg_table *st, 1465 struct sg_table *st,
939 unsigned int first_entry, 1466 uint64_t start,
940 enum i915_cache_level level) 1467 enum i915_cache_level level)
941{ 1468{
942 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1469 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1470 unsigned first_entry = start >> PAGE_SHIFT;
943 gen6_gtt_pte_t __iomem *gtt_entries = 1471 gen6_gtt_pte_t __iomem *gtt_entries =
944 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 1472 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
945 int i = 0; 1473 int i = 0;
@@ -971,11 +1499,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
971} 1499}
972 1500
973static void gen8_ggtt_clear_range(struct i915_address_space *vm, 1501static void gen8_ggtt_clear_range(struct i915_address_space *vm,
974 unsigned int first_entry, 1502 uint64_t start,
975 unsigned int num_entries, 1503 uint64_t length,
976 bool use_scratch) 1504 bool use_scratch)
977{ 1505{
978 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1506 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1507 unsigned first_entry = start >> PAGE_SHIFT;
1508 unsigned num_entries = length >> PAGE_SHIFT;
979 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = 1509 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
980 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 1510 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
981 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 1511 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -995,11 +1525,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
995} 1525}
996 1526
997static void gen6_ggtt_clear_range(struct i915_address_space *vm, 1527static void gen6_ggtt_clear_range(struct i915_address_space *vm,
998 unsigned int first_entry, 1528 uint64_t start,
999 unsigned int num_entries, 1529 uint64_t length,
1000 bool use_scratch) 1530 bool use_scratch)
1001{ 1531{
1002 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1532 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1533 unsigned first_entry = start >> PAGE_SHIFT;
1534 unsigned num_entries = length >> PAGE_SHIFT;
1003 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 1535 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
1004 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 1536 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1005 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 1537 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -1017,53 +1549,103 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1017 readl(gtt_base); 1549 readl(gtt_base);
1018} 1550}
1019 1551
1020static void i915_ggtt_insert_entries(struct i915_address_space *vm, 1552
1021 struct sg_table *st, 1553static void i915_ggtt_bind_vma(struct i915_vma *vma,
1022 unsigned int pg_start, 1554 enum i915_cache_level cache_level,
1023 enum i915_cache_level cache_level) 1555 u32 unused)
1024{ 1556{
1557 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1025 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 1558 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1026 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 1559 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
1027 1560
1028 intel_gtt_insert_sg_entries(st, pg_start, flags); 1561 BUG_ON(!i915_is_ggtt(vma->vm));
1029 1562 intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1563 vma->obj->has_global_gtt_mapping = 1;
1030} 1564}
1031 1565
1032static void i915_ggtt_clear_range(struct i915_address_space *vm, 1566static void i915_ggtt_clear_range(struct i915_address_space *vm,
1033 unsigned int first_entry, 1567 uint64_t start,
1034 unsigned int num_entries, 1568 uint64_t length,
1035 bool unused) 1569 bool unused)
1036{ 1570{
1571 unsigned first_entry = start >> PAGE_SHIFT;
1572 unsigned num_entries = length >> PAGE_SHIFT;
1037 intel_gtt_clear_range(first_entry, num_entries); 1573 intel_gtt_clear_range(first_entry, num_entries);
1038} 1574}
1039 1575
1576static void i915_ggtt_unbind_vma(struct i915_vma *vma)
1577{
1578 const unsigned int first = vma->node.start >> PAGE_SHIFT;
1579 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
1580
1581 BUG_ON(!i915_is_ggtt(vma->vm));
1582 vma->obj->has_global_gtt_mapping = 0;
1583 intel_gtt_clear_range(first, size);
1584}
1040 1585
1041void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 1586static void ggtt_bind_vma(struct i915_vma *vma,
1042 enum i915_cache_level cache_level) 1587 enum i915_cache_level cache_level,
1588 u32 flags)
1043{ 1589{
1044 struct drm_device *dev = obj->base.dev; 1590 struct drm_device *dev = vma->vm->dev;
1045 struct drm_i915_private *dev_priv = dev->dev_private; 1591 struct drm_i915_private *dev_priv = dev->dev_private;
1046 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1592 struct drm_i915_gem_object *obj = vma->obj;
1047 1593
1048 dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, 1594 /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1049 entry, 1595 * or we have a global mapping already but the cacheability flags have
1050 cache_level); 1596 * changed, set the global PTEs.
1597 *
1598 * If there is an aliasing PPGTT it is anecdotally faster, so use that
1599 * instead if none of the above hold true.
1600 *
1601 * NB: A global mapping should only be needed for special regions like
1602 * "gtt mappable", SNB errata, or if specified via special execbuf
1603 * flags. At all other times, the GPU will use the aliasing PPGTT.
1604 */
1605 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1606 if (!obj->has_global_gtt_mapping ||
1607 (cache_level != obj->cache_level)) {
1608 vma->vm->insert_entries(vma->vm, obj->pages,
1609 vma->node.start,
1610 cache_level);
1611 obj->has_global_gtt_mapping = 1;
1612 }
1613 }
1051 1614
1052 obj->has_global_gtt_mapping = 1; 1615 if (dev_priv->mm.aliasing_ppgtt &&
1616 (!obj->has_aliasing_ppgtt_mapping ||
1617 (cache_level != obj->cache_level))) {
1618 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1619 appgtt->base.insert_entries(&appgtt->base,
1620 vma->obj->pages,
1621 vma->node.start,
1622 cache_level);
1623 vma->obj->has_aliasing_ppgtt_mapping = 1;
1624 }
1053} 1625}
1054 1626
1055void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 1627static void ggtt_unbind_vma(struct i915_vma *vma)
1056{ 1628{
1057 struct drm_device *dev = obj->base.dev; 1629 struct drm_device *dev = vma->vm->dev;
1058 struct drm_i915_private *dev_priv = dev->dev_private; 1630 struct drm_i915_private *dev_priv = dev->dev_private;
1059 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1631 struct drm_i915_gem_object *obj = vma->obj;
1060 1632
1061 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1633 if (obj->has_global_gtt_mapping) {
1062 entry, 1634 vma->vm->clear_range(vma->vm,
1063 obj->base.size >> PAGE_SHIFT, 1635 vma->node.start,
1064 true); 1636 obj->base.size,
1637 true);
1638 obj->has_global_gtt_mapping = 0;
1639 }
1065 1640
1066 obj->has_global_gtt_mapping = 0; 1641 if (obj->has_aliasing_ppgtt_mapping) {
1642 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1643 appgtt->base.clear_range(&appgtt->base,
1644 vma->node.start,
1645 obj->base.size,
1646 true);
1647 obj->has_aliasing_ppgtt_mapping = 0;
1648 }
1067} 1649}
1068 1650
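The comment block in ggtt_bind_vma describes a two-way policy: write global GTT PTEs only when there is no aliasing PPGTT, a GLOBAL_BIND was requested, or the cacheability changed; write aliasing PPGTT PTEs whenever one exists and the object's PPGTT mapping is missing or stale. A decision-table sketch of that logic, with boolean stand-ins for the driver state:

#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND 0x1	/* mirrors the flag checked in the patch */

struct obj_state {
	bool has_global_gtt_mapping;
	bool has_aliasing_ppgtt_mapping;
	bool cache_changed;	/* stand-in for cache_level != obj->cache_level */
};

static void bind(struct obj_state *o, bool has_aliasing_ppgtt, unsigned flags)
{
	if (!has_aliasing_ppgtt || (flags & GLOBAL_BIND)) {
		if (!o->has_global_gtt_mapping || o->cache_changed) {
			puts("write global GTT PTEs");
			o->has_global_gtt_mapping = true;
		}
	}

	if (has_aliasing_ppgtt &&
	    (!o->has_aliasing_ppgtt_mapping || o->cache_changed)) {
		puts("write aliasing PPGTT PTEs");
		o->has_aliasing_ppgtt_mapping = true;
	}
}

int main(void)
{
	struct obj_state o = { 0 };

	bind(&o, true, 0);		/* default: aliasing PPGTT only */
	bind(&o, true, GLOBAL_BIND);	/* execbuf asked for a global mapping */
	return 0;
}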
1069void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 1651void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -1145,29 +1727,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1145 1727
1146 /* Clear any non-preallocated blocks */ 1728 /* Clear any non-preallocated blocks */
1147 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { 1729 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
1148 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
1149 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 1730 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
1150 hole_start, hole_end); 1731 hole_start, hole_end);
1151 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); 1732 ggtt_vm->clear_range(ggtt_vm, hole_start,
1733 hole_end - hole_start, true);
1152 } 1734 }
1153 1735
1154 /* And finally clear the reserved guard page */ 1736 /* And finally clear the reserved guard page */
1155 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); 1737 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
1156}
1157
1158static bool
1159intel_enable_ppgtt(struct drm_device *dev)
1160{
1161 if (i915_enable_ppgtt >= 0)
1162 return i915_enable_ppgtt;
1163
1164#ifdef CONFIG_INTEL_IOMMU
1165 /* Disable ppgtt on SNB if VT-d is on. */
1166 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
1167 return false;
1168#endif
1169
1170 return true;
1171} 1738}
1172 1739
1173void i915_gem_init_global_gtt(struct drm_device *dev) 1740void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1178,26 +1745,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
1178 gtt_size = dev_priv->gtt.base.total; 1745 gtt_size = dev_priv->gtt.base.total;
1179 mappable_size = dev_priv->gtt.mappable_end; 1746 mappable_size = dev_priv->gtt.mappable_end;
1180 1747
1181 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
1182 int ret;
1183
1184 if (INTEL_INFO(dev)->gen <= 7) {
1185 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1186 * aperture accordingly when using aliasing ppgtt. */
1187 gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
1188 }
1189
1190 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1191
1192 ret = i915_gem_init_aliasing_ppgtt(dev);
1193 if (!ret)
1194 return;
1195
1196 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
1197 drm_mm_takedown(&dev_priv->gtt.base.mm);
1198 if (INTEL_INFO(dev)->gen < 8)
1199 gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
1200 }
1201 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1748 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1202} 1749}
1203 1750
@@ -1252,11 +1799,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1252 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 1799 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1253 if (bdw_gmch_ctl) 1800 if (bdw_gmch_ctl)
1254 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1801 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1255 if (bdw_gmch_ctl > 4) {
1256 WARN_ON(!i915_preliminary_hw_support);
1257 return 4<<20;
1258 }
1259
1260 return bdw_gmch_ctl << 20; 1802 return bdw_gmch_ctl << 20;
1261} 1803}
1262 1804
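With the preliminary-hardware 4 MiB clamp dropped, gen8_get_total_gtt_size becomes a pure decode: mask the GGMS field, map a nonzero value v to 1 << v, then scale from MiB to bytes. A worked sketch of the decode; the mask value is an assumption for illustration, and the driver shifts the real GGMS bits into place before this point:

#include <stdint.h>
#include <stdio.h>

#define BDW_GMCH_GGMS_MASK 0x3	/* assumed 2-bit field mask */

static unsigned int gen8_total_gtt_size(uint16_t gmch_ctl)
{
	gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (gmch_ctl)
		gmch_ctl = 1 << gmch_ctl;
	return (unsigned int)gmch_ctl << 20;	/* MiB -> bytes */
}

int main(void)
{
	uint16_t v;

	for (v = 0; v <= 3; v++)
		printf("GGMS=%u -> %u MiB\n", v, gen8_total_gtt_size(v) >> 20);
	return 0;
}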
@@ -1438,7 +1980,6 @@ static int i915_gmch_probe(struct drm_device *dev,
1438 1980
1439 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 1981 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
1440 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 1982 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
1441 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
1442 1983
1443 if (unlikely(dev_priv->gtt.do_idle_maps)) 1984 if (unlikely(dev_priv->gtt.do_idle_maps))
1444 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 1985 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -1493,3 +2034,62 @@ int i915_gem_gtt_init(struct drm_device *dev)
1493 2034
1494 return 0; 2035 return 0;
1495} 2036}
2037
2038static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2039 struct i915_address_space *vm)
2040{
2041 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
2042 if (vma == NULL)
2043 return ERR_PTR(-ENOMEM);
2044
2045 INIT_LIST_HEAD(&vma->vma_link);
2046 INIT_LIST_HEAD(&vma->mm_list);
2047 INIT_LIST_HEAD(&vma->exec_list);
2048 vma->vm = vm;
2049 vma->obj = obj;
2050
2051 switch (INTEL_INFO(vm->dev)->gen) {
2052 case 8:
2053 case 7:
2054 case 6:
2055 if (i915_is_ggtt(vm)) {
2056 vma->unbind_vma = ggtt_unbind_vma;
2057 vma->bind_vma = ggtt_bind_vma;
2058 } else {
2059 vma->unbind_vma = ppgtt_unbind_vma;
2060 vma->bind_vma = ppgtt_bind_vma;
2061 }
2062 break;
2063 case 5:
2064 case 4:
2065 case 3:
2066 case 2:
2067 BUG_ON(!i915_is_ggtt(vm));
2068 vma->unbind_vma = i915_ggtt_unbind_vma;
2069 vma->bind_vma = i915_ggtt_bind_vma;
2070 break;
2071 default:
2072 BUG();
2073 }
2074
2075 /* Keep GGTT vmas first to make debug easier */
2076 if (i915_is_ggtt(vm))
2077 list_add(&vma->vma_link, &obj->vma_list);
2078 else
2079 list_add_tail(&vma->vma_link, &obj->vma_list);
2080
2081 return vma;
2082}
2083
2084struct i915_vma *
2085i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2086 struct i915_address_space *vm)
2087{
2088 struct i915_vma *vma;
2089
2090 vma = i915_gem_obj_to_vma(obj, vm);
2091 if (!vma)
2092 vma = __i915_gem_vma_create(obj, vm);
2093
2094 return vma;
2095}
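i915_gem_obj_lookup_or_create_vma is the standard lookup-or-create idiom over obj->vma_list, with GGTT VMAs kept at the head of the list for easier debugging. A plain-C model of the same shape, using a hand-rolled singly linked list instead of the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

struct vm { int id; };			/* stand-in for i915_address_space */
struct vma { struct vm *vm; struct vma *next; };
struct object { struct vma *vma_list; };

static struct vma *obj_to_vma(struct object *obj, struct vm *vm)
{
	struct vma *v;

	for (v = obj->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

static struct vma *lookup_or_create_vma(struct object *obj, struct vm *vm)
{
	struct vma *v = obj_to_vma(obj, vm);

	if (!v) {
		v = calloc(1, sizeof(*v));
		if (!v)
			return NULL;
		v->vm = vm;
		v->next = obj->vma_list;	/* head insert ~ GGTT-first */
		obj->vma_list = v;
	}
	return v;
}

int main(void)
{
	struct vm ggtt = { 0 };
	struct object obj = { 0 };
	struct vma *a = lookup_or_create_vma(&obj, &ggtt);
	struct vma *b = lookup_or_create_vma(&obj, &ggtt);

	printf("same vma returned: %s\n", a == b ? "yes" : "no");
	return 0;
}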
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 28d24caa49f3..62ef55ba061c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -215,7 +215,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
215 int bios_reserved = 0; 215 int bios_reserved = 0;
216 216
217#ifdef CONFIG_INTEL_IOMMU 217#ifdef CONFIG_INTEL_IOMMU
218 if (intel_iommu_gfx_mapped) { 218 if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
219 DRM_INFO("DMAR active, disabling use of stolen memory\n"); 219 DRM_INFO("DMAR active, disabling use of stolen memory\n");
220 return 0; 220 return 0;
221 } 221 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b13905348048..cb150e8b4336 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -87,7 +87,7 @@
87void 87void
88i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 88i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
89{ 89{
90 drm_i915_private_t *dev_priv = dev->dev_private; 90 struct drm_i915_private *dev_priv = dev->dev_private;
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
93 93
@@ -294,7 +294,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
294 struct drm_file *file) 294 struct drm_file *file)
295{ 295{
296 struct drm_i915_gem_set_tiling *args = data; 296 struct drm_i915_gem_set_tiling *args = data;
297 drm_i915_private_t *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = dev->dev_private;
298 struct drm_i915_gem_object *obj; 298 struct drm_i915_gem_object *obj;
299 int ret = 0; 299 int ret = 0;
300 300
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
308 return -EINVAL; 308 return -EINVAL;
309 } 309 }
310 310
311 if (obj->pin_count || obj->framebuffer_references) { 311 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
312 drm_gem_object_unreference_unlocked(&obj->base); 312 drm_gem_object_unreference_unlocked(&obj->base);
313 return -EBUSY; 313 return -EBUSY;
314 } 314 }
@@ -415,7 +415,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
415 struct drm_file *file) 415 struct drm_file *file)
416{ 416{
417 struct drm_i915_gem_get_tiling *args = data; 417 struct drm_i915_gem_get_tiling *args = data;
418 drm_i915_private_t *dev_priv = dev->dev_private; 418 struct drm_i915_private *dev_priv = dev->dev_private;
419 struct drm_i915_gem_object *obj; 419 struct drm_i915_gem_object *obj;
420 420
421 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 421 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 990cf8f43efd..12f1d43b2d68 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
238 238
239static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 239static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
240 struct drm_device *dev, 240 struct drm_device *dev,
241 struct drm_i915_error_state *error, 241 struct drm_i915_error_ring *ring)
242 unsigned ring)
243{ 242{
244 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 243 if (!ring->valid)
245 if (!error->ring[ring].valid)
246 return; 244 return;
247 245
248 err_printf(m, "%s command stream:\n", ring_str(ring)); 246 err_printf(m, " HEAD: 0x%08x\n", ring->head);
249 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 247 err_printf(m, " TAIL: 0x%08x\n", ring->tail);
250 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 248 err_printf(m, " CTL: 0x%08x\n", ring->ctl);
251 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 249 err_printf(m, " HWS: 0x%08x\n", ring->hws);
252 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 250 err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
253 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 251 err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
254 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 252 err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
255 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 253 err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
256 if (INTEL_INFO(dev)->gen >= 4) { 254 if (INTEL_INFO(dev)->gen >= 4) {
257 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]); 255 err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
258 err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); 256 err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
259 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 257 err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
260 } 258 }
261 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 259 err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
262 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 260 err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
263 if (INTEL_INFO(dev)->gen >= 6) { 261 if (INTEL_INFO(dev)->gen >= 6) {
264 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 262 err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
265 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 263 err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
266 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 264 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
267 error->semaphore_mboxes[ring][0], 265 ring->semaphore_mboxes[0],
268 error->semaphore_seqno[ring][0]); 266 ring->semaphore_seqno[0]);
269 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 267 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
270 error->semaphore_mboxes[ring][1], 268 ring->semaphore_mboxes[1],
271 error->semaphore_seqno[ring][1]); 269 ring->semaphore_seqno[1]);
272 if (HAS_VEBOX(dev)) { 270 if (HAS_VEBOX(dev)) {
273 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", 271 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
274 error->semaphore_mboxes[ring][2], 272 ring->semaphore_mboxes[2],
275 error->semaphore_seqno[ring][2]); 273 ring->semaphore_seqno[2]);
276 } 274 }
277 } 275 }
278 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 276 if (USES_PPGTT(dev)) {
279 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 277 err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
280 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 278
281 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 279 if (INTEL_INFO(dev)->gen >= 8) {
280 int i;
281 for (i = 0; i < 4; i++)
282 err_printf(m, " PDP%d: 0x%016llx\n",
283 i, ring->vm_info.pdp[i]);
284 } else {
285 err_printf(m, " PP_DIR_BASE: 0x%08x\n",
286 ring->vm_info.pp_dir_base);
287 }
288 }
289 err_printf(m, " seqno: 0x%08x\n", ring->seqno);
290 err_printf(m, " waiting: %s\n", yesno(ring->waiting));
291 err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
292 err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
282 err_printf(m, " hangcheck: %s [%d]\n", 293 err_printf(m, " hangcheck: %s [%d]\n",
283 hangcheck_action_to_str(error->hangcheck_action[ring]), 294 hangcheck_action_to_str(ring->hangcheck_action),
284 error->hangcheck_score[ring]); 295 ring->hangcheck_score);
285} 296}
286 297
287void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 298void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -293,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
293 va_end(args); 304 va_end(args);
294} 305}
295 306
307static void print_error_obj(struct drm_i915_error_state_buf *m,
308 struct drm_i915_error_object *obj)
309{
310 int page, offset, elt;
311
312 for (page = offset = 0; page < obj->page_count; page++) {
313 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
314 err_printf(m, "%08x : %08x\n", offset,
315 obj->pages[page][elt]);
316 offset += 4;
317 }
318 }
319}
320
296int i915_error_state_to_str(struct drm_i915_error_state_buf *m, 321int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
297 const struct i915_error_state_file_priv *error_priv) 322 const struct i915_error_state_file_priv *error_priv)
298{ 323{
299 struct drm_device *dev = error_priv->dev; 324 struct drm_device *dev = error_priv->dev;
300 drm_i915_private_t *dev_priv = dev->dev_private; 325 struct drm_i915_private *dev_priv = dev->dev_private;
301 struct drm_i915_error_state *error = error_priv->error; 326 struct drm_i915_error_state *error = error_priv->error;
302 int i, j, page, offset, elt; 327 int i, j, offset, elt;
328 int max_hangcheck_score;
303 329
304 if (!error) { 330 if (!error) {
305 err_printf(m, "no error state collected\n"); 331 err_printf(m, "no error state collected\n");
306 goto out; 332 goto out;
307 } 333 }
308 334
335 err_printf(m, "%s\n", error->error_msg);
309 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 336 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
310 error->time.tv_usec); 337 error->time.tv_usec);
311 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 338 err_printf(m, "Kernel: " UTS_RELEASE "\n");
339 max_hangcheck_score = 0;
340 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
341 if (error->ring[i].hangcheck_score > max_hangcheck_score)
342 max_hangcheck_score = error->ring[i].hangcheck_score;
343 }
344 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
345 if (error->ring[i].hangcheck_score == max_hangcheck_score &&
346 error->ring[i].pid != -1) {
347 err_printf(m, "Active process (on ring %s): %s [%d]\n",
348 ring_str(i),
349 error->ring[i].comm,
350 error->ring[i].pid);
351 }
352 }
353 err_printf(m, "Reset count: %u\n", error->reset_count);
354 err_printf(m, "Suspend count: %u\n", error->suspend_count);
312 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); 355 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
313 err_printf(m, "EIR: 0x%08x\n", error->eir); 356 err_printf(m, "EIR: 0x%08x\n", error->eir);
314 err_printf(m, "IER: 0x%08x\n", error->ier); 357 err_printf(m, "IER: 0x%08x\n", error->ier);
@@ -333,8 +376,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
333 if (INTEL_INFO(dev)->gen == 7) 376 if (INTEL_INFO(dev)->gen == 7)
334 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 377 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
335 378
336 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 379 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
337 i915_ring_error_state(m, dev, error, i); 380 err_printf(m, "%s command stream:\n", ring_str(i));
381 i915_ring_error_state(m, dev, &error->ring[i]);
382 }
338 383
339 if (error->active_bo) 384 if (error->active_bo)
340 print_error_buffers(m, "Active", 385 print_error_buffers(m, "Active",
@@ -349,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
349 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 394 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
350 struct drm_i915_error_object *obj; 395 struct drm_i915_error_object *obj;
351 396
352 if ((obj = error->ring[i].batchbuffer)) { 397 obj = error->ring[i].batchbuffer;
353 err_printf(m, "%s --- gtt_offset = 0x%08x\n", 398 if (obj) {
354 dev_priv->ring[i].name, 399 err_puts(m, dev_priv->ring[i].name);
400 if (error->ring[i].pid != -1)
401 err_printf(m, " (submitted by %s [%d])",
402 error->ring[i].comm,
403 error->ring[i].pid);
404 err_printf(m, " --- gtt_offset = 0x%08x\n",
355 obj->gtt_offset); 405 obj->gtt_offset);
356 offset = 0; 406 print_error_obj(m, obj);
357 for (page = 0; page < obj->page_count; page++) { 407 }
358 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 408
359 err_printf(m, "%08x : %08x\n", offset, 409 obj = error->ring[i].wa_batchbuffer;
360 obj->pages[page][elt]); 410 if (obj) {
361 offset += 4; 411 err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
362 } 412 dev_priv->ring[i].name, obj->gtt_offset);
363 } 413 print_error_obj(m, obj);
364 } 414 }
365 415
366 if (error->ring[i].num_requests) { 416 if (error->ring[i].num_requests) {
@@ -379,14 +429,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
379 err_printf(m, "%s --- ringbuffer = 0x%08x\n", 429 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
380 dev_priv->ring[i].name, 430 dev_priv->ring[i].name,
381 obj->gtt_offset); 431 obj->gtt_offset);
432 print_error_obj(m, obj);
433 }
434
435 if ((obj = error->ring[i].hws_page)) {
436 err_printf(m, "%s --- HW Status = 0x%08x\n",
437 dev_priv->ring[i].name,
438 obj->gtt_offset);
382 offset = 0; 439 offset = 0;
383 for (page = 0; page < obj->page_count; page++) { 440 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
384 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 441 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
385 err_printf(m, "%08x : %08x\n", 442 offset,
386 offset, 443 obj->pages[0][elt],
387 obj->pages[page][elt]); 444 obj->pages[0][elt+1],
388 offset += 4; 445 obj->pages[0][elt+2],
389 } 446 obj->pages[0][elt+3]);
447 offset += 16;
390 } 448 }
391 } 449 }
392 450
@@ -472,6 +530,7 @@ static void i915_error_state_free(struct kref *error_ref)
472 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 530 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
473 i915_error_object_free(error->ring[i].batchbuffer); 531 i915_error_object_free(error->ring[i].batchbuffer);
474 i915_error_object_free(error->ring[i].ringbuffer); 532 i915_error_object_free(error->ring[i].ringbuffer);
533 i915_error_object_free(error->ring[i].hws_page);
475 i915_error_object_free(error->ring[i].ctx); 534 i915_error_object_free(error->ring[i].ctx);
476 kfree(error->ring[i].requests); 535 kfree(error->ring[i].requests);
477 } 536 }
@@ -485,6 +544,7 @@ static void i915_error_state_free(struct kref *error_ref)
485static struct drm_i915_error_object * 544static struct drm_i915_error_object *
486i915_error_object_create_sized(struct drm_i915_private *dev_priv, 545i915_error_object_create_sized(struct drm_i915_private *dev_priv,
487 struct drm_i915_gem_object *src, 546 struct drm_i915_gem_object *src,
547 struct i915_address_space *vm,
488 const int num_pages) 548 const int num_pages)
489{ 549{
490 struct drm_i915_error_object *dst; 550 struct drm_i915_error_object *dst;
@@ -498,7 +558,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
498 if (dst == NULL) 558 if (dst == NULL)
499 return NULL; 559 return NULL;
500 560
501 reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); 561 reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
502 for (i = 0; i < num_pages; i++) { 562 for (i = 0; i < num_pages; i++) {
503 unsigned long flags; 563 unsigned long flags;
504 void *d; 564 void *d;
@@ -508,8 +568,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
508 goto unwind; 568 goto unwind;
509 569
510 local_irq_save(flags); 570 local_irq_save(flags);
511 if (reloc_offset < dev_priv->gtt.mappable_end && 571 if (src->cache_level == I915_CACHE_NONE &&
512 src->has_global_gtt_mapping) { 572 reloc_offset < dev_priv->gtt.mappable_end &&
573 src->has_global_gtt_mapping &&
574 i915_is_ggtt(vm)) {
513 void __iomem *s; 575 void __iomem *s;
514 576
515 /* Simply ignore tiling or any overlapping fence. 577 /* Simply ignore tiling or any overlapping fence.
@@ -559,8 +621,12 @@ unwind:
559 kfree(dst); 621 kfree(dst);
560 return NULL; 622 return NULL;
561} 623}
562#define i915_error_object_create(dev_priv, src) \ 624#define i915_error_object_create(dev_priv, src, vm) \
563 i915_error_object_create_sized((dev_priv), (src), \ 625 i915_error_object_create_sized((dev_priv), (src), (vm), \
626 (src)->base.size>>PAGE_SHIFT)
627
628#define i915_error_ggtt_object_create(dev_priv, src) \
629 i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
564 (src)->base.size>>PAGE_SHIFT) 630 (src)->base.size>>PAGE_SHIFT)
565 631
566static void capture_bo(struct drm_i915_error_buffer *err, 632static void capture_bo(struct drm_i915_error_buffer *err,
@@ -575,7 +641,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
575 err->write_domain = obj->base.write_domain; 641 err->write_domain = obj->base.write_domain;
576 err->fence_reg = obj->fence_reg; 642 err->fence_reg = obj->fence_reg;
577 err->pinned = 0; 643 err->pinned = 0;
578 if (obj->pin_count > 0) 644 if (i915_gem_obj_is_pinned(obj))
579 err->pinned = 1; 645 err->pinned = 1;
580 if (obj->user_pin_count > 0) 646 if (obj->user_pin_count > 0)
581 err->pinned = -1; 647 err->pinned = -1;
@@ -608,7 +674,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
608 int i = 0; 674 int i = 0;
609 675
610 list_for_each_entry(obj, head, global_list) { 676 list_for_each_entry(obj, head, global_list) {
611 if (obj->pin_count == 0) 677 if (!i915_gem_obj_is_pinned(obj))
612 continue; 678 continue;
613 679
614 capture_bo(err++, obj); 680 capture_bo(err++, obj);
@@ -619,6 +685,39 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
619 return i; 685 return i;
620} 686}
621 687
 688/* Generate a semi-unique error code. The code is not meant to have meaning; the
 689 * code's only purpose is to try to prevent false duplicated bug reports by
 690 * grossly estimating a GPU error state.
 691 *
 692 * TODO: Ideally, hashing the batchbuffer would be a very nice way to identify
 693 * the hang if we could strip the GTT offset information from it.
 694 *
 695 * It's only a small step better than a random number in its current form.
 696 */
697static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
698 struct drm_i915_error_state *error,
699 int *ring_id)
700{
701 uint32_t error_code = 0;
702 int i;
703
704 /* IPEHR would be an ideal way to detect errors, as it's the gross
 705 * measure of "the command that hung." However, it has some very common
 706 * synchronization commands which almost always appear in cases that are
 707 * strictly a client bug. Use instdone to differentiate those somewhat.
708 */
709 for (i = 0; i < I915_NUM_RINGS; i++) {
710 if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
711 if (ring_id)
712 *ring_id = i;
713
714 return error->ring[i].ipehr ^ error->ring[i].instdone;
715 }
716 }
717
718 return error_code;
719}
720
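The resulting ecode is simply IPEHR ^ INSTDONE of the first ring whose hangcheck marked it HUNG. A minimal model of that, with illustrative register values:

#include <stdint.h>
#include <stdio.h>

enum hangcheck { HANGCHECK_IDLE, HANGCHECK_HUNG };

struct ring_err { enum hangcheck action; uint32_t ipehr, instdone; };

static uint32_t generate_code(struct ring_err *rings, int n, int *ring_id)
{
	int i;

	for (i = 0; i < n; i++) {
		if (rings[i].action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;
			return rings[i].ipehr ^ rings[i].instdone;
		}
	}
	return 0;
}

int main(void)
{
	struct ring_err rings[2] = {
		{ HANGCHECK_IDLE, 0, 0 },
		{ HANGCHECK_HUNG, 0x0a000000, 0xffffffff },
	};
	int id = -1;
	uint32_t ecode = generate_code(rings, 2, &id);

	printf("GPU HANG: ecode %d:0x%08x\n", id, ecode);
	return 0;
}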
622static void i915_gem_record_fences(struct drm_device *dev, 721static void i915_gem_record_fences(struct drm_device *dev,
623 struct drm_i915_error_state *error) 722 struct drm_i915_error_state *error)
624{ 723{
@@ -652,107 +751,114 @@ static void i915_gem_record_fences(struct drm_device *dev,
652 } 751 }
653} 752}
654 753
655static struct drm_i915_error_object *
656i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
657 struct intel_ring_buffer *ring)
658{
659 struct i915_address_space *vm;
660 struct i915_vma *vma;
661 struct drm_i915_gem_object *obj;
662 u32 seqno;
663
664 if (!ring->get_seqno)
665 return NULL;
666
667 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
668 u32 acthd = I915_READ(ACTHD);
669
670 if (WARN_ON(ring->id != RCS))
671 return NULL;
672
673 obj = ring->scratch.obj;
674 if (obj != NULL &&
675 acthd >= i915_gem_obj_ggtt_offset(obj) &&
676 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
677 return i915_error_object_create(dev_priv, obj);
678 }
679
680 seqno = ring->get_seqno(ring, false);
681 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
682 list_for_each_entry(vma, &vm->active_list, mm_list) {
683 obj = vma->obj;
684 if (obj->ring != ring)
685 continue;
686
687 if (i915_seqno_passed(seqno, obj->last_read_seqno))
688 continue;
689
690 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
691 continue;
692
693 /* We need to copy these to an anonymous buffer as the simplest
694 * method to avoid being overwritten by userspace.
695 */
696 return i915_error_object_create(dev_priv, obj);
697 }
698 }
699
700 return NULL;
701}
702
703static void i915_record_ring_state(struct drm_device *dev, 754static void i915_record_ring_state(struct drm_device *dev,
704 struct drm_i915_error_state *error, 755 struct intel_ring_buffer *ring,
705 struct intel_ring_buffer *ring) 756 struct drm_i915_error_ring *ering)
706{ 757{
707 struct drm_i915_private *dev_priv = dev->dev_private; 758 struct drm_i915_private *dev_priv = dev->dev_private;
708 759
709 if (INTEL_INFO(dev)->gen >= 6) { 760 if (INTEL_INFO(dev)->gen >= 6) {
710 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 761 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
711 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 762 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
712 error->semaphore_mboxes[ring->id][0] 763 ering->semaphore_mboxes[0]
713 = I915_READ(RING_SYNC_0(ring->mmio_base)); 764 = I915_READ(RING_SYNC_0(ring->mmio_base));
714 error->semaphore_mboxes[ring->id][1] 765 ering->semaphore_mboxes[1]
715 = I915_READ(RING_SYNC_1(ring->mmio_base)); 766 = I915_READ(RING_SYNC_1(ring->mmio_base));
716 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 767 ering->semaphore_seqno[0] = ring->sync_seqno[0];
717 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 768 ering->semaphore_seqno[1] = ring->sync_seqno[1];
718 } 769 }
719 770
720 if (HAS_VEBOX(dev)) { 771 if (HAS_VEBOX(dev)) {
721 error->semaphore_mboxes[ring->id][2] = 772 ering->semaphore_mboxes[2] =
722 I915_READ(RING_SYNC_2(ring->mmio_base)); 773 I915_READ(RING_SYNC_2(ring->mmio_base));
723 error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; 774 ering->semaphore_seqno[2] = ring->sync_seqno[2];
724 } 775 }
725 776
726 if (INTEL_INFO(dev)->gen >= 4) { 777 if (INTEL_INFO(dev)->gen >= 4) {
727 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 778 ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
728 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 779 ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
729 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 780 ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
730 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 781 ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
731 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 782 ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
732 error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base)); 783 ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
733 if (INTEL_INFO(dev)->gen >= 8) 784 if (INTEL_INFO(dev)->gen >= 8)
734 error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; 785 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
735 error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); 786 ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
736 } else { 787 } else {
737 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 788 ering->faddr = I915_READ(DMA_FADD_I8XX);
738 error->ipeir[ring->id] = I915_READ(IPEIR); 789 ering->ipeir = I915_READ(IPEIR);
739 error->ipehr[ring->id] = I915_READ(IPEHR); 790 ering->ipehr = I915_READ(IPEHR);
740 error->instdone[ring->id] = I915_READ(INSTDONE); 791 ering->instdone = I915_READ(INSTDONE);
792 }
793
794 ering->waiting = waitqueue_active(&ring->irq_queue);
795 ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
796 ering->seqno = ring->get_seqno(ring, false);
797 ering->acthd = intel_ring_get_active_head(ring);
798 ering->head = I915_READ_HEAD(ring);
799 ering->tail = I915_READ_TAIL(ring);
800 ering->ctl = I915_READ_CTL(ring);
801
802 if (I915_NEED_GFX_HWS(dev)) {
803 int mmio;
804
805 if (IS_GEN7(dev)) {
806 switch (ring->id) {
807 default:
808 case RCS:
809 mmio = RENDER_HWS_PGA_GEN7;
810 break;
811 case BCS:
812 mmio = BLT_HWS_PGA_GEN7;
813 break;
814 case VCS:
815 mmio = BSD_HWS_PGA_GEN7;
816 break;
817 case VECS:
818 mmio = VEBOX_HWS_PGA_GEN7;
819 break;
820 }
821 } else if (IS_GEN6(ring->dev)) {
822 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
823 } else {
824 /* XXX: gen8 returns to sanity */
825 mmio = RING_HWS_PGA(ring->mmio_base);
826 }
827
828 ering->hws = I915_READ(mmio);
741 } 829 }
742 830
743 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 831 ering->cpu_ring_head = ring->head;
744 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 832 ering->cpu_ring_tail = ring->tail;
745 error->seqno[ring->id] = ring->get_seqno(ring, false); 833
746 error->acthd[ring->id] = intel_ring_get_active_head(ring); 834 ering->hangcheck_score = ring->hangcheck.score;
747 error->head[ring->id] = I915_READ_HEAD(ring); 835 ering->hangcheck_action = ring->hangcheck.action;
748 error->tail[ring->id] = I915_READ_TAIL(ring); 836
749 error->ctl[ring->id] = I915_READ_CTL(ring); 837 if (USES_PPGTT(dev)) {
838 int i;
750 839
751 error->cpu_ring_head[ring->id] = ring->head; 840 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
752 error->cpu_ring_tail[ring->id] = ring->tail;
753 841
754 error->hangcheck_score[ring->id] = ring->hangcheck.score; 842 switch (INTEL_INFO(dev)->gen) {
755 error->hangcheck_action[ring->id] = ring->hangcheck.action; 843 case 8:
844 for (i = 0; i < 4; i++) {
845 ering->vm_info.pdp[i] =
846 I915_READ(GEN8_RING_PDP_UDW(ring, i));
847 ering->vm_info.pdp[i] <<= 32;
848 ering->vm_info.pdp[i] |=
849 I915_READ(GEN8_RING_PDP_LDW(ring, i));
850 }
851 break;
852 case 7:
853 ering->vm_info.pp_dir_base =
854 I915_READ(RING_PP_DIR_BASE(ring));
855 break;
856 case 6:
857 ering->vm_info.pp_dir_base =
858 I915_READ(RING_PP_DIR_BASE_READ(ring));
859 break;
860 }
861 }
756} 862}
757 863
758 864
@@ -770,7 +876,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
770 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 876 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
771 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 877 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
772 ering->ctx = i915_error_object_create_sized(dev_priv, 878 ering->ctx = i915_error_object_create_sized(dev_priv,
773 obj, 1); 879 obj,
880 &dev_priv->gtt.base,
881 1);
774 break; 882 break;
775 } 883 }
776 } 884 }
@@ -791,14 +899,48 @@ static void i915_gem_record_rings(struct drm_device *dev,
791 899
792 error->ring[i].valid = true; 900 error->ring[i].valid = true;
793 901
794 i915_record_ring_state(dev, error, ring); 902 i915_record_ring_state(dev, ring, &error->ring[i]);
795 903
796 error->ring[i].batchbuffer = 904 error->ring[i].pid = -1;
797 i915_error_first_batchbuffer(dev_priv, ring); 905 request = i915_gem_find_active_request(ring);
906 if (request) {
907 /* We need to copy these to an anonymous buffer
908 * as the simplest method to avoid being overwritten
909 * by userspace.
910 */
911 error->ring[i].batchbuffer =
912 i915_error_object_create(dev_priv,
913 request->batch_obj,
914 request->ctx ?
915 request->ctx->vm :
916 &dev_priv->gtt.base);
917
918 if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
919 ring->scratch.obj)
920 error->ring[i].wa_batchbuffer =
921 i915_error_ggtt_object_create(dev_priv,
922 ring->scratch.obj);
923
924 if (request->file_priv) {
925 struct task_struct *task;
926
927 rcu_read_lock();
928 task = pid_task(request->file_priv->file->pid,
929 PIDTYPE_PID);
930 if (task) {
931 strcpy(error->ring[i].comm, task->comm);
932 error->ring[i].pid = task->pid;
933 }
934 rcu_read_unlock();
935 }
936 }
798 937
799 error->ring[i].ringbuffer = 938 error->ring[i].ringbuffer =
800 i915_error_object_create(dev_priv, ring->obj); 939 i915_error_ggtt_object_create(dev_priv, ring->obj);
801 940
941 if (ring->status_page.obj)
942 error->ring[i].hws_page =
943 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
802 944
803 i915_gem_record_active_context(ring, error, &error->ring[i]); 945 i915_gem_record_active_context(ring, error, &error->ring[i]);
804 946
@@ -845,7 +987,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
845 i++; 987 i++;
846 error->active_bo_count[ndx] = i; 988 error->active_bo_count[ndx] = i;
847 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 989 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
848 if (obj->pin_count) 990 if (i915_gem_obj_is_pinned(obj))
849 i++; 991 i++;
850 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 992 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
851 993
@@ -879,11 +1021,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
879 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1021 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
880 cnt++; 1022 cnt++;
881 1023
882 if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
883 cnt = 1;
884
885 vm = &dev_priv->gtt.base;
886
887 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); 1024 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
888 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); 1025 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
889 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), 1026 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
@@ -895,6 +1032,108 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
895 i915_gem_capture_vm(dev_priv, error, vm, i++); 1032 i915_gem_capture_vm(dev_priv, error, vm, i++);
896} 1033}
897 1034
1035/* Capture all registers which don't fit into another category. */
1036static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1037 struct drm_i915_error_state *error)
1038{
1039 struct drm_device *dev = dev_priv->dev;
1040 int pipe;
1041
1042 /* General organization
1043 * 1. Registers specific to a single generation
1044 * 2. Registers which belong to multiple generations
 1045 * 3. Feature specific registers
1046 * 4. Everything else
1047 * Please try to follow the order.
1048 */
1049
1050 /* 1: Registers specific to a single generation */
1051 if (IS_VALLEYVIEW(dev)) {
1052 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1053 error->forcewake = I915_READ(FORCEWAKE_VLV);
1054 }
1055
1056 if (IS_GEN7(dev))
1057 error->err_int = I915_READ(GEN7_ERR_INT);
1058
1059 if (IS_GEN6(dev)) {
1060 error->forcewake = I915_READ(FORCEWAKE);
1061 error->gab_ctl = I915_READ(GAB_CTL);
1062 error->gfx_mode = I915_READ(GFX_MODE);
1063 }
1064
1065 if (IS_GEN2(dev))
1066 error->ier = I915_READ16(IER);
1067
1068 /* 2: Registers which belong to multiple generations */
1069 if (INTEL_INFO(dev)->gen >= 7)
1070 error->forcewake = I915_READ(FORCEWAKE_MT);
1071
1072 if (INTEL_INFO(dev)->gen >= 6) {
1073 error->derrmr = I915_READ(DERRMR);
1074 error->error = I915_READ(ERROR_GEN6);
1075 error->done_reg = I915_READ(DONE_REG);
1076 }
1077
1078 /* 3: Feature specific registers */
1079 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1080 error->gam_ecochk = I915_READ(GAM_ECOCHK);
1081 error->gac_eco = I915_READ(GAC_ECO_BITS);
1082 }
1083
1084 /* 4: Everything else */
1085 if (HAS_HW_CONTEXTS(dev))
1086 error->ccid = I915_READ(CCID);
1087
1088 if (HAS_PCH_SPLIT(dev))
1089 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1090 else {
1091 error->ier = I915_READ(IER);
1092 for_each_pipe(pipe)
1093 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1094 }
1095
 1096 /* 4 (continued): error registers and instdone */
1097 error->eir = I915_READ(EIR);
1098 error->pgtbl_er = I915_READ(PGTBL_ER);
1099
1100 i915_get_extra_instdone(dev, error->extra_instdone);
1101}
1102
1103static void i915_error_capture_msg(struct drm_device *dev,
1104 struct drm_i915_error_state *error,
1105 bool wedged,
1106 const char *error_msg)
1107{
1108 struct drm_i915_private *dev_priv = dev->dev_private;
1109 u32 ecode;
1110 int ring_id = -1, len;
1111
1112 ecode = i915_error_generate_code(dev_priv, error, &ring_id);
1113
1114 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1115 "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
1116
1117 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1118 len += scnprintf(error->error_msg + len,
1119 sizeof(error->error_msg) - len,
1120 ", in %s [%d]",
1121 error->ring[ring_id].comm,
1122 error->ring[ring_id].pid);
1123
1124 scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
1125 ", reason: %s, action: %s",
1126 error_msg,
1127 wedged ? "reset" : "continue");
1128}
1129
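i915_error_capture_msg assembles the one-line summary in stages, relying on scnprintf returning the number of characters actually written so each append stays within the buffer. A userspace analogue using snprintf (which, unlike scnprintf, returns the would-be length; harmless here since the assumed pieces fit):

#include <stdio.h>

int main(void)
{
	char msg[128];
	int ring_id = 0, pid = 1234, len, wedged = 1;
	const char *comm = "Xorg", *reason = "stuck on render ring";

	len = snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:0x%08x",
		       ring_id, 0xf5ffffffu);
	if (pid != -1)
		len += snprintf(msg + len, sizeof(msg) - len,
				", in %s [%d]", comm, pid);
	snprintf(msg + len, sizeof(msg) - len, ", reason: %s, action: %s",
		 reason, wedged ? "reset" : "continue");

	puts(msg);
	return 0;
}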
1130static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1131 struct drm_i915_error_state *error)
1132{
1133 error->reset_count = i915_reset_count(&dev_priv->gpu_error);
1134 error->suspend_count = dev_priv->suspend_count;
1135}
1136
898/** 1137/**
899 * i915_capture_error_state - capture an error record for later analysis 1138 * i915_capture_error_state - capture an error record for later analysis
900 * @dev: drm device 1139 * @dev: drm device
@@ -904,18 +1143,13 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
904 * out a structure which becomes available in debugfs for user level tools 1143 * out a structure which becomes available in debugfs for user level tools
905 * to pick up. 1144 * to pick up.
906 */ 1145 */
907void i915_capture_error_state(struct drm_device *dev) 1146void i915_capture_error_state(struct drm_device *dev, bool wedged,
1147 const char *error_msg)
908{ 1148{
1149 static bool warned;
909 struct drm_i915_private *dev_priv = dev->dev_private; 1150 struct drm_i915_private *dev_priv = dev->dev_private;
910 struct drm_i915_error_state *error; 1151 struct drm_i915_error_state *error;
911 unsigned long flags; 1152 unsigned long flags;
912 int pipe;
913
914 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
915 error = dev_priv->gpu_error.first_error;
916 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
917 if (error)
918 return;
919 1153
920 /* Account for pipe specific data like PIPE*STAT */ 1154 /* Account for pipe specific data like PIPE*STAT */
921 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1155 error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -924,52 +1158,10 @@ void i915_capture_error_state(struct drm_device *dev)
924 return; 1158 return;
925 } 1159 }
926 1160
927 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
928 dev->primary->index);
929 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
930 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
931 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
932 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
933
934 kref_init(&error->ref); 1161 kref_init(&error->ref);
935 error->eir = I915_READ(EIR);
936 error->pgtbl_er = I915_READ(PGTBL_ER);
937 if (HAS_HW_CONTEXTS(dev))
938 error->ccid = I915_READ(CCID);
939
940 if (HAS_PCH_SPLIT(dev))
941 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
942 else if (IS_VALLEYVIEW(dev))
943 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
944 else if (IS_GEN2(dev))
945 error->ier = I915_READ16(IER);
946 else
947 error->ier = I915_READ(IER);
948
949 if (INTEL_INFO(dev)->gen >= 6)
950 error->derrmr = I915_READ(DERRMR);
951
952 if (IS_VALLEYVIEW(dev))
953 error->forcewake = I915_READ(FORCEWAKE_VLV);
954 else if (INTEL_INFO(dev)->gen >= 7)
955 error->forcewake = I915_READ(FORCEWAKE_MT);
956 else if (INTEL_INFO(dev)->gen == 6)
957 error->forcewake = I915_READ(FORCEWAKE);
958
959 if (!HAS_PCH_SPLIT(dev))
960 for_each_pipe(pipe)
961 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
962
963 if (INTEL_INFO(dev)->gen >= 6) {
964 error->error = I915_READ(ERROR_GEN6);
965 error->done_reg = I915_READ(DONE_REG);
966 }
967
968 if (INTEL_INFO(dev)->gen == 7)
969 error->err_int = I915_READ(GEN7_ERR_INT);
970
971 i915_get_extra_instdone(dev, error->extra_instdone);
972 1162
1163 i915_capture_gen_state(dev_priv, error);
1164 i915_capture_reg_state(dev_priv, error);
973 i915_gem_capture_buffers(dev_priv, error); 1165 i915_gem_capture_buffers(dev_priv, error);
974 i915_gem_record_fences(dev, error); 1166 i915_gem_record_fences(dev, error);
975 i915_gem_record_rings(dev, error); 1167 i915_gem_record_rings(dev, error);
@@ -979,6 +1171,9 @@ void i915_capture_error_state(struct drm_device *dev)
979 error->overlay = intel_overlay_capture_error_state(dev); 1171 error->overlay = intel_overlay_capture_error_state(dev);
980 error->display = intel_display_capture_error_state(dev); 1172 error->display = intel_display_capture_error_state(dev);
981 1173
1174 i915_error_capture_msg(dev, error, wedged, error_msg);
1175 DRM_INFO("%s\n", error->error_msg);
1176
982 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1177 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
983 if (dev_priv->gpu_error.first_error == NULL) { 1178 if (dev_priv->gpu_error.first_error == NULL) {
984 dev_priv->gpu_error.first_error = error; 1179 dev_priv->gpu_error.first_error = error;
@@ -986,8 +1181,19 @@ void i915_capture_error_state(struct drm_device *dev)
986 } 1181 }
987 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1182 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
988 1183
989 if (error) 1184 if (error) {
990 i915_error_state_free(&error->ref); 1185 i915_error_state_free(&error->ref);
1186 return;
1187 }
1188
1189 if (!warned) {
1190 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1191 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1192 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1193 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1194 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
1195 warned = true;
1196 }
991} 1197}
992 1198
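The bug-reporting hints are now printed once per boot via a static flag, and only after a capture is actually kept rather than before every attempt. A sketch of the warn-once pattern in isolation (the driver does this under its own error-state locking):

#include <stdbool.h>
#include <stdio.h>

static void capture(void)
{
	static bool warned;

	if (!warned) {
		puts("GPU hangs can indicate a bug anywhere in the gfx stack");
		warned = true;
	}
}

int main(void)
{
	capture();
	capture();	/* second capture stays quiet */
	return 0;
}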
993void i915_error_state_get(struct drm_device *dev, 1199void i915_error_state_get(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d554169ac592..7753249b3a95 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -82,13 +82,13 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
82 82
83/* For display hotplug interrupt */ 83/* For display hotplug interrupt */
84static void 84static void
85ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 85ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
86{ 86{
87 assert_spin_locked(&dev_priv->irq_lock); 87 assert_spin_locked(&dev_priv->irq_lock);
88 88
89 if (dev_priv->pc8.irqs_disabled) { 89 if (dev_priv->pm.irqs_disabled) {
90 WARN(1, "IRQs disabled\n"); 90 WARN(1, "IRQs disabled\n");
91 dev_priv->pc8.regsave.deimr &= ~mask; 91 dev_priv->pm.regsave.deimr &= ~mask;
92 return; 92 return;
93 } 93 }
94 94
@@ -100,13 +100,13 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
100} 100}
101 101
102static void 102static void
103ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 103ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
104{ 104{
105 assert_spin_locked(&dev_priv->irq_lock); 105 assert_spin_locked(&dev_priv->irq_lock);
106 106
107 if (dev_priv->pc8.irqs_disabled) { 107 if (dev_priv->pm.irqs_disabled) {
108 WARN(1, "IRQs disabled\n"); 108 WARN(1, "IRQs disabled\n");
109 dev_priv->pc8.regsave.deimr |= mask; 109 dev_priv->pm.regsave.deimr |= mask;
110 return; 110 return;
111 } 111 }
112 112
@@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
129{ 129{
130 assert_spin_locked(&dev_priv->irq_lock); 130 assert_spin_locked(&dev_priv->irq_lock);
131 131
132 if (dev_priv->pc8.irqs_disabled) { 132 if (dev_priv->pm.irqs_disabled) {
133 WARN(1, "IRQs disabled\n"); 133 WARN(1, "IRQs disabled\n");
134 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 134 dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
135 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 135 dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
136 interrupt_mask); 136 interrupt_mask);
137 return; 137 return;
138 } 138 }
@@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
167 167
168 assert_spin_locked(&dev_priv->irq_lock); 168 assert_spin_locked(&dev_priv->irq_lock);
169 169
170 if (dev_priv->pc8.irqs_disabled) { 170 if (dev_priv->pm.irqs_disabled) {
171 WARN(1, "IRQs disabled\n"); 171 WARN(1, "IRQs disabled\n");
172 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 172 dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
173 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 173 dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
174 interrupt_mask); 174 interrupt_mask);
175 return; 175 return;
176 } 176 }
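The hunks above rename the pc8 bookkeeping to pm, but the underlying pattern is unchanged: while runtime PM has interrupts disabled, IMR updates are recorded in a regsave software copy and replayed on resume instead of touching hardware. A standalone sketch of that deferred-write idiom (names hypothetical, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct irq_state {
	bool irqs_disabled;	/* runtime PM has IRQs off */
	uint32_t hw_imr;	/* stand-in for the DEIMR register */
	uint32_t regsave_imr;	/* software copy restored on resume */
};

static void enable_display_irq(struct irq_state *s, uint32_t mask)
{
	if (s->irqs_disabled) {
		/* defer: clear the mask bits in the saved copy only */
		s->regsave_imr &= ~mask;
		return;
	}
	s->hw_imr &= ~mask;	/* unmasked bit = interrupt enabled */
}

static void resume_irqs(struct irq_state *s)
{
	/* on resume, the saved copy is written back to hardware */
	s->hw_imr = s->regsave_imr;
	s->irqs_disabled = false;
}

int main(void)
{
	struct irq_state s = { .irqs_disabled = true,
			       .hw_imr = ~0u, .regsave_imr = ~0u };

	enable_display_irq(&s, 1u << 3);	/* deferred while suspended */
	resume_irqs(&s);
	printf("imr after resume: 0x%08x\n", (unsigned)s.hw_imr);
	return 0;
}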
@@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
232 return true; 232 return true;
233} 233}
234 234
235static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
236{
237 struct drm_i915_private *dev_priv = dev->dev_private;
238 u32 reg = PIPESTAT(pipe);
239 u32 pipestat = I915_READ(reg) & 0x7fff0000;
240
241 assert_spin_locked(&dev_priv->irq_lock);
242
243 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
244 POSTING_READ(reg);
245}
246
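The new i9xx_clear_fifo_underrun() relies on the PIPESTAT layout: enable bits in the high half, sticky write-one-to-clear status bits elsewhere, so reading back only the enable half and writing it together with the status bit acks the underrun without disturbing anything else. A toy model of that idiom (register semantics simulated, constants illustrative):

#include <stdint.h>
#include <stdio.h>

#define ENABLE_MASK     0x7fff0000u	/* same constant as the driver uses */
#define UNDERRUN_STATUS (1u << 0)	/* illustrative bit position */

static uint32_t pipestat;		/* stand-in for the MMIO register */

static uint32_t reg_read(void) { return pipestat; }

static void reg_write(uint32_t val)
{
	/* simulated hardware semantics: writing 1 to a status bit clears
	 * it, while the enable half is stored as written */
	uint32_t enables = val & ENABLE_MASK;
	uint32_t clears = val & ~ENABLE_MASK;

	pipestat = enables | ((pipestat & ~ENABLE_MASK) & ~clears);
}

int main(void)
{
	pipestat = (1u << 16) | UNDERRUN_STATUS;  /* one enable + stuck status */

	/* keep only the enable bits, write back with the status bit set */
	reg_write((reg_read() & ENABLE_MASK) | UNDERRUN_STATUS);

	printf("pipestat: 0x%08x\n", (unsigned)pipestat);  /* status now clear */
	return 0;
}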
235static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 247static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
236 enum pipe pipe, bool enable) 248 enum pipe pipe, bool enable)
237{ 249{
@@ -301,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
301 313
302 assert_spin_locked(&dev_priv->irq_lock); 314 assert_spin_locked(&dev_priv->irq_lock);
303 315
304 if (dev_priv->pc8.irqs_disabled && 316 if (dev_priv->pm.irqs_disabled &&
305 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 317 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
306 WARN(1, "IRQs disabled\n"); 318 WARN(1, "IRQs disabled\n");
307 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 319 dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
308 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 320 dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
309 interrupt_mask); 321 interrupt_mask);
310 return; 322 return;
311 } 323 }
@@ -375,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
375 * 387 *
376 * Returns the previous state of underrun reporting. 388 * Returns the previous state of underrun reporting.
377 */ 389 */
378bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 390bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
379 enum pipe pipe, bool enable) 391 enum pipe pipe, bool enable)
380{ 392{
381 struct drm_i915_private *dev_priv = dev->dev_private; 393 struct drm_i915_private *dev_priv = dev->dev_private;
382 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 394 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
383 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 395 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
384 unsigned long flags;
385 bool ret; 396 bool ret;
386 397
387 spin_lock_irqsave(&dev_priv->irq_lock, flags); 398 assert_spin_locked(&dev_priv->irq_lock);
388 399
389 ret = !intel_crtc->cpu_fifo_underrun_disabled; 400 ret = !intel_crtc->cpu_fifo_underrun_disabled;
390 401
@@ -393,7 +404,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
393 404
394 intel_crtc->cpu_fifo_underrun_disabled = !enable; 405 intel_crtc->cpu_fifo_underrun_disabled = !enable;
395 406
396 if (IS_GEN5(dev) || IS_GEN6(dev)) 407 if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
408 i9xx_clear_fifo_underrun(dev, pipe);
409 else if (IS_GEN5(dev) || IS_GEN6(dev))
397 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 410 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
398 else if (IS_GEN7(dev)) 411 else if (IS_GEN7(dev))
399 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); 412 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -401,10 +414,33 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
401 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 414 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
402 415
403done: 416done:
417 return ret;
418}
419
420bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
421 enum pipe pipe, bool enable)
422{
423 struct drm_i915_private *dev_priv = dev->dev_private;
424 unsigned long flags;
425 bool ret;
426
427 spin_lock_irqsave(&dev_priv->irq_lock, flags);
428 ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
404 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 429 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
430
405 return ret; 431 return ret;
406} 432}
407 433
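This hunk splits the function into a double-underscore variant that asserts the lock is already held and a public wrapper that takes it, so interrupt-context callers that already hold irq_lock can reuse the core. A sketch of the same split with a pthread mutex standing in for the spinlock (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool underrun_disabled;

static bool __set_underrun_reporting(bool enable)
{
	/* caller must hold irq_lock (assert_spin_locked in the kernel) */
	bool was_enabled = !underrun_disabled;

	underrun_disabled = !enable;
	return was_enabled;
}

static bool set_underrun_reporting(bool enable)
{
	bool ret;

	pthread_mutex_lock(&irq_lock);
	ret = __set_underrun_reporting(enable);
	pthread_mutex_unlock(&irq_lock);
	return ret;
}

int main(void)
{
	printf("was enabled: %d\n", set_underrun_reporting(false));
	printf("was enabled: %d\n", set_underrun_reporting(true));
	return 0;
}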
434static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
435 enum pipe pipe)
436{
437 struct drm_i915_private *dev_priv = dev->dev_private;
438 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
439 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
440
441 return !intel_crtc->cpu_fifo_underrun_disabled;
442}
443
408/** 444/**
409 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 445 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
410 * @dev: drm device 446 * @dev: drm device
@@ -458,45 +494,109 @@ done:
458} 494}
459 495
460 496
461void 497static void
462i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 498__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
499 u32 enable_mask, u32 status_mask)
463{ 500{
464 u32 reg = PIPESTAT(pipe); 501 u32 reg = PIPESTAT(pipe);
465 u32 pipestat = I915_READ(reg) & 0x7fff0000; 502 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
466 503
467 assert_spin_locked(&dev_priv->irq_lock); 504 assert_spin_locked(&dev_priv->irq_lock);
468 505
469 if ((pipestat & mask) == mask) 506 if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
507 status_mask & ~PIPESTAT_INT_STATUS_MASK))
470 return; 508 return;
471 509
510 if ((pipestat & enable_mask) == enable_mask)
511 return;
512
513 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
514
472 /* Enable the interrupt, clear any pending status */ 515 /* Enable the interrupt, clear any pending status */
473 pipestat |= mask | (mask >> 16); 516 pipestat |= enable_mask | status_mask;
474 I915_WRITE(reg, pipestat); 517 I915_WRITE(reg, pipestat);
475 POSTING_READ(reg); 518 POSTING_READ(reg);
476} 519}
477 520
478void 521static void
479i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 522__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
523 u32 enable_mask, u32 status_mask)
480{ 524{
481 u32 reg = PIPESTAT(pipe); 525 u32 reg = PIPESTAT(pipe);
482 u32 pipestat = I915_READ(reg) & 0x7fff0000; 526 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
483 527
484 assert_spin_locked(&dev_priv->irq_lock); 528 assert_spin_locked(&dev_priv->irq_lock);
485 529
486 if ((pipestat & mask) == 0) 530 if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
531 status_mask & ~PIPESTAT_INT_STATUS_MASK))
532 return;
533
534 if ((pipestat & enable_mask) == 0)
487 return; 535 return;
488 536
489 pipestat &= ~mask; 537 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
538
539 pipestat &= ~enable_mask;
490 I915_WRITE(reg, pipestat); 540 I915_WRITE(reg, pipestat);
491 POSTING_READ(reg); 541 POSTING_READ(reg);
492} 542}
493 543
544static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
545{
546 u32 enable_mask = status_mask << 16;
547
548 /*
549 * On pipe A we don't support the PSR interrupt yet, on pipe B the
550 * same bit is MBZ (must be zero).
551 */
552 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
553 return 0;
554
555 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
556 SPRITE0_FLIP_DONE_INT_EN_VLV |
557 SPRITE1_FLIP_DONE_INT_EN_VLV);
558 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
559 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
560 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
561 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
562
563 return enable_mask;
564}
565
566void
567i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
568 u32 status_mask)
569{
570 u32 enable_mask;
571
572 if (IS_VALLEYVIEW(dev_priv->dev))
573 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
574 status_mask);
575 else
576 enable_mask = status_mask << 16;
577 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
578}
579
580void
581i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
582 u32 status_mask)
583{
584 u32 enable_mask;
585
586 if (IS_VALLEYVIEW(dev_priv->dev))
587 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
588 status_mask);
589 else
590 enable_mask = status_mask << 16;
591 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
592}
593
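After this change callers pass only status bits; the enable bits are derived, on most platforms by shifting the status mask up 16 bits, with Valleyview relocating the sprite flip-done enables and refusing the underrun bit outright. A simplified model of the derivation (bit positions here are invented, not the hardware's):

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions only */
#define STATUS_VBLANK       (1u << 2)
#define STATUS_SPRITE_FLIP  (1u << 14)
#define ENABLE_SPRITE_FLIP  (1u << 22)	/* not at status << 16 in this model */

static uint32_t get_enable_mask(uint32_t status_mask)
{
	/* common case: the enable bit is the status bit shifted up 16 */
	uint32_t enable_mask = status_mask << 16;

	/* exception: relocate the sprite flip-done enable */
	enable_mask &= ~(STATUS_SPRITE_FLIP << 16);
	if (status_mask & STATUS_SPRITE_FLIP)
		enable_mask |= ENABLE_SPRITE_FLIP;

	return enable_mask;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)get_enable_mask(STATUS_VBLANK));
	printf("0x%08x\n", (unsigned)get_enable_mask(STATUS_SPRITE_FLIP));
	return 0;
}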
494/** 594/**
495 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 595 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
496 */ 596 */
497static void i915_enable_asle_pipestat(struct drm_device *dev) 597static void i915_enable_asle_pipestat(struct drm_device *dev)
498{ 598{
499 drm_i915_private_t *dev_priv = dev->dev_private; 599 struct drm_i915_private *dev_priv = dev->dev_private;
500 unsigned long irqflags; 600 unsigned long irqflags;
501 601
502 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 602 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
@@ -504,10 +604,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
504 604
505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 605 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
506 606
507 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); 607 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
508 if (INTEL_INFO(dev)->gen >= 4) 608 if (INTEL_INFO(dev)->gen >= 4)
509 i915_enable_pipestat(dev_priv, PIPE_A, 609 i915_enable_pipestat(dev_priv, PIPE_A,
510 PIPE_LEGACY_BLC_EVENT_ENABLE); 610 PIPE_LEGACY_BLC_EVENT_STATUS);
511 611
512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 612 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
513} 613}
@@ -524,7 +624,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
524static int 624static int
525i915_pipe_enabled(struct drm_device *dev, int pipe) 625i915_pipe_enabled(struct drm_device *dev, int pipe)
526{ 626{
527 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 627 struct drm_i915_private *dev_priv = dev->dev_private;
528 628
529 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 629 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
530 /* Locking is horribly broken here, but whatever. */ 630 /* Locking is horribly broken here, but whatever. */
@@ -548,7 +648,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
548 */ 648 */
549static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 649static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
550{ 650{
551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 651 struct drm_i915_private *dev_priv = dev->dev_private;
552 unsigned long high_frame; 652 unsigned long high_frame;
553 unsigned long low_frame; 653 unsigned long low_frame;
554 u32 high1, high2, low, pixel, vbl_start; 654 u32 high1, high2, low, pixel, vbl_start;
@@ -604,7 +704,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
604 704
605static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 705static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
606{ 706{
607 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 707 struct drm_i915_private *dev_priv = dev->dev_private;
608 int reg = PIPE_FRMCOUNT_GM45(pipe); 708 int reg = PIPE_FRMCOUNT_GM45(pipe);
609 709
610 if (!i915_pipe_enabled(dev, pipe)) { 710 if (!i915_pipe_enabled(dev, pipe)) {
@@ -859,8 +959,8 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
859 959
860static void i915_hotplug_work_func(struct work_struct *work) 960static void i915_hotplug_work_func(struct work_struct *work)
861{ 961{
862 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 962 struct drm_i915_private *dev_priv =
863 hotplug_work); 963 container_of(work, struct drm_i915_private, hotplug_work);
864 struct drm_device *dev = dev_priv->dev; 964 struct drm_device *dev = dev_priv->dev;
865 struct drm_mode_config *mode_config = &dev->mode_config; 965 struct drm_mode_config *mode_config = &dev->mode_config;
866 struct intel_connector *intel_connector; 966 struct intel_connector *intel_connector;
@@ -928,9 +1028,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
928 drm_kms_helper_hotplug_event(dev); 1028 drm_kms_helper_hotplug_event(dev);
929} 1029}
930 1030
1031static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
1032{
1033 del_timer_sync(&dev_priv->hotplug_reenable_timer);
1034}
1035
931static void ironlake_rps_change_irq_handler(struct drm_device *dev) 1036static void ironlake_rps_change_irq_handler(struct drm_device *dev)
932{ 1037{
933 drm_i915_private_t *dev_priv = dev->dev_private; 1038 struct drm_i915_private *dev_priv = dev->dev_private;
934 u32 busy_up, busy_down, max_avg, min_avg; 1039 u32 busy_up, busy_down, max_avg, min_avg;
935 u8 new_delay; 1040 u8 new_delay;
936 1041
@@ -981,8 +1086,8 @@ static void notify_ring(struct drm_device *dev,
981 1086
982static void gen6_pm_rps_work(struct work_struct *work) 1087static void gen6_pm_rps_work(struct work_struct *work)
983{ 1088{
984 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1089 struct drm_i915_private *dev_priv =
985 rps.work); 1090 container_of(work, struct drm_i915_private, rps.work);
986 u32 pm_iir; 1091 u32 pm_iir;
987 int new_delay, adj; 1092 int new_delay, adj;
988 1093
@@ -990,13 +1095,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
990 pm_iir = dev_priv->rps.pm_iir; 1095 pm_iir = dev_priv->rps.pm_iir;
991 dev_priv->rps.pm_iir = 0; 1096 dev_priv->rps.pm_iir = 0;
992 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 1097 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
993 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 1098 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
994 spin_unlock_irq(&dev_priv->irq_lock); 1099 spin_unlock_irq(&dev_priv->irq_lock);
995 1100
996 /* Make sure we didn't queue anything we're not going to process. */ 1101 /* Make sure we didn't queue anything we're not going to process. */
997 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); 1102 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
998 1103
999 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 1104 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1000 return; 1105 return;
1001 1106
1002 mutex_lock(&dev_priv->rps.hw_lock); 1107 mutex_lock(&dev_priv->rps.hw_lock);
@@ -1007,36 +1112,38 @@ static void gen6_pm_rps_work(struct work_struct *work)
1007 adj *= 2; 1112 adj *= 2;
1008 else 1113 else
1009 adj = 1; 1114 adj = 1;
1010 new_delay = dev_priv->rps.cur_delay + adj; 1115 new_delay = dev_priv->rps.cur_freq + adj;
1011 1116
1012 /* 1117 /*
1013 * For better performance, jump directly 1118 * For better performance, jump directly
1014 * to RPe if we're below it. 1119 * to RPe if we're below it.
1015 */ 1120 */
1016 if (new_delay < dev_priv->rps.rpe_delay) 1121 if (new_delay < dev_priv->rps.efficient_freq)
1017 new_delay = dev_priv->rps.rpe_delay; 1122 new_delay = dev_priv->rps.efficient_freq;
1018 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1123 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1019 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) 1124 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1020 new_delay = dev_priv->rps.rpe_delay; 1125 new_delay = dev_priv->rps.efficient_freq;
1021 else 1126 else
1022 new_delay = dev_priv->rps.min_delay; 1127 new_delay = dev_priv->rps.min_freq_softlimit;
1023 adj = 0; 1128 adj = 0;
1024 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1129 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1025 if (adj < 0) 1130 if (adj < 0)
1026 adj *= 2; 1131 adj *= 2;
1027 else 1132 else
1028 adj = -1; 1133 adj = -1;
1029 new_delay = dev_priv->rps.cur_delay + adj; 1134 new_delay = dev_priv->rps.cur_freq + adj;
1030 } else { /* unknown event */ 1135 } else { /* unknown event */
1031 new_delay = dev_priv->rps.cur_delay; 1136 new_delay = dev_priv->rps.cur_freq;
1032 } 1137 }
1033 1138
1034 /* sysfs frequency interfaces may have snuck in while servicing the 1139 /* sysfs frequency interfaces may have snuck in while servicing the
1035 * interrupt 1140 * interrupt
1036 */ 1141 */
1037 new_delay = clamp_t(int, new_delay, 1142 new_delay = clamp_t(int, new_delay,
1038 dev_priv->rps.min_delay, dev_priv->rps.max_delay); 1143 dev_priv->rps.min_freq_softlimit,
1039 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; 1144 dev_priv->rps.max_freq_softlimit);
1145
1146 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1040 1147
1041 if (IS_VALLEYVIEW(dev_priv->dev)) 1148 if (IS_VALLEYVIEW(dev_priv->dev))
1042 valleyview_set_rps(dev_priv->dev, new_delay); 1149 valleyview_set_rps(dev_priv->dev, new_delay);
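The rename from *_delay to *_freq does not change the algorithm: the step doubles while consecutive interrupts trend the same way, an up-threshold below RPe jumps straight to the efficient frequency, and the result is clamped to the sysfs soft limits before last_adj is computed. A compact standalone version of that logic (field names hypothetical):

#include <stdio.h>

struct rps {
	int cur_freq, efficient_freq;
	int min_freq_softlimit, max_freq_softlimit;
	int last_adj;
};

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int next_freq_up(struct rps *r)
{
	/* double the step while we keep climbing, else start at +1 */
	int adj = r->last_adj > 0 ? r->last_adj * 2 : 1;
	int new_freq = r->cur_freq + adj;

	/* for better performance, jump directly to RPe if below it */
	if (new_freq < r->efficient_freq)
		new_freq = r->efficient_freq;

	/* sysfs may have moved the limits while we were servicing the irq */
	new_freq = clamp_int(new_freq, r->min_freq_softlimit,
			     r->max_freq_softlimit);
	r->last_adj = new_freq - r->cur_freq;
	r->cur_freq = new_freq;
	return new_freq;
}

int main(void)
{
	struct rps r = { .cur_freq = 3, .efficient_freq = 8,
			 .min_freq_softlimit = 1, .max_freq_softlimit = 20 };

	for (int i = 0; i < 4; i++)
		printf("freq -> %d\n", next_freq_up(&r));
	return 0;
}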
@@ -1058,8 +1165,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
1058 */ 1165 */
1059static void ivybridge_parity_work(struct work_struct *work) 1166static void ivybridge_parity_work(struct work_struct *work)
1060{ 1167{
1061 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1168 struct drm_i915_private *dev_priv =
1062 l3_parity.error_work); 1169 container_of(work, struct drm_i915_private, l3_parity.error_work);
1063 u32 error_status, row, bank, subbank; 1170 u32 error_status, row, bank, subbank;
1064 char *parity_event[6]; 1171 char *parity_event[6];
1065 uint32_t misccpctl; 1172 uint32_t misccpctl;
@@ -1131,7 +1238,7 @@ out:
1131 1238
1132static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1239static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1133{ 1240{
1134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1241 struct drm_i915_private *dev_priv = dev->dev_private;
1135 1242
1136 if (!HAS_L3_DPF(dev)) 1243 if (!HAS_L3_DPF(dev))
1137 return; 1244 return;
@@ -1177,8 +1284,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1177 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1284 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1178 GT_BSD_CS_ERROR_INTERRUPT | 1285 GT_BSD_CS_ERROR_INTERRUPT |
1179 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1286 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1180 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 1287 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1181 i915_handle_error(dev, false); 1288 gt_iir);
1182 } 1289 }
1183 1290
1184 if (gt_iir & GT_PARITY_ERROR(dev)) 1291 if (gt_iir & GT_PARITY_ERROR(dev))
@@ -1242,13 +1349,16 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1242 u32 hotplug_trigger, 1349 u32 hotplug_trigger,
1243 const u32 *hpd) 1350 const u32 *hpd)
1244{ 1351{
1245 drm_i915_private_t *dev_priv = dev->dev_private; 1352 struct drm_i915_private *dev_priv = dev->dev_private;
1246 int i; 1353 int i;
1247 bool storm_detected = false; 1354 bool storm_detected = false;
1248 1355
1249 if (!hotplug_trigger) 1356 if (!hotplug_trigger)
1250 return; 1357 return;
1251 1358
1359 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1360 hotplug_trigger);
1361
1252 spin_lock(&dev_priv->irq_lock); 1362 spin_lock(&dev_priv->irq_lock);
1253 for (i = 1; i < HPD_NUM_PINS; i++) { 1363 for (i = 1; i < HPD_NUM_PINS; i++) {
1254 1364
@@ -1295,14 +1405,14 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1295 1405
1296static void gmbus_irq_handler(struct drm_device *dev) 1406static void gmbus_irq_handler(struct drm_device *dev)
1297{ 1407{
1298 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1408 struct drm_i915_private *dev_priv = dev->dev_private;
1299 1409
1300 wake_up_all(&dev_priv->gmbus_wait_queue); 1410 wake_up_all(&dev_priv->gmbus_wait_queue);
1301} 1411}
1302 1412
1303static void dp_aux_irq_handler(struct drm_device *dev) 1413static void dp_aux_irq_handler(struct drm_device *dev)
1304{ 1414{
1305 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1415 struct drm_i915_private *dev_priv = dev->dev_private;
1306 1416
1307 wake_up_all(&dev_priv->gmbus_wait_queue); 1417 wake_up_all(&dev_priv->gmbus_wait_queue);
1308} 1418}
@@ -1408,10 +1518,10 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1408 * the work queue. */ 1518 * the work queue. */
1409static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1519static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1410{ 1520{
1411 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1521 if (pm_iir & dev_priv->pm_rps_events) {
1412 spin_lock(&dev_priv->irq_lock); 1522 spin_lock(&dev_priv->irq_lock);
1413 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1523 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1414 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1524 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1415 spin_unlock(&dev_priv->irq_lock); 1525 spin_unlock(&dev_priv->irq_lock);
1416 1526
1417 queue_work(dev_priv->wq, &dev_priv->rps.work); 1527 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1422,23 +1532,89 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1422 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1532 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1423 1533
1424 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1534 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1425 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1535 i915_handle_error(dev_priv->dev, false,
1426 i915_handle_error(dev_priv->dev, false); 1536 "VEBOX CS error interrupt 0x%08x",
1537 pm_iir);
1427 } 1538 }
1428 } 1539 }
1429} 1540}
1430 1541
1542static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1543{
1544 struct drm_i915_private *dev_priv = dev->dev_private;
1545 u32 pipe_stats[I915_MAX_PIPES] = { };
1546 int pipe;
1547
1548 spin_lock(&dev_priv->irq_lock);
1549 for_each_pipe(pipe) {
1550 int reg;
1551 u32 mask, iir_bit = 0;
1552
1553 /*
1554 * PIPESTAT bits get signalled even when the interrupt is
1555 * disabled with the mask bits, and some of the status bits do
1556 * not generate interrupts at all (like the underrun bit). Hence
1557 * we need to be careful that we only handle what we want to
1558 * handle.
1559 */
1560 mask = 0;
1561 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
1562 mask |= PIPE_FIFO_UNDERRUN_STATUS;
1563
1564 switch (pipe) {
1565 case PIPE_A:
1566 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1567 break;
1568 case PIPE_B:
1569 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1570 break;
1571 }
1572 if (iir & iir_bit)
1573 mask |= dev_priv->pipestat_irq_mask[pipe];
1574
1575 if (!mask)
1576 continue;
1577
1578 reg = PIPESTAT(pipe);
1579 mask |= PIPESTAT_INT_ENABLE_MASK;
1580 pipe_stats[pipe] = I915_READ(reg) & mask;
1581
1582 /*
1583 * Clear the PIPE*STAT regs before the IIR
1584 */
1585 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1586 PIPESTAT_INT_STATUS_MASK))
1587 I915_WRITE(reg, pipe_stats[pipe]);
1588 }
1589 spin_unlock(&dev_priv->irq_lock);
1590
1591 for_each_pipe(pipe) {
1592 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1593 drm_handle_vblank(dev, pipe);
1594
1595 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1596 intel_prepare_page_flip(dev, pipe);
1597 intel_finish_page_flip(dev, pipe);
1598 }
1599
1600 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1601 i9xx_pipe_crc_irq_handler(dev, pipe);
1602
1603 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1604 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1605 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1606 }
1607
1608 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1609 gmbus_irq_handler(dev);
1610}
1611
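The new valleyview_pipestat_irq_handler() is a two-phase handler: under the lock it samples only the status bits it intends to handle (plus underrun, when reporting is enabled) and acks them before the top-level IIR is cleared; the snapshot is then processed with the lock dropped. A toy model of that structure, with pthreads standing in for the spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 2

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pipestat_reg[MAX_PIPES];	/* stand-in for PIPESTAT */
static uint32_t pipestat_irq_mask[MAX_PIPES];	/* status bits we handle */

static void pipestat_irq_handler(void)
{
	uint32_t pipe_stats[MAX_PIPES] = { 0 };
	int pipe;

	pthread_mutex_lock(&irq_lock);
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		/* only sample the bits we actually want to handle */
		pipe_stats[pipe] = pipestat_reg[pipe] & pipestat_irq_mask[pipe];
		/* ack them before the top-level IIR would be cleared */
		pipestat_reg[pipe] &= ~pipe_stats[pipe];
	}
	pthread_mutex_unlock(&irq_lock);

	/* second pass: act on the snapshot without holding the lock */
	for (pipe = 0; pipe < MAX_PIPES; pipe++)
		if (pipe_stats[pipe])
			printf("pipe %d events: 0x%08x\n",
			       pipe, (unsigned)pipe_stats[pipe]);
}

int main(void)
{
	pipestat_irq_mask[0] = 0x4;	/* e.g. vblank */
	pipestat_reg[0] = 0x5;		/* vblank + an unhandled bit */
	pipestat_irq_handler();
	return 0;
}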
1431static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1612static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1432{ 1613{
1433 struct drm_device *dev = (struct drm_device *) arg; 1614 struct drm_device *dev = (struct drm_device *) arg;
1434 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1615 struct drm_i915_private *dev_priv = dev->dev_private;
1435 u32 iir, gt_iir, pm_iir; 1616 u32 iir, gt_iir, pm_iir;
1436 irqreturn_t ret = IRQ_NONE; 1617 irqreturn_t ret = IRQ_NONE;
1437 unsigned long irqflags;
1438 int pipe;
1439 u32 pipe_stats[I915_MAX_PIPES];
1440
1441 atomic_inc(&dev_priv->irq_received);
1442 1618
1443 while (true) { 1619 while (true) {
1444 iir = I915_READ(VLV_IIR); 1620 iir = I915_READ(VLV_IIR);
@@ -1452,44 +1628,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1452 1628
1453 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1629 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1454 1630
1455 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1631 valleyview_pipestat_irq_handler(dev, iir);
1456 for_each_pipe(pipe) {
1457 int reg = PIPESTAT(pipe);
1458 pipe_stats[pipe] = I915_READ(reg);
1459
1460 /*
1461 * Clear the PIPE*STAT regs before the IIR
1462 */
1463 if (pipe_stats[pipe] & 0x8000ffff) {
1464 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1465 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1466 pipe_name(pipe));
1467 I915_WRITE(reg, pipe_stats[pipe]);
1468 }
1469 }
1470 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1471
1472 for_each_pipe(pipe) {
1473 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1474 drm_handle_vblank(dev, pipe);
1475
1476 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1477 intel_prepare_page_flip(dev, pipe);
1478 intel_finish_page_flip(dev, pipe);
1479 }
1480
1481 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1482 i9xx_pipe_crc_irq_handler(dev, pipe);
1483 }
1484 1632
1485 /* Consume port. Then clear IIR or we'll miss events */ 1633 /* Consume port. Then clear IIR or we'll miss events */
1486 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1634 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1487 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1635 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1488 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1636 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1489 1637
1490 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1491 hotplug_status);
1492
1493 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1638 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1494 1639
1495 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1640 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
@@ -1499,8 +1644,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1499 I915_READ(PORT_HOTPLUG_STAT); 1644 I915_READ(PORT_HOTPLUG_STAT);
1500 } 1645 }
1501 1646
1502 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1503 gmbus_irq_handler(dev);
1504 1647
1505 if (pm_iir) 1648 if (pm_iir)
1506 gen6_rps_irq_handler(dev_priv, pm_iir); 1649 gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1516,7 +1659,7 @@ out:
1516 1659
1517static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1660static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1518{ 1661{
1519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1662 struct drm_i915_private *dev_priv = dev->dev_private;
1520 int pipe; 1663 int pipe;
1521 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1664 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1522 1665
@@ -1559,12 +1702,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1559 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1702 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1560 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1703 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1561 false)) 1704 false))
1562 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1705 DRM_ERROR("PCH transcoder A FIFO underrun\n");
1563 1706
1564 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1707 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1565 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1708 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1566 false)) 1709 false))
1567 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1710 DRM_ERROR("PCH transcoder B FIFO underrun\n");
1568} 1711}
1569 1712
1570static void ivb_err_int_handler(struct drm_device *dev) 1713static void ivb_err_int_handler(struct drm_device *dev)
@@ -1580,8 +1723,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
1580 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1723 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1581 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1724 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1582 false)) 1725 false))
1583 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1726 DRM_ERROR("Pipe %c FIFO underrun\n",
1584 pipe_name(pipe)); 1727 pipe_name(pipe));
1585 } 1728 }
1586 1729
1587 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1730 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@ -1606,24 +1749,24 @@ static void cpt_serr_int_handler(struct drm_device *dev)
1606 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1749 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1607 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1750 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1608 false)) 1751 false))
1609 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1752 DRM_ERROR("PCH transcoder A FIFO underrun\n");
1610 1753
1611 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1754 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1612 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1755 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1613 false)) 1756 false))
1614 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1757 DRM_ERROR("PCH transcoder B FIFO underrun\n");
1615 1758
1616 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1759 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1617 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1760 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1618 false)) 1761 false))
1619 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1762 DRM_ERROR("PCH transcoder C FIFO underrun\n");
1620 1763
1621 I915_WRITE(SERR_INT, serr_int); 1764 I915_WRITE(SERR_INT, serr_int);
1622} 1765}
1623 1766
1624static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1767static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1625{ 1768{
1626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1769 struct drm_i915_private *dev_priv = dev->dev_private;
1627 int pipe; 1770 int pipe;
1628 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1771 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1629 1772
@@ -1678,8 +1821,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1678 1821
1679 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1822 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1680 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1823 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1681 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1824 DRM_ERROR("Pipe %c FIFO underrun\n",
1682 pipe_name(pipe)); 1825 pipe_name(pipe));
1683 1826
1684 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1827 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1685 i9xx_pipe_crc_irq_handler(dev, pipe); 1828 i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -1711,7 +1854,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1711static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1854static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1712{ 1855{
1713 struct drm_i915_private *dev_priv = dev->dev_private; 1856 struct drm_i915_private *dev_priv = dev->dev_private;
1714 enum pipe i; 1857 enum pipe pipe;
1715 1858
1716 if (de_iir & DE_ERR_INT_IVB) 1859 if (de_iir & DE_ERR_INT_IVB)
1717 ivb_err_int_handler(dev); 1860 ivb_err_int_handler(dev);
@@ -1722,14 +1865,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1722 if (de_iir & DE_GSE_IVB) 1865 if (de_iir & DE_GSE_IVB)
1723 intel_opregion_asle_intr(dev); 1866 intel_opregion_asle_intr(dev);
1724 1867
1725 for_each_pipe(i) { 1868 for_each_pipe(pipe) {
1726 if (de_iir & (DE_PIPE_VBLANK_IVB(i))) 1869 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
1727 drm_handle_vblank(dev, i); 1870 drm_handle_vblank(dev, pipe);
1728 1871
1729 /* plane/pipes map 1:1 on ilk+ */ 1872 /* plane/pipes map 1:1 on ilk+ */
1730 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) { 1873 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1731 intel_prepare_page_flip(dev, i); 1874 intel_prepare_page_flip(dev, pipe);
1732 intel_finish_page_flip_plane(dev, i); 1875 intel_finish_page_flip_plane(dev, pipe);
1733 } 1876 }
1734 } 1877 }
1735 1878
@@ -1747,12 +1890,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1747static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1890static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1748{ 1891{
1749 struct drm_device *dev = (struct drm_device *) arg; 1892 struct drm_device *dev = (struct drm_device *) arg;
1750 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1893 struct drm_i915_private *dev_priv = dev->dev_private;
1751 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1894 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1752 irqreturn_t ret = IRQ_NONE; 1895 irqreturn_t ret = IRQ_NONE;
1753 1896
1754 atomic_inc(&dev_priv->irq_received);
1755
1756 /* We get interrupts on unclaimed registers, so check for this before we 1897 /* We get interrupts on unclaimed registers, so check for this before we
1757 * do any I915_{READ,WRITE}. */ 1898 * do any I915_{READ,WRITE}. */
1758 intel_uncore_check_errors(dev); 1899 intel_uncore_check_errors(dev);
@@ -1821,8 +1962,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
1821 uint32_t tmp = 0; 1962 uint32_t tmp = 0;
1822 enum pipe pipe; 1963 enum pipe pipe;
1823 1964
1824 atomic_inc(&dev_priv->irq_received);
1825
1826 master_ctl = I915_READ(GEN8_MASTER_IRQ); 1965 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1827 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1966 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1828 if (!master_ctl) 1967 if (!master_ctl)
@@ -1884,8 +2023,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
1884 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2023 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1885 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2024 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1886 false)) 2025 false))
1887 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 2026 DRM_ERROR("Pipe %c FIFO underrun\n",
1888 pipe_name(pipe)); 2027 pipe_name(pipe));
1889 } 2028 }
1890 2029
1891 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2030 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@ -1962,8 +2101,8 @@ static void i915_error_work_func(struct work_struct *work)
1962{ 2101{
1963 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2102 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1964 work); 2103 work);
1965 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 2104 struct drm_i915_private *dev_priv =
1966 gpu_error); 2105 container_of(error, struct drm_i915_private, gpu_error);
1967 struct drm_device *dev = dev_priv->dev; 2106 struct drm_device *dev = dev_priv->dev;
1968 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2107 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1969 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2108 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2127,11 +2266,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2127 * so userspace knows something bad happened (should trigger collection 2266 * so userspace knows something bad happened (should trigger collection
2128 * of a ring dump etc.). 2267 * of a ring dump etc.).
2129 */ 2268 */
2130void i915_handle_error(struct drm_device *dev, bool wedged) 2269void i915_handle_error(struct drm_device *dev, bool wedged,
2270 const char *fmt, ...)
2131{ 2271{
2132 struct drm_i915_private *dev_priv = dev->dev_private; 2272 struct drm_i915_private *dev_priv = dev->dev_private;
2273 va_list args;
2274 char error_msg[80];
2133 2275
2134 i915_capture_error_state(dev); 2276 va_start(args, fmt);
2277 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2278 va_end(args);
2279
2280 i915_capture_error_state(dev, wedged, error_msg);
2135 i915_report_and_clear_eir(dev); 2281 i915_report_and_clear_eir(dev);
2136 2282
2137 if (wedged) { 2283 if (wedged) {
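i915_handle_error() now formats a caller-supplied message into a fixed 80-byte buffer and threads it through the capture code, which is what lets the DRM_ERROR call sites elsewhere in this patch collapse into the error call itself. A minimal sketch (vsnprintf standing in for the kernel's vscnprintf; the capture function is a stub):

#include <stdarg.h>
#include <stdio.h>

static void capture_error_state(int wedged, const char *msg)
{
	printf("capturing state (wedged=%d): %s\n", wedged, msg);
}

static void handle_error(int wedged, const char *fmt, ...)
{
	va_list args;
	char error_msg[80];	/* same fixed size as the kernel buffer */

	va_start(args, fmt);
	vsnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	capture_error_state(wedged, error_msg);
}

int main(void)
{
	handle_error(0, "GT error interrupt 0x%08x", 0xdeadbeefu);
	return 0;
}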
@@ -2165,7 +2311,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
2165 2311
2166static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2312static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2167{ 2313{
2168 drm_i915_private_t *dev_priv = dev->dev_private; 2314 struct drm_i915_private *dev_priv = dev->dev_private;
2169 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2315 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2170 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2316 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2171 struct drm_i915_gem_object *obj; 2317 struct drm_i915_gem_object *obj;
@@ -2197,8 +2343,8 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2197 } else { 2343 } else {
2198 int dspaddr = DSPADDR(intel_crtc->plane); 2344 int dspaddr = DSPADDR(intel_crtc->plane);
2199 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2345 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2200 crtc->y * crtc->fb->pitches[0] + 2346 crtc->y * crtc->primary->fb->pitches[0] +
2201 crtc->x * crtc->fb->bits_per_pixel/8); 2347 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2202 } 2348 }
2203 2349
2204 spin_unlock_irqrestore(&dev->event_lock, flags); 2350 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -2214,7 +2360,7 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2214 */ 2360 */
2215static int i915_enable_vblank(struct drm_device *dev, int pipe) 2361static int i915_enable_vblank(struct drm_device *dev, int pipe)
2216{ 2362{
2217 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2363 struct drm_i915_private *dev_priv = dev->dev_private;
2218 unsigned long irqflags; 2364 unsigned long irqflags;
2219 2365
2220 if (!i915_pipe_enabled(dev, pipe)) 2366 if (!i915_pipe_enabled(dev, pipe))
@@ -2223,13 +2369,13 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2223 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2224 if (INTEL_INFO(dev)->gen >= 4) 2370 if (INTEL_INFO(dev)->gen >= 4)
2225 i915_enable_pipestat(dev_priv, pipe, 2371 i915_enable_pipestat(dev_priv, pipe,
2226 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2372 PIPE_START_VBLANK_INTERRUPT_STATUS);
2227 else 2373 else
2228 i915_enable_pipestat(dev_priv, pipe, 2374 i915_enable_pipestat(dev_priv, pipe,
2229 PIPE_VBLANK_INTERRUPT_ENABLE); 2375 PIPE_VBLANK_INTERRUPT_STATUS);
2230 2376
2231 /* maintain vblank delivery even in deep C-states */ 2377 /* maintain vblank delivery even in deep C-states */
2232 if (dev_priv->info->gen == 3) 2378 if (INTEL_INFO(dev)->gen == 3)
2233 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2379 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2234 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2380 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2235 2381
@@ -2238,7 +2384,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2238 2384
2239static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2385static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2240{ 2386{
2241 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2387 struct drm_i915_private *dev_priv = dev->dev_private;
2242 unsigned long irqflags; 2388 unsigned long irqflags;
2243 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2389 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2244 DE_PIPE_VBLANK(pipe); 2390 DE_PIPE_VBLANK(pipe);
@@ -2255,22 +2401,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2255 2401
2256static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2402static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2257{ 2403{
2258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2404 struct drm_i915_private *dev_priv = dev->dev_private;
2259 unsigned long irqflags; 2405 unsigned long irqflags;
2260 u32 imr;
2261 2406
2262 if (!i915_pipe_enabled(dev, pipe)) 2407 if (!i915_pipe_enabled(dev, pipe))
2263 return -EINVAL; 2408 return -EINVAL;
2264 2409
2265 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2410 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2266 imr = I915_READ(VLV_IMR);
2267 if (pipe == PIPE_A)
2268 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2269 else
2270 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2271 I915_WRITE(VLV_IMR, imr);
2272 i915_enable_pipestat(dev_priv, pipe, 2411 i915_enable_pipestat(dev_priv, pipe,
2273 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2412 PIPE_START_VBLANK_INTERRUPT_STATUS);
2274 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2413 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2275 2414
2276 return 0; 2415 return 0;
@@ -2297,22 +2436,22 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2297 */ 2436 */
2298static void i915_disable_vblank(struct drm_device *dev, int pipe) 2437static void i915_disable_vblank(struct drm_device *dev, int pipe)
2299{ 2438{
2300 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2439 struct drm_i915_private *dev_priv = dev->dev_private;
2301 unsigned long irqflags; 2440 unsigned long irqflags;
2302 2441
2303 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2442 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2304 if (dev_priv->info->gen == 3) 2443 if (INTEL_INFO(dev)->gen == 3)
2305 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2444 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2306 2445
2307 i915_disable_pipestat(dev_priv, pipe, 2446 i915_disable_pipestat(dev_priv, pipe,
2308 PIPE_VBLANK_INTERRUPT_ENABLE | 2447 PIPE_VBLANK_INTERRUPT_STATUS |
2309 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2448 PIPE_START_VBLANK_INTERRUPT_STATUS);
2310 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2449 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2311} 2450}
2312 2451
2313static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2452static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2314{ 2453{
2315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2454 struct drm_i915_private *dev_priv = dev->dev_private;
2316 unsigned long irqflags; 2455 unsigned long irqflags;
2317 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2456 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2318 DE_PIPE_VBLANK(pipe); 2457 DE_PIPE_VBLANK(pipe);
@@ -2324,19 +2463,12 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2324 2463
2325static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2464static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2326{ 2465{
2327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2466 struct drm_i915_private *dev_priv = dev->dev_private;
2328 unsigned long irqflags; 2467 unsigned long irqflags;
2329 u32 imr;
2330 2468
2331 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2469 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2332 i915_disable_pipestat(dev_priv, pipe, 2470 i915_disable_pipestat(dev_priv, pipe,
2333 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2471 PIPE_START_VBLANK_INTERRUPT_STATUS);
2334 imr = I915_READ(VLV_IMR);
2335 if (pipe == PIPE_A)
2336 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2337 else
2338 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2339 I915_WRITE(VLV_IMR, imr);
2340 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2472 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2341} 2473}
2342 2474
@@ -2373,29 +2505,43 @@ static struct intel_ring_buffer *
2373semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2505semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2374{ 2506{
2375 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2507 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2376 u32 cmd, ipehr, acthd, acthd_min; 2508 u32 cmd, ipehr, head;
2509 int i;
2377 2510
2378 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2511 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2379 if ((ipehr & ~(0x3 << 16)) != 2512 if ((ipehr & ~(0x3 << 16)) !=
2380 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2513 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2381 return NULL; 2514 return NULL;
2382 2515
2383 /* ACTHD is likely pointing to the dword after the actual command, 2516 /*
2384 * so scan backwards until we find the MBOX. 2517 * HEAD is likely pointing to the dword after the actual command,
2518 * so scan backwards until we find the MBOX. But limit it to just 3
2519 * dwords. Note that we don't care about ACTHD here since that might
2520 * point at a batch, and semaphores are always emitted into the
2521 * ringbuffer itself.
2385 */ 2522 */
2386 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2523 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2387 acthd_min = max((int)acthd - 3 * 4, 0); 2524
2388 do { 2525 for (i = 4; i; --i) {
2389 cmd = ioread32(ring->virtual_start + acthd); 2526 /*
2527 * Be paranoid and presume the hw has gone off into the wild -
2528 * our ring is smaller than what the hardware (and hence
2529 * HEAD_ADDR) allows. Also handles wrap-around.
2530 */
2531 head &= ring->size - 1;
2532
2533 /* This here seems to blow up */
2534 cmd = ioread32(ring->virtual_start + head);
2390 if (cmd == ipehr) 2535 if (cmd == ipehr)
2391 break; 2536 break;
2392 2537
2393 acthd -= 4; 2538 head -= 4;
2394 if (acthd < acthd_min) 2539 }
2395 return NULL;
2396 } while (1);
2397 2540
2398 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2541 if (!i)
2542 return NULL;
2543
2544 *seqno = ioread32(ring->virtual_start + head + 4) + 1;
2399 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2545 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2400} 2546}
2401 2547
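The rewritten scan drops ACTHD in favour of HEAD and bounds the search to four dwords, masking with the power-of-two ring size on every step so both wrap-around and a bogus HEAD stay inside the buffer. A standalone version of that loop (command value and sizes invented):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64u		/* bytes, power of two */
#define MBOX_CMD  0xdeadbeefu	/* stand-in for the MBOX command */

static uint32_t ring[RING_SIZE / 4];

static int find_mbox(uint32_t head)
{
	int i;

	for (i = 4; i; --i) {
		head &= RING_SIZE - 1;	/* handles wrap and a bogus HEAD */
		if (ring[head / 4] == MBOX_CMD)
			return (int)head;
		head -= 4;	/* unsigned underflow gets re-masked above */
	}
	return -1;		/* not found within the window */
}

int main(void)
{
	ring[0] = MBOX_CMD;			/* command at offset 0 */
	printf("found at %d\n", find_mbox(8));	/* scans 8, 4, 0 */
	return 0;
}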
@@ -2429,7 +2575,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2429} 2575}
2430 2576
2431static enum intel_ring_hangcheck_action 2577static enum intel_ring_hangcheck_action
2432ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2578ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
2433{ 2579{
2434 struct drm_device *dev = ring->dev; 2580 struct drm_device *dev = ring->dev;
2435 struct drm_i915_private *dev_priv = dev->dev_private; 2581 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2448,9 +2594,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2448 */ 2594 */
2449 tmp = I915_READ_CTL(ring); 2595 tmp = I915_READ_CTL(ring);
2450 if (tmp & RING_WAIT) { 2596 if (tmp & RING_WAIT) {
2451 DRM_ERROR("Kicking stuck wait on %s\n", 2597 i915_handle_error(dev, false,
2452 ring->name); 2598 "Kicking stuck wait on %s",
2453 i915_handle_error(dev, false); 2599 ring->name);
2454 I915_WRITE_CTL(ring, tmp); 2600 I915_WRITE_CTL(ring, tmp);
2455 return HANGCHECK_KICK; 2601 return HANGCHECK_KICK;
2456 } 2602 }
@@ -2460,9 +2606,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2460 default: 2606 default:
2461 return HANGCHECK_HUNG; 2607 return HANGCHECK_HUNG;
2462 case 1: 2608 case 1:
2463 DRM_ERROR("Kicking stuck semaphore on %s\n", 2609 i915_handle_error(dev, false,
2464 ring->name); 2610 "Kicking stuck semaphore on %s",
2465 i915_handle_error(dev, false); 2611 ring->name);
2466 I915_WRITE_CTL(ring, tmp); 2612 I915_WRITE_CTL(ring, tmp);
2467 return HANGCHECK_KICK; 2613 return HANGCHECK_KICK;
2468 case 0: 2614 case 0:
@@ -2484,7 +2630,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2484static void i915_hangcheck_elapsed(unsigned long data) 2630static void i915_hangcheck_elapsed(unsigned long data)
2485{ 2631{
2486 struct drm_device *dev = (struct drm_device *)data; 2632 struct drm_device *dev = (struct drm_device *)data;
2487 drm_i915_private_t *dev_priv = dev->dev_private; 2633 struct drm_i915_private *dev_priv = dev->dev_private;
2488 struct intel_ring_buffer *ring; 2634 struct intel_ring_buffer *ring;
2489 int i; 2635 int i;
2490 int busy_count = 0, rings_hung = 0; 2636 int busy_count = 0, rings_hung = 0;
@@ -2492,13 +2638,13 @@ static void i915_hangcheck_elapsed(unsigned long data)
2492#define BUSY 1 2638#define BUSY 1
2493#define KICK 5 2639#define KICK 5
2494#define HUNG 20 2640#define HUNG 20
2495#define FIRE 30
2496 2641
2497 if (!i915_enable_hangcheck) 2642 if (!i915.enable_hangcheck)
2498 return; 2643 return;
2499 2644
2500 for_each_ring(ring, dev_priv, i) { 2645 for_each_ring(ring, dev_priv, i) {
2501 u32 seqno, acthd; 2646 u64 acthd;
2647 u32 seqno;
2502 bool busy = true; 2648 bool busy = true;
2503 2649
2504 semaphore_clear_deadlocks(dev_priv); 2650 semaphore_clear_deadlocks(dev_priv);
@@ -2576,7 +2722,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2576 } 2722 }
2577 2723
2578 for_each_ring(ring, dev_priv, i) { 2724 for_each_ring(ring, dev_priv, i) {
2579 if (ring->hangcheck.score > FIRE) { 2725 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2580 DRM_INFO("%s on %s\n", 2726 DRM_INFO("%s on %s\n",
2581 stuck[i] ? "stuck" : "no progress", 2727 stuck[i] ? "stuck" : "no progress",
2582 ring->name); 2728 ring->name);
@@ -2585,7 +2731,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2585 } 2731 }
2586 2732
2587 if (rings_hung) 2733 if (rings_hung)
2588 return i915_handle_error(dev, true); 2734 return i915_handle_error(dev, true, "Ring hung");
2589 2735
2590 if (busy_count) 2736 if (busy_count)
2591 /* Reset timer in case chip hangs without another request 2737 /* Reset timer in case chip hangs without another request
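The FIRE constant disappears because the per-ring score, accumulated in BUSY/KICK/HUNG increments elsewhere in this function, is now compared against the shared HANGCHECK_SCORE_RING_HUNG threshold. A toy illustration of the scoring (weights copied from the defines above; the threshold value here is illustrative):

#include <stdio.h>

#define BUSY 1
#define KICK 5
#define HUNG 20
#define SCORE_RING_HUNG 31	/* illustrative threshold */

int main(void)
{
	int score = 0;

	score += BUSY;	/* making progress */
	score += KICK;	/* stuck wait, kicked */
	score += HUNG;	/* no progress at all */
	score += KICK;

	if (score >= SCORE_RING_HUNG)
		printf("ring hung (score=%d), resetting\n", score);
	return 0;
}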
@@ -2596,7 +2742,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2596void i915_queue_hangcheck(struct drm_device *dev) 2742void i915_queue_hangcheck(struct drm_device *dev)
2597{ 2743{
2598 struct drm_i915_private *dev_priv = dev->dev_private; 2744 struct drm_i915_private *dev_priv = dev->dev_private;
2599 if (!i915_enable_hangcheck) 2745 if (!i915.enable_hangcheck)
2600 return; 2746 return;
2601 2747
2602 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2748 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@ -2643,9 +2789,7 @@ static void gen5_gt_irq_preinstall(struct drm_device *dev)
2643*/ 2789*/
2644static void ironlake_irq_preinstall(struct drm_device *dev) 2790static void ironlake_irq_preinstall(struct drm_device *dev)
2645{ 2791{
2646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2792 struct drm_i915_private *dev_priv = dev->dev_private;
2647
2648 atomic_set(&dev_priv->irq_received, 0);
2649 2793
2650 I915_WRITE(HWSTAM, 0xeffe); 2794 I915_WRITE(HWSTAM, 0xeffe);
2651 2795
@@ -2660,11 +2804,9 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
2660 2804
2661static void valleyview_irq_preinstall(struct drm_device *dev) 2805static void valleyview_irq_preinstall(struct drm_device *dev)
2662{ 2806{
2663 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2807 struct drm_i915_private *dev_priv = dev->dev_private;
2664 int pipe; 2808 int pipe;
2665 2809
2666 atomic_set(&dev_priv->irq_received, 0);
2667
2668 /* VLV magic */ 2810 /* VLV magic */
2669 I915_WRITE(VLV_IMR, 0); 2811 I915_WRITE(VLV_IMR, 0);
2670 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2812 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2694,8 +2836,6 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2694 struct drm_i915_private *dev_priv = dev->dev_private; 2836 struct drm_i915_private *dev_priv = dev->dev_private;
2695 int pipe; 2837 int pipe;
2696 2838
2697 atomic_set(&dev_priv->irq_received, 0);
2698
2699 I915_WRITE(GEN8_MASTER_IRQ, 0); 2839 I915_WRITE(GEN8_MASTER_IRQ, 0);
2700 POSTING_READ(GEN8_MASTER_IRQ); 2840 POSTING_READ(GEN8_MASTER_IRQ);
2701 2841
@@ -2740,7 +2880,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2740 2880
2741static void ibx_hpd_irq_setup(struct drm_device *dev) 2881static void ibx_hpd_irq_setup(struct drm_device *dev)
2742{ 2882{
2743 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2883 struct drm_i915_private *dev_priv = dev->dev_private;
2744 struct drm_mode_config *mode_config = &dev->mode_config; 2884 struct drm_mode_config *mode_config = &dev->mode_config;
2745 struct intel_encoder *intel_encoder; 2885 struct intel_encoder *intel_encoder;
2746 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2886 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
@@ -2775,7 +2915,7 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2775 2915
2776static void ibx_irq_postinstall(struct drm_device *dev) 2916static void ibx_irq_postinstall(struct drm_device *dev)
2777{ 2917{
2778 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2918 struct drm_i915_private *dev_priv = dev->dev_private;
2779 u32 mask; 2919 u32 mask;
2780 2920
2781 if (HAS_PCH_NOP(dev)) 2921 if (HAS_PCH_NOP(dev))
@@ -2821,7 +2961,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2821 POSTING_READ(GTIER); 2961 POSTING_READ(GTIER);
2822 2962
2823 if (INTEL_INFO(dev)->gen >= 6) { 2963 if (INTEL_INFO(dev)->gen >= 6) {
2824 pm_irqs |= GEN6_PM_RPS_EVENTS; 2964 pm_irqs |= dev_priv->pm_rps_events;
2825 2965
2826 if (HAS_VEBOX(dev)) 2966 if (HAS_VEBOX(dev))
2827 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2967 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -2837,7 +2977,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2837static int ironlake_irq_postinstall(struct drm_device *dev) 2977static int ironlake_irq_postinstall(struct drm_device *dev)
2838{ 2978{
2839 unsigned long irqflags; 2979 unsigned long irqflags;
2840 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2980 struct drm_i915_private *dev_priv = dev->dev_private;
2841 u32 display_mask, extra_mask; 2981 u32 display_mask, extra_mask;
2842 2982
2843 if (INTEL_INFO(dev)->gen >= 7) { 2983 if (INTEL_INFO(dev)->gen >= 7) {
@@ -2885,44 +3025,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2885 return 0; 3025 return 0;
2886} 3026}
2887 3027
3028static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3029{
3030 u32 pipestat_mask;
3031 u32 iir_mask;
3032
3033 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3034 PIPE_FIFO_UNDERRUN_STATUS;
3035
3036 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3037 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3038 POSTING_READ(PIPESTAT(PIPE_A));
3039
3040 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3041 PIPE_CRC_DONE_INTERRUPT_STATUS;
3042
3043 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3044 PIPE_GMBUS_INTERRUPT_STATUS);
3045 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3046
3047 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3048 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3049 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3050 dev_priv->irq_mask &= ~iir_mask;
3051
3052 I915_WRITE(VLV_IIR, iir_mask);
3053 I915_WRITE(VLV_IIR, iir_mask);
3054 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3055 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3056 POSTING_READ(VLV_IER);
3057}
3058
3059static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3060{
3061 u32 pipestat_mask;
3062 u32 iir_mask;
3063
3064 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3065 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3066 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3067
3068 dev_priv->irq_mask |= iir_mask;
3069 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3070 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3071 I915_WRITE(VLV_IIR, iir_mask);
3072 I915_WRITE(VLV_IIR, iir_mask);
3073 POSTING_READ(VLV_IIR);
3074
3075 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3076 PIPE_CRC_DONE_INTERRUPT_STATUS;
3077
3078 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3079 PIPE_GMBUS_INTERRUPT_STATUS);
3080 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3081
3082 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3083 PIPE_FIFO_UNDERRUN_STATUS;
3084 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3085 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3086 POSTING_READ(PIPESTAT(PIPE_A));
3087}
3088
3089void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3090{
3091 assert_spin_locked(&dev_priv->irq_lock);
3092
3093 if (dev_priv->display_irqs_enabled)
3094 return;
3095
3096 dev_priv->display_irqs_enabled = true;
3097
3098 if (dev_priv->dev->irq_enabled)
3099 valleyview_display_irqs_install(dev_priv);
3100}
3101
3102void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3103{
3104 assert_spin_locked(&dev_priv->irq_lock);
3105
3106 if (!dev_priv->display_irqs_enabled)
3107 return;
3108
3109 dev_priv->display_irqs_enabled = false;
3110
3111 if (dev_priv->dev->irq_enabled)
3112 valleyview_display_irqs_uninstall(dev_priv);
3113}
3114
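The two helpers above let the power-well code request display interrupts before the hardware IRQ handler is installed: the desired state is remembered in display_irqs_enabled under irq_lock, and hardware is only touched once dev->irq_enabled is set; postinstall then replays the stored request. A minimal sketch of that deferred-enable pattern, with stand-in types rather than the driver's real structures:

    /* Deferred-enable sketch: remember what the caller wants, and only
     * program "hardware" once the IRQ path is live.  Stand-in types only. */
    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
    	bool irq_installed;        /* is the IRQ handler hooked up yet? */
    	bool display_irqs_enabled; /* what the caller last asked for */
    };

    static void hw_unmask(struct dev_state *s) { (void)s; puts("unmask display irqs"); }

    void display_irqs_enable(struct dev_state *s)
    {
    	if (s->display_irqs_enabled)
    		return;                 /* idempotent, like the i915 helper */
    	s->display_irqs_enabled = true;
    	if (s->irq_installed)
    		hw_unmask(s);
    }

    void irq_postinstall(struct dev_state *s)
    {
    	s->irq_installed = true;
    	if (s->display_irqs_enabled)    /* replay an early request */
    		hw_unmask(s);
    }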
2888static int valleyview_irq_postinstall(struct drm_device *dev) 3115static int valleyview_irq_postinstall(struct drm_device *dev)
2889{ 3116{
2890 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3117 struct drm_i915_private *dev_priv = dev->dev_private;
2891 u32 enable_mask;
2892 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2893 PIPE_CRC_DONE_ENABLE;
2894 unsigned long irqflags; 3118 unsigned long irqflags;
2895 3119
2896 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 3120 dev_priv->irq_mask = ~0;
2897 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2898 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2899 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2900 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2901
2902 /*
2903 * Leave vblank interrupts masked initially; enable/disable will
2904 * toggle them based on usage.
2905 */
2906 dev_priv->irq_mask = (~enable_mask) |
2907 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2908 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2909 3121
2910 I915_WRITE(PORT_HOTPLUG_EN, 0); 3122 I915_WRITE(PORT_HOTPLUG_EN, 0);
2911 POSTING_READ(PORT_HOTPLUG_EN); 3123 POSTING_READ(PORT_HOTPLUG_EN);
2912 3124
2913 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3125 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2914 I915_WRITE(VLV_IER, enable_mask); 3126 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
2915 I915_WRITE(VLV_IIR, 0xffffffff); 3127 I915_WRITE(VLV_IIR, 0xffffffff);
2916 I915_WRITE(PIPESTAT(0), 0xffff);
2917 I915_WRITE(PIPESTAT(1), 0xffff);
2918 POSTING_READ(VLV_IER); 3128 POSTING_READ(VLV_IER);
2919 3129
2920 /* Interrupt setup is already guaranteed to be single-threaded, this is 3130 /* Interrupt setup is already guaranteed to be single-threaded, this is
2921 * just to make the assert_spin_locked check happy. */ 3131 * just to make the assert_spin_locked check happy. */
2922 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3132 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2923 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 3133 if (dev_priv->display_irqs_enabled)
2924 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3134 valleyview_display_irqs_install(dev_priv);
2925 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2926 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2927 3136
2928 I915_WRITE(VLV_IIR, 0xffffffff); 3137 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3018,8 +3227,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3018 if (!dev_priv) 3227 if (!dev_priv)
3019 return; 3228 return;
3020 3229
3021 atomic_set(&dev_priv->irq_received, 0);
3022
3023 I915_WRITE(GEN8_MASTER_IRQ, 0); 3230 I915_WRITE(GEN8_MASTER_IRQ, 0);
3024 3231
3025#define GEN8_IRQ_FINI_NDX(type, which) do { \ 3232#define GEN8_IRQ_FINI_NDX(type, which) do { \
@@ -3054,13 +3261,14 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3054 3261
3055static void valleyview_irq_uninstall(struct drm_device *dev) 3262static void valleyview_irq_uninstall(struct drm_device *dev)
3056{ 3263{
3057 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3264 struct drm_i915_private *dev_priv = dev->dev_private;
3265 unsigned long irqflags;
3058 int pipe; 3266 int pipe;
3059 3267
3060 if (!dev_priv) 3268 if (!dev_priv)
3061 return; 3269 return;
3062 3270
3063 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3271 intel_hpd_irq_uninstall(dev_priv);
3064 3272
3065 for_each_pipe(pipe) 3273 for_each_pipe(pipe)
3066 I915_WRITE(PIPESTAT(pipe), 0xffff); 3274 I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3068,8 +3276,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3068 I915_WRITE(HWSTAM, 0xffffffff); 3276 I915_WRITE(HWSTAM, 0xffffffff);
3069 I915_WRITE(PORT_HOTPLUG_EN, 0); 3277 I915_WRITE(PORT_HOTPLUG_EN, 0);
3070 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3278 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3071 for_each_pipe(pipe) 3279
3072 I915_WRITE(PIPESTAT(pipe), 0xffff); 3280 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3281 if (dev_priv->display_irqs_enabled)
3282 valleyview_display_irqs_uninstall(dev_priv);
3283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3284
3285 dev_priv->irq_mask = 0;
3286
3073 I915_WRITE(VLV_IIR, 0xffffffff); 3287 I915_WRITE(VLV_IIR, 0xffffffff);
3074 I915_WRITE(VLV_IMR, 0xffffffff); 3288 I915_WRITE(VLV_IMR, 0xffffffff);
3075 I915_WRITE(VLV_IER, 0x0); 3289 I915_WRITE(VLV_IER, 0x0);
@@ -3078,12 +3292,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3078 3292
3079static void ironlake_irq_uninstall(struct drm_device *dev) 3293static void ironlake_irq_uninstall(struct drm_device *dev)
3080{ 3294{
3081 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3295 struct drm_i915_private *dev_priv = dev->dev_private;
3082 3296
3083 if (!dev_priv) 3297 if (!dev_priv)
3084 return; 3298 return;
3085 3299
3086 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3300 intel_hpd_irq_uninstall(dev_priv);
3087 3301
3088 I915_WRITE(HWSTAM, 0xffffffff); 3302 I915_WRITE(HWSTAM, 0xffffffff);
3089 3303
@@ -3109,11 +3323,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3109 3323
3110static void i8xx_irq_preinstall(struct drm_device * dev) 3324static void i8xx_irq_preinstall(struct drm_device * dev)
3111{ 3325{
3112 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3326 struct drm_i915_private *dev_priv = dev->dev_private;
3113 int pipe; 3327 int pipe;
3114 3328
3115 atomic_set(&dev_priv->irq_received, 0);
3116
3117 for_each_pipe(pipe) 3329 for_each_pipe(pipe)
3118 I915_WRITE(PIPESTAT(pipe), 0); 3330 I915_WRITE(PIPESTAT(pipe), 0);
3119 I915_WRITE16(IMR, 0xffff); 3331 I915_WRITE16(IMR, 0xffff);
@@ -3123,7 +3335,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3123 3335
3124static int i8xx_irq_postinstall(struct drm_device *dev) 3336static int i8xx_irq_postinstall(struct drm_device *dev)
3125{ 3337{
3126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3338 struct drm_i915_private *dev_priv = dev->dev_private;
3127 unsigned long irqflags; 3339 unsigned long irqflags;
3128 3340
3129 I915_WRITE16(EMR, 3341 I915_WRITE16(EMR,
@@ -3148,8 +3360,8 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3148 /* Interrupt setup is already guaranteed to be single-threaded, this is 3360 /* Interrupt setup is already guaranteed to be single-threaded, this is
3149 * just to make the assert_spin_locked check happy. */ 3361 * just to make the assert_spin_locked check happy. */
3150 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3362 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3151 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3363 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3152 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3364 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3153 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3365 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3154 3366
3155 return 0; 3367 return 0;
@@ -3161,7 +3373,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3161static bool i8xx_handle_vblank(struct drm_device *dev, 3373static bool i8xx_handle_vblank(struct drm_device *dev,
3162 int plane, int pipe, u32 iir) 3374 int plane, int pipe, u32 iir)
3163{ 3375{
3164 drm_i915_private_t *dev_priv = dev->dev_private; 3376 struct drm_i915_private *dev_priv = dev->dev_private;
3165 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3377 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3166 3378
3167 if (!drm_handle_vblank(dev, pipe)) 3379 if (!drm_handle_vblank(dev, pipe))
@@ -3189,7 +3401,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3189static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3401static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3190{ 3402{
3191 struct drm_device *dev = (struct drm_device *) arg; 3403 struct drm_device *dev = (struct drm_device *) arg;
3192 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3404 struct drm_i915_private *dev_priv = dev->dev_private;
3193 u16 iir, new_iir; 3405 u16 iir, new_iir;
3194 u32 pipe_stats[2]; 3406 u32 pipe_stats[2];
3195 unsigned long irqflags; 3407 unsigned long irqflags;
@@ -3198,8 +3410,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3198 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3410 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3199 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3411 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3200 3412
3201 atomic_inc(&dev_priv->irq_received);
3202
3203 iir = I915_READ16(IIR); 3413 iir = I915_READ16(IIR);
3204 if (iir == 0) 3414 if (iir == 0)
3205 return IRQ_NONE; 3415 return IRQ_NONE;
@@ -3212,7 +3422,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3212 */ 3422 */
3213 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3423 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3214 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3424 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3215 i915_handle_error(dev, false); 3425 i915_handle_error(dev, false,
3426 "Command parser error, iir 0x%08x",
3427 iir);
3216 3428
3217 for_each_pipe(pipe) { 3429 for_each_pipe(pipe) {
3218 int reg = PIPESTAT(pipe); 3430 int reg = PIPESTAT(pipe);
@@ -3221,12 +3433,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3221 /* 3433 /*
3222 * Clear the PIPE*STAT regs before the IIR 3434 * Clear the PIPE*STAT regs before the IIR
3223 */ 3435 */
3224 if (pipe_stats[pipe] & 0x8000ffff) { 3436 if (pipe_stats[pipe] & 0x8000ffff)
3225 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3226 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3227 pipe_name(pipe));
3228 I915_WRITE(reg, pipe_stats[pipe]); 3437 I915_WRITE(reg, pipe_stats[pipe]);
3229 }
3230 } 3438 }
3231 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3439 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3232 3440
@@ -3249,6 +3457,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3249 3457
3250 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3458 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3251 i9xx_pipe_crc_irq_handler(dev, pipe); 3459 i9xx_pipe_crc_irq_handler(dev, pipe);
3460
3461 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3462 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3463 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3252 } 3464 }
3253 3465
3254 iir = new_iir; 3466 iir = new_iir;
@@ -3259,7 +3471,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3259 3471
3260static void i8xx_irq_uninstall(struct drm_device * dev) 3472static void i8xx_irq_uninstall(struct drm_device * dev)
3261{ 3473{
3262 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3474 struct drm_i915_private *dev_priv = dev->dev_private;
3263 int pipe; 3475 int pipe;
3264 3476
3265 for_each_pipe(pipe) { 3477 for_each_pipe(pipe) {
@@ -3274,11 +3486,9 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
3274 3486
3275static void i915_irq_preinstall(struct drm_device * dev) 3487static void i915_irq_preinstall(struct drm_device * dev)
3276{ 3488{
3277 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3489 struct drm_i915_private *dev_priv = dev->dev_private;
3278 int pipe; 3490 int pipe;
3279 3491
3280 atomic_set(&dev_priv->irq_received, 0);
3281
3282 if (I915_HAS_HOTPLUG(dev)) { 3492 if (I915_HAS_HOTPLUG(dev)) {
3283 I915_WRITE(PORT_HOTPLUG_EN, 0); 3493 I915_WRITE(PORT_HOTPLUG_EN, 0);
3284 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3494 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3294,7 +3504,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
3294 3504
3295static int i915_irq_postinstall(struct drm_device *dev) 3505static int i915_irq_postinstall(struct drm_device *dev)
3296{ 3506{
3297 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3507 struct drm_i915_private *dev_priv = dev->dev_private;
3298 u32 enable_mask; 3508 u32 enable_mask;
3299 unsigned long irqflags; 3509 unsigned long irqflags;
3300 3510
@@ -3335,8 +3545,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
3335 /* Interrupt setup is already guaranteed to be single-threaded, this is 3545 /* Interrupt setup is already guaranteed to be single-threaded, this is
3336 * just to make the assert_spin_locked check happy. */ 3546 * just to make the assert_spin_locked check happy. */
3337 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3547 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3338 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3548 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3339 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3549 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3340 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3550 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3341 3551
3342 return 0; 3552 return 0;
@@ -3348,7 +3558,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
3348static bool i915_handle_vblank(struct drm_device *dev, 3558static bool i915_handle_vblank(struct drm_device *dev,
3349 int plane, int pipe, u32 iir) 3559 int plane, int pipe, u32 iir)
3350{ 3560{
3351 drm_i915_private_t *dev_priv = dev->dev_private; 3561 struct drm_i915_private *dev_priv = dev->dev_private;
3352 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3562 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3353 3563
3354 if (!drm_handle_vblank(dev, pipe)) 3564 if (!drm_handle_vblank(dev, pipe))
@@ -3376,7 +3586,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3376static irqreturn_t i915_irq_handler(int irq, void *arg) 3586static irqreturn_t i915_irq_handler(int irq, void *arg)
3377{ 3587{
3378 struct drm_device *dev = (struct drm_device *) arg; 3588 struct drm_device *dev = (struct drm_device *) arg;
3379 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3589 struct drm_i915_private *dev_priv = dev->dev_private;
3380 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3590 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3381 unsigned long irqflags; 3591 unsigned long irqflags;
3382 u32 flip_mask = 3592 u32 flip_mask =
@@ -3384,8 +3594,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3384 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3594 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3385 int pipe, ret = IRQ_NONE; 3595 int pipe, ret = IRQ_NONE;
3386 3596
3387 atomic_inc(&dev_priv->irq_received);
3388
3389 iir = I915_READ(IIR); 3597 iir = I915_READ(IIR);
3390 do { 3598 do {
3391 bool irq_received = (iir & ~flip_mask) != 0; 3599 bool irq_received = (iir & ~flip_mask) != 0;
@@ -3398,7 +3606,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3398 */ 3606 */
3399 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3607 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3400 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3608 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3401 i915_handle_error(dev, false); 3609 i915_handle_error(dev, false,
3610 "Command parser error, iir 0x%08x",
3611 iir);
3402 3612
3403 for_each_pipe(pipe) { 3613 for_each_pipe(pipe) {
3404 int reg = PIPESTAT(pipe); 3614 int reg = PIPESTAT(pipe);
@@ -3406,9 +3616,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3406 3616
3407 /* Clear the PIPE*STAT regs before the IIR */ 3617 /* Clear the PIPE*STAT regs before the IIR */
3408 if (pipe_stats[pipe] & 0x8000ffff) { 3618 if (pipe_stats[pipe] & 0x8000ffff) {
3409 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3410 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3411 pipe_name(pipe));
3412 I915_WRITE(reg, pipe_stats[pipe]); 3619 I915_WRITE(reg, pipe_stats[pipe]);
3413 irq_received = true; 3620 irq_received = true;
3414 } 3621 }
@@ -3424,9 +3631,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3424 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3631 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3425 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3632 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3426 3633
3427 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3428 hotplug_status);
3429
3430 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3634 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3431 3635
3432 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3636 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
@@ -3453,6 +3657,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3453 3657
3454 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3658 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3455 i9xx_pipe_crc_irq_handler(dev, pipe); 3659 i9xx_pipe_crc_irq_handler(dev, pipe);
3660
3661 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3662 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3663 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3456 } 3664 }
3457 3665
3458 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3666 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3484,10 +3692,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3484 3692
3485static void i915_irq_uninstall(struct drm_device * dev) 3693static void i915_irq_uninstall(struct drm_device * dev)
3486{ 3694{
3487 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3695 struct drm_i915_private *dev_priv = dev->dev_private;
3488 int pipe; 3696 int pipe;
3489 3697
3490 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3698 intel_hpd_irq_uninstall(dev_priv);
3491 3699
3492 if (I915_HAS_HOTPLUG(dev)) { 3700 if (I915_HAS_HOTPLUG(dev)) {
3493 I915_WRITE(PORT_HOTPLUG_EN, 0); 3701 I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3508,11 +3716,9 @@ static void i915_irq_uninstall(struct drm_device * dev)
3508 3716
3509static void i965_irq_preinstall(struct drm_device * dev) 3717static void i965_irq_preinstall(struct drm_device * dev)
3510{ 3718{
3511 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3719 struct drm_i915_private *dev_priv = dev->dev_private;
3512 int pipe; 3720 int pipe;
3513 3721
3514 atomic_set(&dev_priv->irq_received, 0);
3515
3516 I915_WRITE(PORT_HOTPLUG_EN, 0); 3722 I915_WRITE(PORT_HOTPLUG_EN, 0);
3517 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3723 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3518 3724
@@ -3526,7 +3732,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
3526 3732
3527static int i965_irq_postinstall(struct drm_device *dev) 3733static int i965_irq_postinstall(struct drm_device *dev)
3528{ 3734{
3529 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3735 struct drm_i915_private *dev_priv = dev->dev_private;
3530 u32 enable_mask; 3736 u32 enable_mask;
3531 u32 error_mask; 3737 u32 error_mask;
3532 unsigned long irqflags; 3738 unsigned long irqflags;
@@ -3551,9 +3757,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
3551 /* Interrupt setup is already guaranteed to be single-threaded, this is 3757 /* Interrupt setup is already guaranteed to be single-threaded, this is
3552 * just to make the assert_spin_locked check happy. */ 3758 * just to make the assert_spin_locked check happy. */
3553 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3759 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3554 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3760 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3555 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3761 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3556 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3762 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3557 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3763 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3558 3764
3559 /* 3765 /*
@@ -3585,7 +3791,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3585 3791
3586static void i915_hpd_irq_setup(struct drm_device *dev) 3792static void i915_hpd_irq_setup(struct drm_device *dev)
3587{ 3793{
3588 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3794 struct drm_i915_private *dev_priv = dev->dev_private;
3589 struct drm_mode_config *mode_config = &dev->mode_config; 3795 struct drm_mode_config *mode_config = &dev->mode_config;
3590 struct intel_encoder *intel_encoder; 3796 struct intel_encoder *intel_encoder;
3591 u32 hotplug_en; 3797 u32 hotplug_en;
@@ -3617,25 +3823,21 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
3617static irqreturn_t i965_irq_handler(int irq, void *arg) 3823static irqreturn_t i965_irq_handler(int irq, void *arg)
3618{ 3824{
3619 struct drm_device *dev = (struct drm_device *) arg; 3825 struct drm_device *dev = (struct drm_device *) arg;
3620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3826 struct drm_i915_private *dev_priv = dev->dev_private;
3621 u32 iir, new_iir; 3827 u32 iir, new_iir;
3622 u32 pipe_stats[I915_MAX_PIPES]; 3828 u32 pipe_stats[I915_MAX_PIPES];
3623 unsigned long irqflags; 3829 unsigned long irqflags;
3624 int irq_received;
3625 int ret = IRQ_NONE, pipe; 3830 int ret = IRQ_NONE, pipe;
3626 u32 flip_mask = 3831 u32 flip_mask =
3627 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3832 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3628 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3833 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3629 3834
3630 atomic_inc(&dev_priv->irq_received);
3631
3632 iir = I915_READ(IIR); 3835 iir = I915_READ(IIR);
3633 3836
3634 for (;;) { 3837 for (;;) {
3838 bool irq_received = (iir & ~flip_mask) != 0;
3635 bool blc_event = false; 3839 bool blc_event = false;
3636 3840
3637 irq_received = (iir & ~flip_mask) != 0;
3638
3639 /* Can't rely on pipestat interrupt bit in iir as it might 3841 /* Can't rely on pipestat interrupt bit in iir as it might
3640 * have been cleared after the pipestat interrupt was received. 3842 * have been cleared after the pipestat interrupt was received.
3641 * It doesn't set the bit in iir again, but it still produces 3843 * It doesn't set the bit in iir again, but it still produces
@@ -3643,7 +3845,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3643 */ 3845 */
3644 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3846 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3645 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3847 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3646 i915_handle_error(dev, false); 3848 i915_handle_error(dev, false,
3849 "Command parser error, iir 0x%08x",
3850 iir);
3647 3851
3648 for_each_pipe(pipe) { 3852 for_each_pipe(pipe) {
3649 int reg = PIPESTAT(pipe); 3853 int reg = PIPESTAT(pipe);
@@ -3653,11 +3857,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3653 * Clear the PIPE*STAT regs before the IIR 3857 * Clear the PIPE*STAT regs before the IIR
3654 */ 3858 */
3655 if (pipe_stats[pipe] & 0x8000ffff) { 3859 if (pipe_stats[pipe] & 0x8000ffff) {
3656 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3657 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3658 pipe_name(pipe));
3659 I915_WRITE(reg, pipe_stats[pipe]); 3860 I915_WRITE(reg, pipe_stats[pipe]);
3660 irq_received = 1; 3861 irq_received = true;
3661 } 3862 }
3662 } 3863 }
3663 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3864 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3674,9 +3875,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3674 HOTPLUG_INT_STATUS_G4X : 3875 HOTPLUG_INT_STATUS_G4X :
3675 HOTPLUG_INT_STATUS_I915); 3876 HOTPLUG_INT_STATUS_I915);
3676 3877
3677 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3678 hotplug_status);
3679
3680 intel_hpd_irq_handler(dev, hotplug_trigger, 3878 intel_hpd_irq_handler(dev, hotplug_trigger,
3681 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); 3879 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
3682 3880
@@ -3706,8 +3904,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3706 3904
3707 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3905 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3708 i9xx_pipe_crc_irq_handler(dev, pipe); 3906 i9xx_pipe_crc_irq_handler(dev, pipe);
3709 }
3710 3907
3908 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3909 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3910 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3911 }
3711 3912
3712 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3913 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3713 intel_opregion_asle_intr(dev); 3914 intel_opregion_asle_intr(dev);
@@ -3740,13 +3941,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3740 3941
3741static void i965_irq_uninstall(struct drm_device * dev) 3942static void i965_irq_uninstall(struct drm_device * dev)
3742{ 3943{
3743 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3944 struct drm_i915_private *dev_priv = dev->dev_private;
3744 int pipe; 3945 int pipe;
3745 3946
3746 if (!dev_priv) 3947 if (!dev_priv)
3747 return; 3948 return;
3748 3949
3749 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3950 intel_hpd_irq_uninstall(dev_priv);
3750 3951
3751 I915_WRITE(PORT_HOTPLUG_EN, 0); 3952 I915_WRITE(PORT_HOTPLUG_EN, 0);
3752 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3953 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3763,9 +3964,9 @@ static void i965_irq_uninstall(struct drm_device * dev)
3763 I915_WRITE(IIR, I915_READ(IIR)); 3964 I915_WRITE(IIR, I915_READ(IIR));
3764} 3965}
3765 3966
3766static void i915_reenable_hotplug_timer_func(unsigned long data) 3967static void intel_hpd_irq_reenable(unsigned long data)
3767{ 3968{
3768 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3969 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
3769 struct drm_device *dev = dev_priv->dev; 3970 struct drm_device *dev = dev_priv->dev;
3770 struct drm_mode_config *mode_config = &dev->mode_config; 3971 struct drm_mode_config *mode_config = &dev->mode_config;
3771 unsigned long irqflags; 3972 unsigned long irqflags;
@@ -3807,10 +4008,13 @@ void intel_irq_init(struct drm_device *dev)
3807 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4008 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3808 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4009 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3809 4010
4011 /* Let's track the enabled rps events */
4012 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4013
3810 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4014 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3811 i915_hangcheck_elapsed, 4015 i915_hangcheck_elapsed,
3812 (unsigned long) dev); 4016 (unsigned long) dev);
3813 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 4017 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
3814 (unsigned long) dev_priv); 4018 (unsigned long) dev_priv);
3815 4019
3816 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4020 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
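The hotplug re-enable timer keeps the old unsigned-long-data timer API of this era: setup_timer() binds a callback plus a cookie, and the callback casts the cookie back to its private struct. A sketch of that idiom; the demo_* names are invented:

    /* setup_timer()/mod_timer() idiom sketch (old unsigned-long-data API). */
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_priv {
    	struct timer_list reenable_timer;
    };

    static void demo_reenable(unsigned long data)
    {
    	struct demo_priv *priv = (struct demo_priv *)data;
    	(void)priv;	/* re-enable hotplug interrupts for priv here */
    }

    static void demo_init(struct demo_priv *priv)
    {
    	setup_timer(&priv->reenable_timer, demo_reenable,
    		    (unsigned long)priv);
    	/* arm it some delay out; the value here is illustrative */
    	mod_timer(&priv->reenable_timer, jiffies + msecs_to_jiffies(2000));
    }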
@@ -3906,32 +4110,32 @@ void intel_hpd_init(struct drm_device *dev)
3906 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4110 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3907} 4111}
3908 4112
3909/* Disable interrupts so we can allow Package C8+. */ 4113/* Disable interrupts so we can allow runtime PM. */
3910void hsw_pc8_disable_interrupts(struct drm_device *dev) 4114void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
3911{ 4115{
3912 struct drm_i915_private *dev_priv = dev->dev_private; 4116 struct drm_i915_private *dev_priv = dev->dev_private;
3913 unsigned long irqflags; 4117 unsigned long irqflags;
3914 4118
3915 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4119 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3916 4120
3917 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 4121 dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
3918 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 4122 dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
3919 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 4123 dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
3920 dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 4124 dev_priv->pm.regsave.gtier = I915_READ(GTIER);
3921 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 4125 dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3922 4126
3923 ironlake_disable_display_irq(dev_priv, 0xffffffff); 4127 ironlake_disable_display_irq(dev_priv, 0xffffffff);
3924 ibx_disable_display_interrupt(dev_priv, 0xffffffff); 4128 ibx_disable_display_interrupt(dev_priv, 0xffffffff);
3925 ilk_disable_gt_irq(dev_priv, 0xffffffff); 4129 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3926 snb_disable_pm_irq(dev_priv, 0xffffffff); 4130 snb_disable_pm_irq(dev_priv, 0xffffffff);
3927 4131
3928 dev_priv->pc8.irqs_disabled = true; 4132 dev_priv->pm.irqs_disabled = true;
3929 4133
3930 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4134 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3931} 4135}
3932 4136
3933/* Restore interrupts so we can recover from Package C8+. */ 4137/* Restore interrupts so we can recover from runtime PM. */
3934void hsw_pc8_restore_interrupts(struct drm_device *dev) 4138void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
3935{ 4139{
3936 struct drm_i915_private *dev_priv = dev->dev_private; 4140 struct drm_i915_private *dev_priv = dev->dev_private;
3937 unsigned long irqflags; 4141 unsigned long irqflags;
@@ -3951,13 +4155,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
3951 val = I915_READ(GEN6_PMIMR); 4155 val = I915_READ(GEN6_PMIMR);
3952 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); 4156 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
3953 4157
3954 dev_priv->pc8.irqs_disabled = false; 4158 dev_priv->pm.irqs_disabled = false;
3955 4159
3956 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 4160 ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
3957 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); 4161 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
3958 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 4162 ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
3959 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 4163 snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
3960 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 4164 I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
3961 4165
3962 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4166 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3963} 4167}
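The pair of functions above implements a save/mask/restore cycle: the current interrupt-mask registers are snapshotted into dev_priv->pm.regsave, everything is masked so the device can power down, and the restore path (after checking the registers still read back fully masked) writes the snapshot back. A compact sketch of the same pattern against a fake register file, with illustrative field names:

    /* Save/mask/restore sketch with a fake register file in place of MMIO. */
    #include <stdbool.h>
    #include <stdint.h>

    struct irq_regs { uint32_t deimr, sdeimr, gtimr, gtier, pmimr; };

    static struct irq_regs hw;          /* stands in for the real registers */

    struct pm_state {
    	struct irq_regs regsave;    /* snapshot taken while disabling */
    	bool irqs_disabled;
    };

    void pm_disable_interrupts(struct pm_state *pm)
    {
    	pm->regsave = hw;                         /* save current masks */
    	hw.deimr = hw.sdeimr = hw.gtimr = hw.pmimr = 0xffffffff; /* mask all */
    	hw.gtier = 0;                             /* and disable delivery */
    	pm->irqs_disabled = true;
    }

    void pm_restore_interrupts(struct pm_state *pm)
    {
    	pm->irqs_disabled = false;
    	hw = pm->regsave;                         /* put the masks back */
    }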
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 000000000000..d1d7980f0e01
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,154 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25#include "i915_drv.h"
26
27struct i915_params i915 __read_mostly = {
28 .modeset = -1,
29 .panel_ignore_lid = 1,
30 .powersave = 1,
31 .semaphores = -1,
32 .lvds_downclock = 0,
33 .lvds_channel_mode = 0,
34 .panel_use_ssc = -1,
35 .vbt_sdvo_panel_type = -1,
36 .enable_rc6 = -1,
37 .enable_fbc = -1,
38 .enable_hangcheck = true,
39 .enable_ppgtt = -1,
40 .enable_psr = 0,
41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
42 .disable_power_well = 1,
43 .enable_ips = 1,
44 .fastboot = 0,
45 .prefault_disable = 0,
46 .reset = true,
47 .invert_brightness = 0,
48 .disable_display = 0,
49 .enable_cmd_parser = 0,
50};
51
52module_param_named(modeset, i915.modeset, int, 0400);
53MODULE_PARM_DESC(modeset,
54 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
55 "1=on, -1=force vga console preference [default])");
56
57module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
58MODULE_PARM_DESC(panel_ignore_lid,
59 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
60 "-1=force lid closed, -2=force lid open)");
61
62module_param_named(powersave, i915.powersave, int, 0600);
63MODULE_PARM_DESC(powersave,
64 "Enable powersavings, fbc, downclocking, etc. (default: true)");
65
66module_param_named(semaphores, i915.semaphores, int, 0400);
67MODULE_PARM_DESC(semaphores,
68 "Use semaphores for inter-ring sync "
69 "(default: -1 (use per-chip defaults))");
70
71module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
72MODULE_PARM_DESC(enable_rc6,
73 "Enable power-saving render C-state 6. "
74 "Different stages can be selected via bitmask values "
75 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
76 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
77 "default: -1 (use per-chip default)");
78
79module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
80MODULE_PARM_DESC(enable_fbc,
81 "Enable frame buffer compression for power savings "
82 "(default: -1 (use per-chip default))");
83
84module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
85MODULE_PARM_DESC(lvds_downclock,
86 "Use panel (LVDS/eDP) downclocking for power savings "
87 "(default: false)");
88
89module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
90MODULE_PARM_DESC(lvds_channel_mode,
91 "Specify LVDS channel mode "
92 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
93
94module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
95MODULE_PARM_DESC(lvds_use_ssc,
96 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
97 "(default: auto from VBT)");
98
99module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
100MODULE_PARM_DESC(vbt_sdvo_panel_type,
101 "Override/Ignore selection of SDVO panel mode in the VBT "
102 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
103
104module_param_named(reset, i915.reset, bool, 0600);
105MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
106
107module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
108MODULE_PARM_DESC(enable_hangcheck,
109 "Periodically check GPU activity for detecting hangs. "
110 "WARNING: Disabling this can cause system wide hangs. "
111 "(default: true)");
112
113module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
114MODULE_PARM_DESC(enable_ppgtt,
115 "Override PPGTT usage. "
116 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
117
118module_param_named(enable_psr, i915.enable_psr, int, 0600);
119MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
120
121module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
122MODULE_PARM_DESC(preliminary_hw_support,
123 "Enable preliminary hardware support.");
124
125module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
126MODULE_PARM_DESC(disable_power_well,
127 "Disable the power well when possible (default: true)");
128
129module_param_named(enable_ips, i915.enable_ips, int, 0600);
130MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
131
132module_param_named(fastboot, i915.fastboot, bool, 0600);
133MODULE_PARM_DESC(fastboot,
134 "Try to skip unnecessary mode sets at boot time (default: false)");
135
136module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
137MODULE_PARM_DESC(prefault_disable,
138 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
139 "For developers only.");
140
141module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
142MODULE_PARM_DESC(invert_brightness,
143 "Invert backlight brightness "
144 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
145 "report PCI device ID, subsystem vendor and subsystem device ID "
146 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
147 "It will then be included in an upcoming module version.");
148
149module_param_named(disable_display, i915.disable_display, bool, 0600);
150MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
151
152module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
153MODULE_PARM_DESC(enable_cmd_parser,
154 "Enable command parsing (1=enabled, 0=disabled [default])");
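Gathering every option into the single struct i915_params i915 lets call sites read i915.enable_fbc and friends instead of scattered per-file globals, while module_param_named() keeps the externally visible names stable: set i915.modeset=1 on the kernel command line, or write the 0600/0644 ones through /sys/module/i915/parameters/ at run time. A minimal sketch of the same params-struct idiom for an arbitrary module; the demo names are made up, only the macros are the real kernel API:

    /* Params-struct sketch: one struct, module_param_named() per field. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    struct demo_params {
    	int verbosity;
    	bool enable_feature;
    };

    static struct demo_params demo __read_mostly = {
    	.verbosity = 0,
    	.enable_feature = true,
    };

    /* exposed as demo.verbosity=N on the command line and as
     * /sys/module/demo/parameters/verbosity at run time */
    module_param_named(verbosity, demo.verbosity, int, 0600);
    MODULE_PARM_DESC(verbosity, "Log verbosity (default: 0)");

    module_param_named(enable_feature, demo.enable_feature, bool, 0600);
    MODULE_PARM_DESC(enable_feature, "Enable the feature (default: true)");

    MODULE_LICENSE("GPL");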
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a48b7cad6f11..9f5b18d9d885 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,7 +26,6 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
31 30
32#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -73,7 +72,8 @@
73#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) 72#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
74#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 73#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
75#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 74#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
76#define LBB 0xf4 75#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
76
77 77
78/* Graphics reset regs */ 78/* Graphics reset regs */
79#define I965_GDRST 0xc0 /* PCI config register */ 79#define I965_GDRST 0xc0 /* PCI config register */
@@ -175,6 +175,18 @@
175#define VGA_CR_DATA_CGA 0x3d5 175#define VGA_CR_DATA_CGA 0x3d5
176 176
177/* 177/*
178 * Instruction field definitions used by the command parser
179 */
180#define INSTR_CLIENT_SHIFT 29
181#define INSTR_CLIENT_MASK 0xE0000000
182#define INSTR_MI_CLIENT 0x0
183#define INSTR_BC_CLIENT 0x2
184#define INSTR_RC_CLIENT 0x3
185#define INSTR_SUBCLIENT_SHIFT 27
186#define INSTR_SUBCLIENT_MASK 0x18000000
187#define INSTR_MEDIA_SUBCLIENT 0x2
188
189/*
178 * Memory interface instructions used by the kernel 190 * Memory interface instructions used by the kernel
179 */ 191 */
180#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) 192#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
@@ -377,14 +389,30 @@
377#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) 389#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
378#define DSPFREQGUAR_SHIFT 14 390#define DSPFREQGUAR_SHIFT 14
379#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) 391#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
392
393/* See the PUNIT HAS v0.8 for the below bits */
394enum punit_power_well {
395 PUNIT_POWER_WELL_RENDER = 0,
396 PUNIT_POWER_WELL_MEDIA = 1,
397 PUNIT_POWER_WELL_DISP2D = 3,
398 PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
399 PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
400 PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
401 PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
402 PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
403 PUNIT_POWER_WELL_DPIO_RX0 = 10,
404 PUNIT_POWER_WELL_DPIO_RX1 = 11,
405
406 PUNIT_POWER_WELL_NUM,
407};
408
380#define PUNIT_REG_PWRGT_CTRL 0x60 409#define PUNIT_REG_PWRGT_CTRL 0x60
381#define PUNIT_REG_PWRGT_STATUS 0x61 410#define PUNIT_REG_PWRGT_STATUS 0x61
382#define PUNIT_CLK_GATE 1 411#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
383#define PUNIT_PWR_RESET 2 412#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
384#define PUNIT_PWR_GATE 3 413#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
385#define RENDER_PWRGT (PUNIT_PWR_GATE << 0) 414#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
386#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2) 415#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
387#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
388 416
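The enum plus the shift-by-2*well macros above replace the fixed RENDER/MEDIA/DISP2D masks: each power well owns a 2-bit field in PUNIT_REG_PWRGT_CTRL/STATUS, from 0 (powered on) through 3 (power gated). A quick check of the arithmetic (hosted C, not driver code):

    /* Worked example: the 2-bit-per-well encoding above. */
    #include <assert.h>

    #define PWRGT_MASK(w)     (3u << ((w) * 2))
    #define PWRGT_PWR_GATE(w) (3u << ((w) * 2))

    int main(void)
    {
    	/* DISP2D is well 3, i.e. bits 7:6 -- exactly the old
    	 * DISP2D_PWRGT value of (PUNIT_PWR_GATE << 6). */
    	assert(PWRGT_MASK(3) == 0xc0);
    	assert(PWRGT_PWR_GATE(3) == (3u << 6));
    	return 0;
    }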
389#define PUNIT_REG_GPU_LFM 0xd3 417#define PUNIT_REG_GPU_LFM 0xd3
390#define PUNIT_REG_GPU_FREQ_REQ 0xd4 418#define PUNIT_REG_GPU_FREQ_REQ 0xd4
@@ -678,6 +706,7 @@
678#define BLT_HWS_PGA_GEN7 (0x04280) 706#define BLT_HWS_PGA_GEN7 (0x04280)
679#define VEBOX_HWS_PGA_GEN7 (0x04380) 707#define VEBOX_HWS_PGA_GEN7 (0x04380)
680#define RING_ACTHD(base) ((base)+0x74) 708#define RING_ACTHD(base) ((base)+0x74)
709#define RING_ACTHD_UDW(base) ((base)+0x5c)
681#define RING_NOPID(base) ((base)+0x94) 710#define RING_NOPID(base) ((base)+0x94)
682#define RING_IMR(base) ((base)+0xa8) 711#define RING_IMR(base) ((base)+0xa8)
683#define RING_TIMESTAMP(base) ((base)+0x358) 712#define RING_TIMESTAMP(base) ((base)+0x358)
@@ -720,6 +749,7 @@
720#define RING_INSTPS(base) ((base)+0x70) 749#define RING_INSTPS(base) ((base)+0x70)
721#define RING_DMA_FADD(base) ((base)+0x78) 750#define RING_DMA_FADD(base) ((base)+0x78)
722#define RING_INSTPM(base) ((base)+0xc0) 751#define RING_INSTPM(base) ((base)+0xc0)
752#define RING_MI_MODE(base) ((base)+0x9c)
723#define INSTPS 0x02070 /* 965+ only */ 753#define INSTPS 0x02070 /* 965+ only */
724#define INSTDONE1 0x0207c /* 965+ only */ 754#define INSTDONE1 0x0207c /* 965+ only */
725#define ACTHD_I965 0x02074 755#define ACTHD_I965 0x02074
@@ -789,15 +819,22 @@
789#define _3D_CHICKEN3 0x02090 819#define _3D_CHICKEN3 0x02090
790#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 820#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
791#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 821#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
792#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) 822#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
823#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
793 824
794#define MI_MODE 0x0209c 825#define MI_MODE 0x0209c
795# define VS_TIMER_DISPATCH (1 << 6) 826# define VS_TIMER_DISPATCH (1 << 6)
796# define MI_FLUSH_ENABLE (1 << 12) 827# define MI_FLUSH_ENABLE (1 << 12)
797# define ASYNC_FLIP_PERF_DISABLE (1 << 14) 828# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
829# define MODE_IDLE (1 << 9)
798 830
799#define GEN6_GT_MODE 0x20d0 831#define GEN6_GT_MODE 0x20d0
800#define GEN6_GT_MODE_HI (1 << 9) 832#define GEN7_GT_MODE 0x7008
833#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
834#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
835#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
836#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
837#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
801#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) 838#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
802 839
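GEN6_WIZ_HASHING() packs the two hashing-mode bits at positions 9 and 7, and the _MASK variant shifts the all-ones pattern into the upper 16 bits, matching the usual masked-register convention where the high half selects which low bits a write may change. A quick arithmetic check:

    /* Bit arithmetic behind GEN6_WIZ_HASHING(). */
    #include <assert.h>

    #define WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))

    int main(void)
    {
    	assert(WIZ_HASHING(0, 1) == 0x080);              /* 8x4  */
    	assert(WIZ_HASHING(1, 0) == 0x200);              /* 16x4 */
    	assert((WIZ_HASHING(1, 1) << 16) == 0x2800000);  /* write-enable mask */
    	return 0;
    }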
803#define GFX_MODE 0x02520 840#define GFX_MODE 0x02520
@@ -934,13 +971,19 @@
934#define ECO_GATING_CX_ONLY (1<<3) 971#define ECO_GATING_CX_ONLY (1<<3)
935#define ECO_FLIP_DONE (1<<0) 972#define ECO_FLIP_DONE (1<<0)
936 973
974#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
975#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
937#define CACHE_MODE_1 0x7004 /* IVB+ */ 976#define CACHE_MODE_1 0x7004 /* IVB+ */
938#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 977#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
978#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
939 979
940#define GEN6_BLITTER_ECOSKPD 0x221d0 980#define GEN6_BLITTER_ECOSKPD 0x221d0
941#define GEN6_BLITTER_LOCK_SHIFT 16 981#define GEN6_BLITTER_LOCK_SHIFT 16
942#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 982#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
943 983
984#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
985#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
986
944#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 987#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
945#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 988#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
946#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) 989#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -1046,9 +1089,8 @@
1046#define FBC_CTL_IDLE_LINE (2<<2) 1089#define FBC_CTL_IDLE_LINE (2<<2)
1047#define FBC_CTL_IDLE_DEBUG (3<<2) 1090#define FBC_CTL_IDLE_DEBUG (3<<2)
1048#define FBC_CTL_CPU_FENCE (1<<1) 1091#define FBC_CTL_CPU_FENCE (1<<1)
1049#define FBC_CTL_PLANEA (0<<0) 1092#define FBC_CTL_PLANE(plane) ((plane)<<0)
1050#define FBC_CTL_PLANEB (1<<0) 1093#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
1051#define FBC_FENCE_OFF 0x0321b
1052#define FBC_TAG 0x03300 1094#define FBC_TAG 0x03300
1053 1095
1054#define FBC_LL_SIZE (1536) 1096#define FBC_LL_SIZE (1536)
@@ -1057,9 +1099,8 @@
1057#define DPFC_CB_BASE 0x3200 1099#define DPFC_CB_BASE 0x3200
1058#define DPFC_CONTROL 0x3208 1100#define DPFC_CONTROL 0x3208
1059#define DPFC_CTL_EN (1<<31) 1101#define DPFC_CTL_EN (1<<31)
1060#define DPFC_CTL_PLANEA (0<<30) 1102#define DPFC_CTL_PLANE(plane) ((plane)<<30)
1061#define DPFC_CTL_PLANEB (1<<30) 1103#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
1062#define IVB_DPFC_CTL_PLANE_SHIFT (29)
1063#define DPFC_CTL_FENCE_EN (1<<29) 1104#define DPFC_CTL_FENCE_EN (1<<29)
1064#define IVB_DPFC_CTL_FENCE_EN (1<<28) 1105#define IVB_DPFC_CTL_FENCE_EN (1<<28)
1065#define DPFC_CTL_PERSISTENT_MODE (1<<25) 1106#define DPFC_CTL_PERSISTENT_MODE (1<<25)
@@ -1120,13 +1161,6 @@
1120#define FBC_REND_NUKE (1<<2) 1161#define FBC_REND_NUKE (1<<2)
1121#define FBC_REND_CACHE_CLEAN (1<<1) 1162#define FBC_REND_CACHE_CLEAN (1<<1)
1122 1163
1123#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
1124#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
1125#define HSW_BYPASS_FBC_QUEUE (1<<22)
1126#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
1127 _HSW_PIPE_SLICE_CHICKEN_1_A, + \
1128 _HSW_PIPE_SLICE_CHICKEN_1_B)
1129
1130/* 1164/*
1131 * GPIO regs 1165 * GPIO regs
1132 */ 1166 */
@@ -1202,6 +1236,10 @@
1202/* 1236/*
1203 * Clock control & power management 1237 * Clock control & power management
1204 */ 1238 */
1239#define DPLL_A_OFFSET 0x6014
1240#define DPLL_B_OFFSET 0x6018
1241#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
1242 dev_priv->info.display_mmio_offset)
1205 1243
1206#define VGA0 0x6000 1244#define VGA0 0x6000
1207#define VGA1 0x6004 1245#define VGA1 0x6004
@@ -1214,9 +1252,6 @@
1214#define VGA1_PD_P1_DIV_2 (1 << 13) 1252#define VGA1_PD_P1_DIV_2 (1 << 13)
1215#define VGA1_PD_P1_SHIFT 8 1253#define VGA1_PD_P1_SHIFT 8
1216#define VGA1_PD_P1_MASK (0x1f << 8) 1254#define VGA1_PD_P1_MASK (0x1f << 8)
1217#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
1218#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
1219#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
1220#define DPLL_VCO_ENABLE (1 << 31) 1255#define DPLL_VCO_ENABLE (1 << 31)
1221#define DPLL_SDVO_HIGH_SPEED (1 << 30) 1256#define DPLL_SDVO_HIGH_SPEED (1 << 30)
1222#define DPLL_DVO_2X_MODE (1 << 30) 1257#define DPLL_DVO_2X_MODE (1 << 30)
@@ -1278,7 +1313,12 @@
1278#define SDVO_MULTIPLIER_MASK 0x000000ff 1313#define SDVO_MULTIPLIER_MASK 0x000000ff
1279#define SDVO_MULTIPLIER_SHIFT_HIRES 4 1314#define SDVO_MULTIPLIER_SHIFT_HIRES 4
1280#define SDVO_MULTIPLIER_SHIFT_VGA 0 1315#define SDVO_MULTIPLIER_SHIFT_VGA 0
1281#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ 1316
1317#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
1318#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
1319#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
1320 dev_priv->info.display_mmio_offset)
1321
1282/* 1322/*
1283 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 1323 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
1284 * 1324 *
@@ -1315,8 +1355,6 @@
1315 */ 1355 */
1316#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 1356#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
1317#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 1357#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
1318#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
1319#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
1320 1358
1321#define _FPA0 0x06040 1359#define _FPA0 0x06040
1322#define _FPA1 0x06044 1360#define _FPA1 0x06044
@@ -1348,7 +1386,7 @@
1348#define DSTATE_PLL_D3_OFF (1<<3) 1386#define DSTATE_PLL_D3_OFF (1<<3)
1349#define DSTATE_GFX_CLOCK_GATING (1<<1) 1387#define DSTATE_GFX_CLOCK_GATING (1<<1)
1350#define DSTATE_DOT_CLOCK_GATING (1<<0) 1388#define DSTATE_DOT_CLOCK_GATING (1<<0)
1351#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200) 1389#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200)
1352# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ 1390# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
1353# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ 1391# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
1354# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ 1392# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1472,10 +1510,10 @@
1472/* 1510/*
1473 * Palette regs 1511 * Palette regs
1474 */ 1512 */
1475 1513#define PALETTE_A_OFFSET 0xa000
1476#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 1514#define PALETTE_B_OFFSET 0xa800
1477#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 1515#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
1478#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) 1516 dev_priv->info.display_mmio_offset)
1479 1517
1480/* MCH MMIO space */ 1518/* MCH MMIO space */
1481 1519
@@ -1862,7 +1900,7 @@
1862 */ 1900 */
1863 1901
1864/* Pipe A CRC regs */ 1902/* Pipe A CRC regs */
1865#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) 1903#define _PIPE_CRC_CTL_A 0x60050
1866#define PIPE_CRC_ENABLE (1 << 31) 1904#define PIPE_CRC_ENABLE (1 << 31)
1867/* ivb+ source selection */ 1905/* ivb+ source selection */
1868#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) 1906#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
@@ -1902,11 +1940,11 @@
1902#define _PIPE_CRC_RES_4_A_IVB 0x60070 1940#define _PIPE_CRC_RES_4_A_IVB 0x60070
1903#define _PIPE_CRC_RES_5_A_IVB 0x60074 1941#define _PIPE_CRC_RES_5_A_IVB 0x60074
1904 1942
1905#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060) 1943#define _PIPE_CRC_RES_RED_A 0x60060
1906#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064) 1944#define _PIPE_CRC_RES_GREEN_A 0x60064
1907#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068) 1945#define _PIPE_CRC_RES_BLUE_A 0x60068
1908#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c) 1946#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
1909#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080) 1947#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
1910 1948
1911/* Pipe B CRC regs */ 1949/* Pipe B CRC regs */
1912#define _PIPE_CRC_RES_1_B_IVB 0x61064 1950#define _PIPE_CRC_RES_1_B_IVB 0x61064
@@ -1915,59 +1953,69 @@
1915#define _PIPE_CRC_RES_4_B_IVB 0x61070 1953#define _PIPE_CRC_RES_4_B_IVB 0x61070
1916#define _PIPE_CRC_RES_5_B_IVB 0x61074 1954#define _PIPE_CRC_RES_5_B_IVB 0x61074
1917 1955
1918#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000) 1956#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
1919#define PIPE_CRC_RES_1_IVB(pipe) \ 1957#define PIPE_CRC_RES_1_IVB(pipe) \
1920 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) 1958 _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
1921#define PIPE_CRC_RES_2_IVB(pipe) \ 1959#define PIPE_CRC_RES_2_IVB(pipe) \
1922 _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) 1960 _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
1923#define PIPE_CRC_RES_3_IVB(pipe) \ 1961#define PIPE_CRC_RES_3_IVB(pipe) \
1924 _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) 1962 _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
1925#define PIPE_CRC_RES_4_IVB(pipe) \ 1963#define PIPE_CRC_RES_4_IVB(pipe) \
1926 _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) 1964 _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
1927#define PIPE_CRC_RES_5_IVB(pipe) \ 1965#define PIPE_CRC_RES_5_IVB(pipe) \
1928 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) 1966 _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
1929 1967
1930#define PIPE_CRC_RES_RED(pipe) \ 1968#define PIPE_CRC_RES_RED(pipe) \
1931 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000) 1969 _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
1932#define PIPE_CRC_RES_GREEN(pipe) \ 1970#define PIPE_CRC_RES_GREEN(pipe) \
1933 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000) 1971 _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
1934#define PIPE_CRC_RES_BLUE(pipe) \ 1972#define PIPE_CRC_RES_BLUE(pipe) \
1935 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000) 1973 _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
1936#define PIPE_CRC_RES_RES1_I915(pipe) \ 1974#define PIPE_CRC_RES_RES1_I915(pipe) \
1937 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000) 1975 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
1938#define PIPE_CRC_RES_RES2_G4X(pipe) \ 1976#define PIPE_CRC_RES_RES2_G4X(pipe) \
1939 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000) 1977 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
1940 1978
1941/* Pipe A timing regs */ 1979/* Pipe A timing regs */
1942#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1980#define _HTOTAL_A 0x60000
1943#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1981#define _HBLANK_A 0x60004
1944#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) 1982#define _HSYNC_A 0x60008
1945#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) 1983#define _VTOTAL_A 0x6000c
1946#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) 1984#define _VBLANK_A 0x60010
1947#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) 1985#define _VSYNC_A 0x60014
1948#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) 1986#define _PIPEASRC 0x6001c
1949#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) 1987#define _BCLRPAT_A 0x60020
1950#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) 1988#define _VSYNCSHIFT_A 0x60028
1951 1989
1952/* Pipe B timing regs */ 1990/* Pipe B timing regs */
1953#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) 1991#define _HTOTAL_B 0x61000
1954#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) 1992#define _HBLANK_B 0x61004
1955#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) 1993#define _HSYNC_B 0x61008
1956#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) 1994#define _VTOTAL_B 0x6100c
1957#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) 1995#define _VBLANK_B 0x61010
1958#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) 1996#define _VSYNC_B 0x61014
1959#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) 1997#define _PIPEBSRC 0x6101c
1960#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1998#define _BCLRPAT_B 0x61020
1961#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1999#define _VSYNCSHIFT_B 0x61028
1962 2000
1963#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 2001#define TRANSCODER_A_OFFSET 0x60000
1964#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 2002#define TRANSCODER_B_OFFSET 0x61000
1965#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 2003#define TRANSCODER_C_OFFSET 0x62000
1966#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) 2004#define TRANSCODER_EDP_OFFSET 0x6f000
1967#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) 2005
1968#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) 2006#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
1969#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 2007 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
1970#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 2008 dev_priv->info.display_mmio_offset)
2009
2010#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
2011#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
2012#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
2013#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
2014#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
2015#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
2016#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
2017#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
2018#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
1971 2019
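The _TRANSCODER2() macro above replaces the old two-register _TRANSCODER() selector with per-transcoder base offsets, which is what lets transcoder C and the eDP transcoder share one macro. A minimal sketch of the arithmetic, using the TRANSCODER_*_OFFSET values from this hunk (the helper and table names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>

enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

static const uint32_t trans_offsets[] = {
	[TRANSCODER_A]   = 0x60000,
	[TRANSCODER_B]   = 0x61000,
	[TRANSCODER_C]   = 0x62000,
	[TRANSCODER_EDP] = 0x6f000,
};

/* mirrors _TRANSCODER2(): rebase reg from transcoder A onto 'trans';
 * mmio_base stands in for display_mmio_offset (0 on most platforms) */
static uint32_t transcoder2(int trans, uint32_t reg, uint32_t mmio_base)
{
	return trans_offsets[trans] - trans_offsets[TRANSCODER_A] +
	       reg + mmio_base;
}

int main(void)
{
	/* HTOTAL(TRANSCODER_EDP): 0x6f000 - 0x60000 + 0x60000 = 0x6f000 */
	assert(transcoder2(TRANSCODER_EDP, 0x60000, 0) == 0x6f000);
	/* HTOTAL(TRANSCODER_B) matches the old _HTOTAL_B at 0x61000 */
	assert(transcoder2(TRANSCODER_B, 0x60000, 0) == 0x61000);
	return 0;
}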
1972/* HSW+ eDP PSR registers */ 2020/* HSW+ eDP PSR registers */
1973#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 2021#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -2084,7 +2132,7 @@
2084 2132
2085 2133
2086/* Hotplug control (945+ only) */ 2134/* Hotplug control (945+ only) */
2087#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) 2135#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110)
2088#define PORTB_HOTPLUG_INT_EN (1 << 29) 2136#define PORTB_HOTPLUG_INT_EN (1 << 29)
2089#define PORTC_HOTPLUG_INT_EN (1 << 28) 2137#define PORTC_HOTPLUG_INT_EN (1 << 28)
2090#define PORTD_HOTPLUG_INT_EN (1 << 27) 2138#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -2114,7 +2162,7 @@
2114#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 2162#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
2115#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 2163#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
2116 2164
2117#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) 2165#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
2118/* 2166/*
2119 * HDMI/DP bits are gen4+ 2167 * HDMI/DP bits are gen4+
2120 * 2168 *
@@ -2332,9 +2380,7 @@
2332#define VIDEO_DIP_CTL 0x61170 2380#define VIDEO_DIP_CTL 0x61170
2333/* Pre HSW: */ 2381/* Pre HSW: */
2334#define VIDEO_DIP_ENABLE (1 << 31) 2382#define VIDEO_DIP_ENABLE (1 << 31)
2335#define VIDEO_DIP_PORT_B (1 << 29) 2383#define VIDEO_DIP_PORT(port) ((port) << 29)
2336#define VIDEO_DIP_PORT_C (2 << 29)
2337#define VIDEO_DIP_PORT_D (3 << 29)
2338#define VIDEO_DIP_PORT_MASK (3 << 29) 2384#define VIDEO_DIP_PORT_MASK (3 << 29)
2339#define VIDEO_DIP_ENABLE_GCP (1 << 25) 2385#define VIDEO_DIP_ENABLE_GCP (1 << 25)
2340#define VIDEO_DIP_ENABLE_AVI (1 << 21) 2386#define VIDEO_DIP_ENABLE_AVI (1 << 21)
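The three per-port DIP defines collapse into one parameterized macro; assuming the driver's usual enum port numbering (PORT_A = 0, PORT_B = 1, ...), the macro reproduces the old constants exactly. A quick check:

#include <assert.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };
#define VIDEO_DIP_PORT(port) ((port) << 29)

int main(void)
{
	assert(VIDEO_DIP_PORT(PORT_B) == (1 << 29)); /* old VIDEO_DIP_PORT_B */
	assert(VIDEO_DIP_PORT(PORT_C) == (2 << 29)); /* old VIDEO_DIP_PORT_C */
	assert(VIDEO_DIP_PORT(PORT_D) == (3 << 29)); /* old VIDEO_DIP_PORT_D */
	return 0;
}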
@@ -2391,7 +2437,7 @@
2391#define PP_DIVISOR 0x61210 2437#define PP_DIVISOR 0x61210
2392 2438
2393/* Panel fitting */ 2439/* Panel fitting */
2394#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) 2440#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230)
2395#define PFIT_ENABLE (1 << 31) 2441#define PFIT_ENABLE (1 << 31)
2396#define PFIT_PIPE_MASK (3 << 29) 2442#define PFIT_PIPE_MASK (3 << 29)
2397#define PFIT_PIPE_SHIFT 29 2443#define PFIT_PIPE_SHIFT 29
@@ -2409,7 +2455,7 @@
2409#define PFIT_SCALING_PROGRAMMED (1 << 26) 2455#define PFIT_SCALING_PROGRAMMED (1 << 26)
2410#define PFIT_SCALING_PILLAR (2 << 26) 2456#define PFIT_SCALING_PILLAR (2 << 26)
2411#define PFIT_SCALING_LETTER (3 << 26) 2457#define PFIT_SCALING_LETTER (3 << 26)
2412#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) 2458#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234)
2413/* Pre-965 */ 2459/* Pre-965 */
2414#define PFIT_VERT_SCALE_SHIFT 20 2460#define PFIT_VERT_SCALE_SHIFT 20
2415#define PFIT_VERT_SCALE_MASK 0xfff00000 2461#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -2421,25 +2467,25 @@
2421#define PFIT_HORIZ_SCALE_SHIFT_965 0 2467#define PFIT_HORIZ_SCALE_SHIFT_965 0
2422#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff 2468#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
2423 2469
2424#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 2470#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
2425 2471
2426#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250) 2472#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
2427#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350) 2473#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
2428#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ 2474#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
2429 _VLV_BLC_PWM_CTL2_B) 2475 _VLV_BLC_PWM_CTL2_B)
2430 2476
2431#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254) 2477#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
2432#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354) 2478#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
2433#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ 2479#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
2434 _VLV_BLC_PWM_CTL_B) 2480 _VLV_BLC_PWM_CTL_B)
2435 2481
2436#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260) 2482#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
2437#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360) 2483#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
2438#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ 2484#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
2439 _VLV_BLC_HIST_CTL_B) 2485 _VLV_BLC_HIST_CTL_B)
2440 2486
2441/* Backlight control */ 2487/* Backlight control */
2442#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 2488#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
2443#define BLM_PWM_ENABLE (1 << 31) 2489#define BLM_PWM_ENABLE (1 << 31)
2444#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ 2490#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
2445#define BLM_PIPE_SELECT (1 << 29) 2491#define BLM_PIPE_SELECT (1 << 29)
@@ -2462,7 +2508,7 @@
2462#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) 2508#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
2463#define BLM_PHASE_IN_INCR_SHIFT (0) 2509#define BLM_PHASE_IN_INCR_SHIFT (0)
2464#define BLM_PHASE_IN_INCR_MASK (0xff << 0) 2510#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
2465#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254) 2511#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254)
2466/* 2512/*
2467 * This is the most significant 15 bits of the number of backlight cycles in a 2513 * This is the most significant 15 bits of the number of backlight cycles in a
2468 * complete cycle of the modulated backlight control. 2514 * complete cycle of the modulated backlight control.
@@ -2484,7 +2530,7 @@
2484#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) 2530#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
2485#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ 2531#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
2486 2532
2487#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260) 2533#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
2488 2534
2489/* New registers for PCH-split platforms. Safe where new bits show up, the 2535/* New registers for PCH-split platforms. Safe where new bits show up, the
2490 * register layout matches gen4 BLC_PWM_CTL[12]. */ 2536 * register layout matches gen4 BLC_PWM_CTL[12]. */
@@ -3178,10 +3224,10 @@
3178/* Display & cursor control */ 3224/* Display & cursor control */
3179 3225
3180/* Pipe A */ 3226/* Pipe A */
3181#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) 3227#define _PIPEADSL 0x70000
3182#define DSL_LINEMASK_GEN2 0x00000fff 3228#define DSL_LINEMASK_GEN2 0x00000fff
3183#define DSL_LINEMASK_GEN3 0x00001fff 3229#define DSL_LINEMASK_GEN3 0x00001fff
3184#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) 3230#define _PIPEACONF 0x70008
3185#define PIPECONF_ENABLE (1<<31) 3231#define PIPECONF_ENABLE (1<<31)
3186#define PIPECONF_DISABLE 0 3232#define PIPECONF_DISABLE 0
3187#define PIPECONF_DOUBLE_WIDE (1<<30) 3233#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -3224,9 +3270,9 @@
3224#define PIPECONF_DITHER_TYPE_ST1 (1<<2) 3270#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
3225#define PIPECONF_DITHER_TYPE_ST2 (2<<2) 3271#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
3226#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 3272#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
3227#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) 3273#define _PIPEASTAT 0x70024
3228#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 3274#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
3229#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) 3275#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
3230#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 3276#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
3231#define PIPE_CRC_DONE_ENABLE (1UL<<28) 3277#define PIPE_CRC_DONE_ENABLE (1UL<<28)
3232#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 3278#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
@@ -3239,35 +3285,55 @@
3239#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 3285#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
3240#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 3286#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
3241#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 3287#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
3288#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
3242#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 3289#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
3243#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 3290#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
3244#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 3291#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
3245#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) 3292#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
3246#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 3293#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
3247#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) 3294#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
3248#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) 3295#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
3249#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 3296#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
3250#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 3297#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
3251#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 3298#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
3252#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) 3299#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
3253#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 3300#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
3254#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 3301#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
3255#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 3302#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
3256#define PIPE_DPST_EVENT_STATUS (1UL<<7) 3303#define PIPE_DPST_EVENT_STATUS (1UL<<7)
3257#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 3304#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3305#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
3258#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 3306#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
3259#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 3307#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
3308#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
3260#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ 3309#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
3261#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 3310#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
3262#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 3311#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
3263#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 3312#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
3264 3313
3265#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 3314#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
3266#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) 3315#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
3267#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 3316
3268#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 3317#define PIPE_A_OFFSET 0x70000
3269#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 3318#define PIPE_B_OFFSET 0x71000
3270#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 3319#define PIPE_C_OFFSET 0x72000
3320/*
3321 * There's actually no pipe EDP. Some pipe registers have
3322 * simply shifted from the pipe to the transcoder, while
3323 * keeping their original offset. Thus we need PIPE_EDP_OFFSET
3324 * to access such registers in transcoder EDP.
3325 */
3326#define PIPE_EDP_OFFSET 0x7f000
3327
3328#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
3329 dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
3330 dev_priv->info.display_mmio_offset)
3331
3332#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
3333#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
3334#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
3335#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
3336#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
3271 3337
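_PIPE2() applies the same rebasing trick to pipe registers, with PIPE_EDP_OFFSET covering the registers that moved from pipe to transcoder while keeping their original offsets, as the comment above explains. A small sketch checking the arithmetic against the old hard-coded pipe B registers (helper name is illustrative, and display_mmio_offset is taken as 0):

#include <assert.h>
#include <stdint.h>

enum { PIPE_A, PIPE_B, PIPE_C };

static const uint32_t pipe_offsets[] = {
	[PIPE_A] = 0x70000,
	[PIPE_B] = 0x71000,
	[PIPE_C] = 0x72000,
};

/* mirrors _PIPE2(): rebase reg from pipe A onto 'pipe' */
static uint32_t pipe2(int pipe, uint32_t reg)
{
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] + reg;
}

int main(void)
{
	assert(pipe2(PIPE_B, 0x70008) == 0x71008); /* old _PIPEBCONF */
	assert(pipe2(PIPE_B, 0x70024) == 0x71024); /* old _PIPEBSTAT */
	return 0;
}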
3272#define _PIPE_MISC_A 0x70030 3338#define _PIPE_MISC_A 0x70030
3273#define _PIPE_MISC_B 0x71030 3339#define _PIPE_MISC_B 0x71030
@@ -3279,20 +3345,20 @@
3279#define PIPEMISC_DITHER_ENABLE (1<<4) 3345#define PIPEMISC_DITHER_ENABLE (1<<4)
3280#define PIPEMISC_DITHER_TYPE_MASK (3<<2) 3346#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
3281#define PIPEMISC_DITHER_TYPE_SP (0<<2) 3347#define PIPEMISC_DITHER_TYPE_SP (0<<2)
3282#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) 3348#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
3283 3349
3284#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 3350#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
3285#define PIPEB_LINE_COMPARE_INT_EN (1<<29) 3351#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
3286#define PIPEB_HLINE_INT_EN (1<<28) 3352#define PIPEB_HLINE_INT_EN (1<<28)
3287#define PIPEB_VBLANK_INT_EN (1<<27) 3353#define PIPEB_VBLANK_INT_EN (1<<27)
3288#define SPRITED_FLIPDONE_INT_EN (1<<26) 3354#define SPRITED_FLIP_DONE_INT_EN (1<<26)
3289#define SPRITEC_FLIPDONE_INT_EN (1<<25) 3355#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
3290#define PLANEB_FLIPDONE_INT_EN (1<<24) 3356#define PLANEB_FLIP_DONE_INT_EN (1<<24)
3291#define PIPEA_LINE_COMPARE_INT_EN (1<<21) 3357#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
3292#define PIPEA_HLINE_INT_EN (1<<20) 3358#define PIPEA_HLINE_INT_EN (1<<20)
3293#define PIPEA_VBLANK_INT_EN (1<<19) 3359#define PIPEA_VBLANK_INT_EN (1<<19)
3294#define SPRITEB_FLIPDONE_INT_EN (1<<18) 3360#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
3295#define SPRITEA_FLIPDONE_INT_EN (1<<17) 3361#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
3296#define PLANEA_FLIPDONE_INT_EN (1<<16) 3362#define PLANEA_FLIPDONE_INT_EN (1<<16)
3297 3363
3298#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ 3364#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
@@ -3323,7 +3389,7 @@
3323#define DSPARB_BEND_SHIFT 9 /* on 855 */ 3389#define DSPARB_BEND_SHIFT 9 /* on 855 */
3324#define DSPARB_AEND_SHIFT 0 3390#define DSPARB_AEND_SHIFT 0
3325 3391
3326#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) 3392#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
3327#define DSPFW_SR_SHIFT 23 3393#define DSPFW_SR_SHIFT 23
3328#define DSPFW_SR_MASK (0x1ff<<23) 3394#define DSPFW_SR_MASK (0x1ff<<23)
3329#define DSPFW_CURSORB_SHIFT 16 3395#define DSPFW_CURSORB_SHIFT 16
@@ -3331,11 +3397,11 @@
3331#define DSPFW_PLANEB_SHIFT 8 3397#define DSPFW_PLANEB_SHIFT 8
3332#define DSPFW_PLANEB_MASK (0x7f<<8) 3398#define DSPFW_PLANEB_MASK (0x7f<<8)
3333#define DSPFW_PLANEA_MASK (0x7f) 3399#define DSPFW_PLANEA_MASK (0x7f)
3334#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) 3400#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
3335#define DSPFW_CURSORA_MASK 0x00003f00 3401#define DSPFW_CURSORA_MASK 0x00003f00
3336#define DSPFW_CURSORA_SHIFT 8 3402#define DSPFW_CURSORA_SHIFT 8
3337#define DSPFW_PLANEC_MASK (0x7f) 3403#define DSPFW_PLANEC_MASK (0x7f)
3338#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) 3404#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
3339#define DSPFW_HPLL_SR_EN (1<<31) 3405#define DSPFW_HPLL_SR_EN (1<<31)
3340#define DSPFW_CURSOR_SR_SHIFT 24 3406#define DSPFW_CURSOR_SR_SHIFT 24
3341#define PINEVIEW_SELF_REFRESH_EN (1<<30) 3407#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@@ -3343,8 +3409,8 @@
3343#define DSPFW_HPLL_CURSOR_SHIFT 16 3409#define DSPFW_HPLL_CURSOR_SHIFT 16
3344#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 3410#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
3345#define DSPFW_HPLL_SR_MASK (0x1ff) 3411#define DSPFW_HPLL_SR_MASK (0x1ff)
3346#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070) 3412#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
3347#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c) 3413#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
3348 3414
3349/* drain latency register values*/ 3415/* drain latency register values*/
3350#define DRAIN_LATENCY_PRECISION_32 32 3416#define DRAIN_LATENCY_PRECISION_32 32
@@ -3468,12 +3534,12 @@
3468#define PIPE_PIXEL_MASK 0x00ffffff 3534#define PIPE_PIXEL_MASK 0x00ffffff
3469#define PIPE_PIXEL_SHIFT 0 3535#define PIPE_PIXEL_SHIFT 0
3470/* GM45+ just has to be different */ 3536/* GM45+ just has to be different */
3471#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040) 3537#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040)
3472#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044) 3538#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044)
3473#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 3539#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
3474 3540
3475/* Cursor A & B regs */ 3541/* Cursor A & B regs */
3476#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) 3542#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
3477/* Old style CUR*CNTR flags (desktop 8xx) */ 3543/* Old style CUR*CNTR flags (desktop 8xx) */
3478#define CURSOR_ENABLE 0x80000000 3544#define CURSOR_ENABLE 0x80000000
3479#define CURSOR_GAMMA_ENABLE 0x40000000 3545#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -3489,23 +3555,27 @@
3489/* New style CUR*CNTR flags */ 3555/* New style CUR*CNTR flags */
3490#define CURSOR_MODE 0x27 3556#define CURSOR_MODE 0x27
3491#define CURSOR_MODE_DISABLE 0x00 3557#define CURSOR_MODE_DISABLE 0x00
3558#define CURSOR_MODE_128_32B_AX 0x02
3559#define CURSOR_MODE_256_32B_AX 0x03
3492#define CURSOR_MODE_64_32B_AX 0x07 3560#define CURSOR_MODE_64_32B_AX 0x07
3561#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
3562#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
3493#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) 3563#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
3494#define MCURSOR_PIPE_SELECT (1 << 28) 3564#define MCURSOR_PIPE_SELECT (1 << 28)
3495#define MCURSOR_PIPE_A 0x00 3565#define MCURSOR_PIPE_A 0x00
3496#define MCURSOR_PIPE_B (1 << 28) 3566#define MCURSOR_PIPE_B (1 << 28)
3497#define MCURSOR_GAMMA_ENABLE (1 << 26) 3567#define MCURSOR_GAMMA_ENABLE (1 << 26)
3498#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 3568#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
3499#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3569#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084)
3500#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3570#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088)
3501#define CURSOR_POS_MASK 0x007FF 3571#define CURSOR_POS_MASK 0x007FF
3502#define CURSOR_POS_SIGN 0x8000 3572#define CURSOR_POS_SIGN 0x8000
3503#define CURSOR_X_SHIFT 0 3573#define CURSOR_X_SHIFT 0
3504#define CURSOR_Y_SHIFT 16 3574#define CURSOR_Y_SHIFT 16
3505#define CURSIZE 0x700a0 3575#define CURSIZE 0x700a0
3506#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) 3576#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0)
3507#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) 3577#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4)
3508#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) 3578#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8)
3509 3579
3510#define _CURBCNTR_IVB 0x71080 3580#define _CURBCNTR_IVB 0x71080
3511#define _CURBBASE_IVB 0x71084 3581#define _CURBBASE_IVB 0x71084
@@ -3520,7 +3590,7 @@
3520#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) 3590#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
3521 3591
3522/* Display A control */ 3592/* Display A control */
3523#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) 3593#define _DSPACNTR 0x70180
3524#define DISPLAY_PLANE_ENABLE (1<<31) 3594#define DISPLAY_PLANE_ENABLE (1<<31)
3525#define DISPLAY_PLANE_DISABLE 0 3595#define DISPLAY_PLANE_DISABLE 0
3526#define DISPPLANE_GAMMA_ENABLE (1<<30) 3596#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3554,25 +3624,25 @@
3554#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 3624#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
3555#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 3625#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
3556#define DISPPLANE_TILED (1<<10) 3626#define DISPPLANE_TILED (1<<10)
3557#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) 3627#define _DSPAADDR 0x70184
3558#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) 3628#define _DSPASTRIDE 0x70188
3559#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ 3629#define _DSPAPOS 0x7018C /* reserved */
3560#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) 3630#define _DSPASIZE 0x70190
3561#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ 3631#define _DSPASURF 0x7019C /* 965+ only */
3562#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ 3632#define _DSPATILEOFF 0x701A4 /* 965+ only */
3563#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ 3633#define _DSPAOFFSET 0x701A4 /* HSW */
3564#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) 3634#define _DSPASURFLIVE 0x701AC
3565 3635
3566#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3636#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
3567#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 3637#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
3568#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) 3638#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
3569#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) 3639#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
3570#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) 3640#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
3571#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 3641#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
3572#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 3642#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
3573#define DSPLINOFF(plane) DSPADDR(plane) 3643#define DSPLINOFF(plane) DSPADDR(plane)
3574#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) 3644#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
3575#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) 3645#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
3576 3646
3577/* Display/Sprite base address macros */ 3647/* Display/Sprite base address macros */
3578#define DISP_BASEADDR_MASK (0xfffff000) 3648#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3580,44 +3650,44 @@
3580#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) 3650#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
3581 3651
3582/* VBIOS flags */ 3652/* VBIOS flags */
3583#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) 3653#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
3584#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) 3654#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
3585#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) 3655#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
3586#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) 3656#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
3587#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) 3657#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
3588#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) 3658#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
3589#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) 3659#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
3590#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) 3660#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
3591#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) 3661#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
3592#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) 3662#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
3593#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) 3663#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
3594#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) 3664#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
3595#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) 3665#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
3596 3666
3597/* Pipe B */ 3667/* Pipe B */
3598#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) 3668#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
3599#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) 3669#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
3600#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) 3670#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
3601#define _PIPEBFRAMEHIGH 0x71040 3671#define _PIPEBFRAMEHIGH 0x71040
3602#define _PIPEBFRAMEPIXEL 0x71044 3672#define _PIPEBFRAMEPIXEL 0x71044
3603#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040) 3673#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
3604#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044) 3674#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
3605 3675
3606 3676
3607/* Display B control */ 3677/* Display B control */
3608#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) 3678#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
3609#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 3679#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
3610#define DISPPLANE_ALPHA_TRANS_DISABLE 0 3680#define DISPPLANE_ALPHA_TRANS_DISABLE 0
3611#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 3681#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
3612#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 3682#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
3613#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) 3683#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
3614#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) 3684#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
3615#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) 3685#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
3616#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) 3686#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
3617#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) 3687#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
3618#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) 3688#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
3619#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) 3689#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
3620#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) 3690#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
3621 3691
3622/* Sprite A control */ 3692/* Sprite A control */
3623#define _DVSACNTR 0x72180 3693#define _DVSACNTR 0x72180
@@ -3866,48 +3936,45 @@
3866#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 3936#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
3867 3937
3868 3938
3869#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) 3939#define _PIPEA_DATA_M1 0x60030
3870#define PIPE_DATA_M1_OFFSET 0 3940#define PIPE_DATA_M1_OFFSET 0
3871#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) 3941#define _PIPEA_DATA_N1 0x60034
3872#define PIPE_DATA_N1_OFFSET 0 3942#define PIPE_DATA_N1_OFFSET 0
3873 3943
3874#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) 3944#define _PIPEA_DATA_M2 0x60038
3875#define PIPE_DATA_M2_OFFSET 0 3945#define PIPE_DATA_M2_OFFSET 0
3876#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) 3946#define _PIPEA_DATA_N2 0x6003c
3877#define PIPE_DATA_N2_OFFSET 0 3947#define PIPE_DATA_N2_OFFSET 0
3878 3948
3879#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) 3949#define _PIPEA_LINK_M1 0x60040
3880#define PIPE_LINK_M1_OFFSET 0 3950#define PIPE_LINK_M1_OFFSET 0
3881#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) 3951#define _PIPEA_LINK_N1 0x60044
3882#define PIPE_LINK_N1_OFFSET 0 3952#define PIPE_LINK_N1_OFFSET 0
3883 3953
3884#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) 3954#define _PIPEA_LINK_M2 0x60048
3885#define PIPE_LINK_M2_OFFSET 0 3955#define PIPE_LINK_M2_OFFSET 0
3886#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) 3956#define _PIPEA_LINK_N2 0x6004c
3887#define PIPE_LINK_N2_OFFSET 0 3957#define PIPE_LINK_N2_OFFSET 0
3888 3958
3889/* PIPEB timing regs are the same, starting from 0x61000 */ 3959/* PIPEB timing regs are the same, starting from 0x61000 */
3890 3960
3891#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) 3961#define _PIPEB_DATA_M1 0x61030
3892#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) 3962#define _PIPEB_DATA_N1 0x61034
3893 3963#define _PIPEB_DATA_M2 0x61038
3894#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) 3964#define _PIPEB_DATA_N2 0x6103c
3895#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) 3965#define _PIPEB_LINK_M1 0x61040
3896 3966#define _PIPEB_LINK_N1 0x61044
3897#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) 3967#define _PIPEB_LINK_M2 0x61048
3898#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) 3968#define _PIPEB_LINK_N2 0x6104c
3899 3969
3900#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) 3970#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
3901#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) 3971#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
3902 3972#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
3903#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3973#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
3904#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 3974#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
3905#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 3975#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
3906#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 3976#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
3907#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 3977#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
3908#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
3909#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
3910#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
3911 3978
3912/* CPU panel fitter */ 3979/* CPU panel fitter */
3913/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 3980/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4084,13 +4151,14 @@
4084#define ILK_ELPIN_409_SELECT (1 << 25) 4151#define ILK_ELPIN_409_SELECT (1 << 25)
4085#define ILK_DPARB_GATE (1<<22) 4152#define ILK_DPARB_GATE (1<<22)
4086#define ILK_VSDPFD_FULL (1<<21) 4153#define ILK_VSDPFD_FULL (1<<21)
4087#define ILK_DISPLAY_CHICKEN_FUSES 0x42014 4154#define FUSE_STRAP 0x42014
4088#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) 4155#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
4089#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) 4156#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
4090#define ILK_DISPLAY_DEBUG_DISABLE (1<<29) 4157#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
4091#define ILK_HDCP_DISABLE (1<<25) 4158#define ILK_HDCP_DISABLE (1 << 25)
4092#define ILK_eDP_A_DISABLE (1<<24) 4159#define ILK_eDP_A_DISABLE (1 << 24)
4093#define ILK_DESKTOP (1<<23) 4160#define HSW_CDCLK_LIMIT (1 << 24)
4161#define ILK_DESKTOP (1 << 23)
4094 4162
4095#define ILK_DSPCLK_GATE_D 0x42020 4163#define ILK_DSPCLK_GATE_D 0x42020
4096#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) 4164#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
@@ -4109,7 +4177,8 @@
4109 4177
4110#define _CHICKEN_PIPESL_1_A 0x420b0 4178#define _CHICKEN_PIPESL_1_A 0x420b0
4111#define _CHICKEN_PIPESL_1_B 0x420b4 4179#define _CHICKEN_PIPESL_1_B 0x420b4
4112#define DPRS_MASK_VBLANK_SRD (1 << 0) 4180#define HSW_FBCQ_DIS (1 << 22)
4181#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
4113#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) 4182#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
4114 4183
4115#define DISP_ARB_CTL 0x45000 4184#define DISP_ARB_CTL 0x45000
@@ -4120,6 +4189,8 @@
4120#define GEN7_MSG_CTL 0x45010 4189#define GEN7_MSG_CTL 0x45010
4121#define WAIT_FOR_PCH_RESET_ACK (1<<1) 4190#define WAIT_FOR_PCH_RESET_ACK (1<<1)
4122#define WAIT_FOR_PCH_FLR_ACK (1<<0) 4191#define WAIT_FOR_PCH_FLR_ACK (1<<0)
4192#define HSW_NDE_RSTWRN_OPT 0x46408
4193#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
4123 4194
4124/* GEN7 chicken */ 4195/* GEN7 chicken */
4125#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 4196#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
@@ -4127,8 +4198,11 @@
4127#define COMMON_SLICE_CHICKEN2 0x7014 4198#define COMMON_SLICE_CHICKEN2 0x7014
4128# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 4199# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
4129 4200
4201#define GEN7_L3SQCREG1 0xB010
4202#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
4203
4130#define GEN7_L3CNTLREG1 0xB01C 4204#define GEN7_L3CNTLREG1 0xB01C
4131#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 4205#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
4132#define GEN7_L3AGDIS (1<<19) 4206#define GEN7_L3AGDIS (1<<19)
4133 4207
4134#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 4208#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
@@ -4148,9 +4222,6 @@
4148#define HSW_SCRATCH1 0xb038 4222#define HSW_SCRATCH1 0xb038
4149#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) 4223#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
4150 4224
4151#define HSW_FUSE_STRAP 0x42014
4152#define HSW_CDCLK_LIMIT (1 << 24)
4153
4154/* PCH */ 4225/* PCH */
4155 4226
4156/* south display engine interrupt: IBX */ 4227/* south display engine interrupt: IBX */
@@ -4436,24 +4507,24 @@
4436#define HSW_VIDEO_DIP_GCP_B 0x61210 4507#define HSW_VIDEO_DIP_GCP_B 0x61210
4437 4508
4438#define HSW_TVIDEO_DIP_CTL(trans) \ 4509#define HSW_TVIDEO_DIP_CTL(trans) \
4439 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 4510 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
4440#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 4511#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
4441 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 4512 _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
4442#define HSW_TVIDEO_DIP_VS_DATA(trans) \ 4513#define HSW_TVIDEO_DIP_VS_DATA(trans) \
4443 _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) 4514 _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
4444#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 4515#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
4445 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 4516 _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
4446#define HSW_TVIDEO_DIP_GCP(trans) \ 4517#define HSW_TVIDEO_DIP_GCP(trans) \
4447 _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) 4518 _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
4448#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 4519#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
4449 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 4520 _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
4450 4521
4451#define HSW_STEREO_3D_CTL_A 0x70020 4522#define HSW_STEREO_3D_CTL_A 0x70020
4452#define S3D_ENABLE (1<<31) 4523#define S3D_ENABLE (1<<31)
4453#define HSW_STEREO_3D_CTL_B 0x71020 4524#define HSW_STEREO_3D_CTL_B 0x71020
4454 4525
4455#define HSW_STEREO_3D_CTL(trans) \ 4526#define HSW_STEREO_3D_CTL(trans) \
4456 _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) 4527 _PIPE2(trans, HSW_STEREO_3D_CTL_A)
4457 4528
4458#define _PCH_TRANS_HTOTAL_B 0xe1000 4529#define _PCH_TRANS_HTOTAL_B 0xe1000
4459#define _PCH_TRANS_HBLANK_B 0xe1004 4530#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -4865,6 +4936,9 @@
4865#define GEN7_UCGCTL4 0x940c 4936#define GEN7_UCGCTL4 0x940c
4866#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) 4937#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
4867 4938
4939#define GEN8_UCGCTL6 0x9430
4940#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
4941
4868#define GEN6_RPNSWREQ 0xA008 4942#define GEN6_RPNSWREQ 0xA008
4869#define GEN6_TURBO_DISABLE (1<<31) 4943#define GEN6_TURBO_DISABLE (1<<31)
4870#define GEN6_FREQUENCY(x) ((x)<<25) 4944#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4945,6 +5019,10 @@
4945 GEN6_PM_RP_DOWN_THRESHOLD | \ 5019 GEN6_PM_RP_DOWN_THRESHOLD | \
4946 GEN6_PM_RP_DOWN_TIMEOUT) 5020 GEN6_PM_RP_DOWN_TIMEOUT)
4947 5021
5022#define VLV_GTLC_SURVIVABILITY_REG 0x130098
5023#define VLV_GFX_CLK_STATUS_BIT (1<<3)
5024#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
5025
4948#define GEN6_GT_GFX_RC6_LOCKED 0x138104 5026#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4949#define VLV_COUNTER_CONTROL 0x138104 5027#define VLV_COUNTER_CONTROL 0x138104
4950#define VLV_COUNT_RANGE_HIGH (1<<15) 5028#define VLV_COUNT_RANGE_HIGH (1<<15)
@@ -5006,6 +5084,10 @@
5006#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) 5084#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
5007#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 5085#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
5008 5086
5087#define GEN8_ROW_CHICKEN 0xe4f0
5088#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
5089#define STALL_DOP_GATING_DISABLE (1<<5)
5090
5009#define GEN7_ROW_CHICKEN2 0xe4f4 5091#define GEN7_ROW_CHICKEN2 0xe4f4
5010#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 5092#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
5011#define DOP_CLOCK_GATING_DISABLE (1<<0) 5093#define DOP_CLOCK_GATING_DISABLE (1<<0)
@@ -5017,7 +5099,7 @@
5017#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 5099#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
5018#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 5100#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
5019 5101
5020#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 5102#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
5021#define INTEL_AUDIO_DEVCL 0x808629FB 5103#define INTEL_AUDIO_DEVCL 0x808629FB
5022#define INTEL_AUDIO_DEVBLC 0x80862801 5104#define INTEL_AUDIO_DEVBLC 0x80862801
5023#define INTEL_AUDIO_DEVCTG 0x80862802 5105#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -5178,8 +5260,8 @@
5178#define TRANS_DDI_FUNC_CTL_B 0x61400 5260#define TRANS_DDI_FUNC_CTL_B 0x61400
5179#define TRANS_DDI_FUNC_CTL_C 0x62400 5261#define TRANS_DDI_FUNC_CTL_C 0x62400
5180#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 5262#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
5181#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ 5263#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
5182 TRANS_DDI_FUNC_CTL_B) 5264
5183#define TRANS_DDI_FUNC_ENABLE (1<<31) 5265#define TRANS_DDI_FUNC_ENABLE (1<<31)
5184/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 5266/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
5185#define TRANS_DDI_PORT_MASK (7<<28) 5267#define TRANS_DDI_PORT_MASK (7<<28)
@@ -5311,8 +5393,12 @@
5311#define SPLL_PLL_ENABLE (1<<31) 5393#define SPLL_PLL_ENABLE (1<<31)
5312#define SPLL_PLL_SSC (1<<28) 5394#define SPLL_PLL_SSC (1<<28)
5313#define SPLL_PLL_NON_SSC (2<<28) 5395#define SPLL_PLL_NON_SSC (2<<28)
5396#define SPLL_PLL_LCPLL (3<<28)
5397#define SPLL_PLL_REF_MASK (3<<28)
5314#define SPLL_PLL_FREQ_810MHz (0<<26) 5398#define SPLL_PLL_FREQ_810MHz (0<<26)
5315#define SPLL_PLL_FREQ_1350MHz (1<<26) 5399#define SPLL_PLL_FREQ_1350MHz (1<<26)
5400#define SPLL_PLL_FREQ_2700MHz (2<<26)
5401#define SPLL_PLL_FREQ_MASK (3<<26)
5316 5402
5317/* WRPLL */ 5403/* WRPLL */
5318#define WRPLL_CTL1 0x46040 5404#define WRPLL_CTL1 0x46040
@@ -5323,8 +5409,13 @@
5323#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 5409#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
5324/* WRPLL divider programming */ 5410/* WRPLL divider programming */
5325#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 5411#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
5412#define WRPLL_DIVIDER_REF_MASK (0xff)
5326#define WRPLL_DIVIDER_POST(x) ((x)<<8) 5413#define WRPLL_DIVIDER_POST(x) ((x)<<8)
5414#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
5415#define WRPLL_DIVIDER_POST_SHIFT 8
5327#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) 5416#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
5417#define WRPLL_DIVIDER_FB_SHIFT 16
5418#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
5328 5419
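The new mask/shift pairs make the WRPLL divider fields readable as well as writable. A hypothetical decode of a readback value (not driver code; the sample value is made up):

#include <stdint.h>
#include <stdio.h>

#define WRPLL_DIVIDER_REF_MASK   (0xff)
#define WRPLL_DIVIDER_POST_MASK  (0x3f << 8)
#define WRPLL_DIVIDER_POST_SHIFT 8
#define WRPLL_DIVIDER_FB_MASK    (0xff << 16)
#define WRPLL_DIVIDER_FB_SHIFT   16

int main(void)
{
	uint32_t val = (0x42u << 16) | (0x21u << 8) | 0x07u; /* sample value */

	/* prints ref=7 post=33 fb=66 */
	printf("ref=%u post=%u fb=%u\n",
	       val & WRPLL_DIVIDER_REF_MASK,
	       (val & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT,
	       (val & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT);
	return 0;
}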
5329/* Port clock selection */ 5420/* Port clock selection */
5330#define PORT_CLK_SEL_A 0x46100 5421#define PORT_CLK_SEL_A 0x46100
@@ -5337,6 +5428,7 @@
5337#define PORT_CLK_SEL_WRPLL1 (4<<29) 5428#define PORT_CLK_SEL_WRPLL1 (4<<29)
5338#define PORT_CLK_SEL_WRPLL2 (5<<29) 5429#define PORT_CLK_SEL_WRPLL2 (5<<29)
5339#define PORT_CLK_SEL_NONE (7<<29) 5430#define PORT_CLK_SEL_NONE (7<<29)
5431#define PORT_CLK_SEL_MASK (7<<29)
5340 5432
5341/* Transcoder clock selection */ 5433/* Transcoder clock selection */
5342#define TRANS_CLK_SEL_A 0x46140 5434#define TRANS_CLK_SEL_A 0x46140
@@ -5346,10 +5438,12 @@
5346#define TRANS_CLK_SEL_DISABLED (0x0<<29) 5438#define TRANS_CLK_SEL_DISABLED (0x0<<29)
5347#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) 5439#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
5348 5440
5349#define _TRANSA_MSA_MISC 0x60410 5441#define TRANSA_MSA_MISC 0x60410
5350#define _TRANSB_MSA_MISC 0x61410 5442#define TRANSB_MSA_MISC 0x61410
5351#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ 5443#define TRANSC_MSA_MISC 0x62410
5352 _TRANSB_MSA_MISC) 5444#define TRANS_EDP_MSA_MISC 0x6f410
5445#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
5446
5353#define TRANS_MSA_SYNC_CLK (1<<0) 5447#define TRANS_MSA_SYNC_CLK (1<<0)
5354#define TRANS_MSA_6_BPC (0<<5) 5448#define TRANS_MSA_6_BPC (0<<5)
5355#define TRANS_MSA_8_BPC (1<<5) 5449#define TRANS_MSA_8_BPC (1<<5)
@@ -5389,6 +5483,8 @@
5389 5483
5390/* SFUSE_STRAP */ 5484/* SFUSE_STRAP */
5391#define SFUSE_STRAP 0xc2014 5485#define SFUSE_STRAP 0xc2014
5486#define SFUSE_STRAP_FUSE_LOCK (1<<13)
5487#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
5392#define SFUSE_STRAP_DDIB_DETECTED (1<<2) 5488#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
5393#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 5489#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
5394#define SFUSE_STRAP_DDID_DETECTED (1<<0) 5490#define SFUSE_STRAP_DDID_DETECTED (1<<0)
@@ -5857,4 +5953,12 @@
5857#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) 5953#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
5858#define READ_DATA_VALID(n) (1 << (n)) 5954#define READ_DATA_VALID(n) (1 << (n))
5859 5955
5956/* For UMS only (deprecated): */
5957#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
5958#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
5959#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
5960#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
5961#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
5962#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
5963
5860#endif /* _I915_REG_H_ */ 5964#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8150fdc08d49..56785e8fb2eb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev)
236 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 236 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
237 } 237 }
238 238
239 /* Only regfile.save FBC state on the platform that supports FBC */ 239 /* save FBC interval */
240 if (HAS_FBC(dev)) { 240 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
241 if (HAS_PCH_SPLIT(dev)) { 241 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
243 } else if (IS_GM45(dev)) {
244 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
245 } else {
246 dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
247 dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
248 dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
249 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
250 }
251 }
252 242
253 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 243 if (!drm_core_check_feature(dev, DRIVER_MODESET))
254 i915_save_vga(dev); 244 i915_save_vga(dev);
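The rewritten save path keeps only the FBC interval register, and only on the platforms that still use FBC_CONTROL. A sketch of the new gate, with the device-info predicates stubbed out since HAS_FBC()/IS_G4X() depend on the platform tables:

#include <stdbool.h>

/* stand-ins for the driver's device-info predicates */
struct devinfo { bool has_fbc; bool is_g4x; int gen; };

/* mirrors the condition guarding the FBC_CONTROL save/restore */
static bool saves_fbc_control(const struct devinfo *d)
{
	return d->has_fbc && d->gen <= 4 && !d->is_g4x;
}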
@@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev)
300 290
301 /* only restore FBC info on the platform that supports FBC */ 291 /* only restore FBC info on the platform that supports FBC */
302 intel_disable_fbc(dev); 292 intel_disable_fbc(dev);
303 if (HAS_FBC(dev)) { 293
304 if (HAS_PCH_SPLIT(dev)) { 294 /* restore FBC interval */
305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 295 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
306 } else if (IS_GM45(dev)) { 296 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
307 I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
308 } else {
309 I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
310 I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
311 I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
312 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
313 }
314 }
315 297
316 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 298 if (!drm_core_check_feature(dev, DRIVER_MODESET))
317 i915_restore_vga(dev); 299 i915_restore_vga(dev);
@@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev)
324 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = dev->dev_private;
325 int i; 307 int i;
326 308
327 if (INTEL_INFO(dev)->gen <= 4)
328 pci_read_config_byte(dev->pdev, LBB,
329 &dev_priv->regfile.saveLBB);
330
331 mutex_lock(&dev->struct_mutex); 309 mutex_lock(&dev->struct_mutex);
332 310
333 i915_save_display(dev); 311 i915_save_display(dev);
@@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev)
377 struct drm_i915_private *dev_priv = dev->dev_private; 355 struct drm_i915_private *dev_priv = dev->dev_private;
378 int i; 356 int i;
379 357
380 if (INTEL_INFO(dev)->gen <= 4)
381 pci_write_config_byte(dev->pdev, LBB,
382 dev_priv->regfile.saveLBB);
383
384 mutex_lock(&dev->struct_mutex); 358 mutex_lock(&dev->struct_mutex);
385 359
386 i915_gem_restore_fences(dev); 360 i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 33bcae314bf8..9c57029f6f4b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
269 freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 269 freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
270 ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); 270 ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
271 } else { 271 } else {
272 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 272 ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
273 } 273 }
274 mutex_unlock(&dev_priv->rps.hw_lock); 274 mutex_unlock(&dev_priv->rps.hw_lock);
275 275
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
284 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = dev->dev_private;
285 285
286 return snprintf(buf, PAGE_SIZE, "%d\n", 286 return snprintf(buf, PAGE_SIZE, "%d\n",
287 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); 287 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
288} 288}
289 289
290static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 290static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
298 298
299 mutex_lock(&dev_priv->rps.hw_lock); 299 mutex_lock(&dev_priv->rps.hw_lock);
300 if (IS_VALLEYVIEW(dev_priv->dev)) 300 if (IS_VALLEYVIEW(dev_priv->dev))
301 ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); 301 ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
302 else 302 else
303 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 303 ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
304 mutex_unlock(&dev_priv->rps.hw_lock); 304 mutex_unlock(&dev_priv->rps.hw_lock);
305 305
306 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 306 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
313 struct drm_minor *minor = dev_to_drm_minor(kdev); 313 struct drm_minor *minor = dev_to_drm_minor(kdev);
314 struct drm_device *dev = minor->dev; 314 struct drm_device *dev = minor->dev;
315 struct drm_i915_private *dev_priv = dev->dev_private; 315 struct drm_i915_private *dev_priv = dev->dev_private;
316 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; 316 u32 val;
317 ssize_t ret; 317 ssize_t ret;
318 318
319 ret = kstrtou32(buf, 0, &val); 319 ret = kstrtou32(buf, 0, &val);
@@ -324,38 +324,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
324 324
325 mutex_lock(&dev_priv->rps.hw_lock); 325 mutex_lock(&dev_priv->rps.hw_lock);
326 326
327 if (IS_VALLEYVIEW(dev_priv->dev)) { 327 if (IS_VALLEYVIEW(dev_priv->dev))
328 val = vlv_freq_opcode(dev_priv, val); 328 val = vlv_freq_opcode(dev_priv, val);
329 329 else
330 hw_max = valleyview_rps_max_freq(dev_priv);
331 hw_min = valleyview_rps_min_freq(dev_priv);
332 non_oc_max = hw_max;
333 } else {
334 val /= GT_FREQUENCY_MULTIPLIER; 330 val /= GT_FREQUENCY_MULTIPLIER;
335 331
336 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 332 if (val < dev_priv->rps.min_freq ||
337 hw_max = dev_priv->rps.hw_max; 333 val > dev_priv->rps.max_freq ||
338 non_oc_max = (rp_state_cap & 0xff); 334 val < dev_priv->rps.min_freq_softlimit) {
339 hw_min = ((rp_state_cap & 0xff0000) >> 16);
340 }
341
342 if (val < hw_min || val > hw_max ||
343 val < dev_priv->rps.min_delay) {
344 mutex_unlock(&dev_priv->rps.hw_lock); 335 mutex_unlock(&dev_priv->rps.hw_lock);
345 return -EINVAL; 336 return -EINVAL;
346 } 337 }
347 338
348 if (val > non_oc_max) 339 if (val > dev_priv->rps.rp0_freq)
349 DRM_DEBUG("User requested overclocking to %d\n", 340 DRM_DEBUG("User requested overclocking to %d\n",
350 val * GT_FREQUENCY_MULTIPLIER); 341 val * GT_FREQUENCY_MULTIPLIER);
351 342
352 dev_priv->rps.max_delay = val; 343 dev_priv->rps.max_freq_softlimit = val;
353 344
354 if (dev_priv->rps.cur_delay > val) { 345 if (dev_priv->rps.cur_freq > val) {
355 if (IS_VALLEYVIEW(dev)) 346 if (IS_VALLEYVIEW(dev))
356 valleyview_set_rps(dev, val); 347 valleyview_set_rps(dev, val);
357 else 348 else
358 gen6_set_rps(dev, val); 349 gen6_set_rps(dev, val);
350 } else if (!IS_VALLEYVIEW(dev)) {
351 /* We still need gen6_set_rps to process the new max_delay and
352 * update the interrupt limits even though frequency request is
353 * unchanged. */
354 gen6_set_rps(dev, dev_priv->rps.cur_freq);
359 } 355 }
360 356
361 mutex_unlock(&dev_priv->rps.hw_lock); 357 mutex_unlock(&dev_priv->rps.hw_lock);
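Both the max and min sysfs store paths now validate against one flat set of cached limits instead of re-reading RP_STATE_CAP under the lock. A condensed sketch of the new bounds check for the max path (field names from the hunk above; the struct is trimmed to just those fields):

#include <stdbool.h>
#include <stdint.h>

struct rps {
	uint32_t min_freq, max_freq;                      /* hardware range */
	uint32_t min_freq_softlimit, max_freq_softlimit;  /* user clamps */
};

/* mirrors the -EINVAL test in gt_max_freq_mhz_store(): a new max must
 * stay within the hardware range and not undercut the min softlimit */
static bool new_max_ok(const struct rps *rps, uint32_t val)
{
	return val >= rps->min_freq && val <= rps->max_freq &&
	       val >= rps->min_freq_softlimit;
}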
@@ -374,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
374 370
375 mutex_lock(&dev_priv->rps.hw_lock); 371 mutex_lock(&dev_priv->rps.hw_lock);
376 if (IS_VALLEYVIEW(dev_priv->dev)) 372 if (IS_VALLEYVIEW(dev_priv->dev))
377 ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); 373 ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
378 else 374 else
379 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 375 ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
380 mutex_unlock(&dev_priv->rps.hw_lock); 376 mutex_unlock(&dev_priv->rps.hw_lock);
381 377
382 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 378 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -389,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
389 struct drm_minor *minor = dev_to_drm_minor(kdev); 385 struct drm_minor *minor = dev_to_drm_minor(kdev);
390 struct drm_device *dev = minor->dev; 386 struct drm_device *dev = minor->dev;
391 struct drm_i915_private *dev_priv = dev->dev_private; 387 struct drm_i915_private *dev_priv = dev->dev_private;
392 u32 val, rp_state_cap, hw_max, hw_min; 388 u32 val;
393 ssize_t ret; 389 ssize_t ret;
394 390
395 ret = kstrtou32(buf, 0, &val); 391 ret = kstrtou32(buf, 0, &val);
@@ -400,31 +396,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
400 396
401 mutex_lock(&dev_priv->rps.hw_lock); 397 mutex_lock(&dev_priv->rps.hw_lock);
402 398
403 if (IS_VALLEYVIEW(dev)) { 399 if (IS_VALLEYVIEW(dev))
404 val = vlv_freq_opcode(dev_priv, val); 400 val = vlv_freq_opcode(dev_priv, val);
405 401 else
406 hw_max = valleyview_rps_max_freq(dev_priv);
407 hw_min = valleyview_rps_min_freq(dev_priv);
408 } else {
409 val /= GT_FREQUENCY_MULTIPLIER; 402 val /= GT_FREQUENCY_MULTIPLIER;
410 403
411 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 404 if (val < dev_priv->rps.min_freq ||
412 hw_max = dev_priv->rps.hw_max; 405 val > dev_priv->rps.max_freq ||
413 hw_min = ((rp_state_cap & 0xff0000) >> 16); 406 val > dev_priv->rps.max_freq_softlimit) {
414 }
415
416 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
417 mutex_unlock(&dev_priv->rps.hw_lock); 407 mutex_unlock(&dev_priv->rps.hw_lock);
418 return -EINVAL; 408 return -EINVAL;
419 } 409 }
420 410
421 dev_priv->rps.min_delay = val; 411 dev_priv->rps.min_freq_softlimit = val;
422 412
423 if (dev_priv->rps.cur_delay < val) { 413 if (dev_priv->rps.cur_freq < val) {
424 if (IS_VALLEYVIEW(dev)) 414 if (IS_VALLEYVIEW(dev))
425 valleyview_set_rps(dev, val); 415 valleyview_set_rps(dev, val);
426 else 416 else
427 gen6_set_rps(dev, val); 417 gen6_set_rps(dev, val);
418 } else if (!IS_VALLEYVIEW(dev)) {
419 /* We still need gen6_set_rps to process the new min_delay and
420 * update the interrupt limits even though frequency request is
421 * unchanged. */
422 gen6_set_rps(dev, dev_priv->rps.cur_freq);
428 } 423 }
429 424
430 mutex_unlock(&dev_priv->rps.hw_lock); 425 mutex_unlock(&dev_priv->rps.hw_lock);
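[Editor's note] The two store hunks above replace ad-hoc RP_STATE_CAP register reads with the driver's cached min_freq/max_freq/rp0_freq fields plus a pair of softlimits. A minimal standalone sketch of the resulting clamp rule for the max store (field names follow the diff; the helper itself is hypothetical):

	/* Sketch only: a requested max softlimit must stay inside the
	 * cached hardware range and must not drop below the min softlimit. */
	#include <errno.h>

	static int check_new_max(unsigned int val, unsigned int min_freq,
				 unsigned int max_freq, unsigned int min_soft)
	{
		if (val < min_freq || val > max_freq || val < min_soft)
			return -EINVAL;
		return 0;
	}

The min store is symmetric: it rejects values above the max softlimit instead.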
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6e580c98dede..23c26f1f8b37 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_vma_bind,
-	    TP_PROTO(struct i915_vma *vma, bool mappable),
-	    TP_ARGS(vma, mappable),
+	    TP_PROTO(struct i915_vma *vma, unsigned flags),
+	    TP_ARGS(vma, flags),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
 			     __field(struct i915_address_space *, vm)
 			     __field(u32, offset)
 			     __field(u32, size)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
 			   __entry->vm = vma->vm;
 			   __entry->offset = vma->node.start;
 			   __entry->size = vma->node.size;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			   ),
 
 	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
 		      __entry->obj, __entry->offset, __entry->size,
-		      __entry->mappable ? ", mappable" : "",
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 		      __entry->vm)
 );
 
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
-	    TP_ARGS(dev, size, align, mappable),
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+	    TP_ARGS(dev, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
 			   __entry->size = size;
 			   __entry->align = align;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			  ),
 
 	    TP_printk("dev=%d, size=%d, align=%d %s",
 		      __entry->dev, __entry->size, __entry->align,
-		      __entry->mappable ? ", mappable" : "")
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm,
 	    TP_ARGS(vm),
 
 	    TP_STRUCT__entry(
+			     __field(u32, dev)
 			     __field(struct i915_address_space *, vm)
 			    ),
 
 	    TP_fast_assign(
+			   __entry->dev = vm->dev->primary->index;
 			   __entry->vm = vm;
 			  ),
 
-	    TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
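[Editor's note] The i915_gem_evict_vm change above is subtle: the device index is now captured in TP_fast_assign, while the event fires, instead of being derived in TP_printk by chasing the vm pointer when the trace buffer is read, at which point the pointer may be stale. A standalone sketch of the pattern, using stub types rather than the kernel's:

	/* Sketch with stub types; mirrors the snapshot-at-trace-time fix. */
	struct drm_minor { unsigned int index; };
	struct drm_device { struct drm_minor *primary; };
	struct i915_address_space { struct drm_device *dev; };

	struct evict_vm_entry {
		unsigned int dev;                     /* copied while vm is live */
		const struct i915_address_space *vm;  /* printed as %p only */
	};

	static void record_evict_vm(struct evict_vm_entry *e,
				    const struct i915_address_space *vm)
	{
		e->dev = vm->dev->primary->index; /* dereference at trace time */
		e->vm = vm;                       /* never chased at print time */
	}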
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index caa18e855815..480da593e6c0 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev)
 	/* FIXME: regfile.save TV & SDVO state */
 
 	/* Backlight */
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_read_config_byte(dev->pdev, PCI_LBPC,
+				     &dev_priv->regfile.saveLBB);
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
 		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev)
 	int i;
 
 	/* Backlight */
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_write_config_byte(dev->pdev, PCI_LBPC,
+				      dev_priv->regfile.saveLBB);
+
 	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
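[Editor's note] The two hunks above bracket the same legacy-backlight byte: saved from PCI config space on the save path, written back verbatim on restore, and only on gen <= 4 hardware. A hedged kernel-style sketch of the symmetric pair (PCI_LBPC is i915's private offset for the legacy backlight register; treat the 0xf4 value as an assumption):

	#include <linux/pci.h>

	#define PCI_LBPC 0xf4	/* assumption: legacy backlight brightness */

	static u8 saved_lbb;	/* sketch only; i915 keeps this in regfile */

	static void lbb_save(struct pci_dev *pdev)
	{
		pci_read_config_byte(pdev, PCI_LBPC, &saved_lbb);
	}

	static void lbb_restore(struct pci_dev *pdev)
	{
		pci_write_config_byte(pdev, PCI_LBPC, saved_lbb);
	}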
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f22041973f3a..4867f4cc0938 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 		downclock = dvo_timing->clock;
 	}
 
-	if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+	if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
 		dev_priv->lvds_downclock_avail = 1;
 		dev_priv->lvds_downclock = downclock * 10;
 		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
@@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 	struct drm_display_mode *panel_fixed_mode;
 	int index;
 
-	index = i915_vbt_sdvo_panel_type;
+	index = i915.vbt_sdvo_panel_type;
 	if (index == -2) {
 		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
 		return;
@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
 	struct bdb_mipi *mipi;
 
-	mipi = find_section(bdb, BDB_MIPI);
+	mipi = find_section(bdb, BDB_MIPI_CONFIG);
 	if (!mipi) {
 		DRM_DEBUG_KMS("No MIPI BDB found");
 		return;
 	}
 
 	/* XXX: add more info */
-	dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 282de5e9f39d..83b7629e4367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,7 +104,8 @@ struct vbios_data {
 #define BDB_LVDS_LFP_DATA	 42
 #define BDB_LVDS_BACKLIGHT	 43
 #define BDB_LVDS_POWER		 44
-#define BDB_MIPI		 50
+#define BDB_MIPI_CONFIG		 52
+#define BDB_MIPI_SEQUENCE	 53
 #define BDB_SKIP		254 /* VBIOS private block, ignore */
 
 struct bdb_general_features {
@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
 #define DVO_PORT_DPD	9
 #define DVO_PORT_DPA	10
 
-/* MIPI DSI panel info */
-struct bdb_mipi {
-	u16 panel_id;
-	u16 bridge_revision;
-
-	/* General params */
-	u32 dithering:1;
-	u32 bpp_pixel_format:1;
-	u32 rsvd1:1;
-	u32 dphy_valid:1;
-	u32 resvd2:28;
+/* Block 52 contains MIPI Panel info
+ * 6 such entries will be there. Index into the correct
+ * entry is based on the panel_index in #40 LFP
+ */
+#define MAX_MIPI_CONFIGURATIONS	6
 
-	u16 port_info;
-	u16 rsvd3:2;
-	u16 num_lanes:2;
-	u16 rsvd4:12;
+#define MIPI_DSI_UNDEFINED_PANEL_ID	0
+#define MIPI_DSI_GENERIC_PANEL_ID	1
 
-	/* DSI config */
-	u16 virt_ch_num:2;
-	u16 vtm:2;
-	u16 rsvd5:12;
+struct mipi_config {
+	u16 panel_id;
 
-	u32 dsi_clock;
+	/* General Params */
+	u32 enable_dithering:1;
+	u32 rsvd1:1;
+	u32 is_bridge:1;
+
+	u32 panel_arch_type:2;
+	u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE	0x1
+#define NON_BURST_SYNC_EVENTS	0x2
+#define BURST_MODE		0x3
+	u32 video_transfer_mode:2;
+
+	u32 cabc_supported:1;
+	u32 pwm_blc:1;
+
+	/* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565			0x1
+#define PIXEL_FORMAT_RGB666			0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED	0x3
+#define PIXEL_FORMAT_RGB888			0x4
+	u32 videomode_color_format:4;
+
+	/* Bit 15:14 */
+#define ENABLE_ROTATION_0	0x0
+#define ENABLE_ROTATION_90	0x1
+#define ENABLE_ROTATION_180	0x2
+#define ENABLE_ROTATION_270	0x3
+	u32 rotation:2;
+	u32 bta_enabled:1;
+	u32 rsvd2:15;
+
+	/* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED	0
+#define DUAL_LINK_FRONT_BACK	1
+#define DUAL_LINK_PIXEL_ALT	2
+	u16 dual_link:2;
+	u16 lane_cnt:2;
+	u16 rsvd3:12;
+
+	u16 rsvd4;
+
+	u8 rsvd5[5];
+	u32 dsi_ddr_clk;
 	u32 bridge_ref_clk;
-	u16 rsvd_pwr;
 
-	/* Dphy Params */
-	u32 prepare_cnt:5;
-	u32 rsvd6:3;
+#define BYTE_CLK_SEL_20MHZ	0
+#define BYTE_CLK_SEL_10MHZ	1
+#define BYTE_CLK_SEL_5MHZ	2
+	u8 byte_clk_sel:2;
+
+	u8 rsvd6:6;
+
+	/* DPHY Flags */
+	u16 dphy_param_valid:1;
+	u16 eot_pkt_disabled:1;
+	u16 enable_clk_stop:1;
+	u16 rsvd7:13;
+
+	u32 hs_tx_timeout;
+	u32 lp_rx_timeout;
+	u32 turn_around_timeout;
+	u32 device_reset_timer;
+	u32 master_init_timer;
+	u32 dbi_bw_timer;
+	u32 lp_byte_clk_val;
+
+	/* 4 byte Dphy Params */
+	u32 prepare_cnt:6;
+	u32 rsvd8:2;
 	u32 clk_zero_cnt:8;
 	u32 trail_cnt:5;
-	u32 rsvd7:3;
+	u32 rsvd9:3;
 	u32 exit_zero_cnt:6;
-	u32 rsvd8:2;
+	u32 rsvd10:2;
 
-	u32 hl_switch_cnt;
-	u32 lp_byte_clk;
 	u32 clk_lane_switch_cnt;
+	u32 hl_switch_cnt;
+
+	u32 rsvd11[6];
+
+	/* timings based on dphy spec */
+	u8 tclk_miss;
+	u8 tclk_post;
+	u8 rsvd12;
+	u8 tclk_pre;
+	u8 tclk_prepare;
+	u8 tclk_settle;
+	u8 tclk_term_enable;
+	u8 tclk_trail;
+	u16 tclk_prepare_clkzero;
+	u8 rsvd13;
+	u8 td_term_enable;
+	u8 teot;
+	u8 ths_exit;
+	u8 ths_prepare;
+	u16 ths_prepare_hszero;
+	u8 rsvd14;
+	u8 ths_settle;
+	u8 ths_skip;
+	u8 ths_trail;
+	u8 tinit;
+	u8 tlpx;
+	u8 rsvd15[3];
+
+	/* GPIOs */
+	u8 panel_enable;
+	u8 bl_enable;
+	u8 pwm_enable;
+	u8 reset_r_n;
+	u8 pwr_down_r;
+	u8 stdby_r_n;
+
 } __packed;
 
+/* Block 52 contains MIPI configuration block
+ * 6 * bdb_mipi_config, followed by 6 pps data
+ * block below
+ *
+ * all delays have a unit of 100us
+ */
+struct mipi_pps_data {
+	u16 panel_on_delay;
+	u16 bl_enable_delay;
+	u16 bl_disable_delay;
+	u16 panel_off_delay;
+	u16 panel_power_cycle_delay;
+};
+
+struct bdb_mipi_config {
+	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+};
+
+/* Block 53 contains MIPI sequences as needed by the panel
+ * for enabling it. This block can be variable in size and
+ * can be a maximum of 6 blocks
+ */
+struct bdb_mipi_sequence {
+	u8 version;
+	u8 data[0];
+};
+
 #endif /* _I830_BIOS_H_ */
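[Editor's note] The new layout is fixed-size: block 52 carries six mipi_config entries followed by six mipi_pps_data entries, and the comments above state that the LFP panel_index from block #40 selects the pair. A sketch of the lookup under that assumption (helper name hypothetical, using the structs defined in the diff):

	static const struct mipi_config *
	pick_mipi_config(const struct bdb_mipi_config *blk, int panel_index)
	{
		if (panel_index < 0 || panel_index >= MAX_MIPI_CONFIGURATIONS)
			return NULL;	/* outside the six fixed slots */
		/* blk->pps[panel_index] holds the matching power-sequence delays */
		return &blk->config[panel_index];
	}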
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e2e39e65f109..aa5a3dc43342 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	enum intel_display_power_domain power_domain;
 	u32 tmp;
 
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	tmp = I915_READ(crt->adpa_reg);
 
 	if (!(tmp & ADPA_DAC_ENABLE))
@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_LPT(dev))
 		pipe_config->pipe_bpp = 24;
 
+	/* FDI must always be 2.7 GHz */
+	if (HAS_DDI(dev))
+		pipe_config->port_clock = 135000 * 2;
+
 	return true;
 }
 
267 276
@@ -630,14 +639,22 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_encoder *intel_encoder = &crt->base;
+	enum intel_display_power_domain power_domain;
 	enum drm_connector_status status;
 	struct intel_load_detect_pipe tmp;
 
+	intel_runtime_pm_get(dev_priv);
+
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
 		      connector->base.id, drm_get_connector_name(connector),
 		      force);
 
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
 	if (I915_HAS_HOTPLUG(dev)) {
 		/* We can not rely on the HPD pin always being correctly wired
 		 * up, for example many KVM do not pass it through, and so
@@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		 */
 		if (intel_crt_detect_hotplug(connector)) {
 			DRM_DEBUG_KMS("CRT detected via hotplug\n");
-			return connector_status_connected;
+			status = connector_status_connected;
+			goto out;
 		} else
 			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
 	}
 
-	if (intel_crt_detect_ddc(connector))
-		return connector_status_connected;
+	if (intel_crt_detect_ddc(connector)) {
+		status = connector_status_connected;
+		goto out;
+	}
 
 	/* Load detection is broken on HPD capable machines. Whoever wants a
 	 * broken monitor (without edid) to work behind a broken kvm (that fails
 	 * to have the right resistors for HP detection) needs to fix this up.
 	 * For now just bail out. */
-	if (I915_HAS_HOTPLUG(dev))
-		return connector_status_disconnected;
+	if (I915_HAS_HOTPLUG(dev)) {
+		status = connector_status_disconnected;
+		goto out;
+	}
 
-	if (!force)
-		return connector->status;
+	if (!force) {
+		status = connector->status;
+		goto out;
+	}
 
 	/* for pre-945g platforms use load detect */
 	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 	} else
 		status = connector_status_unknown;
 
+out:
+	intel_display_power_put(dev_priv, power_domain);
+	intel_runtime_pm_put(dev_priv);
+
 	return status;
 }
 
@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_encoder *intel_encoder = &crt->base;
+	enum intel_display_power_domain power_domain;
 	int ret;
 	struct i2c_adapter *i2c;
 
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
 	ret = intel_crt_ddc_get_modes(connector, i2c);
 	if (ret || !IS_G4X(dev))
-		return ret;
+		goto out;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
 	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-	return intel_crt_ddc_get_modes(connector, i2c);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -765,6 +804,14 @@ static const struct dmi_system_id intel_no_crt[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
 		},
 	},
+	{
+		.callback = intel_no_crt_dmi_callback,
+		.ident = "DELL XPS 8700",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+		},
+	},
 	{ }
 };
 
@@ -800,7 +847,7 @@ void intel_crt_init(struct drm_device *dev)
 	intel_connector_attach_encoder(intel_connector, &crt->base);
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
-	crt->base.cloneable = true;
+	crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
 	if (IS_I830(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
@@ -833,6 +880,7 @@ void intel_crt_init(struct drm_device *dev)
 		crt->base.get_hw_state = intel_crt_get_hw_state;
 	}
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	intel_connector->unregister = intel_connector_unregister;
 
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
@@ -857,4 +905,6 @@ void intel_crt_init(struct drm_device *dev)
 
 		dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
 	}
+
+	intel_crt_reset(connector);
 }
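[Editor's note] The intel_crt.c hunks above all follow one pattern: take a runtime-PM and display-power reference before touching hardware, and funnel every exit through a single label so the references are always dropped. A standalone sketch of that shape, with stub names (not i915's API):

	enum detect_status { DISCONNECTED, CONNECTED };

	struct device_ctx { int hotplug; };          /* hypothetical stand-in */

	static void power_get(struct device_ctx *c) { (void)c; /* take refs */ }
	static void power_put(struct device_ctx *c) { (void)c; /* drop refs */ }
	static int probe_hotplug(struct device_ctx *c) { return c->hotplug; }

	static enum detect_status detect_with_power(struct device_ctx *ctx)
	{
		enum detect_status status;

		power_get(ctx);         /* like intel_runtime_pm_get() plus
					 * intel_display_power_get() above */
		if (probe_hotplug(ctx)) {
			status = CONNECTED;
			goto out;
		}
		status = DISCONNECTED;
	out:
		power_put(ctx);         /* single exit: references always dropped */
		return status;
	}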
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 234ac5f7bc5a..0ad4e9600063 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
 	/* Otherwise a < c && b >= d, do nothing */
 }
 
+static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+				     int reg)
+{
+	int refclk = LC_FREQ;
+	int n, p, r;
+	u32 wrpll;
+
+	wrpll = I915_READ(reg);
+	switch (wrpll & SPLL_PLL_REF_MASK) {
+	case SPLL_PLL_SSC:
+	case SPLL_PLL_NON_SSC:
+		/*
+		 * We could calculate spread here, but our checking
+		 * code only cares about 5% accuracy, and spread is a max of
+		 * 0.5% downspread.
+		 */
+		refclk = 135;
+		break;
+	case SPLL_PLL_LCPLL:
+		refclk = LC_FREQ;
+		break;
+	default:
+		WARN(1, "bad wrpll refclk\n");
+		return 0;
+	}
+
+	r = wrpll & WRPLL_DIVIDER_REF_MASK;
+	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+	/* Convert to KHz, p & r have a fixed point portion */
+	return (refclk * n * 100) / (p * r);
+}
+
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+				struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	int link_clock = 0;
+	u32 val, pll;
+
+	val = I915_READ(PORT_CLK_SEL(port));
+	switch (val & PORT_CLK_SEL_MASK) {
+	case PORT_CLK_SEL_LCPLL_810:
+		link_clock = 81000;
+		break;
+	case PORT_CLK_SEL_LCPLL_1350:
+		link_clock = 135000;
+		break;
+	case PORT_CLK_SEL_LCPLL_2700:
+		link_clock = 270000;
+		break;
+	case PORT_CLK_SEL_WRPLL1:
+		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		break;
+	case PORT_CLK_SEL_SPLL:
+		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+		if (pll == SPLL_PLL_FREQ_810MHz)
+			link_clock = 81000;
+		else if (pll == SPLL_PLL_FREQ_1350MHz)
+			link_clock = 135000;
+		else if (pll == SPLL_PLL_FREQ_2700MHz)
+			link_clock = 270000;
+		else {
+			WARN(1, "bad spll freq\n");
+			return;
+		}
+		break;
+	default:
+		WARN(1, "bad port clock sel\n");
+		return;
+	}
+
+	pipe_config->port_clock = link_clock * 2;
+
+	if (pipe_config->has_pch_encoder)
+		pipe_config->adjusted_mode.crtc_clock =
+			intel_dotclock_calculate(pipe_config->port_clock,
+						 &pipe_config->fdi_m_n);
+	else if (pipe_config->has_dp_encoder)
+		pipe_config->adjusted_mode.crtc_clock =
+			intel_dotclock_calculate(pipe_config->port_clock,
+						 &pipe_config->dp_m_n);
+	else
+		pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
 static void
 intel_ddi_calculate_wrpll(int clock /* in Hz */,
 			  unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
@@ -1017,8 +1108,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	enum pipe pipe = 0;
 	enum transcoder cpu_transcoder;
+	enum intel_display_power_domain power_domain;
 	uint32_t tmp;
 
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
 		return false;
 
@@ -1054,9 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_ddi_get_encoder_port(encoder);
+	enum intel_display_power_domain power_domain;
 	u32 tmp;
 	int i;
 
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	tmp = I915_READ(DDI_BUF_CTL(port));
 
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
@@ -1200,7 +1301,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		ironlake_edp_panel_on(intel_dp);
+		intel_edp_panel_on(intel_dp);
 	}
 
 	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1244,8 +1345,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
-		ironlake_edp_panel_vdd_on(intel_dp);
-		ironlake_edp_panel_off(intel_dp);
+		intel_edp_panel_vdd_on(intel_dp);
+		intel_edp_panel_off(intel_dp);
 	}
 
 	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
@@ -1280,7 +1381,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 		if (port == PORT_A)
 			intel_dp_stop_link_train(intel_dp);
 
-		ironlake_edp_backlight_on(intel_dp);
+		intel_edp_backlight_on(intel_dp);
 		intel_edp_psr_enable(intel_dp);
 	}
 
@@ -1313,7 +1414,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		intel_edp_psr_disable(intel_dp);
-		ironlake_edp_backlight_off(intel_dp);
+		intel_edp_backlight_off(intel_dp);
 	}
 }
 
@@ -1325,7 +1426,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 
 	if (lcpll & LCPLL_CD_SOURCE_FCLK) {
 		return 800000;
-	} else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
+	} else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
 		return 450000;
 	} else if (freq == LCPLL_CLK_FREQ_450) {
 		return 450000;
@@ -1510,6 +1611,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
 	}
+
+	intel_ddi_clock_get(encoder, pipe_config);
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1620,7 +1723,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	intel_encoder->cloneable = false;
+	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
 	if (init_dp)
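[Editor's note] The fixed-point divider math in the new intel_ddi_calc_wrpll_link() above can be sanity-checked in isolation. A standalone sketch of the expression; the divider values here are made up purely for illustration:

	/* Standalone check of the readout expression used by
	 * intel_ddi_calc_wrpll_link() above; register fields are invented. */
	#include <stdio.h>

	int main(void)
	{
		int refclk = 135;           /* SSC/non-SSC reference, per the switch */
		int n = 40, p = 20, r = 20; /* hypothetical raw divider fields */

		/* p and r carry a fixed-point portion, hence the factor of 100 */
		int link_clock = (refclk * n * 100) / (p * r);

		printf("link clock %d, port clock %d (x2 for DDI)\n",
		       link_clock, link_clock * 2);
		return 0;
	}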
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9b8a7c7ea7fc..dae976f51d83 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -51,7 +51,10 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			  int x, int y, struct drm_framebuffer *old_fb);
-
+static int intel_framebuffer_init(struct drm_device *dev,
+				  struct intel_framebuffer *ifb,
+				  struct drm_mode_fb_cmd2 *mode_cmd,
+				  struct drm_i915_gem_object *obj);
 
 typedef struct {
 	int min, max;
@@ -738,10 +741,10 @@ bool intel_crtc_active(struct drm_crtc *crtc)
 	 * We can ditch the adjusted_mode.crtc_clock check as soon
 	 * as Haswell has gained clock readout/fastboot support.
 	 *
-	 * We can ditch the crtc->fb check as soon as we can
+	 * We can ditch the crtc->primary->fb check as soon as we can
 	 * properly reconstruct framebuffers.
 	 */
-	return intel_crtc->active && crtc->fb &&
+	return intel_crtc->active && crtc->primary->fb &&
 		intel_crtc->config.adjusted_mode.crtc_clock;
 }
 
@@ -1030,7 +1033,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	/* ILK FDI PLL is always enabled */
-	if (dev_priv->info->gen == 5)
+	if (INTEL_INFO(dev_priv->dev)->gen == 5)
 		return;
 
 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1119,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
 		state = true;
 
-	if (!intel_display_power_enabled(dev_priv->dev,
+	if (!intel_display_power_enabled(dev_priv,
 				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
 		cur_state = false;
 	} else {
@@ -1163,7 +1166,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 	if (INTEL_INFO(dev)->gen >= 4) {
 		reg = DSPCNTR(pipe);
 		val = I915_READ(reg);
-		WARN((val & DISPLAY_PLANE_ENABLE),
+		WARN(val & DISPLAY_PLANE_ENABLE,
 		     "plane %c assertion failure, should be disabled but not\n",
 		     plane_name(pipe));
 		return;
@@ -1185,27 +1188,27 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
 				   enum pipe pipe)
 {
 	struct drm_device *dev = dev_priv->dev;
-	int reg, i;
+	int reg, sprite;
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev)) {
-		for (i = 0; i < dev_priv->num_plane; i++) {
-			reg = SPCNTR(pipe, i);
+		for_each_sprite(pipe, sprite) {
+			reg = SPCNTR(pipe, sprite);
 			val = I915_READ(reg);
-			WARN((val & SP_ENABLE),
+			WARN(val & SP_ENABLE,
 			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-			     sprite_name(pipe, i), pipe_name(pipe));
+			     sprite_name(pipe, sprite), pipe_name(pipe));
 		}
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		reg = SPRCTL(pipe);
 		val = I915_READ(reg);
-		WARN((val & SPRITE_ENABLE),
+		WARN(val & SPRITE_ENABLE,
 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
 		     plane_name(pipe), pipe_name(pipe));
 	} else if (INTEL_INFO(dev)->gen >= 5) {
 		reg = DVSCNTR(pipe);
 		val = I915_READ(reg);
-		WARN((val & DVS_ENABLE),
+		WARN(val & DVS_ENABLE,
 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
 		     plane_name(pipe), pipe_name(pipe));
 	}
@@ -1443,7 +1446,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
 	/* No really, not for ILK+ */
-	BUG_ON(dev_priv->info->gen >= 5);
+	BUG_ON(INTEL_INFO(dev)->gen >= 5);
 
 	/* PLL is protected by panel, make sure we can write it */
 	if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -1549,11 +1552,12 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  */
 static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
 	/* PCH PLLs only available on ILK, SNB and IVB */
-	BUG_ON(dev_priv->info->gen < 5);
+	BUG_ON(INTEL_INFO(dev)->gen < 5);
 	if (WARN_ON(pll == NULL))
 		return;
 
@@ -1578,11 +1582,12 @@ static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
 
 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
 	/* PCH only available on ILK+ */
-	BUG_ON(dev_priv->info->gen < 5);
+	BUG_ON(INTEL_INFO(dev)->gen < 5);
 	if (WARN_ON(pll == NULL))
 		return;
 
@@ -1617,7 +1622,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	uint32_t reg, val, pipeconf_val;
 
 	/* PCH only available on ILK+ */
-	BUG_ON(dev_priv->info->gen < 5);
+	BUG_ON(INTEL_INFO(dev)->gen < 5);
 
 	/* Make sure PCH DPLL is enabled */
 	assert_shared_dpll_enabled(dev_priv,
@@ -1670,7 +1675,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	u32 val, pipeconf_val;
 
 	/* PCH only available on ILK+ */
-	BUG_ON(dev_priv->info->gen < 5);
+	BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@ -1744,21 +1749,16 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 
 /**
  * intel_enable_pipe - enable a pipe, asserting requirements
- * @dev_priv: i915 private structure
- * @pipe: pipe to enable
- * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ * @crtc: crtc responsible for the pipe
  *
- * Enable @pipe, making sure that various hardware specific requirements
+ * Enable @crtc's pipe, making sure that various hardware specific requirements
  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
- *
- * @pipe should be %PIPE_A or %PIPE_B.
- *
- * Will wait until the pipe is actually running (i.e. first vblank) before
- * returning.
  */
-static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-			      bool pch_port, bool dsi)
+static void intel_enable_pipe(struct intel_crtc *crtc)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								       pipe);
 	enum pipe pch_transcoder;
@@ -1780,12 +1780,12 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 	 * need the check.
 	 */
 	if (!HAS_PCH_SPLIT(dev_priv->dev))
-		if (dsi)
+		if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
 	else {
-		if (pch_port) {
+		if (crtc->config.has_pch_encoder) {
 			/* if driving the PCH, we need FDI enabled */
 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
 			assert_fdi_tx_pll_enabled(dev_priv,
@@ -1796,11 +1796,24 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
-	if (val & PIPECONF_ENABLE)
+	if (val & PIPECONF_ENABLE) {
+		WARN_ON(!(pipe == PIPE_A &&
+			  dev_priv->quirks & QUIRK_PIPEA_FORCE));
 		return;
+	}
 
 	I915_WRITE(reg, val | PIPECONF_ENABLE);
-	intel_wait_for_vblank(dev_priv->dev, pipe);
+	POSTING_READ(reg);
+
+	/*
+	 * There's no guarantee the pipe will really start running now. It
+	 * depends on the Gen, the output type and the relative order between
+	 * pipe and plane enabling. Avoid waiting on HSW+ since it's not
+	 * necessary.
+	 * TODO: audit the previous gens.
+	 */
+	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
 /**
@@ -1851,22 +1864,23 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 			       enum plane plane)
 {
-	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+	struct drm_device *dev = dev_priv->dev;
+	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 
 	I915_WRITE(reg, I915_READ(reg));
 	POSTING_READ(reg);
 }
 
 /**
- * intel_enable_primary_plane - enable the primary plane on a given pipe
+ * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
  * @dev_priv: i915 private structure
  * @plane: plane to enable
  * @pipe: pipe being fed
  *
  * Enable @plane on @pipe, making sure that @pipe is running first.
  */
-static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
-				       enum plane plane, enum pipe pipe)
+static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
+					  enum plane plane, enum pipe pipe)
 {
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -1891,15 +1905,15 @@ static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
 }
 
 /**
- * intel_disable_primary_plane - disable the primary plane
+ * intel_disable_primary_hw_plane - disable the primary hardware plane
  * @dev_priv: i915 private structure
  * @plane: plane to disable
  * @pipe: pipe consuming the data
  *
  * Disable @plane; should be an independent operation.
  */
-static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
-					enum plane plane, enum pipe pipe)
+static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
+					   enum plane plane, enum pipe pipe)
 {
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -1929,6 +1943,14 @@ static bool need_vtd_wa(struct drm_device *dev)
 	return false;
 }
 
+static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+{
+	int tile_height;
+
+	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+	return ALIGN(height, tile_height);
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2025,8 +2047,114 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
 	}
 }
 
-static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			     int x, int y)
+int intel_format_to_fourcc(int format)
+{
+	switch (format) {
+	case DISPPLANE_8BPP:
+		return DRM_FORMAT_C8;
+	case DISPPLANE_BGRX555:
+		return DRM_FORMAT_XRGB1555;
+	case DISPPLANE_BGRX565:
+		return DRM_FORMAT_RGB565;
+	default:
+	case DISPPLANE_BGRX888:
+		return DRM_FORMAT_XRGB8888;
+	case DISPPLANE_RGBX888:
+		return DRM_FORMAT_XBGR8888;
+	case DISPPLANE_BGRX101010:
+		return DRM_FORMAT_XRGB2101010;
+	case DISPPLANE_RGBX101010:
+		return DRM_FORMAT_XBGR2101010;
+	}
+}
+
+static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
+				  struct intel_plane_config *plane_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_gem_object *obj = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	u32 base = plane_config->base;
+
+	if (plane_config->size == 0)
+		return false;
+
+	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
+							     plane_config->size);
+	if (!obj)
+		return false;
+
+	if (plane_config->tiled) {
+		obj->tiling_mode = I915_TILING_X;
+		obj->stride = crtc->base.primary->fb->pitches[0];
+	}
+
+	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
+	mode_cmd.width = crtc->base.primary->fb->width;
+	mode_cmd.height = crtc->base.primary->fb->height;
+	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+				   &mode_cmd, obj)) {
+		DRM_DEBUG_KMS("intel fb init failed\n");
+		goto out_unref_obj;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+	return true;
+
+out_unref_obj:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	return false;
+}
+
+static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
+				 struct intel_plane_config *plane_config)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_crtc *c;
+	struct intel_crtc *i;
+	struct intel_framebuffer *fb;
+
+	if (!intel_crtc->base.primary->fb)
+		return;
+
+	if (intel_alloc_plane_obj(intel_crtc, plane_config))
+		return;
+
+	kfree(intel_crtc->base.primary->fb);
+	intel_crtc->base.primary->fb = NULL;
+
+	/*
+	 * Failed to alloc the obj, check to see if we should share
+	 * an fb with another CRTC instead
+	 */
+	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+		i = to_intel_crtc(c);
+
+		if (c == &intel_crtc->base)
+			continue;
+
+		if (!i->active || !c->primary->fb)
+			continue;
+
+		fb = to_intel_framebuffer(c->primary->fb);
+		if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+			drm_framebuffer_reference(c->primary->fb);
+			intel_crtc->base.primary->fb = c->primary->fb;
+			break;
+		}
+	}
+}
+
+static int i9xx_update_primary_plane(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int x, int y)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2125,8 +2253,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return 0;
 }
 
-static int ironlake_update_plane(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb, int x, int y)
+static int ironlake_update_primary_plane(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2230,7 +2359,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		dev_priv->display.disable_fbc(dev);
 	intel_increase_pllclock(crtc);
 
-	return dev_priv->display.update_plane(crtc, fb, x, y);
+	return dev_priv->display.update_primary_plane(crtc, fb, x, y);
 }
 
 void intel_display_handle_reset(struct drm_device *dev)
@@ -2267,11 +2396,13 @@ void intel_display_handle_reset(struct drm_device *dev)
 		/*
 		 * FIXME: Once we have proper support for primary planes (and
 		 * disabling them without disabling the entire crtc) allow again
-		 * a NULL crtc->fb.
+		 * a NULL crtc->primary->fb.
 		 */
-		if (intel_crtc->active && crtc->fb)
-			dev_priv->display.update_plane(crtc, crtc->fb,
-						       crtc->x, crtc->y);
+		if (intel_crtc->active && crtc->primary->fb)
+			dev_priv->display.update_primary_plane(crtc,
+							       crtc->primary->fb,
+							       crtc->x,
+							       crtc->y);
 		mutex_unlock(&crtc->mutex);
 	}
 }
@@ -2299,31 +2430,23 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
 	return ret;
 }
 
-static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_master_private *master_priv;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long flags;
+	bool pending;
 
-	if (!dev->primary->master)
-		return;
+	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+		return false;
 
-	master_priv = dev->primary->master->driver_priv;
-	if (!master_priv->sarea_priv)
-		return;
+	spin_lock_irqsave(&dev->event_lock, flags);
+	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	switch (intel_crtc->pipe) {
-	case 0:
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-		break;
-	case 1:
-		master_priv->sarea_priv->pipeB_x = x;
-		master_priv->sarea_priv->pipeB_y = y;
-		break;
-	default:
-		break;
-	}
+	return pending;
 }
 
 static int
@@ -2336,6 +2459,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_framebuffer *old_fb;
 	int ret;
 
+	if (intel_crtc_has_pending_flip(crtc)) {
+		DRM_ERROR("pipe is still busy with an old pageflip\n");
+		return -EBUSY;
+	}
+
 	/* no fb bound */
 	if (!fb) {
 		DRM_ERROR("No FB bound\n");
@@ -2353,8 +2481,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	ret = intel_pin_and_fence_fb_obj(dev,
 					 to_intel_framebuffer(fb)->obj,
 					 NULL);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret != 0) {
-		mutex_unlock(&dev->struct_mutex);
 		DRM_ERROR("pin & fence failed\n");
 		return ret;
 	}
@@ -2372,7 +2500,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	 * whether the platform allows pfit disable with pipe active, and only
 	 * then update the pipesrc and pfit state, even on the flip path.
 	 */
-	if (i915_fastboot) {
+	if (i915.fastboot) {
 		const struct drm_display_mode *adjusted_mode =
 			&intel_crtc->config.adjusted_mode;
 
@@ -2390,31 +2518,33 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
 	}
 
-	ret = dev_priv->display.update_plane(crtc, fb, x, y);
+	ret = dev_priv->display.update_primary_plane(crtc, fb, x, y);
 	if (ret) {
+		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 		DRM_ERROR("failed to update base address\n");
 		return ret;
 	}
 
-	old_fb = crtc->fb;
-	crtc->fb = fb;
+	old_fb = crtc->primary->fb;
+	crtc->primary->fb = fb;
 	crtc->x = x;
 	crtc->y = y;
 
 	if (old_fb) {
 		if (intel_crtc->active && old_fb != fb)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
+		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
 	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_crtc_update_sarea_pos(crtc, x, y);
-
 	return 0;
 }
 
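[Editor's note] The intel_pipe_set_base() hunk above also narrows the struct_mutex scope: the lock is dropped before the plane update and re-taken only around pin/unpin work. A standalone sketch of that shape, with stub names (not the driver's API):

	struct mutex { int dummy; };                 /* stand-ins, not kernel types */
	static void mutex_lock(struct mutex *m)   { (void)m; }
	static void mutex_unlock(struct mutex *m) { (void)m; }
	static int  pin_and_fence(void *obj)      { (void)obj; return 0; }
	static int  update_plane(void *fb)        { (void)fb;  return 0; }
	static void unpin(void *obj)              { (void)obj; }

	static int set_base_sketch(struct mutex *lock, void *new_obj, void *old_obj)
	{
		int ret;

		mutex_lock(lock);
		ret = pin_and_fence(new_obj);
		mutex_unlock(lock);              /* dropped before the plane update */
		if (ret)
			return ret;

		ret = update_plane(new_obj);     /* runs without the lock held */

		mutex_lock(lock);                /* re-taken only to unpin the old fb */
		unpin(old_obj);
		mutex_unlock(lock);
		return ret;
	}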
@@ -2963,25 +3093,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	udelay(100);
 }
 
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long flags;
-	bool pending;
-
-	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-		return false;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	pending = to_intel_crtc(crtc)->unpin_work != NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	return pending;
-}
-
 bool intel_has_pending_fb_unpin(struct drm_device *dev)
 {
 	struct intel_crtc *crtc;
@@ -3011,7 +3122,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (crtc->fb == NULL)
+	if (crtc->primary->fb == NULL)
 		return;
 
 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3020,7 +3131,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3020 !intel_crtc_has_pending_flip(crtc)); 3131 !intel_crtc_has_pending_flip(crtc));
3021 3132
3022 mutex_lock(&dev->struct_mutex); 3133 mutex_lock(&dev->struct_mutex);
3023 intel_finish_fb(crtc->fb); 3134 intel_finish_fb(crtc->primary->fb);
3024 mutex_unlock(&dev->struct_mutex); 3135 mutex_unlock(&dev->struct_mutex);
3025} 3136}
3026 3137
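
[Annotation] The helper deleted above is presumably relocated rather than dropped, since the wait loop in intel_crtc_wait_for_pending_flips() still calls it. Its job is to sample unpin_work under the event spinlock, bailing out early when a GPU reset is in progress (a reset completes flips on the driver's behalf). A compact model of that read-under-lock pattern, with hypothetical names:

        #include <pthread.h>
        #include <stdbool.h>

        struct crtc {
                pthread_mutex_t event_lock;
                void *unpin_work;           /* non-NULL while a flip is queued */
        };

        static bool has_pending_flip(struct crtc *c, bool reset_in_progress)
        {
                bool pending;

                if (reset_in_progress)
                        return false;       /* reset will complete the flip */

                pthread_mutex_lock(&c->event_lock);
                pending = c->unpin_work != NULL;
                pthread_mutex_unlock(&c->event_lock);
                return pending;
        }
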
@@ -3425,22 +3536,28 @@ static void intel_enable_planes(struct drm_crtc *crtc)
3425{ 3536{
3426 struct drm_device *dev = crtc->dev; 3537 struct drm_device *dev = crtc->dev;
3427 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3538 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3539 struct drm_plane *plane;
3428 struct intel_plane *intel_plane; 3540 struct intel_plane *intel_plane;
3429 3541
3430 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) 3542 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3543 intel_plane = to_intel_plane(plane);
3431 if (intel_plane->pipe == pipe) 3544 if (intel_plane->pipe == pipe)
3432 intel_plane_restore(&intel_plane->base); 3545 intel_plane_restore(&intel_plane->base);
3546 }
3433} 3547}
3434 3548
3435static void intel_disable_planes(struct drm_crtc *crtc) 3549static void intel_disable_planes(struct drm_crtc *crtc)
3436{ 3550{
3437 struct drm_device *dev = crtc->dev; 3551 struct drm_device *dev = crtc->dev;
3438 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3552 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3553 struct drm_plane *plane;
3439 struct intel_plane *intel_plane; 3554 struct intel_plane *intel_plane;
3440 3555
3441 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) 3556 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3557 intel_plane = to_intel_plane(plane);
3442 if (intel_plane->pipe == pipe) 3558 if (intel_plane->pipe == pipe)
3443 intel_plane_disable(&intel_plane->base); 3559 intel_plane_disable(&intel_plane->base);
3560 }
3444} 3561}
3445 3562
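
[Annotation] With primary planes now living on the same mode_config plane list, these helpers switch to drm_for_each_legacy_plane(), which walks the list but visits only the traditional overlay/sprite planes. A small model of that filtered walk (the plane-type enum and names here are illustrative, not the DRM core's types):

        #include <stdio.h>

        enum plane_type { PLANE_OVERLAY, PLANE_PRIMARY, PLANE_CURSOR };
        struct plane { enum plane_type type; int pipe; };

        static void restore_overlay_planes(struct plane *p, int n, int pipe)
        {
                for (int i = 0; i < n; i++) {
                        if (p[i].type != PLANE_OVERLAY)
                                continue;   /* legacy walk: overlays only */
                        if (p[i].pipe == pipe)
                                printf("restore plane %d\n", i);
                }
        }
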
3446void hsw_enable_ips(struct intel_crtc *crtc) 3563void hsw_enable_ips(struct intel_crtc *crtc)
@@ -3587,9 +3704,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3587 intel_crtc_load_lut(crtc); 3704 intel_crtc_load_lut(crtc);
3588 3705
3589 intel_update_watermarks(crtc); 3706 intel_update_watermarks(crtc);
3590 intel_enable_pipe(dev_priv, pipe, 3707 intel_enable_pipe(intel_crtc);
3591 intel_crtc->config.has_pch_encoder, false); 3708 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3592 intel_enable_primary_plane(dev_priv, plane, pipe);
3593 intel_enable_planes(crtc); 3709 intel_enable_planes(crtc);
3594 intel_crtc_update_cursor(crtc, true); 3710 intel_crtc_update_cursor(crtc, true);
3595 3711
@@ -3631,7 +3747,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3631 int pipe = intel_crtc->pipe; 3747 int pipe = intel_crtc->pipe;
3632 int plane = intel_crtc->plane; 3748 int plane = intel_crtc->plane;
3633 3749
3634 intel_enable_primary_plane(dev_priv, plane, pipe); 3750 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3635 intel_enable_planes(crtc); 3751 intel_enable_planes(crtc);
3636 intel_crtc_update_cursor(crtc, true); 3752 intel_crtc_update_cursor(crtc, true);
3637 3753
@@ -3661,7 +3777,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3661 3777
3662 intel_crtc_update_cursor(crtc, false); 3778 intel_crtc_update_cursor(crtc, false);
3663 intel_disable_planes(crtc); 3779 intel_disable_planes(crtc);
3664 intel_disable_primary_plane(dev_priv, plane, pipe); 3780 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3665} 3781}
3666 3782
3667/* 3783/*
@@ -3733,8 +3849,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3733 intel_ddi_enable_transcoder_func(crtc); 3849 intel_ddi_enable_transcoder_func(crtc);
3734 3850
3735 intel_update_watermarks(crtc); 3851 intel_update_watermarks(crtc);
3736 intel_enable_pipe(dev_priv, pipe, 3852 intel_enable_pipe(intel_crtc);
3737 intel_crtc->config.has_pch_encoder, false);
3738 3853
3739 if (intel_crtc->config.has_pch_encoder) 3854 if (intel_crtc->config.has_pch_encoder)
3740 lpt_pch_enable(crtc); 3855 lpt_pch_enable(crtc);
@@ -3748,16 +3863,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3748 * to change the workaround. */ 3863 * to change the workaround. */
3749 haswell_mode_set_planes_workaround(intel_crtc); 3864 haswell_mode_set_planes_workaround(intel_crtc);
3750 haswell_crtc_enable_planes(crtc); 3865 haswell_crtc_enable_planes(crtc);
3751
3752 /*
3753 * There seems to be a race in PCH platform hw (at least on some
3754 * outputs) where an enabled pipe still completes any pageflip right
3755 * away (as if the pipe is off) instead of waiting for vblank. As soon
3756 * as the first vblank happened, everything works as expected. Hence just
3757 * wait for one vblank before returning to avoid strange things
3758 * happening.
3759 */
3760 intel_wait_for_vblank(dev, intel_crtc->pipe);
3761} 3866}
3762 3867
3763static void ironlake_pfit_disable(struct intel_crtc *crtc) 3868static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3800,7 +3905,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3800 3905
3801 intel_crtc_update_cursor(crtc, false); 3906 intel_crtc_update_cursor(crtc, false);
3802 intel_disable_planes(crtc); 3907 intel_disable_planes(crtc);
3803 intel_disable_primary_plane(dev_priv, plane, pipe); 3908 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3804 3909
3805 if (intel_crtc->config.has_pch_encoder) 3910 if (intel_crtc->config.has_pch_encoder)
3806 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 3911 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3972,6 +4077,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
3972 I915_WRITE(BCLRPAT(crtc->pipe), 0); 4077 I915_WRITE(BCLRPAT(crtc->pipe), 0);
3973} 4078}
3974 4079
4080#define for_each_power_domain(domain, mask) \
4081 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
4082 if ((1 << (domain)) & (mask))
4083
4084enum intel_display_power_domain
4085intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4086{
4087 struct drm_device *dev = intel_encoder->base.dev;
4088 struct intel_digital_port *intel_dig_port;
4089
4090 switch (intel_encoder->type) {
4091 case INTEL_OUTPUT_UNKNOWN:
4092 /* Only DDI platforms should ever use this output type */
4093 WARN_ON_ONCE(!HAS_DDI(dev));
4094 case INTEL_OUTPUT_DISPLAYPORT:
4095 case INTEL_OUTPUT_HDMI:
4096 case INTEL_OUTPUT_EDP:
4097 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4098 switch (intel_dig_port->port) {
4099 case PORT_A:
4100 return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4101 case PORT_B:
4102 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4103 case PORT_C:
4104 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4105 case PORT_D:
4106 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4107 default:
4108 WARN_ON_ONCE(1);
4109 return POWER_DOMAIN_PORT_OTHER;
4110 }
4111 case INTEL_OUTPUT_ANALOG:
4112 return POWER_DOMAIN_PORT_CRT;
4113 case INTEL_OUTPUT_DSI:
4114 return POWER_DOMAIN_PORT_DSI;
4115 default:
4116 return POWER_DOMAIN_PORT_OTHER;
4117 }
4118}
4119
4120static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4121{
4122 struct drm_device *dev = crtc->dev;
4123 struct intel_encoder *intel_encoder;
4124 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4125 enum pipe pipe = intel_crtc->pipe;
4126 bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
4127 unsigned long mask;
4128 enum transcoder transcoder;
4129
4130 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4131
4132 mask = BIT(POWER_DOMAIN_PIPE(pipe));
4133 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4134 if (pfit_enabled)
4135 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4136
4137 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4138 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4139
4140 return mask;
4141}
4142
4143void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4144 bool enable)
4145{
4146 if (dev_priv->power_domains.init_power_on == enable)
4147 return;
4148
4149 if (enable)
4150 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4151 else
4152 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4153
4154 dev_priv->power_domains.init_power_on = enable;
4155}
4156
4157static void modeset_update_crtc_power_domains(struct drm_device *dev)
4158{
4159 struct drm_i915_private *dev_priv = dev->dev_private;
4160 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4161 struct intel_crtc *crtc;
4162
4163 /*
4164 * First get all needed power domains, then put all unneeded, to avoid
4165 * any unnecessary toggling of the power wells.
4166 */
4167 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
4168 enum intel_display_power_domain domain;
4169
4170 if (!crtc->base.enabled)
4171 continue;
4172
4173 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4174
4175 for_each_power_domain(domain, pipe_domains[crtc->pipe])
4176 intel_display_power_get(dev_priv, domain);
4177 }
4178
4179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
4180 enum intel_display_power_domain domain;
4181
4182 for_each_power_domain(domain, crtc->enabled_power_domains)
4183 intel_display_power_put(dev_priv, domain);
4184
4185 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4186 }
4187
4188 intel_display_set_init_power(dev_priv, false);
4189}
4190
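
[Annotation] As the comment in the new code says, modeset_update_crtc_power_domains() grabs every domain needed by the new state before releasing the domains of the old state, so a power well that remains in use never sees its refcount touch zero in between. A toy refcounting model of that get-new-then-put-old ordering (names hypothetical):

        #include <assert.h>

        static int well_count[8];

        static void power_get(int d) { well_count[d]++; }
        static void power_put(int d) { assert(well_count[d] > 0); well_count[d]--; }

        static void update_domains(unsigned old_mask, unsigned new_mask)
        {
                for (int d = 0; d < 8; d++)     /* get new first ... */
                        if (new_mask & (1u << d))
                                power_get(d);
                for (int d = 0; d < 8; d++)     /* ... then put old */
                        if (old_mask & (1u << d))
                                power_put(d);
                /* a domain present in both masks never dropped to zero,
                 * so its power well was never toggled off and back on */
        }
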
3975int valleyview_get_vco(struct drm_i915_private *dev_priv) 4191int valleyview_get_vco(struct drm_i915_private *dev_priv)
3976{ 4192{
3977 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 4193 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4088,9 +4304,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4088 /* Looks like the 200MHz CDclk freq doesn't work on some configs */ 4304 /* Looks like the 200MHz CDclk freq doesn't work on some configs */
4089} 4305}
4090 4306
4091static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, 4307/* compute the max pixel clock for new configuration */
4092 unsigned modeset_pipes, 4308static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4093 struct intel_crtc_config *pipe_config)
4094{ 4309{
4095 struct drm_device *dev = dev_priv->dev; 4310 struct drm_device *dev = dev_priv->dev;
4096 struct intel_crtc *intel_crtc; 4311 struct intel_crtc *intel_crtc;
@@ -4098,31 +4313,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
4098 4313
4099 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4314 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4100 base.head) { 4315 base.head) {
4101 if (modeset_pipes & (1 << intel_crtc->pipe)) 4316 if (intel_crtc->new_enabled)
4102 max_pixclk = max(max_pixclk,
4103 pipe_config->adjusted_mode.crtc_clock);
4104 else if (intel_crtc->base.enabled)
4105 max_pixclk = max(max_pixclk, 4317 max_pixclk = max(max_pixclk,
4106 intel_crtc->config.adjusted_mode.crtc_clock); 4318 intel_crtc->new_config->adjusted_mode.crtc_clock);
4107 } 4319 }
4108 4320
4109 return max_pixclk; 4321 return max_pixclk;
4110} 4322}
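
[Annotation] With the modeset_pipes/pipe_config plumbing gone, the helper simply scans the staged state: every crtc that will be enabled contributes its adjusted-mode clock and the maximum wins. A minimal model of that scan (names hypothetical):

        #include <stdbool.h>

        static int max_pixclk(const int *clock, const bool *enabled, int n)
        {
                int max = 0;

                for (int i = 0; i < n; i++)
                        if (enabled[i] && clock[i] > max)
                                max = clock[i];
                return max;
        }
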
4111 4323
4112static void valleyview_modeset_global_pipes(struct drm_device *dev, 4324static void valleyview_modeset_global_pipes(struct drm_device *dev,
4113 unsigned *prepare_pipes, 4325 unsigned *prepare_pipes)
4114 unsigned modeset_pipes,
4115 struct intel_crtc_config *pipe_config)
4116{ 4326{
4117 struct drm_i915_private *dev_priv = dev->dev_private; 4327 struct drm_i915_private *dev_priv = dev->dev_private;
4118 struct intel_crtc *intel_crtc; 4328 struct intel_crtc *intel_crtc;
4119 int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes, 4329 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4120 pipe_config);
4121 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4330 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4122 4331
4123 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) 4332 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
4124 return; 4333 return;
4125 4334
4335 /* disable/enable all currently active pipes while we change cdclk */
4126 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4336 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4127 base.head) 4337 base.head)
4128 if (intel_crtc->base.enabled) 4338 if (intel_crtc->base.enabled)
@@ -4132,12 +4342,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
4132static void valleyview_modeset_global_resources(struct drm_device *dev) 4342static void valleyview_modeset_global_resources(struct drm_device *dev)
4133{ 4343{
4134 struct drm_i915_private *dev_priv = dev->dev_private; 4344 struct drm_i915_private *dev_priv = dev->dev_private;
4135 int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL); 4345 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4136 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4346 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4137 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4347 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4138 4348
4139 if (req_cdclk != cur_cdclk) 4349 if (req_cdclk != cur_cdclk)
4140 valleyview_set_cdclk(dev, req_cdclk); 4350 valleyview_set_cdclk(dev, req_cdclk);
4351 modeset_update_crtc_power_domains(dev);
4141} 4352}
4142 4353
4143static void valleyview_crtc_enable(struct drm_crtc *crtc) 4354static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -4175,8 +4386,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4175 intel_crtc_load_lut(crtc); 4386 intel_crtc_load_lut(crtc);
4176 4387
4177 intel_update_watermarks(crtc); 4388 intel_update_watermarks(crtc);
4178 intel_enable_pipe(dev_priv, pipe, false, is_dsi); 4389 intel_enable_pipe(intel_crtc);
4179 intel_enable_primary_plane(dev_priv, plane, pipe); 4390 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4391 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
4180 intel_enable_planes(crtc); 4392 intel_enable_planes(crtc);
4181 intel_crtc_update_cursor(crtc, true); 4393 intel_crtc_update_cursor(crtc, true);
4182 4394
@@ -4213,8 +4425,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4213 intel_crtc_load_lut(crtc); 4425 intel_crtc_load_lut(crtc);
4214 4426
4215 intel_update_watermarks(crtc); 4427 intel_update_watermarks(crtc);
4216 intel_enable_pipe(dev_priv, pipe, false, false); 4428 intel_enable_pipe(intel_crtc);
4217 intel_enable_primary_plane(dev_priv, plane, pipe); 4429 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4430 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
4218 intel_enable_planes(crtc); 4431 intel_enable_planes(crtc);
4219 /* The fixup needs to happen before cursor is enabled */ 4432 /* The fixup needs to happen before cursor is enabled */
4220 if (IS_G4X(dev)) 4433 if (IS_G4X(dev))
@@ -4270,8 +4483,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4270 intel_crtc_dpms_overlay(intel_crtc, false); 4483 intel_crtc_dpms_overlay(intel_crtc, false);
4271 intel_crtc_update_cursor(crtc, false); 4484 intel_crtc_update_cursor(crtc, false);
4272 intel_disable_planes(crtc); 4485 intel_disable_planes(crtc);
4273 intel_disable_primary_plane(dev_priv, plane, pipe); 4486 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
4274 4487
4488 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4275 intel_disable_pipe(dev_priv, pipe); 4489 intel_disable_pipe(dev_priv, pipe);
4276 4490
4277 i9xx_pfit_disable(intel_crtc); 4491 i9xx_pfit_disable(intel_crtc);
@@ -4365,11 +4579,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
4365 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); 4579 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
4366 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 4580 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4367 4581
4368 if (crtc->fb) { 4582 if (crtc->primary->fb) {
4369 mutex_lock(&dev->struct_mutex); 4583 mutex_lock(&dev->struct_mutex);
4370 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 4584 intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
4371 mutex_unlock(&dev->struct_mutex); 4585 mutex_unlock(&dev->struct_mutex);
4372 crtc->fb = NULL; 4586 crtc->primary->fb = NULL;
4373 } 4587 }
4374 4588
4375 /* Update computed state. */ 4589 /* Update computed state. */
@@ -4583,7 +4797,7 @@ retry:
4583static void hsw_compute_ips_config(struct intel_crtc *crtc, 4797static void hsw_compute_ips_config(struct intel_crtc *crtc,
4584 struct intel_crtc_config *pipe_config) 4798 struct intel_crtc_config *pipe_config)
4585{ 4799{
4586 pipe_config->ips_enabled = i915_enable_ips && 4800 pipe_config->ips_enabled = i915.enable_ips &&
4587 hsw_crtc_supports_ips(crtc) && 4801 hsw_crtc_supports_ips(crtc) &&
4588 pipe_config->pipe_bpp <= 24; 4802 pipe_config->pipe_bpp <= 24;
4589} 4803}
@@ -4784,8 +4998,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4784 4998
4785static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4999static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4786{ 5000{
4787 if (i915_panel_use_ssc >= 0) 5001 if (i915.panel_use_ssc >= 0)
4788 return i915_panel_use_ssc != 0; 5002 return i915.panel_use_ssc != 0;
4789 return dev_priv->vbt.lvds_use_ssc 5003 return dev_priv->vbt.lvds_use_ssc
4790 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5004 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4791} 5005}
@@ -4844,7 +5058,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4844 5058
4845 crtc->lowfreq_avail = false; 5059 crtc->lowfreq_avail = false;
4846 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5060 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4847 reduced_clock && i915_powersave) { 5061 reduced_clock && i915.powersave) {
4848 I915_WRITE(FP1(pipe), fp2); 5062 I915_WRITE(FP1(pipe), fp2);
4849 crtc->config.dpll_hw_state.fp1 = fp2; 5063 crtc->config.dpll_hw_state.fp1 = fp2;
4850 crtc->lowfreq_avail = true; 5064 crtc->lowfreq_avail = true;
@@ -5161,21 +5375,26 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5161 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 5375 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5162 struct drm_display_mode *adjusted_mode = 5376 struct drm_display_mode *adjusted_mode =
5163 &intel_crtc->config.adjusted_mode; 5377 &intel_crtc->config.adjusted_mode;
5164 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; 5378 uint32_t crtc_vtotal, crtc_vblank_end;
5379 int vsyncshift = 0;
5165 5380
5166 /* We need to be careful not to change the adjusted mode, for otherwise 5381 /* We need to be careful not to change the adjusted mode, for otherwise
5167 * the hw state checker will get angry at the mismatch. */ 5382 * the hw state checker will get angry at the mismatch. */
5168 crtc_vtotal = adjusted_mode->crtc_vtotal; 5383 crtc_vtotal = adjusted_mode->crtc_vtotal;
5169 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 5384 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5170 5385
5171 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5386 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5172 /* the chip adds 2 halflines automatically */ 5387 /* the chip adds 2 halflines automatically */
5173 crtc_vtotal -= 1; 5388 crtc_vtotal -= 1;
5174 crtc_vblank_end -= 1; 5389 crtc_vblank_end -= 1;
5175 vsyncshift = adjusted_mode->crtc_hsync_start 5390
5176 - adjusted_mode->crtc_htotal / 2; 5391 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5177 } else { 5392 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5178 vsyncshift = 0; 5393 else
5394 vsyncshift = adjusted_mode->crtc_hsync_start -
5395 adjusted_mode->crtc_htotal / 2;
5396 if (vsyncshift < 0)
5397 vsyncshift += adjusted_mode->crtc_htotal;
5179 } 5398 }
5180 5399
5181 if (INTEL_INFO(dev)->gen > 3) 5400 if (INTEL_INFO(dev)->gen > 3)
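
[Annotation] Worked numbers for the interlace branch above, with an illustrative 1080i-like timing of crtc_htotal = 2200 and crtc_hsync_start = 2008: the non-SDVO path gives vsyncshift = 2008 - 2200/2 = 908, while SDVO uses the fixed midpoint (2200 - 1)/2 = 1099. The new wrap-around only matters when hsync_start falls below htotal/2, keeping the result non-negative:

        #include <stdio.h>

        static int vsyncshift(int htotal, int hsync_start, int is_sdvo)
        {
                int v;

                if (is_sdvo)
                        return (htotal - 1) / 2;
                v = hsync_start - htotal / 2;
                if (v < 0)
                        v += htotal;    /* new: wrap into [0, htotal) */
                return v;
        }

        int main(void)
        {
                printf("%d\n", vsyncshift(2200, 2008, 0));  /* 908 */
                printf("%d\n", vsyncshift(2200, 2008, 1));  /* 1099 */
                return 0;
        }
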
@@ -5259,25 +5478,23 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
5259 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; 5478 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5260} 5479}
5261 5480
5262static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, 5481void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5263 struct intel_crtc_config *pipe_config) 5482 struct intel_crtc_config *pipe_config)
5264{ 5483{
5265 struct drm_crtc *crtc = &intel_crtc->base; 5484 mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5266 5485 mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5267 crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; 5486 mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5268 crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal; 5487 mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5269 crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5270 crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5271 5488
5272 crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; 5489 mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5273 crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal; 5490 mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5274 crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; 5491 mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5275 crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; 5492 mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5276 5493
5277 crtc->mode.flags = pipe_config->adjusted_mode.flags; 5494 mode->flags = pipe_config->adjusted_mode.flags;
5278 5495
5279 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock; 5496 mode->clock = pipe_config->adjusted_mode.crtc_clock;
5280 crtc->mode.flags |= pipe_config->adjusted_mode.flags; 5497 mode->flags |= pipe_config->adjusted_mode.flags;
5281} 5498}
5282 5499
5283static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 5500static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -5327,10 +5544,13 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5327 } 5544 }
5328 } 5545 }
5329 5546
5330 if (!IS_GEN2(dev) && 5547 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5331 intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 5548 if (INTEL_INFO(dev)->gen < 4 ||
5332 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5549 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5333 else 5550 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5551 else
5552 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5553 } else
5334 pipeconf |= PIPECONF_PROGRESSIVE; 5554 pipeconf |= PIPECONF_PROGRESSIVE;
5335 5555
5336 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) 5556 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
@@ -5512,6 +5732,67 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5512 pipe_config->port_clock = clock.dot / 5; 5732 pipe_config->port_clock = clock.dot / 5;
5513} 5733}
5514 5734
5735static void i9xx_get_plane_config(struct intel_crtc *crtc,
5736 struct intel_plane_config *plane_config)
5737{
5738 struct drm_device *dev = crtc->base.dev;
5739 struct drm_i915_private *dev_priv = dev->dev_private;
5740 u32 val, base, offset;
5741 int pipe = crtc->pipe, plane = crtc->plane;
5742 int fourcc, pixel_format;
5743 int aligned_height;
5744
5745 crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
5746 if (!crtc->base.primary->fb) {
5747 DRM_DEBUG_KMS("failed to alloc fb\n");
5748 return;
5749 }
5750
5751 val = I915_READ(DSPCNTR(plane));
5752
5753 if (INTEL_INFO(dev)->gen >= 4)
5754 if (val & DISPPLANE_TILED)
5755 plane_config->tiled = true;
5756
5757 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
5758 fourcc = intel_format_to_fourcc(pixel_format);
5759 crtc->base.primary->fb->pixel_format = fourcc;
5760 crtc->base.primary->fb->bits_per_pixel =
5761 drm_format_plane_cpp(fourcc, 0) * 8;
5762
5763 if (INTEL_INFO(dev)->gen >= 4) {
5764 if (plane_config->tiled)
5765 offset = I915_READ(DSPTILEOFF(plane));
5766 else
5767 offset = I915_READ(DSPLINOFF(plane));
5768 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
5769 } else {
5770 base = I915_READ(DSPADDR(plane));
5771 }
5772 plane_config->base = base;
5773
5774 val = I915_READ(PIPESRC(pipe));
5775 crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
5776 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
5777
5778 val = I915_READ(DSPSTRIDE(pipe));
5779 crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
5780
5781 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
5782 plane_config->tiled);
5783
5784 plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
5785 aligned_height, PAGE_SIZE);
5786
5787 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
5788 pipe, plane, crtc->base.primary->fb->width,
5789 crtc->base.primary->fb->height,
5790 crtc->base.primary->fb->bits_per_pixel, base,
5791 crtc->base.primary->fb->pitches[0],
5792 plane_config->size);
5793
5794}
5795
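
[Annotation] The readout above reconstructs the framebuffer geometry straight from the pipe registers: judging by the masks in the code, PIPESRC packs (width - 1) in bits 27:16 and (height - 1) in bits 11:0, and DSPSTRIDE is masked to a 128-byte multiple (0xffffff80). A minimal decode of a made-up register value:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t pipesrc = ((1920u - 1) << 16) | (1080u - 1);

                int width  = ((pipesrc >> 16) & 0xfff) + 1;
                int height = ((pipesrc >> 0) & 0xfff) + 1;
                printf("%dx%d\n", width, height);   /* 1920x1080 */
                return 0;
        }
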
5515static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 5796static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5516 struct intel_crtc_config *pipe_config) 5797 struct intel_crtc_config *pipe_config)
5517{ 5798{
@@ -5519,6 +5800,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5519 struct drm_i915_private *dev_priv = dev->dev_private; 5800 struct drm_i915_private *dev_priv = dev->dev_private;
5520 uint32_t tmp; 5801 uint32_t tmp;
5521 5802
5803 if (!intel_display_power_enabled(dev_priv,
5804 POWER_DOMAIN_PIPE(crtc->pipe)))
5805 return false;
5806
5522 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 5807 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5523 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 5808 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5524 5809
@@ -6180,7 +6465,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6180 * is 2.5%; use 5% for safety's sake. 6465 * is 2.5%; use 5% for safety's sake.
6181 */ 6466 */
6182 u32 bps = target_clock * bpp * 21 / 20; 6467 u32 bps = target_clock * bpp * 21 / 20;
6183 return bps / (link_bw * 8) + 1; 6468 return DIV_ROUND_UP(bps, link_bw * 8);
6184} 6469}
6185 6470
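
[Annotation] The DIV_ROUND_UP conversion above is a genuine fix, not just style: the old form "bps / (link_bw * 8) + 1" rounds up even when bps divides evenly, over-counting by one lane in exactly that case, and matches DIV_ROUND_UP everywhere else. A quick self-contained check:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned bps = 1600, den = 160;     /* bps divides evenly */

                printf("old: %u\n", bps / den + 1);          /* 11 (one too many) */
                printf("new: %u\n", DIV_ROUND_UP(bps, den)); /* 10 */
                return 0;
        }
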
6186static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 6471static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
@@ -6348,7 +6633,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6348 if (intel_crtc->config.has_dp_encoder) 6633 if (intel_crtc->config.has_dp_encoder)
6349 intel_dp_set_m_n(intel_crtc); 6634 intel_dp_set_m_n(intel_crtc);
6350 6635
6351 if (is_lvds && has_reduced_clock && i915_powersave) 6636 if (is_lvds && has_reduced_clock && i915.powersave)
6352 intel_crtc->lowfreq_avail = true; 6637 intel_crtc->lowfreq_avail = true;
6353 else 6638 else
6354 intel_crtc->lowfreq_avail = false; 6639 intel_crtc->lowfreq_avail = false;
@@ -6455,6 +6740,66 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6455 } 6740 }
6456} 6741}
6457 6742
6743static void ironlake_get_plane_config(struct intel_crtc *crtc,
6744 struct intel_plane_config *plane_config)
6745{
6746 struct drm_device *dev = crtc->base.dev;
6747 struct drm_i915_private *dev_priv = dev->dev_private;
6748 u32 val, base, offset;
6749 int pipe = crtc->pipe, plane = crtc->plane;
6750 int fourcc, pixel_format;
6751 int aligned_height;
6752
6753 crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6754 if (!crtc->base.primary->fb) {
6755 DRM_DEBUG_KMS("failed to alloc fb\n");
6756 return;
6757 }
6758
6759 val = I915_READ(DSPCNTR(plane));
6760
6761 if (INTEL_INFO(dev)->gen >= 4)
6762 if (val & DISPPLANE_TILED)
6763 plane_config->tiled = true;
6764
6765 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6766 fourcc = intel_format_to_fourcc(pixel_format);
6767 crtc->base.primary->fb->pixel_format = fourcc;
6768 crtc->base.primary->fb->bits_per_pixel =
6769 drm_format_plane_cpp(fourcc, 0) * 8;
6770
6771 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6772 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6773 offset = I915_READ(DSPOFFSET(plane));
6774 } else {
6775 if (plane_config->tiled)
6776 offset = I915_READ(DSPTILEOFF(plane));
6777 else
6778 offset = I915_READ(DSPLINOFF(plane));
6779 }
6780 plane_config->base = base;
6781
6782 val = I915_READ(PIPESRC(pipe));
6783 crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6784 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6785
6786 val = I915_READ(DSPSTRIDE(pipe));
6787 crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6788
6789 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6790 plane_config->tiled);
6791
6792 plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
6793 aligned_height, PAGE_SIZE);
6794
6795 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6796 pipe, plane, crtc->base.primary->fb->width,
6797 crtc->base.primary->fb->height,
6798 crtc->base.primary->fb->bits_per_pixel, base,
6799 crtc->base.primary->fb->pitches[0],
6800 plane_config->size);
6801}
6802
6458static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 6803static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6459 struct intel_crtc_config *pipe_config) 6804 struct intel_crtc_config *pipe_config)
6460{ 6805{
@@ -6629,6 +6974,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6629static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 6974static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6630{ 6975{
6631 uint32_t val; 6976 uint32_t val;
6977 unsigned long irqflags;
6632 6978
6633 val = I915_READ(LCPLL_CTL); 6979 val = I915_READ(LCPLL_CTL);
6634 6980
@@ -6636,9 +6982,22 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6636 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 6982 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6637 return; 6983 return;
6638 6984
6639 /* Make sure we're not in PC8 state before disabling PC8, otherwise 6985 /*
6640 * we'll hang the machine! */ 6986 * Make sure we're not in PC8 state before disabling PC8, otherwise
6641 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 6987 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
6988 *
6989 * The other problem is that hsw_restore_lcpll() is called as part of
6990 * the runtime PM resume sequence, so we can't just call
6991 * gen6_gt_force_wake_get() because that function calls
6992 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
6993 * while we are on the resume sequence. So to solve this problem we have
6994 * to call special forcewake code that doesn't touch runtime PM and
6995 * doesn't enable the forcewake delayed work.
6996 */
6997 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6998 if (dev_priv->uncore.forcewake_count++ == 0)
6999 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7000 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6642 7001
6643 if (val & LCPLL_POWER_DOWN_ALLOW) { 7002 if (val & LCPLL_POWER_DOWN_ALLOW) {
6644 val &= ~LCPLL_POWER_DOWN_ALLOW; 7003 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6672,26 +7031,45 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6672 DRM_ERROR("Switching back to LCPLL failed\n"); 7031 DRM_ERROR("Switching back to LCPLL failed\n");
6673 } 7032 }
6674 7033
6675 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 7034 /* See the big comment above. */
7035 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7036 if (--dev_priv->uncore.forcewake_count == 0)
7037 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7038 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6676} 7039}
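
[Annotation] The open-coded forcewake sequence above is the usual edge-triggered refcount: the hardware is only poked on the 0->1 and 1->0 transitions, and the spinlock makes the count-and-poke atomic. It is open-coded here precisely so it avoids the runtime PM reference the normal gen6_gt_force_wake_get() would take. A compact model of the same shape (locking primitive and names are illustrative):

        #include <pthread.h>

        static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;
        static int forcewake_count;

        static void fw_get(void (*hw_get)(void))
        {
                pthread_mutex_lock(&uncore_lock);
                if (forcewake_count++ == 0)
                        hw_get();       /* only on the 0 -> 1 edge */
                pthread_mutex_unlock(&uncore_lock);
        }

        static void fw_put(void (*hw_put)(void))
        {
                pthread_mutex_lock(&uncore_lock);
                if (--forcewake_count == 0)
                        hw_put();       /* only on the 1 -> 0 edge */
                pthread_mutex_unlock(&uncore_lock);
        }
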
6677 7040
6678void hsw_enable_pc8_work(struct work_struct *__work) 7041/*
7042 * Package states C8 and deeper are really deep PC states that can only be
7043 * reached when all the devices on the system allow it, so even if the graphics
7044 * device allows PC8+, it doesn't mean the system will actually get to these
7045 * states. Our driver only allows PC8+ when going into runtime PM.
7046 *
7047 * The requirements for PC8+ are that all the outputs are disabled, the power
7048 * well is disabled and most interrupts are disabled, and these are also
7049 * requirements for runtime PM. When these conditions are met, we manually do
7050 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7051 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7052 * hang the machine.
7053 *
7054 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7055 * the state of some registers, so when we come back from PC8+ we need to
7056 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7057 * need to take care of the registers kept by RC6. Notice that this happens even
7058 * if we don't put the device in PCI D3 state (which is what currently happens
7059 * because of the runtime PM support).
7060 *
7061 * For more, read "Display Sequences for Package C8" in the hardware
7062 * documentation.
7063 */
7064void hsw_enable_pc8(struct drm_i915_private *dev_priv)
6679{ 7065{
6680 struct drm_i915_private *dev_priv =
6681 container_of(to_delayed_work(__work), struct drm_i915_private,
6682 pc8.enable_work);
6683 struct drm_device *dev = dev_priv->dev; 7066 struct drm_device *dev = dev_priv->dev;
6684 uint32_t val; 7067 uint32_t val;
6685 7068
6686 WARN_ON(!HAS_PC8(dev)); 7069 WARN_ON(!HAS_PC8(dev));
6687 7070
6688 if (dev_priv->pc8.enabled)
6689 return;
6690
6691 DRM_DEBUG_KMS("Enabling package C8+\n"); 7071 DRM_DEBUG_KMS("Enabling package C8+\n");
6692 7072
6693 dev_priv->pc8.enabled = true;
6694
6695 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 7073 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6696 val = I915_READ(SOUTH_DSPCLK_GATE_D); 7074 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6697 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 7075 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -6699,51 +7077,21 @@ void hsw_enable_pc8_work(struct work_struct *__work)
6699 } 7077 }
6700 7078
6701 lpt_disable_clkout_dp(dev); 7079 lpt_disable_clkout_dp(dev);
6702 hsw_pc8_disable_interrupts(dev); 7080 hsw_runtime_pm_disable_interrupts(dev);
6703 hsw_disable_lcpll(dev_priv, true, true); 7081 hsw_disable_lcpll(dev_priv, true, true);
6704
6705 intel_runtime_pm_put(dev_priv);
6706}
6707
6708static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6709{
6710 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6711 WARN(dev_priv->pc8.disable_count < 1,
6712 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6713
6714 dev_priv->pc8.disable_count--;
6715 if (dev_priv->pc8.disable_count != 0)
6716 return;
6717
6718 schedule_delayed_work(&dev_priv->pc8.enable_work,
6719 msecs_to_jiffies(i915_pc8_timeout));
6720} 7082}
6721 7083
6722static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) 7084void hsw_disable_pc8(struct drm_i915_private *dev_priv)
6723{ 7085{
6724 struct drm_device *dev = dev_priv->dev; 7086 struct drm_device *dev = dev_priv->dev;
6725 uint32_t val; 7087 uint32_t val;
6726 7088
6727 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6728 WARN(dev_priv->pc8.disable_count < 0,
6729 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6730
6731 dev_priv->pc8.disable_count++;
6732 if (dev_priv->pc8.disable_count != 1)
6733 return;
6734
6735 WARN_ON(!HAS_PC8(dev)); 7089 WARN_ON(!HAS_PC8(dev));
6736 7090
6737 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6738 if (!dev_priv->pc8.enabled)
6739 return;
6740
6741 DRM_DEBUG_KMS("Disabling package C8+\n"); 7091 DRM_DEBUG_KMS("Disabling package C8+\n");
6742 7092
6743 intel_runtime_pm_get(dev_priv);
6744
6745 hsw_restore_lcpll(dev_priv); 7093 hsw_restore_lcpll(dev_priv);
6746 hsw_pc8_restore_interrupts(dev); 7094 hsw_runtime_pm_restore_interrupts(dev);
6747 lpt_init_pch_refclk(dev); 7095 lpt_init_pch_refclk(dev);
6748 7096
6749 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 7097 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -6757,185 +7105,11 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6757 mutex_lock(&dev_priv->rps.hw_lock); 7105 mutex_lock(&dev_priv->rps.hw_lock);
6758 gen6_update_ring_freq(dev); 7106 gen6_update_ring_freq(dev);
6759 mutex_unlock(&dev_priv->rps.hw_lock); 7107 mutex_unlock(&dev_priv->rps.hw_lock);
6760 dev_priv->pc8.enabled = false;
6761}
6762
6763void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6764{
6765 if (!HAS_PC8(dev_priv->dev))
6766 return;
6767
6768 mutex_lock(&dev_priv->pc8.lock);
6769 __hsw_enable_package_c8(dev_priv);
6770 mutex_unlock(&dev_priv->pc8.lock);
6771}
6772
6773void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6774{
6775 if (!HAS_PC8(dev_priv->dev))
6776 return;
6777
6778 mutex_lock(&dev_priv->pc8.lock);
6779 __hsw_disable_package_c8(dev_priv);
6780 mutex_unlock(&dev_priv->pc8.lock);
6781}
6782
6783static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6784{
6785 struct drm_device *dev = dev_priv->dev;
6786 struct intel_crtc *crtc;
6787 uint32_t val;
6788
6789 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6790 if (crtc->base.enabled)
6791 return false;
6792
6793 /* This case is still possible since we have the i915.disable_power_well
6794 * parameter and also the KVMr or something else might be requesting the
6795 * power well. */
6796 val = I915_READ(HSW_PWR_WELL_DRIVER);
6797 if (val != 0) {
6798 DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6799 return false;
6800 }
6801
6802 return true;
6803}
6804
6805/* Since we're called from modeset_global_resources there's no way to
6806 * symmetrically increase and decrease the refcount, so we use
6807 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6808 * or not.
6809 */
6810static void hsw_update_package_c8(struct drm_device *dev)
6811{
6812 struct drm_i915_private *dev_priv = dev->dev_private;
6813 bool allow;
6814
6815 if (!HAS_PC8(dev_priv->dev))
6816 return;
6817
6818 if (!i915_enable_pc8)
6819 return;
6820
6821 mutex_lock(&dev_priv->pc8.lock);
6822
6823 allow = hsw_can_enable_package_c8(dev_priv);
6824
6825 if (allow == dev_priv->pc8.requirements_met)
6826 goto done;
6827
6828 dev_priv->pc8.requirements_met = allow;
6829
6830 if (allow)
6831 __hsw_enable_package_c8(dev_priv);
6832 else
6833 __hsw_disable_package_c8(dev_priv);
6834
6835done:
6836 mutex_unlock(&dev_priv->pc8.lock);
6837}
6838
6839static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6840{
6841 if (!HAS_PC8(dev_priv->dev))
6842 return;
6843
6844 mutex_lock(&dev_priv->pc8.lock);
6845 if (!dev_priv->pc8.gpu_idle) {
6846 dev_priv->pc8.gpu_idle = true;
6847 __hsw_enable_package_c8(dev_priv);
6848 }
6849 mutex_unlock(&dev_priv->pc8.lock);
6850}
6851
6852static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6853{
6854 if (!HAS_PC8(dev_priv->dev))
6855 return;
6856
6857 mutex_lock(&dev_priv->pc8.lock);
6858 if (dev_priv->pc8.gpu_idle) {
6859 dev_priv->pc8.gpu_idle = false;
6860 __hsw_disable_package_c8(dev_priv);
6861 }
6862 mutex_unlock(&dev_priv->pc8.lock);
6863}
6864
6865#define for_each_power_domain(domain, mask) \
6866 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
6867 if ((1 << (domain)) & (mask))
6868
6869static unsigned long get_pipe_power_domains(struct drm_device *dev,
6870 enum pipe pipe, bool pfit_enabled)
6871{
6872 unsigned long mask;
6873 enum transcoder transcoder;
6874
6875 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6876
6877 mask = BIT(POWER_DOMAIN_PIPE(pipe));
6878 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6879 if (pfit_enabled)
6880 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6881
6882 return mask;
6883}
6884
6885void intel_display_set_init_power(struct drm_device *dev, bool enable)
6886{
6887 struct drm_i915_private *dev_priv = dev->dev_private;
6888
6889 if (dev_priv->power_domains.init_power_on == enable)
6890 return;
6891
6892 if (enable)
6893 intel_display_power_get(dev, POWER_DOMAIN_INIT);
6894 else
6895 intel_display_power_put(dev, POWER_DOMAIN_INIT);
6896
6897 dev_priv->power_domains.init_power_on = enable;
6898}
6899
6900static void modeset_update_power_wells(struct drm_device *dev)
6901{
6902 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
6903 struct intel_crtc *crtc;
6904
6905 /*
6906 * First get all needed power domains, then put all unneeded, to avoid
6907 * any unnecessary toggling of the power wells.
6908 */
6909 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6910 enum intel_display_power_domain domain;
6911
6912 if (!crtc->base.enabled)
6913 continue;
6914
6915 pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6916 crtc->pipe,
6917 crtc->config.pch_pfit.enabled);
6918
6919 for_each_power_domain(domain, pipe_domains[crtc->pipe])
6920 intel_display_power_get(dev, domain);
6921 }
6922
6923 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6924 enum intel_display_power_domain domain;
6925
6926 for_each_power_domain(domain, crtc->enabled_power_domains)
6927 intel_display_power_put(dev, domain);
6928
6929 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
6930 }
6931
6932 intel_display_set_init_power(dev, false);
6933} 7108}
6934 7109
6935static void haswell_modeset_global_resources(struct drm_device *dev) 7110static void haswell_modeset_global_resources(struct drm_device *dev)
6936{ 7111{
6937 modeset_update_power_wells(dev); 7112 modeset_update_crtc_power_domains(dev);
6938 hsw_update_package_c8(dev);
6939} 7113}
6940 7114
6941static int haswell_crtc_mode_set(struct drm_crtc *crtc, 7115static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -6985,6 +7159,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6985 enum intel_display_power_domain pfit_domain; 7159 enum intel_display_power_domain pfit_domain;
6986 uint32_t tmp; 7160 uint32_t tmp;
6987 7161
7162 if (!intel_display_power_enabled(dev_priv,
7163 POWER_DOMAIN_PIPE(crtc->pipe)))
7164 return false;
7165
6988 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7166 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6989 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 7167 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6990 7168
@@ -7010,7 +7188,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7010 pipe_config->cpu_transcoder = TRANSCODER_EDP; 7188 pipe_config->cpu_transcoder = TRANSCODER_EDP;
7011 } 7189 }
7012 7190
7013 if (!intel_display_power_enabled(dev, 7191 if (!intel_display_power_enabled(dev_priv,
7014 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 7192 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7015 return false; 7193 return false;
7016 7194
@@ -7038,7 +7216,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7038 intel_get_pipe_timings(crtc, pipe_config); 7216 intel_get_pipe_timings(crtc, pipe_config);
7039 7217
7040 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 7218 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7041 if (intel_display_power_enabled(dev, pfit_domain)) 7219 if (intel_display_power_enabled(dev_priv, pfit_domain))
7042 ironlake_get_pfit_config(crtc, pipe_config); 7220 ironlake_get_pfit_config(crtc, pipe_config);
7043 7221
7044 if (IS_HASWELL(dev)) 7222 if (IS_HASWELL(dev))
@@ -7435,10 +7613,26 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7435 bool visible = base != 0; 7613 bool visible = base != 0;
7436 7614
7437 if (intel_crtc->cursor_visible != visible) { 7615 if (intel_crtc->cursor_visible != visible) {
7616 int16_t width = intel_crtc->cursor_width;
7438 uint32_t cntl = I915_READ(CURCNTR(pipe)); 7617 uint32_t cntl = I915_READ(CURCNTR(pipe));
7439 if (base) { 7618 if (base) {
7440 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); 7619 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
7441 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 7620 cntl |= MCURSOR_GAMMA_ENABLE;
7621
7622 switch (width) {
7623 case 64:
7624 cntl |= CURSOR_MODE_64_ARGB_AX;
7625 break;
7626 case 128:
7627 cntl |= CURSOR_MODE_128_ARGB_AX;
7628 break;
7629 case 256:
7630 cntl |= CURSOR_MODE_256_ARGB_AX;
7631 break;
7632 default:
7633 WARN_ON(1);
7634 return;
7635 }
7442 cntl |= pipe << 28; /* Connect to correct pipe */ 7636 cntl |= pipe << 28; /* Connect to correct pipe */
7443 } else { 7637 } else {
7444 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 7638 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
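
[Annotation] Both cursor-update paths now derive the hardware cursor mode from the stored cursor width instead of hard-coding 64x64 ARGB. The mapping is a plain switch over the three supported square sizes; anything else is a programming error by this point, since intel_crtc_cursor_set() below rejects other dimensions (and anything but 64x64 on gen2). A sketch of the selection logic — the CURSOR_MODE_* values here are placeholders, not the real register encodings:

        #include <stdint.h>

        #define CURSOR_MODE_64_ARGB_AX   0x04u  /* placeholder values */
        #define CURSOR_MODE_128_ARGB_AX  0x05u
        #define CURSOR_MODE_256_ARGB_AX  0x06u

        static int cursor_mode_for_width(int width, uint32_t *cntl)
        {
                switch (width) {
                case 64:  *cntl |= CURSOR_MODE_64_ARGB_AX;  return 0;
                case 128: *cntl |= CURSOR_MODE_128_ARGB_AX; return 0;
                case 256: *cntl |= CURSOR_MODE_256_ARGB_AX; return 0;
                default:  return -1;    /* WARN_ON(1) in the driver */
                }
        }
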
@@ -7463,10 +7657,25 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7463 bool visible = base != 0; 7657 bool visible = base != 0;
7464 7658
7465 if (intel_crtc->cursor_visible != visible) { 7659 if (intel_crtc->cursor_visible != visible) {
7660 int16_t width = intel_crtc->cursor_width;
7466 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); 7661 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
7467 if (base) { 7662 if (base) {
7468 cntl &= ~CURSOR_MODE; 7663 cntl &= ~CURSOR_MODE;
7469 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 7664 cntl |= MCURSOR_GAMMA_ENABLE;
7665 switch (width) {
7666 case 64:
7667 cntl |= CURSOR_MODE_64_ARGB_AX;
7668 break;
7669 case 128:
7670 cntl |= CURSOR_MODE_128_ARGB_AX;
7671 break;
7672 case 256:
7673 cntl |= CURSOR_MODE_256_ARGB_AX;
7674 break;
7675 default:
7676 WARN_ON(1);
7677 return;
7678 }
7470 } else { 7679 } else {
7471 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 7680 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7472 cntl |= CURSOR_MODE_DISABLE; 7681 cntl |= CURSOR_MODE_DISABLE;
@@ -7550,6 +7759,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7550 struct drm_i915_private *dev_priv = dev->dev_private; 7759 struct drm_i915_private *dev_priv = dev->dev_private;
7551 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7760 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7552 struct drm_i915_gem_object *obj; 7761 struct drm_i915_gem_object *obj;
7762 unsigned old_width;
7553 uint32_t addr; 7763 uint32_t addr;
7554 int ret; 7764 int ret;
7555 7765
@@ -7562,9 +7772,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7562 goto finish; 7772 goto finish;
7563 } 7773 }
7564 7774
7565 /* Currently we only support 64x64 cursors */ 7775 /* Check for which cursor types we support */
7566 if (width != 64 || height != 64) { 7776 if (!((width == 64 && height == 64) ||
7567 DRM_ERROR("we currently only support 64x64 cursors\n"); 7777 (width == 128 && height == 128 && !IS_GEN2(dev)) ||
7778 (width == 256 && height == 256 && !IS_GEN2(dev)))) {
7779 DRM_DEBUG("Cursor dimension not supported\n");
7568 return -EINVAL; 7780 return -EINVAL;
7569 } 7781 }
7570 7782
@@ -7573,18 +7785,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7573 return -ENOENT; 7785 return -ENOENT;
7574 7786
7575 if (obj->base.size < width * height * 4) { 7787 if (obj->base.size < width * height * 4) {
7576 DRM_ERROR("buffer is too small\n"); 7788 DRM_DEBUG_KMS("buffer is too small\n");
7577 ret = -ENOMEM; 7789 ret = -ENOMEM;
7578 goto fail; 7790 goto fail;
7579 } 7791 }
7580 7792
7581 /* we only need to pin inside GTT if cursor is non-phy */ 7793 /* we only need to pin inside GTT if cursor is non-phy */
7582 mutex_lock(&dev->struct_mutex); 7794 mutex_lock(&dev->struct_mutex);
7583 if (!dev_priv->info->cursor_needs_physical) { 7795 if (!INTEL_INFO(dev)->cursor_needs_physical) {
7584 unsigned alignment; 7796 unsigned alignment;
7585 7797
7586 if (obj->tiling_mode) { 7798 if (obj->tiling_mode) {
7587 DRM_ERROR("cursor cannot be tiled\n"); 7799 DRM_DEBUG_KMS("cursor cannot be tiled\n");
7588 ret = -EINVAL; 7800 ret = -EINVAL;
7589 goto fail_locked; 7801 goto fail_locked;
7590 } 7802 }
@@ -7600,13 +7812,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7600 7812
7601 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); 7813 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
7602 if (ret) { 7814 if (ret) {
7603 DRM_ERROR("failed to move cursor bo into the GTT\n"); 7815 DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
7604 goto fail_locked; 7816 goto fail_locked;
7605 } 7817 }
7606 7818
7607 ret = i915_gem_object_put_fence(obj); 7819 ret = i915_gem_object_put_fence(obj);
7608 if (ret) { 7820 if (ret) {
7609 DRM_ERROR("failed to release fence for cursor"); 7821 DRM_DEBUG_KMS("failed to release fence for cursor");
7610 goto fail_unpin; 7822 goto fail_unpin;
7611 } 7823 }
7612 7824
@@ -7617,7 +7829,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7617 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 7829 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7618 align); 7830 align);
7619 if (ret) { 7831 if (ret) {
7620 DRM_ERROR("failed to attach phys object\n"); 7832 DRM_DEBUG_KMS("failed to attach phys object\n");
7621 goto fail_locked; 7833 goto fail_locked;
7622 } 7834 }
7623 addr = obj->phys_obj->handle->busaddr; 7835 addr = obj->phys_obj->handle->busaddr;
@@ -7628,7 +7840,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7628 7840
7629 finish: 7841 finish:
7630 if (intel_crtc->cursor_bo) { 7842 if (intel_crtc->cursor_bo) {
7631 if (dev_priv->info->cursor_needs_physical) { 7843 if (INTEL_INFO(dev)->cursor_needs_physical) {
7632 if (intel_crtc->cursor_bo != obj) 7844 if (intel_crtc->cursor_bo != obj)
7633 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 7845 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7634 } else 7846 } else
@@ -7638,13 +7850,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7638 7850
7639 mutex_unlock(&dev->struct_mutex); 7851 mutex_unlock(&dev->struct_mutex);
7640 7852
7853 old_width = intel_crtc->cursor_width;
7854
7641 intel_crtc->cursor_addr = addr; 7855 intel_crtc->cursor_addr = addr;
7642 intel_crtc->cursor_bo = obj; 7856 intel_crtc->cursor_bo = obj;
7643 intel_crtc->cursor_width = width; 7857 intel_crtc->cursor_width = width;
7644 intel_crtc->cursor_height = height; 7858 intel_crtc->cursor_height = height;
7645 7859
7646 if (intel_crtc->active) 7860 if (intel_crtc->active) {
7861 if (old_width != width)
7862 intel_update_watermarks(crtc);
7647 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 7863 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7864 }
7648 7865
7649 return 0; 7866 return 0;
7650fail_unpin: 7867fail_unpin:
@@ -7690,10 +7907,10 @@ static struct drm_display_mode load_detect_mode = {
7690 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 7907 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7691}; 7908};
7692 7909
7693static struct drm_framebuffer * 7910struct drm_framebuffer *
7694intel_framebuffer_create(struct drm_device *dev, 7911__intel_framebuffer_create(struct drm_device *dev,
7695 struct drm_mode_fb_cmd2 *mode_cmd, 7912 struct drm_mode_fb_cmd2 *mode_cmd,
7696 struct drm_i915_gem_object *obj) 7913 struct drm_i915_gem_object *obj)
7697{ 7914{
7698 struct intel_framebuffer *intel_fb; 7915 struct intel_framebuffer *intel_fb;
7699 int ret; 7916 int ret;
@@ -7704,12 +7921,7 @@ intel_framebuffer_create(struct drm_device *dev,
7704 return ERR_PTR(-ENOMEM); 7921 return ERR_PTR(-ENOMEM);
7705 } 7922 }
7706 7923
7707 ret = i915_mutex_lock_interruptible(dev);
7708 if (ret)
7709 goto err;
7710
7711 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 7924 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7712 mutex_unlock(&dev->struct_mutex);
7713 if (ret) 7925 if (ret)
7714 goto err; 7926 goto err;
7715 7927
@@ -7721,6 +7933,23 @@ err:
7721 return ERR_PTR(ret); 7933 return ERR_PTR(ret);
7722} 7934}
7723 7935
7936static struct drm_framebuffer *
7937intel_framebuffer_create(struct drm_device *dev,
7938 struct drm_mode_fb_cmd2 *mode_cmd,
7939 struct drm_i915_gem_object *obj)
7940{
7941 struct drm_framebuffer *fb;
7942 int ret;
7943
7944 ret = i915_mutex_lock_interruptible(dev);
7945 if (ret)
7946 return ERR_PTR(ret);
7947 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
7948 mutex_unlock(&dev->struct_mutex);
7949
7950 return fb;
7951}
7952
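
[Annotation] The refactor above is the common locked/unlocked function pair: the double-underscore variant now assumes struct_mutex is already held, so callers that are already in a locked context can use it directly, while the plain-named wrapper takes and drops the lock around it. The shape of that pattern, with hypothetical names:

        #include <pthread.h>

        static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

        /* caller must hold struct_mutex */
        static int __create_fb(void)
        {
                /* ... allocation and init that relies on the lock ... */
                return 0;
        }

        static int create_fb(void)
        {
                int ret;

                pthread_mutex_lock(&struct_mutex);
                ret = __create_fb();
                pthread_mutex_unlock(&struct_mutex);
                return ret;
        }
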
7724static u32 7953static u32
7725intel_framebuffer_pitch_for_width(int width, int bpp) 7954intel_framebuffer_pitch_for_width(int width, int bpp)
7726{ 7955{
@@ -7766,14 +7995,16 @@ mode_fits_in_fbdev(struct drm_device *dev,
7766 struct drm_i915_gem_object *obj; 7995 struct drm_i915_gem_object *obj;
7767 struct drm_framebuffer *fb; 7996 struct drm_framebuffer *fb;
7768 7997
7769 if (dev_priv->fbdev == NULL) 7998 if (!dev_priv->fbdev)
7770 return NULL; 7999 return NULL;
7771 8000
7772 obj = dev_priv->fbdev->ifb.obj; 8001 if (!dev_priv->fbdev->fb)
7773 if (obj == NULL)
7774 return NULL; 8002 return NULL;
7775 8003
7776 fb = &dev_priv->fbdev->ifb.base; 8004 obj = dev_priv->fbdev->fb->obj;
8005 BUG_ON(!obj);
8006
8007 fb = &dev_priv->fbdev->fb->base;
7777 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 8008 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7778 fb->bits_per_pixel)) 8009 fb->bits_per_pixel))
7779 return NULL; 8010 return NULL;
@@ -7855,6 +8086,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
7855 to_intel_connector(connector)->new_encoder = intel_encoder; 8086 to_intel_connector(connector)->new_encoder = intel_encoder;
7856 8087
7857 intel_crtc = to_intel_crtc(crtc); 8088 intel_crtc = to_intel_crtc(crtc);
8089 intel_crtc->new_enabled = true;
8090 intel_crtc->new_config = &intel_crtc->config;
7858 old->dpms_mode = connector->dpms; 8091 old->dpms_mode = connector->dpms;
7859 old->load_detect_temp = true; 8092 old->load_detect_temp = true;
7860 old->release_fb = NULL; 8093 old->release_fb = NULL;
@@ -7878,21 +8111,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
7878 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 8111 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
7879 if (IS_ERR(fb)) { 8112 if (IS_ERR(fb)) {
7880 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 8113 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7881 mutex_unlock(&crtc->mutex); 8114 goto fail;
7882 return false;
7883 } 8115 }
7884 8116
7885 if (intel_set_mode(crtc, mode, 0, 0, fb)) { 8117 if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7886 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 8118 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
7887 if (old->release_fb) 8119 if (old->release_fb)
7888 old->release_fb->funcs->destroy(old->release_fb); 8120 old->release_fb->funcs->destroy(old->release_fb);
7889 mutex_unlock(&crtc->mutex); 8121 goto fail;
7890 return false;
7891 } 8122 }
7892 8123
7893 /* let the connector get through one full cycle before testing */ 8124 /* let the connector get through one full cycle before testing */
7894 intel_wait_for_vblank(dev, intel_crtc->pipe); 8125 intel_wait_for_vblank(dev, intel_crtc->pipe);
7895 return true; 8126 return true;
8127
8128 fail:
8129 intel_crtc->new_enabled = crtc->enabled;
8130 if (intel_crtc->new_enabled)
8131 intel_crtc->new_config = &intel_crtc->config;
8132 else
8133 intel_crtc->new_config = NULL;
8134 mutex_unlock(&crtc->mutex);
8135 return false;
7896} 8136}
7897 8137
7898void intel_release_load_detect_pipe(struct drm_connector *connector, 8138void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -7902,6 +8142,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7902 intel_attached_encoder(connector); 8142 intel_attached_encoder(connector);
7903 struct drm_encoder *encoder = &intel_encoder->base; 8143 struct drm_encoder *encoder = &intel_encoder->base;
7904 struct drm_crtc *crtc = encoder->crtc; 8144 struct drm_crtc *crtc = encoder->crtc;
8145 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7905 8146
7906 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 8147 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7907 connector->base.id, drm_get_connector_name(connector), 8148 connector->base.id, drm_get_connector_name(connector),
@@ -7910,6 +8151,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7910 if (old->load_detect_temp) { 8151 if (old->load_detect_temp) {
7911 to_intel_connector(connector)->new_encoder = NULL; 8152 to_intel_connector(connector)->new_encoder = NULL;
7912 intel_encoder->new_crtc = NULL; 8153 intel_encoder->new_crtc = NULL;
8154 intel_crtc->new_enabled = false;
8155 intel_crtc->new_config = NULL;
7913 intel_set_mode(crtc, NULL, 0, 0, NULL); 8156 intel_set_mode(crtc, NULL, 0, 0, NULL);
7914 8157
7915 if (old->release_fb) { 8158 if (old->release_fb) {
@@ -8122,7 +8365,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8122static void intel_increase_pllclock(struct drm_crtc *crtc) 8365static void intel_increase_pllclock(struct drm_crtc *crtc)
8123{ 8366{
8124 struct drm_device *dev = crtc->dev; 8367 struct drm_device *dev = crtc->dev;
8125 drm_i915_private_t *dev_priv = dev->dev_private; 8368 struct drm_i915_private *dev_priv = dev->dev_private;
8126 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8369 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8127 int pipe = intel_crtc->pipe; 8370 int pipe = intel_crtc->pipe;
8128 int dpll_reg = DPLL(pipe); 8371 int dpll_reg = DPLL(pipe);
@@ -8153,7 +8396,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
8153static void intel_decrease_pllclock(struct drm_crtc *crtc) 8396static void intel_decrease_pllclock(struct drm_crtc *crtc)
8154{ 8397{
8155 struct drm_device *dev = crtc->dev; 8398 struct drm_device *dev = crtc->dev;
8156 drm_i915_private_t *dev_priv = dev->dev_private; 8399 struct drm_i915_private *dev_priv = dev->dev_private;
8157 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8158 8401
8159 if (HAS_PCH_SPLIT(dev)) 8402 if (HAS_PCH_SPLIT(dev))
@@ -8190,8 +8433,12 @@ void intel_mark_busy(struct drm_device *dev)
8190{ 8433{
8191 struct drm_i915_private *dev_priv = dev->dev_private; 8434 struct drm_i915_private *dev_priv = dev->dev_private;
8192 8435
8193 hsw_package_c8_gpu_busy(dev_priv); 8436 if (dev_priv->mm.busy)
8437 return;
8438
8439 intel_runtime_pm_get(dev_priv);
8194 i915_update_gfx_val(dev_priv); 8440 i915_update_gfx_val(dev_priv);
8441 dev_priv->mm.busy = true;
8195} 8442}
8196 8443
8197void intel_mark_idle(struct drm_device *dev) 8444void intel_mark_idle(struct drm_device *dev)
@@ -8199,20 +8446,26 @@ void intel_mark_idle(struct drm_device *dev)
8199 struct drm_i915_private *dev_priv = dev->dev_private; 8446 struct drm_i915_private *dev_priv = dev->dev_private;
8200 struct drm_crtc *crtc; 8447 struct drm_crtc *crtc;
8201 8448
8202 hsw_package_c8_gpu_idle(dev_priv); 8449 if (!dev_priv->mm.busy)
8203
8204 if (!i915_powersave)
8205 return; 8450 return;
8206 8451
8452 dev_priv->mm.busy = false;
8453
8454 if (!i915.powersave)
8455 goto out;
8456
8207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8457 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8208 if (!crtc->fb) 8458 if (!crtc->primary->fb)
8209 continue; 8459 continue;
8210 8460
8211 intel_decrease_pllclock(crtc); 8461 intel_decrease_pllclock(crtc);
8212 } 8462 }
8213 8463
8214 if (dev_priv->info->gen >= 6) 8464 if (INTEL_INFO(dev)->gen >= 6)
8215 gen6_rps_idle(dev->dev_private); 8465 gen6_rps_idle(dev->dev_private);
8466
8467out:
8468 intel_runtime_pm_put(dev_priv);
8216} 8469}
8217 8470
8218void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 8471void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
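The mark_busy/mark_idle hunks above replace the Haswell-specific package-C8 calls with a generic runtime-PM reference guarded by an mm.busy flag, so repeated busy (or idle) notifications take and drop exactly one reference per cycle. A hedged sketch of that pairing with stand-in helpers:

#include <stdbool.h>

struct dev_state {
        bool busy;
        int rpm_refs;           /* stand-in for the runtime-PM refcount */
};

static void rpm_get(struct dev_state *s) { s->rpm_refs++; }
static void rpm_put(struct dev_state *s) { s->rpm_refs--; }

static void mark_busy(struct dev_state *s)
{
        if (s->busy)
                return;         /* reference already held */
        rpm_get(s);
        s->busy = true;
}

static void mark_idle(struct dev_state *s)
{
        if (!s->busy)
                return;         /* nothing to release */
        s->busy = false;
        rpm_put(s);             /* drops the reference taken in mark_busy */
}

Note how intel_mark_idle() above still reaches its out: label (and the intel_runtime_pm_put) even when powersave is off; skipping it would leak the reference.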
@@ -8221,14 +8474,14 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8221 struct drm_device *dev = obj->base.dev; 8474 struct drm_device *dev = obj->base.dev;
8222 struct drm_crtc *crtc; 8475 struct drm_crtc *crtc;
8223 8476
8224 if (!i915_powersave) 8477 if (!i915.powersave)
8225 return; 8478 return;
8226 8479
8227 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8480 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8228 if (!crtc->fb) 8481 if (!crtc->primary->fb)
8229 continue; 8482 continue;
8230 8483
8231 if (to_intel_framebuffer(crtc->fb)->obj != obj) 8484 if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
8232 continue; 8485 continue;
8233 8486
8234 intel_increase_pllclock(crtc); 8487 intel_increase_pllclock(crtc);
@@ -8284,7 +8537,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
8284static void do_intel_finish_page_flip(struct drm_device *dev, 8537static void do_intel_finish_page_flip(struct drm_device *dev,
8285 struct drm_crtc *crtc) 8538 struct drm_crtc *crtc)
8286{ 8539{
8287 drm_i915_private_t *dev_priv = dev->dev_private; 8540 struct drm_i915_private *dev_priv = dev->dev_private;
8288 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8289 struct intel_unpin_work *work; 8542 struct intel_unpin_work *work;
8290 unsigned long flags; 8543 unsigned long flags;
@@ -8325,7 +8578,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8325 8578
8326void intel_finish_page_flip(struct drm_device *dev, int pipe) 8579void intel_finish_page_flip(struct drm_device *dev, int pipe)
8327{ 8580{
8328 drm_i915_private_t *dev_priv = dev->dev_private; 8581 struct drm_i915_private *dev_priv = dev->dev_private;
8329 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 8582 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8330 8583
8331 do_intel_finish_page_flip(dev, crtc); 8584 do_intel_finish_page_flip(dev, crtc);
@@ -8333,7 +8586,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
8333 8586
8334void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 8587void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8335{ 8588{
8336 drm_i915_private_t *dev_priv = dev->dev_private; 8589 struct drm_i915_private *dev_priv = dev->dev_private;
8337 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 8590 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8338 8591
8339 do_intel_finish_page_flip(dev, crtc); 8592 do_intel_finish_page_flip(dev, crtc);
@@ -8341,7 +8594,7 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8341 8594
8342void intel_prepare_page_flip(struct drm_device *dev, int plane) 8595void intel_prepare_page_flip(struct drm_device *dev, int plane)
8343{ 8596{
8344 drm_i915_private_t *dev_priv = dev->dev_private; 8597 struct drm_i915_private *dev_priv = dev->dev_private;
8345 struct intel_crtc *intel_crtc = 8598 struct intel_crtc *intel_crtc =
8346 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 8599 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
8347 unsigned long flags; 8600 unsigned long flags;
@@ -8656,7 +8909,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8656{ 8909{
8657 struct drm_device *dev = crtc->dev; 8910 struct drm_device *dev = crtc->dev;
8658 struct drm_i915_private *dev_priv = dev->dev_private; 8911 struct drm_i915_private *dev_priv = dev->dev_private;
8659 struct drm_framebuffer *old_fb = crtc->fb; 8912 struct drm_framebuffer *old_fb = crtc->primary->fb;
8660 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; 8913 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
8661 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8914 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8662 struct intel_unpin_work *work; 8915 struct intel_unpin_work *work;
@@ -8664,7 +8917,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8664 int ret; 8917 int ret;
8665 8918
8666 /* Can't change pixel format via MI display flips. */ 8919 /* Can't change pixel format via MI display flips. */
8667 if (fb->pixel_format != crtc->fb->pixel_format) 8920 if (fb->pixel_format != crtc->primary->fb->pixel_format)
8668 return -EINVAL; 8921 return -EINVAL;
8669 8922
8670 /* 8923 /*
@@ -8672,10 +8925,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8672 * Note that pitch changes could also affect these registers. 8925 * Note that pitch changes could also affect these registers.

8673 */ 8926 */
8674 if (INTEL_INFO(dev)->gen > 3 && 8927 if (INTEL_INFO(dev)->gen > 3 &&
8675 (fb->offsets[0] != crtc->fb->offsets[0] || 8928 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
8676 fb->pitches[0] != crtc->fb->pitches[0])) 8929 fb->pitches[0] != crtc->primary->fb->pitches[0]))
8677 return -EINVAL; 8930 return -EINVAL;
8678 8931
8932 if (i915_terminally_wedged(&dev_priv->gpu_error))
8933 goto out_hang;
8934
8679 work = kzalloc(sizeof(*work), GFP_KERNEL); 8935 work = kzalloc(sizeof(*work), GFP_KERNEL);
8680 if (work == NULL) 8936 if (work == NULL)
8681 return -ENOMEM; 8937 return -ENOMEM;
@@ -8713,7 +8969,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8713 drm_gem_object_reference(&work->old_fb_obj->base); 8969 drm_gem_object_reference(&work->old_fb_obj->base);
8714 drm_gem_object_reference(&obj->base); 8970 drm_gem_object_reference(&obj->base);
8715 8971
8716 crtc->fb = fb; 8972 crtc->primary->fb = fb;
8717 8973
8718 work->pending_flip_obj = obj; 8974 work->pending_flip_obj = obj;
8719 8975
@@ -8736,7 +8992,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8736 8992
8737cleanup_pending: 8993cleanup_pending:
8738 atomic_dec(&intel_crtc->unpin_work_count); 8994 atomic_dec(&intel_crtc->unpin_work_count);
8739 crtc->fb = old_fb; 8995 crtc->primary->fb = old_fb;
8740 drm_gem_object_unreference(&work->old_fb_obj->base); 8996 drm_gem_object_unreference(&work->old_fb_obj->base);
8741 drm_gem_object_unreference(&obj->base); 8997 drm_gem_object_unreference(&obj->base);
8742 mutex_unlock(&dev->struct_mutex); 8998 mutex_unlock(&dev->struct_mutex);
@@ -8750,6 +9006,13 @@ cleanup:
8750free_work: 9006free_work:
8751 kfree(work); 9007 kfree(work);
8752 9008
9009 if (ret == -EIO) {
9010out_hang:
9011 intel_crtc_wait_for_pending_flips(crtc);
9012 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9013 if (ret == 0 && event)
9014 drm_send_vblank_event(dev, intel_crtc->pipe, event);
9015 }
8753 return ret; 9016 return ret;
8754} 9017}
8755 9018
@@ -8766,6 +9029,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
8766 */ 9029 */
8767static void intel_modeset_update_staged_output_state(struct drm_device *dev) 9030static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8768{ 9031{
9032 struct intel_crtc *crtc;
8769 struct intel_encoder *encoder; 9033 struct intel_encoder *encoder;
8770 struct intel_connector *connector; 9034 struct intel_connector *connector;
8771 9035
@@ -8780,6 +9044,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8780 encoder->new_crtc = 9044 encoder->new_crtc =
8781 to_intel_crtc(encoder->base.crtc); 9045 to_intel_crtc(encoder->base.crtc);
8782 } 9046 }
9047
9048 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9049 base.head) {
9050 crtc->new_enabled = crtc->base.enabled;
9051
9052 if (crtc->new_enabled)
9053 crtc->new_config = &crtc->config;
9054 else
9055 crtc->new_config = NULL;
9056 }
8783} 9057}
8784 9058
8785/** 9059/**
@@ -8789,6 +9063,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8789 */ 9063 */
8790static void intel_modeset_commit_output_state(struct drm_device *dev) 9064static void intel_modeset_commit_output_state(struct drm_device *dev)
8791{ 9065{
9066 struct intel_crtc *crtc;
8792 struct intel_encoder *encoder; 9067 struct intel_encoder *encoder;
8793 struct intel_connector *connector; 9068 struct intel_connector *connector;
8794 9069
@@ -8801,6 +9076,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
8801 base.head) { 9076 base.head) {
8802 encoder->base.crtc = &encoder->new_crtc->base; 9077 encoder->base.crtc = &encoder->new_crtc->base;
8803 } 9078 }
9079
9080 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9081 base.head) {
9082 crtc->base.enabled = crtc->new_enabled;
9083 }
8804} 9084}
8805 9085
8806static void 9086static void
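These two hunks extend the existing staged-state machinery from encoders and connectors to CRTCs: the update_staged path resets new_enabled from the committed enabled flag, and the commit path copies it back the other way once a modeset succeeds. In miniature, with stand-in types and arrays instead of kernel lists:

#include <stdbool.h>
#include <stddef.h>

struct crtc {
        bool enabled;       /* committed state */
        bool new_enabled;   /* staged state */
};

/* Abort or fresh start: staged state mirrors committed state. */
static void update_staged(struct crtc *crtcs, size_t n)
{
        for (size_t i = 0; i < n; i++)
                crtcs[i].new_enabled = crtcs[i].enabled;
}

/* Modeset succeeded: committed state takes the staged values. */
static void commit_staged(struct crtc *crtcs, size_t n)
{
        for (size_t i = 0; i < n; i++)
                crtcs[i].enabled = crtcs[i].new_enabled;
}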
@@ -8941,23 +9221,47 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8941 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); 9221 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8942} 9222}
8943 9223
8944static bool check_encoder_cloning(struct drm_crtc *crtc) 9224static bool encoders_cloneable(const struct intel_encoder *a,
9225 const struct intel_encoder *b)
8945{ 9226{
8946 int num_encoders = 0; 9227 /* masks could be asymmetric, so check both ways */
8947 bool uncloneable_encoders = false; 9228 return a == b || (a->cloneable & (1 << b->type) &&
9229 b->cloneable & (1 << a->type));
9230}
9231
9232static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9233 struct intel_encoder *encoder)
9234{
9235 struct drm_device *dev = crtc->base.dev;
9236 struct intel_encoder *source_encoder;
9237
9238 list_for_each_entry(source_encoder,
9239 &dev->mode_config.encoder_list, base.head) {
9240 if (source_encoder->new_crtc != crtc)
9241 continue;
9242
9243 if (!encoders_cloneable(encoder, source_encoder))
9244 return false;
9245 }
9246
9247 return true;
9248}
9249
9250static bool check_encoder_cloning(struct intel_crtc *crtc)
9251{
9252 struct drm_device *dev = crtc->base.dev;
8948 struct intel_encoder *encoder; 9253 struct intel_encoder *encoder;
8949 9254
8950 list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, 9255 list_for_each_entry(encoder,
8951 base.head) { 9256 &dev->mode_config.encoder_list, base.head) {
8952 if (&encoder->new_crtc->base != crtc) 9257 if (encoder->new_crtc != crtc)
8953 continue; 9258 continue;
8954 9259
8955 num_encoders++; 9260 if (!check_single_encoder_cloning(crtc, encoder))
8956 if (!encoder->cloneable) 9261 return false;
8957 uncloneable_encoders = true;
8958 } 9262 }
8959 9263
8960 return !(num_encoders > 1 && uncloneable_encoders); 9264 return true;
8961} 9265}
8962 9266
8963static struct intel_crtc_config * 9267static struct intel_crtc_config *
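The rework above replaces the old boolean cloneable flag with a per-type bitmask and a pairwise check: every encoder staged for a CRTC must be cloneable with every other one, and because the masks may be asymmetric the relation is tested in both directions. A compact sketch with stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct encoder {
        int type;                /* small integer encoder type */
        unsigned int cloneable;  /* bitmask of encoder types tolerated */
};

static bool encoders_cloneable(const struct encoder *a,
                               const struct encoder *b)
{
        /* masks could be asymmetric, so check both ways */
        return a == b || ((a->cloneable & (1u << b->type)) &&
                          (b->cloneable & (1u << a->type)));
}

/* Every pair on the same CRTC must pass; O(n^2) over staged encoders. */
static bool check_cloning(const struct encoder *staged, size_t n)
{
        for (size_t i = 0; i < n; i++)
                for (size_t j = i + 1; j < n; j++)
                        if (!encoders_cloneable(&staged[i], &staged[j]))
                                return false;
        return true;
}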
@@ -8971,7 +9275,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8971 int plane_bpp, ret = -EINVAL; 9275 int plane_bpp, ret = -EINVAL;
8972 bool retry = true; 9276 bool retry = true;
8973 9277
8974 if (!check_encoder_cloning(crtc)) { 9278 if (!check_encoder_cloning(to_intel_crtc(crtc))) {
8975 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 9279 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8976 return ERR_PTR(-EINVAL); 9280 return ERR_PTR(-EINVAL);
8977 } 9281 }
@@ -9127,29 +9431,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9127 *prepare_pipes |= 1 << encoder->new_crtc->pipe; 9431 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
9128 } 9432 }
9129 9433
9130 /* Check for any pipes that will be fully disabled ... */ 9434 /* Check for pipes that will be enabled/disabled ... */
9131 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9435 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9132 base.head) { 9436 base.head) {
9133 bool used = false; 9437 if (intel_crtc->base.enabled == intel_crtc->new_enabled)
9134
9135 /* Don't try to disable disabled crtcs. */
9136 if (!intel_crtc->base.enabled)
9137 continue; 9438 continue;
9138 9439
9139 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9440 if (!intel_crtc->new_enabled)
9140 base.head) {
9141 if (encoder->new_crtc == intel_crtc)
9142 used = true;
9143 }
9144
9145 if (!used)
9146 *disable_pipes |= 1 << intel_crtc->pipe; 9441 *disable_pipes |= 1 << intel_crtc->pipe;
9442 else
9443 *prepare_pipes |= 1 << intel_crtc->pipe;
9147 } 9444 }
9148 9445
9149 9446
9150 /* set_mode is also used to update properties on live display pipes. */ 9447 /* set_mode is also used to update properties on live display pipes. */
9151 intel_crtc = to_intel_crtc(crtc); 9448 intel_crtc = to_intel_crtc(crtc);
9152 if (crtc->enabled) 9449 if (intel_crtc->new_enabled)
9153 *prepare_pipes |= 1 << intel_crtc->pipe; 9450 *prepare_pipes |= 1 << intel_crtc->pipe;
9154 9451
9155 /* 9452 /*
@@ -9208,10 +9505,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9208 9505
9209 intel_modeset_commit_output_state(dev); 9506 intel_modeset_commit_output_state(dev);
9210 9507
9211 /* Update computed state. */ 9508 /* Double check state. */
9212 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9509 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9213 base.head) { 9510 base.head) {
9214 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); 9511 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
9512 WARN_ON(intel_crtc->new_config &&
9513 intel_crtc->new_config != &intel_crtc->config);
9514 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
9215 } 9515 }
9216 9516
9217 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 9517 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -9380,10 +9680,8 @@ intel_pipe_config_compare(struct drm_device *dev,
9380 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9680 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9381 PIPE_CONF_CHECK_I(pipe_bpp); 9681 PIPE_CONF_CHECK_I(pipe_bpp);
9382 9682
9383 if (!HAS_DDI(dev)) { 9683 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9384 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9684 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9385 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9386 }
9387 9685
9388#undef PIPE_CONF_CHECK_X 9686#undef PIPE_CONF_CHECK_X
9389#undef PIPE_CONF_CHECK_I 9687#undef PIPE_CONF_CHECK_I
@@ -9471,7 +9769,7 @@ check_encoder_state(struct drm_device *dev)
9471static void 9769static void
9472check_crtc_state(struct drm_device *dev) 9770check_crtc_state(struct drm_device *dev)
9473{ 9771{
9474 drm_i915_private_t *dev_priv = dev->dev_private; 9772 struct drm_i915_private *dev_priv = dev->dev_private;
9475 struct intel_crtc *crtc; 9773 struct intel_crtc *crtc;
9476 struct intel_encoder *encoder; 9774 struct intel_encoder *encoder;
9477 struct intel_crtc_config pipe_config; 9775 struct intel_crtc_config pipe_config;
@@ -9539,7 +9837,7 @@ check_crtc_state(struct drm_device *dev)
9539static void 9837static void
9540check_shared_dpll_state(struct drm_device *dev) 9838check_shared_dpll_state(struct drm_device *dev)
9541{ 9839{
9542 drm_i915_private_t *dev_priv = dev->dev_private; 9840 struct drm_i915_private *dev_priv = dev->dev_private;
9543 struct intel_crtc *crtc; 9841 struct intel_crtc *crtc;
9544 struct intel_dpll_hw_state dpll_hw_state; 9842 struct intel_dpll_hw_state dpll_hw_state;
9545 int i; 9843 int i;
@@ -9612,7 +9910,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9612 int x, int y, struct drm_framebuffer *fb) 9910 int x, int y, struct drm_framebuffer *fb)
9613{ 9911{
9614 struct drm_device *dev = crtc->dev; 9912 struct drm_device *dev = crtc->dev;
9615 drm_i915_private_t *dev_priv = dev->dev_private; 9913 struct drm_i915_private *dev_priv = dev->dev_private;
9616 struct drm_display_mode *saved_mode; 9914 struct drm_display_mode *saved_mode;
9617 struct intel_crtc_config *pipe_config = NULL; 9915 struct intel_crtc_config *pipe_config = NULL;
9618 struct intel_crtc *intel_crtc; 9916 struct intel_crtc *intel_crtc;
@@ -9643,6 +9941,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9643 } 9941 }
9644 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 9942 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9645 "[modeset]"); 9943 "[modeset]");
9944 to_intel_crtc(crtc)->new_config = pipe_config;
9646 } 9945 }
9647 9946
9648 /* 9947 /*
@@ -9653,8 +9952,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9653 * adjusted_mode bits in the crtc directly. 9952 * adjusted_mode bits in the crtc directly.
9654 */ 9953 */
9655 if (IS_VALLEYVIEW(dev)) { 9954 if (IS_VALLEYVIEW(dev)) {
9656 valleyview_modeset_global_pipes(dev, &prepare_pipes, 9955 valleyview_modeset_global_pipes(dev, &prepare_pipes);
9657 modeset_pipes, pipe_config);
9658 9956
9659 /* may have added more to prepare_pipes than we should */ 9957 /* may have added more to prepare_pipes than we should */
9660 prepare_pipes &= ~disable_pipes; 9958 prepare_pipes &= ~disable_pipes;
@@ -9676,6 +9974,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9676 /* mode_set/enable/disable functions rely on a correct pipe 9974 /* mode_set/enable/disable functions rely on a correct pipe
9677 * config. */ 9975 * config. */
9678 to_intel_crtc(crtc)->config = *pipe_config; 9976 to_intel_crtc(crtc)->config = *pipe_config;
9977 to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
9679 9978
9680 /* 9979 /*
9681 * Calculate and store various constants which 9980 * Calculate and store various constants which
@@ -9734,7 +10033,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
9734 10033
9735void intel_crtc_restore_mode(struct drm_crtc *crtc) 10034void intel_crtc_restore_mode(struct drm_crtc *crtc)
9736{ 10035{
9737 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); 10036 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
9738} 10037}
9739 10038
9740#undef for_each_intel_crtc_masked 10039#undef for_each_intel_crtc_masked
@@ -9746,16 +10045,24 @@ static void intel_set_config_free(struct intel_set_config *config)
9746 10045
9747 kfree(config->save_connector_encoders); 10046 kfree(config->save_connector_encoders);
9748 kfree(config->save_encoder_crtcs); 10047 kfree(config->save_encoder_crtcs);
10048 kfree(config->save_crtc_enabled);
9749 kfree(config); 10049 kfree(config);
9750} 10050}
9751 10051
9752static int intel_set_config_save_state(struct drm_device *dev, 10052static int intel_set_config_save_state(struct drm_device *dev,
9753 struct intel_set_config *config) 10053 struct intel_set_config *config)
9754{ 10054{
10055 struct drm_crtc *crtc;
9755 struct drm_encoder *encoder; 10056 struct drm_encoder *encoder;
9756 struct drm_connector *connector; 10057 struct drm_connector *connector;
9757 int count; 10058 int count;
9758 10059
10060 config->save_crtc_enabled =
10061 kcalloc(dev->mode_config.num_crtc,
10062 sizeof(bool), GFP_KERNEL);
10063 if (!config->save_crtc_enabled)
10064 return -ENOMEM;
10065
9759 config->save_encoder_crtcs = 10066 config->save_encoder_crtcs =
9760 kcalloc(dev->mode_config.num_encoder, 10067 kcalloc(dev->mode_config.num_encoder,
9761 sizeof(struct drm_crtc *), GFP_KERNEL); 10068 sizeof(struct drm_crtc *), GFP_KERNEL);
@@ -9773,6 +10080,11 @@ static int intel_set_config_save_state(struct drm_device *dev,
9773 * restored, not the driver's personal bookkeeping. 10080 * restored, not the driver's personal bookkeeping.
9774 */ 10081 */
9775 count = 0; 10082 count = 0;
10083 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10084 config->save_crtc_enabled[count++] = crtc->enabled;
10085 }
10086
10087 count = 0;
9776 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 10088 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9777 config->save_encoder_crtcs[count++] = encoder->crtc; 10089 config->save_encoder_crtcs[count++] = encoder->crtc;
9778 } 10090 }
@@ -9788,11 +10100,22 @@ static int intel_set_config_save_state(struct drm_device *dev,
9788static void intel_set_config_restore_state(struct drm_device *dev, 10100static void intel_set_config_restore_state(struct drm_device *dev,
9789 struct intel_set_config *config) 10101 struct intel_set_config *config)
9790{ 10102{
10103 struct intel_crtc *crtc;
9791 struct intel_encoder *encoder; 10104 struct intel_encoder *encoder;
9792 struct intel_connector *connector; 10105 struct intel_connector *connector;
9793 int count; 10106 int count;
9794 10107
9795 count = 0; 10108 count = 0;
10109 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
10110 crtc->new_enabled = config->save_crtc_enabled[count++];
10111
10112 if (crtc->new_enabled)
10113 crtc->new_config = &crtc->config;
10114 else
10115 crtc->new_config = NULL;
10116 }
10117
10118 count = 0;
9796 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 10119 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9797 encoder->new_crtc = 10120 encoder->new_crtc =
9798 to_intel_crtc(config->save_encoder_crtcs[count++]); 10121 to_intel_crtc(config->save_encoder_crtcs[count++]);
@@ -9834,13 +10157,13 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9834 * and then just flip_or_move it */ 10157 * and then just flip_or_move it */
9835 if (is_crtc_connector_off(set)) { 10158 if (is_crtc_connector_off(set)) {
9836 config->mode_changed = true; 10159 config->mode_changed = true;
9837 } else if (set->crtc->fb != set->fb) { 10160 } else if (set->crtc->primary->fb != set->fb) {
9838 /* If we have no fb then treat it as a full mode set */ 10161 /* If we have no fb then treat it as a full mode set */
9839 if (set->crtc->fb == NULL) { 10162 if (set->crtc->primary->fb == NULL) {
9840 struct intel_crtc *intel_crtc = 10163 struct intel_crtc *intel_crtc =
9841 to_intel_crtc(set->crtc); 10164 to_intel_crtc(set->crtc);
9842 10165
9843 if (intel_crtc->active && i915_fastboot) { 10166 if (intel_crtc->active && i915.fastboot) {
9844 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 10167 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9845 config->fb_changed = true; 10168 config->fb_changed = true;
9846 } else { 10169 } else {
@@ -9850,7 +10173,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9850 } else if (set->fb == NULL) { 10173 } else if (set->fb == NULL) {
9851 config->mode_changed = true; 10174 config->mode_changed = true;
9852 } else if (set->fb->pixel_format != 10175 } else if (set->fb->pixel_format !=
9853 set->crtc->fb->pixel_format) { 10176 set->crtc->primary->fb->pixel_format) {
9854 config->mode_changed = true; 10177 config->mode_changed = true;
9855 } else { 10178 } else {
9856 config->fb_changed = true; 10179 config->fb_changed = true;
@@ -9876,9 +10199,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9876 struct drm_mode_set *set, 10199 struct drm_mode_set *set,
9877 struct intel_set_config *config) 10200 struct intel_set_config *config)
9878{ 10201{
9879 struct drm_crtc *new_crtc;
9880 struct intel_connector *connector; 10202 struct intel_connector *connector;
9881 struct intel_encoder *encoder; 10203 struct intel_encoder *encoder;
10204 struct intel_crtc *crtc;
9882 int ro; 10205 int ro;
9883 10206
9884 /* The upper layers ensure that we either disable a crtc or have a list 10207 /* The upper layers ensure that we either disable a crtc or have a list
@@ -9921,6 +10244,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9921 /* Update crtc of enabled connectors. */ 10244 /* Update crtc of enabled connectors. */
9922 list_for_each_entry(connector, &dev->mode_config.connector_list, 10245 list_for_each_entry(connector, &dev->mode_config.connector_list,
9923 base.head) { 10246 base.head) {
10247 struct drm_crtc *new_crtc;
10248
9924 if (!connector->new_encoder) 10249 if (!connector->new_encoder)
9925 continue; 10250 continue;
9926 10251
@@ -9971,9 +10296,58 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9971 } 10296 }
9972 /* Now we've also updated encoder->new_crtc for all encoders. */ 10297 /* Now we've also updated encoder->new_crtc for all encoders. */
9973 10298
10299 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10300 base.head) {
10301 crtc->new_enabled = false;
10302
10303 list_for_each_entry(encoder,
10304 &dev->mode_config.encoder_list,
10305 base.head) {
10306 if (encoder->new_crtc == crtc) {
10307 crtc->new_enabled = true;
10308 break;
10309 }
10310 }
10311
10312 if (crtc->new_enabled != crtc->base.enabled) {
10313 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
10314 crtc->new_enabled ? "en" : "dis");
10315 config->mode_changed = true;
10316 }
10317
10318 if (crtc->new_enabled)
10319 crtc->new_config = &crtc->config;
10320 else
10321 crtc->new_config = NULL;
10322 }
10323
9974 return 0; 10324 return 0;
9975} 10325}
9976 10326
10327static void disable_crtc_nofb(struct intel_crtc *crtc)
10328{
10329 struct drm_device *dev = crtc->base.dev;
10330 struct intel_encoder *encoder;
10331 struct intel_connector *connector;
10332
10333 DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
10334 pipe_name(crtc->pipe));
10335
10336 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10337 if (connector->new_encoder &&
10338 connector->new_encoder->new_crtc == crtc)
10339 connector->new_encoder = NULL;
10340 }
10341
10342 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10343 if (encoder->new_crtc == crtc)
10344 encoder->new_crtc = NULL;
10345 }
10346
10347 crtc->new_enabled = false;
10348 crtc->new_config = NULL;
10349}
10350
9977static int intel_crtc_set_config(struct drm_mode_set *set) 10351static int intel_crtc_set_config(struct drm_mode_set *set)
9978{ 10352{
9979 struct drm_device *dev; 10353 struct drm_device *dev;
@@ -10012,7 +10386,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10012 save_set.mode = &set->crtc->mode; 10386 save_set.mode = &set->crtc->mode;
10013 save_set.x = set->crtc->x; 10387 save_set.x = set->crtc->x;
10014 save_set.y = set->crtc->y; 10388 save_set.y = set->crtc->y;
10015 save_set.fb = set->crtc->fb; 10389 save_set.fb = set->crtc->primary->fb;
10016 10390
10017 /* Compute whether we need a full modeset, only an fb base update or no 10391 /* Compute whether we need a full modeset, only an fb base update or no
10018 * change at all. In the future we might also check whether only the 10392 * change at all. In the future we might also check whether only the
@@ -10040,7 +10414,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10040 * flipping, so increasing its cost here shouldn't be a big 10414 * flipping, so increasing its cost here shouldn't be a big
10041 * deal). 10415 * deal).
10042 */ 10416 */
10043 if (i915_fastboot && ret == 0) 10417 if (i915.fastboot && ret == 0)
10044 intel_modeset_check_state(set->crtc->dev); 10418 intel_modeset_check_state(set->crtc->dev);
10045 } 10419 }
10046 10420
@@ -10050,6 +10424,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10050fail: 10424fail:
10051 intel_set_config_restore_state(dev, config); 10425 intel_set_config_restore_state(dev, config);
10052 10426
10427 /*
10428 * HACK: if the pipe was on, but we didn't have a framebuffer,
10429 * force the pipe off to avoid oopsing in the modeset code
10430 * due to fb==NULL. This should only happen during boot since
10431 * we don't yet reconstruct the FB from the hardware state.
10432 */
10433 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
10434 disable_crtc_nofb(to_intel_crtc(save_set.crtc));
10435
10053 /* Try to restore the config */ 10436 /* Try to restore the config */
10054 if (config->mode_changed && 10437 if (config->mode_changed &&
10055 intel_set_mode(save_set.crtc, save_set.mode, 10438 intel_set_mode(save_set.crtc, save_set.mode,
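As the HACK comment above explains, restoring a configuration in which a pipe is enabled but has no framebuffer would oops the modeset code on fb == NULL, so disable_crtc_nofb() detaches every connector and encoder still pointing at that CRTC and stages it off instead. A reduced sketch of that detach-and-disable walk (stand-in types, arrays instead of kernel lists):

#include <stdbool.h>
#include <stddef.h>

struct crtc;
struct encoder   { struct crtc *new_crtc; };
struct connector { struct encoder *new_encoder; };
struct crtc      { bool new_enabled; };

static void disable_crtc_nofb(struct crtc *crtc,
                              struct connector *conns, size_t nconn,
                              struct encoder *encs, size_t nenc)
{
        /* unhook connectors routed through an encoder on this crtc */
        for (size_t i = 0; i < nconn; i++)
                if (conns[i].new_encoder &&
                    conns[i].new_encoder->new_crtc == crtc)
                        conns[i].new_encoder = NULL;

        /* unhook the encoders themselves */
        for (size_t i = 0; i < nenc; i++)
                if (encs[i].new_crtc == crtc)
                        encs[i].new_crtc = NULL;

        /* finally stage the crtc off */
        crtc->new_enabled = false;
}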
@@ -10174,7 +10557,7 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10174 10557
10175static void intel_crtc_init(struct drm_device *dev, int pipe) 10558static void intel_crtc_init(struct drm_device *dev, int pipe)
10176{ 10559{
10177 drm_i915_private_t *dev_priv = dev->dev_private; 10560 struct drm_i915_private *dev_priv = dev->dev_private;
10178 struct intel_crtc *intel_crtc; 10561 struct intel_crtc *intel_crtc;
10179 int i; 10562 int i;
10180 10563
@@ -10184,6 +10567,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10184 10567
10185 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 10568 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10186 10569
10570 if (IS_GEN2(dev)) {
10571 intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
10572 intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
10573 } else {
10574 intel_crtc->max_cursor_width = CURSOR_WIDTH;
10575 intel_crtc->max_cursor_height = CURSOR_HEIGHT;
10576 }
10577 dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
10578 dev->mode_config.cursor_height = intel_crtc->max_cursor_height;
10579
10187 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 10580 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10188 for (i = 0; i < 256; i++) { 10581 for (i = 0; i < 256; i++) {
10189 intel_crtc->lut_r[i] = i; 10582 intel_crtc->lut_r[i] = i;
@@ -10255,12 +10648,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
10255 10648
10256 list_for_each_entry(source_encoder, 10649 list_for_each_entry(source_encoder,
10257 &dev->mode_config.encoder_list, base.head) { 10650 &dev->mode_config.encoder_list, base.head) {
10258 10651 if (encoders_cloneable(encoder, source_encoder))
10259 if (encoder == source_encoder)
10260 index_mask |= (1 << entry);
10261
10262 /* Intel hw has only one MUX where encoders could be cloned. */
10263 if (encoder->cloneable && source_encoder->cloneable)
10264 index_mask |= (1 << entry); 10652 index_mask |= (1 << entry);
10265 10653
10266 entry++; 10654 entry++;
@@ -10279,8 +10667,7 @@ static bool has_edp_a(struct drm_device *dev)
10279 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 10667 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
10280 return false; 10668 return false;
10281 10669
10282 if (IS_GEN5(dev) && 10670 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
10283 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
10284 return false; 10671 return false;
10285 10672
10286 return true; 10673 return true;
@@ -10433,18 +10820,13 @@ static void intel_setup_outputs(struct drm_device *dev)
10433 drm_helper_move_panel_connectors_to_head(dev); 10820 drm_helper_move_panel_connectors_to_head(dev);
10434} 10821}
10435 10822
10436void intel_framebuffer_fini(struct intel_framebuffer *fb)
10437{
10438 drm_framebuffer_cleanup(&fb->base);
10439 WARN_ON(!fb->obj->framebuffer_references--);
10440 drm_gem_object_unreference_unlocked(&fb->obj->base);
10441}
10442
10443static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 10823static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
10444{ 10824{
10445 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 10825 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10446 10826
10447 intel_framebuffer_fini(intel_fb); 10827 drm_framebuffer_cleanup(fb);
10828 WARN_ON(!intel_fb->obj->framebuffer_references--);
10829 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
10448 kfree(intel_fb); 10830 kfree(intel_fb);
10449} 10831}
10450 10832
@@ -10463,12 +10845,12 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
10463 .create_handle = intel_user_framebuffer_create_handle, 10845 .create_handle = intel_user_framebuffer_create_handle,
10464}; 10846};
10465 10847
10466int intel_framebuffer_init(struct drm_device *dev, 10848static int intel_framebuffer_init(struct drm_device *dev,
10467 struct intel_framebuffer *intel_fb, 10849 struct intel_framebuffer *intel_fb,
10468 struct drm_mode_fb_cmd2 *mode_cmd, 10850 struct drm_mode_fb_cmd2 *mode_cmd,
10469 struct drm_i915_gem_object *obj) 10851 struct drm_i915_gem_object *obj)
10470{ 10852{
10471 int aligned_height, tile_height; 10853 int aligned_height;
10472 int pitch_limit; 10854 int pitch_limit;
10473 int ret; 10855 int ret;
10474 10856
@@ -10562,9 +10944,8 @@ int intel_framebuffer_init(struct drm_device *dev,
10562 if (mode_cmd->offsets[0] != 0) 10944 if (mode_cmd->offsets[0] != 0)
10563 return -EINVAL; 10945 return -EINVAL;
10564 10946
10565 tile_height = IS_GEN2(dev) ? 16 : 8; 10947 aligned_height = intel_align_height(dev, mode_cmd->height,
10566 aligned_height = ALIGN(mode_cmd->height, 10948 obj->tiling_mode);
10567 obj->tiling_mode ? tile_height : 1);
10568 /* FIXME drm helper for size checks (especially planar formats)? */ 10949 /* FIXME drm helper for size checks (especially planar formats)? */
10569 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 10950 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10570 return -EINVAL; 10951 return -EINVAL;
@@ -10624,32 +11005,40 @@ static void intel_init_display(struct drm_device *dev)
10624 11005
10625 if (HAS_DDI(dev)) { 11006 if (HAS_DDI(dev)) {
10626 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 11007 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
11008 dev_priv->display.get_plane_config = ironlake_get_plane_config;
10627 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 11009 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
10628 dev_priv->display.crtc_enable = haswell_crtc_enable; 11010 dev_priv->display.crtc_enable = haswell_crtc_enable;
10629 dev_priv->display.crtc_disable = haswell_crtc_disable; 11011 dev_priv->display.crtc_disable = haswell_crtc_disable;
10630 dev_priv->display.off = haswell_crtc_off; 11012 dev_priv->display.off = haswell_crtc_off;
10631 dev_priv->display.update_plane = ironlake_update_plane; 11013 dev_priv->display.update_primary_plane =
11014 ironlake_update_primary_plane;
10632 } else if (HAS_PCH_SPLIT(dev)) { 11015 } else if (HAS_PCH_SPLIT(dev)) {
10633 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 11016 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
11017 dev_priv->display.get_plane_config = ironlake_get_plane_config;
10634 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 11018 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
10635 dev_priv->display.crtc_enable = ironlake_crtc_enable; 11019 dev_priv->display.crtc_enable = ironlake_crtc_enable;
10636 dev_priv->display.crtc_disable = ironlake_crtc_disable; 11020 dev_priv->display.crtc_disable = ironlake_crtc_disable;
10637 dev_priv->display.off = ironlake_crtc_off; 11021 dev_priv->display.off = ironlake_crtc_off;
10638 dev_priv->display.update_plane = ironlake_update_plane; 11022 dev_priv->display.update_primary_plane =
11023 ironlake_update_primary_plane;
10639 } else if (IS_VALLEYVIEW(dev)) { 11024 } else if (IS_VALLEYVIEW(dev)) {
10640 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 11025 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11026 dev_priv->display.get_plane_config = i9xx_get_plane_config;
10641 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 11027 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10642 dev_priv->display.crtc_enable = valleyview_crtc_enable; 11028 dev_priv->display.crtc_enable = valleyview_crtc_enable;
10643 dev_priv->display.crtc_disable = i9xx_crtc_disable; 11029 dev_priv->display.crtc_disable = i9xx_crtc_disable;
10644 dev_priv->display.off = i9xx_crtc_off; 11030 dev_priv->display.off = i9xx_crtc_off;
10645 dev_priv->display.update_plane = i9xx_update_plane; 11031 dev_priv->display.update_primary_plane =
11032 i9xx_update_primary_plane;
10646 } else { 11033 } else {
10647 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 11034 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11035 dev_priv->display.get_plane_config = i9xx_get_plane_config;
10648 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 11036 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10649 dev_priv->display.crtc_enable = i9xx_crtc_enable; 11037 dev_priv->display.crtc_enable = i9xx_crtc_enable;
10650 dev_priv->display.crtc_disable = i9xx_crtc_disable; 11038 dev_priv->display.crtc_disable = i9xx_crtc_disable;
10651 dev_priv->display.off = i9xx_crtc_off; 11039 dev_priv->display.off = i9xx_crtc_off;
10652 dev_priv->display.update_plane = i9xx_update_plane; 11040 dev_priv->display.update_primary_plane =
11041 i9xx_update_primary_plane;
10653 } 11042 }
10654 11043
10655 /* Returns the core display clock speed */ 11044 /* Returns the core display clock speed */
@@ -10839,6 +11228,9 @@ static struct intel_quirk intel_quirks[] = {
10839 11228
10840 /* Acer Aspire 4736Z */ 11229 /* Acer Aspire 4736Z */
10841 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 11230 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
11231
11232 /* Acer Aspire 5336 */
11233 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
10842}; 11234};
10843 11235
10844static void intel_init_quirks(struct drm_device *dev) 11236static void intel_init_quirks(struct drm_device *dev)
@@ -10869,6 +11261,7 @@ static void i915_disable_vga(struct drm_device *dev)
10869 u8 sr1; 11261 u8 sr1;
10870 u32 vga_reg = i915_vgacntrl_reg(dev); 11262 u32 vga_reg = i915_vgacntrl_reg(dev);
10871 11263
11264 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
10872 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 11265 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10873 outb(SR01, VGA_SR_INDEX); 11266 outb(SR01, VGA_SR_INDEX);
10874 sr1 = inb(VGA_SR_DATA); 11267 sr1 = inb(VGA_SR_DATA);
@@ -10901,7 +11294,9 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
10901void intel_modeset_init(struct drm_device *dev) 11294void intel_modeset_init(struct drm_device *dev)
10902{ 11295{
10903 struct drm_i915_private *dev_priv = dev->dev_private; 11296 struct drm_i915_private *dev_priv = dev->dev_private;
10904 int i, j, ret; 11297 int sprite, ret;
11298 enum pipe pipe;
11299 struct intel_crtc *crtc;
10905 11300
10906 drm_mode_config_init(dev); 11301 drm_mode_config_init(dev);
10907 11302
@@ -10938,13 +11333,13 @@ void intel_modeset_init(struct drm_device *dev)
10938 INTEL_INFO(dev)->num_pipes, 11333 INTEL_INFO(dev)->num_pipes,
10939 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 11334 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
10940 11335
10941 for_each_pipe(i) { 11336 for_each_pipe(pipe) {
10942 intel_crtc_init(dev, i); 11337 intel_crtc_init(dev, pipe);
10943 for (j = 0; j < dev_priv->num_plane; j++) { 11338 for_each_sprite(pipe, sprite) {
10944 ret = intel_plane_init(dev, i, j); 11339 ret = intel_plane_init(dev, pipe, sprite);
10945 if (ret) 11340 if (ret)
10946 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 11341 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
10947 pipe_name(i), sprite_name(i, j), ret); 11342 pipe_name(pipe), sprite_name(pipe, sprite), ret);
10948 } 11343 }
10949 } 11344 }
10950 11345
@@ -10960,6 +11355,33 @@ void intel_modeset_init(struct drm_device *dev)
10960 11355
10961 /* Just in case the BIOS is doing something questionable. */ 11356 /* Just in case the BIOS is doing something questionable. */
10962 intel_disable_fbc(dev); 11357 intel_disable_fbc(dev);
11358
11359 mutex_lock(&dev->mode_config.mutex);
11360 intel_modeset_setup_hw_state(dev, false);
11361 mutex_unlock(&dev->mode_config.mutex);
11362
11363 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11364 base.head) {
11365 if (!crtc->active)
11366 continue;
11367
11368 /*
11369 * Note that reserving the BIOS fb up front prevents us
11370 * from stuffing other stolen allocations like the ring
11371 * on top. This prevents some ugliness at boot time, and
11372 * can even allow for smooth boot transitions if the BIOS
11373 * fb is large enough for the active pipe configuration.
11374 */
11375 if (dev_priv->display.get_plane_config) {
11376 dev_priv->display.get_plane_config(crtc,
11377 &crtc->plane_config);
11378 /*
11379 * If the fb is shared between multiple heads, we'll
11380 * just get the first one.
11381 */
11382 intel_find_plane_obj(crtc, &crtc->plane_config);
11383 }
11384 }
10963} 11385}
10964 11386
10965static void 11387static void
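The block added to intel_modeset_init() reads back, for each pipe the BIOS left active, what the hardware is currently scanning out, so the driver can reserve that stolen-memory buffer before other allocations land on top of it. A hedged outline of the flow (names and types are stand-ins; the real get_plane_config hook is per-platform):

#include <stdbool.h>
#include <stddef.h>

struct plane_config { unsigned long base; size_t size; };
struct crtc {
        bool active;
        struct plane_config plane_config;
};

typedef void (*get_plane_config_fn)(struct crtc *, struct plane_config *);

static void takeover_bios_fbs(struct crtc *crtcs, size_t n,
                              get_plane_config_fn get_plane_config)
{
        for (size_t i = 0; i < n; i++) {
                if (!crtcs[i].active || !get_plane_config)
                        continue;

                /* read what the BIOS programmed for this pipe */
                get_plane_config(&crtcs[i], &crtcs[i].plane_config);

                /* a real driver would now wrap that buffer in an fb;
                 * if several heads share it, the first lookup wins */
        }
}

Reserving the BIOS buffer first is what makes a flicker-free ("smooth") boot possible when that buffer is large enough for the active configuration.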
@@ -11097,6 +11519,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11097 encoder->base.crtc = NULL; 11519 encoder->base.crtc = NULL;
11098 } 11520 }
11099 } 11521 }
11522 if (crtc->active) {
11523 /*
11524 * We start out with underrun reporting disabled to avoid races.
11525 * For correct bookkeeping mark this on active crtcs.
11526 *
11527 * No protection against concurrent access is required - at
11528 * worst a fifo underrun happens which also sets this to false.
11529 */
11530 crtc->cpu_fifo_underrun_disabled = true;
11531 crtc->pch_fifo_underrun_disabled = true;
11532 }
11100} 11533}
11101 11534
11102static void intel_sanitize_encoder(struct intel_encoder *encoder) 11535static void intel_sanitize_encoder(struct intel_encoder *encoder)
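The comment in this hunk makes a small but subtle concurrency argument: the initialization needs no locking because every writer moves the flag in the same direction, so a race can only produce the value this code would have written anyway. Sketched with a stand-in state struct:

#include <stdbool.h>

struct crtc_state {
        bool active;
        bool underrun_reporting_disabled;
};

/* Safe without locks: a concurrent FIFO underrun also sets the flag
 * to true, so the worst-case interleaving is indistinguishable from
 * running this initialization first. */
static void sanitize_underrun_reporting(struct crtc_state *c)
{
        if (c->active)
                c->underrun_reporting_disabled = true;
}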
@@ -11142,11 +11575,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11142 * the crtc fixup. */ 11575 * the crtc fixup. */
11143} 11576}
11144 11577
11145void i915_redisable_vga(struct drm_device *dev) 11578void i915_redisable_vga_power_on(struct drm_device *dev)
11146{ 11579{
11147 struct drm_i915_private *dev_priv = dev->dev_private; 11580 struct drm_i915_private *dev_priv = dev->dev_private;
11148 u32 vga_reg = i915_vgacntrl_reg(dev); 11581 u32 vga_reg = i915_vgacntrl_reg(dev);
11149 11582
11583 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
11584 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
11585 i915_disable_vga(dev);
11586 }
11587}
11588
11589void i915_redisable_vga(struct drm_device *dev)
11590{
11591 struct drm_i915_private *dev_priv = dev->dev_private;
11592
11150 /* This function can be called both from intel_modeset_setup_hw_state or 11593 /* This function can be called both from intel_modeset_setup_hw_state or
11151 * at a very early point in our resume sequence, where the power well 11594 * at a very early point in our resume sequence, where the power well
11152 * structures are not yet restored. Since this function is at a very 11595 * structures are not yet restored. Since this function is at a very
@@ -11154,14 +11597,10 @@ void i915_redisable_vga(struct drm_device *dev)
11154 * level, just check if the power well is enabled instead of trying to 11597 * level, just check if the power well is enabled instead of trying to
11155 * follow the "don't touch the power well if we don't need it" policy 11598 * follow the "don't touch the power well if we don't need it" policy
11156 * the rest of the driver uses. */ 11599 * the rest of the driver uses. */
11157 if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && 11600 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
11158 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
11159 return; 11601 return;
11160 11602
11161 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 11603 i915_redisable_vga_power_on(dev);
11162 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
11163 i915_disable_vga(dev);
11164 }
11165} 11604}
11166 11605
11167static void intel_modeset_readout_hw_state(struct drm_device *dev) 11606static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -11265,9 +11704,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11265 */ 11704 */
11266 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11705 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11267 base.head) { 11706 base.head) {
11268 if (crtc->active && i915_fastboot) { 11707 if (crtc->active && i915.fastboot) {
11269 intel_crtc_mode_from_pipe_config(crtc, &crtc->config); 11708 intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
11270
11271 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 11709 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
11272 crtc->base.base.id); 11710 crtc->base.base.id);
11273 drm_mode_debug_printmodeline(&crtc->base.mode); 11711 drm_mode_debug_printmodeline(&crtc->base.mode);
@@ -11313,7 +11751,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11313 dev_priv->pipe_to_crtc_mapping[pipe]; 11751 dev_priv->pipe_to_crtc_mapping[pipe];
11314 11752
11315 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 11753 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
11316 crtc->fb); 11754 crtc->primary->fb);
11317 } 11755 }
11318 } else { 11756 } else {
11319 intel_modeset_update_staged_output_state(dev); 11757 intel_modeset_update_staged_output_state(dev);
@@ -11324,14 +11762,44 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11324 11762
11325void intel_modeset_gem_init(struct drm_device *dev) 11763void intel_modeset_gem_init(struct drm_device *dev)
11326{ 11764{
11765 struct drm_crtc *c;
11766 struct intel_framebuffer *fb;
11767
11768 mutex_lock(&dev->struct_mutex);
11769 intel_init_gt_powersave(dev);
11770 mutex_unlock(&dev->struct_mutex);
11771
11327 intel_modeset_init_hw(dev); 11772 intel_modeset_init_hw(dev);
11328 11773
11329 intel_setup_overlay(dev); 11774 intel_setup_overlay(dev);
11330 11775
11331 mutex_lock(&dev->mode_config.mutex); 11776 /*
11332 drm_mode_config_reset(dev); 11777 * Make sure any fbs we allocated at startup are properly
11333 intel_modeset_setup_hw_state(dev, false); 11778 * pinned & fenced. When we do the allocation it's too early
11334 mutex_unlock(&dev->mode_config.mutex); 11779 * for this.
11780 */
11781 mutex_lock(&dev->struct_mutex);
11782 list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
11783 if (!c->primary->fb)
11784 continue;
11785
11786 fb = to_intel_framebuffer(c->primary->fb);
11787 if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
11788 DRM_ERROR("failed to pin boot fb on pipe %d\n",
11789 to_intel_crtc(c)->pipe);
11790 drm_framebuffer_unreference(c->primary->fb);
11791 c->primary->fb = NULL;
11792 }
11793 }
11794 mutex_unlock(&dev->struct_mutex);
11795}
11796
11797void intel_connector_unregister(struct intel_connector *intel_connector)
11798{
11799 struct drm_connector *connector = &intel_connector->base;
11800
11801 intel_panel_destroy_backlight(connector);
11802 drm_sysfs_connector_remove(connector);
11335} 11803}
11336 11804
11337void intel_modeset_cleanup(struct drm_device *dev) 11805void intel_modeset_cleanup(struct drm_device *dev)
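intel_modeset_gem_init() now walks the CRTCs and pins/fences any framebuffer taken over from the BIOS, because at the point the fb was created GEM was not yet ready for pinning; on failure it drops the fb so the pipe falls back to a normal full modeset. A stand-in sketch of that pin-or-drop loop:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fb { int refs; };
struct crtc { struct fb *primary_fb; };

static int pin_and_fence(struct fb *fb) { (void)fb; return 0; /* 0 == ok */ }
static void fb_unref(struct fb *fb) { fb->refs--; }

static void pin_boot_fbs(struct crtc *crtcs, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                struct fb *fb = crtcs[i].primary_fb;

                if (!fb)
                        continue;

                if (pin_and_fence(fb)) {
                        fprintf(stderr, "failed to pin boot fb on pipe %zu\n", i);
                        fb_unref(fb);
                        crtcs[i].primary_fb = NULL;  /* forces a full modeset */
                }
        }
}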
@@ -11359,7 +11827,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
11359 11827
11360 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 11828 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
11361 /* Skip inactive CRTCs */ 11829 /* Skip inactive CRTCs */
11362 if (!crtc->fb) 11830 if (!crtc->primary->fb)
11363 continue; 11831 continue;
11364 11832
11365 intel_increase_pllclock(crtc); 11833 intel_increase_pllclock(crtc);
@@ -11378,13 +11846,19 @@ void intel_modeset_cleanup(struct drm_device *dev)
11378 11846
11379 /* destroy the backlight and sysfs files before encoders/connectors */ 11847 /* destroy the backlight and sysfs files before encoders/connectors */
11380 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 11848 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11381 intel_panel_destroy_backlight(connector); 11849 struct intel_connector *intel_connector;
11382 drm_sysfs_connector_remove(connector); 11850
11851 intel_connector = to_intel_connector(connector);
11852 intel_connector->unregister(intel_connector);
11383 } 11853 }
11384 11854
11385 drm_mode_config_cleanup(dev); 11855 drm_mode_config_cleanup(dev);
11386 11856
11387 intel_cleanup_overlay(dev); 11857 intel_cleanup_overlay(dev);
11858
11859 mutex_lock(&dev->struct_mutex);
11860 intel_cleanup_gt_powersave(dev);
11861 mutex_unlock(&dev->struct_mutex);
11388} 11862}
11389 11863
11390/* 11864/*
@@ -11412,12 +11886,24 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11412 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 11886 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11413 u16 gmch_ctrl; 11887 u16 gmch_ctrl;
11414 11888
11415 pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); 11889 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
11890 DRM_ERROR("failed to read control word\n");
11891 return -EIO;
11892 }
11893
11894 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
11895 return 0;
11896
11416 if (state) 11897 if (state)
11417 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11898 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11418 else 11899 else
11419 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11900 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11420 pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); 11901
11902 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
11903 DRM_ERROR("failed to write control word\n");
11904 return -EIO;
11905 }
11906
11421 return 0; 11907 return 0;
11422} 11908}
11423 11909
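intel_modeset_vga_set_state() gains two defensive steps in the hunk above: PCI config-space accesses are now checked and turned into -EIO, and the write is skipped entirely when the VGA-disable bit already matches the requested state. The same shape with stand-in PCI helpers and an assumed bit position:

#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

#define VGA_DISABLE (1u << 1)   /* assumed bit position */

static int pci_read_word(uint16_t *val)  { *val = 0; return 0; }
static int pci_write_word(uint16_t val)  { (void)val; return 0; }

static int vga_set_state(bool enable)
{
        uint16_t ctrl;

        if (pci_read_word(&ctrl))
                return -EIO;                    /* propagate read failures */

        /* disable-bit set == VGA off; bail out if nothing would change */
        if (!!(ctrl & VGA_DISABLE) == !enable)
                return 0;

        if (enable)
                ctrl &= ~VGA_DISABLE;
        else
                ctrl |= VGA_DISABLE;

        return pci_write_word(ctrl) ? -EIO : 0;
}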
@@ -11467,7 +11953,7 @@ struct intel_display_error_state {
11467struct intel_display_error_state * 11953struct intel_display_error_state *
11468intel_display_capture_error_state(struct drm_device *dev) 11954intel_display_capture_error_state(struct drm_device *dev)
11469{ 11955{
11470 drm_i915_private_t *dev_priv = dev->dev_private; 11956 struct drm_i915_private *dev_priv = dev->dev_private;
11471 struct intel_display_error_state *error; 11957 struct intel_display_error_state *error;
11472 int transcoders[] = { 11958 int transcoders[] = {
11473 TRANSCODER_A, 11959 TRANSCODER_A,
@@ -11489,7 +11975,8 @@ intel_display_capture_error_state(struct drm_device *dev)
11489 11975
11490 for_each_pipe(i) { 11976 for_each_pipe(i) {
11491 error->pipe[i].power_domain_on = 11977 error->pipe[i].power_domain_on =
11492 intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)); 11978 intel_display_power_enabled_sw(dev_priv,
11979 POWER_DOMAIN_PIPE(i));
11493 if (!error->pipe[i].power_domain_on) 11980 if (!error->pipe[i].power_domain_on)
11494 continue; 11981 continue;
11495 11982
@@ -11527,7 +12014,7 @@ intel_display_capture_error_state(struct drm_device *dev)
11527 enum transcoder cpu_transcoder = transcoders[i]; 12014 enum transcoder cpu_transcoder = transcoders[i];
11528 12015
11529 error->transcoder[i].power_domain_on = 12016 error->transcoder[i].power_domain_on =
11530 intel_display_power_enabled_sw(dev, 12017 intel_display_power_enabled_sw(dev_priv,
11531 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 12018 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
11532 if (!error->transcoder[i].power_domain_on) 12019 if (!error->transcoder[i].power_domain_on)
11533 continue; 12020 continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2688f6d64bb9..a0dad1a2f819 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
91} 91}
92 92
93static void intel_dp_link_down(struct intel_dp *intel_dp); 93static void intel_dp_link_down(struct intel_dp *intel_dp);
94static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
95static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
94 96
95static int 97static int
96intel_dp_max_link_bw(struct intel_dp *intel_dp) 98intel_dp_max_link_bw(struct intel_dp *intel_dp)
97{ 99{
98 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 100 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
101 struct drm_device *dev = intel_dp->attached_connector->base.dev;
99 102
100 switch (max_link_bw) { 103 switch (max_link_bw) {
101 case DP_LINK_BW_1_62: 104 case DP_LINK_BW_1_62:
102 case DP_LINK_BW_2_7: 105 case DP_LINK_BW_2_7:
103 break; 106 break;
104 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
105 max_link_bw = DP_LINK_BW_2_7; 108 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
109 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
110 max_link_bw = DP_LINK_BW_5_4;
111 else
112 max_link_bw = DP_LINK_BW_2_7;
106 break; 113 break;
107 default: 114 default:
108 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 115 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
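The change above lets HBR2 (5.4 Gbps) through only when both ends qualify: the sink must report DPCD revision 1.2 or later, and the source must be Haswell or gen8+. The DPCD encodes link rates in units of 0.27 Gbps, so 0x06/0x0a/0x14 correspond to 1.62/2.7/5.4. A self-contained sketch of the clamp:

#include <stdbool.h>

enum { BW_1_62 = 0x06, BW_2_7 = 0x0a, BW_5_4 = 0x14 };

static int max_link_bw(int sink_max, bool source_has_hbr2, int dpcd_rev)
{
        switch (sink_max) {
        case BW_1_62:
        case BW_2_7:
                return sink_max;
        case BW_5_4:    /* DP 1.2 sinks may advertise a higher rate */
                if (source_has_hbr2 && dpcd_rev >= 0x12)
                        return BW_5_4;
                return BW_2_7;
        default:
                /* invalid advertisement: fall back to the lowest rate */
                return BW_1_62;
        }
}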
@@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 301 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295} 302}
296 303
297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 304static bool edp_have_panel_power(struct intel_dp *intel_dp)
298{ 305{
299 struct drm_device *dev = intel_dp_to_dev(intel_dp); 306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
300 struct drm_i915_private *dev_priv = dev->dev_private; 307 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -302,12 +309,13 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 309 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
303} 310}
304 311
305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 312static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
306{ 313{
307 struct drm_device *dev = intel_dp_to_dev(intel_dp); 314 struct drm_device *dev = intel_dp_to_dev(intel_dp);
308 struct drm_i915_private *dev_priv = dev->dev_private; 315 struct drm_i915_private *dev_priv = dev->dev_private;
309 316
310 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 317 return !dev_priv->pm.suspended &&
318 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
311} 319}
312 320
313static void 321static void
@@ -319,7 +327,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
319 if (!is_edp(intel_dp)) 327 if (!is_edp(intel_dp))
320 return; 328 return;
321 329
322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 330 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
323 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 331 WARN(1, "eDP powered off while attempting aux channel communication.\n");
324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 332 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
325 I915_READ(_pp_stat_reg(intel_dp)), 333 I915_READ(_pp_stat_reg(intel_dp)),
@@ -351,31 +359,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
351 return status; 359 return status;
352} 360}
353 361
354static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, 362static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
355 int index)
356{ 363{
357 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 364 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
358 struct drm_device *dev = intel_dig_port->base.base.dev; 365 struct drm_device *dev = intel_dig_port->base.base.dev;
359 struct drm_i915_private *dev_priv = dev->dev_private;
360 366
361 /* The clock divider is based off the hrawclk, 367 /*
362 * and would like to run at 2MHz. So, take the 368 * The clock divider is based off the hrawclk, and would like to run at
363 * hrawclk value and divide by 2 and use that 369 * 2MHz. So, take the hrawclk value and divide by 2 and use that
364 *
365 * Note that PCH attached eDP panels should use a 125MHz input
366 * clock divider.
367 */ 370 */
368 if (IS_VALLEYVIEW(dev)) { 371 return index ? 0 : intel_hrawclk(dev) / 2;
369 return index ? 0 : 100; 372}
370 } else if (intel_dig_port->port == PORT_A) { 373
371 if (index) 374static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
372 return 0; 375{
373 if (HAS_DDI(dev)) 376 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 377 struct drm_device *dev = intel_dig_port->base.base.dev;
375 else if (IS_GEN6(dev) || IS_GEN7(dev)) 378
379 if (index)
380 return 0;
381
382 if (intel_dig_port->port == PORT_A) {
383 if (IS_GEN6(dev) || IS_GEN7(dev))
376 return 200; /* SNB & IVB eDP input clock at 400Mhz */ 384 return 200; /* SNB & IVB eDP input clock at 400Mhz */
377 else 385 else
378 return 225; /* eDP input clock at 450Mhz */ 386 return 225; /* eDP input clock at 450Mhz */
387 } else {
388 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
389 }
390}
391
392static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
393{
394 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
395 struct drm_device *dev = intel_dig_port->base.base.dev;
396 struct drm_i915_private *dev_priv = dev->dev_private;
397
398 if (intel_dig_port->port == PORT_A) {
399 if (index)
400 return 0;
401 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
379 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 402 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
380 /* Workaround for non-ULT HSW */ 403 /* Workaround for non-ULT HSW */
381 switch (index) { 404 switch (index) {
@@ -383,13 +406,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
383 case 1: return 72; 406 case 1: return 72;
384 default: return 0; 407 default: return 0;
385 } 408 }
386 } else if (HAS_PCH_SPLIT(dev)) { 409 } else {
387 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 410 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
388 } else {
389 return index ? 0 :intel_hrawclk(dev) / 2;
390 } 411 }
391} 412}
392 413
414static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
415{
416 return index ? 0 : 100;
417}
418
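Splitting get_aux_clock_divider() per platform raises the question of where the hook gets picked; the assignment lands later in this patch, in intel_dp_init_connector(), roughly along these lines (a sketch of the selection logic from the surrounding series, not this hunk's text, so ordering and predicates may differ slightly):

        /* intel_dp vfuncs */
        if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

        intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;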
419static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
420 bool has_aux_irq,
421 int send_bytes,
422 uint32_t aux_clock_divider)
423{
424 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
425 struct drm_device *dev = intel_dig_port->base.base.dev;
426 uint32_t precharge, timeout;
427
428 if (IS_GEN6(dev))
429 precharge = 3;
430 else
431 precharge = 5;
432
433 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
434 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
435 else
436 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
437
438 return DP_AUX_CH_CTL_SEND_BUSY |
439 DP_AUX_CH_CTL_DONE |
440 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
441 DP_AUX_CH_CTL_TIME_OUT_ERROR |
442 timeout |
443 DP_AUX_CH_CTL_RECEIVE_ERROR |
444 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
445 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
446 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
447}
448
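For reference, the control word assembled above packs its fields at fixed offsets (field positions assumed from i915_reg.h); the decoder below is a throwaway user-space sketch for sanity-checking register dumps, not driver code:

#include <stdio.h>

/* Assumed field map: bit 31 SEND_BUSY, bits 24:20 message size, bits
 * 19:16 precharge time (2us units), bits 10:0 2x bit-clock divider. */
void decode_aux_ctl(unsigned int v)
{
        printf("busy=%u size=%u precharge=%u div2x=%u\n",
               v >> 31, (v >> 20) & 0x1f, (v >> 16) & 0xf, v & 0x7ff);
}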
393static int 449static int
394intel_dp_aux_ch(struct intel_dp *intel_dp, 450intel_dp_aux_ch(struct intel_dp *intel_dp,
395 uint8_t *send, int send_bytes, 451 uint8_t *send, int send_bytes,
@@ -403,9 +459,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
403 uint32_t aux_clock_divider; 459 uint32_t aux_clock_divider;
404 int i, ret, recv_bytes; 460 int i, ret, recv_bytes;
405 uint32_t status; 461 uint32_t status;
406 int try, precharge, clock = 0; 462 int try, clock = 0;
407 bool has_aux_irq = HAS_AUX_IRQ(dev); 463 bool has_aux_irq = HAS_AUX_IRQ(dev);
408 uint32_t timeout; 464 bool vdd;
465
466 vdd = _edp_panel_vdd_on(intel_dp);
409 467
410 /* dp aux is extremely sensitive to irq latency, hence request the 468 /* dp aux is extremely sensitive to irq latency, hence request the
411 * lowest possible wakeup latency and so prevent the cpu from going into 469 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -415,16 +473,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
415 473
416 intel_dp_check_edp(intel_dp); 474 intel_dp_check_edp(intel_dp);
417 475
418 if (IS_GEN6(dev))
419 precharge = 3;
420 else
421 precharge = 5;
422
423 if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
424 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
425 else
426 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
427
428 intel_aux_display_runtime_get(dev_priv); 476 intel_aux_display_runtime_get(dev_priv);
429 477
430 /* Try to wait for any previous AUX channel activity */ 478 /* Try to wait for any previous AUX channel activity */
@@ -448,7 +496,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
448 goto out; 496 goto out;
449 } 497 }
450 498
451 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 499 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
500 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
501 has_aux_irq,
502 send_bytes,
503 aux_clock_divider);
504
452 /* Must try at least 3 times according to DP spec */ 505 /* Must try at least 3 times according to DP spec */
453 for (try = 0; try < 5; try++) { 506 for (try = 0; try < 5; try++) {
454 /* Load the send data into the aux channel data registers */ 507 /* Load the send data into the aux channel data registers */
@@ -457,16 +510,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
457 pack_aux(send + i, send_bytes - i)); 510 pack_aux(send + i, send_bytes - i));
458 511
459 /* Send the command and wait for it to complete */ 512 /* Send the command and wait for it to complete */
460 I915_WRITE(ch_ctl, 513 I915_WRITE(ch_ctl, send_ctl);
461 DP_AUX_CH_CTL_SEND_BUSY |
462 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
463 timeout |
464 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
465 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
466 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
467 DP_AUX_CH_CTL_DONE |
468 DP_AUX_CH_CTL_TIME_OUT_ERROR |
469 DP_AUX_CH_CTL_RECEIVE_ERROR);
470 514
471 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 515 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
472 516
@@ -525,246 +569,140 @@ out:
525 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 569 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
526 intel_aux_display_runtime_put(dev_priv); 570 intel_aux_display_runtime_put(dev_priv);
527 571
572 if (vdd)
573 edp_panel_vdd_off(intel_dp, false);
574
528 return ret; 575 return ret;
529} 576}
530 577
531/* Write data to the aux channel in native mode */ 578#define HEADER_SIZE 4
532static int 579static ssize_t
533intel_dp_aux_native_write(struct intel_dp *intel_dp, 580intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
534 uint16_t address, uint8_t *send, int send_bytes)
535{ 581{
582 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
583 uint8_t txbuf[20], rxbuf[20];
584 size_t txsize, rxsize;
536 int ret; 585 int ret;
537 uint8_t msg[20];
538 int msg_bytes;
539 uint8_t ack;
540 int retry;
541 586
542 if (WARN_ON(send_bytes > 16)) 587 txbuf[0] = msg->request << 4;
543 return -E2BIG; 588 txbuf[1] = msg->address >> 8;
589 txbuf[2] = msg->address & 0xff;
590 txbuf[3] = msg->size - 1;
544 591
545 intel_dp_check_edp(intel_dp); 592 switch (msg->request & ~DP_AUX_I2C_MOT) {
546 msg[0] = DP_AUX_NATIVE_WRITE << 4; 593 case DP_AUX_NATIVE_WRITE:
547 msg[1] = address >> 8; 594 case DP_AUX_I2C_WRITE:
548 msg[2] = address & 0xff; 595 txsize = HEADER_SIZE + msg->size;
549 msg[3] = send_bytes - 1; 596 rxsize = 1;
550 memcpy(&msg[4], send, send_bytes);
551 msg_bytes = send_bytes + 4;
552 for (retry = 0; retry < 7; retry++) {
553 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
554 if (ret < 0)
555 return ret;
556 ack >>= 4;
557 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
558 return send_bytes;
559 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
560 usleep_range(400, 500);
561 else
562 return -EIO;
563 }
564 597
565 DRM_ERROR("too many retries, giving up\n"); 598 if (WARN_ON(txsize > 20))
566 return -EIO; 599 return -E2BIG;
567}
568 600
569/* Write a single byte to the aux channel in native mode */ 601 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
570static int
571intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
572 uint16_t address, uint8_t byte)
573{
574 return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
575}
576 602
577/* read bytes from a native aux channel */ 603 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
578static int 604 if (ret > 0) {
579intel_dp_aux_native_read(struct intel_dp *intel_dp, 605 msg->reply = rxbuf[0] >> 4;
580 uint16_t address, uint8_t *recv, int recv_bytes) 606
581{ 607 /* Return payload size. */
582 uint8_t msg[4]; 608 ret = msg->size;
583 int msg_bytes; 609 }
584 uint8_t reply[20]; 610 break;
585 int reply_bytes;
586 uint8_t ack;
587 int ret;
588 int retry;
589 611
590 if (WARN_ON(recv_bytes > 19)) 612 case DP_AUX_NATIVE_READ:
591 return -E2BIG; 613 case DP_AUX_I2C_READ:
614 txsize = HEADER_SIZE;
615 rxsize = msg->size + 1;
592 616
593 intel_dp_check_edp(intel_dp); 617 if (WARN_ON(rxsize > 20))
594 msg[0] = DP_AUX_NATIVE_READ << 4; 618 return -E2BIG;
595 msg[1] = address >> 8; 619
596 msg[2] = address & 0xff; 620 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
597 msg[3] = recv_bytes - 1; 621 if (ret > 0) {
598 622 msg->reply = rxbuf[0] >> 4;
599 msg_bytes = 4; 623 /*
600 reply_bytes = recv_bytes + 1; 624 * Assume happy day, and copy the data. The caller is
601 625 * expected to check msg->reply before touching it.
602 for (retry = 0; retry < 7; retry++) { 626 *
603 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 627 * Return payload size.
604 reply, reply_bytes); 628 */
605 if (ret == 0) 629 ret--;
606 return -EPROTO; 630 memcpy(msg->buffer, rxbuf + 1, ret);
607 if (ret < 0)
608 return ret;
609 ack = reply[0] >> 4;
610 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
611 memcpy(recv, reply + 1, ret - 1);
612 return ret - 1;
613 } 631 }
614 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 632 break;
615 usleep_range(400, 500); 633
616 else 634 default:
617 return -EIO; 635 ret = -EINVAL;
636 break;
618 } 637 }
619 638
620 DRM_ERROR("too many retries, giving up\n"); 639 return ret;
621 return -EIO;
622} 640}
623 641
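With intel_dp->aux.transfer wired to intel_dp_aux_transfer(), the generic helpers in drm_dp_helper.c take over header packing and reply handling, so a DPCD access elsewhere in the driver shrinks to something like this (hedged sketch; the local variable is illustrative):

        u8 rev;

        /* One byte from DPCD offset 0x000, routed through the hook above. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPCD_REV, &rev) == 1)
                DRM_DEBUG_KMS("sink DPCD revision 0x%02x\n", rev);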
624static int 642static void
625intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 643intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
626 uint8_t write_byte, uint8_t *read_byte) 644{
627{ 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
628 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 646 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
629 struct intel_dp *intel_dp = container_of(adapter, 647 enum port port = intel_dig_port->port;
630 struct intel_dp, 648 const char *name = NULL;
631 adapter);
632 uint16_t address = algo_data->address;
633 uint8_t msg[5];
634 uint8_t reply[2];
635 unsigned retry;
636 int msg_bytes;
637 int reply_bytes;
638 int ret; 649 int ret;
639 650
640 ironlake_edp_panel_vdd_on(intel_dp); 651 switch (port) {
641 intel_dp_check_edp(intel_dp); 652 case PORT_A:
642 /* Set up the command byte */ 653 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
643 if (mode & MODE_I2C_READ) 654 name = "DPDDC-A";
644 msg[0] = DP_AUX_I2C_READ << 4; 655 break;
645 else 656 case PORT_B:
646 msg[0] = DP_AUX_I2C_WRITE << 4; 657 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
647 658 name = "DPDDC-B";
648 if (!(mode & MODE_I2C_STOP)) 659 break;
649 msg[0] |= DP_AUX_I2C_MOT << 4; 660 case PORT_C:
650 661 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
651 msg[1] = address >> 8; 662 name = "DPDDC-C";
652 msg[2] = address;
653
654 switch (mode) {
655 case MODE_I2C_WRITE:
656 msg[3] = 0;
657 msg[4] = write_byte;
658 msg_bytes = 5;
659 reply_bytes = 1;
660 break; 663 break;
661 case MODE_I2C_READ: 664 case PORT_D:
662 msg[3] = 0; 665 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
663 msg_bytes = 4; 666 name = "DPDDC-D";
664 reply_bytes = 2;
665 break; 667 break;
666 default: 668 default:
667 msg_bytes = 3; 669 BUG();
668 reply_bytes = 1;
669 break;
670 } 670 }
671 671
672 /* 672 if (!HAS_DDI(dev))
673 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is 673 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
674 * required to retry at least seven times upon receiving AUX_DEFER
675 * before giving up the AUX transaction.
676 */
677 for (retry = 0; retry < 7; retry++) {
678 ret = intel_dp_aux_ch(intel_dp,
679 msg, msg_bytes,
680 reply, reply_bytes);
681 if (ret < 0) {
682 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
683 goto out;
684 }
685 674
686 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { 675 intel_dp->aux.name = name;
687 case DP_AUX_NATIVE_REPLY_ACK: 676 intel_dp->aux.dev = dev->dev;
688 /* I2C-over-AUX Reply field is only valid 677 intel_dp->aux.transfer = intel_dp_aux_transfer;
689 * when paired with AUX ACK.
690 */
691 break;
692 case DP_AUX_NATIVE_REPLY_NACK:
693 DRM_DEBUG_KMS("aux_ch native nack\n");
694 ret = -EREMOTEIO;
695 goto out;
696 case DP_AUX_NATIVE_REPLY_DEFER:
697 /*
698 * For now, just give more slack to branch devices. We
699 * could check the DPCD for I2C bit rate capabilities,
700 * and if available, adjust the interval. We could also
701 * be more careful with DP-to-Legacy adapters where a
702 * long legacy cable may force very low I2C bit rates.
703 */
704 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
705 DP_DWN_STRM_PORT_PRESENT)
706 usleep_range(500, 600);
707 else
708 usleep_range(300, 400);
709 continue;
710 default:
711 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
712 reply[0]);
713 ret = -EREMOTEIO;
714 goto out;
715 }
716 678
717 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { 679 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
718 case DP_AUX_I2C_REPLY_ACK: 680 connector->base.kdev->kobj.name);
719 if (mode == MODE_I2C_READ) {
720 *read_byte = reply[1];
721 }
722 ret = reply_bytes - 1;
723 goto out;
724 case DP_AUX_I2C_REPLY_NACK:
725 DRM_DEBUG_KMS("aux_i2c nack\n");
726 ret = -EREMOTEIO;
727 goto out;
728 case DP_AUX_I2C_REPLY_DEFER:
729 DRM_DEBUG_KMS("aux_i2c defer\n");
730 udelay(100);
731 break;
732 default:
733 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
734 ret = -EREMOTEIO;
735 goto out;
736 }
737 }
738 681
739 DRM_ERROR("too many retries, giving up\n"); 682 ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
740 ret = -EREMOTEIO; 683 if (ret < 0) {
684 DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
685 name, ret);
686 return;
687 }
741 688
742out: 689 ret = sysfs_create_link(&connector->base.kdev->kobj,
743 ironlake_edp_panel_vdd_off(intel_dp, false); 690 &intel_dp->aux.ddc.dev.kobj,
744 return ret; 691 intel_dp->aux.ddc.dev.kobj.name);
692 if (ret < 0) {
693 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
694 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
695 }
745} 696}
746 697
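Once drm_dp_aux_register_i2c_bus() succeeds, the helper exposes an i2c_adapter at intel_dp->aux.ddc, which is what lets the EDID and DDC-probe paths below drop the old intel_dp->adapter; a hedged usage sketch (assuming an intel_connector, as in this function):

        struct edid *edid = drm_get_edid(&connector->base, &intel_dp->aux.ddc);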
747static int 698static void
748intel_dp_i2c_init(struct intel_dp *intel_dp, 699intel_dp_connector_unregister(struct intel_connector *intel_connector)
749 struct intel_connector *intel_connector, const char *name)
750{ 700{
751 int ret; 701 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
752
753 DRM_DEBUG_KMS("i2c_init %s\n", name);
754 intel_dp->algo.running = false;
755 intel_dp->algo.address = 0;
756 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
757
758 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
759 intel_dp->adapter.owner = THIS_MODULE;
760 intel_dp->adapter.class = I2C_CLASS_DDC;
761 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
762 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
763 intel_dp->adapter.algo_data = &intel_dp->algo;
764 intel_dp->adapter.dev.parent = intel_connector->base.kdev;
765 702
766 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 703 sysfs_remove_link(&intel_connector->base.kdev->kobj,
767 return ret; 704 intel_dp->aux.ddc.dev.kobj.name);
705 intel_connector_unregister(intel_connector);
768} 706}
769 707
770static void 708static void
@@ -812,9 +750,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
812 struct intel_connector *intel_connector = intel_dp->attached_connector; 750 struct intel_connector *intel_connector = intel_dp->attached_connector;
813 int lane_count, clock; 751 int lane_count, clock;
814 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 752 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
815 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 753 /* Conveniently, the link BW constants become indices with a shift...*/
754 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
816 int bpp, mode_rate; 755 int bpp, mode_rate;
817 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 756 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
818 int link_avail, link_clock; 757 int link_avail, link_clock;
819 758
820 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 759 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
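The shift the new comment alludes to works because the three bandwidth codes are 0x06, 0x0a and 0x14, so dividing by 8 yields 0, 1 and 2, exactly the bws[] indices. A throwaway check (constants from drm_dp_helper.h):

#include <assert.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a
#define DP_LINK_BW_5_4  0x14

int main(void)
{
        assert((DP_LINK_BW_1_62 >> 3) == 0);
        assert((DP_LINK_BW_2_7  >> 3) == 1);
        assert((DP_LINK_BW_5_4  >> 3) == 2);
        return 0;
}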
@@ -855,8 +794,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
855 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 794 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
856 bpp); 795 bpp);
857 796
858 for (clock = 0; clock <= max_clock; clock++) { 797 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
859 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 798 for (clock = 0; clock <= max_clock; clock++) {
860 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 799 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
861 link_avail = intel_dp_max_data_rate(link_clock, 800 link_avail = intel_dp_max_data_rate(link_clock,
862 lane_count); 801 lane_count);
@@ -1015,16 +954,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
1015 ironlake_set_pll_cpu_edp(intel_dp); 954 ironlake_set_pll_cpu_edp(intel_dp);
1016} 955}
1017 956
1018#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 957#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1019#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 958#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1020 959
1021#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 960#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1022#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 961#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1023 962
1024#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 963#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1025#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 964#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1026 965
1027static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 966static void wait_panel_status(struct intel_dp *intel_dp,
1028 u32 mask, 967 u32 mask,
1029 u32 value) 968 u32 value)
1030{ 969{
@@ -1049,24 +988,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
1049 DRM_DEBUG_KMS("Wait complete\n"); 988 DRM_DEBUG_KMS("Wait complete\n");
1050} 989}
1051 990
1052static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 991static void wait_panel_on(struct intel_dp *intel_dp)
1053{ 992{
1054 DRM_DEBUG_KMS("Wait for panel power on\n"); 993 DRM_DEBUG_KMS("Wait for panel power on\n");
1055 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 994 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1056} 995}
1057 996
1058static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 997static void wait_panel_off(struct intel_dp *intel_dp)
1059{ 998{
1060 DRM_DEBUG_KMS("Wait for panel power off time\n"); 999 DRM_DEBUG_KMS("Wait for panel power off time\n");
1061 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1000 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1062} 1001}
1063 1002
1064static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 1003static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1065{ 1004{
1066 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1005 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1067 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1006
1007 /* When we disable the VDD override bit last we have to do the manual
1008 * wait. */
1009 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1010 intel_dp->panel_power_cycle_delay);
1011
1012 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1013}
1014
1015static void wait_backlight_on(struct intel_dp *intel_dp)
1016{
1017 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1018 intel_dp->backlight_on_delay);
1068} 1019}
1069 1020
1021static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1022{
1023 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1024 intel_dp->backlight_off_delay);
1025}
1070 1026
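The new wait_* helpers defer the fixed panel delays until they would actually block something, instead of msleep()ing unconditionally at state-change time. They lean on a small jiffies helper from intel_drv.h whose shape is roughly this (a simplified sketch assuming these semantics; the real helper sleeps via schedule_timeout_uninterruptible()):

        /* Sleep until at least @to_wait_ms have elapsed since
         * @timestamp_jiffies; return immediately if they already have. */
        static void wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies,
                                                   int to_wait_ms)
        {
                unsigned long target = timestamp_jiffies +
                                       msecs_to_jiffies(to_wait_ms);

                if (time_after(target, jiffies))
                        msleep(jiffies_to_msecs(target - jiffies));
        }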
1071/* Read the current pp_control value, unlocking the register if it 1027/* Read the current pp_control value, unlocking the register if it
1072 * is locked 1028 * is locked
@@ -1084,30 +1040,28 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1084 return control; 1040 return control;
1085} 1041}
1086 1042
1087void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1043static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1088{ 1044{
1089 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1045 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1090 struct drm_i915_private *dev_priv = dev->dev_private; 1046 struct drm_i915_private *dev_priv = dev->dev_private;
1091 u32 pp; 1047 u32 pp;
1092 u32 pp_stat_reg, pp_ctrl_reg; 1048 u32 pp_stat_reg, pp_ctrl_reg;
1049 bool need_to_disable = !intel_dp->want_panel_vdd;
1093 1050
1094 if (!is_edp(intel_dp)) 1051 if (!is_edp(intel_dp))
1095 return; 1052 return false;
1096
1097 WARN(intel_dp->want_panel_vdd,
1098 "eDP VDD already requested on\n");
1099 1053
1100 intel_dp->want_panel_vdd = true; 1054 intel_dp->want_panel_vdd = true;
1101 1055
1102 if (ironlake_edp_have_panel_vdd(intel_dp)) 1056 if (edp_have_panel_vdd(intel_dp))
1103 return; 1057 return need_to_disable;
1104 1058
1105 intel_runtime_pm_get(dev_priv); 1059 intel_runtime_pm_get(dev_priv);
1106 1060
1107 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1061 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1108 1062
1109 if (!ironlake_edp_have_panel_power(intel_dp)) 1063 if (!edp_have_panel_power(intel_dp))
1110 ironlake_wait_panel_power_cycle(intel_dp); 1064 wait_panel_power_cycle(intel_dp);
1111 1065
1112 pp = ironlake_get_pp_control(intel_dp); 1066 pp = ironlake_get_pp_control(intel_dp);
1113 pp |= EDP_FORCE_VDD; 1067 pp |= EDP_FORCE_VDD;
@@ -1122,13 +1076,24 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1122 /* 1076 /*
1123 * If the panel wasn't on, delay before accessing aux channel 1077 * If the panel wasn't on, delay before accessing aux channel
1124 */ 1078 */
1125 if (!ironlake_edp_have_panel_power(intel_dp)) { 1079 if (!edp_have_panel_power(intel_dp)) {
1126 DRM_DEBUG_KMS("eDP was not running\n"); 1080 DRM_DEBUG_KMS("eDP was not running\n");
1127 msleep(intel_dp->panel_power_up_delay); 1081 msleep(intel_dp->panel_power_up_delay);
1128 } 1082 }
1083
1084 return need_to_disable;
1085}
1086
1087void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1088{
1089 if (is_edp(intel_dp)) {
1090 bool vdd = _edp_panel_vdd_on(intel_dp);
1091
1092 WARN(!vdd, "eDP VDD already requested on\n");
1093 }
1129} 1094}
1130 1095
1131static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1096static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1132{ 1097{
1133 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1134 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1137,7 +1102,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1137 1102
1138 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1103 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1139 1104
1140 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1105 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1141 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1106 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1142 1107
1143 pp = ironlake_get_pp_control(intel_dp); 1108 pp = ironlake_get_pp_control(intel_dp);
@@ -1154,24 +1119,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1154 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1119 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1155 1120
1156 if ((pp & POWER_TARGET_ON) == 0) 1121 if ((pp & POWER_TARGET_ON) == 0)
1157 msleep(intel_dp->panel_power_cycle_delay); 1122 intel_dp->last_power_cycle = jiffies;
1158 1123
1159 intel_runtime_pm_put(dev_priv); 1124 intel_runtime_pm_put(dev_priv);
1160 } 1125 }
1161} 1126}
1162 1127
1163static void ironlake_panel_vdd_work(struct work_struct *__work) 1128static void edp_panel_vdd_work(struct work_struct *__work)
1164{ 1129{
1165 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1130 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1166 struct intel_dp, panel_vdd_work); 1131 struct intel_dp, panel_vdd_work);
1167 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1132 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1168 1133
1169 mutex_lock(&dev->mode_config.mutex); 1134 mutex_lock(&dev->mode_config.mutex);
1170 ironlake_panel_vdd_off_sync(intel_dp); 1135 edp_panel_vdd_off_sync(intel_dp);
1171 mutex_unlock(&dev->mode_config.mutex); 1136 mutex_unlock(&dev->mode_config.mutex);
1172} 1137}
1173 1138
1174void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1139static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1175{ 1140{
1176 if (!is_edp(intel_dp)) 1141 if (!is_edp(intel_dp))
1177 return; 1142 return;
@@ -1181,7 +1146,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1181 intel_dp->want_panel_vdd = false; 1146 intel_dp->want_panel_vdd = false;
1182 1147
1183 if (sync) { 1148 if (sync) {
1184 ironlake_panel_vdd_off_sync(intel_dp); 1149 edp_panel_vdd_off_sync(intel_dp);
1185 } else { 1150 } else {
1186 /* 1151 /*
1187 * Queue the timer to fire a long 1152 * Queue the timer to fire a long
@@ -1193,7 +1158,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1193 } 1158 }
1194} 1159}
1195 1160
1196void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1161void intel_edp_panel_on(struct intel_dp *intel_dp)
1197{ 1162{
1198 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1163 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1199 struct drm_i915_private *dev_priv = dev->dev_private; 1164 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1205,12 +1170,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1205 1170
1206 DRM_DEBUG_KMS("Turn eDP power on\n"); 1171 DRM_DEBUG_KMS("Turn eDP power on\n");
1207 1172
1208 if (ironlake_edp_have_panel_power(intel_dp)) { 1173 if (edp_have_panel_power(intel_dp)) {
1209 DRM_DEBUG_KMS("eDP power already on\n"); 1174 DRM_DEBUG_KMS("eDP power already on\n");
1210 return; 1175 return;
1211 } 1176 }
1212 1177
1213 ironlake_wait_panel_power_cycle(intel_dp); 1178 wait_panel_power_cycle(intel_dp);
1214 1179
1215 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1180 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1216 pp = ironlake_get_pp_control(intel_dp); 1181 pp = ironlake_get_pp_control(intel_dp);
@@ -1228,7 +1193,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1228 I915_WRITE(pp_ctrl_reg, pp); 1193 I915_WRITE(pp_ctrl_reg, pp);
1229 POSTING_READ(pp_ctrl_reg); 1194 POSTING_READ(pp_ctrl_reg);
1230 1195
1231 ironlake_wait_panel_on(intel_dp); 1196 wait_panel_on(intel_dp);
1197 intel_dp->last_power_on = jiffies;
1232 1198
1233 if (IS_GEN5(dev)) { 1199 if (IS_GEN5(dev)) {
1234 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1200 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1237,7 +1203,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1237 } 1203 }
1238} 1204}
1239 1205
1240void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1206void intel_edp_panel_off(struct intel_dp *intel_dp)
1241{ 1207{
1242 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1208 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1243 struct drm_i915_private *dev_priv = dev->dev_private; 1209 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1249,27 +1215,31 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1249 1215
1250 DRM_DEBUG_KMS("Turn eDP power off\n"); 1216 DRM_DEBUG_KMS("Turn eDP power off\n");
1251 1217
1218 edp_wait_backlight_off(intel_dp);
1219
1252 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1220 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1253 1221
1254 pp = ironlake_get_pp_control(intel_dp); 1222 pp = ironlake_get_pp_control(intel_dp);
1255 /* We need to switch off panel power _and_ force vdd, for otherwise some 1223 /* We need to switch off panel power _and_ force vdd, for otherwise some
1256 * panels get very unhappy and cease to work. */ 1224 * panels get very unhappy and cease to work. */
1257 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1225 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1226 EDP_BLC_ENABLE);
1258 1227
1259 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1228 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1260 1229
1230 intel_dp->want_panel_vdd = false;
1231
1261 I915_WRITE(pp_ctrl_reg, pp); 1232 I915_WRITE(pp_ctrl_reg, pp);
1262 POSTING_READ(pp_ctrl_reg); 1233 POSTING_READ(pp_ctrl_reg);
1263 1234
1264 intel_dp->want_panel_vdd = false; 1235 intel_dp->last_power_cycle = jiffies;
1265 1236 wait_panel_off(intel_dp);
1266 ironlake_wait_panel_off(intel_dp);
1267 1237
1268 /* We got a reference when we enabled the VDD. */ 1238 /* We got a reference when we enabled the VDD. */
1269 intel_runtime_pm_put(dev_priv); 1239 intel_runtime_pm_put(dev_priv);
1270} 1240}
1271 1241
1272void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1242void intel_edp_backlight_on(struct intel_dp *intel_dp)
1273{ 1243{
1274 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1244 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1275 struct drm_device *dev = intel_dig_port->base.base.dev; 1245 struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1287,7 +1257,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1287 * link. So delay a bit to make sure the image is solid before 1257 * link. So delay a bit to make sure the image is solid before
1288 * allowing it to appear. 1258 * allowing it to appear.
1289 */ 1259 */
1290 msleep(intel_dp->backlight_on_delay); 1260 wait_backlight_on(intel_dp);
1291 pp = ironlake_get_pp_control(intel_dp); 1261 pp = ironlake_get_pp_control(intel_dp);
1292 pp |= EDP_BLC_ENABLE; 1262 pp |= EDP_BLC_ENABLE;
1293 1263
@@ -1299,7 +1269,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1299 intel_panel_enable_backlight(intel_dp->attached_connector); 1269 intel_panel_enable_backlight(intel_dp->attached_connector);
1300} 1270}
1301 1271
1302void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1272void intel_edp_backlight_off(struct intel_dp *intel_dp)
1303{ 1273{
1304 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1274 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1275 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1319,7 +1289,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1319 1289
1320 I915_WRITE(pp_ctrl_reg, pp); 1290 I915_WRITE(pp_ctrl_reg, pp);
1321 POSTING_READ(pp_ctrl_reg); 1291 POSTING_READ(pp_ctrl_reg);
1322 msleep(intel_dp->backlight_off_delay); 1292 intel_dp->last_backlight_off = jiffies;
1323} 1293}
1324 1294
1325static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1295static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1383,8 +1353,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1383 return; 1353 return;
1384 1354
1385 if (mode != DRM_MODE_DPMS_ON) { 1355 if (mode != DRM_MODE_DPMS_ON) {
1386 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1356 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1387 DP_SET_POWER_D3); 1357 DP_SET_POWER_D3);
1388 if (ret != 1) 1358 if (ret != 1)
1389 DRM_DEBUG_DRIVER("failed to write sink power state\n"); 1359 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1390 } else { 1360 } else {
@@ -1393,9 +1363,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1393 * time to wake up. 1363 * time to wake up.
1394 */ 1364 */
1395 for (i = 0; i < 3; i++) { 1365 for (i = 0; i < 3; i++) {
1396 ret = intel_dp_aux_native_write_1(intel_dp, 1366 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1397 DP_SET_POWER, 1367 DP_SET_POWER_D0);
1398 DP_SET_POWER_D0);
1399 if (ret == 1) 1368 if (ret == 1)
1400 break; 1369 break;
1401 msleep(1); 1370 msleep(1);
@@ -1410,7 +1379,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1410 enum port port = dp_to_dig_port(intel_dp)->port; 1379 enum port port = dp_to_dig_port(intel_dp)->port;
1411 struct drm_device *dev = encoder->base.dev; 1380 struct drm_device *dev = encoder->base.dev;
1412 struct drm_i915_private *dev_priv = dev->dev_private; 1381 struct drm_i915_private *dev_priv = dev->dev_private;
1413 u32 tmp = I915_READ(intel_dp->output_reg); 1382 enum intel_display_power_domain power_domain;
1383 u32 tmp;
1384
1385 power_domain = intel_display_port_power_domain(encoder);
1386 if (!intel_display_power_enabled(dev_priv, power_domain))
1387 return false;
1388
1389 tmp = I915_READ(intel_dp->output_reg);
1414 1390
1415 if (!(tmp & DP_PORT_EN)) 1391 if (!(tmp & DP_PORT_EN))
1416 return false; 1392 return false;
@@ -1604,19 +1580,19 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1604{ 1580{
1605 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1581 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1606 struct drm_i915_private *dev_priv = dev->dev_private; 1582 struct drm_i915_private *dev_priv = dev->dev_private;
1607 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); 1583 uint32_t aux_clock_divider;
1608 int precharge = 0x3; 1584 int precharge = 0x3;
1609 int msg_size = 5; /* Header(4) + Message(1) */ 1585 int msg_size = 5; /* Header(4) + Message(1) */
1610 1586
1587 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1588
1611 /* Enable PSR in sink */ 1589 /* Enable PSR in sink */
1612 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) 1590 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1613 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1591 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1614 DP_PSR_ENABLE & 1592 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
1615 ~DP_PSR_MAIN_LINK_ACTIVE);
1616 else 1593 else
1617 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1594 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1618 DP_PSR_ENABLE | 1595 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
1619 DP_PSR_MAIN_LINK_ACTIVE);
1620 1596
1621 /* Setup AUX registers */ 1597 /* Setup AUX registers */
1622 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); 1598 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
@@ -1659,7 +1635,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1659 struct drm_i915_private *dev_priv = dev->dev_private; 1635 struct drm_i915_private *dev_priv = dev->dev_private;
1660 struct drm_crtc *crtc = dig_port->base.base.crtc; 1636 struct drm_crtc *crtc = dig_port->base.base.crtc;
1661 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1662 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1638 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1663 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1639 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1664 1640
1665 dev_priv->psr.source_ok = false; 1641 dev_priv->psr.source_ok = false;
@@ -1675,7 +1651,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1675 return false; 1651 return false;
1676 } 1652 }
1677 1653
1678 if (!i915_enable_psr) { 1654 if (!i915.enable_psr) {
1679 DRM_DEBUG_KMS("PSR disable by flag\n"); 1655 DRM_DEBUG_KMS("PSR disable by flag\n");
1680 return false; 1656 return false;
1681 } 1657 }
@@ -1692,7 +1668,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1692 return false; 1668 return false;
1693 } 1669 }
1694 1670
1695 obj = to_intel_framebuffer(crtc->fb)->obj; 1671 obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1696 if (obj->tiling_mode != I915_TILING_X || 1672 if (obj->tiling_mode != I915_TILING_X ||
1697 obj->fence_reg == I915_FENCE_REG_NONE) { 1673 obj->fence_reg == I915_FENCE_REG_NONE) {
1698 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1674 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
@@ -1791,10 +1767,10 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1791 1767
1792 /* Make sure the panel is off before trying to change the mode. But also 1768 /* Make sure the panel is off before trying to change the mode. But also
1793 * ensure that we have vdd while we switch off the panel. */ 1769 * ensure that we have vdd while we switch off the panel. */
1794 ironlake_edp_panel_vdd_on(intel_dp); 1770 intel_edp_panel_vdd_on(intel_dp);
1795 ironlake_edp_backlight_off(intel_dp); 1771 intel_edp_backlight_off(intel_dp);
1796 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1772 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1797 ironlake_edp_panel_off(intel_dp); 1773 intel_edp_panel_off(intel_dp);
1798 1774
1799 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1775 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
1800 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 1776 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -1824,11 +1800,11 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1824 if (WARN_ON(dp_reg & DP_PORT_EN)) 1800 if (WARN_ON(dp_reg & DP_PORT_EN))
1825 return; 1801 return;
1826 1802
1827 ironlake_edp_panel_vdd_on(intel_dp); 1803 intel_edp_panel_vdd_on(intel_dp);
1828 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1804 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1829 intel_dp_start_link_train(intel_dp); 1805 intel_dp_start_link_train(intel_dp);
1830 ironlake_edp_panel_on(intel_dp); 1806 intel_edp_panel_on(intel_dp);
1831 ironlake_edp_panel_vdd_off(intel_dp, true); 1807 edp_panel_vdd_off(intel_dp, true);
1832 intel_dp_complete_link_train(intel_dp); 1808 intel_dp_complete_link_train(intel_dp);
1833 intel_dp_stop_link_train(intel_dp); 1809 intel_dp_stop_link_train(intel_dp);
1834} 1810}
@@ -1838,14 +1814,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder)
1838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1839 1815
1840 intel_enable_dp(encoder); 1816 intel_enable_dp(encoder);
1841 ironlake_edp_backlight_on(intel_dp); 1817 intel_edp_backlight_on(intel_dp);
1842} 1818}
1843 1819
1844static void vlv_enable_dp(struct intel_encoder *encoder) 1820static void vlv_enable_dp(struct intel_encoder *encoder)
1845{ 1821{
1846 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1822 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1847 1823
1848 ironlake_edp_backlight_on(intel_dp); 1824 intel_edp_backlight_on(intel_dp);
1849} 1825}
1850 1826
1851static void g4x_pre_enable_dp(struct intel_encoder *encoder) 1827static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -1927,26 +1903,25 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
1927/* 1903/*
1928 * Native read with retry for link status and receiver capability reads for 1904 * Native read with retry for link status and receiver capability reads for
1929 * cases where the sink may still be asleep. 1905 * cases where the sink may still be asleep.
1906 *
1907 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
1908 * supposed to retry 3 times per the spec.
1930 */ 1909 */
1931static bool 1910static ssize_t
1932intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1911intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
1933 uint8_t *recv, int recv_bytes) 1912 void *buffer, size_t size)
1934{ 1913{
1935 int ret, i; 1914 ssize_t ret;
1915 int i;
1936 1916
1937 /*
1938 * Sinks are *supposed* to come up within 1ms from an off state,
1939 * but we're also supposed to retry 3 times per the spec.
1940 */
1941 for (i = 0; i < 3; i++) { 1917 for (i = 0; i < 3; i++) {
1942 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1918 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
1943 recv_bytes); 1919 if (ret == size)
1944 if (ret == recv_bytes) 1920 return ret;
1945 return true;
1946 msleep(1); 1921 msleep(1);
1947 } 1922 }
1948 1923
1949 return false; 1924 return ret;
1950} 1925}
1951 1926
1952/* 1927/*
@@ -1956,10 +1931,10 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1956static bool 1931static bool
1957intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1932intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1958{ 1933{
1959 return intel_dp_aux_native_read_retry(intel_dp, 1934 return intel_dp_dpcd_read_wake(&intel_dp->aux,
1960 DP_LANE0_1_STATUS, 1935 DP_LANE0_1_STATUS,
1961 link_status, 1936 link_status,
1962 DP_LINK_STATUS_SIZE); 1937 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
1963} 1938}
1964 1939
1965/* 1940/*
@@ -2473,8 +2448,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2473 len = intel_dp->lane_count + 1; 2448 len = intel_dp->lane_count + 1;
2474 } 2449 }
2475 2450
2476 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, 2451 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
2477 buf, len); 2452 buf, len);
2478 2453
2479 return ret == len; 2454 return ret == len;
2480} 2455}
@@ -2503,9 +2478,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2503 I915_WRITE(intel_dp->output_reg, *DP); 2478 I915_WRITE(intel_dp->output_reg, *DP);
2504 POSTING_READ(intel_dp->output_reg); 2479 POSTING_READ(intel_dp->output_reg);
2505 2480
2506 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, 2481 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
2507 intel_dp->train_set, 2482 intel_dp->train_set, intel_dp->lane_count);
2508 intel_dp->lane_count);
2509 2483
2510 return ret == intel_dp->lane_count; 2484 return ret == intel_dp->lane_count;
2511} 2485}
@@ -2561,11 +2535,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2561 link_config[1] = intel_dp->lane_count; 2535 link_config[1] = intel_dp->lane_count;
2562 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2536 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2563 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 2537 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2564 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2); 2538 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
2565 2539
2566 link_config[0] = 0; 2540 link_config[0] = 0;
2567 link_config[1] = DP_SET_ANSI_8B10B; 2541 link_config[1] = DP_SET_ANSI_8B10B;
2568 intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2); 2542 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
2569 2543
2570 DP |= DP_PORT_EN; 2544 DP |= DP_PORT_EN;
2571 2545
@@ -2638,10 +2612,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2638 bool channel_eq = false; 2612 bool channel_eq = false;
2639 int tries, cr_tries; 2613 int tries, cr_tries;
2640 uint32_t DP = intel_dp->DP; 2614 uint32_t DP = intel_dp->DP;
2615 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
2616
2617 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
2618 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
2619 training_pattern = DP_TRAINING_PATTERN_3;
2641 2620
2642 /* channel equalization */ 2621 /* channel equalization */
2643 if (!intel_dp_set_link_train(intel_dp, &DP, 2622 if (!intel_dp_set_link_train(intel_dp, &DP,
2644 DP_TRAINING_PATTERN_2 | 2623 training_pattern |
2645 DP_LINK_SCRAMBLING_DISABLE)) { 2624 DP_LINK_SCRAMBLING_DISABLE)) {
2646 DRM_ERROR("failed to start channel equalization\n"); 2625 DRM_ERROR("failed to start channel equalization\n");
2647 return; 2626 return;
@@ -2668,7 +2647,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2668 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2647 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2669 intel_dp_start_link_train(intel_dp); 2648 intel_dp_start_link_train(intel_dp);
2670 intel_dp_set_link_train(intel_dp, &DP, 2649 intel_dp_set_link_train(intel_dp, &DP,
2671 DP_TRAINING_PATTERN_2 | 2650 training_pattern |
2672 DP_LINK_SCRAMBLING_DISABLE); 2651 DP_LINK_SCRAMBLING_DISABLE);
2673 cr_tries++; 2652 cr_tries++;
2674 continue; 2653 continue;
@@ -2684,7 +2663,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2684 intel_dp_link_down(intel_dp); 2663 intel_dp_link_down(intel_dp);
2685 intel_dp_start_link_train(intel_dp); 2664 intel_dp_start_link_train(intel_dp);
2686 intel_dp_set_link_train(intel_dp, &DP, 2665 intel_dp_set_link_train(intel_dp, &DP,
2687 DP_TRAINING_PATTERN_2 | 2666 training_pattern |
2688 DP_LINK_SCRAMBLING_DISABLE); 2667 DP_LINK_SCRAMBLING_DISABLE);
2689 tries = 0; 2668 tries = 0;
2690 cr_tries++; 2669 cr_tries++;
@@ -2803,8 +2782,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2803 2782
2804 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2783 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2805 2784
2806 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2785 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
2807 sizeof(intel_dp->dpcd)) == 0) 2786 sizeof(intel_dp->dpcd)) < 0)
2808 return false; /* aux transfer failed */ 2787 return false; /* aux transfer failed */
2809 2788
2810 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 2789 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
@@ -2817,15 +2796,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2817 /* Check if the panel supports PSR */ 2796 /* Check if the panel supports PSR */
2818 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 2797 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2819 if (is_edp(intel_dp)) { 2798 if (is_edp(intel_dp)) {
2820 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 2799 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
2821 intel_dp->psr_dpcd, 2800 intel_dp->psr_dpcd,
2822 sizeof(intel_dp->psr_dpcd)); 2801 sizeof(intel_dp->psr_dpcd));
2823 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { 2802 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2824 dev_priv->psr.sink_support = true; 2803 dev_priv->psr.sink_support = true;
2825 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 2804 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2826 } 2805 }
2827 } 2806 }
2828 2807
2808 /* Training Pattern 3 support */
2809 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
2810 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
2811 intel_dp->use_tps3 = true;
2812 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
2813 } else
2814 intel_dp->use_tps3 = false;
2815
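The DPCD byte tested here packs several capabilities into offset 0x002 (DP_MAX_LANE_COUNT); a hedged decode, with the local names invented for illustration (masks and bits from drm_dp_helper.h):

        u8 v = intel_dp->dpcd[DP_MAX_LANE_COUNT];
        int max_lanes = v & DP_MAX_LANE_COUNT_MASK;   /* bits 4:0: 1, 2 or 4 */
        bool tps3 = v & DP_TPS3_SUPPORTED;            /* bit 6, DP 1.2+ */
        bool enh_framing = v & DP_ENHANCED_FRAME_CAP; /* bit 7 */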
2829 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2816 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2830 DP_DWN_STRM_PORT_PRESENT)) 2817 DP_DWN_STRM_PORT_PRESENT))
2831 return true; /* native DP sink */ 2818 return true; /* native DP sink */
@@ -2833,9 +2820,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2833 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 2820 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2834 return true; /* no per-port downstream info */ 2821 return true; /* no per-port downstream info */
2835 2822
2836 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 2823 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
2837 intel_dp->downstream_ports, 2824 intel_dp->downstream_ports,
2838 DP_MAX_DOWNSTREAM_PORTS) == 0) 2825 DP_MAX_DOWNSTREAM_PORTS) < 0)
2839 return false; /* downstream port status fetch failed */ 2826 return false; /* downstream port status fetch failed */
2840 2827
2841 return true; 2828 return true;
@@ -2849,38 +2836,61 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
2849 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 2836 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2850 return; 2837 return;
2851 2838
2852 ironlake_edp_panel_vdd_on(intel_dp); 2839 intel_edp_panel_vdd_on(intel_dp);
2853 2840
2854 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2841 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
2855 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 2842 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2856 buf[0], buf[1], buf[2]); 2843 buf[0], buf[1], buf[2]);
2857 2844
2858 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 2845 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
2859 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2846 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2860 buf[0], buf[1], buf[2]); 2847 buf[0], buf[1], buf[2]);
2861 2848
2862 ironlake_edp_panel_vdd_off(intel_dp, false); 2849 edp_panel_vdd_off(intel_dp, false);
2863} 2850}
2864 2851
2865static bool 2852int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
2866intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2867{ 2853{
2868 int ret; 2854 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2855 struct drm_device *dev = intel_dig_port->base.base.dev;
2856 struct intel_crtc *intel_crtc =
2857 to_intel_crtc(intel_dig_port->base.base.crtc);
2858 u8 buf[1];
2869 2859
2870 ret = intel_dp_aux_native_read_retry(intel_dp, 2860 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
2871 DP_DEVICE_SERVICE_IRQ_VECTOR, 2861 return -EAGAIN;
2872 sink_irq_vector, 1);
2873 if (!ret)
2874 return false;
2875 2862
2876 return true; 2863 if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
2864 return -ENOTTY;
2865
2866 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
2867 DP_TEST_SINK_START) < 0)
2868 return -EAGAIN;
2869
2870 /* Wait 2 vblanks to be sure we will have the correct CRC value */
2871 intel_wait_for_vblank(dev, intel_crtc->pipe);
2872 intel_wait_for_vblank(dev, intel_crtc->pipe);
2873
2874 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
2875 return -EAGAIN;
2876
2877 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
2878 return 0;
2879}
2880
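A hedged sketch of a caller of the new helper (the debugfs CRC support elsewhere in this series is the intended consumer; the snippet below is illustrative only):

        u8 crc[6];
        int ret = intel_dp_sink_crc(intel_dp, crc);

        if (ret == 0)
                DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
                              crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);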
2881static bool
2882intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2883{
2884 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2885 DP_DEVICE_SERVICE_IRQ_VECTOR,
2886 sink_irq_vector, 1) == 1;
2877} 2887}
2878 2888
2879static void 2889static void
2880intel_dp_handle_test_request(struct intel_dp *intel_dp) 2890intel_dp_handle_test_request(struct intel_dp *intel_dp)
2881{ 2891{
2882 /* NAK by default */ 2892 /* NAK by default */
2883 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); 2893 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
2884} 2894}
2885 2895
2886/* 2896/*
@@ -2919,9 +2929,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2919 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 2929 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2920 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { 2930 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2921 /* Clear interrupt source */ 2931 /* Clear interrupt source */
2922 intel_dp_aux_native_write_1(intel_dp, 2932 drm_dp_dpcd_writeb(&intel_dp->aux,
2923 DP_DEVICE_SERVICE_IRQ_VECTOR, 2933 DP_DEVICE_SERVICE_IRQ_VECTOR,
2924 sink_irq_vector); 2934 sink_irq_vector);
2925 2935
2926 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 2936 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2927 intel_dp_handle_test_request(intel_dp); 2937 intel_dp_handle_test_request(intel_dp);
@@ -2956,15 +2966,17 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2956 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 2966 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2957 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 2967 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
2958 uint8_t reg; 2968 uint8_t reg;
2959 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2969
2960 &reg, 1)) 2970 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
2971 &reg, 1) < 0)
2961 return connector_status_unknown; 2972 return connector_status_unknown;
2973
2962 return DP_GET_SINK_COUNT(reg) ? connector_status_connected 2974 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2963 : connector_status_disconnected; 2975 : connector_status_disconnected;
2964 } 2976 }
2965 2977
2966 /* If no HPD, poke DDC gently */ 2978 /* If no HPD, poke DDC gently */
2967 if (drm_probe_ddc(&intel_dp->adapter)) 2979 if (drm_probe_ddc(&intel_dp->aux.ddc))
2968 return connector_status_connected; 2980 return connector_status_connected;
2969 2981
2970 /* Well we tried, say unknown for unreliable port types */ 2982 /* Well we tried, say unknown for unreliable port types */
@@ -3106,10 +3118,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3106 struct drm_device *dev = connector->dev; 3118 struct drm_device *dev = connector->dev;
3107 struct drm_i915_private *dev_priv = dev->dev_private; 3119 struct drm_i915_private *dev_priv = dev->dev_private;
3108 enum drm_connector_status status; 3120 enum drm_connector_status status;
3121 enum intel_display_power_domain power_domain;
3109 struct edid *edid = NULL; 3122 struct edid *edid = NULL;
3110 3123
3111 intel_runtime_pm_get(dev_priv); 3124 intel_runtime_pm_get(dev_priv);
3112 3125
3126 power_domain = intel_display_port_power_domain(intel_encoder);
3127 intel_display_power_get(dev_priv, power_domain);
3128
3113 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 3129 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3114 connector->base.id, drm_get_connector_name(connector)); 3130 connector->base.id, drm_get_connector_name(connector));
3115 3131
@@ -3128,7 +3144,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3128 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 3144 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3129 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 3145 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3130 } else { 3146 } else {
3131 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 3147 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3132 if (edid) { 3148 if (edid) {
3133 intel_dp->has_audio = drm_detect_monitor_audio(edid); 3149 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3134 kfree(edid); 3150 kfree(edid);
@@ -3140,21 +3156,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3140 status = connector_status_connected; 3156 status = connector_status_connected;
3141 3157
3142out: 3158out:
3159 intel_display_power_put(dev_priv, power_domain);
3160
3143 intel_runtime_pm_put(dev_priv); 3161 intel_runtime_pm_put(dev_priv);
3162
3144 return status; 3163 return status;
3145} 3164}
3146 3165
3147static int intel_dp_get_modes(struct drm_connector *connector) 3166static int intel_dp_get_modes(struct drm_connector *connector)
3148{ 3167{
3149 struct intel_dp *intel_dp = intel_attached_dp(connector); 3168 struct intel_dp *intel_dp = intel_attached_dp(connector);
3169 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3170 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3150 struct intel_connector *intel_connector = to_intel_connector(connector); 3171 struct intel_connector *intel_connector = to_intel_connector(connector);
3151 struct drm_device *dev = connector->dev; 3172 struct drm_device *dev = connector->dev;
3173 struct drm_i915_private *dev_priv = dev->dev_private;
3174 enum intel_display_power_domain power_domain;
3152 int ret; 3175 int ret;
3153 3176
3154 /* We should parse the EDID data and find out if it has an audio sink 3177 /* We should parse the EDID data and find out if it has an audio sink
3155 */ 3178 */
3156 3179
3157 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 3180 power_domain = intel_display_port_power_domain(intel_encoder);
3181 intel_display_power_get(dev_priv, power_domain);
3182
3183 ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3184 intel_display_power_put(dev_priv, power_domain);
3158 if (ret) 3185 if (ret)
3159 return ret; 3186 return ret;
3160 3187
@@ -3175,15 +3202,25 @@ static bool
3175intel_dp_detect_audio(struct drm_connector *connector) 3202intel_dp_detect_audio(struct drm_connector *connector)
3176{ 3203{
3177 struct intel_dp *intel_dp = intel_attached_dp(connector); 3204 struct intel_dp *intel_dp = intel_attached_dp(connector);
3205 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3206 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3207 struct drm_device *dev = connector->dev;
3208 struct drm_i915_private *dev_priv = dev->dev_private;
3209 enum intel_display_power_domain power_domain;
3178 struct edid *edid; 3210 struct edid *edid;
3179 bool has_audio = false; 3211 bool has_audio = false;
3180 3212
3181 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 3213 power_domain = intel_display_port_power_domain(intel_encoder);
3214 intel_display_power_get(dev_priv, power_domain);
3215
3216 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3182 if (edid) { 3217 if (edid) {
3183 has_audio = drm_detect_monitor_audio(edid); 3218 has_audio = drm_detect_monitor_audio(edid);
3184 kfree(edid); 3219 kfree(edid);
3185 } 3220 }
3186 3221
3222 intel_display_power_put(dev_priv, power_domain);
3223
3187 return has_audio; 3224 return has_audio;
3188} 3225}
3189 3226
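All three probe paths above (detect, get_modes, detect_audio) now bracket their hardware access with intel_display_power_get()/intel_display_power_put(), so the port's power well is guaranteed up for exactly the duration of the EDID read. The shape of that pattern, reduced to a toy refcount (the struct and helpers below are illustrative stand-ins):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy refcounted power domain, standing in for the i915 power wells. */
struct power_domain { int refcount; };

static void power_get(struct power_domain *pd) { pd->refcount++; }
static void power_put(struct power_domain *pd)
{
	assert(pd->refcount > 0);
	pd->refcount--;
}

/* Any register or DDC access in a probe path must sit between get and
 * put, otherwise runtime PM may power the well down underneath it. */
static bool detect_audio(struct power_domain *pd)
{
	bool has_audio;

	power_get(pd);
	/* ... read the EDID here, as the real code does ... */
	has_audio = true;	/* pretend the monitor advertised audio */
	power_put(pd);

	return has_audio;
}

int main(void)
{
	struct power_domain pd = { 0 };
	printf("audio: %d, refcount after: %d\n",
	       detect_audio(&pd), pd.refcount);
	return 0;
}
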
@@ -3298,12 +3335,12 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3298 struct intel_dp *intel_dp = &intel_dig_port->dp; 3335 struct intel_dp *intel_dp = &intel_dig_port->dp;
3299 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3336 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3300 3337
3301 i2c_del_adapter(&intel_dp->adapter); 3338 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
3302 drm_encoder_cleanup(encoder); 3339 drm_encoder_cleanup(encoder);
3303 if (is_edp(intel_dp)) { 3340 if (is_edp(intel_dp)) {
3304 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3341 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3305 mutex_lock(&dev->mode_config.mutex); 3342 mutex_lock(&dev->mode_config.mutex);
3306 ironlake_panel_vdd_off_sync(intel_dp); 3343 edp_panel_vdd_off_sync(intel_dp);
3307 mutex_unlock(&dev->mode_config.mutex); 3344 mutex_unlock(&dev->mode_config.mutex);
3308 } 3345 }
3309 kfree(intel_dig_port); 3346 kfree(intel_dig_port);
@@ -3402,6 +3439,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
3402 } 3439 }
3403} 3440}
3404 3441
3442static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
3443{
3444 intel_dp->last_power_cycle = jiffies;
3445 intel_dp->last_power_on = jiffies;
3446 intel_dp->last_backlight_off = jiffies;
3447}
3448
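intel_dp_init_panel_power_timestamps() seeds three jiffies timestamps; the eDP code elsewhere uses them to sleep only for whatever remains of a panel delay instead of the full delay every time. A userspace sketch of that record-then-wait-the-remainder idea (the clock source and delay value are illustrative):

#include <stdio.h>
#include <time.h>

static double now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

/* Sleep only for the part of min_delay_ms that hasn't already elapsed
 * since the recorded event, mirroring the last_power_on bookkeeping. */
static void wait_remaining(double last_event_ms, double min_delay_ms)
{
	double elapsed = now_ms() - last_event_ms;

	if (elapsed < min_delay_ms) {
		double rem = min_delay_ms - elapsed;
		struct timespec ts;

		ts.tv_sec = (time_t)(rem / 1000.0);
		ts.tv_nsec = (long)((rem - ts.tv_sec * 1000.0) * 1e6);
		nanosleep(&ts, NULL);
	}
}

int main(void)
{
	double last_power_on = now_ms();	/* as seeded at init time */
	wait_remaining(last_power_on, 10.0);	/* e.g. a panel T8 delay */
	printf("delay satisfied\n");
	return 0;
}
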
3405static void 3449static void
3406intel_dp_init_panel_power_sequencer(struct drm_device *dev, 3450intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3407 struct intel_dp *intel_dp, 3451 struct intel_dp *intel_dp,
@@ -3524,10 +3568,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3524 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3568 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3525 } 3569 }
3526 3570
3527 /* And finally store the new values in the power sequencer. */ 3571 /*
3572 * And finally store the new values in the power sequencer. The
3573 * backlight delays are set to 1 because we do manual waits on them. For
3574 * T8, even BSpec recommends doing it. For T9, if we don't do this,
3575 * we'll end up waiting for the backlight off delay twice: once when we
3576 * do the manual sleep, and once when we disable the panel and wait for
3577 * the PP_STATUS bit to become zero.
3578 */
3528 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 3579 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3529 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 3580 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
3530 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3581 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3531 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 3582 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
3532 /* Compute the divisor for the pp clock, simply match the Bspec 3583 /* Compute the divisor for the pp clock, simply match the Bspec
3533 * formula. */ 3584 * formula. */
@@ -3562,14 +3613,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3562} 3613}
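The pp_on/pp_off packing above forces the backlight fields to 1, as the new comment explains, because the driver now waits for T8/T9 in software. A self-contained sketch of the same bit packing (the shift constants mirror i915_reg.h but are reproduced here only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative shift values; the authoritative ones live in i915_reg.h. */
#define PANEL_POWER_UP_DELAY_SHIFT	16
#define PANEL_LIGHT_ON_DELAY_SHIFT	0
#define PANEL_LIGHT_OFF_DELAY_SHIFT	16
#define PANEL_POWER_DOWN_DELAY_SHIFT	0

struct edp_power_seq { uint16_t t1_t3, t8, t9, t10; };

int main(void)
{
	struct edp_power_seq seq = {
		.t1_t3 = 2100, .t8 = 50, .t9 = 50, .t10 = 500,
	};

	/* Backlight delays (T8/T9) are programmed as 1: the driver sleeps
	 * for them in software, so hardware must not wait a second time. */
	uint32_t pp_on  = (seq.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
			  (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	uint32_t pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
			  (seq.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);

	printf("PP_ON=0x%08x PP_OFF=0x%08x\n",
	       (unsigned)pp_on, (unsigned)pp_off);
	return 0;
}
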
3563 3614
3564static bool intel_edp_init_connector(struct intel_dp *intel_dp, 3615static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3565 struct intel_connector *intel_connector) 3616 struct intel_connector *intel_connector,
3617 struct edp_power_seq *power_seq)
3566{ 3618{
3567 struct drm_connector *connector = &intel_connector->base; 3619 struct drm_connector *connector = &intel_connector->base;
3568 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3569 struct drm_device *dev = intel_dig_port->base.base.dev; 3621 struct drm_device *dev = intel_dig_port->base.base.dev;
3570 struct drm_i915_private *dev_priv = dev->dev_private; 3622 struct drm_i915_private *dev_priv = dev->dev_private;
3571 struct drm_display_mode *fixed_mode = NULL; 3623 struct drm_display_mode *fixed_mode = NULL;
3572 struct edp_power_seq power_seq = { 0 };
3573 bool has_dpcd; 3624 bool has_dpcd;
3574 struct drm_display_mode *scan; 3625 struct drm_display_mode *scan;
3575 struct edid *edid; 3626 struct edid *edid;
@@ -3577,12 +3628,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3577 if (!is_edp(intel_dp)) 3628 if (!is_edp(intel_dp))
3578 return true; 3629 return true;
3579 3630
3580 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3581
3582 /* Cache DPCD and EDID for edp. */ 3631 /* Cache DPCD and EDID for edp. */
3583 ironlake_edp_panel_vdd_on(intel_dp); 3632 intel_edp_panel_vdd_on(intel_dp);
3584 has_dpcd = intel_dp_get_dpcd(intel_dp); 3633 has_dpcd = intel_dp_get_dpcd(intel_dp);
3585 ironlake_edp_panel_vdd_off(intel_dp, false); 3634 edp_panel_vdd_off(intel_dp, false);
3586 3635
3587 if (has_dpcd) { 3636 if (has_dpcd) {
3588 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3637 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -3596,10 +3645,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3596 } 3645 }
3597 3646
3598 /* We now know it's not a ghost, init power sequence regs. */ 3647 /* We now know it's not a ghost, init power sequence regs. */
3599 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3648 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
3600 &power_seq);
3601 3649
3602 edid = drm_get_edid(connector, &intel_dp->adapter); 3650 mutex_lock(&dev->mode_config.mutex);
3651 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
3603 if (edid) { 3652 if (edid) {
3604 if (drm_add_edid_modes(connector, edid)) { 3653 if (drm_add_edid_modes(connector, edid)) {
3605 drm_mode_connector_update_edid_property(connector, 3654 drm_mode_connector_update_edid_property(connector,
@@ -3629,8 +3678,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3629 if (fixed_mode) 3678 if (fixed_mode)
3630 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3679 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3631 } 3680 }
3681 mutex_unlock(&dev->mode_config.mutex);
3632 3682
3633 intel_panel_init(&intel_connector->panel, fixed_mode); 3683 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
3634 intel_panel_setup_backlight(connector); 3684 intel_panel_setup_backlight(connector);
3635 3685
3636 return true; 3686 return true;
@@ -3646,8 +3696,20 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3646 struct drm_device *dev = intel_encoder->base.dev; 3696 struct drm_device *dev = intel_encoder->base.dev;
3647 struct drm_i915_private *dev_priv = dev->dev_private; 3697 struct drm_i915_private *dev_priv = dev->dev_private;
3648 enum port port = intel_dig_port->port; 3698 enum port port = intel_dig_port->port;
3649 const char *name = NULL; 3699 struct edp_power_seq power_seq = { 0 };
3650 int type, error; 3700 int type;
3701
3702 /* intel_dp vfuncs */
3703 if (IS_VALLEYVIEW(dev))
3704 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
3705 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3706 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
3707 else if (HAS_PCH_SPLIT(dev))
3708 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
3709 else
3710 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
3711
3712 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
3651 3713
3652 /* Preserve the current hw state. */ 3714 /* Preserve the current hw state. */
3653 intel_dp->DP = I915_READ(intel_dp->output_reg); 3715 intel_dp->DP = I915_READ(intel_dp->output_reg);
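Instead of branching on the platform inside every AUX transaction, the init path above installs a per-platform get_aux_clock_divider vfunc once. The dispatch pattern in miniature (the platform set and divider values are made up; the real table also distinguishes VLV and the PCH-split parts):

#include <stdint.h>
#include <stdio.h>

enum platform { I9XX, ILK, HSW, VLV };

struct dp {
	uint32_t (*get_aux_clock_divider)(const struct dp *dp, int index);
};

/* Hypothetical dividers; each platform derives them differently. */
static uint32_t i9xx_div(const struct dp *dp, int i) { return i ? 0 : 62; }
static uint32_t hsw_div(const struct dp *dp, int i)  { return i ? 0 : 74; }

static void dp_init(struct dp *dp, enum platform p)
{
	/* Pick the vfunc once at init; the AUX path then just calls it. */
	dp->get_aux_clock_divider = (p == HSW) ? hsw_div : i9xx_div;
}

int main(void)
{
	struct dp dp;

	dp_init(&dp, HSW);
	printf("divider: %u\n", dp.get_aux_clock_divider(&dp, 0));
	return 0;
}
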
@@ -3677,7 +3739,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3677 connector->doublescan_allowed = 0; 3739 connector->doublescan_allowed = 0;
3678 3740
3679 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 3741 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3680 ironlake_panel_vdd_work); 3742 edp_panel_vdd_work);
3681 3743
3682 intel_connector_attach_encoder(intel_connector, intel_encoder); 3744 intel_connector_attach_encoder(intel_connector, intel_encoder);
3683 drm_sysfs_connector_add(connector); 3745 drm_sysfs_connector_add(connector);
@@ -3686,61 +3748,41 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3686 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 3748 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3687 else 3749 else
3688 intel_connector->get_hw_state = intel_connector_get_hw_state; 3750 intel_connector->get_hw_state = intel_connector_get_hw_state;
3751 intel_connector->unregister = intel_dp_connector_unregister;
3689 3752
3690 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 3753 /* Set up the hotplug pin. */
3691 if (HAS_DDI(dev)) {
3692 switch (intel_dig_port->port) {
3693 case PORT_A:
3694 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3695 break;
3696 case PORT_B:
3697 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3698 break;
3699 case PORT_C:
3700 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3701 break;
3702 case PORT_D:
3703 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3704 break;
3705 default:
3706 BUG();
3707 }
3708 }
3709
3710 /* Set up the DDC bus. */
3711 switch (port) { 3754 switch (port) {
3712 case PORT_A: 3755 case PORT_A:
3713 intel_encoder->hpd_pin = HPD_PORT_A; 3756 intel_encoder->hpd_pin = HPD_PORT_A;
3714 name = "DPDDC-A";
3715 break; 3757 break;
3716 case PORT_B: 3758 case PORT_B:
3717 intel_encoder->hpd_pin = HPD_PORT_B; 3759 intel_encoder->hpd_pin = HPD_PORT_B;
3718 name = "DPDDC-B";
3719 break; 3760 break;
3720 case PORT_C: 3761 case PORT_C:
3721 intel_encoder->hpd_pin = HPD_PORT_C; 3762 intel_encoder->hpd_pin = HPD_PORT_C;
3722 name = "DPDDC-C";
3723 break; 3763 break;
3724 case PORT_D: 3764 case PORT_D:
3725 intel_encoder->hpd_pin = HPD_PORT_D; 3765 intel_encoder->hpd_pin = HPD_PORT_D;
3726 name = "DPDDC-D";
3727 break; 3766 break;
3728 default: 3767 default:
3729 BUG(); 3768 BUG();
3730 } 3769 }
3731 3770
3732 error = intel_dp_i2c_init(intel_dp, intel_connector, name); 3771 if (is_edp(intel_dp)) {
3733 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3772 intel_dp_init_panel_power_timestamps(intel_dp);
3734 error, port_name(port)); 3773 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3774 }
3775
3776 intel_dp_aux_init(intel_dp, intel_connector);
3735 3777
3736 intel_dp->psr_setup_done = false; 3778 intel_dp->psr_setup_done = false;
3737 3779
3738 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3780 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
3739 i2c_del_adapter(&intel_dp->adapter); 3781 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
3740 if (is_edp(intel_dp)) { 3782 if (is_edp(intel_dp)) {
3741 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3783 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3742 mutex_lock(&dev->mode_config.mutex); 3784 mutex_lock(&dev->mode_config.mutex);
3743 ironlake_panel_vdd_off_sync(intel_dp); 3785 edp_panel_vdd_off_sync(intel_dp);
3744 mutex_unlock(&dev->mode_config.mutex); 3786 mutex_unlock(&dev->mode_config.mutex);
3745 } 3787 }
3746 drm_sysfs_connector_remove(connector); 3788 drm_sysfs_connector_remove(connector);
@@ -3806,7 +3848,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3806 3848
3807 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 3849 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3808 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 3850 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3809 intel_encoder->cloneable = false; 3851 intel_encoder->cloneable = 0;
3810 intel_encoder->hot_plug = intel_dp_hot_plug; 3852 intel_encoder->hot_plug = intel_dp_hot_plug;
3811 3853
3812 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 3854 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fbfaaba5cc3b..0542de982260 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -78,6 +78,12 @@
78#define MAX_OUTPUTS 6 78#define MAX_OUTPUTS 6
79/* maximum connectors per crtcs in the mode set */ 79/* maximum connectors per crtcs in the mode set */
80 80
81/* Maximum cursor sizes */
82#define GEN2_CURSOR_WIDTH 64
83#define GEN2_CURSOR_HEIGHT 64
84#define CURSOR_WIDTH 256
85#define CURSOR_HEIGHT 256
86
81#define INTEL_I2C_BUS_DVO 1 87#define INTEL_I2C_BUS_DVO 1
82#define INTEL_I2C_BUS_SDVO 2 88#define INTEL_I2C_BUS_SDVO 2
83 89
@@ -110,9 +116,10 @@ struct intel_framebuffer {
110 116
111struct intel_fbdev { 117struct intel_fbdev {
112 struct drm_fb_helper helper; 118 struct drm_fb_helper helper;
113 struct intel_framebuffer ifb; 119 struct intel_framebuffer *fb;
114 struct list_head fbdev_list; 120 struct list_head fbdev_list;
115 struct drm_display_mode *our_mode; 121 struct drm_display_mode *our_mode;
122 int preferred_bpp;
116}; 123};
117 124
118struct intel_encoder { 125struct intel_encoder {
@@ -124,11 +131,7 @@ struct intel_encoder {
124 struct intel_crtc *new_crtc; 131 struct intel_crtc *new_crtc;
125 132
126 int type; 133 int type;
127 /* 134 unsigned int cloneable;
128 * Intel hw has only one MUX where encoders could be clone, hence a
129 * simple flag is enough to compute the possible_clones mask.
130 */
131 bool cloneable;
132 bool connectors_active; 135 bool connectors_active;
133 void (*hot_plug)(struct intel_encoder *); 136 void (*hot_plug)(struct intel_encoder *);
134 bool (*compute_config)(struct intel_encoder *, 137 bool (*compute_config)(struct intel_encoder *,
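With cloneable now a bitmask of output types the encoder is willing to share a CRTC with (rather than a single flag), clone compatibility can be decided per encoder pair. One plausible pairwise rule, sketched with toy types; the symmetric check here is an assumption for illustration, the driver's exact semantics live in its mode-set checks:

#include <stdbool.h>
#include <stdio.h>

enum output_type { OUT_ANALOG, OUT_DVO, OUT_HDMI };

struct encoder { enum output_type type; unsigned int cloneable; };

/* Two encoders may share a CRTC only if each lists the other's type. */
static bool can_clone(const struct encoder *a, const struct encoder *b)
{
	return (a->cloneable & (1u << b->type)) &&
	       (b->cloneable & (1u << a->type));
}

int main(void)
{
	struct encoder dvo  = { OUT_DVO,    (1u << OUT_ANALOG) | (1u << OUT_DVO) };
	struct encoder crt  = { OUT_ANALOG, (1u << OUT_DVO) | (1u << OUT_ANALOG) };
	struct encoder hdmi = { OUT_HDMI,   0 };

	printf("dvo+crt: %d, dvo+hdmi: %d\n",
	       can_clone(&dvo, &crt), can_clone(&dvo, &hdmi));
	return 0;
}
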
@@ -187,6 +190,14 @@ struct intel_connector {
187 * and active (i.e. dpms ON state). */ 190 * and active (i.e. dpms ON state). */
188 bool (*get_hw_state)(struct intel_connector *); 191 bool (*get_hw_state)(struct intel_connector *);
189 192
193 /*
194 * Removes all interfaces through which the connector is accessible
195 * (such as sysfs and debugfs entries), so that no new operations can be
196 * started on the connector. Also makes sure all currently pending
197 * operations finish before returning.
195 * (such as sysfs and debugfs entries), so that no new operations can be
196 * started on the connector. Also makes sure all currently pending
197 * operations finish before returning.
198 */
199 void (*unregister)(struct intel_connector *);
200
190 /* Panel info for eDP and LVDS */ 201 /* Panel info for eDP and LVDS */
191 struct intel_panel panel; 202 struct intel_panel panel;
192 203
@@ -210,6 +221,12 @@ typedef struct dpll {
210 int p; 221 int p;
211} intel_clock_t; 222} intel_clock_t;
212 223
224struct intel_plane_config {
225 bool tiled;
226 int size;
227 u32 base;
228};
229
213struct intel_crtc_config { 230struct intel_crtc_config {
214 /** 231 /**
215 * quirks - bitfield with hw state readout quirks 232 * quirks - bitfield with hw state readout quirks
@@ -356,9 +373,13 @@ struct intel_crtc {
356 uint32_t cursor_addr; 373 uint32_t cursor_addr;
357 int16_t cursor_x, cursor_y; 374 int16_t cursor_x, cursor_y;
358 int16_t cursor_width, cursor_height; 375 int16_t cursor_width, cursor_height;
376 int16_t max_cursor_width, max_cursor_height;
359 bool cursor_visible; 377 bool cursor_visible;
360 378
379 struct intel_plane_config plane_config;
361 struct intel_crtc_config config; 380 struct intel_crtc_config config;
381 struct intel_crtc_config *new_config;
382 bool new_enabled;
362 383
363 uint32_t ddi_pll_sel; 384 uint32_t ddi_pll_sel;
364 385
@@ -475,8 +496,7 @@ struct intel_dp {
475 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 496 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
476 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 497 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
477 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 498 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
478 struct i2c_adapter adapter; 499 struct drm_dp_aux aux;
479 struct i2c_algo_dp_aux_data algo;
480 uint8_t train_set[4]; 500 uint8_t train_set[4];
481 int panel_power_up_delay; 501 int panel_power_up_delay;
482 int panel_power_down_delay; 502 int panel_power_down_delay;
@@ -485,8 +505,22 @@ struct intel_dp {
485 int backlight_off_delay; 505 int backlight_off_delay;
486 struct delayed_work panel_vdd_work; 506 struct delayed_work panel_vdd_work;
487 bool want_panel_vdd; 507 bool want_panel_vdd;
508 unsigned long last_power_cycle;
509 unsigned long last_power_on;
510 unsigned long last_backlight_off;
488 bool psr_setup_done; 511 bool psr_setup_done;
512 bool use_tps3;
489 struct intel_connector *attached_connector; 513 struct intel_connector *attached_connector;
514
515 uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
516 /*
517 * This function returns the value we have to program the AUX_CTL
518 * register with to kick off an AUX transaction.
519 */
520 uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
521 bool has_aux_irq,
522 int send_bytes,
523 uint32_t aux_clock_divider);
490}; 524};
491 525
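The struct change above is the heart of the series: the hand-rolled i2c_adapter plus i2c_algo_dp_aux_data pair is replaced by a struct drm_dp_aux, where the driver supplies a single transfer() hook and the DRM helper builds both DPCD access and I2C-over-AUX (aux.ddc, used for EDID) on top of it. A toy model of that inversion; all types below are stand-ins, not the kernel's:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct aux_msg { unsigned int address; void *buffer; size_t size; };

struct aux_ch {
	const char *name;
	ssize_t (*transfer)(struct aux_ch *aux, struct aux_msg *msg);
};

/* Driver-specific hook: a real one programs the AUX_CTL register and
 * waits for completion. Here we just pretend the sink answered. */
static ssize_t fake_transfer(struct aux_ch *aux, struct aux_msg *msg)
{
	memset(msg->buffer, 0, msg->size);
	return (ssize_t)msg->size;
}

/* Helper-side "DPCD read" expressed purely in terms of the hook. */
static ssize_t dpcd_read(struct aux_ch *aux, unsigned int addr,
			 void *buf, size_t size)
{
	struct aux_msg msg = { .address = addr, .buffer = buf, .size = size };

	return aux->transfer(aux, &msg);
}

int main(void)
{
	struct aux_ch aux = { "DPDDC-A", fake_transfer };
	unsigned char rev;

	printf("read %zd byte(s)\n", dpcd_read(&aux, 0x000, &rev, 1));
	return 0;
}
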
492struct intel_digital_port { 526struct intel_digital_port {
@@ -540,6 +574,7 @@ struct intel_unpin_work {
540struct intel_set_config { 574struct intel_set_config {
541 struct drm_encoder **save_connector_encoders; 575 struct drm_encoder **save_connector_encoders;
542 struct drm_crtc **save_encoder_crtcs; 576 struct drm_crtc **save_encoder_crtcs;
577 bool *save_crtc_enabled;
543 578
544 bool fb_changed; 579 bool fb_changed;
545 bool mode_changed; 580 bool mode_changed;
@@ -584,6 +619,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
584/* i915_irq.c */ 619/* i915_irq.c */
585bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 620bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
586 enum pipe pipe, bool enable); 621 enum pipe pipe, bool enable);
622bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
623 enum pipe pipe, bool enable);
587bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 624bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
588 enum transcoder pch_transcoder, 625 enum transcoder pch_transcoder,
589 bool enable); 626 bool enable);
@@ -591,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
591void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 628void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
592void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 629void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
593void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 630void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
594void hsw_pc8_disable_interrupts(struct drm_device *dev); 631void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
595void hsw_pc8_restore_interrupts(struct drm_device *dev); 632void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);
596 633
597 634
598/* intel_crt.c */ 635/* intel_crt.c */
@@ -664,11 +701,10 @@ int intel_pin_and_fence_fb_obj(struct drm_device *dev,
664 struct drm_i915_gem_object *obj, 701 struct drm_i915_gem_object *obj,
665 struct intel_ring_buffer *pipelined); 702 struct intel_ring_buffer *pipelined);
666void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); 703void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
667int intel_framebuffer_init(struct drm_device *dev, 704struct drm_framebuffer *
668 struct intel_framebuffer *ifb, 705__intel_framebuffer_create(struct drm_device *dev,
669 struct drm_mode_fb_cmd2 *mode_cmd, 706 struct drm_mode_fb_cmd2 *mode_cmd,
670 struct drm_i915_gem_object *obj); 707 struct drm_i915_gem_object *obj);
671void intel_framebuffer_fini(struct intel_framebuffer *fb);
672void intel_prepare_page_flip(struct drm_device *dev, int plane); 708void intel_prepare_page_flip(struct drm_device *dev, int plane);
673void intel_finish_page_flip(struct drm_device *dev, int pipe); 709void intel_finish_page_flip(struct drm_device *dev, int pipe);
674void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 710void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
@@ -696,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
696 unsigned int bpp, 732 unsigned int bpp,
697 unsigned int pitch); 733 unsigned int pitch);
698void intel_display_handle_reset(struct drm_device *dev); 734void intel_display_handle_reset(struct drm_device *dev);
699void hsw_enable_pc8_work(struct work_struct *__work); 735void hsw_enable_pc8(struct drm_i915_private *dev_priv);
700void hsw_enable_package_c8(struct drm_i915_private *dev_priv); 736void hsw_disable_pc8(struct drm_i915_private *dev_priv);
701void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
702void intel_dp_get_m_n(struct intel_crtc *crtc, 737void intel_dp_get_m_n(struct intel_crtc *crtc,
703 struct intel_crtc_config *pipe_config); 738 struct intel_crtc_config *pipe_config);
704int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 739int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
@@ -708,8 +743,13 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
708bool intel_crtc_active(struct drm_crtc *crtc); 743bool intel_crtc_active(struct drm_crtc *crtc);
709void hsw_enable_ips(struct intel_crtc *crtc); 744void hsw_enable_ips(struct intel_crtc *crtc);
710void hsw_disable_ips(struct intel_crtc *crtc); 745void hsw_disable_ips(struct intel_crtc *crtc);
711void intel_display_set_init_power(struct drm_device *dev, bool enable); 746void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
747enum intel_display_power_domain
748intel_display_port_power_domain(struct intel_encoder *intel_encoder);
712int valleyview_get_vco(struct drm_i915_private *dev_priv); 749int valleyview_get_vco(struct drm_i915_private *dev_priv);
750void intel_mode_from_pipe_config(struct drm_display_mode *mode,
751 struct intel_crtc_config *pipe_config);
752int intel_format_to_fourcc(int format);
713 753
714/* intel_dp.c */ 754/* intel_dp.c */
715void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 755void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -721,15 +761,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
721void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 761void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
722void intel_dp_encoder_destroy(struct drm_encoder *encoder); 762void intel_dp_encoder_destroy(struct drm_encoder *encoder);
723void intel_dp_check_link_status(struct intel_dp *intel_dp); 763void intel_dp_check_link_status(struct intel_dp *intel_dp);
764int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
724bool intel_dp_compute_config(struct intel_encoder *encoder, 765bool intel_dp_compute_config(struct intel_encoder *encoder,
725 struct intel_crtc_config *pipe_config); 766 struct intel_crtc_config *pipe_config);
726bool intel_dp_is_edp(struct drm_device *dev, enum port port); 767bool intel_dp_is_edp(struct drm_device *dev, enum port port);
727void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 768void intel_edp_backlight_on(struct intel_dp *intel_dp);
728void ironlake_edp_backlight_off(struct intel_dp *intel_dp); 769void intel_edp_backlight_off(struct intel_dp *intel_dp);
729void ironlake_edp_panel_on(struct intel_dp *intel_dp); 770void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
730void ironlake_edp_panel_off(struct intel_dp *intel_dp); 771void intel_edp_panel_on(struct intel_dp *intel_dp);
731void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); 772void intel_edp_panel_off(struct intel_dp *intel_dp);
732void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
733void intel_edp_psr_enable(struct intel_dp *intel_dp); 773void intel_edp_psr_enable(struct intel_dp *intel_dp);
734void intel_edp_psr_disable(struct intel_dp *intel_dp); 774void intel_edp_psr_disable(struct intel_dp *intel_dp);
735void intel_edp_psr_update(struct drm_device *dev); 775void intel_edp_psr_update(struct drm_device *dev);
@@ -808,7 +848,8 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
808 848
809/* intel_panel.c */ 849/* intel_panel.c */
810int intel_panel_init(struct intel_panel *panel, 850int intel_panel_init(struct intel_panel *panel,
811 struct drm_display_mode *fixed_mode); 851 struct drm_display_mode *fixed_mode,
852 struct drm_display_mode *downclock_mode);
812void intel_panel_fini(struct intel_panel *panel); 853void intel_panel_fini(struct intel_panel *panel);
813void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 854void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
814 struct drm_display_mode *adjusted_mode); 855 struct drm_display_mode *adjusted_mode);
@@ -845,18 +886,19 @@ bool intel_fbc_enabled(struct drm_device *dev);
845void intel_update_fbc(struct drm_device *dev); 886void intel_update_fbc(struct drm_device *dev);
846void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 887void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
847void intel_gpu_ips_teardown(void); 888void intel_gpu_ips_teardown(void);
848int intel_power_domains_init(struct drm_device *dev); 889int intel_power_domains_init(struct drm_i915_private *);
849void intel_power_domains_remove(struct drm_device *dev); 890void intel_power_domains_remove(struct drm_i915_private *);
850bool intel_display_power_enabled(struct drm_device *dev, 891bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
851 enum intel_display_power_domain domain); 892 enum intel_display_power_domain domain);
852bool intel_display_power_enabled_sw(struct drm_device *dev, 893bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
853 enum intel_display_power_domain domain); 894 enum intel_display_power_domain domain);
854void intel_display_power_get(struct drm_device *dev, 895void intel_display_power_get(struct drm_i915_private *dev_priv,
855 enum intel_display_power_domain domain); 896 enum intel_display_power_domain domain);
856void intel_display_power_put(struct drm_device *dev, 897void intel_display_power_put(struct drm_i915_private *dev_priv,
857 enum intel_display_power_domain domain); 898 enum intel_display_power_domain domain);
858void intel_power_domains_init_hw(struct drm_device *dev); 899void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
859void intel_set_power_well(struct drm_device *dev, bool enable); 900void intel_init_gt_powersave(struct drm_device *dev);
901void intel_cleanup_gt_powersave(struct drm_device *dev);
860void intel_enable_gt_powersave(struct drm_device *dev); 902void intel_enable_gt_powersave(struct drm_device *dev);
861void intel_disable_gt_powersave(struct drm_device *dev); 903void intel_disable_gt_powersave(struct drm_device *dev);
862void ironlake_teardown_rc6(struct drm_device *dev); 904void ironlake_teardown_rc6(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index fabbf0d895cf..33656647f8bc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
243 enum pipe *pipe) 243 enum pipe *pipe)
244{ 244{
245 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 245 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
246 enum intel_display_power_domain power_domain;
246 u32 port, func; 247 u32 port, func;
247 enum pipe p; 248 enum pipe p;
248 249
249 DRM_DEBUG_KMS("\n"); 250 DRM_DEBUG_KMS("\n");
250 251
252 power_domain = intel_display_port_power_domain(encoder);
253 if (!intel_display_power_enabled(dev_priv, power_domain))
254 return false;
255
251 /* XXX: this only works for one DSI output */ 256 /* XXX: this only works for one DSI output */
252 for (p = PIPE_A; p <= PIPE_B; p++) { 257 for (p = PIPE_A; p <= PIPE_B; p++) {
253 port = I915_READ(MIPI_PORT_CTRL(p)); 258 port = I915_READ(MIPI_PORT_CTRL(p));
@@ -488,8 +493,19 @@ static enum drm_connector_status
488intel_dsi_detect(struct drm_connector *connector, bool force) 493intel_dsi_detect(struct drm_connector *connector, bool force)
489{ 494{
490 struct intel_dsi *intel_dsi = intel_attached_dsi(connector); 495 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
496 struct intel_encoder *intel_encoder = &intel_dsi->base;
497 enum intel_display_power_domain power_domain;
498 enum drm_connector_status connector_status;
499 struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
500
491 DRM_DEBUG_KMS("\n"); 501 DRM_DEBUG_KMS("\n");
492 return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev); 502 power_domain = intel_display_port_power_domain(intel_encoder);
503
504 intel_display_power_get(dev_priv, power_domain);
505 connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
506 intel_display_power_put(dev_priv, power_domain);
507
508 return connector_status;
493} 509}
494 510
495static int intel_dsi_get_modes(struct drm_connector *connector) 511static int intel_dsi_get_modes(struct drm_connector *connector)
@@ -586,6 +602,7 @@ bool intel_dsi_init(struct drm_device *dev)
586 intel_encoder->get_config = intel_dsi_get_config; 602 intel_encoder->get_config = intel_dsi_get_config;
587 603
588 intel_connector->get_hw_state = intel_connector_get_hw_state; 604 intel_connector->get_hw_state = intel_connector_get_hw_state;
605 intel_connector->unregister = intel_connector_unregister;
589 606
590 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) { 607 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
591 dsi = &intel_dsi_devices[i]; 608 dsi = &intel_dsi_devices[i];
@@ -603,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev)
603 intel_encoder->type = INTEL_OUTPUT_DSI; 620 intel_encoder->type = INTEL_OUTPUT_DSI;
604 intel_encoder->crtc_mask = (1 << 0); /* XXX */ 621 intel_encoder->crtc_mask = (1 << 0); /* XXX */
605 622
606 intel_encoder->cloneable = false; 623 intel_encoder->cloneable = 0;
607 drm_connector_init(dev, connector, &intel_dsi_connector_funcs, 624 drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
608 DRM_MODE_CONNECTOR_DSI); 625 DRM_MODE_CONNECTOR_DSI);
609 626
@@ -624,7 +641,7 @@ bool intel_dsi_init(struct drm_device *dev)
624 } 641 }
625 642
626 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 643 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
627 intel_panel_init(&intel_connector->panel, fixed_mode); 644 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
628 645
629 return true; 646 return true;
630 647
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eeff998e52ef..7fe3feedfe03 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -477,6 +477,7 @@ void intel_dvo_init(struct drm_device *dev)
477 intel_encoder->compute_config = intel_dvo_compute_config; 477 intel_encoder->compute_config = intel_dvo_compute_config;
478 intel_encoder->mode_set = intel_dvo_mode_set; 478 intel_encoder->mode_set = intel_dvo_mode_set;
479 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 479 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
480 intel_connector->unregister = intel_connector_unregister;
480 481
481 /* Now, try to find a controller */ 482 /* Now, try to find a controller */
482 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 483 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -521,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev)
521 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 522 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
522 switch (dvo->type) { 523 switch (dvo->type) {
523 case INTEL_DVO_CHIP_TMDS: 524 case INTEL_DVO_CHIP_TMDS:
524 intel_encoder->cloneable = true; 525 intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
526 (1 << INTEL_OUTPUT_DVO);
525 drm_connector_init(dev, connector, 527 drm_connector_init(dev, connector,
526 &intel_dvo_connector_funcs, 528 &intel_dvo_connector_funcs,
527 DRM_MODE_CONNECTOR_DVII); 529 DRM_MODE_CONNECTOR_DVII);
528 encoder_type = DRM_MODE_ENCODER_TMDS; 530 encoder_type = DRM_MODE_ENCODER_TMDS;
529 break; 531 break;
530 case INTEL_DVO_CHIP_LVDS: 532 case INTEL_DVO_CHIP_LVDS:
531 intel_encoder->cloneable = false; 533 intel_encoder->cloneable = 0;
532 drm_connector_init(dev, connector, 534 drm_connector_init(dev, connector,
533 &intel_dvo_connector_funcs, 535 &intel_dvo_connector_funcs,
534 DRM_MODE_CONNECTOR_LVDS); 536 DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 39eac9937a4a..b4d44e62f0c7 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -62,6 +62,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
62{ 62{
63 struct intel_fbdev *ifbdev = 63 struct intel_fbdev *ifbdev =
64 container_of(helper, struct intel_fbdev, helper); 64 container_of(helper, struct intel_fbdev, helper);
65 struct drm_framebuffer *fb;
65 struct drm_device *dev = helper->dev; 66 struct drm_device *dev = helper->dev;
66 struct drm_mode_fb_cmd2 mode_cmd = {}; 67 struct drm_mode_fb_cmd2 mode_cmd = {};
67 struct drm_i915_gem_object *obj; 68 struct drm_i915_gem_object *obj;
@@ -93,18 +94,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
93 /* Flush everything out, we'll be doing GTT only from now on */ 94 /* Flush everything out, we'll be doing GTT only from now on */
94 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 95 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
95 if (ret) { 96 if (ret) {
96 DRM_ERROR("failed to pin fb: %d\n", ret); 97 DRM_ERROR("failed to pin obj: %d\n", ret);
97 goto out_unref; 98 goto out_unref;
98 } 99 }
99 100
100 ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); 101 fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
101 if (ret) 102 if (IS_ERR(fb)) {
103 ret = PTR_ERR(fb);
102 goto out_unpin; 104 goto out_unpin;
105 }
106
107 ifbdev->fb = to_intel_framebuffer(fb);
103 108
104 return 0; 109 return 0;
105 110
106out_unpin: 111out_unpin:
107 i915_gem_object_unpin(obj); 112 i915_gem_object_ggtt_unpin(obj);
108out_unref: 113out_unref:
109 drm_gem_object_unreference(&obj->base); 114 drm_gem_object_unreference(&obj->base);
110out: 115out:
@@ -116,23 +121,26 @@ static int intelfb_create(struct drm_fb_helper *helper,
116{ 121{
117 struct intel_fbdev *ifbdev = 122 struct intel_fbdev *ifbdev =
118 container_of(helper, struct intel_fbdev, helper); 123 container_of(helper, struct intel_fbdev, helper);
119 struct intel_framebuffer *intel_fb = &ifbdev->ifb; 124 struct intel_framebuffer *intel_fb = ifbdev->fb;
120 struct drm_device *dev = helper->dev; 125 struct drm_device *dev = helper->dev;
121 struct drm_i915_private *dev_priv = dev->dev_private; 126 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct fb_info *info; 127 struct fb_info *info;
123 struct drm_framebuffer *fb; 128 struct drm_framebuffer *fb;
124 struct drm_i915_gem_object *obj; 129 struct drm_i915_gem_object *obj;
125 int size, ret; 130 int size, ret;
131 bool prealloc = false;
126 132
127 mutex_lock(&dev->struct_mutex); 133 mutex_lock(&dev->struct_mutex);
128 134
129 if (!intel_fb->obj) { 135 if (!intel_fb || WARN_ON(!intel_fb->obj)) {
130 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 136 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
131 ret = intelfb_alloc(helper, sizes); 137 ret = intelfb_alloc(helper, sizes);
132 if (ret) 138 if (ret)
133 goto out_unlock; 139 goto out_unlock;
140 intel_fb = ifbdev->fb;
134 } else { 141 } else {
135 DRM_DEBUG_KMS("re-using BIOS fb\n"); 142 DRM_DEBUG_KMS("re-using BIOS fb\n");
143 prealloc = true;
136 sizes->fb_width = intel_fb->base.width; 144 sizes->fb_width = intel_fb->base.width;
137 sizes->fb_height = intel_fb->base.height; 145 sizes->fb_height = intel_fb->base.height;
138 } 146 }
@@ -148,7 +156,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
148 156
149 info->par = helper; 157 info->par = helper;
150 158
151 fb = &ifbdev->ifb.base; 159 fb = &ifbdev->fb->base;
152 160
153 ifbdev->helper.fb = fb; 161 ifbdev->helper.fb = fb;
154 ifbdev->helper.fbdev = info; 162 ifbdev->helper.fbdev = info;
@@ -194,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
194 * If the object is stolen however, it will be full of whatever 202 * If the object is stolen however, it will be full of whatever
195 * garbage was left in there. 203 * garbage was left in there.
196 */ 204 */
197 if (ifbdev->ifb.obj->stolen) 205 if (ifbdev->fb->obj->stolen && !prealloc)
198 memset_io(info->screen_base, 0, info->screen_size); 206 memset_io(info->screen_base, 0, info->screen_size);
199 207
200 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 208 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -208,7 +216,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
208 return 0; 216 return 0;
209 217
210out_unpin: 218out_unpin:
211 i915_gem_object_unpin(obj); 219 i915_gem_object_ggtt_unpin(obj);
212 drm_gem_object_unreference(&obj->base); 220 drm_gem_object_unreference(&obj->base);
213out_unlock: 221out_unlock:
214 mutex_unlock(&dev->struct_mutex); 222 mutex_unlock(&dev->struct_mutex);
@@ -236,7 +244,193 @@ static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
236 *blue = intel_crtc->lut_b[regno] << 8; 244 *blue = intel_crtc->lut_b[regno] << 8;
237} 245}
238 246
247static struct drm_fb_helper_crtc *
248intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
249{
250 int i;
251
252 for (i = 0; i < fb_helper->crtc_count; i++)
253 if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
254 return &fb_helper->crtc_info[i];
255
256 return NULL;
257}
258
259/*
260 * Try to read the BIOS display configuration and use it for the initial
261 * fb configuration.
262 *
263 * The BIOS or boot loader will generally create an initial display
264 * configuration for us that includes some set of active pipes and displays.
265 * This routine tries to figure out which pipes and connectors are active
266 * and stuffs them into the crtcs and modes array given to us by the
267 * drm_fb_helper code.
268 *
269 * The overall sequence is:
270 * intel_fbdev_init - from driver load
271 * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
272 * drm_fb_helper_init - build fb helper structs
273 * drm_fb_helper_single_add_all_connectors - more fb helper structs
274 * intel_fbdev_initial_config - apply the config
275 * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
276 * drm_setup_crtcs - build crtc config for fbdev
277 * intel_fb_initial_config - find active connectors etc
278 * drm_fb_helper_single_fb_probe - set up fbdev
279 * intelfb_create - re-use or alloc fb, build out fbdev structs
280 *
281 * Note that we don't give special consideration to whether we could actually
282 * switch to the selected modes without a full modeset. E.g. when the display
283 * is in VGA mode we need to recalculate watermarks and set a new high-res
284 * framebuffer anyway.
285 */
286static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
287 struct drm_fb_helper_crtc **crtcs,
288 struct drm_display_mode **modes,
289 bool *enabled, int width, int height)
290{
291 struct drm_device *dev = fb_helper->dev;
292 int i, j;
293 bool *save_enabled;
294 bool fallback = true;
295 int num_connectors_enabled = 0;
296 int num_connectors_detected = 0;
297
298 /*
299 * If the user specified any force options, just bail here
300 * and use that config.
301 */
302 for (i = 0; i < fb_helper->connector_count; i++) {
303 struct drm_fb_helper_connector *fb_conn;
304 struct drm_connector *connector;
305
306 fb_conn = fb_helper->connector_info[i];
307 connector = fb_conn->connector;
308
309 if (!enabled[i])
310 continue;
311
312 if (connector->force != DRM_FORCE_UNSPECIFIED)
313 return false;
314 }
315
316 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
317 GFP_KERNEL);
318 if (!save_enabled)
319 return false;
320
321 memcpy(save_enabled, enabled, dev->mode_config.num_connector);
322
323 for (i = 0; i < fb_helper->connector_count; i++) {
324 struct drm_fb_helper_connector *fb_conn;
325 struct drm_connector *connector;
326 struct drm_encoder *encoder;
327 struct drm_fb_helper_crtc *new_crtc;
328
329 fb_conn = fb_helper->connector_info[i];
330 connector = fb_conn->connector;
331
332 if (connector->status == connector_status_connected)
333 num_connectors_detected++;
334
335 if (!enabled[i]) {
336 DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
337 connector->base.id);
338 continue;
339 }
340
341 encoder = connector->encoder;
342 if (!encoder || WARN_ON(!encoder->crtc)) {
343 DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
344 connector->base.id);
345 enabled[i] = false;
346 continue;
347 }
348
349 num_connectors_enabled++;
350
351 new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
352
353 /*
354 * Make sure we're not trying to drive multiple connectors
355 * with a single CRTC, since our cloning support may not
356 * match the BIOS.
357 */
358 for (j = 0; j < fb_helper->connector_count; j++) {
359 if (crtcs[j] == new_crtc) {
360 DRM_DEBUG_KMS("fallback: cloned configuration\n");
361 fallback = true;
362 goto out;
363 }
364 }
365
366 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
367 fb_conn->connector->base.id);
368
369 /* go for command line mode first */
370 modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
371
372 /* try for preferred next */
373 if (!modes[i]) {
374 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
375 fb_conn->connector->base.id);
376 modes[i] = drm_has_preferred_mode(fb_conn, width,
377 height);
378 }
379
380 /* last resort: use current mode */
381 if (!modes[i]) {
382 /*
383 * IMPORTANT: We want to use the adjusted mode (i.e.
384 * after the panel fitter upscaling) as the initial
385 * config, not the input mode, which is what crtc->mode
386 * usually contains. But since our current fastboot
387 * code puts a mode derived from the post-pfit timings
388 * into crtc->mode this works out correctly. We don't
389 * use hwmode anywhere right now, so use it for this
390 * since the fb helper layer wants a pointer to
391 * something we own.
392 */
393 intel_mode_from_pipe_config(&encoder->crtc->hwmode,
394 &to_intel_crtc(encoder->crtc)->config);
395 modes[i] = &encoder->crtc->hwmode;
396 }
397 crtcs[i] = new_crtc;
398
399 DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
400 drm_get_connector_name(connector),
401 encoder->crtc->base.id,
402 modes[i]->name);
403
404 fallback = false;
405 }
406
407 /*
408 * If the BIOS didn't enable everything it could, fall back to the
409 * fbdev helper library's usual behaviour of lighting up as many
410 * outputs as possible.
411 */
412 if (num_connectors_enabled != num_connectors_detected &&
413 num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
414 DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
415 DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
416 num_connectors_detected);
417 fallback = true;
418 }
419
420out:
421 if (fallback) {
422 DRM_DEBUG_KMS("Not using firmware configuration\n");
423 memcpy(enabled, save_enabled, dev->mode_config.num_connector);
424 kfree(save_enabled);
425 return false;
426 }
427
428 kfree(save_enabled);
429 return true;
430}
431
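intel_fb_initial_config() picks each connector's initial mode with a three-step fallback: kernel command line first, then the EDID-preferred mode, then whatever mode the CRTC is already running, so the BIOS configuration survives. The chain in isolation, with a toy mode type:

#include <stdio.h>

struct mode { const char *name; };

/* Fallback chain used above: cmdline, then preferred, then current. */
static struct mode *pick_mode(struct mode *cmdline, struct mode *preferred,
			      struct mode *current)
{
	if (cmdline)
		return cmdline;		/* user asked for it explicitly */
	if (preferred)
		return preferred;	/* EDID-preferred timing */
	return current;			/* keep what the BIOS lit up */
}

int main(void)
{
	struct mode pref = { "1920x1080" }, cur = { "1024x768" };

	printf("picked: %s\n", pick_mode(NULL, &pref, &cur)->name);
	return 0;
}
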
239static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 432static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
433 .initial_config = intel_fb_initial_config,
240 .gamma_set = intel_crtc_fb_gamma_set, 434 .gamma_set = intel_crtc_fb_gamma_set,
241 .gamma_get = intel_crtc_fb_gamma_get, 435 .gamma_get = intel_crtc_fb_gamma_get,
242 .fb_probe = intelfb_create, 436 .fb_probe = intelfb_create,
@@ -258,8 +452,139 @@ static void intel_fbdev_destroy(struct drm_device *dev,
258 452
259 drm_fb_helper_fini(&ifbdev->helper); 453 drm_fb_helper_fini(&ifbdev->helper);
260 454
261 drm_framebuffer_unregister_private(&ifbdev->ifb.base); 455 drm_framebuffer_unregister_private(&ifbdev->fb->base);
262 intel_framebuffer_fini(&ifbdev->ifb); 456 drm_framebuffer_remove(&ifbdev->fb->base);
457}
458
459/*
460 * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
461 * The core display code will have read out the current plane configuration,
462 * so we use that to figure out if there's an object for us to use as the
463 * fb, and if so, we re-use it for the fbdev configuration.
464 *
465 * Note we only support a single fb shared across pipes for boot (mostly for
466 * fbcon), so we just find the biggest and use that.
467 */
468static bool intel_fbdev_init_bios(struct drm_device *dev,
469 struct intel_fbdev *ifbdev)
470{
471 struct intel_framebuffer *fb = NULL;
472 struct drm_crtc *crtc;
473 struct intel_crtc *intel_crtc;
474 struct intel_plane_config *plane_config = NULL;
475 unsigned int max_size = 0;
476
477 if (!i915.fastboot)
478 return false;
479
480 /* Find the largest fb */
481 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
482 intel_crtc = to_intel_crtc(crtc);
483
484 if (!intel_crtc->active || !crtc->primary->fb) {
485 DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
486 pipe_name(intel_crtc->pipe));
487 continue;
488 }
489
490 if (intel_crtc->plane_config.size > max_size) {
491 DRM_DEBUG_KMS("found possible fb from plane %c\n",
492 pipe_name(intel_crtc->pipe));
493 plane_config = &intel_crtc->plane_config;
494 fb = to_intel_framebuffer(crtc->primary->fb);
495 max_size = plane_config->size;
496 }
497 }
498
499 if (!fb) {
500 DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
501 goto out;
502 }
503
504 /* Now make sure all the pipes will fit into it */
505 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
506 unsigned int cur_size;
507
508 intel_crtc = to_intel_crtc(crtc);
509
510 if (!intel_crtc->active) {
511 DRM_DEBUG_KMS("pipe %c not active, skipping\n",
512 pipe_name(intel_crtc->pipe));
513 continue;
514 }
515
516 DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
517 pipe_name(intel_crtc->pipe));
518
519 /*
520 * See if the plane fb we found above will fit on this
521 * pipe. Note we need to use the selected fb's pitch and bpp
522 * rather than the current pipe's, since they differ.
523 */
524 cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
525 cur_size = cur_size * fb->base.bits_per_pixel / 8;
526 if (fb->base.pitches[0] < cur_size) {
527 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
528 pipe_name(intel_crtc->pipe),
529 cur_size, fb->base.pitches[0]);
530 plane_config = NULL;
531 fb = NULL;
532 break;
533 }
534
535 cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
536 cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
537 cur_size *= fb->base.pitches[0];
538 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
539 pipe_name(intel_crtc->pipe),
540 intel_crtc->config.adjusted_mode.crtc_hdisplay,
541 intel_crtc->config.adjusted_mode.crtc_vdisplay,
542 fb->base.bits_per_pixel,
543 cur_size);
544
545 if (cur_size > max_size) {
546 DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
547 pipe_name(intel_crtc->pipe),
548 cur_size, max_size);
549 plane_config = NULL;
550 fb = NULL;
551 break;
552 }
553
554 DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
555 pipe_name(intel_crtc->pipe),
556 max_size, cur_size);
557 }
558
559 if (!fb) {
560 DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
561 goto out;
562 }
563
564 ifbdev->preferred_bpp = fb->base.bits_per_pixel;
565 ifbdev->fb = fb;
566
567 drm_framebuffer_reference(&ifbdev->fb->base);
568
569 /* Final pass to check if any active pipes don't have fbs */
570 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
571 intel_crtc = to_intel_crtc(crtc);
572
573 if (!intel_crtc->active)
574 continue;
575
576 WARN(!crtc->primary->fb,
577 "re-used BIOS config but lost an fb on crtc %d\n",
578 crtc->base.id);
579 }
580
581
582 DRM_DEBUG_KMS("using BIOS fb for initial console\n");
583 return true;
584
585out:
586
587 return false;
263} 588}
264 589
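The second pass of intel_fbdev_init_bios() checks that the largest BIOS framebuffer can serve every active pipe: a scanline must fit within the fb's pitch, and the tile-aligned line count times the pitch must fit within the fb's size. The same arithmetic as a standalone check (the concrete sizes and the tile-row value are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* A pipe fits the BIOS fb if one scanline fits in the fb's pitch and
 * all of its (tile-aligned) lines fit in the fb's total size. */
static bool pipe_fits(unsigned hdisplay, unsigned vdisplay, unsigned bpp,
		      unsigned pitch, unsigned fb_size, unsigned tile_rows)
{
	if (hdisplay * bpp / 8 > pitch)
		return false;		/* not wide enough */

	return ALIGN_UP(vdisplay, tile_rows) * pitch <= fb_size;
}

int main(void)
{
	/* 1920x1080 XRGB on an 8192-byte pitch, 16 MiB fb, 8-row tiles */
	printf("fits: %d\n", pipe_fits(1920, 1080, 32, 8192, 16 << 20, 8));
	return 0;
}
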
265int intel_fbdev_init(struct drm_device *dev) 590int intel_fbdev_init(struct drm_device *dev)
@@ -268,21 +593,25 @@ int intel_fbdev_init(struct drm_device *dev)
268 struct drm_i915_private *dev_priv = dev->dev_private; 593 struct drm_i915_private *dev_priv = dev->dev_private;
269 int ret; 594 int ret;
270 595
271 ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL); 596 if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
272 if (!ifbdev) 597 return -ENODEV;
598
599 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
600 if (ifbdev == NULL)
273 return -ENOMEM; 601 return -ENOMEM;
274 602
275 dev_priv->fbdev = ifbdev;
276 ifbdev->helper.funcs = &intel_fb_helper_funcs; 603 ifbdev->helper.funcs = &intel_fb_helper_funcs;
604 if (!intel_fbdev_init_bios(dev, ifbdev))
605 ifbdev->preferred_bpp = 32;
277 606
278 ret = drm_fb_helper_init(dev, &ifbdev->helper, 607 ret = drm_fb_helper_init(dev, &ifbdev->helper,
279 INTEL_INFO(dev)->num_pipes, 608 INTEL_INFO(dev)->num_pipes, 4);
280 4);
281 if (ret) { 609 if (ret) {
282 kfree(ifbdev); 610 kfree(ifbdev);
283 return ret; 611 return ret;
284 } 612 }
285 613
614 dev_priv->fbdev = ifbdev;
286 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 615 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
287 616
288 return 0; 617 return 0;
@@ -291,9 +620,10 @@ int intel_fbdev_init(struct drm_device *dev)
291void intel_fbdev_initial_config(struct drm_device *dev) 620void intel_fbdev_initial_config(struct drm_device *dev)
292{ 621{
293 struct drm_i915_private *dev_priv = dev->dev_private; 622 struct drm_i915_private *dev_priv = dev->dev_private;
623 struct intel_fbdev *ifbdev = dev_priv->fbdev;
294 624
 295 /* Due to peculiar init order wrt hpd handling this is separate. */ 625
296 drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); 626 drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
297} 627}
298 628
299void intel_fbdev_fini(struct drm_device *dev) 629void intel_fbdev_fini(struct drm_device *dev)
@@ -322,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
322 * been restored from swap. If the object is stolen however, it will be 652 * been restored from swap. If the object is stolen however, it will be
323 * full of whatever garbage was left in there. 653 * full of whatever garbage was left in there.
324 */ 654 */
325 if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen) 655 if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
326 memset_io(info->screen_base, 0, info->screen_size); 656 memset_io(info->screen_base, 0, info->screen_size);
327 657
328 fb_set_suspend(info, state); 658 fb_set_suspend(info, state);
@@ -331,7 +661,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
331void intel_fbdev_output_poll_changed(struct drm_device *dev) 661void intel_fbdev_output_poll_changed(struct drm_device *dev)
332{ 662{
333 struct drm_i915_private *dev_priv = dev->dev_private; 663 struct drm_i915_private *dev_priv = dev->dev_private;
334 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 664 if (dev_priv->fbdev)
665 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
335} 666}
336 667
337void intel_fbdev_restore_mode(struct drm_device *dev) 668void intel_fbdev_restore_mode(struct drm_device *dev)
@@ -339,7 +670,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
339 int ret; 670 int ret;
340 struct drm_i915_private *dev_priv = dev->dev_private; 671 struct drm_i915_private *dev_priv = dev->dev_private;
341 672
342 if (INTEL_INFO(dev)->num_pipes == 0) 673 if (!dev_priv->fbdev)
343 return; 674 return;
344 675
345 drm_modeset_lock_all(dev); 676 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee3181ebcc92..b0413e190625 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
113} 113}
114 114
115static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, 115static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
116 enum transcoder cpu_transcoder) 116 enum transcoder cpu_transcoder,
117 struct drm_i915_private *dev_priv)
117{ 118{
118 switch (type) { 119 switch (type) {
119 case HDMI_INFOFRAME_TYPE_AVI: 120 case HDMI_INFOFRAME_TYPE_AVI:
@@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
296 u32 val = I915_READ(ctl_reg); 297 u32 val = I915_READ(ctl_reg);
297 298
298 data_reg = hsw_infoframe_data_reg(type, 299 data_reg = hsw_infoframe_data_reg(type,
299 intel_crtc->config.cpu_transcoder); 300 intel_crtc->config.cpu_transcoder,
301 dev_priv);
300 if (data_reg == 0) 302 if (data_reg == 0)
301 return; 303 return;
302 304
@@ -423,7 +425,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
423 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 425 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
424 u32 reg = VIDEO_DIP_CTL; 426 u32 reg = VIDEO_DIP_CTL;
425 u32 val = I915_READ(reg); 427 u32 val = I915_READ(reg);
426 u32 port; 428 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
427 429
428 assert_hdmi_port_disabled(intel_hdmi); 430 assert_hdmi_port_disabled(intel_hdmi);
429 431
@@ -447,18 +449,6 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
447 return; 449 return;
448 } 450 }
449 451
450 switch (intel_dig_port->port) {
451 case PORT_B:
452 port = VIDEO_DIP_PORT_B;
453 break;
454 case PORT_C:
455 port = VIDEO_DIP_PORT_C;
456 break;
457 default:
458 BUG();
459 return;
460 }
461
462 if (port != (val & VIDEO_DIP_PORT_MASK)) { 452 if (port != (val & VIDEO_DIP_PORT_MASK)) {
463 if (val & VIDEO_DIP_ENABLE) { 453 if (val & VIDEO_DIP_ENABLE) {
464 val &= ~VIDEO_DIP_ENABLE; 454 val &= ~VIDEO_DIP_ENABLE;
@@ -489,7 +479,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
489 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 479 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
490 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 480 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
491 u32 val = I915_READ(reg); 481 u32 val = I915_READ(reg);
492 u32 port; 482 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
493 483
494 assert_hdmi_port_disabled(intel_hdmi); 484 assert_hdmi_port_disabled(intel_hdmi);
495 485
@@ -505,21 +495,6 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
505 return; 495 return;
506 } 496 }
507 497
508 switch (intel_dig_port->port) {
509 case PORT_B:
510 port = VIDEO_DIP_PORT_B;
511 break;
512 case PORT_C:
513 port = VIDEO_DIP_PORT_C;
514 break;
515 case PORT_D:
516 port = VIDEO_DIP_PORT_D;
517 break;
518 default:
519 BUG();
520 return;
521 }
522
523 if (port != (val & VIDEO_DIP_PORT_MASK)) { 498 if (port != (val & VIDEO_DIP_PORT_MASK)) {
524 if (val & VIDEO_DIP_ENABLE) { 499 if (val & VIDEO_DIP_ENABLE) {
525 val &= ~VIDEO_DIP_ENABLE; 500 val &= ~VIDEO_DIP_ENABLE;
@@ -692,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
692 struct drm_device *dev = encoder->base.dev; 667 struct drm_device *dev = encoder->base.dev;
693 struct drm_i915_private *dev_priv = dev->dev_private; 668 struct drm_i915_private *dev_priv = dev->dev_private;
694 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 669 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
670 enum intel_display_power_domain power_domain;
695 u32 tmp; 671 u32 tmp;
696 672
673 power_domain = intel_display_port_power_domain(encoder);
674 if (!intel_display_power_enabled(dev_priv, power_domain))
675 return false;
676
697 tmp = I915_READ(intel_hdmi->hdmi_reg); 677 tmp = I915_READ(intel_hdmi->hdmi_reg);
698 678
699 if (!(tmp & SDVO_ENABLE)) 679 if (!(tmp & SDVO_ENABLE))
@@ -868,6 +848,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
868 return MODE_OK; 848 return MODE_OK;
869} 849}
870 850
851static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
852{
853 struct drm_device *dev = crtc->base.dev;
854 struct intel_encoder *encoder;
855 int count = 0, count_hdmi = 0;
856
857 if (!HAS_PCH_SPLIT(dev))
858 return false;
859
860 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
861 if (encoder->new_crtc != crtc)
862 continue;
863
864 count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
865 count++;
866 }
867
868 /*
869 * HDMI 12bpc affects the clocks, so it's only possible
870 * when not cloning with other encoder types.
871 */
872 return count_hdmi > 0 && count_hdmi == count;
873}
874
871bool intel_hdmi_compute_config(struct intel_encoder *encoder, 875bool intel_hdmi_compute_config(struct intel_encoder *encoder,
872 struct intel_crtc_config *pipe_config) 876 struct intel_crtc_config *pipe_config)
873{ 877{
@@ -900,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
900 * within limits. 904 * within limits.
901 */ 905 */
902 if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && 906 if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
903 clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { 907 clock_12bpc <= portclock_limit &&
908 hdmi_12bpc_possible(encoder->new_crtc)) {
904 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 909 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
905 desired_bpp = 12*3; 910 desired_bpp = 12*3;
906 911
@@ -934,11 +939,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
934 struct intel_encoder *intel_encoder = &intel_dig_port->base; 939 struct intel_encoder *intel_encoder = &intel_dig_port->base;
935 struct drm_i915_private *dev_priv = dev->dev_private; 940 struct drm_i915_private *dev_priv = dev->dev_private;
936 struct edid *edid; 941 struct edid *edid;
942 enum intel_display_power_domain power_domain;
937 enum drm_connector_status status = connector_status_disconnected; 943 enum drm_connector_status status = connector_status_disconnected;
938 944
939 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 945 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
940 connector->base.id, drm_get_connector_name(connector)); 946 connector->base.id, drm_get_connector_name(connector));
941 947
948 power_domain = intel_display_port_power_domain(intel_encoder);
949 intel_display_power_get(dev_priv, power_domain);
950
942 intel_hdmi->has_hdmi_sink = false; 951 intel_hdmi->has_hdmi_sink = false;
943 intel_hdmi->has_audio = false; 952 intel_hdmi->has_audio = false;
944 intel_hdmi->rgb_quant_range_selectable = false; 953 intel_hdmi->rgb_quant_range_selectable = false;
@@ -966,31 +975,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
966 intel_encoder->type = INTEL_OUTPUT_HDMI; 975 intel_encoder->type = INTEL_OUTPUT_HDMI;
967 } 976 }
968 977
978 intel_display_power_put(dev_priv, power_domain);
979
969 return status; 980 return status;
970} 981}
971 982
972static int intel_hdmi_get_modes(struct drm_connector *connector) 983static int intel_hdmi_get_modes(struct drm_connector *connector)
973{ 984{
974 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 985 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
986 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
975 struct drm_i915_private *dev_priv = connector->dev->dev_private; 987 struct drm_i915_private *dev_priv = connector->dev->dev_private;
988 enum intel_display_power_domain power_domain;
989 int ret;
976 990
977 /* We should parse the EDID data and find out if it's an HDMI sink so 991 /* We should parse the EDID data and find out if it's an HDMI sink so
978 * we can send audio to it. 992 * we can send audio to it.
979 */ 993 */
980 994
981 return intel_ddc_get_modes(connector, 995 power_domain = intel_display_port_power_domain(intel_encoder);
996 intel_display_power_get(dev_priv, power_domain);
997
998 ret = intel_ddc_get_modes(connector,
982 intel_gmbus_get_adapter(dev_priv, 999 intel_gmbus_get_adapter(dev_priv,
983 intel_hdmi->ddc_bus)); 1000 intel_hdmi->ddc_bus));
1001
1002 intel_display_power_put(dev_priv, power_domain);
1003
1004 return ret;
984} 1005}
985 1006
986static bool 1007static bool
987intel_hdmi_detect_audio(struct drm_connector *connector) 1008intel_hdmi_detect_audio(struct drm_connector *connector)
988{ 1009{
989 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1010 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
1011 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
990 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1012 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1013 enum intel_display_power_domain power_domain;
991 struct edid *edid; 1014 struct edid *edid;
992 bool has_audio = false; 1015 bool has_audio = false;
993 1016
1017 power_domain = intel_display_port_power_domain(intel_encoder);
1018 intel_display_power_get(dev_priv, power_domain);
1019
994 edid = drm_get_edid(connector, 1020 edid = drm_get_edid(connector,
995 intel_gmbus_get_adapter(dev_priv, 1021 intel_gmbus_get_adapter(dev_priv,
996 intel_hdmi->ddc_bus)); 1022 intel_hdmi->ddc_bus));
@@ -1000,6 +1026,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
1000 kfree(edid); 1026 kfree(edid);
1001 } 1027 }
1002 1028
1029 intel_display_power_put(dev_priv, power_domain);
1030
1003 return has_audio; 1031 return has_audio;
1004} 1032}
1005 1033
@@ -1261,6 +1289,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1261 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1289 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
1262 else 1290 else
1263 intel_connector->get_hw_state = intel_connector_get_hw_state; 1291 intel_connector->get_hw_state = intel_connector_get_hw_state;
1292 intel_connector->unregister = intel_connector_unregister;
1264 1293
1265 intel_hdmi_add_properties(intel_hdmi, connector); 1294 intel_hdmi_add_properties(intel_hdmi, connector);
1266 1295
@@ -1314,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1314 1343
1315 intel_encoder->type = INTEL_OUTPUT_HDMI; 1344 intel_encoder->type = INTEL_OUTPUT_HDMI;
1316 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 1345 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1317 intel_encoder->cloneable = false; 1346 intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
1347 /*
1348 * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
1349 * to work on real hardware. And since g4x can send infoframes to
1350 * only one port anyway, nothing is lost by allowing it.
1351 */
1352 if (IS_G4X(dev))
1353 intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
1318 1354
1319 intel_dig_port->port = port; 1355 intel_dig_port->port = port;
1320 intel_dig_port->hdmi.hdmi_reg = hdmi_reg; 1356 intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
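The intel_hdmi.c hunks above share one theme: every path that touches port registers or the DDC bus (get_hw_state, detect, get_modes, detect_audio) is now bracketed by intel_display_power_get()/intel_display_power_put(), so the display power domain is held for exactly the duration of the access, and get_hw_state() bails out early when the domain is off. A minimal stand-alone sketch of that bracketing pattern, using an invented refcounted domain rather than the real i915 API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy power domain: refcount > 0 means the block is powered. */
    struct power_domain {
            const char *name;
            int refcount;
    };

    static void power_get(struct power_domain *d)
    {
            if (d->refcount++ == 0)
                    printf("powering up %s\n", d->name);
    }

    static void power_put(struct power_domain *d)
    {
            if (--d->refcount == 0)
                    printf("powering down %s\n", d->name);
    }

    static bool detect_connector(struct power_domain *d)
    {
            bool connected;

            power_get(d);           /* registers only readable while held */
            connected = true;       /* stand-in for the EDID/DDC probe */
            power_put(d);           /* drop as soon as the access is done */
            return connected;
    }

    int main(void)
    {
            struct power_domain port = { "PORT_B", 0 };

            printf("connected: %d\n", detect_connector(&port));
            return 0;
    }
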
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8bcb93a2a9f6..f1ecf916474a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
848 struct drm_i915_private *dev_priv = dev->dev_private; 848 struct drm_i915_private *dev_priv = dev->dev_private;
849 849
850 /* use the module option value if specified */ 850 /* use the module option value if specified */
851 if (i915_lvds_channel_mode > 0) 851 if (i915.lvds_channel_mode > 0)
852 return i915_lvds_channel_mode == 2; 852 return i915.lvds_channel_mode == 2;
853 853
854 if (dmi_check_system(intel_dual_link_lvds)) 854 if (dmi_check_system(intel_dual_link_lvds))
855 return true; 855 return true;
@@ -899,6 +899,7 @@ void intel_lvds_init(struct drm_device *dev)
899 struct drm_encoder *encoder; 899 struct drm_encoder *encoder;
900 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 900 struct drm_display_mode *scan; /* *modes, *bios_mode; */
901 struct drm_display_mode *fixed_mode = NULL; 901 struct drm_display_mode *fixed_mode = NULL;
902 struct drm_display_mode *downclock_mode = NULL;
902 struct edid *edid; 903 struct edid *edid;
903 struct drm_crtc *crtc; 904 struct drm_crtc *crtc;
904 u32 lvds; 905 u32 lvds;
@@ -957,11 +958,12 @@ void intel_lvds_init(struct drm_device *dev)
957 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 958 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
958 intel_encoder->get_config = intel_lvds_get_config; 959 intel_encoder->get_config = intel_lvds_get_config;
959 intel_connector->get_hw_state = intel_connector_get_hw_state; 960 intel_connector->get_hw_state = intel_connector_get_hw_state;
961 intel_connector->unregister = intel_connector_unregister;
960 962
961 intel_connector_attach_encoder(intel_connector, intel_encoder); 963 intel_connector_attach_encoder(intel_connector, intel_encoder);
962 intel_encoder->type = INTEL_OUTPUT_LVDS; 964 intel_encoder->type = INTEL_OUTPUT_LVDS;
963 965
964 intel_encoder->cloneable = false; 966 intel_encoder->cloneable = 0;
965 if (HAS_PCH_SPLIT(dev)) 967 if (HAS_PCH_SPLIT(dev))
966 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 968 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
967 else if (IS_GEN4(dev)) 969 else if (IS_GEN4(dev))
@@ -1000,6 +1002,7 @@ void intel_lvds_init(struct drm_device *dev)
1000 * Attempt to get the fixed panel mode from DDC. Assume that the 1002 * Attempt to get the fixed panel mode from DDC. Assume that the
1001 * preferred mode is the right one. 1003 * preferred mode is the right one.
1002 */ 1004 */
1005 mutex_lock(&dev->mode_config.mutex);
1003 edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); 1006 edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
1004 if (edid) { 1007 if (edid) {
1005 if (drm_add_edid_modes(connector, edid)) { 1008 if (drm_add_edid_modes(connector, edid)) {
@@ -1032,15 +1035,14 @@ void intel_lvds_init(struct drm_device *dev)
1032 1035
1033 fixed_mode = drm_mode_duplicate(dev, scan); 1036 fixed_mode = drm_mode_duplicate(dev, scan);
1034 if (fixed_mode) { 1037 if (fixed_mode) {
1035 intel_connector->panel.downclock_mode = 1038 downclock_mode =
1036 intel_find_panel_downclock(dev, 1039 intel_find_panel_downclock(dev,
1037 fixed_mode, connector); 1040 fixed_mode, connector);
1038 if (intel_connector->panel.downclock_mode != 1041 if (downclock_mode != NULL &&
1039 NULL && i915_lvds_downclock) { 1042 i915.lvds_downclock) {
1040 /* We found the downclock for LVDS. */ 1043 /* We found the downclock for LVDS. */
1041 dev_priv->lvds_downclock_avail = true; 1044 dev_priv->lvds_downclock_avail = true;
1042 dev_priv->lvds_downclock = 1045 dev_priv->lvds_downclock =
1043 intel_connector->panel.
1044 downclock_mode->clock; 1046 downclock_mode->clock;
1045 DRM_DEBUG_KMS("LVDS downclock is found" 1047 DRM_DEBUG_KMS("LVDS downclock is found"
1046 " in EDID. Normal clock %dKhz, " 1048 " in EDID. Normal clock %dKhz, "
@@ -1094,6 +1096,8 @@ void intel_lvds_init(struct drm_device *dev)
1094 goto failed; 1096 goto failed;
1095 1097
1096out: 1098out:
1099 mutex_unlock(&dev->mode_config.mutex);
1100
1097 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); 1101 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
1098 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1102 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1099 lvds_encoder->is_dual_link ? "dual" : "single"); 1103 lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1116,17 +1120,17 @@ out:
1116 } 1120 }
1117 drm_sysfs_connector_add(connector); 1121 drm_sysfs_connector_add(connector);
1118 1122
1119 intel_panel_init(&intel_connector->panel, fixed_mode); 1123 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1120 intel_panel_setup_backlight(connector); 1124 intel_panel_setup_backlight(connector);
1121 1125
1122 return; 1126 return;
1123 1127
1124failed: 1128failed:
1129 mutex_unlock(&dev->mode_config.mutex);
1130
1125 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1131 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1126 drm_connector_cleanup(connector); 1132 drm_connector_cleanup(connector);
1127 drm_encoder_cleanup(encoder); 1133 drm_encoder_cleanup(encoder);
1128 if (fixed_mode)
1129 drm_mode_destroy(dev, fixed_mode);
1130 kfree(lvds_encoder); 1134 kfree(lvds_encoder);
1131 kfree(lvds_connector); 1135 kfree(lvds_connector);
1132 return; 1136 return;
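The locking added to intel_lvds_init() is easy to miss in the noise: mutex_lock(&dev->mode_config.mutex) is taken before the EDID probe and released on both the out: and failed: paths, so every exit drops the lock exactly once. A stand-alone sketch of that control-flow shape (names illustrative, not the drm API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Probe under the lock; both exits unlock exactly once. */
    static int lvds_init(int have_panel)
    {
            pthread_mutex_lock(&mode_config_mutex);
            if (!have_panel)
                    goto failed;
            /* stand-in for the EDID/VBT fixed-mode probe */
            pthread_mutex_unlock(&mode_config_mutex);  /* the "out:" path */
            printf("panel found\n");
            return 0;

    failed:
            pthread_mutex_unlock(&mode_config_mutex);
            printf("no LVDS modes found, disabling\n");
            return -1;
    }

    int main(void)
    {
            lvds_init(1);
            lvds_init(0);
            return 0;
    }
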
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a759ecdb7a6e..d8adc9104dca 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -189,7 +189,7 @@ struct intel_overlay {
189static struct overlay_registers __iomem * 189static struct overlay_registers __iomem *
190intel_overlay_map_regs(struct intel_overlay *overlay) 190intel_overlay_map_regs(struct intel_overlay *overlay)
191{ 191{
192 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 192 struct drm_i915_private *dev_priv = overlay->dev->dev_private;
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -212,7 +212,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
212 void (*tail)(struct intel_overlay *)) 212 void (*tail)(struct intel_overlay *))
213{ 213{
214 struct drm_device *dev = overlay->dev; 214 struct drm_device *dev = overlay->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private; 215 struct drm_i915_private *dev_priv = dev->dev_private;
216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
217 int ret; 217 int ret;
218 218
@@ -262,7 +262,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
262 bool load_polyphase_filter) 262 bool load_polyphase_filter)
263{ 263{
264 struct drm_device *dev = overlay->dev; 264 struct drm_device *dev = overlay->dev;
265 drm_i915_private_t *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = dev->dev_private;
266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
267 u32 flip_addr = overlay->flip_addr; 267 u32 flip_addr = overlay->flip_addr;
268 u32 tmp; 268 u32 tmp;
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
293{ 293{
294 struct drm_i915_gem_object *obj = overlay->old_vid_bo; 294 struct drm_i915_gem_object *obj = overlay->old_vid_bo;
295 295
296 i915_gem_object_unpin(obj); 296 i915_gem_object_ggtt_unpin(obj);
297 drm_gem_object_unreference(&obj->base); 297 drm_gem_object_unreference(&obj->base);
298 298
299 overlay->old_vid_bo = NULL; 299 overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
306 /* never have the overlay hw on without showing a frame */ 306 /* never have the overlay hw on without showing a frame */
307 BUG_ON(!overlay->vid_bo); 307 BUG_ON(!overlay->vid_bo);
308 308
309 i915_gem_object_unpin(obj); 309 i915_gem_object_ggtt_unpin(obj);
310 drm_gem_object_unreference(&obj->base); 310 drm_gem_object_unreference(&obj->base);
311 overlay->vid_bo = NULL; 311 overlay->vid_bo = NULL;
312 312
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
362static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) 362static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
363{ 363{
364 struct drm_device *dev = overlay->dev; 364 struct drm_device *dev = overlay->dev;
365 drm_i915_private_t *dev_priv = dev->dev_private; 365 struct drm_i915_private *dev_priv = dev->dev_private;
366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
367 int ret; 367 int ret;
368 368
@@ -388,7 +388,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
388static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 388static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
389{ 389{
390 struct drm_device *dev = overlay->dev; 390 struct drm_device *dev = overlay->dev;
391 drm_i915_private_t *dev_priv = dev->dev_private; 391 struct drm_i915_private *dev_priv = dev->dev_private;
392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
393 int ret; 393 int ret;
394 394
@@ -606,14 +606,14 @@ static void update_colorkey(struct intel_overlay *overlay,
606{ 606{
607 u32 key = overlay->color_key; 607 u32 key = overlay->color_key;
608 608
609 switch (overlay->crtc->base.fb->bits_per_pixel) { 609 switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
610 case 8: 610 case 8:
611 iowrite32(0, &regs->DCLRKV); 611 iowrite32(0, &regs->DCLRKV);
612 iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM); 612 iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
613 break; 613 break;
614 614
615 case 16: 615 case 16:
616 if (overlay->crtc->base.fb->depth == 15) { 616 if (overlay->crtc->base.primary->fb->depth == 15) {
617 iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV); 617 iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
618 iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE, 618 iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
619 &regs->DCLRKM); 619 &regs->DCLRKM);
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
782 return 0; 782 return 0;
783 783
784out_unpin: 784out_unpin:
785 i915_gem_object_unpin(new_bo); 785 i915_gem_object_ggtt_unpin(new_bo);
786 return ret; 786 return ret;
787} 787}
788 788
@@ -834,7 +834,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
834static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 834static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
835{ 835{
836 struct drm_device *dev = overlay->dev; 836 struct drm_device *dev = overlay->dev;
837 drm_i915_private_t *dev_priv = dev->dev_private; 837 struct drm_i915_private *dev_priv = dev->dev_private;
838 u32 pfit_control = I915_READ(PFIT_CONTROL); 838 u32 pfit_control = I915_READ(PFIT_CONTROL);
839 u32 ratio; 839 u32 ratio;
840 840
@@ -1026,7 +1026,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1026 struct drm_file *file_priv) 1026 struct drm_file *file_priv)
1027{ 1027{
1028 struct drm_intel_overlay_put_image *put_image_rec = data; 1028 struct drm_intel_overlay_put_image *put_image_rec = data;
1029 drm_i915_private_t *dev_priv = dev->dev_private; 1029 struct drm_i915_private *dev_priv = dev->dev_private;
1030 struct intel_overlay *overlay; 1030 struct intel_overlay *overlay;
1031 struct drm_mode_object *drmmode_obj; 1031 struct drm_mode_object *drmmode_obj;
1032 struct intel_crtc *crtc; 1032 struct intel_crtc *crtc;
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1076 mutex_lock(&dev->struct_mutex); 1076 mutex_lock(&dev->struct_mutex);
1077 1077
1078 if (new_bo->tiling_mode) { 1078 if (new_bo->tiling_mode) {
1079 DRM_ERROR("buffer used for overlay image can not be tiled\n"); 1079 DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
1080 ret = -EINVAL; 1080 ret = -EINVAL;
1081 goto out_unlock; 1081 goto out_unlock;
1082 } 1082 }
@@ -1226,7 +1226,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1226 struct drm_file *file_priv) 1226 struct drm_file *file_priv)
1227{ 1227{
1228 struct drm_intel_overlay_attrs *attrs = data; 1228 struct drm_intel_overlay_attrs *attrs = data;
1229 drm_i915_private_t *dev_priv = dev->dev_private; 1229 struct drm_i915_private *dev_priv = dev->dev_private;
1230 struct intel_overlay *overlay; 1230 struct intel_overlay *overlay;
1231 struct overlay_registers __iomem *regs; 1231 struct overlay_registers __iomem *regs;
1232 int ret; 1232 int ret;
@@ -1311,7 +1311,7 @@ out_unlock:
1311 1311
1312void intel_setup_overlay(struct drm_device *dev) 1312void intel_setup_overlay(struct drm_device *dev)
1313{ 1313{
1314 drm_i915_private_t *dev_priv = dev->dev_private; 1314 struct drm_i915_private *dev_priv = dev->dev_private;
1315 struct intel_overlay *overlay; 1315 struct intel_overlay *overlay;
1316 struct drm_i915_gem_object *reg_bo; 1316 struct drm_i915_gem_object *reg_bo;
1317 struct overlay_registers __iomem *regs; 1317 struct overlay_registers __iomem *regs;
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
1349 } 1349 }
1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
1351 } else { 1351 } else {
1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false); 1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
1353 if (ret) { 1353 if (ret) {
1354 DRM_ERROR("failed to pin overlay register bo\n"); 1354 DRM_ERROR("failed to pin overlay register bo\n");
1355 goto out_free_bo; 1355 goto out_free_bo;
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
1386 1386
1387out_unpin_bo: 1387out_unpin_bo:
1388 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1388 if (!OVERLAY_NEEDS_PHYSICAL(dev))
1389 i915_gem_object_unpin(reg_bo); 1389 i915_gem_object_ggtt_unpin(reg_bo);
1390out_free_bo: 1390out_free_bo:
1391 drm_gem_object_unreference(&reg_bo->base); 1391 drm_gem_object_unreference(&reg_bo->base);
1392out_free: 1392out_free:
@@ -1397,7 +1397,7 @@ out_free:
1397 1397
1398void intel_cleanup_overlay(struct drm_device *dev) 1398void intel_cleanup_overlay(struct drm_device *dev)
1399{ 1399{
1400 drm_i915_private_t *dev_priv = dev->dev_private; 1400 struct drm_i915_private *dev_priv = dev->dev_private;
1401 1401
1402 if (!dev_priv->overlay) 1402 if (!dev_priv->overlay)
1403 return; 1403 return;
@@ -1421,7 +1421,7 @@ struct intel_overlay_error_state {
1421static struct overlay_registers __iomem * 1421static struct overlay_registers __iomem *
1422intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1422intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1423{ 1423{
1424 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 1424 struct drm_i915_private *dev_priv = overlay->dev->dev_private;
1425 struct overlay_registers __iomem *regs; 1425 struct overlay_registers __iomem *regs;
1426 1426
1427 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1427 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1447,7 +1447,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1447struct intel_overlay_error_state * 1447struct intel_overlay_error_state *
1448intel_overlay_capture_error_state(struct drm_device *dev) 1448intel_overlay_capture_error_state(struct drm_device *dev)
1449{ 1449{
1450 drm_i915_private_t *dev_priv = dev->dev_private; 1450 struct drm_i915_private *dev_priv = dev->dev_private;
1451 struct intel_overlay *overlay = dev_priv->overlay; 1451 struct intel_overlay *overlay = dev_priv->overlay;
1452 struct intel_overlay_error_state *error; 1452 struct intel_overlay_error_state *error;
1453 struct overlay_registers __iomem *regs; 1453 struct overlay_registers __iomem *regs;
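Most of the intel_overlay.c churn is the mechanical drm_i915_private_t-to-struct cleanup, but two API changes are visible too: i915_gem_obj_ggtt_pin() now takes a flags word (PIN_MAPPABLE, or 0) where it previously took two booleans, and the unpin side is renamed i915_gem_object_ggtt_unpin(). A toy version of that bool-to-flags conversion; the flag values here are invented for illustration:

    #include <stdio.h>

    #define PIN_MAPPABLE    (1u << 0)   /* illustrative values only */
    #define PIN_NONBLOCK    (1u << 1)

    /* New-style pin: one flags word replaces (bool mappable, bool nonblock). */
    static int ggtt_pin(unsigned int alignment, unsigned int flags)
    {
            printf("pin align=%u mappable=%d nonblock=%d\n", alignment,
                   !!(flags & PIN_MAPPABLE), !!(flags & PIN_NONBLOCK));
            return 0;
    }

    int main(void)
    {
            ggtt_pin(4096, PIN_MAPPABLE);   /* was: pin(obj, PAGE_SIZE, true, false) */
            ggtt_pin(4096, 0);              /* was: pin(ctx, 4096, true, false) */
            return 0;
    }
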
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 079ea38f14d9..cb058408c70e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,8 +33,6 @@
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
37
38void 36void
39intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 37intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
40 struct drm_display_mode *adjusted_mode) 38 struct drm_display_mode *adjusted_mode)
@@ -325,13 +323,6 @@ out:
325 pipe_config->gmch_pfit.lvds_border_bits = border; 323 pipe_config->gmch_pfit.lvds_border_bits = border;
326} 324}
327 325
328static int i915_panel_invert_brightness;
329MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
330 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
331 "report PCI device ID, subsystem vendor and subsystem device ID "
332 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
333 "It will then be included in an upcoming module version.");
334module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
335static u32 intel_panel_compute_brightness(struct intel_connector *connector, 326static u32 intel_panel_compute_brightness(struct intel_connector *connector,
336 u32 val) 327 u32 val)
337{ 328{
@@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
341 332
342 WARN_ON(panel->backlight.max == 0); 333 WARN_ON(panel->backlight.max == 0);
343 334
344 if (i915_panel_invert_brightness < 0) 335 if (i915.invert_brightness < 0)
345 return val; 336 return val;
346 337
347 if (i915_panel_invert_brightness > 0 || 338 if (i915.invert_brightness > 0 ||
348 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 339 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
349 return panel->backlight.max - val; 340 return panel->backlight.max - val;
350 } 341 }
@@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev)
810 struct drm_i915_private *dev_priv = dev->dev_private; 801 struct drm_i915_private *dev_priv = dev->dev_private;
811 802
812 /* Assume that the BIOS does not lie through the OpRegion... */ 803 /* Assume that the BIOS does not lie through the OpRegion... */
813 if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { 804 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
814 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 805 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
815 connector_status_connected : 806 connector_status_connected :
816 connector_status_disconnected; 807 connector_status_disconnected;
817 } 808 }
818 809
819 switch (i915_panel_ignore_lid) { 810 switch (i915.panel_ignore_lid) {
820 case -2: 811 case -2:
821 return connector_status_connected; 812 return connector_status_connected;
822 case -1: 813 case -1:
@@ -1199,9 +1190,11 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
1199} 1190}
1200 1191
1201int intel_panel_init(struct intel_panel *panel, 1192int intel_panel_init(struct intel_panel *panel,
1202 struct drm_display_mode *fixed_mode) 1193 struct drm_display_mode *fixed_mode,
1194 struct drm_display_mode *downclock_mode)
1203{ 1195{
1204 panel->fixed_mode = fixed_mode; 1196 panel->fixed_mode = fixed_mode;
1197 panel->downclock_mode = downclock_mode;
1205 1198
1206 return 0; 1199 return 0;
1207} 1200}
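intel_panel.c loses its private invert_brightness module parameter because this series consolidates the scattered i915_* parameter globals into one module-wide struct, so call sites read i915.invert_brightness, i915.panel_ignore_lid, and so on. The shape of that consolidation in miniature (field set trimmed, layout illustrative):

    #include <stdio.h>

    /* One struct instead of one global per parameter. */
    struct i915_params {
            int invert_brightness;  /* -1 force normal, 0 defaults, 1 force inversion */
            int panel_ignore_lid;
            int lvds_channel_mode;
    };

    static struct i915_params i915 = {
            .invert_brightness = 0,
            .panel_ignore_lid = 0,
            .lvds_channel_mode = 0,
    };

    static unsigned int compute_brightness(unsigned int max, unsigned int val)
    {
            if (i915.invert_brightness > 0)
                    return max - val;       /* inverted scale */
            return val;
    }

    int main(void)
    {
            i915.invert_brightness = 1;
            printf("%u\n", compute_brightness(255, 10));    /* prints 245 */
            return 0;
    }
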
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e1fc35a72656..5874716774a7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -92,12 +92,12 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
92{ 92{
93 struct drm_device *dev = crtc->dev; 93 struct drm_device *dev = crtc->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private; 94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_framebuffer *fb = crtc->fb; 95 struct drm_framebuffer *fb = crtc->primary->fb;
96 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 96 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
97 struct drm_i915_gem_object *obj = intel_fb->obj; 97 struct drm_i915_gem_object *obj = intel_fb->obj;
98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
99 int cfb_pitch; 99 int cfb_pitch;
100 int plane, i; 100 int i;
101 u32 fbc_ctl; 101 u32 fbc_ctl;
102 102
103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
109 cfb_pitch = (cfb_pitch / 32) - 1; 109 cfb_pitch = (cfb_pitch / 32) - 1;
110 else 110 else
111 cfb_pitch = (cfb_pitch / 64) - 1; 111 cfb_pitch = (cfb_pitch / 64) - 1;
112 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
113 112
114 /* Clear old tags */ 113 /* Clear old tags */
115 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 114 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
@@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
120 119
121 /* Set it up... */ 120 /* Set it up... */
122 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 121 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
123 fbc_ctl2 |= plane; 122 fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
124 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 123 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
125 I915_WRITE(FBC_FENCE_OFF, crtc->y); 124 I915_WRITE(FBC_FENCE_OFF, crtc->y);
126 } 125 }
@@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
135 fbc_ctl |= obj->fence_reg; 134 fbc_ctl |= obj->fence_reg;
136 I915_WRITE(FBC_CONTROL, fbc_ctl); 135 I915_WRITE(FBC_CONTROL, fbc_ctl);
137 136
138 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ", 137 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
139 cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); 138 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
140} 139}
141 140
@@ -150,21 +149,23 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
150{ 149{
151 struct drm_device *dev = crtc->dev; 150 struct drm_device *dev = crtc->dev;
152 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_private *dev_priv = dev->dev_private;
153 struct drm_framebuffer *fb = crtc->fb; 152 struct drm_framebuffer *fb = crtc->primary->fb;
154 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 153 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
155 struct drm_i915_gem_object *obj = intel_fb->obj; 154 struct drm_i915_gem_object *obj = intel_fb->obj;
156 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
157 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
158 u32 dpfc_ctl; 156 u32 dpfc_ctl;
159 157
160 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 158 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
159 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
160 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
161 else
162 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
161 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 163 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
162 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
163 164
164 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 165 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
165 166
166 /* enable it... */ 167 /* enable it... */
167 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 168 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
168 169
169 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); 170 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
170} 171}
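Three FBC hunks in this file (g4x, ironlake, gen7) make the same substantive change: instead of hardcoding DPFC_CTL_LIMIT_1X, the compression limit is derived from the framebuffer's bytes per pixel, allowing 2:1 compression for 16bpp formats. The selection logic in isolation; the register bit encodings here are invented:

    #include <stdio.h>

    #define DPFC_CTL_LIMIT_1X       (0u << 6)       /* illustrative encodings */
    #define DPFC_CTL_LIMIT_2X       (1u << 6)

    /* cpp is bytes per pixel of the scanout format, as returned by
     * drm_format_plane_cpp() in the real code. */
    static unsigned int fbc_compression_limit(int cpp)
    {
            return cpp == 2 ? DPFC_CTL_LIMIT_2X : DPFC_CTL_LIMIT_1X;
    }

    int main(void)
    {
            printf("RGB565 (cpp=2): %#x\n", fbc_compression_limit(2));
            printf("XRGB8888 (cpp=4): %#x\n", fbc_compression_limit(4));
            return 0;
    }
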
@@ -220,22 +221,20 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
220{ 221{
221 struct drm_device *dev = crtc->dev; 222 struct drm_device *dev = crtc->dev;
222 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = dev->dev_private;
223 struct drm_framebuffer *fb = crtc->fb; 224 struct drm_framebuffer *fb = crtc->primary->fb;
224 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 225 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
225 struct drm_i915_gem_object *obj = intel_fb->obj; 226 struct drm_i915_gem_object *obj = intel_fb->obj;
226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
227 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
228 u32 dpfc_ctl; 228 u32 dpfc_ctl;
229 229
230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 230 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
231 dpfc_ctl &= DPFC_RESERVED; 231 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
232 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 232 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
233 /* Set persistent mode for front-buffer rendering, ala X. */ 233 else
234 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; 234 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
235 dpfc_ctl |= DPFC_CTL_FENCE_EN; 235 dpfc_ctl |= DPFC_CTL_FENCE_EN;
236 if (IS_GEN5(dev)) 236 if (IS_GEN5(dev))
237 dpfc_ctl |= obj->fence_reg; 237 dpfc_ctl |= obj->fence_reg;
238 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
239 238
240 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 239 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
241 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); 240 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@ -278,24 +277,31 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
278{ 277{
279 struct drm_device *dev = crtc->dev; 278 struct drm_device *dev = crtc->dev;
280 struct drm_i915_private *dev_priv = dev->dev_private; 279 struct drm_i915_private *dev_priv = dev->dev_private;
281 struct drm_framebuffer *fb = crtc->fb; 280 struct drm_framebuffer *fb = crtc->primary->fb;
282 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 281 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
283 struct drm_i915_gem_object *obj = intel_fb->obj; 282 struct drm_i915_gem_object *obj = intel_fb->obj;
284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
284 u32 dpfc_ctl;
285 285
286 I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); 286 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
287 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
288 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
289 else
290 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
291 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
287 292
288 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | 293 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
289 IVB_DPFC_CTL_FENCE_EN |
290 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
291 294
292 if (IS_IVYBRIDGE(dev)) { 295 if (IS_IVYBRIDGE(dev)) {
293 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 296 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
294 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); 297 I915_WRITE(ILK_DISPLAY_CHICKEN1,
298 I915_READ(ILK_DISPLAY_CHICKEN1) |
299 ILK_FBCQ_DIS);
295 } else { 300 } else {
296 /* WaFbcAsynchFlipDisableFbcQueue:hsw */ 301 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
297 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), 302 I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
298 HSW_BYPASS_FBC_QUEUE); 303 I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
304 HSW_FBCQ_DIS);
299 } 305 }
300 306
301 I915_WRITE(SNB_DPFC_CTL_SA, 307 I915_WRITE(SNB_DPFC_CTL_SA,
@@ -330,11 +336,11 @@ static void intel_fbc_work_fn(struct work_struct *__work)
330 /* Double check that we haven't switched fb without cancelling 336 /* Double check that we haven't switched fb without cancelling
331 * the prior work. 337 * the prior work.
332 */ 338 */
333 if (work->crtc->fb == work->fb) { 339 if (work->crtc->primary->fb == work->fb) {
334 dev_priv->display.enable_fbc(work->crtc); 340 dev_priv->display.enable_fbc(work->crtc);
335 341
336 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; 342 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
337 dev_priv->fbc.fb_id = work->crtc->fb->base.id; 343 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
338 dev_priv->fbc.y = work->crtc->y; 344 dev_priv->fbc.y = work->crtc->y;
339 } 345 }
340 346
@@ -387,7 +393,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc)
387 } 393 }
388 394
389 work->crtc = crtc; 395 work->crtc = crtc;
390 work->fb = crtc->fb; 396 work->fb = crtc->primary->fb;
391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); 397 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
392 398
393 dev_priv->fbc.fbc_work = work; 399 dev_priv->fbc.fbc_work = work;
@@ -466,7 +472,7 @@ void intel_update_fbc(struct drm_device *dev)
466 return; 472 return;
467 } 473 }
468 474
469 if (!i915_powersave) { 475 if (!i915.powersave) {
470 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 476 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
471 DRM_DEBUG_KMS("fbc disabled per module param\n"); 477 DRM_DEBUG_KMS("fbc disabled per module param\n");
472 return; 478 return;
@@ -493,25 +499,25 @@ void intel_update_fbc(struct drm_device *dev)
493 } 499 }
494 } 500 }
495 501
496 if (!crtc || crtc->fb == NULL) { 502 if (!crtc || crtc->primary->fb == NULL) {
497 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) 503 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
498 DRM_DEBUG_KMS("no output, disabling\n"); 504 DRM_DEBUG_KMS("no output, disabling\n");
499 goto out_disable; 505 goto out_disable;
500 } 506 }
501 507
502 intel_crtc = to_intel_crtc(crtc); 508 intel_crtc = to_intel_crtc(crtc);
503 fb = crtc->fb; 509 fb = crtc->primary->fb;
504 intel_fb = to_intel_framebuffer(fb); 510 intel_fb = to_intel_framebuffer(fb);
505 obj = intel_fb->obj; 511 obj = intel_fb->obj;
506 adjusted_mode = &intel_crtc->config.adjusted_mode; 512 adjusted_mode = &intel_crtc->config.adjusted_mode;
507 513
508 if (i915_enable_fbc < 0 && 514 if (i915.enable_fbc < 0 &&
509 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { 515 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
510 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) 516 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
511 DRM_DEBUG_KMS("disabled per chip default\n"); 517 DRM_DEBUG_KMS("disabled per chip default\n");
512 goto out_disable; 518 goto out_disable;
513 } 519 }
514 if (!i915_enable_fbc) { 520 if (!i915.enable_fbc) {
515 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 521 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
516 DRM_DEBUG_KMS("fbc disabled per module param\n"); 522 DRM_DEBUG_KMS("fbc disabled per module param\n");
517 goto out_disable; 523 goto out_disable;
@@ -537,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev)
537 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 543 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
538 goto out_disable; 544 goto out_disable;
539 } 545 }
540 if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) && 546 if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
541 intel_crtc->plane != PLANE_A) { 547 intel_crtc->plane != PLANE_A) {
542 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) 548 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
543 DRM_DEBUG_KMS("plane not A, disabling compression\n"); 549 DRM_DEBUG_KMS("plane not A, disabling compression\n");
@@ -617,7 +623,7 @@ out_disable:
617 623
618static void i915_pineview_get_mem_freq(struct drm_device *dev) 624static void i915_pineview_get_mem_freq(struct drm_device *dev)
619{ 625{
620 drm_i915_private_t *dev_priv = dev->dev_private; 626 struct drm_i915_private *dev_priv = dev->dev_private;
621 u32 tmp; 627 u32 tmp;
622 628
623 tmp = I915_READ(CLKCFG); 629 tmp = I915_READ(CLKCFG);
@@ -656,7 +662,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
656 662
657static void i915_ironlake_get_mem_freq(struct drm_device *dev) 663static void i915_ironlake_get_mem_freq(struct drm_device *dev)
658{ 664{
659 drm_i915_private_t *dev_priv = dev->dev_private; 665 struct drm_i915_private *dev_priv = dev->dev_private;
660 u16 ddrpll, csipll; 666 u16 ddrpll, csipll;
661 667
662 ddrpll = I915_READ16(DDRMPLL1); 668 ddrpll = I915_READ16(DDRMPLL1);
@@ -1035,7 +1041,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1035 crtc = single_enabled_crtc(dev); 1041 crtc = single_enabled_crtc(dev);
1036 if (crtc) { 1042 if (crtc) {
1037 const struct drm_display_mode *adjusted_mode; 1043 const struct drm_display_mode *adjusted_mode;
1038 int pixel_size = crtc->fb->bits_per_pixel / 8; 1044 int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1039 int clock; 1045 int clock;
1040 1046
1041 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1047 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
@@ -1115,7 +1121,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1115 clock = adjusted_mode->crtc_clock; 1121 clock = adjusted_mode->crtc_clock;
1116 htotal = adjusted_mode->crtc_htotal; 1122 htotal = adjusted_mode->crtc_htotal;
1117 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1123 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1118 pixel_size = crtc->fb->bits_per_pixel / 8; 1124 pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1119 1125
1120 /* Use the small buffer method to calculate plane watermark */ 1126 /* Use the small buffer method to calculate plane watermark */
1121 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 1127 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -1128,9 +1134,9 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1128 *plane_wm = display->max_wm; 1134 *plane_wm = display->max_wm;
1129 1135
1130 /* Use the large buffer method to calculate cursor watermark */ 1136 /* Use the large buffer method to calculate cursor watermark */
1131 line_time_us = ((htotal * 1000) / clock); 1137 line_time_us = max(htotal * 1000 / clock, 1);
1132 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 1138 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1133 entries = line_count * 64 * pixel_size; 1139 entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
1134 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 1140 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1135 if (tlb_miss > 0) 1141 if (tlb_miss > 0)
1136 entries += tlb_miss; 1142 entries += tlb_miss;
@@ -1202,9 +1208,9 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1202 clock = adjusted_mode->crtc_clock; 1208 clock = adjusted_mode->crtc_clock;
1203 htotal = adjusted_mode->crtc_htotal; 1209 htotal = adjusted_mode->crtc_htotal;
1204 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1210 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1205 pixel_size = crtc->fb->bits_per_pixel / 8; 1211 pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1206 1212
1207 line_time_us = (htotal * 1000) / clock; 1213 line_time_us = max(htotal * 1000 / clock, 1);
1208 line_count = (latency_ns / line_time_us + 1000) / 1000; 1214 line_count = (latency_ns / line_time_us + 1000) / 1000;
1209 line_size = hdisplay * pixel_size; 1215 line_size = hdisplay * pixel_size;
1210 1216
@@ -1216,7 +1222,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1216 *display_wm = entries + display->guard_size; 1222 *display_wm = entries + display->guard_size;
1217 1223
1218 /* calculate the self-refresh watermark for display cursor */ 1224 /* calculate the self-refresh watermark for display cursor */
1219 entries = line_count * pixel_size * 64; 1225 entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
1220 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 1226 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1221 *cursor_wm = entries + cursor->guard_size; 1227 *cursor_wm = entries + cursor->guard_size;
1222 1228
@@ -1241,7 +1247,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1241 return false; 1247 return false;
1242 1248
1243 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1249 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1244 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ 1250 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
1245 1251
1246 entries = (clock / 1000) * pixel_size; 1252 entries = (clock / 1000) * pixel_size;
1247 *plane_prec_mult = (entries > 256) ? 1253 *plane_prec_mult = (entries > 256) ?
@@ -1433,11 +1439,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1433 int clock = adjusted_mode->crtc_clock; 1439 int clock = adjusted_mode->crtc_clock;
1434 int htotal = adjusted_mode->crtc_htotal; 1440 int htotal = adjusted_mode->crtc_htotal;
1435 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1441 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1436 int pixel_size = crtc->fb->bits_per_pixel / 8; 1442 int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1437 unsigned long line_time_us; 1443 unsigned long line_time_us;
1438 int entries; 1444 int entries;
1439 1445
1440 line_time_us = ((htotal * 1000) / clock); 1446 line_time_us = max(htotal * 1000 / clock, 1);
1441 1447
1442 /* Use ns/us then divide to preserve precision */ 1448 /* Use ns/us then divide to preserve precision */
1443 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1449 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@ -1451,7 +1457,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1451 entries, srwm); 1457 entries, srwm);
1452 1458
1453 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1459 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1454 pixel_size * 64; 1460 pixel_size * to_intel_crtc(crtc)->cursor_width;
1455 entries = DIV_ROUND_UP(entries, 1461 entries = DIV_ROUND_UP(entries,
1456 i965_cursor_wm_info.cacheline_size); 1462 i965_cursor_wm_info.cacheline_size);
1457 cursor_sr = i965_cursor_wm_info.fifo_size - 1463 cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1506,7 +1512,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1506 crtc = intel_get_crtc_for_plane(dev, 0); 1512 crtc = intel_get_crtc_for_plane(dev, 0);
1507 if (intel_crtc_active(crtc)) { 1513 if (intel_crtc_active(crtc)) {
1508 const struct drm_display_mode *adjusted_mode; 1514 const struct drm_display_mode *adjusted_mode;
1509 int cpp = crtc->fb->bits_per_pixel / 8; 1515 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1510 if (IS_GEN2(dev)) 1516 if (IS_GEN2(dev))
1511 cpp = 4; 1517 cpp = 4;
1512 1518
@@ -1522,7 +1528,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1522 crtc = intel_get_crtc_for_plane(dev, 1); 1528 crtc = intel_get_crtc_for_plane(dev, 1);
1523 if (intel_crtc_active(crtc)) { 1529 if (intel_crtc_active(crtc)) {
1524 const struct drm_display_mode *adjusted_mode; 1530 const struct drm_display_mode *adjusted_mode;
1525 int cpp = crtc->fb->bits_per_pixel / 8; 1531 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1526 if (IS_GEN2(dev)) 1532 if (IS_GEN2(dev))
1527 cpp = 4; 1533 cpp = 4;
1528 1534
@@ -1559,11 +1565,11 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1559 int clock = adjusted_mode->crtc_clock; 1565 int clock = adjusted_mode->crtc_clock;
1560 int htotal = adjusted_mode->crtc_htotal; 1566 int htotal = adjusted_mode->crtc_htotal;
1561 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; 1567 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1562 int pixel_size = enabled->fb->bits_per_pixel / 8; 1568 int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
1563 unsigned long line_time_us; 1569 unsigned long line_time_us;
1564 int entries; 1570 int entries;
1565 1571
1566 line_time_us = (htotal * 1000) / clock; 1572 line_time_us = max(htotal * 1000 / clock, 1);
1567 1573
1568 /* Use ns/us then divide to preserve precision */ 1574 /* Use ns/us then divide to preserve precision */
1569 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1575 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
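Scattered through these watermark hunks is the same one-line hardening: line_time_us = max(htotal * 1000 / clock, 1). The value feeds a later division (sr_latency_ns / line_time_us), so clamping it to at least 1 removes a potential divide-by-zero when integer truncation would otherwise yield 0. Isolated, with deliberately contrived numbers:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            int htotal = 800, clock = 1200000;  /* contrived: clock > htotal * 1000 */
            int sr_latency_ns = 12000;

            /* unclamped this is 800 * 1000 / 1200000 == 0 ... */
            int line_time_us = max(htotal * 1000 / clock, 1);
            /* ... and 0 would make the next division fault */
            int line_count = (sr_latency_ns / line_time_us + 1000) / 1000;

            printf("line_time_us=%d line_count=%d\n", line_time_us, line_count);
            return 0;
    }
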
@@ -1886,7 +1892,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1886} 1892}
1887 1893
1888/* Calculate the maximum FBC watermark */ 1894/* Calculate the maximum FBC watermark */
1889static unsigned int ilk_fbc_wm_max(struct drm_device *dev) 1895static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
1890{ 1896{
1891 /* max that registers can hold */ 1897 /* max that registers can hold */
1892 if (INTEL_INFO(dev)->gen >= 8) 1898 if (INTEL_INFO(dev)->gen >= 8)
@@ -1895,7 +1901,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
1895 return 15; 1901 return 15;
1896} 1902}
1897 1903
1898static void ilk_compute_wm_maximums(struct drm_device *dev, 1904static void ilk_compute_wm_maximums(const struct drm_device *dev,
1899 int level, 1905 int level,
1900 const struct intel_wm_config *config, 1906 const struct intel_wm_config *config,
1901 enum intel_ddb_partitioning ddb_partitioning, 1907 enum intel_ddb_partitioning ddb_partitioning,
@@ -1948,7 +1954,7 @@ static bool ilk_validate_wm_level(int level,
1948 return ret; 1954 return ret;
1949} 1955}
1950 1956
1951static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 1957static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1952 int level, 1958 int level,
1953 const struct ilk_pipe_wm_parameters *p, 1959 const struct ilk_pipe_wm_parameters *p,
1954 struct intel_wm_level *result) 1960 struct intel_wm_level *result)
@@ -2079,7 +2085,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
2079 } 2085 }
2080} 2086}
2081 2087
2082static void intel_setup_wm_latency(struct drm_device *dev) 2088static void ilk_setup_wm_latency(struct drm_device *dev)
2083{ 2089{
2084 struct drm_i915_private *dev_priv = dev->dev_private; 2090 struct drm_i915_private *dev_priv = dev->dev_private;
2085 2091
@@ -2111,10 +2117,10 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2111 if (p->active) { 2117 if (p->active) {
2112 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal; 2118 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2113 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2119 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2114 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2120 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2115 p->cur.bytes_per_pixel = 4; 2121 p->cur.bytes_per_pixel = 4;
2116 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w; 2122 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2117 p->cur.horiz_pixels = 64; 2123 p->cur.horiz_pixels = intel_crtc->cursor_width;
2118 /* TODO: for now, assume primary and cursor planes are always enabled. */ 2124 /* TODO: for now, assume primary and cursor planes are always enabled. */
2119 p->pri.enabled = true; 2125 p->pri.enabled = true;
2120 p->cur.enabled = true; 2126 p->cur.enabled = true;
@@ -2123,7 +2129,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2123 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 2129 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2124 config->num_pipes_active += intel_crtc_active(crtc); 2130 config->num_pipes_active += intel_crtc_active(crtc);
2125 2131
2126 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2132 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2127 struct intel_plane *intel_plane = to_intel_plane(plane); 2133 struct intel_plane *intel_plane = to_intel_plane(plane);
2128 2134
2129 if (intel_plane->pipe == pipe) 2135 if (intel_plane->pipe == pipe)
@@ -2140,7 +2146,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2140 struct intel_pipe_wm *pipe_wm) 2146 struct intel_pipe_wm *pipe_wm)
2141{ 2147{
2142 struct drm_device *dev = crtc->dev; 2148 struct drm_device *dev = crtc->dev;
2143 struct drm_i915_private *dev_priv = dev->dev_private; 2149 const struct drm_i915_private *dev_priv = dev->dev_private;
2144 int level, max_level = ilk_wm_max_level(dev); 2150 int level, max_level = ilk_wm_max_level(dev);
2145 /* LP0 watermark maximums depend on this pipe alone */ 2151 /* LP0 watermark maximums depend on this pipe alone */
2146 struct intel_wm_config config = { 2152 struct intel_wm_config config = {
@@ -2738,7 +2744,7 @@ intel_alloc_context_page(struct drm_device *dev)
2738 return NULL; 2744 return NULL;
2739 } 2745 }
2740 2746
2741 ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false); 2747 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2742 if (ret) { 2748 if (ret) {
2743 DRM_ERROR("failed to pin power context: %d\n", ret); 2749 DRM_ERROR("failed to pin power context: %d\n", ret);
2744 goto err_unref; 2750 goto err_unref;
@@ -2753,7 +2759,7 @@ intel_alloc_context_page(struct drm_device *dev)
2753 return ctx; 2759 return ctx;
2754 2760
2755err_unpin: 2761err_unpin:
2756 i915_gem_object_unpin(ctx); 2762 i915_gem_object_ggtt_unpin(ctx);
2757err_unref: 2763err_unref:
2758 drm_gem_object_unreference(&ctx->base); 2764 drm_gem_object_unreference(&ctx->base);
2759 return NULL; 2765 return NULL;
@@ -2901,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2901 * the hw runs at the minimal clock before selecting the desired 2907 * the hw runs at the minimal clock before selecting the desired
2902 * frequency; if the down threshold expires in that window we will not 2908 * frequency; if the down threshold expires in that window we will not
2903 * receive a down interrupt. */ 2909 * receive a down interrupt. */
2904 limits = dev_priv->rps.max_delay << 24; 2910 limits = dev_priv->rps.max_freq_softlimit << 24;
2905 if (val <= dev_priv->rps.min_delay) 2911 if (val <= dev_priv->rps.min_freq_softlimit)
2906 limits |= dev_priv->rps.min_delay << 16; 2912 limits |= dev_priv->rps.min_freq_softlimit << 16;
2907 2913
2908 return limits; 2914 return limits;
2909} 2915}
@@ -2915,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
2915 new_power = dev_priv->rps.power; 2921 new_power = dev_priv->rps.power;
2916 switch (dev_priv->rps.power) { 2922 switch (dev_priv->rps.power) {
2917 case LOW_POWER: 2923 case LOW_POWER:
2918 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay) 2924 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
2919 new_power = BETWEEN; 2925 new_power = BETWEEN;
2920 break; 2926 break;
2921 2927
2922 case BETWEEN: 2928 case BETWEEN:
2923 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay) 2929 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
2924 new_power = LOW_POWER; 2930 new_power = LOW_POWER;
2925 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay) 2931 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
2926 new_power = HIGH_POWER; 2932 new_power = HIGH_POWER;
2927 break; 2933 break;
2928 2934
2929 case HIGH_POWER: 2935 case HIGH_POWER:
2930 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay) 2936 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
2931 new_power = BETWEEN; 2937 new_power = BETWEEN;
2932 break; 2938 break;
2933 } 2939 }
2934 /* Max/min bins are special */ 2940 /* Max/min bins are special */
2935 if (val == dev_priv->rps.min_delay) 2941 if (val == dev_priv->rps.min_freq_softlimit)
2936 new_power = LOW_POWER; 2942 new_power = LOW_POWER;
2937 if (val == dev_priv->rps.max_delay) 2943 if (val == dev_priv->rps.max_freq_softlimit)
2938 new_power = HIGH_POWER; 2944 new_power = HIGH_POWER;
2939 if (new_power == dev_priv->rps.power) 2945 if (new_power == dev_priv->rps.power)
2940 return; 2946 return;
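The gen6_set_rps() rework in the next hunk pairs the frequency write with a new helper, gen6_rps_pm_mask(): interrupts stay unmasked only in directions the frequency can still move, i.e. "down" events while above the soft floor and "up" events while below the soft ceiling (with a SNB/IVB quirk keeping UP_EI_EXPIRED unmasked). The mask computation in isolation, with invented bit positions:

    #include <stdio.h>

    #define RP_UP_THRESHOLD         (1u << 5)       /* illustrative bit positions */
    #define RP_DOWN_THRESHOLD       (1u << 6)
    #define RP_DOWN_TIMEOUT         (1u << 7)

    /* Unmask only the events that can still change the frequency:
     * "down" while above the floor, "up" while below the ceiling. */
    static unsigned int rps_pm_mask(unsigned int val, unsigned int floor,
                                    unsigned int ceiling)
    {
            unsigned int wanted = 0;

            if (val > floor)
                    wanted |= RP_DOWN_THRESHOLD | RP_DOWN_TIMEOUT;
            if (val < ceiling)
                    wanted |= RP_UP_THRESHOLD;
            return ~wanted;         /* the register masks the bits that are set */
    }

    int main(void)
    {
            printf("mid:  %#x\n", rps_pm_mask(8, 3, 12));   /* both directions live */
            printf("ceil: %#x\n", rps_pm_mask(12, 3, 12));  /* only "down" events */
            return 0;
    }
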
@@ -3000,41 +3006,113 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3000 dev_priv->rps.last_adj = 0; 3006 dev_priv->rps.last_adj = 0;
3001} 3007}
3002 3008
3009static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3010{
3011 u32 mask = 0;
3012
3013 if (val > dev_priv->rps.min_freq_softlimit)
3014 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3015 if (val < dev_priv->rps.max_freq_softlimit)
3016 mask |= GEN6_PM_RP_UP_THRESHOLD;
3017
3018 /* IVB and SNB hard-hang on a looping batchbuffer
3019 * if GEN6_PM_UP_EI_EXPIRED is masked.
3020 */
3021 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3022 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3023
3024 return ~mask;
3025}
3026
 3027/* gen6_set_rps is called to update the frequency request, but should also be
 3028 * called when the range (min_freq_softlimit and max_freq_softlimit) is modified
 3029 * so that we can update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3003void gen6_set_rps(struct drm_device *dev, u8 val) 3030void gen6_set_rps(struct drm_device *dev, u8 val)
3004{ 3031{
3005 struct drm_i915_private *dev_priv = dev->dev_private; 3032 struct drm_i915_private *dev_priv = dev->dev_private;
3006 3033
3007 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3034 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3008 WARN_ON(val > dev_priv->rps.max_delay); 3035 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3009 WARN_ON(val < dev_priv->rps.min_delay); 3036 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3010
3011 if (val == dev_priv->rps.cur_delay)
3012 return;
3013 3037
 3014 gen6_set_rps_thresholds(dev_priv, val); 3038 /* min/max softlimits may still have been modified, so be sure to
 3039 * write the limits value.
3040 */
3041 if (val != dev_priv->rps.cur_freq) {
3042 gen6_set_rps_thresholds(dev_priv, val);
3015 3043
3016 if (IS_HASWELL(dev)) 3044 if (IS_HASWELL(dev))
3017 I915_WRITE(GEN6_RPNSWREQ, 3045 I915_WRITE(GEN6_RPNSWREQ,
3018 HSW_FREQUENCY(val)); 3046 HSW_FREQUENCY(val));
3019 else 3047 else
3020 I915_WRITE(GEN6_RPNSWREQ, 3048 I915_WRITE(GEN6_RPNSWREQ,
3021 GEN6_FREQUENCY(val) | 3049 GEN6_FREQUENCY(val) |
3022 GEN6_OFFSET(0) | 3050 GEN6_OFFSET(0) |
3023 GEN6_AGGRESSIVE_TURBO); 3051 GEN6_AGGRESSIVE_TURBO);
3052 }
3024 3053
3025 /* Make sure we continue to get interrupts 3054 /* Make sure we continue to get interrupts
3026 * until we hit the minimum or maximum frequencies. 3055 * until we hit the minimum or maximum frequencies.
3027 */ 3056 */
3028 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3057 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3029 gen6_rps_limits(dev_priv, val)); 3058 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3030 3059
3031 POSTING_READ(GEN6_RPNSWREQ); 3060 POSTING_READ(GEN6_RPNSWREQ);
3032 3061
3033 dev_priv->rps.cur_delay = val; 3062 dev_priv->rps.cur_freq = val;
3034
3035 trace_intel_gpu_freq_change(val * 50); 3063 trace_intel_gpu_freq_change(val * 50);
3036} 3064}
3037 3065
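
gen6_rps_pm_mask() above collects the RPS events that should stay live at a given frequency and returns the complement, because GEN6_PMINTRMSK is a mask register in which a set bit silences the event (hence the 0xffffffff writes elsewhere to mask everything). A stand-alone sketch of that inverted-mask convention, with placeholder event bits rather than the real GEN6_PM_RP_* values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder event bits; the real GEN6_PM_RP_* values live in i915_reg.h. */
#define PM_DOWN_THRESHOLD (1u << 0)
#define PM_DOWN_TIMEOUT   (1u << 1)
#define PM_UP_THRESHOLD   (1u << 2)

static uint32_t rps_pm_mask(uint8_t val, uint8_t min_soft, uint8_t max_soft)
{
        uint32_t wanted = 0;

        /* Down events only matter while we can still go lower... */
        if (val > min_soft)
                wanted |= PM_DOWN_THRESHOLD | PM_DOWN_TIMEOUT;
        /* ...and up events only while we can still go higher. */
        if (val < max_soft)
                wanted |= PM_UP_THRESHOLD;

        /* Mask register semantics: a set bit silences the event. */
        return ~wanted;
}

int main(void)
{
        printf("at max: 0x%08x\n", rps_pm_mask(22, 7, 22));
        return 0;
}
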
3066/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3067 *
 3068 * If Gfx is Idle, then
3069 * 1. Mask Turbo interrupts
3070 * 2. Bring up Gfx clock
3071 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 3072 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3073 * 5. Unmask Turbo interrupts
3074*/
3075static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3076{
3077 /*
 3078 * When we are idle, drop to the minimum voltage state.
3079 */
3080
3081 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3082 return;
3083
 3084 /* Mask turbo interrupts so that they do not fire in between */
3085 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3086
3087 /* Bring up the Gfx clock */
3088 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3089 I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
3090 VLV_GFX_CLK_FORCE_ON_BIT);
3091
3092 if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
3093 I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
3094 DRM_ERROR("GFX_CLK_ON request timed out\n");
3095 return;
3096 }
3097
3098 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3099
3100 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3101 dev_priv->rps.min_freq_softlimit);
3102
3103 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3104 & GENFREQSTATUS) == 0, 5))
3105 DRM_ERROR("timed out waiting for Punit\n");
3106
3107 /* Release the Gfx clock */
3108 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3109 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3110 ~VLV_GFX_CLK_FORCE_ON_BIT);
3111
3112 I915_WRITE(GEN6_PMINTRMSK,
3113 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3114}
3115
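
Both waits in vlv_set_rps_idle() use the kernel's wait_for() helper, which polls a condition until it holds or a millisecond deadline passes. A rough user-space model of that bounded poll (the register read is stubbed; timings are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for a register read; in the driver this is I915_READ()
 * or vlv_punit_read(). */
static unsigned int read_status(void)
{
        return 1; /* pretend the bit we wait for is already set */
}

/* Poll a condition with a millisecond deadline, roughly what the
 * kernel's wait_for() macro does in the function above. */
static bool wait_for_bit(unsigned int bit, long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (read_status() & bit)
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return false; /* caller then logs a timeout error */
        }
}

int main(void)
{
        printf("%s\n", wait_for_bit(1u << 0, 5) ? "ok" : "timeout");
        return 0;
}
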
3038void gen6_rps_idle(struct drm_i915_private *dev_priv) 3116void gen6_rps_idle(struct drm_i915_private *dev_priv)
3039{ 3117{
3040 struct drm_device *dev = dev_priv->dev; 3118 struct drm_device *dev = dev_priv->dev;
@@ -3042,9 +3120,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3042 mutex_lock(&dev_priv->rps.hw_lock); 3120 mutex_lock(&dev_priv->rps.hw_lock);
3043 if (dev_priv->rps.enabled) { 3121 if (dev_priv->rps.enabled) {
3044 if (IS_VALLEYVIEW(dev)) 3122 if (IS_VALLEYVIEW(dev))
3045 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3123 vlv_set_rps_idle(dev_priv);
3046 else 3124 else
3047 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3125 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3048 dev_priv->rps.last_adj = 0; 3126 dev_priv->rps.last_adj = 0;
3049 } 3127 }
3050 mutex_unlock(&dev_priv->rps.hw_lock); 3128 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3057,9 +3135,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
3057 mutex_lock(&dev_priv->rps.hw_lock); 3135 mutex_lock(&dev_priv->rps.hw_lock);
3058 if (dev_priv->rps.enabled) { 3136 if (dev_priv->rps.enabled) {
3059 if (IS_VALLEYVIEW(dev)) 3137 if (IS_VALLEYVIEW(dev))
3060 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3138 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3061 else 3139 else
3062 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3140 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3063 dev_priv->rps.last_adj = 0; 3141 dev_priv->rps.last_adj = 0;
3064 } 3142 }
3065 mutex_unlock(&dev_priv->rps.hw_lock); 3143 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3070,21 +3148,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3070 struct drm_i915_private *dev_priv = dev->dev_private; 3148 struct drm_i915_private *dev_priv = dev->dev_private;
3071 3149
3072 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3150 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3073 WARN_ON(val > dev_priv->rps.max_delay); 3151 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3074 WARN_ON(val < dev_priv->rps.min_delay); 3152 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3075 3153
3076 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", 3154 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3077 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), 3155 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3078 dev_priv->rps.cur_delay, 3156 dev_priv->rps.cur_freq,
3079 vlv_gpu_freq(dev_priv, val), val); 3157 vlv_gpu_freq(dev_priv, val), val);
3080 3158
3081 if (val == dev_priv->rps.cur_delay) 3159 if (val != dev_priv->rps.cur_freq)
3082 return; 3160 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3083
3084 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3085 3161
3086 dev_priv->rps.cur_delay = val; 3162 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3087 3163
3164 dev_priv->rps.cur_freq = val;
3088 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); 3165 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3089} 3166}
3090 3167
@@ -3093,7 +3170,8 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
3093 struct drm_i915_private *dev_priv = dev->dev_private; 3170 struct drm_i915_private *dev_priv = dev->dev_private;
3094 3171
3095 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3172 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3096 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); 3173 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3174 ~dev_priv->pm_rps_events);
3097 /* Complete PM interrupt masking here doesn't race with the rps work 3175 /* Complete PM interrupt masking here doesn't race with the rps work
3098 * item again unmasking PM interrupts because that is using a different 3176 * item again unmasking PM interrupts because that is using a different
3099 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 3177 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
@@ -3103,7 +3181,7 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
3103 dev_priv->rps.pm_iir = 0; 3181 dev_priv->rps.pm_iir = 0;
3104 spin_unlock_irq(&dev_priv->irq_lock); 3182 spin_unlock_irq(&dev_priv->irq_lock);
3105 3183
3106 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3184 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3107} 3185}
3108 3186
3109static void gen6_disable_rps(struct drm_device *dev) 3187static void gen6_disable_rps(struct drm_device *dev)
@@ -3123,25 +3201,14 @@ static void valleyview_disable_rps(struct drm_device *dev)
3123 I915_WRITE(GEN6_RC_CONTROL, 0); 3201 I915_WRITE(GEN6_RC_CONTROL, 0);
3124 3202
3125 gen6_disable_rps_interrupts(dev); 3203 gen6_disable_rps_interrupts(dev);
3126
3127 if (dev_priv->vlv_pctx) {
3128 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3129 dev_priv->vlv_pctx = NULL;
3130 }
3131} 3204}
3132 3205
3133static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 3206static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3134{ 3207{
3135 if (IS_GEN6(dev))
3136 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3137
3138 if (IS_HASWELL(dev))
3139 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3140
3141 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3208 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3142 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3209 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3143 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3210 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3144 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3211 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3145} 3212}
3146 3213
3147int intel_enable_rc6(const struct drm_device *dev) 3214int intel_enable_rc6(const struct drm_device *dev)
@@ -3151,44 +3218,28 @@ int intel_enable_rc6(const struct drm_device *dev)
3151 return 0; 3218 return 0;
3152 3219
3153 /* Respect the kernel parameter if it is set */ 3220 /* Respect the kernel parameter if it is set */
3154 if (i915_enable_rc6 >= 0) 3221 if (i915.enable_rc6 >= 0)
3155 return i915_enable_rc6; 3222 return i915.enable_rc6;
3156 3223
3157 /* Disable RC6 on Ironlake */ 3224 /* Disable RC6 on Ironlake */
3158 if (INTEL_INFO(dev)->gen == 5) 3225 if (INTEL_INFO(dev)->gen == 5)
3159 return 0; 3226 return 0;
3160 3227
3161 if (IS_HASWELL(dev)) 3228 if (IS_IVYBRIDGE(dev))
3162 return INTEL_RC6_ENABLE; 3229 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3163
3164 /* snb/ivb have more than one rc6 state. */
3165 if (INTEL_INFO(dev)->gen == 6)
3166 return INTEL_RC6_ENABLE;
3167 3230
3168 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3231 return INTEL_RC6_ENABLE;
3169} 3232}
3170 3233
3171static void gen6_enable_rps_interrupts(struct drm_device *dev) 3234static void gen6_enable_rps_interrupts(struct drm_device *dev)
3172{ 3235{
3173 struct drm_i915_private *dev_priv = dev->dev_private; 3236 struct drm_i915_private *dev_priv = dev->dev_private;
3174 u32 enabled_intrs;
3175 3237
3176 spin_lock_irq(&dev_priv->irq_lock); 3238 spin_lock_irq(&dev_priv->irq_lock);
3177 WARN_ON(dev_priv->rps.pm_iir); 3239 WARN_ON(dev_priv->rps.pm_iir);
3178 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3240 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3179 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3241 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3180 spin_unlock_irq(&dev_priv->irq_lock); 3242 spin_unlock_irq(&dev_priv->irq_lock);
3181
3182 /* only unmask PM interrupts we need. Mask all others. */
3183 enabled_intrs = GEN6_PM_RPS_EVENTS;
3184
3185 /* IVB and SNB hard hangs on looping batchbuffer
3186 * if GEN6_PM_UP_EI_EXPIRED is masked.
3187 */
3188 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3189 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3190
3191 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3192} 3243}
3193 3244
3194static void gen8_enable_rps(struct drm_device *dev) 3245static void gen8_enable_rps(struct drm_device *dev)
@@ -3222,10 +3273,10 @@ static void gen8_enable_rps(struct drm_device *dev)
3222 /* 3: Enable RC6 */ 3273 /* 3: Enable RC6 */
3223 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3274 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3224 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 3275 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3225 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); 3276 intel_print_rc6_info(dev, rc6_mask);
3226 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3277 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3227 GEN6_RC_CTL_EI_MODE(1) | 3278 GEN6_RC_CTL_EI_MODE(1) |
3228 rc6_mask); 3279 rc6_mask);
3229 3280
3230 /* 4 Program defaults and thresholds for RPS*/ 3281 /* 4 Program defaults and thresholds for RPS*/
3231 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */ 3282 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
@@ -3235,8 +3286,8 @@ static void gen8_enable_rps(struct drm_device *dev)
3235 3286
3236 /* Docs recommend 900MHz, and 300 MHz respectively */ 3287 /* Docs recommend 900MHz, and 300 MHz respectively */
3237 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3288 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3238 dev_priv->rps.max_delay << 24 | 3289 dev_priv->rps.max_freq_softlimit << 24 |
3239 dev_priv->rps.min_delay << 16); 3290 dev_priv->rps.min_freq_softlimit << 16);
3240 3291
3241 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 3292 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3242 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 3293 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3269,7 +3320,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3269 struct intel_ring_buffer *ring; 3320 struct intel_ring_buffer *ring;
3270 u32 rp_state_cap; 3321 u32 rp_state_cap;
3271 u32 gt_perf_status; 3322 u32 gt_perf_status;
3272 u32 rc6vids, pcu_mbox, rc6_mask = 0; 3323 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3273 u32 gtfifodbg; 3324 u32 gtfifodbg;
3274 int rc6_mode; 3325 int rc6_mode;
3275 int i, ret; 3326 int i, ret;
@@ -3295,13 +3346,23 @@ static void gen6_enable_rps(struct drm_device *dev)
3295 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3346 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3296 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 3347 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3297 3348
3298 /* In units of 50MHz */ 3349 /* All of these values are in units of 50MHz */
3299 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3350 dev_priv->rps.cur_freq = 0;
 3300 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff; 3351 /* static values from HW: RP0 (max) >= RPe >= RP1 >= RPn (min_freq) */
3301 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff; 3352 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3302 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff; 3353 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3303 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay; 3354 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3304 dev_priv->rps.cur_delay = 0; 3355 /* XXX: only BYT has a special efficient freq */
3356 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3357 /* hw_max = RP0 until we check for overclocking */
3358 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3359
3360 /* Preserve min/max settings in case of re-init */
3361 if (dev_priv->rps.max_freq_softlimit == 0)
3362 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3363
3364 if (dev_priv->rps.min_freq_softlimit == 0)
3365 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3305 3366
3306 /* disable the counters and set deterministic thresholds */ 3367 /* disable the counters and set deterministic thresholds */
3307 I915_WRITE(GEN6_RC_CONTROL, 0); 3368 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3350,21 +3411,19 @@ static void gen6_enable_rps(struct drm_device *dev)
3350 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3411 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3351 3412
3352 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3413 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3353 if (!ret) { 3414 if (ret)
3354 pcu_mbox = 0;
3355 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3356 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3357 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3358 (dev_priv->rps.max_delay & 0xff) * 50,
3359 (pcu_mbox & 0xff) * 50);
3360 dev_priv->rps.hw_max = pcu_mbox & 0xff;
3361 }
3362 } else {
3363 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3415 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3416
3417 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3418 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3419 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3420 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3421 (pcu_mbox & 0xff) * 50);
3422 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3364 } 3423 }
3365 3424
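
The overclocking probe above reads the GEN6_READ_OC_PARAMS mailbox: bit 31 flags overclocking support and bits 7:0 carry the overclocked maximum in 50 MHz units. A small decoding sketch (the helper and the raw value are ours):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the GEN6_READ_OC_PARAMS mailbox word as the hunk above does:
 * bit 31 flags overclocking support, bits 7:0 carry the overclocked
 * maximum in 50 MHz units. */
static bool decode_oc_params(uint32_t pcu_mbox, unsigned int *oc_max_mhz)
{
        if (!(pcu_mbox & (1u << 31)))
                return false;
        *oc_max_mhz = (pcu_mbox & 0xff) * 50;
        return true;
}

int main(void)
{
        unsigned int mhz;

        if (decode_oc_params(0x80000019, &mhz)) /* hypothetical raw value */
                printf("Overclock max: %u MHz\n", mhz); /* 1250 */
        return 0;
}
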
3366 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 3425 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3367 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3426 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3368 3427
3369 gen6_enable_rps_interrupts(dev); 3428 gen6_enable_rps_interrupts(dev);
3370 3429
@@ -3420,9 +3479,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
3420 * to use for memory access. We do this by specifying the IA frequency 3479 * to use for memory access. We do this by specifying the IA frequency
3421 * the PCU should use as a reference to determine the ring frequency. 3480 * the PCU should use as a reference to determine the ring frequency.
3422 */ 3481 */
3423 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; 3482 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3424 gpu_freq--) { 3483 gpu_freq--) {
3425 int diff = dev_priv->rps.max_delay - gpu_freq; 3484 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3426 unsigned int ia_freq = 0, ring_freq = 0; 3485 unsigned int ia_freq = 0, ring_freq = 0;
3427 3486
3428 if (INTEL_INFO(dev)->gen >= 8) { 3487 if (INTEL_INFO(dev)->gen >= 8) {
@@ -3485,6 +3544,15 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3485 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3544 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3486} 3545}
3487 3546
 3547/* Check that the pctx buffer wasn't moved out from under us. */
3548static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3549{
3550 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3551
3552 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3553 dev_priv->vlv_pctx->stolen->start);
3554}
3555
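
The check masks the low 12 bits off VLV_PCBR because the power context sits on a 4 KiB boundary inside stolen memory, so only the page-aligned base is compared. A tiny sketch of that masking with a hypothetical raw value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical raw VLV_PCBR value: base address plus low control bits. */
        uint32_t pcbr = 0x7f800013;
        /* Drop the low 12 bits, as "& ~4095" does above: the power context
         * sits on a 4 KiB boundary inside stolen memory. */
        uint32_t pctx_addr = pcbr & ~4095u;

        printf("pctx base: 0x%08x\n", pctx_addr); /* 0x7f800000 */
        return 0;
}
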
3488static void valleyview_setup_pctx(struct drm_device *dev) 3556static void valleyview_setup_pctx(struct drm_device *dev)
3489{ 3557{
3490 struct drm_i915_private *dev_priv = dev->dev_private; 3558 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3529,6 +3597,17 @@ out:
3529 dev_priv->vlv_pctx = pctx; 3597 dev_priv->vlv_pctx = pctx;
3530} 3598}
3531 3599
3600static void valleyview_cleanup_pctx(struct drm_device *dev)
3601{
3602 struct drm_i915_private *dev_priv = dev->dev_private;
3603
3604 if (WARN_ON(!dev_priv->vlv_pctx))
3605 return;
3606
3607 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3608 dev_priv->vlv_pctx = NULL;
3609}
3610
3532static void valleyview_enable_rps(struct drm_device *dev) 3611static void valleyview_enable_rps(struct drm_device *dev)
3533{ 3612{
3534 struct drm_i915_private *dev_priv = dev->dev_private; 3613 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3538,6 +3617,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
3538 3617
3539 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3618 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3540 3619
3620 valleyview_check_pctx(dev_priv);
3621
3541 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3622 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3542 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 3623 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3543 gtfifodbg); 3624 gtfifodbg);
@@ -3588,32 +3669,39 @@ static void valleyview_enable_rps(struct drm_device *dev)
3588 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 3669 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3589 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 3670 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3590 3671
3591 dev_priv->rps.cur_delay = (val >> 8) & 0xff; 3672 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
3592 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 3673 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3593 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), 3674 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3594 dev_priv->rps.cur_delay); 3675 dev_priv->rps.cur_freq);
3595 3676
3596 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); 3677 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3597 dev_priv->rps.hw_max = dev_priv->rps.max_delay; 3678 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3598 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3679 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3599 vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), 3680 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3600 dev_priv->rps.max_delay); 3681 dev_priv->rps.max_freq);
3601 3682
3602 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); 3683 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3603 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3684 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3604 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3685 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3605 dev_priv->rps.rpe_delay); 3686 dev_priv->rps.efficient_freq);
3606 3687
3607 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); 3688 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3608 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3689 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3609 vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), 3690 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3610 dev_priv->rps.min_delay); 3691 dev_priv->rps.min_freq);
3692
3693 /* Preserve min/max settings in case of re-init */
3694 if (dev_priv->rps.max_freq_softlimit == 0)
3695 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3696
3697 if (dev_priv->rps.min_freq_softlimit == 0)
3698 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3611 3699
3612 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 3700 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3613 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3701 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3614 dev_priv->rps.rpe_delay); 3702 dev_priv->rps.efficient_freq);
3615 3703
3616 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3704 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3617 3705
3618 gen6_enable_rps_interrupts(dev); 3706 gen6_enable_rps_interrupts(dev);
3619 3707
@@ -3625,13 +3713,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
3625 struct drm_i915_private *dev_priv = dev->dev_private; 3713 struct drm_i915_private *dev_priv = dev->dev_private;
3626 3714
3627 if (dev_priv->ips.renderctx) { 3715 if (dev_priv->ips.renderctx) {
3628 i915_gem_object_unpin(dev_priv->ips.renderctx); 3716 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3629 drm_gem_object_unreference(&dev_priv->ips.renderctx->base); 3717 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3630 dev_priv->ips.renderctx = NULL; 3718 dev_priv->ips.renderctx = NULL;
3631 } 3719 }
3632 3720
3633 if (dev_priv->ips.pwrctx) { 3721 if (dev_priv->ips.pwrctx) {
3634 i915_gem_object_unpin(dev_priv->ips.pwrctx); 3722 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3635 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); 3723 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3636 dev_priv->ips.pwrctx = NULL; 3724 dev_priv->ips.pwrctx = NULL;
3637 } 3725 }
@@ -3823,9 +3911,10 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3823 3911
3824unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 3912unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3825{ 3913{
3914 struct drm_device *dev = dev_priv->dev;
3826 unsigned long val; 3915 unsigned long val;
3827 3916
3828 if (dev_priv->info->gen != 5) 3917 if (INTEL_INFO(dev)->gen != 5)
3829 return 0; 3918 return 0;
3830 3919
3831 spin_lock_irq(&mchdev_lock); 3920 spin_lock_irq(&mchdev_lock);
@@ -3854,6 +3943,7 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
3854 3943
3855static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 3944static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3856{ 3945{
3946 struct drm_device *dev = dev_priv->dev;
3857 static const struct v_table { 3947 static const struct v_table {
3858 u16 vd; /* in .1 mil */ 3948 u16 vd; /* in .1 mil */
3859 u16 vm; /* in .1 mil */ 3949 u16 vm; /* in .1 mil */
@@ -3987,7 +4077,7 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3987 { 16000, 14875, }, 4077 { 16000, 14875, },
3988 { 16125, 15000, }, 4078 { 16125, 15000, },
3989 }; 4079 };
3990 if (dev_priv->info->is_mobile) 4080 if (INTEL_INFO(dev)->is_mobile)
3991 return v_table[pxvid].vm; 4081 return v_table[pxvid].vm;
3992 else 4082 else
3993 return v_table[pxvid].vd; 4083 return v_table[pxvid].vd;
@@ -4030,7 +4120,9 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4030 4120
4031void i915_update_gfx_val(struct drm_i915_private *dev_priv) 4121void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4032{ 4122{
4033 if (dev_priv->info->gen != 5) 4123 struct drm_device *dev = dev_priv->dev;
4124
4125 if (INTEL_INFO(dev)->gen != 5)
4034 return; 4126 return;
4035 4127
4036 spin_lock_irq(&mchdev_lock); 4128 spin_lock_irq(&mchdev_lock);
@@ -4047,7 +4139,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4047 4139
4048 assert_spin_locked(&mchdev_lock); 4140 assert_spin_locked(&mchdev_lock);
4049 4141
4050 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); 4142 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4051 pxvid = (pxvid >> 24) & 0x7f; 4143 pxvid = (pxvid >> 24) & 0x7f;
4052 ext_v = pvid_to_extvid(dev_priv, pxvid); 4144 ext_v = pvid_to_extvid(dev_priv, pxvid);
4053 4145
@@ -4079,9 +4171,10 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4079 4171
4080unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 4172unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4081{ 4173{
4174 struct drm_device *dev = dev_priv->dev;
4082 unsigned long val; 4175 unsigned long val;
4083 4176
4084 if (dev_priv->info->gen != 5) 4177 if (INTEL_INFO(dev)->gen != 5)
4085 return 0; 4178 return 0;
4086 4179
4087 spin_lock_irq(&mchdev_lock); 4180 spin_lock_irq(&mchdev_lock);
@@ -4270,6 +4363,7 @@ void intel_gpu_ips_teardown(void)
4270 i915_mch_dev = NULL; 4363 i915_mch_dev = NULL;
4271 spin_unlock_irq(&mchdev_lock); 4364 spin_unlock_irq(&mchdev_lock);
4272} 4365}
4366
4273static void intel_init_emon(struct drm_device *dev) 4367static void intel_init_emon(struct drm_device *dev)
4274{ 4368{
4275 struct drm_i915_private *dev_priv = dev->dev_private; 4369 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4341,6 +4435,18 @@ static void intel_init_emon(struct drm_device *dev)
4341 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 4435 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4342} 4436}
4343 4437
4438void intel_init_gt_powersave(struct drm_device *dev)
4439{
4440 if (IS_VALLEYVIEW(dev))
4441 valleyview_setup_pctx(dev);
4442}
4443
4444void intel_cleanup_gt_powersave(struct drm_device *dev)
4445{
4446 if (IS_VALLEYVIEW(dev))
4447 valleyview_cleanup_pctx(dev);
4448}
4449
4344void intel_disable_gt_powersave(struct drm_device *dev) 4450void intel_disable_gt_powersave(struct drm_device *dev)
4345{ 4451{
4346 struct drm_i915_private *dev_priv = dev->dev_private; 4452 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4395,8 +4501,6 @@ void intel_enable_gt_powersave(struct drm_device *dev)
4395 ironlake_enable_rc6(dev); 4501 ironlake_enable_rc6(dev);
4396 intel_init_emon(dev); 4502 intel_init_emon(dev);
4397 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 4503 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4398 if (IS_VALLEYVIEW(dev))
4399 valleyview_setup_pctx(dev);
4400 /* 4504 /*
4401 * PCU communication is slow and this doesn't need to be 4505 * PCU communication is slow and this doesn't need to be
4402 * done at any specific time, so do this out of our fast path 4506 * done at any specific time, so do this out of our fast path
@@ -4587,6 +4691,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4587 I915_WRITE(GEN6_GT_MODE, 4691 I915_WRITE(GEN6_GT_MODE,
4588 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 4692 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4589 4693
4694 /*
 4695 * BSpec recommends 8x4 when MSAA is used,
4696 * however in practice 16x4 seems fastest.
4697 *
4698 * Note that PS/WM thread counts depend on the WIZ hashing
4699 * disable bit, which we don't touch here, but it's good
4700 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4701 */
4702 I915_WRITE(GEN6_GT_MODE,
4703 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4704
4590 ilk_init_lp_watermarks(dev); 4705 ilk_init_lp_watermarks(dev);
4591 4706
4592 I915_WRITE(CACHE_MODE_0, 4707 I915_WRITE(CACHE_MODE_0,
@@ -4607,17 +4722,24 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4607 * According to the spec, bit 11 (RCCUNIT) must also be set, 4722 * According to the spec, bit 11 (RCCUNIT) must also be set,
4608 * but we didn't debug actual testcases to find it out. 4723 * but we didn't debug actual testcases to find it out.
4609 * 4724 *
4610 * Also apply WaDisableVDSUnitClockGating:snb and 4725 * WaDisableRCCUnitClockGating:snb
4611 * WaDisableRCPBUnitClockGating:snb. 4726 * WaDisableRCPBUnitClockGating:snb
4612 */ 4727 */
4613 I915_WRITE(GEN6_UCGCTL2, 4728 I915_WRITE(GEN6_UCGCTL2,
4614 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4615 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 4729 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4616 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4730 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4617 4731
4618 /* Bspec says we need to always set all mask bits. */ 4732 /* WaStripsFansDisableFastClipPerformanceFix:snb */
4619 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | 4733 I915_WRITE(_3D_CHICKEN3,
4620 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); 4734 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
4735
4736 /*
4737 * Bspec says:
4738 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
4739 * 3DSTATE_SF number of SF output attributes is more than 16."
4740 */
4741 I915_WRITE(_3D_CHICKEN3,
4742 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
4621 4743
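
Registers like _3D_CHICKEN3 here are "masked" registers: the upper 16 bits of the written word select which of the lower 16 bits actually latch, which is what the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() wrappers encode. A sketch of the convention as we read it from this diff (the hardware model below is simplified):

#include <stdint.h>
#include <stdio.h>

/* The masked-register helpers as used throughout this diff: the high
 * half of the written word arms the update, the low half carries the
 * new value. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Simplified model of what the hardware does with such a write. */
static uint16_t masked_write(uint16_t reg, uint32_t val)
{
        uint16_t select = val >> 16;   /* which bits may change */
        uint16_t data = val & 0xffff;  /* their new values */

        return (reg & ~select) | (data & select);
}

int main(void)
{
        uint16_t reg = 0;

        reg = masked_write(reg, MASKED_BIT_ENABLE(1u << 5));
        printf("after enable:  0x%04x\n", reg);  /* bit 5 set */
        reg = masked_write(reg, MASKED_BIT_DISABLE(1u << 5));
        printf("after disable: 0x%04x\n", reg);  /* bit 5 clear */
        return 0;
}
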
4622 /* 4744 /*
4623 * According to the spec the following bits should be 4745 * According to the spec the following bits should be
@@ -4643,11 +4765,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4643 4765
4644 g4x_disable_trickle_feed(dev); 4766 g4x_disable_trickle_feed(dev);
4645 4767
4646 /* The default value should be 0x200 according to docs, but the two
4647 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
4648 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
4649 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
4650
4651 cpt_init_clock_gating(dev); 4768 cpt_init_clock_gating(dev);
4652 4769
4653 gen6_check_mch_setup(dev); 4770 gen6_check_mch_setup(dev);
@@ -4657,14 +4774,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4657{ 4774{
4658 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 4775 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4659 4776
4777 /*
4778 * WaVSThreadDispatchOverride:ivb,vlv
4779 *
4780 * This actually overrides the dispatch
4781 * mode for all thread types.
4782 */
4660 reg &= ~GEN7_FF_SCHED_MASK; 4783 reg &= ~GEN7_FF_SCHED_MASK;
4661 reg |= GEN7_FF_TS_SCHED_HW; 4784 reg |= GEN7_FF_TS_SCHED_HW;
4662 reg |= GEN7_FF_VS_SCHED_HW; 4785 reg |= GEN7_FF_VS_SCHED_HW;
4663 reg |= GEN7_FF_DS_SCHED_HW; 4786 reg |= GEN7_FF_DS_SCHED_HW;
4664 4787
4665 if (IS_HASWELL(dev_priv->dev))
4666 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4667
4668 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 4788 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4669} 4789}
4670 4790
@@ -4702,7 +4822,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
4702static void gen8_init_clock_gating(struct drm_device *dev) 4822static void gen8_init_clock_gating(struct drm_device *dev)
4703{ 4823{
4704 struct drm_i915_private *dev_priv = dev->dev_private; 4824 struct drm_i915_private *dev_priv = dev->dev_private;
4705 enum pipe i; 4825 enum pipe pipe;
4706 4826
4707 I915_WRITE(WM3_LP_ILK, 0); 4827 I915_WRITE(WM3_LP_ILK, 0);
4708 I915_WRITE(WM2_LP_ILK, 0); 4828 I915_WRITE(WM2_LP_ILK, 0);
@@ -4711,8 +4831,19 @@ static void gen8_init_clock_gating(struct drm_device *dev)
4711 /* FIXME(BDW): Check all the w/a, some might only apply to 4831 /* FIXME(BDW): Check all the w/a, some might only apply to
4712 * pre-production hw. */ 4832 * pre-production hw. */
4713 4833
4714 WARN(!i915_preliminary_hw_support, 4834 /* WaDisablePartialInstShootdown:bdw */
4715 "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n"); 4835 I915_WRITE(GEN8_ROW_CHICKEN,
4836 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
4837
4838 /* WaDisableThreadStallDopClockGating:bdw */
4839 /* FIXME: Unclear whether we really need this on production bdw. */
4840 I915_WRITE(GEN8_ROW_CHICKEN,
4841 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
4842
4843 /*
4844 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
4845 * pre-production hardware
4846 */
4716 I915_WRITE(HALF_SLICE_CHICKEN3, 4847 I915_WRITE(HALF_SLICE_CHICKEN3,
4717 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); 4848 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
4718 I915_WRITE(HALF_SLICE_CHICKEN3, 4849 I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -4736,10 +4867,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
4736 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 4867 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
4737 4868
4738 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 4869 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
4739 for_each_pipe(i) { 4870 for_each_pipe(pipe) {
4740 I915_WRITE(CHICKEN_PIPESL_1(i), 4871 I915_WRITE(CHICKEN_PIPESL_1(pipe),
4741 I915_READ(CHICKEN_PIPESL_1(i) | 4872 I915_READ(CHICKEN_PIPESL_1(pipe)) |
4742 DPRS_MASK_VBLANK_SRD)); 4873 BDW_DPRS_MASK_VBLANK_SRD);
4743 } 4874 }
4744 4875
4745 /* Use Force Non-Coherent whenever executing a 3D context. This is a 4876 /* Use Force Non-Coherent whenever executing a 3D context. This is a
@@ -4755,6 +4886,28 @@ static void gen8_init_clock_gating(struct drm_device *dev)
4755 I915_WRITE(GEN7_FF_THREAD_MODE, 4886 I915_WRITE(GEN7_FF_THREAD_MODE,
4756 I915_READ(GEN7_FF_THREAD_MODE) & 4887 I915_READ(GEN7_FF_THREAD_MODE) &
4757 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 4888 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
4889
4890 /*
4891 * BSpec recommends 8x4 when MSAA is used,
4892 * however in practice 16x4 seems fastest.
4893 *
4894 * Note that PS/WM thread counts depend on the WIZ hashing
4895 * disable bit, which we don't touch here, but it's good
4896 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4897 */
4898 I915_WRITE(GEN7_GT_MODE,
4899 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4900
4901 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
4902 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
4903
4904 /* WaDisableSDEUnitClockGating:bdw */
4905 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
4906 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
4907
4908 /* Wa4x4STCOptimizationDisable:bdw */
4909 I915_WRITE(CACHE_MODE_1,
4910 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
4758} 4911}
4759 4912
4760static void haswell_init_clock_gating(struct drm_device *dev) 4913static void haswell_init_clock_gating(struct drm_device *dev)
@@ -4763,21 +4916,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4763 4916
4764 ilk_init_lp_watermarks(dev); 4917 ilk_init_lp_watermarks(dev);
4765 4918
4766 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4767 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
4768 */
4769 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4770
4771 /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
4772 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4773 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4774
4775 /* WaApplyL3ControlAndL3ChickenMode:hsw */
4776 I915_WRITE(GEN7_L3CNTLREG1,
4777 GEN7_WA_FOR_GEN7_L3_CONTROL);
4778 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4779 GEN7_WA_L3_CHICKEN_MODE);
4780
4781 /* L3 caching of data atomics doesn't work -- disable it. */ 4919 /* L3 caching of data atomics doesn't work -- disable it. */
4782 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 4920 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4783 I915_WRITE(HSW_ROW_CHICKEN3, 4921 I915_WRITE(HSW_ROW_CHICKEN3,
@@ -4789,12 +4927,28 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4789 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4927 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4790 4928
4791 /* WaVSRefCountFullforceMissDisable:hsw */ 4929 /* WaVSRefCountFullforceMissDisable:hsw */
4792 gen7_setup_fixed_func_scheduler(dev_priv); 4930 I915_WRITE(GEN7_FF_THREAD_MODE,
4931 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
4932
4933 /* enable HiZ Raw Stall Optimization */
4934 I915_WRITE(CACHE_MODE_0_GEN7,
4935 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
4793 4936
4794 /* WaDisable4x2SubspanOptimization:hsw */ 4937 /* WaDisable4x2SubspanOptimization:hsw */
4795 I915_WRITE(CACHE_MODE_1, 4938 I915_WRITE(CACHE_MODE_1,
4796 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4939 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4797 4940
4941 /*
4942 * BSpec recommends 8x4 when MSAA is used,
4943 * however in practice 16x4 seems fastest.
4944 *
4945 * Note that PS/WM thread counts depend on the WIZ hashing
4946 * disable bit, which we don't touch here, but it's good
4947 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4948 */
4949 I915_WRITE(GEN7_GT_MODE,
4950 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4951
4798 /* WaSwitchSolVfFArbitrationPriority:hsw */ 4952 /* WaSwitchSolVfFArbitrationPriority:hsw */
4799 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 4953 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4800 4954
@@ -4827,9 +4981,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4827 if (IS_IVB_GT1(dev)) 4981 if (IS_IVB_GT1(dev))
4828 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 4982 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4829 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4983 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4830 else
4831 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
4832 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4833 4984
4834 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 4985 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
4835 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4986 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -4843,31 +4994,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4843 if (IS_IVB_GT1(dev)) 4994 if (IS_IVB_GT1(dev))
4844 I915_WRITE(GEN7_ROW_CHICKEN2, 4995 I915_WRITE(GEN7_ROW_CHICKEN2,
4845 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4996 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4846 else 4997 else {
4998 /* must write both registers */
4999 I915_WRITE(GEN7_ROW_CHICKEN2,
5000 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4847 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 5001 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
4848 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 5002 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4849 5003 }
4850 5004
4851 /* WaForceL3Serialization:ivb */ 5005 /* WaForceL3Serialization:ivb */
4852 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 5006 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4853 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 5007 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4854 5008
4855 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5009 /*
4856 * gating disable must be set. Failure to set it results in
4857 * flickering pixels due to Z write ordering failures after
4858 * some amount of runtime in the Mesa "fire" demo, and Unigine
4859 * Sanctuary and Tropics, and apparently anything else with
4860 * alpha test or pixel discard.
4861 *
4862 * According to the spec, bit 11 (RCCUNIT) must also be set,
4863 * but we didn't debug actual testcases to find it out.
4864 *
4865 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 5010 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4866 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 5011 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
4867 */ 5012 */
4868 I915_WRITE(GEN6_UCGCTL2, 5013 I915_WRITE(GEN6_UCGCTL2,
4869 GEN6_RCZUNIT_CLOCK_GATE_DISABLE | 5014 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4870 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4871 5015
4872 /* This is required by WaCatErrorRejectionIssue:ivb */ 5016 /* This is required by WaCatErrorRejectionIssue:ivb */
4873 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 5017 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -4876,13 +5020,29 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4876 5020
4877 g4x_disable_trickle_feed(dev); 5021 g4x_disable_trickle_feed(dev);
4878 5022
4879 /* WaVSRefCountFullforceMissDisable:ivb */
4880 gen7_setup_fixed_func_scheduler(dev_priv); 5023 gen7_setup_fixed_func_scheduler(dev_priv);
4881 5024
5025 if (0) { /* causes HiZ corruption on ivb:gt1 */
5026 /* enable HiZ Raw Stall Optimization */
5027 I915_WRITE(CACHE_MODE_0_GEN7,
5028 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5029 }
5030
4882 /* WaDisable4x2SubspanOptimization:ivb */ 5031 /* WaDisable4x2SubspanOptimization:ivb */
4883 I915_WRITE(CACHE_MODE_1, 5032 I915_WRITE(CACHE_MODE_1,
4884 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 5033 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4885 5034
5035 /*
5036 * BSpec recommends 8x4 when MSAA is used,
5037 * however in practice 16x4 seems fastest.
5038 *
5039 * Note that PS/WM thread counts depend on the WIZ hashing
5040 * disable bit, which we don't touch here, but it's good
5041 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5042 */
5043 I915_WRITE(GEN7_GT_MODE,
5044 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5045
4886 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5046 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4887 snpcr &= ~GEN6_MBC_SNPCR_MASK; 5047 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4888 snpcr |= GEN6_MBC_SNPCR_MED; 5048 snpcr |= GEN6_MBC_SNPCR_MED;
@@ -4904,13 +5064,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4904 mutex_unlock(&dev_priv->rps.hw_lock); 5064 mutex_unlock(&dev_priv->rps.hw_lock);
4905 switch ((val >> 6) & 3) { 5065 switch ((val >> 6) & 3) {
4906 case 0: 5066 case 0:
4907 dev_priv->mem_freq = 800;
4908 break;
4909 case 1: 5067 case 1:
4910 dev_priv->mem_freq = 1066; 5068 dev_priv->mem_freq = 800;
4911 break; 5069 break;
4912 case 2: 5070 case 2:
4913 dev_priv->mem_freq = 1333; 5071 dev_priv->mem_freq = 1066;
4914 break; 5072 break;
4915 case 3: 5073 case 3:
4916 dev_priv->mem_freq = 1333; 5074 dev_priv->mem_freq = 1333;
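
The reordered switch decodes a 2-bit DDR speed field, so after this fix field values 0 and 1 both mean 800 MHz. A sketch of the corrected mapping (function name and test value are ours):

#include <stdio.h>

/* The corrected 2-bit DDR speed decode from the switch above; the raw
 * value would come from the Punit on real hardware. */
static int vlv_mem_freq_mhz(unsigned int val)
{
        switch ((val >> 6) & 3) {
        case 0:
        case 1:
                return 800;
        case 2:
                return 1066;
        case 3:
                return 1333;
        }
        return 0; /* unreachable */
}

int main(void)
{
        printf("%d MHz\n", vlv_mem_freq_mhz(2 << 6)); /* 1066 */
        return 0;
}
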
@@ -4929,19 +5087,12 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4929 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 5087 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4930 CHICKEN3_DGMG_DONE_FIX_DISABLE); 5088 CHICKEN3_DGMG_DONE_FIX_DISABLE);
4931 5089
5090 /* WaPsdDispatchEnable:vlv */
4932 /* WaDisablePSDDualDispatchEnable:vlv */ 5091 /* WaDisablePSDDualDispatchEnable:vlv */
4933 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5092 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4934 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 5093 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
4935 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 5094 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4936 5095
4937 /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
4938 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4939 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4940
4941 /* WaApplyL3ControlAndL3ChickenMode:vlv */
4942 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
4943 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
4944
4945 /* WaForceL3Serialization:vlv */ 5096 /* WaForceL3Serialization:vlv */
4946 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 5097 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4947 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 5098 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -4955,51 +5106,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4955 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5106 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4956 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5107 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4957 5108
4958 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5109 gen7_setup_fixed_func_scheduler(dev_priv);
4959 * gating disable must be set. Failure to set it results in 5110
4960 * flickering pixels due to Z write ordering failures after 5111 /*
4961 * some amount of runtime in the Mesa "fire" demo, and Unigine
4962 * Sanctuary and Tropics, and apparently anything else with
4963 * alpha test or pixel discard.
4964 *
4965 * According to the spec, bit 11 (RCCUNIT) must also be set,
4966 * but we didn't debug actual testcases to find it out.
4967 *
4968 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 5112 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4969 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 5113 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
4970 *
4971 * Also apply WaDisableVDSUnitClockGating:vlv and
4972 * WaDisableRCPBUnitClockGating:vlv.
4973 */ 5114 */
4974 I915_WRITE(GEN6_UCGCTL2, 5115 I915_WRITE(GEN6_UCGCTL2,
4975 GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 5116 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4976 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
4977 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4978 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4979 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4980 5117
5118 /* WaDisableL3Bank2xClockGate:vlv */
4981 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 5119 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4982 5120
4983 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 5121 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4984 5122
5123 /*
5124 * BSpec says this must be set, even though
5125 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5126 */
4985 I915_WRITE(CACHE_MODE_1, 5127 I915_WRITE(CACHE_MODE_1,
4986 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 5128 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4987 5129
4988 /* 5130 /*
5131 * WaIncreaseL3CreditsForVLVB0:vlv
5132 * This is the hardware default actually.
5133 */
5134 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5135
5136 /*
 4989 * WaDisableVLVClockGating_VBIIssue:vlv 5137 * WaDisableVLVClockGating_VBIIssue:vlv
 4990 * Disable clock gating on the GCFG unit to prevent a delay 5138 * Disable clock gating on the GCFG unit to prevent a delay
 4991 * in the reporting of vblank events. 5139 * in the reporting of vblank events.
 4992 */ 5140 */
 4993 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); 5141 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 4994
4995 /* Conservative clock gating settings for now */
4996 I915_WRITE(0x9400, 0xffffffff);
4997 I915_WRITE(0x9404, 0xffffffff);
4998 I915_WRITE(0x9408, 0xffffffff);
4999 I915_WRITE(0x940c, 0xffffffff);
5000 I915_WRITE(0x9410, 0xffffffff);
5001 I915_WRITE(0x9414, 0xffffffff);
5002 I915_WRITE(0x9418, 0xffffffff);
5003} 5142}
5004 5143
5005static void g4x_init_clock_gating(struct drm_device *dev) 5144static void g4x_init_clock_gating(struct drm_device *dev)
@@ -5114,19 +5253,16 @@ void intel_suspend_hw(struct drm_device *dev)
5114 * enable it, so check if it's enabled and also check if we've requested it to 5253 * enable it, so check if it's enabled and also check if we've requested it to
5115 * be enabled. 5254 * be enabled.
5116 */ 5255 */
5117static bool hsw_power_well_enabled(struct drm_device *dev, 5256static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5118 struct i915_power_well *power_well) 5257 struct i915_power_well *power_well)
5119{ 5258{
5120 struct drm_i915_private *dev_priv = dev->dev_private;
5121
5122 return I915_READ(HSW_PWR_WELL_DRIVER) == 5259 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5123 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5260 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5124} 5261}
5125 5262
5126bool intel_display_power_enabled_sw(struct drm_device *dev, 5263bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5127 enum intel_display_power_domain domain) 5264 enum intel_display_power_domain domain)
5128{ 5265{
5129 struct drm_i915_private *dev_priv = dev->dev_private;
5130 struct i915_power_domains *power_domains; 5266 struct i915_power_domains *power_domains;
5131 5267
5132 power_domains = &dev_priv->power_domains; 5268 power_domains = &dev_priv->power_domains;
@@ -5134,15 +5270,17 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
5134 return power_domains->domain_use_count[domain]; 5270 return power_domains->domain_use_count[domain];
5135} 5271}
5136 5272
5137bool intel_display_power_enabled(struct drm_device *dev, 5273bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5138 enum intel_display_power_domain domain) 5274 enum intel_display_power_domain domain)
5139{ 5275{
5140 struct drm_i915_private *dev_priv = dev->dev_private;
5141 struct i915_power_domains *power_domains; 5276 struct i915_power_domains *power_domains;
5142 struct i915_power_well *power_well; 5277 struct i915_power_well *power_well;
5143 bool is_enabled; 5278 bool is_enabled;
5144 int i; 5279 int i;
5145 5280
5281 if (dev_priv->pm.suspended)
5282 return false;
5283
5146 power_domains = &dev_priv->power_domains; 5284 power_domains = &dev_priv->power_domains;
5147 5285
5148 is_enabled = true; 5286 is_enabled = true;
@@ -5152,7 +5290,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
5152 if (power_well->always_on) 5290 if (power_well->always_on)
5153 continue; 5291 continue;
5154 5292
5155 if (!power_well->is_enabled(dev, power_well)) { 5293 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5156 is_enabled = false; 5294 is_enabled = false;
5157 break; 5295 break;
5158 } 5296 }
@@ -5162,6 +5300,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
5162 return is_enabled; 5300 return is_enabled;
5163} 5301}
5164 5302
5303/*
5304 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5305 * when not needed anymore. We have 4 registers that can request the power well
5306 * to be enabled, and it will only be disabled if none of the registers is
5307 * requesting it to be enabled.
5308 */
5165static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 5309static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5166{ 5310{
5167 struct drm_device *dev = dev_priv->dev; 5311 struct drm_device *dev = dev_priv->dev;
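
Per the comment above, the Haswell power well stays up while any of the four request registers (BIOS, driver, KVMr, debug, per our reading of the HSW_PWR_WELL_* names) asks for it; hsw_power_well_sync_hw() clears the BIOS request once the driver takes over. A toy model of that arbitration:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the arbitration described above: the well stays up
 * while at least one requester (BIOS, driver, KVMr, debug) asks for
 * it. Field names reflect our reading of the HSW_PWR_WELL_* registers. */
struct power_well_requests {
        bool bios, driver, kvmr, debug;
};

static bool power_well_enabled(const struct power_well_requests *req)
{
        return req->bios || req->driver || req->kvmr || req->debug;
}

int main(void)
{
        struct power_well_requests req = { .bios = true };

        /* hsw_power_well_sync_hw() clears the BIOS request once the
         * driver takes over; only then can the well power down. */
        req.bios = false;
        printf("well enabled: %d\n", power_well_enabled(&req));
        return 0;
}
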
@@ -5198,10 +5342,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5198 } 5342 }
5199} 5343}
5200 5344
5345static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
5346{
5347 assert_spin_locked(&dev->vbl_lock);
5348
5349 dev->vblank[pipe].last = 0;
5350}
5351
5201static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv) 5352static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
5202{ 5353{
5203 struct drm_device *dev = dev_priv->dev; 5354 struct drm_device *dev = dev_priv->dev;
5204 enum pipe p; 5355 enum pipe pipe;
5205 unsigned long irqflags; 5356 unsigned long irqflags;
5206 5357
5207 /* 5358 /*
@@ -5212,21 +5363,18 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
5212 * FIXME: Should we do this in general in drm_vblank_post_modeset? 5363 * FIXME: Should we do this in general in drm_vblank_post_modeset?
5213 */ 5364 */
5214 spin_lock_irqsave(&dev->vbl_lock, irqflags); 5365 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5215 for_each_pipe(p) 5366 for_each_pipe(pipe)
5216 if (p != PIPE_A) 5367 if (pipe != PIPE_A)
5217 dev->vblank[p].last = 0; 5368 reset_vblank_counter(dev, pipe);
5218 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 5369 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5219} 5370}
5220 5371
5221static void hsw_set_power_well(struct drm_device *dev, 5372static void hsw_set_power_well(struct drm_i915_private *dev_priv,
5222 struct i915_power_well *power_well, bool enable) 5373 struct i915_power_well *power_well, bool enable)
5223{ 5374{
5224 struct drm_i915_private *dev_priv = dev->dev_private;
5225 bool is_enabled, enable_requested; 5375 bool is_enabled, enable_requested;
5226 uint32_t tmp; 5376 uint32_t tmp;
5227 5377
5228 WARN_ON(dev_priv->pc8.enabled);
5229
5230 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5378 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5231 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 5379 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5232 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 5380 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5255,55 +5403,229 @@ static void hsw_set_power_well(struct drm_device *dev,
5255 } 5403 }
5256} 5404}
5257 5405
5258static void __intel_power_well_get(struct drm_device *dev, 5406static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
5259 struct i915_power_well *power_well) 5407 struct i915_power_well *power_well)
5260{ 5408{
5261 struct drm_i915_private *dev_priv = dev->dev_private; 5409 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
5262 5410
5263 if (!power_well->count++ && power_well->set) { 5411 /*
5264 hsw_disable_package_c8(dev_priv); 5412 * We're taking over the BIOS, so clear any requests made by it since
5265 power_well->set(dev, power_well, true); 5413 * the driver is in charge now.
5266 } 5414 */
5415 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5416 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5417}
5418
5419static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
5420 struct i915_power_well *power_well)
5421{
5422 hsw_set_power_well(dev_priv, power_well, true);
5267} 5423}
5268 5424
5269static void __intel_power_well_put(struct drm_device *dev, 5425static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
5270 struct i915_power_well *power_well) 5426 struct i915_power_well *power_well)
5271{ 5427{
5272 struct drm_i915_private *dev_priv = dev->dev_private; 5428 hsw_set_power_well(dev_priv, power_well, false);
5429}
5430
5431static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
5432 struct i915_power_well *power_well)
5433{
5434}
5435
5436static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5437 struct i915_power_well *power_well)
5438{
5439 return true;
5440}
5441
5442static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5443 struct i915_power_well *power_well, bool enable)
5444{
5445 enum punit_power_well power_well_id = power_well->data;
5446 u32 mask;
5447 u32 state;
5448 u32 ctrl;
5449
5450 mask = PUNIT_PWRGT_MASK(power_well_id);
5451 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
5452 PUNIT_PWRGT_PWR_GATE(power_well_id);
5453
5454 mutex_lock(&dev_priv->rps.hw_lock);
5455
5456#define COND \
5457 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
5458
5459 if (COND)
5460 goto out;
5461
5462 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
5463 ctrl &= ~mask;
5464 ctrl |= state;
5465 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
5466
5467 if (wait_for(COND, 100))
5468 DRM_ERROR("timout setting power well state %08x (%08x)\n",
5469 state,
5470 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
5471
5472#undef COND
5473
5474out:
5475 mutex_unlock(&dev_priv->rps.hw_lock);
5476}
5477
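
vlv_set_power_well() above follows a write-then-poll Punit handshake: compute the mask and target state for the well, write them into PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS until it matches. A stubbed sketch of the same shape (register I/O is modeled with plain variables):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Register I/O modeled with plain variables; the "hardware" applies the
 * control word instantly instead of after a real Punit round trip. */
static uint32_t punit_ctrl, punit_status;

static void punit_apply(void)
{
        punit_status = punit_ctrl;
}

static bool set_power_well(uint32_t mask, uint32_t state)
{
        if ((punit_status & mask) == state)
                return true; /* already there, as the COND shortcut above */

        punit_ctrl = (punit_ctrl & ~mask) | state;
        punit_apply();

        /* The driver polls with wait_for(COND, 100); one check suffices
         * for this instant model. */
        return (punit_status & mask) == state;
}

int main(void)
{
        printf("%s\n", set_power_well(0x3, 0x1) ? "on" : "timeout");
        return 0;
}
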
5478static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
5479 struct i915_power_well *power_well)
5480{
5481 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
5482}
5483
5484static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
5485 struct i915_power_well *power_well)
5486{
5487 vlv_set_power_well(dev_priv, power_well, true);
5488}
5489
5490static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
5491 struct i915_power_well *power_well)
5492{
5493 vlv_set_power_well(dev_priv, power_well, false);
5494}
5495
5496static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
5497 struct i915_power_well *power_well)
5498{
5499 int power_well_id = power_well->data;
5500 bool enabled = false;
5501 u32 mask;
5502 u32 state;
5503 u32 ctrl;
5504
5505 mask = PUNIT_PWRGT_MASK(power_well_id);
5506 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
5507
5508 mutex_lock(&dev_priv->rps.hw_lock);
5509
5510 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
5511 /*
5512 * We only ever set the power-on and power-gate states, anything
5513 * else is unexpected.
5514 */
5515 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
5516 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
5517 if (state == ctrl)
5518 enabled = true;
5519
5520 /*
5521 * A transient state at this point would mean some unexpected party
5522 * is poking at the power controls too.
5523 */
5524 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
5525 WARN_ON(ctrl != state);
5526
5527 mutex_unlock(&dev_priv->rps.hw_lock);
5528
5529 return enabled;
5530}
5273 5531
5274 WARN_ON(!power_well->count); 5532static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
5533 struct i915_power_well *power_well)
5534{
5535 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5536
5537 vlv_set_power_well(dev_priv, power_well, true);
5538
5539 spin_lock_irq(&dev_priv->irq_lock);
5540 valleyview_enable_display_irqs(dev_priv);
5541 spin_unlock_irq(&dev_priv->irq_lock);
5542
5543 /*
5544 * During driver initialization we need to defer enabling hotplug
5545 * processing until fbdev is set up.
5546 */
5547 if (dev_priv->enable_hotplug_processing)
5548 intel_hpd_init(dev_priv->dev);
5549
5550 i915_redisable_vga_power_on(dev_priv->dev);
5551}
5552
5553static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
5554 struct i915_power_well *power_well)
5555{
5556 struct drm_device *dev = dev_priv->dev;
5557 enum pipe pipe;
5558
5559 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5560
5561 spin_lock_irq(&dev_priv->irq_lock);
5562 for_each_pipe(pipe)
5563 __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
5564
5565 valleyview_disable_display_irqs(dev_priv);
5566 spin_unlock_irq(&dev_priv->irq_lock);
5567
5568 spin_lock_irq(&dev->vbl_lock);
5569 for_each_pipe(pipe)
5570 reset_vblank_counter(dev, pipe);
5571 spin_unlock_irq(&dev->vbl_lock);
5572
5573 vlv_set_power_well(dev_priv, power_well, false);
5574}
5275 5575
5276 if (!--power_well->count && power_well->set && 5576static void check_power_well_state(struct drm_i915_private *dev_priv,
5277 i915_disable_power_well) { 5577 struct i915_power_well *power_well)
5278 power_well->set(dev, power_well, false); 5578{
5279 hsw_enable_package_c8(dev_priv); 5579 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
5580
5581 if (power_well->always_on || !i915.disable_power_well) {
5582 if (!enabled)
5583 goto mismatch;
5584
5585 return;
5280 } 5586 }
5587
5588 if (enabled != (power_well->count > 0))
5589 goto mismatch;
5590
5591 return;
5592
5593mismatch:
5594	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
5595 power_well->name, power_well->always_on, enabled,
5596 power_well->count, i915.disable_power_well);
5281} 5597}
5282 5598
5283void intel_display_power_get(struct drm_device *dev, 5599void intel_display_power_get(struct drm_i915_private *dev_priv,
5284 enum intel_display_power_domain domain) 5600 enum intel_display_power_domain domain)
5285{ 5601{
5286 struct drm_i915_private *dev_priv = dev->dev_private;
5287 struct i915_power_domains *power_domains; 5602 struct i915_power_domains *power_domains;
5288 struct i915_power_well *power_well; 5603 struct i915_power_well *power_well;
5289 int i; 5604 int i;
5290 5605
5606 intel_runtime_pm_get(dev_priv);
5607
5291 power_domains = &dev_priv->power_domains; 5608 power_domains = &dev_priv->power_domains;
5292 5609
5293 mutex_lock(&power_domains->lock); 5610 mutex_lock(&power_domains->lock);
5294 5611
5295 for_each_power_well(i, power_well, BIT(domain), power_domains) 5612 for_each_power_well(i, power_well, BIT(domain), power_domains) {
5296 __intel_power_well_get(dev, power_well); 5613 if (!power_well->count++) {
5614 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5615 power_well->ops->enable(dev_priv, power_well);
5616 }
5617
5618 check_power_well_state(dev_priv, power_well);
5619 }
5297 5620
5298 power_domains->domain_use_count[domain]++; 5621 power_domains->domain_use_count[domain]++;
5299 5622
5300 mutex_unlock(&power_domains->lock); 5623 mutex_unlock(&power_domains->lock);
5301} 5624}
5302 5625
5303void intel_display_power_put(struct drm_device *dev, 5626void intel_display_power_put(struct drm_i915_private *dev_priv,
5304 enum intel_display_power_domain domain) 5627 enum intel_display_power_domain domain)
5305{ 5628{
5306 struct drm_i915_private *dev_priv = dev->dev_private;
5307 struct i915_power_domains *power_domains; 5629 struct i915_power_domains *power_domains;
5308 struct i915_power_well *power_well; 5630 struct i915_power_well *power_well;
5309 int i; 5631 int i;
@@ -5315,10 +5637,20 @@ void intel_display_power_put(struct drm_device *dev,
5315 WARN_ON(!power_domains->domain_use_count[domain]); 5637 WARN_ON(!power_domains->domain_use_count[domain]);
5316 power_domains->domain_use_count[domain]--; 5638 power_domains->domain_use_count[domain]--;
5317 5639
5318 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) 5640 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5319 __intel_power_well_put(dev, power_well); 5641 WARN_ON(!power_well->count);
5642
5643 if (!--power_well->count && i915.disable_power_well) {
5644 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
5645 power_well->ops->disable(dev_priv, power_well);
5646 }
5647
5648 check_power_well_state(dev_priv, power_well);
5649 }
5320 5650
5321 mutex_unlock(&power_domains->lock); 5651 mutex_unlock(&power_domains->lock);
5652
5653 intel_runtime_pm_put(dev_priv);
5322} 5654}
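
Both entry points follow the classic refcount-under-mutex pattern: a well is powered on only on the 0-to-1 transition of its use count and powered off only on 1-to-0, so nested get/put pairs from independent call sites compose safely. A self-contained sketch of the pattern, with a pthread mutex standing in for power_domains->lock and a bool for the real hardware poke:

    #include <pthread.h>
    #include <stdbool.h>

    struct power_well {
            pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
            int count;              /* outstanding references */
            bool hw_enabled;        /* stand-in for the hardware state */
    };

    void power_well_get(struct power_well *w)
    {
            pthread_mutex_lock(&w->lock);
            if (w->count++ == 0)    /* first user powers the well on */
                    w->hw_enabled = true;
            pthread_mutex_unlock(&w->lock);
    }

    void power_well_put(struct power_well *w)
    {
            pthread_mutex_lock(&w->lock);
            if (--w->count == 0)    /* last user powers it off */
                    w->hw_enabled = false;
            pthread_mutex_unlock(&w->lock);
    }
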
5323 5655
5324static struct i915_power_domains *hsw_pwr; 5656static struct i915_power_domains *hsw_pwr;
@@ -5333,7 +5665,7 @@ void i915_request_power_well(void)
5333 5665
5334 dev_priv = container_of(hsw_pwr, struct drm_i915_private, 5666 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5335 power_domains); 5667 power_domains);
5336 intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO); 5668 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
5337} 5669}
5338EXPORT_SYMBOL_GPL(i915_request_power_well); 5670EXPORT_SYMBOL_GPL(i915_request_power_well);
5339 5671
@@ -5347,29 +5679,99 @@ void i915_release_power_well(void)
5347 5679
5348 dev_priv = container_of(hsw_pwr, struct drm_i915_private, 5680 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5349 power_domains); 5681 power_domains);
5350 intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO); 5682 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
5351} 5683}
5352EXPORT_SYMBOL_GPL(i915_release_power_well); 5684EXPORT_SYMBOL_GPL(i915_release_power_well);
5353 5685
5686#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
5687
5688#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
5689 BIT(POWER_DOMAIN_PIPE_A) | \
5690 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
5691 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
5692 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
5693 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5694 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5695 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5696 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5697 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
5698 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
5699 BIT(POWER_DOMAIN_PORT_CRT) | \
5700 BIT(POWER_DOMAIN_INIT))
5701#define HSW_DISPLAY_POWER_DOMAINS ( \
5702 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
5703 BIT(POWER_DOMAIN_INIT))
5704
5705#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
5706 HSW_ALWAYS_ON_POWER_DOMAINS | \
5707 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
5708#define BDW_DISPLAY_POWER_DOMAINS ( \
5709 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
5710 BIT(POWER_DOMAIN_INIT))
5711
5712#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
5713#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
5714
5715#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
5716 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5717 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5718 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5719 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5720 BIT(POWER_DOMAIN_PORT_CRT) | \
5721 BIT(POWER_DOMAIN_INIT))
5722
5723#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
5724 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5725 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5726 BIT(POWER_DOMAIN_INIT))
5727
5728#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
5729 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5730 BIT(POWER_DOMAIN_INIT))
5731
5732#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
5733 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5734 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5735 BIT(POWER_DOMAIN_INIT))
5736
5737#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
5738 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5739 BIT(POWER_DOMAIN_INIT))
5740
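These #defines are plain bitmask algebra: each power domain gets one bit, the always-on set is OR'ed together, and the display well's set is derived as the complement of the always-on set within POWER_DOMAIN_MASK. A toy rendering with hypothetical domain names, just to make the derivation concrete:

    #define BIT(n) (1u << (n))

    enum domain { DOM_PIPE_A, DOM_TRANSCODER_EDP, DOM_INIT, DOM_NUM };

    #define DOMAIN_MASK        (BIT(DOM_NUM) - 1)            /* all domains */
    #define ALWAYS_ON_DOMAINS  (BIT(DOM_PIPE_A) | BIT(DOM_INIT))
    /* Everything that is not always-on needs the display power well. */
    #define DISPLAY_DOMAINS    ((DOMAIN_MASK & ~ALWAYS_ON_DOMAINS) | BIT(DOM_INIT))
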
5741static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
5742 .sync_hw = i9xx_always_on_power_well_noop,
5743 .enable = i9xx_always_on_power_well_noop,
5744 .disable = i9xx_always_on_power_well_noop,
5745 .is_enabled = i9xx_always_on_power_well_enabled,
5746};
5747
5354static struct i915_power_well i9xx_always_on_power_well[] = { 5748static struct i915_power_well i9xx_always_on_power_well[] = {
5355 { 5749 {
5356 .name = "always-on", 5750 .name = "always-on",
5357 .always_on = 1, 5751 .always_on = 1,
5358 .domains = POWER_DOMAIN_MASK, 5752 .domains = POWER_DOMAIN_MASK,
5753 .ops = &i9xx_always_on_power_well_ops,
5359 }, 5754 },
5360}; 5755};
5361 5756
5757static const struct i915_power_well_ops hsw_power_well_ops = {
5758 .sync_hw = hsw_power_well_sync_hw,
5759 .enable = hsw_power_well_enable,
5760 .disable = hsw_power_well_disable,
5761 .is_enabled = hsw_power_well_enabled,
5762};
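
The i915_power_well_ops tables above replace the old per-well function pointers with one vtable per well type, so intel_display_power_get/put stay platform-agnostic and each platform only supplies an ops struct. The shape of the pattern in plain C, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct well;

    struct well_ops {
            void (*enable)(struct well *w);
            void (*disable)(struct well *w);
            bool (*is_enabled)(struct well *w);
    };

    struct well {
            const char *name;
            const struct well_ops *ops;     /* per-platform behavior */
    };

    static void demo_enable(struct well *w)  { printf("%s: enable\n", w->name); }
    static void demo_disable(struct well *w) { printf("%s: disable\n", w->name); }
    static bool demo_is_enabled(struct well *w) { (void)w; return true; }

    static const struct well_ops always_on_ops = {
            .enable     = demo_enable,
            .disable    = demo_disable,
            .is_enabled = demo_is_enabled,
    };

    int main(void)
    {
            struct well w = { .name = "always-on", .ops = &always_on_ops };
            w.ops->enable(&w);              /* generic call site */
            return 0;
    }
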
5763
5362static struct i915_power_well hsw_power_wells[] = { 5764static struct i915_power_well hsw_power_wells[] = {
5363 { 5765 {
5364 .name = "always-on", 5766 .name = "always-on",
5365 .always_on = 1, 5767 .always_on = 1,
5366 .domains = HSW_ALWAYS_ON_POWER_DOMAINS, 5768 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
5769 .ops = &i9xx_always_on_power_well_ops,
5367 }, 5770 },
5368 { 5771 {
5369 .name = "display", 5772 .name = "display",
5370 .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS, 5773 .domains = HSW_DISPLAY_POWER_DOMAINS,
5371 .is_enabled = hsw_power_well_enabled, 5774 .ops = &hsw_power_well_ops,
5372 .set = hsw_set_power_well,
5373 }, 5775 },
5374}; 5776};
5375 5777
@@ -5378,12 +5780,83 @@ static struct i915_power_well bdw_power_wells[] = {
5378 .name = "always-on", 5780 .name = "always-on",
5379 .always_on = 1, 5781 .always_on = 1,
5380 .domains = BDW_ALWAYS_ON_POWER_DOMAINS, 5782 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
5783 .ops = &i9xx_always_on_power_well_ops,
5381 }, 5784 },
5382 { 5785 {
5383 .name = "display", 5786 .name = "display",
5384 .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS, 5787 .domains = BDW_DISPLAY_POWER_DOMAINS,
5385 .is_enabled = hsw_power_well_enabled, 5788 .ops = &hsw_power_well_ops,
5386 .set = hsw_set_power_well, 5789 },
5790};
5791
5792static const struct i915_power_well_ops vlv_display_power_well_ops = {
5793 .sync_hw = vlv_power_well_sync_hw,
5794 .enable = vlv_display_power_well_enable,
5795 .disable = vlv_display_power_well_disable,
5796 .is_enabled = vlv_power_well_enabled,
5797};
5798
5799static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
5800 .sync_hw = vlv_power_well_sync_hw,
5801 .enable = vlv_power_well_enable,
5802 .disable = vlv_power_well_disable,
5803 .is_enabled = vlv_power_well_enabled,
5804};
5805
5806static struct i915_power_well vlv_power_wells[] = {
5807 {
5808 .name = "always-on",
5809 .always_on = 1,
5810 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
5811 .ops = &i9xx_always_on_power_well_ops,
5812 },
5813 {
5814 .name = "display",
5815 .domains = VLV_DISPLAY_POWER_DOMAINS,
5816 .data = PUNIT_POWER_WELL_DISP2D,
5817 .ops = &vlv_display_power_well_ops,
5818 },
5819 {
5820 .name = "dpio-common",
5821 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
5822 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
5823 .ops = &vlv_dpio_power_well_ops,
5824 },
5825 {
5826 .name = "dpio-tx-b-01",
5827 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5828 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5829 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5830 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5831 .ops = &vlv_dpio_power_well_ops,
5832 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
5833 },
5834 {
5835 .name = "dpio-tx-b-23",
5836 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5837 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5838 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5839 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5840 .ops = &vlv_dpio_power_well_ops,
5841 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
5842 },
5843 {
5844 .name = "dpio-tx-c-01",
5845 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5846 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5847 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5848 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5849 .ops = &vlv_dpio_power_well_ops,
5850 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
5851 },
5852 {
5853 .name = "dpio-tx-c-23",
5854 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5855 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5856 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5857 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5858 .ops = &vlv_dpio_power_well_ops,
5859 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
5387 }, 5860 },
5388}; 5861};
5389 5862
@@ -5392,9 +5865,8 @@ static struct i915_power_well bdw_power_wells[] = {
5392 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 5865 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
5393}) 5866})
5394 5867
5395int intel_power_domains_init(struct drm_device *dev) 5868int intel_power_domains_init(struct drm_i915_private *dev_priv)
5396{ 5869{
5397 struct drm_i915_private *dev_priv = dev->dev_private;
5398 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5870 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5399 5871
5400 mutex_init(&power_domains->lock); 5872 mutex_init(&power_domains->lock);
@@ -5403,12 +5875,14 @@ int intel_power_domains_init(struct drm_device *dev)
5403 * The enabling order will be from lower to higher indexed wells, 5875 * The enabling order will be from lower to higher indexed wells,
5404 * the disabling order is reversed. 5876 * the disabling order is reversed.
5405 */ 5877 */
5406 if (IS_HASWELL(dev)) { 5878 if (IS_HASWELL(dev_priv->dev)) {
5407 set_power_wells(power_domains, hsw_power_wells); 5879 set_power_wells(power_domains, hsw_power_wells);
5408 hsw_pwr = power_domains; 5880 hsw_pwr = power_domains;
5409 } else if (IS_BROADWELL(dev)) { 5881 } else if (IS_BROADWELL(dev_priv->dev)) {
5410 set_power_wells(power_domains, bdw_power_wells); 5882 set_power_wells(power_domains, bdw_power_wells);
5411 hsw_pwr = power_domains; 5883 hsw_pwr = power_domains;
5884 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
5885 set_power_wells(power_domains, vlv_power_wells);
5412 } else { 5886 } else {
5413 set_power_wells(power_domains, i9xx_always_on_power_well); 5887 set_power_wells(power_domains, i9xx_always_on_power_well);
5414 } 5888 }
@@ -5416,58 +5890,38 @@ int intel_power_domains_init(struct drm_device *dev)
5416 return 0; 5890 return 0;
5417} 5891}
5418 5892
5419void intel_power_domains_remove(struct drm_device *dev) 5893void intel_power_domains_remove(struct drm_i915_private *dev_priv)
5420{ 5894{
5421 hsw_pwr = NULL; 5895 hsw_pwr = NULL;
5422} 5896}
5423 5897
5424static void intel_power_domains_resume(struct drm_device *dev) 5898static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
5425{ 5899{
5426 struct drm_i915_private *dev_priv = dev->dev_private;
5427 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5900 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5428 struct i915_power_well *power_well; 5901 struct i915_power_well *power_well;
5429 int i; 5902 int i;
5430 5903
5431 mutex_lock(&power_domains->lock); 5904 mutex_lock(&power_domains->lock);
5432 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { 5905 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
5433 if (power_well->set) 5906 power_well->ops->sync_hw(dev_priv, power_well);
5434 power_well->set(dev, power_well, power_well->count > 0);
5435 }
5436 mutex_unlock(&power_domains->lock); 5907 mutex_unlock(&power_domains->lock);
5437} 5908}
5438 5909
5439/* 5910void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
5440 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5441 * when not needed anymore. We have 4 registers that can request the power well
5442 * to be enabled, and it will only be disabled if none of the registers is
5443 * requesting it to be enabled.
5444 */
5445void intel_power_domains_init_hw(struct drm_device *dev)
5446{ 5911{
5447 struct drm_i915_private *dev_priv = dev->dev_private;
5448
5449 /* For now, we need the power well to be always enabled. */ 5912 /* For now, we need the power well to be always enabled. */
5450 intel_display_set_init_power(dev, true); 5913 intel_display_set_init_power(dev_priv, true);
5451 intel_power_domains_resume(dev); 5914 intel_power_domains_resume(dev_priv);
5452
5453 if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
5454 return;
5455
5456 /* We're taking over the BIOS, so clear any requests made by it since
5457 * the driver is in charge now. */
5458 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5459 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5460} 5915}
5461 5916
5462/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5463void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) 5917void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5464{ 5918{
5465 hsw_disable_package_c8(dev_priv); 5919 intel_runtime_pm_get(dev_priv);
5466} 5920}
5467 5921
5468void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) 5922void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5469{ 5923{
5470 hsw_enable_package_c8(dev_priv); 5924 intel_runtime_pm_put(dev_priv);
5471} 5925}
5472 5926
5473void intel_runtime_pm_get(struct drm_i915_private *dev_priv) 5927void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -5499,8 +5953,6 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
5499 struct drm_device *dev = dev_priv->dev; 5953 struct drm_device *dev = dev_priv->dev;
5500 struct device *device = &dev->pdev->dev; 5954 struct device *device = &dev->pdev->dev;
5501 5955
5502 dev_priv->pm.suspended = false;
5503
5504 if (!HAS_RUNTIME_PM(dev)) 5956 if (!HAS_RUNTIME_PM(dev))
5505 return; 5957 return;
5506 5958
@@ -5509,6 +5961,8 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
5509 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ 5961 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
5510 pm_runtime_mark_last_busy(device); 5962 pm_runtime_mark_last_busy(device);
5511 pm_runtime_use_autosuspend(device); 5963 pm_runtime_use_autosuspend(device);
5964
5965 pm_runtime_put_autosuspend(device);
5512} 5966}
5513 5967
5514void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) 5968void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
@@ -5560,7 +6014,7 @@ void intel_init_pm(struct drm_device *dev)
5560 6014
5561 /* For FIFO watermark updates */ 6015 /* For FIFO watermark updates */
5562 if (HAS_PCH_SPLIT(dev)) { 6016 if (HAS_PCH_SPLIT(dev)) {
5563 intel_setup_wm_latency(dev); 6017 ilk_setup_wm_latency(dev);
5564 6018
5565 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 6019 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
5566 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || 6020 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
@@ -5731,13 +6185,9 @@ void intel_pm_setup(struct drm_device *dev)
5731 6185
5732 mutex_init(&dev_priv->rps.hw_lock); 6186 mutex_init(&dev_priv->rps.hw_lock);
5733 6187
5734 mutex_init(&dev_priv->pc8.lock);
5735 dev_priv->pc8.requirements_met = false;
5736 dev_priv->pc8.gpu_idle = false;
5737 dev_priv->pc8.irqs_disabled = false;
5738 dev_priv->pc8.enabled = false;
5739 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
5740 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
5741 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6188 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5742 intel_gen6_powersave_work); 6189 intel_gen6_powersave_work);
6190
6191 dev_priv->pm.suspended = false;
6192 dev_priv->pm.irqs_disabled = false;
5743} 6193}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31b36c5ac894..6bc68bdcf433 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -406,17 +406,24 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
406static void ring_write_tail(struct intel_ring_buffer *ring, 406static void ring_write_tail(struct intel_ring_buffer *ring,
407 u32 value) 407 u32 value)
408{ 408{
409 drm_i915_private_t *dev_priv = ring->dev->dev_private; 409 struct drm_i915_private *dev_priv = ring->dev->dev_private;
410 I915_WRITE_TAIL(ring, value); 410 I915_WRITE_TAIL(ring, value);
411} 411}
412 412
413u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) 413u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
414{ 414{
415 drm_i915_private_t *dev_priv = ring->dev->dev_private; 415 struct drm_i915_private *dev_priv = ring->dev->dev_private;
416 u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? 416 u64 acthd;
417 RING_ACTHD(ring->mmio_base) : ACTHD; 417
418 if (INTEL_INFO(ring->dev)->gen >= 8)
419 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
420 RING_ACTHD_UDW(ring->mmio_base));
421 else if (INTEL_INFO(ring->dev)->gen >= 4)
422 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
423 else
424 acthd = I915_READ(ACTHD);
418 425
419 return I915_READ(acthd_reg); 426 return acthd;
420} 427}
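
On gen8 ACTHD grows past 32 bits, so the active head is now assembled from two 32-bit MMIO reads. Reading a 64-bit counter in halves is only safe if a carry cannot slip in between the two reads; a READ64_2x32-style helper typically rereads the upper half until it is stable. A sketch under that assumption, with a hypothetical read_reg() accessor:

    #include <stdint.h>

    static uint32_t regs[2];        /* fake register file: [0]=low, [1]=high */
    static uint32_t read_reg(int off) { return regs[off]; }

    /* Compose a 64-bit value from low/high 32-bit halves, retrying
     * until the high half is stable across the low read. */
    static uint64_t read64_2x32(int lo_off, int hi_off)
    {
            uint32_t hi, lo, tmp;

            hi = read_reg(hi_off);
            do {
                    tmp = hi;
                    lo  = read_reg(lo_off);
                    hi  = read_reg(hi_off);
            } while (hi != tmp);    /* carry happened: reread */

            return ((uint64_t)hi << 32) | lo;
    }
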
421 428
422static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) 429static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
@@ -433,22 +440,24 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
433static int init_ring_common(struct intel_ring_buffer *ring) 440static int init_ring_common(struct intel_ring_buffer *ring)
434{ 441{
435 struct drm_device *dev = ring->dev; 442 struct drm_device *dev = ring->dev;
436 drm_i915_private_t *dev_priv = dev->dev_private; 443 struct drm_i915_private *dev_priv = dev->dev_private;
437 struct drm_i915_gem_object *obj = ring->obj; 444 struct drm_i915_gem_object *obj = ring->obj;
438 int ret = 0; 445 int ret = 0;
439 u32 head; 446 u32 head;
440 447
441 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 448 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
442 449
443 if (I915_NEED_GFX_HWS(dev))
444 intel_ring_setup_status_page(ring);
445 else
446 ring_setup_phys_status_page(ring);
447
448 /* Stop the ring if it's running. */ 450 /* Stop the ring if it's running. */
449 I915_WRITE_CTL(ring, 0); 451 I915_WRITE_CTL(ring, 0);
450 I915_WRITE_HEAD(ring, 0); 452 I915_WRITE_HEAD(ring, 0);
451 ring->write_tail(ring, 0); 453 ring->write_tail(ring, 0);
454 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
455	DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
456
457 if (I915_NEED_GFX_HWS(dev))
458 intel_ring_setup_status_page(ring);
459 else
460 ring_setup_phys_status_page(ring);
452 461
453 head = I915_READ_HEAD(ring) & HEAD_ADDR; 462 head = I915_READ_HEAD(ring) & HEAD_ADDR;
454 463
@@ -531,9 +540,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
531 goto err; 540 goto err;
532 } 541 }
533 542
534 i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 543 ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
544 if (ret)
545 goto err_unref;
535 546
536 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); 547 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
537 if (ret) 548 if (ret)
538 goto err_unref; 549 goto err_unref;
539 550
@@ -549,7 +560,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
549 return 0; 560 return 0;
550 561
551err_unpin: 562err_unpin:
552 i915_gem_object_unpin(ring->scratch.obj); 563 i915_gem_object_ggtt_unpin(ring->scratch.obj);
553err_unref: 564err_unref:
554 drm_gem_object_unreference(&ring->scratch.obj->base); 565 drm_gem_object_unreference(&ring->scratch.obj->base);
555err: 566err:
@@ -562,14 +573,15 @@ static int init_render_ring(struct intel_ring_buffer *ring)
562 struct drm_i915_private *dev_priv = dev->dev_private; 573 struct drm_i915_private *dev_priv = dev->dev_private;
563 int ret = init_ring_common(ring); 574 int ret = init_ring_common(ring);
564 575
565 if (INTEL_INFO(dev)->gen > 3) 576 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
577 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
566 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 578 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
567 579
568 /* We need to disable the AsyncFlip performance optimisations in order 580 /* We need to disable the AsyncFlip performance optimisations in order
569 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 581 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
570 * programmed to '1' on all products. 582 * programmed to '1' on all products.
571 * 583 *
572 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 584 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
573 */ 585 */
574 if (INTEL_INFO(dev)->gen >= 6) 586 if (INTEL_INFO(dev)->gen >= 6)
575 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 587 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -625,7 +637,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
625 637
626 if (INTEL_INFO(dev)->gen >= 5) { 638 if (INTEL_INFO(dev)->gen >= 5) {
627 kunmap(sg_page(ring->scratch.obj->pages->sgl)); 639 kunmap(sg_page(ring->scratch.obj->pages->sgl));
628 i915_gem_object_unpin(ring->scratch.obj); 640 i915_gem_object_ggtt_unpin(ring->scratch.obj);
629 } 641 }
630 642
631 drm_gem_object_unreference(&ring->scratch.obj->base); 643 drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -809,8 +821,11 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
809 /* Workaround to force correct ordering between irq and seqno writes on 821 /* Workaround to force correct ordering between irq and seqno writes on
810 * ivb (and maybe also on snb) by reading from a CS register (like 822 * ivb (and maybe also on snb) by reading from a CS register (like
811 * ACTHD) before reading the status page. */ 823 * ACTHD) before reading the status page. */
812 if (!lazy_coherency) 824 if (!lazy_coherency) {
813 intel_ring_get_active_head(ring); 825 struct drm_i915_private *dev_priv = ring->dev->dev_private;
826 POSTING_READ(RING_ACTHD(ring->mmio_base));
827 }
828
814 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 829 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
815} 830}
816 831
@@ -842,7 +857,7 @@ static bool
842gen5_ring_get_irq(struct intel_ring_buffer *ring) 857gen5_ring_get_irq(struct intel_ring_buffer *ring)
843{ 858{
844 struct drm_device *dev = ring->dev; 859 struct drm_device *dev = ring->dev;
845 drm_i915_private_t *dev_priv = dev->dev_private; 860 struct drm_i915_private *dev_priv = dev->dev_private;
846 unsigned long flags; 861 unsigned long flags;
847 862
848 if (!dev->irq_enabled) 863 if (!dev->irq_enabled)
@@ -860,7 +875,7 @@ static void
860gen5_ring_put_irq(struct intel_ring_buffer *ring) 875gen5_ring_put_irq(struct intel_ring_buffer *ring)
861{ 876{
862 struct drm_device *dev = ring->dev; 877 struct drm_device *dev = ring->dev;
863 drm_i915_private_t *dev_priv = dev->dev_private; 878 struct drm_i915_private *dev_priv = dev->dev_private;
864 unsigned long flags; 879 unsigned long flags;
865 880
866 spin_lock_irqsave(&dev_priv->irq_lock, flags); 881 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -873,7 +888,7 @@ static bool
873i9xx_ring_get_irq(struct intel_ring_buffer *ring) 888i9xx_ring_get_irq(struct intel_ring_buffer *ring)
874{ 889{
875 struct drm_device *dev = ring->dev; 890 struct drm_device *dev = ring->dev;
876 drm_i915_private_t *dev_priv = dev->dev_private; 891 struct drm_i915_private *dev_priv = dev->dev_private;
877 unsigned long flags; 892 unsigned long flags;
878 893
879 if (!dev->irq_enabled) 894 if (!dev->irq_enabled)
@@ -894,7 +909,7 @@ static void
894i9xx_ring_put_irq(struct intel_ring_buffer *ring) 909i9xx_ring_put_irq(struct intel_ring_buffer *ring)
895{ 910{
896 struct drm_device *dev = ring->dev; 911 struct drm_device *dev = ring->dev;
897 drm_i915_private_t *dev_priv = dev->dev_private; 912 struct drm_i915_private *dev_priv = dev->dev_private;
898 unsigned long flags; 913 unsigned long flags;
899 914
900 spin_lock_irqsave(&dev_priv->irq_lock, flags); 915 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -910,7 +925,7 @@ static bool
910i8xx_ring_get_irq(struct intel_ring_buffer *ring) 925i8xx_ring_get_irq(struct intel_ring_buffer *ring)
911{ 926{
912 struct drm_device *dev = ring->dev; 927 struct drm_device *dev = ring->dev;
913 drm_i915_private_t *dev_priv = dev->dev_private; 928 struct drm_i915_private *dev_priv = dev->dev_private;
914 unsigned long flags; 929 unsigned long flags;
915 930
916 if (!dev->irq_enabled) 931 if (!dev->irq_enabled)
@@ -931,7 +946,7 @@ static void
931i8xx_ring_put_irq(struct intel_ring_buffer *ring) 946i8xx_ring_put_irq(struct intel_ring_buffer *ring)
932{ 947{
933 struct drm_device *dev = ring->dev; 948 struct drm_device *dev = ring->dev;
934 drm_i915_private_t *dev_priv = dev->dev_private; 949 struct drm_i915_private *dev_priv = dev->dev_private;
935 unsigned long flags; 950 unsigned long flags;
936 951
937 spin_lock_irqsave(&dev_priv->irq_lock, flags); 952 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -946,7 +961,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
946void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 961void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
947{ 962{
948 struct drm_device *dev = ring->dev; 963 struct drm_device *dev = ring->dev;
949 drm_i915_private_t *dev_priv = ring->dev->dev_private; 964 struct drm_i915_private *dev_priv = ring->dev->dev_private;
950 u32 mmio = 0; 965 u32 mmio = 0;
951 966
952 /* The ring status page addresses are no longer next to the rest of 967 /* The ring status page addresses are no longer next to the rest of
@@ -977,9 +992,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
977 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 992 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
978 POSTING_READ(mmio); 993 POSTING_READ(mmio);
979 994
980 /* Flush the TLB for this page */ 995 /*
981 if (INTEL_INFO(dev)->gen >= 6) { 996 * Flush the TLB for this page
997 *
998 * FIXME: These two bits have disappeared on gen8, so a question
999 * arises: do we still need this and if so how should we go about
1000 * invalidating the TLB?
1001 */
1002 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
982 u32 reg = RING_INSTPM(ring->mmio_base); 1003 u32 reg = RING_INSTPM(ring->mmio_base);
1004
1005	/* ring should be idle before issuing a sync flush */
1006 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1007
983 I915_WRITE(reg, 1008 I915_WRITE(reg,
984 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 1009 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
985 INSTPM_SYNC_FLUSH)); 1010 INSTPM_SYNC_FLUSH));
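
MI_MODE and INSTPM are "masked" registers: bits 31:16 are a per-bit write enable for bits 15:0, so a single write can flip one control bit without a read-modify-write and without disturbing its neighbors. That is all the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers used here build; a standalone rendering:

    #include <stdint.h>

    /* b must fit in the low 16 bits. Set bit(s) b: write them in both
     * the enable mask (31:16) and the value field (15:0). */
    #define MASKED_BIT_ENABLE(b)   ((uint32_t)(((b) << 16) | (b)))
    /* Clear bit(s) b: enable them in the mask but leave the value 0.
     * Bits whose mask half is 0 are untouched by the write. */
    #define MASKED_BIT_DISABLE(b)  ((uint32_t)((b) << 16))
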
@@ -1029,7 +1054,7 @@ static bool
1029gen6_ring_get_irq(struct intel_ring_buffer *ring) 1054gen6_ring_get_irq(struct intel_ring_buffer *ring)
1030{ 1055{
1031 struct drm_device *dev = ring->dev; 1056 struct drm_device *dev = ring->dev;
1032 drm_i915_private_t *dev_priv = dev->dev_private; 1057 struct drm_i915_private *dev_priv = dev->dev_private;
1033 unsigned long flags; 1058 unsigned long flags;
1034 1059
1035 if (!dev->irq_enabled) 1060 if (!dev->irq_enabled)
@@ -1054,7 +1079,7 @@ static void
1054gen6_ring_put_irq(struct intel_ring_buffer *ring) 1079gen6_ring_put_irq(struct intel_ring_buffer *ring)
1055{ 1080{
1056 struct drm_device *dev = ring->dev; 1081 struct drm_device *dev = ring->dev;
1057 drm_i915_private_t *dev_priv = dev->dev_private; 1082 struct drm_i915_private *dev_priv = dev->dev_private;
1058 unsigned long flags; 1083 unsigned long flags;
1059 1084
1060 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1085 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1253,7 +1278,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
1253 return; 1278 return;
1254 1279
1255 kunmap(sg_page(obj->pages->sgl)); 1280 kunmap(sg_page(obj->pages->sgl));
1256 i915_gem_object_unpin(obj); 1281 i915_gem_object_ggtt_unpin(obj);
1257 drm_gem_object_unreference(&obj->base); 1282 drm_gem_object_unreference(&obj->base);
1258 ring->status_page.obj = NULL; 1283 ring->status_page.obj = NULL;
1259} 1284}
@@ -1271,12 +1296,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
1271 goto err; 1296 goto err;
1272 } 1297 }
1273 1298
1274 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1299 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1300 if (ret)
1301 goto err_unref;
1275 1302
1276 ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 1303 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
1277 if (ret != 0) { 1304 if (ret)
1278 goto err_unref; 1305 goto err_unref;
1279 }
1280 1306
1281 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1307 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1282 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1308 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1293,7 +1319,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
1293 return 0; 1319 return 0;
1294 1320
1295err_unpin: 1321err_unpin:
1296 i915_gem_object_unpin(obj); 1322 i915_gem_object_ggtt_unpin(obj);
1297err_unref: 1323err_unref:
1298 drm_gem_object_unreference(&obj->base); 1324 drm_gem_object_unreference(&obj->base);
1299err: 1325err:
@@ -1356,7 +1382,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1356 1382
1357 ring->obj = obj; 1383 ring->obj = obj;
1358 1384
1359 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); 1385 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1360 if (ret) 1386 if (ret)
1361 goto err_unref; 1387 goto err_unref;
1362 1388
@@ -1385,12 +1411,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1385 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1411 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1386 ring->effective_size -= 128; 1412 ring->effective_size -= 128;
1387 1413
1414 i915_cmd_parser_init_ring(ring);
1415
1388 return 0; 1416 return 0;
1389 1417
1390err_unmap: 1418err_unmap:
1391 iounmap(ring->virtual_start); 1419 iounmap(ring->virtual_start);
1392err_unpin: 1420err_unpin:
1393 i915_gem_object_unpin(obj); 1421 i915_gem_object_ggtt_unpin(obj);
1394err_unref: 1422err_unref:
1395 drm_gem_object_unreference(&obj->base); 1423 drm_gem_object_unreference(&obj->base);
1396 ring->obj = NULL; 1424 ring->obj = NULL;
@@ -1418,7 +1446,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1418 1446
1419 iounmap(ring->virtual_start); 1447 iounmap(ring->virtual_start);
1420 1448
1421 i915_gem_object_unpin(ring->obj); 1449 i915_gem_object_ggtt_unpin(ring->obj);
1422 drm_gem_object_unreference(&ring->obj->base); 1450 drm_gem_object_unreference(&ring->obj->base);
1423 ring->obj = NULL; 1451 ring->obj = NULL;
1424 ring->preallocated_lazy_request = NULL; 1452 ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1458,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1430 cleanup_status_page(ring); 1458 cleanup_status_page(ring);
1431} 1459}
1432 1460
1433static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1434{
1435 int ret;
1436
1437 ret = i915_wait_seqno(ring, seqno);
1438 if (!ret)
1439 i915_gem_retire_requests_ring(ring);
1440
1441 return ret;
1442}
1443
1444static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1461static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1445{ 1462{
1446 struct drm_i915_gem_request *request; 1463 struct drm_i915_gem_request *request;
1447 u32 seqno = 0; 1464 u32 seqno = 0, tail;
1448 int ret; 1465 int ret;
1449 1466
1450 i915_gem_retire_requests_ring(ring);
1451
1452 if (ring->last_retired_head != -1) { 1467 if (ring->last_retired_head != -1) {
1453 ring->head = ring->last_retired_head; 1468 ring->head = ring->last_retired_head;
1454 ring->last_retired_head = -1; 1469 ring->last_retired_head = -1;
1470
1455 ring->space = ring_space(ring); 1471 ring->space = ring_space(ring);
1456 if (ring->space >= n) 1472 if (ring->space >= n)
1457 return 0; 1473 return 0;
@@ -1468,6 +1484,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1468 space += ring->size; 1484 space += ring->size;
1469 if (space >= n) { 1485 if (space >= n) {
1470 seqno = request->seqno; 1486 seqno = request->seqno;
1487 tail = request->tail;
1471 break; 1488 break;
1472 } 1489 }
1473 1490
@@ -1482,15 +1499,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1482 if (seqno == 0) 1499 if (seqno == 0)
1483 return -ENOSPC; 1500 return -ENOSPC;
1484 1501
1485 ret = intel_ring_wait_seqno(ring, seqno); 1502 ret = i915_wait_seqno(ring, seqno);
1486 if (ret) 1503 if (ret)
1487 return ret; 1504 return ret;
1488 1505
1489 if (WARN_ON(ring->last_retired_head == -1)) 1506 ring->head = tail;
1490 return -ENOSPC;
1491
1492 ring->head = ring->last_retired_head;
1493 ring->last_retired_head = -1;
1494 ring->space = ring_space(ring); 1507 ring->space = ring_space(ring);
1495 if (WARN_ON(ring->space < n)) 1508 if (WARN_ON(ring->space < n))
1496 return -ENOSPC; 1509 return -ENOSPC;
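
The bookkeeping above all serves one computation: how much space is free in a circular ring given a consumer head and producer tail, with a small reserve so a completely full ring is never mistaken for an empty one. The arithmetic with a worked call, as a sketch (the reserve constant is illustrative):

    #include <stdio.h>

    /* Free bytes in a circular ring. 'reserve' keeps tail from ever
     * catching up to head, since full and empty would otherwise look
     * identical (head == tail). */
    static int ring_space(int head, int tail, int size, int reserve)
    {
            int space = head - (tail + reserve);

            if (space < 0)          /* producer has wrapped past the end */
                    space += size;
            return space;
    }

    int main(void)
    {
            /* 4 KiB ring, tail near the end, head near the start */
            printf("%d\n", ring_space(512, 3968, 4096, 64));  /* prints 576 */
            return 0;
    }
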
@@ -1528,7 +1541,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1528 return 0; 1541 return 0;
1529 } 1542 }
1530 1543
1531 if (dev->primary->master) { 1544 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1545 dev->primary->master) {
1532 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1546 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1533 if (master_priv->sarea_priv) 1547 if (master_priv->sarea_priv)
1534 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1548 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
@@ -1632,7 +1646,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1632int intel_ring_begin(struct intel_ring_buffer *ring, 1646int intel_ring_begin(struct intel_ring_buffer *ring,
1633 int num_dwords) 1647 int num_dwords)
1634{ 1648{
1635 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1649 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1636 int ret; 1650 int ret;
1637 1651
1638 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1652 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
@@ -1694,7 +1708,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1694static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1708static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1695 u32 value) 1709 u32 value)
1696{ 1710{
1697 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1711 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1698 1712
1699 /* Every tail move must follow the sequence below */ 1713 /* Every tail move must follow the sequence below */
1700 1714
@@ -1869,7 +1883,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1869 1883
1870int intel_init_render_ring_buffer(struct drm_device *dev) 1884int intel_init_render_ring_buffer(struct drm_device *dev)
1871{ 1885{
1872 drm_i915_private_t *dev_priv = dev->dev_private; 1886 struct drm_i915_private *dev_priv = dev->dev_private;
1873 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1887 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1874 1888
1875 ring->name = "render ring"; 1889 ring->name = "render ring";
@@ -1954,7 +1968,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1954 return -ENOMEM; 1968 return -ENOMEM;
1955 } 1969 }
1956 1970
1957 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); 1971 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
1958 if (ret != 0) { 1972 if (ret != 0) {
1959 drm_gem_object_unreference(&obj->base); 1973 drm_gem_object_unreference(&obj->base);
1960		DRM_ERROR("Failed to pin batch bo\n"); 1974		DRM_ERROR("Failed to pin batch bo\n");
@@ -1970,7 +1984,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1970 1984
1971int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) 1985int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1972{ 1986{
1973 drm_i915_private_t *dev_priv = dev->dev_private; 1987 struct drm_i915_private *dev_priv = dev->dev_private;
1974 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1988 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1975 int ret; 1989 int ret;
1976 1990
@@ -2038,7 +2052,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2038 2052
2039int intel_init_bsd_ring_buffer(struct drm_device *dev) 2053int intel_init_bsd_ring_buffer(struct drm_device *dev)
2040{ 2054{
2041 drm_i915_private_t *dev_priv = dev->dev_private; 2055 struct drm_i915_private *dev_priv = dev->dev_private;
2042 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 2056 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
2043 2057
2044 ring->name = "bsd ring"; 2058 ring->name = "bsd ring";
@@ -2101,7 +2115,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2101 2115
2102int intel_init_blt_ring_buffer(struct drm_device *dev) 2116int intel_init_blt_ring_buffer(struct drm_device *dev)
2103{ 2117{
2104 drm_i915_private_t *dev_priv = dev->dev_private; 2118 struct drm_i915_private *dev_priv = dev->dev_private;
2105 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 2119 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2106 2120
2107 ring->name = "blitter ring"; 2121 ring->name = "blitter ring";
@@ -2141,7 +2155,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2141 2155
2142int intel_init_vebox_ring_buffer(struct drm_device *dev) 2156int intel_init_vebox_ring_buffer(struct drm_device *dev)
2143{ 2157{
2144 drm_i915_private_t *dev_priv = dev->dev_private; 2158 struct drm_i915_private *dev_priv = dev->dev_private;
2145 struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; 2159 struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2146 2160
2147 ring->name = "video enhancement ring"; 2161 ring->name = "video enhancement ring";
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0b243ce33714..270a6a973438 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,6 +33,8 @@ struct intel_hw_status_page {
33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) 33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
37
36enum intel_ring_hangcheck_action { 38enum intel_ring_hangcheck_action {
37 HANGCHECK_IDLE = 0, 39 HANGCHECK_IDLE = 0,
38 HANGCHECK_WAIT, 40 HANGCHECK_WAIT,
@@ -41,12 +43,14 @@ enum intel_ring_hangcheck_action {
41 HANGCHECK_HUNG, 43 HANGCHECK_HUNG,
42}; 44};
43 45
46#define HANGCHECK_SCORE_RING_HUNG 31
47
44struct intel_ring_hangcheck { 48struct intel_ring_hangcheck {
45 bool deadlock; 49 u64 acthd;
46 u32 seqno; 50 u32 seqno;
47 u32 acthd;
48 int score; 51 int score;
49 enum intel_ring_hangcheck_action action; 52 enum intel_ring_hangcheck_action action;
53 bool deadlock;
50}; 54};
51 55
52struct intel_ring_buffer { 56struct intel_ring_buffer {
@@ -162,6 +166,38 @@ struct intel_ring_buffer {
162 u32 gtt_offset; 166 u32 gtt_offset;
163 volatile u32 *cpu_page; 167 volatile u32 *cpu_page;
164 } scratch; 168 } scratch;
169
170 /*
171 * Tables of commands the command parser needs to know about
172 * for this ring.
173 */
174 const struct drm_i915_cmd_table *cmd_tables;
175 int cmd_table_count;
176
177 /*
178 * Table of registers allowed in commands that read/write registers.
179 */
180 const u32 *reg_table;
181 int reg_count;
182
183 /*
184 * Table of registers allowed in commands that read/write registers, but
185 * only from the DRM master.
186 */
187 const u32 *master_reg_table;
188 int master_reg_count;
189
190 /*
191 * Returns the bitmask for the length field of the specified command.
192 * Return 0 for an unrecognized/invalid command.
193 *
194 * If the command parser finds an entry for a command in the ring's
195 * cmd_tables, it gets the command's length based on the table entry.
196 * If not, it calls this function to determine the per-ring length field
197 * encoding for the command (i.e. certain opcode ranges use certain bits
198 * to encode the command length in the header).
199 */
200 u32 (*get_cmd_length_mask)(u32 cmd_header);
165}; 201};
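
The new fields feed the command parser: commands listed in cmd_tables carry their length with the table entry, and anything else falls back to get_cmd_length_mask(), which maps the opcode's client/range bits to wherever that range encodes its DWORD count. A hypothetical decode, not the real per-ring tables:

    #include <stdint.h>

    /* Return the header mask covering the command's length field, or 0
     * for an opcode range we do not recognize (caller rejects those). */
    static uint32_t get_cmd_length_mask(uint32_t cmd_header)
    {
            uint32_t client = cmd_header >> 29;     /* top 3 bits pick the client */

            if (client == 0x0)                      /* e.g. MI-style commands */
                    return 0x0000003f;              /* length lives in bits 5:0 */

            return 0;                               /* unknown: reject the batch */
    }
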
166 202
167static inline bool 203static inline bool
@@ -256,7 +292,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev);
256int intel_init_blt_ring_buffer(struct drm_device *dev); 292int intel_init_blt_ring_buffer(struct drm_device *dev);
257int intel_init_vebox_ring_buffer(struct drm_device *dev); 293int intel_init_vebox_ring_buffer(struct drm_device *dev);
258 294
259u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 295u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
260void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 296void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
261 297
262static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) 298static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 95bdfb3c431c..d27155adf5db 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1461 u32 temp; 1461 u32 temp;
1462 bool input1, input2; 1462 bool input1, input2;
1463 int i; 1463 int i;
1464 u8 status; 1464 bool success;
1465 1465
1466 temp = I915_READ(intel_sdvo->sdvo_reg); 1466 temp = I915_READ(intel_sdvo->sdvo_reg);
1467 if ((temp & SDVO_ENABLE) == 0) { 1467 if ((temp & SDVO_ENABLE) == 0) {
@@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1475 for (i = 0; i < 2; i++) 1475 for (i = 0; i < 2; i++)
1476 intel_wait_for_vblank(dev, intel_crtc->pipe); 1476 intel_wait_for_vblank(dev, intel_crtc->pipe);
1477 1477
1478 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); 1478 success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1479 /* Warn if the device reported failure to sync. 1479 /* Warn if the device reported failure to sync.
1480 * A lot of SDVO devices fail to notify of sync, but it's 1480 * A lot of SDVO devices fail to notify of sync, but it's
1481 * a given it the status is a success, we succeeded. 1481 * a given it the status is a success, we succeeded.
1482 */ 1482 */
1483 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { 1483 if (success && !input1) {
1484 DRM_DEBUG_KMS("First %s output reported failure to " 1484 DRM_DEBUG_KMS("First %s output reported failure to "
1485 "sync\n", SDVO_NAME(intel_sdvo)); 1485 "sync\n", SDVO_NAME(intel_sdvo));
1486 } 1486 }
@@ -2382,24 +2382,62 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2382} 2382}
2383 2383
2384static void 2384static void
2385intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
2386{
2387 struct drm_connector *drm_connector;
2388 struct intel_sdvo *sdvo_encoder;
2389
2390 drm_connector = &intel_connector->base;
2391 sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
2392
2393 sysfs_remove_link(&drm_connector->kdev->kobj,
2394 sdvo_encoder->ddc.dev.kobj.name);
2395 intel_connector_unregister(intel_connector);
2396}
2397
2398static int
2385intel_sdvo_connector_init(struct intel_sdvo_connector *connector, 2399intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2386 struct intel_sdvo *encoder) 2400 struct intel_sdvo *encoder)
2387{ 2401{
2388 drm_connector_init(encoder->base.base.dev, 2402 struct drm_connector *drm_connector;
2389 &connector->base.base, 2403 int ret;
2404
2405 drm_connector = &connector->base.base;
2406 ret = drm_connector_init(encoder->base.base.dev,
2407 drm_connector,
2390 &intel_sdvo_connector_funcs, 2408 &intel_sdvo_connector_funcs,
2391 connector->base.base.connector_type); 2409 connector->base.base.connector_type);
2410 if (ret < 0)
2411 return ret;
2392 2412
2393 drm_connector_helper_add(&connector->base.base, 2413 drm_connector_helper_add(drm_connector,
2394 &intel_sdvo_connector_helper_funcs); 2414 &intel_sdvo_connector_helper_funcs);
2395 2415
2396 connector->base.base.interlace_allowed = 1; 2416 connector->base.base.interlace_allowed = 1;
2397 connector->base.base.doublescan_allowed = 0; 2417 connector->base.base.doublescan_allowed = 0;
2398 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2418 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2399 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; 2419 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2420 connector->base.unregister = intel_sdvo_connector_unregister;
2400 2421
2401 intel_connector_attach_encoder(&connector->base, &encoder->base); 2422 intel_connector_attach_encoder(&connector->base, &encoder->base);
2402 drm_sysfs_connector_add(&connector->base.base); 2423 ret = drm_sysfs_connector_add(drm_connector);
2424 if (ret < 0)
2425 goto err1;
2426
2427 ret = sysfs_create_link(&encoder->ddc.dev.kobj,
2428 &drm_connector->kdev->kobj,
2429 encoder->ddc.dev.kobj.name);
2430 if (ret < 0)
2431 goto err2;
2432
2433 return 0;
2434
2435err2:
2436 drm_sysfs_connector_remove(drm_connector);
2437err1:
2438 drm_connector_cleanup(drm_connector);
2439
2440 return ret;
2403} 2441}
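
intel_sdvo_connector_init now propagates failures and unwinds in strict reverse order: a failed sysfs link tears down the sysfs node, which in turn tears down the connector. The kernel's goto-label idiom for this, reduced to a self-contained shape with malloc standing in for the real setup steps:

    #include <stdlib.h>

    /* Each label undoes exactly the steps that succeeded before the
     * failing one, mirroring err2/err1 above. */
    static int init_two_steps(void **pa, void **pb)
    {
            *pa = malloc(64);               /* step 1 */
            if (!*pa)
                    goto err;

            *pb = malloc(64);               /* step 2 */
            if (!*pb)
                    goto err_free_a;

            return 0;                       /* caller now owns *pa and *pb */

    err_free_a:
            free(*pa);
    err:
            return -1;
    }
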
2404 2442
2405static void 2443static void
@@ -2459,7 +2497,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2459 intel_sdvo->is_hdmi = true; 2497 intel_sdvo->is_hdmi = true;
2460 } 2498 }
2461 2499
2462 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2500 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
2501 kfree(intel_sdvo_connector);
2502 return false;
2503 }
2504
2463 if (intel_sdvo->is_hdmi) 2505 if (intel_sdvo->is_hdmi)
2464 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); 2506 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
2465 2507
@@ -2490,7 +2532,10 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2490 2532
2491 intel_sdvo->is_tv = true; 2533 intel_sdvo->is_tv = true;
2492 2534
2493 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2535 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
2536 kfree(intel_sdvo_connector);
2537 return false;
2538 }
2494 2539
2495 if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) 2540 if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
2496 goto err; 2541 goto err;
@@ -2534,8 +2579,11 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2534 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2579 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2535 } 2580 }
2536 2581
2537 intel_sdvo_connector_init(intel_sdvo_connector, 2582 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
2538 intel_sdvo); 2583 kfree(intel_sdvo_connector);
2584 return false;
2585 }
2586
2539 return true; 2587 return true;
2540} 2588}
2541 2589
@@ -2566,7 +2614,11 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2566 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2614 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2567 } 2615 }
2568 2616
2569 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2617 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
2618 kfree(intel_sdvo_connector);
2619 return false;
2620 }
2621
2570 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2622 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2571 goto err; 2623 goto err;
2572 2624
@@ -2980,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2980 * simplistic anyway to express such constraints, so just give up on 3032 * simplistic anyway to express such constraints, so just give up on
2981 * cloning for SDVO encoders. 3033 * cloning for SDVO encoders.
2982 */ 3034 */
2983 intel_sdvo->base.cloneable = false; 3035 intel_sdvo->base.cloneable = 0;
2984 3036
2985 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 3037 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
2986 3038
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 716a3c9c0751..336ae6c602f2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        crtc_w--;
        crtc_h--;
 
-       I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
-       I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
                                                        obj->tiling_mode,
@@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                                                        fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
+       I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+       I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
        if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
        else
@@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
-       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset =
                intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
+       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
        /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
         * register */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
-       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
        linear_offset = y * fb->pitches[0] + x * pixel_size;
        dvssurf_offset =
                intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                               pixel_size, fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
+       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
        if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
        else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 22cf0f4ba248..bafe92e317d5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1189,8 +1189,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                i915_disable_pipestat(dev_priv, 0,
-                                     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                                     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+                                     PIPE_HOTPLUG_INTERRUPT_STATUS |
+                                     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
@@ -1266,8 +1266,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                i915_enable_pipestat(dev_priv, 0,
-                                    PIPE_HOTPLUG_INTERRUPT_ENABLE |
-                                    PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+                                    PIPE_HOTPLUG_INTERRUPT_STATUS |
+                                    PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
@@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
                /*
                 * If the device type is not TV, continue.
                 */
-               if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
-                   p_child->old.device_type != DEVICE_TYPE_TV)
+               switch (p_child->old.device_type) {
+               case DEVICE_TYPE_INT_TV:
+               case DEVICE_TYPE_TV:
+               case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+                       break;
+               default:
                        continue;
+               }
                /* Only when the addin_offset is non-zero, it is regarded
                 * as present.
                 */
@@ -1634,13 +1639,13 @@ intel_tv_init(struct drm_device *dev)
        intel_encoder->disable = intel_disable_tv;
        intel_encoder->get_hw_state = intel_tv_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
+       intel_connector->unregister = intel_connector_unregister;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_TVOUT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-       intel_encoder->cloneable = false;
+       intel_encoder->cloneable = 0;
        intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
-       intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
        intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
        /* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 87df68f5f504..f729dc71d5be 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -40,6 +40,12 @@
 
 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+            "Device suspended\n");
+}
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
@@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
        __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                        int fw_engine)
 {
        u32 forcewake_ack;
@@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
        gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                        int fw_engine)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
-       gen6_gt_check_fifodbg(dev_priv);
+
+       if (IS_GEN7(dev_priv->dev))
+               gen6_gt_check_fifodbg(dev_priv);
 }
 
 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-       if (FORCEWAKE_RENDER & fw_engine) {
-               if (dev_priv->uncore.fw_rendercount++ == 0)
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
-                                                       FORCEWAKE_RENDER);
-       }
-       if (FORCEWAKE_MEDIA & fw_engine) {
-               if (dev_priv->uncore.fw_mediacount++ == 0)
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
-                                                       FORCEWAKE_MEDIA);
-       }
+
+       if (fw_engine & FORCEWAKE_RENDER &&
+           dev_priv->uncore.fw_rendercount++ != 0)
+               fw_engine &= ~FORCEWAKE_RENDER;
+       if (fw_engine & FORCEWAKE_MEDIA &&
+           dev_priv->uncore.fw_mediacount++ != 0)
+               fw_engine &= ~FORCEWAKE_MEDIA;
+
+       if (fw_engine)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -272,46 +280,89 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-       if (FORCEWAKE_RENDER & fw_engine) {
-               WARN_ON(dev_priv->uncore.fw_rendercount == 0);
-               if (--dev_priv->uncore.fw_rendercount == 0)
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
-                                                       FORCEWAKE_RENDER);
+       if (fw_engine & FORCEWAKE_RENDER) {
+               WARN_ON(!dev_priv->uncore.fw_rendercount);
+               if (--dev_priv->uncore.fw_rendercount != 0)
+                       fw_engine &= ~FORCEWAKE_RENDER;
        }
 
-       if (FORCEWAKE_MEDIA & fw_engine) {
-               WARN_ON(dev_priv->uncore.fw_mediacount == 0);
-               if (--dev_priv->uncore.fw_mediacount == 0)
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
-                                                       FORCEWAKE_MEDIA);
+       if (fw_engine & FORCEWAKE_MEDIA) {
+               WARN_ON(!dev_priv->uncore.fw_mediacount);
+               if (--dev_priv->uncore.fw_mediacount != 0)
+                       fw_engine &= ~FORCEWAKE_MEDIA;
        }
 
+       if (fw_engine)
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
+
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static void gen6_force_wake_work(struct work_struct *work)
+static void gen6_force_wake_timer(unsigned long arg)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+       struct drm_i915_private *dev_priv = (void *)arg;
        unsigned long irqflags;
 
+       assert_device_not_suspended(dev_priv);
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       WARN_ON(!dev_priv->uncore.forcewake_count);
+
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
 
-       if (IS_VALLEYVIEW(dev)) {
+       del_timer_sync(&dev_priv->uncore.force_wake_timer);
+
+       /* Hold uncore.lock across reset to prevent any register access
+        * with forcewake not set correctly
+        */
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       if (IS_VALLEYVIEW(dev))
                vlv_force_wake_reset(dev_priv);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       else if (IS_GEN6(dev) || IS_GEN7(dev))
                __gen6_gt_force_wake_reset(dev_priv);
-               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                       __gen6_gt_force_wake_mt_reset(dev_priv);
+
+       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
+               __gen7_gt_force_wake_mt_reset(dev_priv);
+
+       if (restore) { /* If reset with a user forcewake, try to restore */
+               unsigned fw = 0;
+
+               if (IS_VALLEYVIEW(dev)) {
+                       if (dev_priv->uncore.fw_rendercount)
+                               fw |= FORCEWAKE_RENDER;
+
+                       if (dev_priv->uncore.fw_mediacount)
+                               fw |= FORCEWAKE_MEDIA;
+               } else {
+                       if (dev_priv->uncore.forcewake_count)
+                               fw = FORCEWAKE_ALL;
+               }
+
+               if (fw)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
+
+               if (IS_GEN6(dev) || IS_GEN7(dev))
+                       dev_priv->uncore.fifo_count =
+                               __raw_i915_read32(dev_priv, GTFIFOCTL) &
+                               GT_FIFO_FREE_ENTRIES_MASK;
+       } else {
+               dev_priv->uncore.forcewake_count = 0;
+               dev_priv->uncore.fw_rendercount = 0;
+               dev_priv->uncore.fw_mediacount = 0;
        }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 void intel_uncore_early_sanitize(struct drm_device *dev)
@@ -337,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));
 
-       intel_uncore_forcewake_reset(dev);
+       intel_uncore_forcewake_reset(dev, false);
 }
 
 void intel_uncore_sanitize(struct drm_device *dev)
@@ -354,7 +405,9 @@ void intel_uncore_sanitize(struct drm_device *dev)
                mutex_lock(&dev_priv->rps.hw_lock);
                reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
 
-               if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+               if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
+                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
+                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
                        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
 
                mutex_unlock(&dev_priv->rps.hw_lock);
@@ -393,25 +446,40 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
        unsigned long irqflags;
+       bool delayed = false;
 
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
 
        /* Redirect to VLV specific routine */
-       if (IS_VALLEYVIEW(dev_priv->dev))
-               return vlv_force_wake_put(dev_priv, fw_engine);
+       if (IS_VALLEYVIEW(dev_priv->dev)) {
+               vlv_force_wake_put(dev_priv, fw_engine);
+               goto out;
+       }
 
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       WARN_ON(!dev_priv->uncore.forcewake_count);
+
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
-               mod_delayed_work(dev_priv->wq,
-                                &dev_priv->uncore.force_wake_work,
-                                1);
+               delayed = true;
+               mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
+                                jiffies + 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
-       intel_runtime_pm_put(dev_priv);
+out:
+       if (!delayed)
+               intel_runtime_pm_put(dev_priv);
+}
+
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+{
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
+       WARN_ON(dev_priv->uncore.forcewake_count > 0);
 }
 
 /* We give fast paths for the really cool registers */
@@ -446,16 +514,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
        }
 }
 
-static void
-assert_device_not_suspended(struct drm_i915_private *dev_priv)
-{
-       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-            "Device suspended\n");
-}
-
 #define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
+       assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
 #define REG_READ_FOOTER \
@@ -484,14 +546,13 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
-       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-                                                             FORCEWAKE_ALL); \
+       if (dev_priv->uncore.forcewake_count == 0 && \
+           NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                     FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
-               if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-                                                             FORCEWAKE_ALL); \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                     FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
@@ -502,27 +563,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 static u##x \
 vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
-       unsigned *fwcount; \
        REG_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
-               fwengine = FORCEWAKE_RENDER; \
-               fwcount = &dev_priv->uncore.fw_rendercount; \
-       } \
-       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
-               fwengine = FORCEWAKE_MEDIA; \
-               fwcount = &dev_priv->uncore.fw_mediacount; \
+               if (dev_priv->uncore.fw_rendercount == 0) \
+                       fwengine = FORCEWAKE_RENDER; \
+       } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+               if (dev_priv->uncore.fw_mediacount == 0) \
+                       fwengine = FORCEWAKE_MEDIA; \
        } \
-       if (fwengine != 0) { \
-               if ((*fwcount)++ == 0) \
-                       (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
-                                                               fwengine); \
-               val = __raw_i915_read##x(dev_priv, reg); \
-               if (--(*fwcount) == 0) \
-                       (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
-                                                               fwengine); \
-       } else { \
-               val = __raw_i915_read##x(dev_priv, reg); \
-       } \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+       val = __raw_i915_read##x(dev_priv, reg); \
+       if (fwengine) \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
        REG_READ_FOOTER; \
 }
 
@@ -554,6 +607,7 @@ __gen4_read(64)
 #define REG_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+       assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
 #define REG_WRITE_FOOTER \
@@ -584,7 +638,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       assert_device_not_suspended(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
@@ -600,7 +653,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       assert_device_not_suspended(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
@@ -634,16 +686,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-       bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
-       if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-                                                     FORCEWAKE_ALL); \
-       } \
-       __raw_i915_write##x(dev_priv, reg, val); \
-       if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-                                                     FORCEWAKE_ALL); \
+       if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                             FORCEWAKE_ALL); \
+               __raw_i915_write##x(dev_priv, reg, val); \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                             FORCEWAKE_ALL); \
+       } else { \
+               __raw_i915_write##x(dev_priv, reg, val); \
        } \
        REG_WRITE_FOOTER; \
 }
@@ -681,15 +734,17 @@ void intel_uncore_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
-                         gen6_force_wake_work);
+       setup_timer(&dev_priv->uncore.force_wake_timer,
+                   gen6_force_wake_timer, (unsigned long)dev_priv);
+
+       intel_uncore_early_sanitize(dev);
 
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
-               dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+               dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
+               dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;
 
@@ -703,16 +758,16 @@ void intel_uncore_init(struct drm_device *dev)
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+               __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+               __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
-                               __gen6_gt_force_wake_mt_get;
+                               __gen7_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
-                               __gen6_gt_force_wake_mt_put;
+                               __gen7_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
@@ -792,12 +847,9 @@ void intel_uncore_init(struct drm_device *dev)
 
 void intel_uncore_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       flush_delayed_work(&dev_priv->uncore.force_wake_work);
-
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
+       intel_uncore_forcewake_reset(dev, false);
 }
 
 static const struct register_whitelist {
@@ -814,7 +866,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
-       int i;
+       int i, ret = 0;
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
@@ -825,6 +877,8 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
 
+       intel_runtime_pm_get(dev_priv);
+
        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
@@ -840,10 +894,13 @@ int i915_reg_read_ioctl(struct drm_device *dev,
                break;
        default:
                WARN_ON(1);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       return 0;
+out:
+       intel_runtime_pm_put(dev_priv);
+       return ret;
 }
 
 int i915_get_reset_stats_ioctl(struct drm_device *dev,
@@ -852,6 +909,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
+       struct i915_hw_context *ctx;
        int ret;
 
        if (args->flags || args->pad)
@@ -864,11 +922,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
        if (ret)
                return ret;
 
-       hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
-       if (IS_ERR(hs)) {
+       ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+       if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(hs);
+               return PTR_ERR(ctx);
        }
+       hs = &ctx->hang_stats;
 
        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
@@ -944,12 +1003,6 @@ static int gen6_do_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
-       unsigned long irqflags;
-
-       /* Hold uncore.lock across reset to prevent any register access
-        * with forcewake not set correctly
-        */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
        /* Reset the chip */
 
@@ -962,18 +1015,8 @@ static int gen6_do_reset(struct drm_device *dev)
        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
-       intel_uncore_forcewake_reset(dev);
+       intel_uncore_forcewake_reset(dev, true);
 
-       /* If reset with a user forcewake, try to restore, otherwise turn it off */
-       if (dev_priv->uncore.forcewake_count)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-       else
-               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-
-       /* Restore fifo count */
-       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
 }
 
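The reworked vlv_force_wake_get()/put() paths above collapse the per-engine bookkeeping into one pattern: a reference count per engine, with the hardware only touched on the 0->1 and 1->0 transitions, all under uncore.lock. A stripped-down sketch of that pattern; the names are illustrative and locking is left to the caller, as in the driver:

/* Sketch of the 0->1 / 1->0 refcount pattern; caller holds the lock. */
struct fw_domain {
        unsigned count;
        void (*hw_get)(void *ctx);      /* ask hardware to stay awake */
        void (*hw_put)(void *ctx);      /* allow hardware to sleep again */
        void *ctx;
};

static void fw_get(struct fw_domain *d)
{
        if (d->count++ == 0)            /* first user wakes the hardware */
                d->hw_get(d->ctx);
}

static void fw_put(struct fw_domain *d)
{
        WARN_ON(!d->count);
        if (--d->count == 0)            /* last user lets it sleep */
                d->hw_put(d->ctx);
}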
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 968374776db9..a034ed408252 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,7 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
        struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct mga_device *mdev = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->fb;
+       struct drm_framebuffer *fb = crtc->primary->fb;
        int i;
 
        if (!crtc->enabled)
@@ -742,7 +742,7 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
                mgag200_bo_unreserve(bo);
        }
 
-       mga_fb = to_mga_framebuffer(crtc->fb);
+       mga_fb = to_mga_framebuffer(crtc->primary->fb);
        obj = mga_fb->obj;
        bo = gem_to_mga_bo(obj);
 
@@ -805,7 +805,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
        };
 
-       bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+       bppshift = mdev->bpp_shifts[(crtc->primary->fb->bits_per_pixel >> 3) - 1];
 
        switch (mdev->type) {
        case G200_SE_A:
@@ -843,12 +843,12 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                break;
        }
 
-       switch (crtc->fb->bits_per_pixel) {
+       switch (crtc->primary->fb->bits_per_pixel) {
        case 8:
                dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
                break;
        case 16:
-               if (crtc->fb->depth == 15)
+               if (crtc->primary->fb->depth == 15)
                        dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
                else
                        dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
@@ -896,8 +896,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
        WREG_SEQ(3, 0);
        WREG_SEQ(4, 0xe);
 
-       pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
-       if (crtc->fb->bits_per_pixel == 24)
+       pitch = crtc->primary->fb->pitches[0] / (crtc->primary->fb->bits_per_pixel / 8);
+       if (crtc->primary->fb->bits_per_pixel == 24)
                pitch = (pitch * 3) >> (4 - bppshift);
        else
                pitch = pitch >> (4 - bppshift);
@@ -974,7 +974,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                ((vdisplay & 0xc00) >> 7) |
                ((vsyncstart & 0xc00) >> 5) |
                ((vdisplay & 0x400) >> 3);
-       if (crtc->fb->bits_per_pixel == 24)
+       if (crtc->primary->fb->bits_per_pixel == 24)
                ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
        else
                ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
@@ -1034,9 +1034,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                u32 bpp;
                u32 mb;
 
-               if (crtc->fb->bits_per_pixel > 16)
+               if (crtc->primary->fb->bits_per_pixel > 16)
                        bpp = 32;
-               else if (crtc->fb->bits_per_pixel > 8)
+               else if (crtc->primary->fb->bits_per_pixel > 8)
                        bpp = 16;
                else
                        bpp = 8;
@@ -1277,8 +1277,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
        int ret;
        DRM_DEBUG_KMS("\n");
        mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-       if (crtc->fb) {
-               struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+       if (crtc->primary->fb) {
+               struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
                struct drm_gem_object *obj = mga_fb->obj;
                struct mgag200_bo *bo = gem_to_mga_bo(obj);
                ret = mgag200_bo_reserve(bo, false);
@@ -1287,7 +1287,7 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
                mgag200_bo_push_sysram(bo);
                mgag200_bo_unreserve(bo);
        }
-       crtc->fb = NULL;
+       crtc->primary->fb = NULL;
 }
 
 /* These provide the minimum set of functions required to handle a CRTC */
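All of the mgag200 hunks above are fallout from the DRM primary-plane rework: the scanout framebuffer now lives on crtc->primary rather than on the CRTC itself. A tiny illustrative accessor (not part of the patch) showing the new indirection:

/* Sketch: the CRTC's scanout fb is now reached through its primary plane. */
static inline struct drm_framebuffer *crtc_fb(struct drm_crtc *crtc)
{
        return crtc->primary ? crtc->primary->fb : NULL;
}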
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index adb5166a5dfd..5a00e90696de 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -259,7 +259,9 @@ int mgag200_mm_init(struct mga_device *mdev)
 
        ret = ttm_bo_device_init(&mdev->ttm.bdev,
                                 mdev->ttm.bo_global_ref.ref.object,
-                                &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+                                &mgag200_bo_driver,
+                                dev->anon_inode->i_mapping,
+                                DRM_FILE_PAGE_OFFSET,
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
        }
 
        mgabo->bo.bdev = &mdev->ttm.bdev;
-       mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
 
        mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4f977a593bea..5e1e6b0cd8ac 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
        adreno/adreno_gpu.o \
        adreno/a3xx_gpu.o \
        hdmi/hdmi.o \
+       hdmi/hdmi_audio.o \
        hdmi/hdmi_bridge.o \
        hdmi/hdmi_connector.o \
        hdmi/hdmi_i2c.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 461df93e825e..f20fbde5dc49 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,7 +35,11 @@
         A3XX_INT0_CP_AHB_ERROR_HALT |     \
         A3XX_INT0_UCHE_OOB_ACCESS)
 
-static struct platform_device *a3xx_pdev;
+
+static bool hang_debug = false;
+MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
+module_param_named(hang_debug, hang_debug, bool, 0600);
+static void a3xx_dump(struct msm_gpu *gpu);
 
 static void a3xx_me_init(struct msm_gpu *gpu)
 {
@@ -291,6 +295,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
 
 static void a3xx_recover(struct msm_gpu *gpu)
 {
+       /* dump registers before resetting gpu, if enabled: */
+       if (hang_debug)
+               a3xx_dump(gpu);
        gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
        gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
        gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
@@ -311,27 +318,18 @@ static void a3xx_destroy(struct msm_gpu *gpu)
                ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
 #endif
 
-       put_device(&a3xx_gpu->pdev->dev);
        kfree(a3xx_gpu);
 }
 
 static void a3xx_idle(struct msm_gpu *gpu)
 {
-       unsigned long t;
-
        /* wait for ringbuffer to drain: */
        adreno_idle(gpu);
 
-       t = jiffies + ADRENO_IDLE_TIMEOUT;
-
        /* then wait for GPU to finish: */
-       do {
-               uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
-               if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
-                       return;
-       } while(time_before(jiffies, t));
-
-       DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
+       if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
+                       A3XX_RBBM_STATUS_GPU_BUSY)))
+               DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
 
        /* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -352,7 +350,6 @@ static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_DEBUG_FS
 static const unsigned int a3xx_registers[] = {
        0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
        0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
@@ -392,11 +389,18 @@ static const unsigned int a3xx_registers[] = {
        0x303c, 0x303c, 0x305e, 0x305f,
 };
 
+#ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
+       struct drm_device *dev = gpu->dev;
        int i;
 
        adreno_show(gpu, m);
+
+       mutex_lock(&dev->struct_mutex);
+
+       gpu->funcs->pm_resume(gpu);
+
        seq_printf(m, "status: %08x\n",
                        gpu_read(gpu, REG_A3XX_RBBM_STATUS));
 
@@ -412,9 +416,36 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
                        seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
                }
        }
+
+       gpu->funcs->pm_suspend(gpu);
+
+       mutex_unlock(&dev->struct_mutex);
 }
 #endif
 
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a3xx_dump(struct msm_gpu *gpu)
+{
+       int i;
+
+       adreno_dump(gpu);
+       printk("status: %08x\n",
+                       gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+       /* dump these out in a form that can be parsed by demsm: */
+       printk("IO:region %s 00000000 00020000\n", gpu->name);
+       for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+               uint32_t start = a3xx_registers[i];
+               uint32_t end = a3xx_registers[i+1];
+               uint32_t addr;
+
+               for (addr = start; addr <= end; addr++) {
+                       uint32_t val = gpu_read(gpu, addr);
+                       printk("IO:R %08x %08x\n", addr<<2, val);
+               }
+       }
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -439,7 +470,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        struct a3xx_gpu *a3xx_gpu = NULL;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
-       struct platform_device *pdev = a3xx_pdev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
        struct adreno_platform_config *config;
        int ret;
 
@@ -460,7 +492,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        adreno_gpu = &a3xx_gpu->base;
        gpu = &adreno_gpu->base;
 
-       get_device(&pdev->dev);
        a3xx_gpu->pdev = pdev;
 
        gpu->fast_rate = config->fast_rate;
@@ -522,17 +553,24 @@ fail:
 #  include <mach/kgsl.h>
 #endif
 
-static int a3xx_probe(struct platform_device *pdev)
+static void set_gpu_pdev(struct drm_device *dev,
+               struct platform_device *pdev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       priv->gpu_pdev = pdev;
+}
+
+static int a3xx_bind(struct device *dev, struct device *master, void *data)
 {
        static struct adreno_platform_config config = {};
 #ifdef CONFIG_OF
-       struct device_node *child, *node = pdev->dev.of_node;
+       struct device_node *child, *node = dev->of_node;
        u32 val;
        int ret;
 
        ret = of_property_read_u32(node, "qcom,chipid", &val);
        if (ret) {
-               dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+               dev_err(dev, "could not find chipid: %d\n", ret);
                return ret;
        }
 
@@ -548,7 +586,7 @@ static int a3xx_probe(struct platform_device *pdev)
                for_each_child_of_node(child, pwrlvl) {
                        ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
                        if (ret) {
-                               dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+                               dev_err(dev, "could not find gpu-freq: %d\n", ret);
                                return ret;
                        }
                        config.fast_rate = max(config.fast_rate, val);
@@ -558,12 +596,12 @@ static int a3xx_probe(struct platform_device *pdev)
        }
 
        if (!config.fast_rate) {
-               dev_err(&pdev->dev, "could not find clk rates\n");
+               dev_err(dev, "could not find clk rates\n");
                return -ENXIO;
        }
 
 #else
-       struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+       struct kgsl_device_platform_data *pdata = dev->platform_data;
        uint32_t version = socinfo_get_version();
        if (cpu_is_apq8064ab()) {
                config.fast_rate = 450000000;
@@ -609,14 +647,30 @@ static int a3xx_probe(struct platform_device *pdev)
        config.bus_scale_table = pdata->bus_scale_table;
 #  endif
 #endif
-       pdev->dev.platform_data = &config;
-       a3xx_pdev = pdev;
+       dev->platform_data = &config;
+       set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
        return 0;
 }
 
+static void a3xx_unbind(struct device *dev, struct device *master,
+               void *data)
+{
+       set_gpu_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops a3xx_ops = {
+       .bind   = a3xx_bind,
+       .unbind = a3xx_unbind,
+};
+
+static int a3xx_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &a3xx_ops);
+}
+
 static int a3xx_remove(struct platform_device *pdev)
 {
-       a3xx_pdev = NULL;
+       component_del(&pdev->dev, &a3xx_ops);
        return 0;
 }
 
@@ -624,7 +678,6 @@ static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,kgsl-3d0" },
        {}
 };
-MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver a3xx_driver = {
        .probe = a3xx_probe,
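The a3xx probe path above now uses the kernel's component framework: the platform driver only registers a component, and the msm master device invokes bind()/unbind() once every component it needs has probed. The general shape, reduced to a skeleton with placeholder example_* names:

static int example_bind(struct device *dev, struct device *master, void *data)
{
        /* publish this sub-device to the master's drm_device here */
        return 0;
}

static void example_unbind(struct device *dev, struct device *master, void *data)
{
        /* undo whatever bind() published */
}

static const struct component_ops example_ops = {
        .bind   = example_bind,
        .unbind = example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
        /* defer real setup until the master aggregates all components */
        return component_add(&pdev->dev, &example_ops);
}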
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index d321099abdd4..28ca8cd8b09e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -73,6 +73,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->gmem;
                return 0;
+       case MSM_PARAM_CHIP_ID:
+               *value = adreno_gpu->rev.patchid |
+                               (adreno_gpu->rev.minor << 8) |
+                               (adreno_gpu->rev.major << 16) |
+                               (adreno_gpu->rev.core << 24);
+               return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
@@ -225,19 +231,11 @@ void adreno_flush(struct msm_gpu *gpu)
 void adreno_idle(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t rptr, wptr = get_wptr(gpu->rb);
-       unsigned long t;
-
-       t = jiffies + ADRENO_IDLE_TIMEOUT;
-
-       /* then wait for CP to drain ringbuffer: */
-       do {
-               rptr = adreno_gpu->memptrs->rptr;
-               if (rptr == wptr)
-                       return;
-       } while(time_before(jiffies, t));
+       uint32_t wptr = get_wptr(gpu->rb);
 
-       DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+       /* wait for CP to drain ringbuffer: */
+       if (spin_until(adreno_gpu->memptrs->rptr == wptr))
+               DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
        /* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -260,22 +258,37 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 }
 #endif
 
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t freedwords;
-       unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
-       do {
-               uint32_t size = gpu->rb->size / 4;
-               uint32_t wptr = get_wptr(gpu->rb);
-               uint32_t rptr = adreno_gpu->memptrs->rptr;
-               freedwords = (rptr + (size - 1) - wptr) % size;
-
-               if (time_after(jiffies, t)) {
-                       DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
-                       break;
-               }
-       } while(freedwords < ndwords);
+
+       printk("revision: %d (%d.%d.%d.%d)\n",
+                       adreno_gpu->info->revn, adreno_gpu->rev.core,
+                       adreno_gpu->rev.major, adreno_gpu->rev.minor,
+                       adreno_gpu->rev.patchid);
+
+       printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
+                       gpu->submitted_fence);
+       printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
+       printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
+       printk("rb wptr: %d\n", get_wptr(gpu->rb));
+
+}
+
+static uint32_t ring_freewords(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       uint32_t size = gpu->rb->size / 4;
+       uint32_t wptr = get_wptr(gpu->rb);
+       uint32_t rptr = adreno_gpu->memptrs->rptr;
+       return (rptr + (size - 1) - wptr) % size;
+}
+
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+{
+       if (spin_until(ring_freewords(gpu) >= ndwords))
+               DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
 }
 
 static const char *iommu_ports[] = {
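The new ring_freewords() helper keeps one dword permanently unused, so rptr == wptr can only ever mean an empty ring rather than a full one. A worked example of the modular arithmetic, using an illustrative 1024-dword ring:

/*
 * free = (rptr + (size - 1) - wptr) % size, with size = 1024 (example):
 *
 *   rptr == wptr == 10    -> (10 + 1023 - 10) % 1024 == 1023  (empty;
 *                            one dword is always held back)
 *   rptr == 10, wptr == 9 -> (10 + 1023 -  9) % 1024 == 0     (full)
 */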
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index ca11ea4da165..63c36ce33020 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -76,7 +76,20 @@ struct adreno_platform_config {
 #endif
 };
 
-#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define spin_until(X) ({ \
+       int __ret = -ETIMEDOUT; \
+       unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
+       do { \
+               if (X) { \
+                       __ret = 0; \
+                       break; \
+               } \
+       } while (time_before(jiffies, __t)); \
+       __ret; \
+})
+
 
 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
 {
@@ -114,6 +127,7 @@ void adreno_idle(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
 #endif
+void adreno_dump(struct msm_gpu *gpu);
 void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
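spin_until() evaluates to 0 if the condition became true before ADRENO_IDLE_TIMEOUT elapsed and to -ETIMEDOUT otherwise, so callers can branch on the result. A hedged usage sketch; STATUS_REG and STATUS_IDLE are placeholders, not real register names:

static int example_wait_for_idle(struct msm_gpu *gpu)
{
        /* busy-wait until the (placeholder) idle bit is set, or time out */
        if (spin_until(gpu_read(gpu, STATUS_REG) & STATUS_IDLE))
                return -ETIMEDOUT;
        return 0;
}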
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 6f1588aa9071..ae750f6928c1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -17,8 +17,6 @@
 
 #include "hdmi.h"
 
-static struct platform_device *hdmi_pdev;
-
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 {
        uint32_t ctrl = 0;
@@ -67,7 +65,7 @@ void hdmi_destroy(struct kref *kref)
        if (hdmi->i2c)
                hdmi_i2c_destroy(hdmi->i2c);
 
-       put_device(&hdmi->pdev->dev);
+       platform_set_drvdata(hdmi->pdev, NULL);
 }
 
 /* initialize connector */
@@ -75,7 +73,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 {
        struct hdmi *hdmi = NULL;
        struct msm_drm_private *priv = dev->dev_private;
-       struct platform_device *pdev = hdmi_pdev;
+       struct platform_device *pdev = priv->hdmi_pdev;
        struct hdmi_platform_config *config;
        int i, ret;
 
@@ -95,13 +93,13 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 
        kref_init(&hdmi->refcount);
 
-       get_device(&pdev->dev);
-
        hdmi->dev = dev;
        hdmi->pdev = pdev;
        hdmi->config = config;
        hdmi->encoder = encoder;
 
+       hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
        /* not sure about which phy maps to which msm.. probably I miss some */
        if (config->phy_init)
                hdmi->phy = config->phy_init(hdmi);
@@ -228,6 +226,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
        priv->bridges[priv->num_bridges++] = hdmi->bridge;
        priv->connectors[priv->num_connectors++] = hdmi->connector;
 
+       platform_set_drvdata(pdev, hdmi);
+
        return hdmi;
 
 fail:
@@ -249,17 +249,24 @@ fail:
 
 #include <linux/of_gpio.h>
 
-static int hdmi_dev_probe(struct platform_device *pdev)
+static void set_hdmi_pdev(struct drm_device *dev,
+               struct platform_device *pdev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       priv->hdmi_pdev = pdev;
+}
+
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
-       struct device_node *of_node = pdev->dev.of_node;
+       struct device_node *of_node = dev->of_node;
 
        int get_gpio(const char *name)
        {
                int gpio = of_get_named_gpio(of_node, name, 0);
                if (gpio < 0) {
-                       dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+                       dev_err(dev, "failed to get gpio: %s (%d)\n",
                                        name, gpio);
                        gpio = -1;
                }
@@ -305,7 +312,7 @@ static int hdmi_dev_probe(struct platform_device *pdev)
                config.ddc_data_gpio = 71;
                config.hpd_gpio      = 72;
                config.mux_en_gpio   = -1;
-               config.mux_sel_gpio  = 13 + NR_GPIO_IRQS;
+               config.mux_sel_gpio  = -1;
        } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
                static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
                config.phy_init = hdmi_phy_8960_init;
@@ -336,14 +343,30 @@ static int hdmi_dev_probe(struct platform_device *pdev)
                config.mux_sel_gpio = -1;
        }
 #endif
-       pdev->dev.platform_data = &config;
-       hdmi_pdev = pdev;
+       dev->platform_data = &config;
+       set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
        return 0;
 }
 
+static void hdmi_unbind(struct device *dev, struct device *master,
+               void *data)
+{
+       set_hdmi_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops hdmi_ops = {
+       .bind   = hdmi_bind,
+       .unbind = hdmi_unbind,
+};
+
+static int hdmi_dev_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &hdmi_ops);
+}
+
 static int hdmi_dev_remove(struct platform_device *pdev)
 {
-       hdmi_pdev = NULL;
+       component_del(&pdev->dev, &hdmi_ops);
        return 0;
 }
 
@@ -351,7 +374,6 @@ static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,hdmi-tx" },
        {}
 };
-MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver hdmi_driver = {
        .probe = hdmi_dev_probe,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 41b29add70b1..9fafee6a3e43 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -22,6 +22,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/hdmi.h>
 
 #include "msm_drv.h"
 #include "hdmi.xml.h"
@@ -30,6 +31,12 @@
 struct hdmi_phy;
 struct hdmi_platform_config;
 
+struct hdmi_audio {
+       bool enabled;
+       struct hdmi_audio_infoframe infoframe;
+       int rate;
+};
+
 struct hdmi {
        struct kref refcount;
 
@@ -38,6 +45,13 @@ struct hdmi {
 
        const struct hdmi_platform_config *config;
 
+       /* audio state: */
+       struct hdmi_audio audio;
+
+       /* video state: */
+       bool power_on;
+       unsigned long int pixclock;
+
        void __iomem *mmio;
 
        struct regulator *hpd_regs[2];
@@ -132,6 +146,17 @@ struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
 struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
 
 /*
+ * audio:
+ */
+
+int hdmi_audio_update(struct hdmi *hdmi);
+int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+       uint32_t num_of_channels, uint32_t channel_allocation,
+       uint32_t level_shift, bool down_mix);
+void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+
+
+/*
  * hdmi bridge:
  */
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
new file mode 100644
index 000000000000..872485f60134
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/hdmi.h>
19#include "hdmi.h"
20
21
22/* Supported HDMI Audio channels */
23#define MSM_HDMI_AUDIO_CHANNEL_2 0
24#define MSM_HDMI_AUDIO_CHANNEL_4 1
25#define MSM_HDMI_AUDIO_CHANNEL_6 2
26#define MSM_HDMI_AUDIO_CHANNEL_8 3
27
28/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
29static int nchannels[] = { 2, 4, 6, 8 };
30
31/* Supported HDMI Audio sample rates */
32#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
33#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
34#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
35#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
36#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
37#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
38#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
39#define MSM_HDMI_SAMPLE_RATE_MAX 7
40
41
42struct hdmi_msm_audio_acr {
43 uint32_t n; /* N parameter for clock regeneration */
44 uint32_t cts; /* CTS parameter for clock regeneration */
45};
46
47struct hdmi_msm_audio_arcs {
48 unsigned long int pixclock;
49 struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
50};
51
52#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
53
54/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
55/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
56static const struct hdmi_msm_audio_arcs acr_lut[] = {
57 /* 25.200MHz */
58 HDMI_MSM_AUDIO_ARCS(25200, {
59 {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
60 {12288, 25200}, {25088, 28000}, {24576, 25200} }),
61 /* 27.000MHz */
62 HDMI_MSM_AUDIO_ARCS(27000, {
63 {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
64 {12288, 27000}, {25088, 30000}, {24576, 27000} }),
65 /* 27.027MHz */
66 HDMI_MSM_AUDIO_ARCS(27030, {
67 {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
68 {12288, 27027}, {25088, 30030}, {24576, 27027} }),
69 /* 74.250MHz */
70 HDMI_MSM_AUDIO_ARCS(74250, {
71 {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
72 {12288, 74250}, {25088, 82500}, {24576, 74250} }),
73 /* 148.500MHz */
74 HDMI_MSM_AUDIO_ARCS(148500, {
75 {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
76 {12288, 148500}, {25088, 165000}, {24576, 148500} }),
77};
78
79static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
80{
81 int i;
82
83 for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
84 const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
85 if (arcs->pixclock == pixclock)
86 return arcs;
87 }
88
89 return NULL;
90}
91
92int hdmi_audio_update(struct hdmi *hdmi)
93{
94 struct hdmi_audio *audio = &hdmi->audio;
95 struct hdmi_audio_infoframe *info = &audio->infoframe;
96 const struct hdmi_msm_audio_arcs *arcs = NULL;
97 bool enabled = audio->enabled;
98 uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
99 uint32_t infofrm_ctrl, audio_config;
100
101 DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
102 "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
103 audio->enabled, info->channels, info->channel_allocation,
104 info->level_shift_value, info->downmix_inhibit, audio->rate);
105 DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
106
107 if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
108 DBG("disabling audio: no video");
109 enabled = false;
110 }
111
112 if (enabled) {
113 arcs = get_arcs(hdmi->pixclock);
114 if (!arcs) {
115 DBG("disabling audio: unsupported pixclock: %lu",
116 hdmi->pixclock);
117 enabled = false;
118 }
119 }
120
121 /* Read first before writing */
122 acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
123 vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
124 aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
125 infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
126 audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
127
128 /* Clear N/CTS selection bits */
129 acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;
130
131 if (enabled) {
132 uint32_t n, cts, multiplier;
133 enum hdmi_acr_cts select;
134 uint8_t buf[14];
135
136 n = arcs->lut[audio->rate].n;
137 cts = arcs->lut[audio->rate].cts;
138
139 if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
140 (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
141 multiplier = 4;
142 n >>= 2; /* divide N by 4 and use multiplier */
143 } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
144 (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
145 multiplier = 2;
146 n >>= 1; /* divide N by 2 and use multiplier */
147 } else {
148 multiplier = 1;
149 }
150
151 DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);
152
153 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
154 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
155 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
156
157 if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
158 (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
159 (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
160 select = ACR_48;
161 else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
162 (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
163 (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
164 select = ACR_44;
165 else /* default to 32k */
166 select = ACR_32;
167
168 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);
169
170 hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
171 HDMI_ACR_0_CTS(cts));
172 hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
173 HDMI_ACR_1_N(n));
174
175 hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
176 COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
177 HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
178
179 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
180 acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
181
182 /* configure infoframe: */
183 hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
184 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
185 (buf[3] << 0) | (buf[4] << 8) |
186 (buf[5] << 16) | (buf[6] << 24));
187 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
188 (buf[7] << 0) | (buf[8] << 8));
189
190 hdmi_write(hdmi, REG_HDMI_GC, 0);
191
192 vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
193 vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
194
195 aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
196
197 infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
198 infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
199 infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
200 infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
201
202 audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
203 audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
204 audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
205 } else {
206 hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
207 acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
208 acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
209 vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
210 vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
211 aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
212 infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
213 infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
214 infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
215 infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
216 audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
217 }
218
219 hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
220 hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
221 hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
222 hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
223
224 hdmi_write(hdmi, REG_HDMI_AUD_INT,
225 COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
226 COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));
227
228 hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);
229
230
231 DBG("audio %sabled", enabled ? "en" : "dis");
232
233 return 0;
234}
235
236int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
237 uint32_t num_of_channels, uint32_t channel_allocation,
238 uint32_t level_shift, bool down_mix)
239{
240 struct hdmi_audio *audio;
241
242 if (!hdmi)
243 return -ENXIO;
244
245 audio = &hdmi->audio;
246
247 if (num_of_channels >= ARRAY_SIZE(nchannels))
248 return -EINVAL;
249
250 audio->enabled = enabled;
251 audio->infoframe.channels = nchannels[num_of_channels];
252 audio->infoframe.channel_allocation = channel_allocation;
253 audio->infoframe.level_shift_value = level_shift;
254 audio->infoframe.downmix_inhibit = down_mix;
255
256 return hdmi_audio_update(hdmi);
257}
258
259void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
260{
261 struct hdmi_audio *audio;
262
263 if (!hdmi)
264 return;
265
266 audio = &hdmi->audio;
267
268 if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
269 return;
270
271 audio->rate = rate;
272 hdmi_audio_update(hdmi);
273}
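The acr_lut entries are not arbitrary: HDMI audio clock regeneration requires that, on average, 128 * fs == f_TMDS * N / CTS, and the multiplier path above simply programs N/2 or N/4 and tells the hardware to multiply back. A standalone arithmetic check of the table values (userspace sketch, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmds = 148500000; /* 148.5 MHz TMDS/pixel clock */
	uint64_t fs   = 48000;     /* 48 kHz sample rate */
	uint64_t n    = 6144;      /* from the 148.500MHz table row */
	uint64_t cts  = 148500;

	/* regenerated audio master clock must equal 128 * fs */
	assert(tmds * n / cts == 128 * fs);

	/* 192 kHz uses the same row's N = 24576, programmed as
	 * (24576 >> 2) with the x4 hardware multiplier */
	assert(tmds * ((24576 >> 2) * 4) / cts == 128 * (4 * fs));

	printf("ACR relation holds\n");
	return 0;
}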
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7d10e55403c6..f6cf745c249e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -19,11 +19,7 @@
19 19
20struct hdmi_bridge { 20struct hdmi_bridge {
21 struct drm_bridge base; 21 struct drm_bridge base;
22
23 struct hdmi *hdmi; 22 struct hdmi *hdmi;
24 bool power_on;
25
26 unsigned long int pixclock;
27}; 23};
28#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) 24#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
29 25
@@ -52,8 +48,8 @@ static void power_on(struct drm_bridge *bridge)
52 } 48 }
53 49
54 if (config->pwr_clk_cnt > 0) { 50 if (config->pwr_clk_cnt > 0) {
55 DBG("pixclock: %lu", hdmi_bridge->pixclock); 51 DBG("pixclock: %lu", hdmi->pixclock);
56 ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock); 52 ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
57 if (ret) { 53 if (ret) {
58 dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n", 54 dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
59 config->pwr_clk_names[0], ret); 55 config->pwr_clk_names[0], ret);
@@ -102,12 +98,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
102 98
103 DBG("power up"); 99 DBG("power up");
104 100
105 if (!hdmi_bridge->power_on) { 101 if (!hdmi->power_on) {
106 power_on(bridge); 102 power_on(bridge);
107 hdmi_bridge->power_on = true; 103 hdmi->power_on = true;
104 hdmi_audio_update(hdmi);
108 } 105 }
109 106
110 phy->funcs->powerup(phy, hdmi_bridge->pixclock); 107 phy->funcs->powerup(phy, hdmi->pixclock);
111 hdmi_set_mode(hdmi, true); 108 hdmi_set_mode(hdmi, true);
112} 109}
113 110
@@ -129,9 +126,10 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
129 hdmi_set_mode(hdmi, false); 126 hdmi_set_mode(hdmi, false);
130 phy->funcs->powerdown(phy); 127 phy->funcs->powerdown(phy);
131 128
132 if (hdmi_bridge->power_on) { 129 if (hdmi->power_on) {
133 power_off(bridge); 130 power_off(bridge);
134 hdmi_bridge->power_on = false; 131 hdmi->power_on = false;
132 hdmi_audio_update(hdmi);
135 } 133 }
136} 134}
137 135
@@ -146,7 +144,7 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
146 144
147 mode = adjusted_mode; 145 mode = adjusted_mode;
148 146
149 hdmi_bridge->pixclock = mode->clock * 1000; 147 hdmi->pixclock = mode->clock * 1000;
150 148
151 hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1; 149 hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
152 150
@@ -194,9 +192,7 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
194 DBG("frame_ctrl=%08x", frame_ctrl); 192 DBG("frame_ctrl=%08x", frame_ctrl);
195 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); 193 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
196 194
197 // TODO until we have audio, this might be safest: 195 hdmi_audio_update(hdmi);
198 if (hdmi->hdmi_mode)
199 hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
200} 196}
201 197
202static const struct drm_bridge_funcs hdmi_bridge_funcs = { 198static const struct drm_bridge_funcs hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 84c5b13b33c9..3e6c0f3ed592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -120,7 +120,7 @@ static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
120 120
121 /* grab reference to incoming scanout fb: */ 121 /* grab reference to incoming scanout fb: */
122 drm_framebuffer_reference(new_fb); 122 drm_framebuffer_reference(new_fb);
123 mdp4_crtc->base.fb = new_fb; 123 mdp4_crtc->base.primary->fb = new_fb;
124 mdp4_crtc->fb = new_fb; 124 mdp4_crtc->fb = new_fb;
125 125
126 if (old_fb) 126 if (old_fb)
@@ -182,7 +182,7 @@ static void pageflip_cb(struct msm_fence_cb *cb)
182 struct mdp4_crtc *mdp4_crtc = 182 struct mdp4_crtc *mdp4_crtc =
183 container_of(cb, struct mdp4_crtc, pageflip_cb); 183 container_of(cb, struct mdp4_crtc, pageflip_cb);
184 struct drm_crtc *crtc = &mdp4_crtc->base; 184 struct drm_crtc *crtc = &mdp4_crtc->base;
185 struct drm_framebuffer *fb = crtc->fb; 185 struct drm_framebuffer *fb = crtc->primary->fb;
186 186
187 if (!fb) 187 if (!fb)
188 return; 188 return;
@@ -348,14 +348,14 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
348 mode->type, mode->flags); 348 mode->type, mode->flags);
349 349
350 /* grab extra ref for update_scanout() */ 350 /* grab extra ref for update_scanout() */
351 drm_framebuffer_reference(crtc->fb); 351 drm_framebuffer_reference(crtc->primary->fb);
352 352
353 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, 353 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
354 0, 0, mode->hdisplay, mode->vdisplay, 354 0, 0, mode->hdisplay, mode->vdisplay,
355 x << 16, y << 16, 355 x << 16, y << 16,
356 mode->hdisplay << 16, mode->vdisplay << 16); 356 mode->hdisplay << 16, mode->vdisplay << 16);
357 if (ret) { 357 if (ret) {
358 drm_framebuffer_unreference(crtc->fb); 358 drm_framebuffer_unreference(crtc->primary->fb);
359 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", 359 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
360 mdp4_crtc->name, ret); 360 mdp4_crtc->name, ret);
361 return ret; 361 return ret;
@@ -368,7 +368,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
368 /* take data from pipe: */ 368 /* take data from pipe: */
369 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); 369 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
370 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 370 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
371 crtc->fb->pitches[0]); 371 crtc->primary->fb->pitches[0]);
372 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), 372 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
373 MDP4_DMA_DST_SIZE_WIDTH(0) | 373 MDP4_DMA_DST_SIZE_WIDTH(0) |
374 MDP4_DMA_DST_SIZE_HEIGHT(0)); 374 MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -378,7 +378,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
378 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | 378 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
379 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); 379 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
380 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 380 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
381 crtc->fb->pitches[0]); 381 crtc->primary->fb->pitches[0]);
382 382
383 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); 383 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
384 384
@@ -388,8 +388,8 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
388 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); 388 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
389 } 389 }
390 390
391 update_fb(crtc, crtc->fb); 391 update_fb(crtc, crtc->primary->fb);
392 update_scanout(crtc, crtc->fb); 392 update_scanout(crtc, crtc->primary->fb);
393 393
394 return 0; 394 return 0;
395} 395}
@@ -420,19 +420,19 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
420 int ret; 420 int ret;
421 421
422 /* grab extra ref for update_scanout() */ 422 /* grab extra ref for update_scanout() */
423 drm_framebuffer_reference(crtc->fb); 423 drm_framebuffer_reference(crtc->primary->fb);
424 424
425 ret = mdp4_plane_mode_set(plane, crtc, crtc->fb, 425 ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
426 0, 0, mode->hdisplay, mode->vdisplay, 426 0, 0, mode->hdisplay, mode->vdisplay,
427 x << 16, y << 16, 427 x << 16, y << 16,
428 mode->hdisplay << 16, mode->vdisplay << 16); 428 mode->hdisplay << 16, mode->vdisplay << 16);
429 if (ret) { 429 if (ret) {
430 drm_framebuffer_unreference(crtc->fb); 430 drm_framebuffer_unreference(crtc->primary->fb);
431 return ret; 431 return ret;
432 } 432 }
433 433
434 update_fb(crtc, crtc->fb); 434 update_fb(crtc, crtc->primary->fb);
435 update_scanout(crtc, crtc->fb); 435 update_scanout(crtc, crtc->primary->fb);
436 436
437 return 0; 437 return 0;
438} 438}
@@ -740,6 +740,9 @@ void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
740 740
741void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane) 741void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
742{ 742{
743 /* don't actually detach our primary plane: */
744 if (to_mdp4_crtc(crtc)->plane == plane)
745 return;
743 set_attach(crtc, mdp4_plane_pipe(plane), NULL); 746 set_attach(crtc, mdp4_plane_pipe(plane), NULL);
744} 747}
745 748
@@ -791,7 +794,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
791 794
792 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb); 795 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
793 796
794 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs); 797 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
795 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 798 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
796 799
797 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base); 800 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
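Every crtc->fb access in this file becomes crtc->primary->fb, and the CRTC is now initialized with its private plane as the primary plane; detaching that plane would leave the CRTC with no scanout source, hence the new early return. Roughly how the wiring looks, with hypothetical "foo" funcs tables standing in for the driver's own:

static int foo_crtc_create(struct drm_device *dev, struct drm_crtc *crtc,
		struct drm_plane *primary)
{
	int ret;

	/* from init onward the scanout fb lives at crtc->primary->fb */
	ret = drm_crtc_init_with_planes(dev, crtc, primary,
			NULL /* no cursor plane */, &foo_crtc_funcs);
	if (ret)
		return ret;

	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
	return 0;
}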
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 1e893dd13859..66f33dba1ebb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -222,6 +222,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
222 struct drm_plane *plane = NULL; 222 struct drm_plane *plane = NULL;
223 struct mdp4_plane *mdp4_plane; 223 struct mdp4_plane *mdp4_plane;
224 int ret; 224 int ret;
225 enum drm_plane_type type;
225 226
226 mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); 227 mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
227 if (!mdp4_plane) { 228 if (!mdp4_plane) {
@@ -237,9 +238,10 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
237 mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats, 238 mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
238 ARRAY_SIZE(mdp4_plane->formats)); 239 ARRAY_SIZE(mdp4_plane->formats));
239 240
240 drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, 241 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
241 mdp4_plane->formats, mdp4_plane->nformats, 242 drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
242 private_plane); 243 mdp4_plane->formats, mdp4_plane->nformats,
244 type);
243 245
244 mdp4_plane_install_properties(plane, &plane->base); 246 mdp4_plane_install_properties(plane, &plane->base);
245 247
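drm_universal_plane_init() replaces the old private_plane bool with an explicit plane type, which is what lets the core expose primaries as real plane objects. The same pattern in isolation, with a hypothetical "foo" driver and a one-entry format list:

static struct drm_plane *foo_plane_create(struct drm_device *dev,
		bool primary)
{
	static const uint32_t formats[] = { DRM_FORMAT_XRGB8888 };
	enum drm_plane_type type = primary ? DRM_PLANE_TYPE_PRIMARY
					   : DRM_PLANE_TYPE_OVERLAY;
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return NULL;

	ret = drm_universal_plane_init(dev, plane, 0xff /* possible_crtcs */,
			&foo_plane_funcs, formats, ARRAY_SIZE(formats), type);
	if (ret) {
		kfree(plane);
		return NULL;
	}
	return plane;
}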
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index f2794021f086..6ea10bdb6e8f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -102,7 +102,7 @@ static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
102 102
103 /* grab reference to incoming scanout fb: */ 103 /* grab reference to incoming scanout fb: */
104 drm_framebuffer_reference(new_fb); 104 drm_framebuffer_reference(new_fb);
105 mdp5_crtc->base.fb = new_fb; 105 mdp5_crtc->base.primary->fb = new_fb;
106 mdp5_crtc->fb = new_fb; 106 mdp5_crtc->fb = new_fb;
107 107
108 if (old_fb) 108 if (old_fb)
@@ -289,14 +289,14 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
289 mode->type, mode->flags); 289 mode->type, mode->flags);
290 290
291 /* grab extra ref for update_scanout() */ 291 /* grab extra ref for update_scanout() */
292 drm_framebuffer_reference(crtc->fb); 292 drm_framebuffer_reference(crtc->primary->fb);
293 293
294 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb, 294 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
295 0, 0, mode->hdisplay, mode->vdisplay, 295 0, 0, mode->hdisplay, mode->vdisplay,
296 x << 16, y << 16, 296 x << 16, y << 16,
297 mode->hdisplay << 16, mode->vdisplay << 16); 297 mode->hdisplay << 16, mode->vdisplay << 16);
298 if (ret) { 298 if (ret) {
299 drm_framebuffer_unreference(crtc->fb); 299 drm_framebuffer_unreference(crtc->primary->fb);
300 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", 300 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
301 mdp5_crtc->name, ret); 301 mdp5_crtc->name, ret);
302 return ret; 302 return ret;
@@ -306,8 +306,8 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
306 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | 306 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
307 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); 307 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
308 308
309 update_fb(crtc, crtc->fb); 309 update_fb(crtc, crtc->primary->fb);
310 update_scanout(crtc, crtc->fb); 310 update_scanout(crtc, crtc->primary->fb);
311 311
312 return 0; 312 return 0;
313} 313}
@@ -338,19 +338,19 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
338 int ret; 338 int ret;
339 339
340 /* grab extra ref for update_scanout() */ 340 /* grab extra ref for update_scanout() */
341 drm_framebuffer_reference(crtc->fb); 341 drm_framebuffer_reference(crtc->primary->fb);
342 342
343 ret = mdp5_plane_mode_set(plane, crtc, crtc->fb, 343 ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
344 0, 0, mode->hdisplay, mode->vdisplay, 344 0, 0, mode->hdisplay, mode->vdisplay,
345 x << 16, y << 16, 345 x << 16, y << 16,
346 mode->hdisplay << 16, mode->vdisplay << 16); 346 mode->hdisplay << 16, mode->vdisplay << 16);
347 if (ret) { 347 if (ret) {
348 drm_framebuffer_unreference(crtc->fb); 348 drm_framebuffer_unreference(crtc->primary->fb);
349 return ret; 349 return ret;
350 } 350 }
351 351
352 update_fb(crtc, crtc->fb); 352 update_fb(crtc, crtc->primary->fb);
353 update_scanout(crtc, crtc->fb); 353 update_scanout(crtc, crtc->primary->fb);
354 354
355 return 0; 355 return 0;
356} 356}
@@ -524,6 +524,9 @@ void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
524 524
525void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane) 525void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
526{ 526{
527 /* don't actually detach our primary plane: */
528 if (to_mdp5_crtc(crtc)->plane == plane)
529 return;
527 set_attach(crtc, mdp5_plane_pipe(plane), NULL); 530 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
528} 531}
529 532
@@ -559,7 +562,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
559 562
560 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb); 563 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
561 564
562 drm_crtc_init(dev, crtc, &mdp5_crtc_funcs); 565 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
563 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 566 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
564 567
565 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base); 568 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 0ac8bb5e7e85..47f7bbb9c15a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -358,6 +358,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
358 struct drm_plane *plane = NULL; 358 struct drm_plane *plane = NULL;
359 struct mdp5_plane *mdp5_plane; 359 struct mdp5_plane *mdp5_plane;
360 int ret; 360 int ret;
361 enum drm_plane_type type;
361 362
362 mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); 363 mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
363 if (!mdp5_plane) { 364 if (!mdp5_plane) {
@@ -373,9 +374,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
373 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, 374 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
374 ARRAY_SIZE(mdp5_plane->formats)); 375 ARRAY_SIZE(mdp5_plane->formats));
375 376
376 drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, 377 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
377 mdp5_plane->formats, mdp5_plane->nformats, 378 drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
378 private_plane); 379 mdp5_plane->formats, mdp5_plane->nformats,
380 type);
379 381
380 mdp5_plane_install_properties(plane, &plane->base); 382 mdp5_plane_install_properties(plane, &plane->base);
381 383
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 3be48f7c36be..03455b64a245 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -101,7 +101,8 @@ void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
101 .count = 1, 101 .count = 1,
102 }; 102 };
103 mdp_irq_register(mdp_kms, &wait.irq); 103 mdp_irq_register(mdp_kms, &wait.irq);
104 wait_event(wait_event, (wait.count <= 0)); 104 wait_event_timeout(wait_event, (wait.count <= 0),
105 msecs_to_jiffies(100));
105 mdp_irq_unregister(mdp_kms, &wait.irq); 106 mdp_irq_unregister(mdp_kms, &wait.irq);
106} 107}
107 108
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e6adafc7eff3..f9de156b9e65 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -56,6 +56,10 @@ static char *vram;
56MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); 56MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
57module_param(vram, charp, 0); 57module_param(vram, charp, 0);
58 58
59/*
60 * Util/helpers:
61 */
62
59void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, 63void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
60 const char *dbgname) 64 const char *dbgname)
61{ 65{
@@ -143,6 +147,8 @@ static int msm_unload(struct drm_device *dev)
143 priv->vram.paddr, &attrs); 147 priv->vram.paddr, &attrs);
144 } 148 }
145 149
150 component_unbind_all(dev->dev, dev);
151
146 dev->dev_private = NULL; 152 dev->dev_private = NULL;
147 153
148 kfree(priv); 154 kfree(priv);
@@ -175,6 +181,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
175 struct msm_kms *kms; 181 struct msm_kms *kms;
176 int ret; 182 int ret;
177 183
184
178 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 185 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) { 186 if (!priv) {
180 dev_err(dev->dev, "failed to allocate private data\n"); 187 dev_err(dev->dev, "failed to allocate private data\n");
@@ -226,6 +233,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
226 (uint32_t)(priv->vram.paddr + size)); 233 (uint32_t)(priv->vram.paddr + size));
227 } 234 }
228 235
236 platform_set_drvdata(pdev, dev);
237
238 /* Bind all our sub-components: */
239 ret = component_bind_all(dev->dev, dev);
240 if (ret)
241 return ret;
242
229 switch (get_mdp_ver(pdev)) { 243 switch (get_mdp_ver(pdev)) {
230 case 4: 244 case 4:
231 kms = mdp4_kms_init(dev); 245 kms = mdp4_kms_init(dev);
@@ -281,8 +295,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
281 goto fail; 295 goto fail;
282 } 296 }
283 297
284 platform_set_drvdata(pdev, dev);
285
286#ifdef CONFIG_DRM_MSM_FBDEV 298#ifdef CONFIG_DRM_MSM_FBDEV
287 priv->fbdev = msm_fbdev_init(dev); 299 priv->fbdev = msm_fbdev_init(dev);
288#endif 300#endif
@@ -311,7 +323,6 @@ static void load_gpu(struct drm_device *dev)
311 gpu = NULL; 323 gpu = NULL;
312 /* not fatal */ 324 /* not fatal */
313 } 325 }
314 mutex_unlock(&dev->struct_mutex);
315 326
316 if (gpu) { 327 if (gpu) {
317 int ret; 328 int ret;
@@ -321,10 +332,16 @@ static void load_gpu(struct drm_device *dev)
321 dev_err(dev->dev, "gpu hw init failed: %d\n", ret); 332 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
322 gpu->funcs->destroy(gpu); 333 gpu->funcs->destroy(gpu);
323 gpu = NULL; 334 gpu = NULL;
335 } else {
336 /* give inactive pm a chance to kick in: */
337 msm_gpu_retire(gpu);
324 } 338 }
339
325 } 340 }
326 341
327 priv->gpu = gpu; 342 priv->gpu = gpu;
343
344 mutex_unlock(&dev->struct_mutex);
328} 345}
329 346
330static int msm_open(struct drm_device *dev, struct drm_file *file) 347static int msm_open(struct drm_device *dev, struct drm_file *file)
@@ -647,6 +664,12 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
647 struct drm_file *file) 664 struct drm_file *file)
648{ 665{
649 struct drm_msm_gem_new *args = data; 666 struct drm_msm_gem_new *args = data;
667
668 if (args->flags & ~MSM_BO_FLAGS) {
669 DRM_ERROR("invalid flags: %08x\n", args->flags);
670 return -EINVAL;
671 }
672
650 return msm_gem_new_handle(dev, file, args->size, 673 return msm_gem_new_handle(dev, file, args->size,
651 args->flags, &args->handle); 674 args->flags, &args->handle);
652} 675}
@@ -660,6 +683,11 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
660 struct drm_gem_object *obj; 683 struct drm_gem_object *obj;
661 int ret; 684 int ret;
662 685
686 if (args->op & ~MSM_PREP_FLAGS) {
687 DRM_ERROR("invalid op: %08x\n", args->op);
688 return -EINVAL;
689 }
690
663 obj = drm_gem_object_lookup(dev, file, args->handle); 691 obj = drm_gem_object_lookup(dev, file, args->handle);
664 if (!obj) 692 if (!obj)
665 return -ENOENT; 693 return -ENOENT;
@@ -714,7 +742,14 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
714 struct drm_file *file) 742 struct drm_file *file)
715{ 743{
716 struct drm_msm_wait_fence *args = data; 744 struct drm_msm_wait_fence *args = data;
717 return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout)); 745
746 if (args->pad) {
747 DRM_ERROR("invalid pad: %08x\n", args->pad);
748 return -EINVAL;
749 }
750
751 return msm_wait_fence_interruptable(dev, args->fence,
752 &TS(args->timeout));
718} 753}
719 754
720static const struct drm_ioctl_desc msm_ioctls[] = { 755static const struct drm_ioctl_desc msm_ioctls[] = {
@@ -819,18 +854,110 @@ static const struct dev_pm_ops msm_pm_ops = {
819}; 854};
820 855
821/* 856/*
857 * Componentized driver support:
858 */
859
860#ifdef CONFIG_OF
861/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
862 * (or probably any other).. so probably some room for some helpers
863 */
864static int compare_of(struct device *dev, void *data)
865{
866 return dev->of_node == data;
867}
868
869static int msm_drm_add_components(struct device *master, struct master *m)
870{
871 struct device_node *np = master->of_node;
872 unsigned i;
873 int ret;
874
875 for (i = 0; ; i++) {
876 struct device_node *node;
877
878 node = of_parse_phandle(np, "connectors", i);
879 if (!node)
880 break;
881
882 ret = component_master_add_child(m, compare_of, node);
883 of_node_put(node);
884
885 if (ret)
886 return ret;
887 }
888 return 0;
889}
890#else
891static int compare_dev(struct device *dev, void *data)
892{
893 return dev == data;
894}
895
896static int msm_drm_add_components(struct device *master, struct master *m)
897{
898 /* For non-DT case, it kinda sucks. We don't actually have a way
899 * to know whether or not we are waiting for certain devices (or if
900 * they are simply not present). But for non-DT we only need to
901 * care about apq8064/apq8060/etc (all mdp4/a3xx):
902 */
903 static const char *devnames[] = {
904 "hdmi_msm.0", "kgsl-3d0.0",
905 };
906 int i;
907
908 DBG("Adding components..");
909
910 for (i = 0; i < ARRAY_SIZE(devnames); i++) {
911 struct device *dev;
912 int ret;
913
914 dev = bus_find_device_by_name(&platform_bus_type,
915 NULL, devnames[i]);
916 if (!dev) {
917 dev_info(master, "still waiting for %s\n", devnames[i]);
918 return -EPROBE_DEFER;
919 }
920
921 ret = component_master_add_child(m, compare_dev, dev);
922 if (ret) {
923 DBG("could not add child: %d", ret);
924 return ret;
925 }
926 }
927
928 return 0;
929}
930#endif
931
932static int msm_drm_bind(struct device *dev)
933{
934 return drm_platform_init(&msm_driver, to_platform_device(dev));
935}
936
937static void msm_drm_unbind(struct device *dev)
938{
939 drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
940}
941
942static const struct component_master_ops msm_drm_ops = {
943 .add_components = msm_drm_add_components,
944 .bind = msm_drm_bind,
945 .unbind = msm_drm_unbind,
946};
947
948/*
822 * Platform driver: 949 * Platform driver:
823 */ 950 */
824 951
825static int msm_pdev_probe(struct platform_device *pdev) 952static int msm_pdev_probe(struct platform_device *pdev)
826{ 953{
827 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 954 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
828 return drm_platform_init(&msm_driver, pdev); 955 return component_master_add(&pdev->dev, &msm_drm_ops);
829} 956}
830 957
831static int msm_pdev_remove(struct platform_device *pdev) 958static int msm_pdev_remove(struct platform_device *pdev)
832{ 959{
833 drm_put_dev(platform_get_drvdata(pdev)); 960 component_master_del(&pdev->dev, &msm_drm_ops);
834 961
835 return 0; 962 return 0;
836} 963}
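This is the master half of the conversion: add_components() enumerates the children (by "connectors" phandle under DT, by fixed device name otherwise), and bind() only creates the drm_device once all of them are present. A condensed DT-only sketch with hypothetical "foo" names, reusing the compare_of() helper shown above:

static int foo_bind(struct device *dev)
{
	return drm_platform_init(&foo_driver, to_platform_device(dev));
}

static void foo_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static int foo_add_components(struct device *master, struct master *m)
{
	struct device_node *node;
	int i, ret;

	for (i = 0; (node = of_parse_phandle(master->of_node,
					     "connectors", i)); i++) {
		ret = component_master_add_child(m, compare_of, node);
		of_node_put(node);
		if (ret)
			return ret;
	}
	return 0;
}

static const struct component_master_ops foo_master_ops = {
	.add_components = foo_add_components,
	.bind = foo_bind,
	.unbind = foo_unbind,
};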
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3d63269c5b29..9d10ee0b5aac 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -22,6 +22,7 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/component.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/pm.h> 27#include <linux/pm.h>
27#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
@@ -69,6 +70,9 @@ struct msm_drm_private {
69 70
70 struct msm_kms *kms; 71 struct msm_kms *kms;
71 72
73 /* subordinate devices, if present: */
74 struct platform_device *hdmi_pdev, *gpu_pdev;
75
72 /* when we have more than one 'msm_gpu' these need to be an array: */ 76 /* when we have more than one 'msm_gpu' these need to be an array: */
73 struct msm_gpu *gpu; 77 struct msm_gpu *gpu;
74 struct msm_file_private *lastctx; 78 struct msm_file_private *lastctx;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5423e914e491..1f1f4cffdaed 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -23,7 +23,6 @@
23 * Cmdstream submission: 23 * Cmdstream submission:
24 */ 24 */
25 25
26#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ 26/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28#define BO_VALID 0x8000 27#define BO_VALID 0x8000
29#define BO_LOCKED 0x4000 28#define BO_LOCKED 0x4000
@@ -77,7 +76,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
77 goto out_unlock; 76 goto out_unlock;
78 } 77 }
79 78
80 if (submit_bo.flags & BO_INVALID_FLAGS) { 79 if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
81 DRM_ERROR("invalid flags: %x\n", submit_bo.flags); 80 DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
82 ret = -EINVAL; 81 ret = -EINVAL;
83 goto out_unlock; 82 goto out_unlock;
@@ -369,6 +368,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
369 goto out; 368 goto out;
370 } 369 }
371 370
371 /* validate input from userspace: */
372 switch (submit_cmd.type) {
373 case MSM_SUBMIT_CMD_BUF:
374 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
375 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
376 break;
377 default:
378 DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
379 ret = -EINVAL;
380 goto out;
381 }
382
372 ret = submit_bo(submit, submit_cmd.submit_idx, 383 ret = submit_bo(submit, submit_cmd.submit_idx,
373 &msm_obj, &iova, NULL); 384 &msm_obj, &iova, NULL);
374 if (ret) 385 if (ret)
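This switch, like the new flags checks added to msm_drv.c above, follows the standard rule for untrusted ioctl input: reject anything outside the currently defined set, so new bits or types can be added later without old kernels silently accepting garbage. The generic shape, with hypothetical FOO names:

#define FOO_VALID_FLAGS (FOO_FLAG_A | FOO_FLAG_B)

static int foo_check_flags(uint32_t flags)
{
	if (flags & ~FOO_VALID_FLAGS)
		return -EINVAL; /* unknown bit set by userspace */
	return 0;
}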
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0cfe3f426ee4..3e667ca1f2b9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -154,9 +154,18 @@ static int disable_axi(struct msm_gpu *gpu)
154 154
155int msm_gpu_pm_resume(struct msm_gpu *gpu) 155int msm_gpu_pm_resume(struct msm_gpu *gpu)
156{ 156{
157 struct drm_device *dev = gpu->dev;
157 int ret; 158 int ret;
158 159
159 DBG("%s", gpu->name); 160 DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
161
162 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
163
164 if (gpu->active_cnt++ > 0)
165 return 0;
166
167 if (WARN_ON(gpu->active_cnt <= 0))
168 return -EINVAL;
160 169
161 ret = enable_pwrrail(gpu); 170 ret = enable_pwrrail(gpu);
162 if (ret) 171 if (ret)
@@ -175,9 +184,18 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
175 184
176int msm_gpu_pm_suspend(struct msm_gpu *gpu) 185int msm_gpu_pm_suspend(struct msm_gpu *gpu)
177{ 186{
187 struct drm_device *dev = gpu->dev;
178 int ret; 188 int ret;
179 189
180 DBG("%s", gpu->name); 190 DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
191
192 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
193
194 if (--gpu->active_cnt > 0)
195 return 0;
196
197 if (WARN_ON(gpu->active_cnt < 0))
198 return -EINVAL;
181 199
182 ret = disable_axi(gpu); 200 ret = disable_axi(gpu);
183 if (ret) 201 if (ret)
@@ -195,6 +213,55 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
195} 213}
196 214
197/* 215/*
216 * Inactivity detection (for suspend):
217 */
218
219static void inactive_worker(struct work_struct *work)
220{
221 struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
222 struct drm_device *dev = gpu->dev;
223
224 if (gpu->inactive)
225 return;
226
227 DBG("%s: inactive!\n", gpu->name);
228 mutex_lock(&dev->struct_mutex);
229 if (!(msm_gpu_active(gpu) || gpu->inactive)) {
230 disable_axi(gpu);
231 disable_clk(gpu);
232 gpu->inactive = true;
233 }
234 mutex_unlock(&dev->struct_mutex);
235}
236
237static void inactive_handler(unsigned long data)
238{
239 struct msm_gpu *gpu = (struct msm_gpu *)data;
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241
242 queue_work(priv->wq, &gpu->inactive_work);
243}
244
245/* cancel inactive timer and make sure we are awake: */
246static void inactive_cancel(struct msm_gpu *gpu)
247{
248 DBG("%s", gpu->name);
249 del_timer(&gpu->inactive_timer);
250 if (gpu->inactive) {
251 enable_clk(gpu);
252 enable_axi(gpu);
253 gpu->inactive = false;
254 }
255}
256
257static void inactive_start(struct msm_gpu *gpu)
258{
259 DBG("%s", gpu->name);
260 mod_timer(&gpu->inactive_timer,
261 round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
262}
263
264/*
198 * Hangcheck detection for locked gpu: 265 * Hangcheck detection for locked gpu:
199 */ 266 */
200 267
@@ -206,7 +273,10 @@ static void recover_worker(struct work_struct *work)
206 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); 273 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
207 274
208 mutex_lock(&dev->struct_mutex); 275 mutex_lock(&dev->struct_mutex);
209 gpu->funcs->recover(gpu); 276 if (msm_gpu_active(gpu)) {
277 inactive_cancel(gpu);
278 gpu->funcs->recover(gpu);
279 }
210 mutex_unlock(&dev->struct_mutex); 280 mutex_unlock(&dev->struct_mutex);
211 281
212 msm_gpu_retire(gpu); 282 msm_gpu_retire(gpu);
@@ -281,6 +351,9 @@ static void retire_worker(struct work_struct *work)
281 } 351 }
282 352
283 mutex_unlock(&dev->struct_mutex); 353 mutex_unlock(&dev->struct_mutex);
354
355 if (!msm_gpu_active(gpu))
356 inactive_start(gpu);
284} 357}
285 358
286/* call from irq handler to schedule work to retire bo's */ 359/* call from irq handler to schedule work to retire bo's */
@@ -302,6 +375,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
302 375
303 gpu->submitted_fence = submit->fence; 376 gpu->submitted_fence = submit->fence;
304 377
378 inactive_cancel(gpu);
379
305 ret = gpu->funcs->submit(gpu, submit, ctx); 380 ret = gpu->funcs->submit(gpu, submit, ctx);
306 priv->lastctx = ctx; 381 priv->lastctx = ctx;
307 382
@@ -357,11 +432,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
357 gpu->dev = drm; 432 gpu->dev = drm;
358 gpu->funcs = funcs; 433 gpu->funcs = funcs;
359 gpu->name = name; 434 gpu->name = name;
435 gpu->inactive = true;
360 436
361 INIT_LIST_HEAD(&gpu->active_list); 437 INIT_LIST_HEAD(&gpu->active_list);
362 INIT_WORK(&gpu->retire_work, retire_worker); 438 INIT_WORK(&gpu->retire_work, retire_worker);
439 INIT_WORK(&gpu->inactive_work, inactive_worker);
363 INIT_WORK(&gpu->recover_work, recover_worker); 440 INIT_WORK(&gpu->recover_work, recover_worker);
364 441
442 setup_timer(&gpu->inactive_timer, inactive_handler,
443 (unsigned long)gpu);
365 setup_timer(&gpu->hangcheck_timer, hangcheck_handler, 444 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
366 (unsigned long)gpu); 445 (unsigned long)gpu);
367 446
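msm_gpu_pm_resume()/msm_gpu_pm_suspend() now behave as a refcount: only the 0->1 and 1->0 transitions of active_cnt actually touch the rails, clocks and AXI, and both calls must be made under struct_mutex and kept balanced. A caller-side sketch (hypothetical helper and register):

static void foo_poke_hw(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;

	mutex_lock(&dev->struct_mutex);
	WARN_ON(msm_gpu_pm_resume(gpu));  /* active_cnt 0->1 powers up */
	gpu_write(gpu, REG_FOO, 0);       /* REG_FOO is hypothetical */
	WARN_ON(msm_gpu_pm_suspend(gpu)); /* active_cnt 1->0 allows idle */
	mutex_unlock(&dev->struct_mutex);
}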
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 458db8c64c28..fad27008922f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -72,6 +72,10 @@ struct msm_gpu {
72 72
73 uint32_t submitted_fence; 73 uint32_t submitted_fence;
74 74
75 /* is gpu powered/active? */
76 int active_cnt;
77 bool inactive;
78
75 /* worker for handling active-list retiring: */ 79 /* worker for handling active-list retiring: */
76 struct work_struct retire_work; 80 struct work_struct retire_work;
77 81
@@ -91,7 +95,12 @@ struct msm_gpu {
91 uint32_t bsc; 95 uint32_t bsc;
92#endif 96#endif
93 97
94 /* Hang Detction: */ 98 /* Hang and Inactivity Detection:
99 */
100#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
101#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
102 struct timer_list inactive_timer;
103 struct work_struct inactive_work;
95#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ 104#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
96#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) 105#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
97 struct timer_list hangcheck_timer; 106 struct timer_list hangcheck_timer;
@@ -99,6 +108,11 @@ struct msm_gpu {
99 struct work_struct recover_work; 108 struct work_struct recover_work;
100}; 109};
101 110
111static inline bool msm_gpu_active(struct msm_gpu *gpu)
112{
113 return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
114}
115
102static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) 116static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
103{ 117{
104 msm_writel(data, gpu->mmio + (reg << 2)); 118 msm_writel(data, gpu->mmio + (reg << 2));
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d310c195bdfe..b7d216264775 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -48,6 +48,7 @@ nouveau-y += core/subdev/bios/therm.o
48nouveau-y += core/subdev/bios/vmap.o 48nouveau-y += core/subdev/bios/vmap.o
49nouveau-y += core/subdev/bios/volt.o 49nouveau-y += core/subdev/bios/volt.o
50nouveau-y += core/subdev/bios/xpio.o 50nouveau-y += core/subdev/bios/xpio.o
51nouveau-y += core/subdev/bios/P0260.o
51nouveau-y += core/subdev/bus/hwsq.o 52nouveau-y += core/subdev/bus/hwsq.o
52nouveau-y += core/subdev/bus/nv04.o 53nouveau-y += core/subdev/bus/nv04.o
53nouveau-y += core/subdev/bus/nv31.o 54nouveau-y += core/subdev/bus/nv31.o
@@ -77,6 +78,7 @@ nouveau-y += core/subdev/devinit/nv98.o
77nouveau-y += core/subdev/devinit/nva3.o 78nouveau-y += core/subdev/devinit/nva3.o
78nouveau-y += core/subdev/devinit/nvaf.o 79nouveau-y += core/subdev/devinit/nvaf.o
79nouveau-y += core/subdev/devinit/nvc0.o 80nouveau-y += core/subdev/devinit/nvc0.o
81nouveau-y += core/subdev/devinit/gm107.o
80nouveau-y += core/subdev/fb/base.o 82nouveau-y += core/subdev/fb/base.o
81nouveau-y += core/subdev/fb/nv04.o 83nouveau-y += core/subdev/fb/nv04.o
82nouveau-y += core/subdev/fb/nv10.o 84nouveau-y += core/subdev/fb/nv10.o
@@ -100,6 +102,7 @@ nouveau-y += core/subdev/fb/nvaa.o
100nouveau-y += core/subdev/fb/nvaf.o 102nouveau-y += core/subdev/fb/nvaf.o
101nouveau-y += core/subdev/fb/nvc0.o 103nouveau-y += core/subdev/fb/nvc0.o
102nouveau-y += core/subdev/fb/nve0.o 104nouveau-y += core/subdev/fb/nve0.o
105nouveau-y += core/subdev/fb/gm107.o
103nouveau-y += core/subdev/fb/ramnv04.o 106nouveau-y += core/subdev/fb/ramnv04.o
104nouveau-y += core/subdev/fb/ramnv10.o 107nouveau-y += core/subdev/fb/ramnv10.o
105nouveau-y += core/subdev/fb/ramnv1a.o 108nouveau-y += core/subdev/fb/ramnv1a.o
@@ -114,6 +117,7 @@ nouveau-y += core/subdev/fb/ramnva3.o
114nouveau-y += core/subdev/fb/ramnvaa.o 117nouveau-y += core/subdev/fb/ramnvaa.o
115nouveau-y += core/subdev/fb/ramnvc0.o 118nouveau-y += core/subdev/fb/ramnvc0.o
116nouveau-y += core/subdev/fb/ramnve0.o 119nouveau-y += core/subdev/fb/ramnve0.o
120nouveau-y += core/subdev/fb/ramgm107.o
117nouveau-y += core/subdev/fb/sddr3.o 121nouveau-y += core/subdev/fb/sddr3.o
118nouveau-y += core/subdev/fb/gddr5.o 122nouveau-y += core/subdev/fb/gddr5.o
119nouveau-y += core/subdev/gpio/base.o 123nouveau-y += core/subdev/gpio/base.o
@@ -136,7 +140,8 @@ nouveau-y += core/subdev/instmem/base.o
136nouveau-y += core/subdev/instmem/nv04.o 140nouveau-y += core/subdev/instmem/nv04.o
137nouveau-y += core/subdev/instmem/nv40.o 141nouveau-y += core/subdev/instmem/nv40.o
138nouveau-y += core/subdev/instmem/nv50.o 142nouveau-y += core/subdev/instmem/nv50.o
139nouveau-y += core/subdev/ltcg/nvc0.o 143nouveau-y += core/subdev/ltcg/gf100.o
144nouveau-y += core/subdev/ltcg/gm107.o
140nouveau-y += core/subdev/mc/base.o 145nouveau-y += core/subdev/mc/base.o
141nouveau-y += core/subdev/mc/nv04.o 146nouveau-y += core/subdev/mc/nv04.o
142nouveau-y += core/subdev/mc/nv40.o 147nouveau-y += core/subdev/mc/nv40.o
@@ -170,6 +175,7 @@ nouveau-y += core/subdev/therm/nva3.o
170nouveau-y += core/subdev/therm/nvd0.o 175nouveau-y += core/subdev/therm/nvd0.o
171nouveau-y += core/subdev/timer/base.o 176nouveau-y += core/subdev/timer/base.o
172nouveau-y += core/subdev/timer/nv04.o 177nouveau-y += core/subdev/timer/nv04.o
178nouveau-y += core/subdev/timer/gk20a.o
173nouveau-y += core/subdev/vm/base.o 179nouveau-y += core/subdev/vm/base.o
174nouveau-y += core/subdev/vm/nv04.o 180nouveau-y += core/subdev/vm/nv04.o
175nouveau-y += core/subdev/vm/nv41.o 181nouveau-y += core/subdev/vm/nv41.o
@@ -206,6 +212,7 @@ nouveau-y += core/engine/device/nv40.o
206nouveau-y += core/engine/device/nv50.o 212nouveau-y += core/engine/device/nv50.o
207nouveau-y += core/engine/device/nvc0.o 213nouveau-y += core/engine/device/nvc0.o
208nouveau-y += core/engine/device/nve0.o 214nouveau-y += core/engine/device/nve0.o
215nouveau-y += core/engine/device/gm100.o
209nouveau-y += core/engine/disp/base.o 216nouveau-y += core/engine/disp/base.o
210nouveau-y += core/engine/disp/nv04.o 217nouveau-y += core/engine/disp/nv04.o
211nouveau-y += core/engine/disp/nv50.o 218nouveau-y += core/engine/disp/nv50.o
@@ -216,6 +223,7 @@ nouveau-y += core/engine/disp/nva3.o
216nouveau-y += core/engine/disp/nvd0.o 223nouveau-y += core/engine/disp/nvd0.o
217nouveau-y += core/engine/disp/nve0.o 224nouveau-y += core/engine/disp/nve0.o
218nouveau-y += core/engine/disp/nvf0.o 225nouveau-y += core/engine/disp/nvf0.o
226nouveau-y += core/engine/disp/gm107.o
219nouveau-y += core/engine/disp/dacnv50.o 227nouveau-y += core/engine/disp/dacnv50.o
220nouveau-y += core/engine/disp/dport.o 228nouveau-y += core/engine/disp/dport.o
221nouveau-y += core/engine/disp/hdanva3.o 229nouveau-y += core/engine/disp/hdanva3.o
@@ -242,13 +250,14 @@ nouveau-y += core/engine/graph/ctxnv40.o
242nouveau-y += core/engine/graph/ctxnv50.o 250nouveau-y += core/engine/graph/ctxnv50.o
243nouveau-y += core/engine/graph/ctxnvc0.o 251nouveau-y += core/engine/graph/ctxnvc0.o
244nouveau-y += core/engine/graph/ctxnvc1.o 252nouveau-y += core/engine/graph/ctxnvc1.o
245nouveau-y += core/engine/graph/ctxnvc3.o 253nouveau-y += core/engine/graph/ctxnvc4.o
246nouveau-y += core/engine/graph/ctxnvc8.o 254nouveau-y += core/engine/graph/ctxnvc8.o
247nouveau-y += core/engine/graph/ctxnvd7.o 255nouveau-y += core/engine/graph/ctxnvd7.o
248nouveau-y += core/engine/graph/ctxnvd9.o 256nouveau-y += core/engine/graph/ctxnvd9.o
249nouveau-y += core/engine/graph/ctxnve4.o 257nouveau-y += core/engine/graph/ctxnve4.o
250nouveau-y += core/engine/graph/ctxnvf0.o 258nouveau-y += core/engine/graph/ctxnvf0.o
251nouveau-y += core/engine/graph/ctxnv108.o 259nouveau-y += core/engine/graph/ctxnv108.o
260nouveau-y += core/engine/graph/ctxgm107.o
252nouveau-y += core/engine/graph/nv04.o 261nouveau-y += core/engine/graph/nv04.o
253nouveau-y += core/engine/graph/nv10.o 262nouveau-y += core/engine/graph/nv10.o
254nouveau-y += core/engine/graph/nv20.o 263nouveau-y += core/engine/graph/nv20.o
@@ -261,13 +270,14 @@ nouveau-y += core/engine/graph/nv40.o
261nouveau-y += core/engine/graph/nv50.o 270nouveau-y += core/engine/graph/nv50.o
262nouveau-y += core/engine/graph/nvc0.o 271nouveau-y += core/engine/graph/nvc0.o
263nouveau-y += core/engine/graph/nvc1.o 272nouveau-y += core/engine/graph/nvc1.o
264nouveau-y += core/engine/graph/nvc3.o 273nouveau-y += core/engine/graph/nvc4.o
265nouveau-y += core/engine/graph/nvc8.o 274nouveau-y += core/engine/graph/nvc8.o
266nouveau-y += core/engine/graph/nvd7.o 275nouveau-y += core/engine/graph/nvd7.o
267nouveau-y += core/engine/graph/nvd9.o 276nouveau-y += core/engine/graph/nvd9.o
268nouveau-y += core/engine/graph/nve4.o 277nouveau-y += core/engine/graph/nve4.o
269nouveau-y += core/engine/graph/nvf0.o 278nouveau-y += core/engine/graph/nvf0.o
270nouveau-y += core/engine/graph/nv108.o 279nouveau-y += core/engine/graph/nv108.o
280nouveau-y += core/engine/graph/gm107.o
271nouveau-y += core/engine/mpeg/nv31.o 281nouveau-y += core/engine/mpeg/nv31.o
272nouveau-y += core/engine/mpeg/nv40.o 282nouveau-y += core/engine/mpeg/nv40.o
273nouveau-y += core/engine/mpeg/nv44.o 283nouveau-y += core/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
index 1ce95a8709df..0594a599f6fb 100644
--- a/drivers/gpu/drm/nouveau/core/core/namedb.c
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -167,7 +167,7 @@ int
167nouveau_namedb_create_(struct nouveau_object *parent, 167nouveau_namedb_create_(struct nouveau_object *parent,
168 struct nouveau_object *engine, 168 struct nouveau_object *engine,
169 struct nouveau_oclass *oclass, u32 pclass, 169 struct nouveau_oclass *oclass, u32 pclass,
170 struct nouveau_oclass *sclass, u32 engcls, 170 struct nouveau_oclass *sclass, u64 engcls,
171 int length, void **pobject) 171 int length, void **pobject)
172{ 172{
173 struct nouveau_namedb *namedb; 173 struct nouveau_namedb *namedb;
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index 313380ce632d..dee5d1235e9b 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -49,7 +49,7 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
49 49
50 mask = nv_parent(parent)->engine; 50 mask = nv_parent(parent)->engine;
51 while (mask) { 51 while (mask) {
52 int i = ffsll(mask) - 1; 52 int i = __ffs64(mask);
53 53
54 if (nv_iclass(parent, NV_CLIENT_CLASS)) 54 if (nv_iclass(parent, NV_CLIENT_CLASS))
55 engine = nv_engine(nv_client(parent)->device); 55 engine = nv_engine(nv_client(parent)->device);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index dd01c6c435d6..18c8c7245b73 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -131,8 +131,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
131 if (ret) 131 if (ret)
132 return ret; 132 return ret;
133 133
134 mmio_base = pci_resource_start(device->pdev, 0); 134 mmio_base = nv_device_resource_start(device, 0);
135 mmio_size = pci_resource_len(device->pdev, 0); 135 mmio_size = nv_device_resource_len(device, 0);
136 136
137 /* translate api disable mask into internal mapping */ 137 /* translate api disable mask into internal mapping */
138 disable = args->debug0; 138 disable = args->debug0;
@@ -185,6 +185,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
185 case 0x0e0: 185 case 0x0e0:
186 case 0x0f0: 186 case 0x0f0:
187 case 0x100: device->card_type = NV_E0; break; 187 case 0x100: device->card_type = NV_E0; break;
188 case 0x110: device->card_type = GM100; break;
188 default: 189 default:
189 break; 190 break;
190 } 191 }
@@ -208,6 +209,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
208 case NV_C0: 209 case NV_C0:
209 case NV_D0: ret = nvc0_identify(device); break; 210 case NV_D0: ret = nvc0_identify(device); break;
210 case NV_E0: ret = nve0_identify(device); break; 211 case NV_E0: ret = nve0_identify(device); break;
212 case GM100: ret = gm100_identify(device); break;
211 default: 213 default:
212 ret = -EINVAL; 214 ret = -EINVAL;
213 break; 215 break;
@@ -446,6 +448,72 @@ nouveau_device_dtor(struct nouveau_object *object)
446 nouveau_engine_destroy(&device->base); 448 nouveau_engine_destroy(&device->base);
447} 449}
448 450
451resource_size_t
452nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
453{
454 if (nv_device_is_pci(device)) {
455 return pci_resource_start(device->pdev, bar);
456 } else {
457 struct resource *res;
458 res = platform_get_resource(device->platformdev,
459 IORESOURCE_MEM, bar);
460 if (!res)
461 return 0;
462 return res->start;
463 }
464}
465
466resource_size_t
467nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
468{
469 if (nv_device_is_pci(device)) {
470 return pci_resource_len(device->pdev, bar);
471 } else {
472 struct resource *res;
473 res = platform_get_resource(device->platformdev,
474 IORESOURCE_MEM, bar);
475 if (!res)
476 return 0;
477 return resource_size(res);
478 }
479}
480
481dma_addr_t
482nv_device_map_page(struct nouveau_device *device, struct page *page)
483{
484 dma_addr_t ret;
485
486 if (nv_device_is_pci(device)) {
487 ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
488 PCI_DMA_BIDIRECTIONAL);
489 if (pci_dma_mapping_error(device->pdev, ret))
490 ret = 0;
491 } else {
492 ret = page_to_phys(page);
493 }
494
495 return ret;
496}
497
498void
499nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
500{
501 if (nv_device_is_pci(device))
502 pci_unmap_page(device->pdev, addr, PAGE_SIZE,
503 PCI_DMA_BIDIRECTIONAL);
504}
505
506int
507nv_device_get_irq(struct nouveau_device *device, bool stall)
508{
509 if (nv_device_is_pci(device)) {
510 return device->pdev->irq;
511 } else {
512 return platform_get_irq_byname(device->platformdev,
513 stall ? "stall" : "nonstall");
514 }
515}
516
449static struct nouveau_oclass 517static struct nouveau_oclass
450nouveau_device_oclass = { 518nouveau_device_oclass = {
451 .handle = NV_ENGINE(DEVICE, 0x00), 519 .handle = NV_ENGINE(DEVICE, 0x00),
@@ -457,8 +525,8 @@ nouveau_device_oclass = {
457}; 525};
458 526
459int 527int
460nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname, 528nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
461 const char *cfg, const char *dbg, 529 const char *sname, const char *cfg, const char *dbg,
462 int length, void **pobject) 530 int length, void **pobject)
463{ 531{
464 struct nouveau_device *device; 532 struct nouveau_device *device;
@@ -476,7 +544,14 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
476 if (ret) 544 if (ret)
477 goto done; 545 goto done;
478 546
479 device->pdev = pdev; 547 switch (type) {
548 case NOUVEAU_BUS_PCI:
549 device->pdev = dev;
550 break;
551 case NOUVEAU_BUS_PLATFORM:
552 device->platformdev = dev;
553 break;
554 }
480 device->handle = name; 555 device->handle = name;
481 device->cfgopt = cfg; 556 device->cfgopt = cfg;
482 device->dbgopt = dbg; 557 device->dbgopt = dbg;
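
[Note: nouveau_device_create_() now takes an opaque device pointer plus an explicit bus type instead of a struct pci_dev, and files it into the matching field. Assuming the nouveau_device_create() convenience macro forwards its arguments in this new order (the macro itself is not shown in this hunk), a platform-bus probe could look roughly like the sketch below; the probe name and the use of dev_name() for the sname argument are illustrative.]

	/* Hypothetical caller of the new constructor signature. */
	static int example_platform_probe(struct platform_device *pdev)
	{
		struct nouveau_device *device;

		return nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM,
					     pdev->id, dev_name(&pdev->dev),
					     NULL, NULL, &device);
	}
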
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
new file mode 100644
index 000000000000..d258c21c4a22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bus.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/ltcg.h>
37#include <subdev/ibus.h>
38#include <subdev/instmem.h>
39#include <subdev/vm.h>
40#include <subdev/bar.h>
41#include <subdev/pwr.h>
42#include <subdev/volt.h>
43
44#include <engine/device.h>
45#include <engine/dmaobj.h>
46#include <engine/fifo.h>
47#include <engine/software.h>
48#include <engine/graph.h>
49#include <engine/disp.h>
50#include <engine/copy.h>
51#include <engine/bsp.h>
52#include <engine/vp.h>
53#include <engine/ppp.h>
54#include <engine/perfmon.h>
55
56int
57gm100_identify(struct nouveau_device *device)
58{
59 switch (device->chipset) {
60 case 0x117:
61 device->cname = "GM107";
62 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
63 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
64 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
65 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
66#if 0
67 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
68#endif
69 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
70 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
71 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
72 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
73 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
74 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
75 device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass;
76 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
77 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
78 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
79 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
80#if 0
81 device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
82 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
83#endif
84 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
85 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
86 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
87 device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
88 device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
89 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
90#if 0
91 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
92#endif
93 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
94#if 0
95 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
96 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
97 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
98#endif
99 break;
100 default:
101 nv_fatal(device, "unknown Maxwell chipset\n");
102 return -EINVAL;
103 }
104
105 return 0;
106}
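
[Note: gm100_identify() follows the established per-family pattern of one case per chipset populating device->oclass[]. GM107 (chipset 0x117) borrows Kepler-era classes (nve0_*, nv108_fifo_oclass) wherever no Maxwell-specific implementation exists yet, and the #if 0 blocks mark subdevs and engines (THERM, PWR/VOLT, COPY1, video) that are not brought up at this point. A later Maxwell chipset would add another case ahead of the default, sketched here with a purely hypothetical chipset id and name.]

	case 0x118: /* hypothetical */
		device->cname = "GM10x";
		/* fill device->oclass[...] as in the 0x117 case */
		break;
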
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 32113b08c4d5..0a51ff4e9e00 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -60,7 +60,7 @@ nv04_identify(struct nouveau_device *device)
60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; 60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
61 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; 61 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
64 break; 64 break;
65 case 0x05: 65 case 0x05:
66 device->cname = "NV05"; 66 device->cname = "NV05";
@@ -78,7 +78,7 @@ nv04_identify(struct nouveau_device *device)
78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; 78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
79 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
81 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 81 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
82 break; 82 break;
83 default: 83 default:
84 nv_fatal(device, "unknown RIVA chipset\n"); 84 nv_fatal(device, "unknown RIVA chipset\n");
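
[Note: the one-character change in these hunks, dropping the address-of operator from nv04_disp_oclass, recurs through every identify table below (nv10 through nve0, and likewise for the nv50/nv84/nv94/nva0/nva3/nvd0 display classes): the display-engine class symbols are evidently now exported as pointers rather than as struct instances, so the tables store the symbol directly. In header terms the conversion would read roughly as follows; this is a sketch of the convention, not the literal header diff from this series.]

	-extern struct nouveau_oclass  nv04_disp_oclass;  /* old: struct instance, needs '&' */
	+extern struct nouveau_oclass *nv04_disp_oclass;  /* new: pointer, assigned directly */
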
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 744f15d7e131..e008de8b51b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -60,7 +60,7 @@ nv10_identify(struct nouveau_device *device)
60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 62 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
64 break; 64 break;
65 case 0x15: 65 case 0x15:
66 device->cname = "NV15"; 66 device->cname = "NV15";
@@ -79,7 +79,7 @@ nv10_identify(struct nouveau_device *device)
79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 80 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
83 break; 83 break;
84 case 0x16: 84 case 0x16:
85 device->cname = "NV16"; 85 device->cname = "NV16";
@@ -98,7 +98,7 @@ nv10_identify(struct nouveau_device *device)
98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
99 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 99 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 101 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
102 break; 102 break;
103 case 0x1a: 103 case 0x1a:
104 device->cname = "nForce"; 104 device->cname = "nForce";
@@ -117,7 +117,7 @@ nv10_identify(struct nouveau_device *device)
117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
118 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 118 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
120 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 120 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
121 break; 121 break;
122 case 0x11: 122 case 0x11:
123 device->cname = "NV11"; 123 device->cname = "NV11";
@@ -136,7 +136,7 @@ nv10_identify(struct nouveau_device *device)
136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 137 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 139 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
140 break; 140 break;
141 case 0x17: 141 case 0x17:
142 device->cname = "NV17"; 142 device->cname = "NV17";
@@ -155,7 +155,7 @@ nv10_identify(struct nouveau_device *device)
155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
156 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 156 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
158 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 158 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
159 break; 159 break;
160 case 0x1f: 160 case 0x1f:
161 device->cname = "nForce2"; 161 device->cname = "nForce2";
@@ -174,7 +174,7 @@ nv10_identify(struct nouveau_device *device)
174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 175 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
177 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 177 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
178 break; 178 break;
179 case 0x18: 179 case 0x18:
180 device->cname = "NV18"; 180 device->cname = "NV18";
@@ -193,7 +193,7 @@ nv10_identify(struct nouveau_device *device)
193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
194 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 194 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
196 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 196 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
197 break; 197 break;
198 default: 198 default:
199 nv_fatal(device, "unknown Celsius chipset\n"); 199 nv_fatal(device, "unknown Celsius chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 27ba61fb2710..7b629a3aed05 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -63,7 +63,7 @@ nv20_identify(struct nouveau_device *device)
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 66 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
67 break; 67 break;
68 case 0x25: 68 case 0x25:
69 device->cname = "NV25"; 69 device->cname = "NV25";
@@ -82,7 +82,7 @@ nv20_identify(struct nouveau_device *device)
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 85 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
86 break; 86 break;
87 case 0x28: 87 case 0x28:
88 device->cname = "NV28"; 88 device->cname = "NV28";
@@ -101,7 +101,7 @@ nv20_identify(struct nouveau_device *device)
101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
104 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 104 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
105 break; 105 break;
106 case 0x2a: 106 case 0x2a:
107 device->cname = "NV2A"; 107 device->cname = "NV2A";
@@ -120,7 +120,7 @@ nv20_identify(struct nouveau_device *device)
120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
121 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 121 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; 122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
123 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 123 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
124 break; 124 break;
125 default: 125 default:
126 nv_fatal(device, "unknown Kelvin chipset\n"); 126 nv_fatal(device, "unknown Kelvin chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index fd47ace67543..7dfddd5a1908 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -63,7 +63,7 @@ nv30_identify(struct nouveau_device *device)
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 66 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
67 break; 67 break;
68 case 0x35: 68 case 0x35:
69 device->cname = "NV35"; 69 device->cname = "NV35";
@@ -82,7 +82,7 @@ nv30_identify(struct nouveau_device *device)
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 85 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
86 break; 86 break;
87 case 0x31: 87 case 0x31:
88 device->cname = "NV31"; 88 device->cname = "NV31";
@@ -102,7 +102,7 @@ nv30_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
104 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 104 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
105 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 105 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
106 break; 106 break;
107 case 0x36: 107 case 0x36:
108 device->cname = "NV36"; 108 device->cname = "NV36";
@@ -122,7 +122,7 @@ nv30_identify(struct nouveau_device *device)
122 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 122 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
124 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 124 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
125 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 125 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
126 break; 126 break;
127 case 0x34: 127 case 0x34:
128 device->cname = "NV34"; 128 device->cname = "NV34";
@@ -142,7 +142,7 @@ nv30_identify(struct nouveau_device *device)
142 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 142 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; 143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
144 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 144 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
145 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 145 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
146 break; 146 break;
147 default: 147 default:
148 nv_fatal(device, "unknown Rankine chipset\n"); 148 nv_fatal(device, "unknown Rankine chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 08b88591ed60..7c1ce6cf4f1f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -70,7 +70,7 @@ nv40_identify(struct nouveau_device *device)
70 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 70 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
72 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 72 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
73 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 73 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
74 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 74 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
75 break; 75 break;
76 case 0x41: 76 case 0x41:
@@ -93,7 +93,7 @@ nv40_identify(struct nouveau_device *device)
93 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 93 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
95 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 95 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
96 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 96 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
97 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 97 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
98 break; 98 break;
99 case 0x42: 99 case 0x42:
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
116 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 116 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
118 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 118 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
119 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 119 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
120 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 120 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
121 break; 121 break;
122 case 0x43: 122 case 0x43:
@@ -139,7 +139,7 @@ nv40_identify(struct nouveau_device *device)
139 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 139 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
141 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 141 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
142 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 142 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
143 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 143 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
144 break; 144 break;
145 case 0x45: 145 case 0x45:
@@ -162,7 +162,7 @@ nv40_identify(struct nouveau_device *device)
162 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 162 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
164 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 164 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
165 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 165 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
166 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 166 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
167 break; 167 break;
168 case 0x47: 168 case 0x47:
@@ -185,7 +185,7 @@ nv40_identify(struct nouveau_device *device)
185 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 185 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
187 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 187 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
188 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 188 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
189 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 189 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
190 break; 190 break;
191 case 0x49: 191 case 0x49:
@@ -208,7 +208,7 @@ nv40_identify(struct nouveau_device *device)
208 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 208 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
210 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 210 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
211 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 211 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
212 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 212 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
213 break; 213 break;
214 case 0x4b: 214 case 0x4b:
@@ -231,7 +231,7 @@ nv40_identify(struct nouveau_device *device)
231 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 231 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
233 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 233 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
234 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 234 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
235 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 235 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
236 break; 236 break;
237 case 0x44: 237 case 0x44:
@@ -254,7 +254,7 @@ nv40_identify(struct nouveau_device *device)
254 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 254 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
257 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 257 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
258 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 258 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
259 break; 259 break;
260 case 0x46: 260 case 0x46:
@@ -277,7 +277,7 @@ nv40_identify(struct nouveau_device *device)
277 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 277 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
279 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 279 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
280 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 280 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
281 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 281 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
282 break; 282 break;
283 case 0x4a: 283 case 0x4a:
@@ -300,7 +300,7 @@ nv40_identify(struct nouveau_device *device)
300 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
302 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 302 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
303 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 303 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
304 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 304 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
305 break; 305 break;
306 case 0x4c: 306 case 0x4c:
@@ -323,7 +323,7 @@ nv40_identify(struct nouveau_device *device)
323 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 323 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
325 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 325 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
326 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 326 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
327 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 327 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
328 break; 328 break;
329 case 0x4e: 329 case 0x4e:
@@ -346,7 +346,7 @@ nv40_identify(struct nouveau_device *device)
346 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 346 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
348 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 348 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
349 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 349 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
350 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 350 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
351 break; 351 break;
352 case 0x63: 352 case 0x63:
@@ -369,7 +369,7 @@ nv40_identify(struct nouveau_device *device)
369 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 369 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
371 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 371 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
372 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 372 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
373 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 373 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
374 break; 374 break;
375 case 0x67: 375 case 0x67:
@@ -392,7 +392,7 @@ nv40_identify(struct nouveau_device *device)
392 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 392 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
394 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 394 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
395 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 395 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
396 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 396 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
397 break; 397 break;
398 case 0x68: 398 case 0x68:
@@ -415,7 +415,7 @@ nv40_identify(struct nouveau_device *device)
415 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 415 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
417 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass; 417 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
418 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 418 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
419 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass; 419 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
420 break; 420 break;
421 default: 421 default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 81d5c26643d5..66499fa0f758 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -79,7 +79,7 @@ nv50_identify(struct nouveau_device *device)
79 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
81 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass; 81 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
83 device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass; 83 device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
84 break; 84 break;
85 case 0x84: 85 case 0x84:
@@ -107,7 +107,7 @@ nv50_identify(struct nouveau_device *device)
107 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 107 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
108 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 108 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
109 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 109 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
110 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 110 device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
111 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 111 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
112 break; 112 break;
113 case 0x86: 113 case 0x86:
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
135 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 135 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
136 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 136 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
137 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 137 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
138 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 138 device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
139 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 139 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
140 break; 140 break;
141 case 0x92: 141 case 0x92:
@@ -163,7 +163,7 @@ nv50_identify(struct nouveau_device *device)
163 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 163 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
164 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 164 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
165 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 165 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
166 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 166 device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
167 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 167 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
168 break; 168 break;
169 case 0x94: 169 case 0x94:
@@ -191,7 +191,7 @@ nv50_identify(struct nouveau_device *device)
191 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 191 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
192 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 192 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
193 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 193 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
194 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 194 device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
195 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 195 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
196 break; 196 break;
197 case 0x96: 197 case 0x96:
@@ -219,7 +219,7 @@ nv50_identify(struct nouveau_device *device)
219 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 219 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
220 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 220 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
221 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 221 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 222 device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 223 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
224 break; 224 break;
225 case 0x98: 225 case 0x98:
@@ -247,7 +247,7 @@ nv50_identify(struct nouveau_device *device)
247 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 247 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
248 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 248 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
249 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 249 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
250 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 250 device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
251 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 251 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
252 break; 252 break;
253 case 0xa0: 253 case 0xa0:
@@ -275,7 +275,7 @@ nv50_identify(struct nouveau_device *device)
275 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 275 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
276 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 276 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
277 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 277 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
278 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; 278 device->oclass[NVDEV_ENGINE_DISP ] = nva0_disp_oclass;
279 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 279 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
280 break; 280 break;
281 case 0xaa: 281 case 0xaa:
@@ -303,7 +303,7 @@ nv50_identify(struct nouveau_device *device)
303 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 303 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
304 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 304 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
305 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 305 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
306 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 306 device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
307 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 307 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
308 break; 308 break;
309 case 0xac: 309 case 0xac:
@@ -331,7 +331,7 @@ nv50_identify(struct nouveau_device *device)
331 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 331 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
332 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 332 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
333 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 333 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
334 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 334 device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
335 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass; 335 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
336 break; 336 break;
337 case 0xa3: 337 case 0xa3:
@@ -361,7 +361,7 @@ nv50_identify(struct nouveau_device *device)
361 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 361 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
362 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 362 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
363 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 363 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
364 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 364 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
365 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass; 365 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
366 break; 366 break;
367 case 0xa5: 367 case 0xa5:
@@ -390,7 +390,7 @@ nv50_identify(struct nouveau_device *device)
390 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 390 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
391 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 391 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
392 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 392 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
393 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 393 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
394 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass; 394 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
395 break; 395 break;
396 case 0xa8: 396 case 0xa8:
@@ -419,7 +419,7 @@ nv50_identify(struct nouveau_device *device)
419 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 419 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
420 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 420 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
421 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 421 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
422 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 422 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
423 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass; 423 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
424 break; 424 break;
425 case 0xaf: 425 case 0xaf:
@@ -448,7 +448,7 @@ nv50_identify(struct nouveau_device *device)
448 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 448 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
449 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 449 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
450 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 450 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
451 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 451 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
452 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass; 452 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
453 break; 453 break;
454 default: 454 default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index b7d66b59f43d..2075b3027052 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -70,7 +70,7 @@ nvc0_identify(struct nouveau_device *device)
70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
72 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
73 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 73 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -86,7 +86,7 @@ nvc0_identify(struct nouveau_device *device)
86 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 86 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
87 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 87 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
88 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 88 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
89 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 89 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
90 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 90 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
91 break; 91 break;
92 case 0xc4: 92 case 0xc4:
@@ -102,7 +102,7 @@ nvc0_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 102 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
104 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 104 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
105 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 105 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
106 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 106 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
107 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 107 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
108 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 108 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -112,13 +112,13 @@ nvc0_identify(struct nouveau_device *device)
112 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 112 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
113 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 113 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
114 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 114 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
115 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 115 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
116 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 116 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
117 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 117 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
118 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 118 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
119 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 119 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
120 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 120 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
121 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 121 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
122 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 122 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
123 break; 123 break;
124 case 0xc3: 124 case 0xc3:
@@ -134,7 +134,7 @@ nvc0_identify(struct nouveau_device *device)
134 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 134 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
135 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 135 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
136 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 136 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
137 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 137 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
138 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 138 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
139 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 139 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
140 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 140 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -144,12 +144,12 @@ nvc0_identify(struct nouveau_device *device)
144 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 144 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
145 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 145 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
146 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 146 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
147 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 147 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
148 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 148 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
149 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 149 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
150 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 150 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
151 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 151 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
152 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 152 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
153 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 153 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
154 break; 154 break;
155 case 0xce: 155 case 0xce:
@@ -165,7 +165,7 @@ nvc0_identify(struct nouveau_device *device)
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
168 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 168 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
169 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 169 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
170 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 170 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
171 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 171 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -175,13 +175,13 @@ nvc0_identify(struct nouveau_device *device)
175 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 175 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
176 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 176 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
177 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 177 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
178 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 178 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
179 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 179 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
180 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 180 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
181 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 181 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
182 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 182 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
183 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 183 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
184 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 184 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
185 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 185 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
186 break; 186 break;
187 case 0xcf: 187 case 0xcf:
@@ -197,7 +197,7 @@ nvc0_identify(struct nouveau_device *device)
197 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 197 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 199 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 200 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
201 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 201 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
202 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 202 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
203 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 203 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -207,13 +207,13 @@ nvc0_identify(struct nouveau_device *device)
207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
208 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 208 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
209 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 209 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
210 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 210 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
211 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 211 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
212 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 212 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
213 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 213 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
214 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 214 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
215 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 215 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
216 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 216 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
217 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 217 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
218 break; 218 break;
219 case 0xc1: 219 case 0xc1:
@@ -229,7 +229,7 @@ nvc0_identify(struct nouveau_device *device)
229 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 229 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
230 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 230 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
231 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 231 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
232 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 232 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
233 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 233 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
234 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 234 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
235 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 235 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -244,7 +244,7 @@ nvc0_identify(struct nouveau_device *device)
244 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 244 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
245 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 245 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
246 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 246 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
247 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 247 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
248 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 248 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
249 break; 249 break;
250 case 0xc8: 250 case 0xc8:
@@ -260,7 +260,7 @@ nvc0_identify(struct nouveau_device *device)
260 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 260 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
261 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 261 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
262 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 262 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
263 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 263 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
264 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 264 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
265 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 265 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
266 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 266 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -276,7 +276,7 @@ nvc0_identify(struct nouveau_device *device)
276 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 276 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
277 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 277 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
278 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 278 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
279 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 279 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
280 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 280 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
281 break; 281 break;
282 case 0xd9: 282 case 0xd9:
@@ -292,7 +292,7 @@ nvc0_identify(struct nouveau_device *device)
292 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 292 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
294 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 294 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
295 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 295 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
296 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 296 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
297 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 297 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
298 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 298 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
@@ -307,7 +307,7 @@ nvc0_identify(struct nouveau_device *device)
307 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 307 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
308 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 308 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
309 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 309 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
310 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 310 device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
311 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 311 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
312 break; 312 break;
313 case 0xd7: 313 case 0xd7:
@@ -323,7 +323,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -336,7 +336,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nvd0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 987edbc30a09..9784cbf8a9d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -70,7 +70,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -81,7 +81,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nve4_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
@@ -103,7 +103,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -114,7 +114,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nve4_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
@@ -136,7 +136,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -147,7 +147,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nve4_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
@@ -169,7 +169,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -180,7 +180,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvf0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nvf0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
@@ -204,7 +204,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = gf100_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
@@ -215,7 +215,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = nv108_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nv108_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = nvf0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 1bd4c63369c1..3ca2d25b7f5e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -273,7 +273,7 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
 		.outp = outp,
 		.head = head,
 	}, *dp = &_dp;
-	const u32 bw_list[] = { 270000, 162000, 0 };
+	const u32 bw_list[] = { 540000, 270000, 162000, 0 };
 	const u32 *link_bw = bw_list;
 	u8 hdr, cnt, len;
 	u32 data;
@@ -312,6 +312,14 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
 		ERR("failed to read DPCD\n");
 	}
 
+	/* bring capabilities within encoder limits */
+	if ((dp->dpcd[2] & 0x1f) > dp->outp->dpconf.link_nr) {
+		dp->dpcd[2] &= ~0x1f;
+		dp->dpcd[2] |= dp->outp->dpconf.link_nr;
+	}
+	if (dp->dpcd[1] > dp->outp->dpconf.link_bw)
+		dp->dpcd[1] = dp->outp->dpconf.link_bw;
+
 	/* adjust required bandwidth for 8B/10B coding overhead */
 	datarate = (datarate / 8) * 10;
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
new file mode 100644
index 000000000000..cf6f59677b74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm107_disp_sclass[] = {
+	{ GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+gm107_disp_base_oclass[] = {
+	{ GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int heads = nv_rd32(parent, 0x022448);
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, heads,
+				  "PDISP", "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = gm107_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+	priv->sclass = gm107_disp_sclass;
+	priv->head.nr = heads;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp = &nvd0_sor_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass *
+gm107_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x07),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = gm107_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+	.mthd.core = &nve0_disp_mast_mthd_chan,
+	.mthd.base = &nvd0_disp_sync_mthd_chan,
+	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
+	.mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 7cf8b1348632..6c89af792889 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include <engine/disp.h>
+#include "priv.h"
 
 #include <core/event.h>
 #include <core/class.h>
@@ -138,13 +138,13 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_disp_oclass = &(struct nouveau_disp_impl) {
+	.base.handle = NV_ENGINE(DISP, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 9ad722e4e087..9a0cab9c3adb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -26,8 +26,7 @@
 #include <core/parent.h>
 #include <core/handle.h>
 #include <core/class.h>
-
-#include <engine/disp.h>
+#include <core/enum.h>
 
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
@@ -227,6 +226,177 @@ nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
  * EVO master channel object
  ******************************************************************************/
 
+static void
+nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
+		    const struct nv50_disp_mthd_list *list, int inst)
+{
+	struct nouveau_object *disp = nv_object(priv);
+	int i;
+
+	for (i = 0; list->data[i].mthd; i++) {
+		if (list->data[i].addr) {
+			u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
+			u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
+			u32 mthd = list->data[i].mthd + (list->mthd * inst);
+			const char *name = list->data[i].name;
+			char mods[16];
+
+			if (prev != next)
+				snprintf(mods, sizeof(mods), "-> 0x%08x", next);
+			else
+				snprintf(mods, sizeof(mods), "%13c", ' ');
+
+			nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
+				   mthd, prev, mods, name ? " // " : "",
+				   name ? name : "");
+		}
+	}
+}
+
+void
+nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
+		    const struct nv50_disp_mthd_chan *chan)
+{
+	struct nouveau_object *disp = nv_object(priv);
+	const struct nv50_disp_impl *impl = (void *)disp->oclass;
+	const struct nv50_disp_mthd_list *list;
+	int i, j;
+
+	if (debug > nv_subdev(priv)->debug)
+		return;
+
+	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
+		u32 base = head * chan->addr;
+		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
+			const char *cname = chan->name;
+			const char *sname = "";
+			char cname_[16], sname_[16];
+
+			if (chan->addr) {
+				snprintf(cname_, sizeof(cname_), "%s %d",
+					 chan->name, head);
+				cname = cname_;
+			}
+
+			if (chan->data[i].nr > 1) {
+				snprintf(sname_, sizeof(sname_), " - %s %d",
+					 chan->data[i].name, j);
+				sname = sname_;
+			}
+
+			nv_printk_(disp, debug, "%s%s:\n", cname, sname);
+			nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
+					    list, j);
+		}
+	}
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x610bb8 },
+		{ 0x0088, 0x610b9c },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610828 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610b70 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_pior = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0700, 0x610b80 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_mast_mthd_chan = {
+	.name = "Core",
+	.addr = 0x000000,
+	.data = {
+		{ "Global", 1, &nv50_disp_mast_mthd_base },
+		{ "DAC", 3, &nv50_disp_mast_mthd_dac },
+		{ "SOR", 2, &nv50_disp_mast_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
+		{ "HEAD", 2, &nv50_disp_mast_mthd_head },
+		{}
+	}
+};
+
 static int
 nv50_disp_mast_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
@@ -323,6 +493,56 @@ nv50_disp_mast_ofuncs = {
  * EVO sync channel objects
  ******************************************************************************/
 
+static const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x0100, 0x610894 },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_image = {
+	.mthd = 0x0400,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0800, 0x6108f0 },
+		{ 0x0804, 0x6108fc },
+		{ 0x0808, 0x61090c },
+		{ 0x080c, 0x610914 },
+		{ 0x0810, 0x610904 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_sync_mthd_chan = {
+	.name = "Base",
+	.addr = 0x000540,
+	.data = {
+		{ "Global", 1, &nv50_disp_sync_mthd_base },
+		{ "Image", 2, &nv50_disp_sync_mthd_image },
+		{}
+	}
+};
+
 static int
 nv50_disp_sync_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
@@ -362,6 +582,44 @@ nv50_disp_sync_ofuncs = {
  * EVO overlay channel objects
  ******************************************************************************/
 
+const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0009a0 },
+		{ 0x0088, 0x0009c0 },
+		{ 0x008c, 0x0009c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_ovly_mthd_chan = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.data = {
+		{ "Global", 1, &nv50_disp_ovly_mthd_base },
+		{}
+	}
+};
+
 static int
 nv50_disp_ovly_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
@@ -782,25 +1040,78 @@ nv50_disp_cclass = {
  * Display engine implementation
  ******************************************************************************/
 
-static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv)
-{
-	u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
-	u32 addr, data;
-	int chid;
-
-	for (chid = 0; chid < 5; chid++) {
-		if (!(channels & (1 << chid)))
-			continue;
+static const struct nouveau_enum
+nv50_disp_intr_error_type[] = {
+	{ 3, "ILLEGAL_MTHD" },
+	{ 4, "INVALID_VALUE" },
+	{ 5, "INVALID_STATE" },
+	{ 7, "INVALID_HANDLE" },
+	{}
+};
 
-		nv_wr32(priv, 0x610020, 0x00010000 << chid);
-		addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
-		data = nv_rd32(priv, 0x610084 + (chid * 0x08));
-		nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+static const struct nouveau_enum
+nv50_disp_intr_error_code[] = {
+	{ 0x00, "" },
+	{}
+};
 
-		nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
-			 chid, addr & 0xffc, data, addr);
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+{
+	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+	u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+	u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+	u32 code = (addr & 0x00ff0000) >> 16;
+	u32 type = (addr & 0x00007000) >> 12;
+	u32 mthd = (addr & 0x00000ffc);
+	const struct nouveau_enum *ec, *et;
+	char ecunk[6], etunk[6];
+
+	et = nouveau_enum_find(nv50_disp_intr_error_type, type);
+	if (!et)
+		snprintf(etunk, sizeof(etunk), "UNK%02X", type);
+
+	ec = nouveau_enum_find(nv50_disp_intr_error_code, code);
+	if (!ec)
+		snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
+
+	nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
+		 et ? et->name : etunk, ec ? ec->name : ecunk,
+		 chid, mthd, data);
+
+	if (chid == 0) {
+		switch (mthd) {
+		case 0x0080:
+			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
+					    impl->mthd.core);
+			break;
+		default:
+			break;
+		}
+	} else
+	if (chid <= 2) {
+		switch (mthd) {
+		case 0x0080:
+			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
+					    impl->mthd.base);
+			break;
+		default:
+			break;
+		}
+	} else
+	if (chid <= 4) {
+		switch (mthd) {
+		case 0x0080:
+			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
+					    impl->mthd.ovly);
+			break;
+		default:
+			break;
+		}
 	}
+
+	nv_wr32(priv, 0x610020, 0x00010000 << chid);
+	nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
 }
 
 static u16
@@ -1241,12 +1552,14 @@ nv50_disp_intr_supervisor(struct work_struct *work)
 {
 	struct nv50_disp_priv *priv =
 		container_of(work, struct nv50_disp_priv, supervisor);
+	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
 	u32 super = nv_rd32(priv, 0x610030);
 	int head;
 
 	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
 
 	if (priv->super & 0x00000010) {
+		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
 		for (head = 0; head < priv->head.nr; head++) {
 			if (!(super & (0x00000020 << head)))
 				continue;
@@ -1290,9 +1603,10 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
 	u32 intr0 = nv_rd32(priv, 0x610020);
 	u32 intr1 = nv_rd32(priv, 0x610024);
 
-	if (intr0 & 0x001f0000) {
-		nv50_disp_intr_error(priv);
-		intr0 &= ~0x001f0000;
+	while (intr0 & 0x001f0000) {
+		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
+		nv50_disp_intr_error(priv, chid);
+		intr0 &= ~(0x00010000 << chid);
 	}
 
 	if (intr1 & 0x00000004) {
@@ -1346,13 +1660,17 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x50),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+	.mthd.core = &nv50_disp_mast_mthd_chan,
+	.mthd.base = &nv50_disp_sync_mthd_chan,
+	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
+	.mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index d31d426ea1f6..48d59db47f0d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -8,9 +8,19 @@
 #include <core/event.h>
 
 #include <engine/dmaobj.h>
-#include <engine/disp.h>
 
 #include "dport.h"
+#include "priv.h"
+
+struct nv50_disp_impl {
+	struct nouveau_disp_impl base;
+	struct {
+		const struct nv50_disp_mthd_chan *core;
+		const struct nv50_disp_mthd_chan *base;
+		const struct nv50_disp_mthd_chan *ovly;
+		int prev;
+	} mthd;
+};
 
 struct nv50_disp_priv {
 	struct nouveau_disp base;
@@ -124,21 +134,60 @@ struct nv50_disp_pioc {
 	struct nv50_disp_chan base;
 };
 
+struct nv50_disp_mthd_list {
+	u32 mthd;
+	u32 addr;
+	struct {
+		u32 mthd;
+		u32 addr;
+		const char *name;
+	} data[];
+};
+
+struct nv50_disp_mthd_chan {
+	const char *name;
+	u32 addr;
+	struct {
+		const char *name;
+		int nr;
+		const struct nv50_disp_mthd_list *mthd;
+	} data[];
+};
+
 extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
 extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
 extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
 extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
 extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
 extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
 extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
+			 const struct nv50_disp_mthd_chan *);
 void nv50_disp_intr_supervisor(struct work_struct *);
 void nv50_disp_intr(struct nouveau_subdev *);
 
+extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
 extern struct nouveau_omthds nv84_disp_base_omthds[];
 
+extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+
 extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
 extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
 extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
 extern struct nouveau_omthds nvd0_disp_base_omthds[];
@@ -147,4 +196,7 @@ extern struct nouveau_oclass nvd0_disp_cclass;
 void nvd0_disp_intr_supervisor(struct work_struct *);
 void nvd0_disp_intr(struct nouveau_subdev *);
 
+extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index ef9ce300a496..98c5b19bc2b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -29,6 +29,179 @@
 
 #include "nv50.h"
 
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610bc4 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x085c, 0x610c5c },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0878, 0x610c50 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x089c, 0x610c68 },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{ 0x0910, 0x610c70 },
+		{ 0x0914, 0x610c78 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_mast_mthd_chan = {
+	.name = "Core",
+	.addr = 0x000000,
+	.data = {
+		{ "Global", 1, &nv50_disp_mast_mthd_base },
+		{ "DAC", 3, &nv84_disp_mast_mthd_dac },
+		{ "SOR", 2, &nv50_disp_mast_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
+		{ "HEAD", 2, &nv84_disp_mast_mthd_head },
+		{}
+	}
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_sync_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00c4, 0x610800 },
+		{ 0x00c8, 0x61080c },
+		{ 0x00cc, 0x610818 },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x00fc, 0x610824 },
+		{ 0x0100, 0x610894 },
+		{ 0x0104, 0x61082c },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_sync_mthd_chan = {
+	.name = "Base",
+	.addr = 0x000540,
+	.data = {
+		{ "Global", 1, &nv84_disp_sync_mthd_base },
+		{ "Image", 2, &nv50_disp_sync_mthd_image },
+		{}
+	}
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_ovly_mthd_chan = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.data = {
+		{ "Global", 1, &nv84_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
 static struct nouveau_oclass
 nv84_disp_sclass[] = {
 	{ NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -59,6 +232,10 @@ nv84_disp_base_oclass[] = {
 	{}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
 static int
 nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
@@ -91,13 +268,17 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv84_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x82),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv84_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x82),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+	.mthd.core = &nv84_disp_mast_mthd_chan,
+	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
+	.mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a518543c00ab..6844061c7e04 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -29,6 +29,38 @@
 
 #include "nv50.h"
 
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv94_disp_mast_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610794 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_chan
+nv94_disp_mast_mthd_chan = {
+	.name = "Core",
+	.addr = 0x000000,
+	.data = {
+		{ "Global", 1, &nv50_disp_mast_mthd_base },
+		{ "DAC", 3, &nv84_disp_mast_mthd_dac },
+		{ "SOR", 4, &nv94_disp_mast_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
+		{ "HEAD", 2, &nv84_disp_mast_mthd_head },
+		{}
+	}
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
 static struct nouveau_oclass
 nv94_disp_sclass[] = {
 	{ NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -59,6 +91,10 @@ nv94_disp_base_oclass[] = {
 	{}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
 static int
 nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
@@ -92,13 +128,17 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv94_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x88),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv94_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x88),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv94_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+	.mthd.core = &nv94_disp_mast_mthd_chan,
+	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
+	.mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 6cf8eefac368..88c96241c02a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -29,6 +29,55 @@
 
 #include "nv50.h"
 
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nva0_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00b0, 0x610c98 },
+		{ 0x00b4, 0x610ca4 },
+		{ 0x00b8, 0x610cac },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_chan
+nva0_disp_ovly_mthd_chan = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.data = {
+		{ "Global", 1, &nva0_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
 static struct nouveau_oclass
 nva0_disp_sclass[] = {
 	{ NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -45,6 +94,10 @@ nva0_disp_base_oclass[] = {
 	{}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
 static int
 nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +130,17 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nva0_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x83),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nva0_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x83),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nva0_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+	.mthd.core = &nv84_disp_mast_mthd_chan,
+	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.ovly = &nva0_disp_ovly_mthd_chan,
+	.mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 6ad6dcece43b..46cb2ce0e82a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -29,6 +29,10 @@
 
 #include "nv50.h"
 
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
 static struct nouveau_oclass
 nva3_disp_sclass[] = {
 	{ NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
@@ -60,6 +64,10 @@ nva3_disp_base_oclass[] = {
 	{}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
 static int
 nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
@@ -94,13 +102,17 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nva3_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0x85),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nva3_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x85),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nva3_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
-};
+	.mthd.core = &nv94_disp_mast_mthd_chan,
+	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
+	.mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 1c5e4e8b2c82..7762665ad8fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -124,6 +124,146 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
  * EVO master channel object
  ******************************************************************************/
 
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x660080 },
+		{ 0x0084, 0x660084 },
+		{ 0x0088, 0x660088 },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_dac = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0180, 0x660180 },
+		{ 0x0184, 0x660184 },
+		{ 0x0188, 0x660188 },
+		{ 0x0190, 0x660190 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_sor = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0200, 0x660200 },
+		{ 0x0204, 0x660204 },
+		{ 0x0208, 0x660208 },
+		{ 0x0210, 0x660210 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_pior = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0300, 0x660300 },
+		{ 0x0304, 0x660304 },
+		{ 0x0308, 0x660308 },
+		{ 0x0310, 0x660310 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_chan
+nvd0_disp_mast_mthd_chan = {
+	.name = "Core",
+	.addr = 0x000000,
+	.data = {
+		{ "Global", 1, &nvd0_disp_mast_mthd_base },
+		{ "DAC", 3, &nvd0_disp_mast_mthd_dac },
+		{ "SOR", 8, &nvd0_disp_mast_mthd_sor },
+		{ "PIOR", 4, &nvd0_disp_mast_mthd_pior },
+		{ "HEAD", 4, &nvd0_disp_mast_mthd_head },
+		{}
+	}
+};
+
 static int
 nvd0_disp_mast_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
@@ -216,6 +356,81 @@ nvd0_disp_mast_ofuncs = {
  * EVO sync channel objects
  ******************************************************************************/
 
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x661080 },
+		{ 0x0084, 0x661084 },
+		{ 0x0088, 0x661088 },
+		{ 0x008c, 0x66108c },
+		{ 0x0090, 0x661090 },
+		{ 0x0094, 0x661094 },
+		{ 0x00a0, 0x6610a0 },
+		{ 0x00a4, 0x6610a4 },
+		{ 0x00c0, 0x6610c0 },
+		{ 0x00c4, 0x6610c4 },
+		{ 0x00c8, 0x6610c8 },
+		{ 0x00cc, 0x6610cc },
+		{ 0x00e0, 0x6610e0 },
+		{ 0x00e4, 0x6610e4 },
+		{ 0x00e8, 0x6610e8 },
+		{ 0x00ec, 0x6610ec },
+		{ 0x00fc, 0x6610fc },
+		{ 0x0100, 0x661100 },
+		{ 0x0104, 0x661104 },
+		{ 0x0108, 0x661108 },
+		{ 0x010c, 0x66110c },
+		{ 0x0110, 0x661110 },
+		{ 0x0114, 0x661114 },
+		{ 0x0118, 0x661118 },
+		{ 0x011c, 0x66111c },
+		{ 0x0130, 0x661130 },
+		{ 0x0134, 0x661134 },
+		{ 0x0138, 0x661138 },
+		{ 0x013c, 0x66113c },
+		{ 0x0140, 0x661140 },
+		{ 0x0144, 0x661144 },
+		{ 0x0148, 0x661148 },
+		{ 0x014c, 0x66114c },
+		{ 0x0150, 0x661150 },
+		{ 0x0154, 0x661154 },
+		{ 0x0158, 0x661158 },
+		{ 0x015c, 0x66115c },
+		{ 0x0160, 0x661160 },
+		{ 0x0164, 0x661164 },
+		{ 0x0168, 0x661168 },
+		{ 0x016c, 0x66116c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_image = {
+	.mthd = 0x0400,
+	.addr = 0x000400,
+	.data = {
+		{ 0x0400, 0x661400 },
+		{ 0x0404, 0x661404 },
+		{ 0x0408, 0x661408 },
+		{ 0x040c, 0x66140c },
+		{ 0x0410, 0x661410 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_chan
+nvd0_disp_sync_mthd_chan = {
+	.name = "Base",
+	.addr = 0x001000,
+	.data = {
+		{ "Global", 1, &nvd0_disp_sync_mthd_base },
+		{ "Image", 2, &nvd0_disp_sync_mthd_image },
+		{}
+	}
+};
+
 static int
 nvd0_disp_sync_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
@@ -256,6 +471,68 @@ nvd0_disp_sync_ofuncs = {
256 * EVO overlay channel objects 471 * EVO overlay channel objects
257 ******************************************************************************/ 472 ******************************************************************************/
258 473
474static const struct nv50_disp_mthd_list
475nvd0_disp_ovly_mthd_base = {
476 .mthd = 0x0000,
477 .data = {
478 { 0x0080, 0x665080 },
479 { 0x0084, 0x665084 },
480 { 0x0088, 0x665088 },
481 { 0x008c, 0x66508c },
482 { 0x0090, 0x665090 },
483 { 0x0094, 0x665094 },
484 { 0x00a0, 0x6650a0 },
485 { 0x00a4, 0x6650a4 },
486 { 0x00b0, 0x6650b0 },
487 { 0x00b4, 0x6650b4 },
488 { 0x00b8, 0x6650b8 },
489 { 0x00c0, 0x6650c0 },
490 { 0x00e0, 0x6650e0 },
491 { 0x00e4, 0x6650e4 },
492 { 0x00e8, 0x6650e8 },
493 { 0x0100, 0x665100 },
494 { 0x0104, 0x665104 },
495 { 0x0108, 0x665108 },
496 { 0x010c, 0x66510c },
497 { 0x0110, 0x665110 },
498 { 0x0118, 0x665118 },
499 { 0x011c, 0x66511c },
500 { 0x0120, 0x665120 },
501 { 0x0124, 0x665124 },
502 { 0x0130, 0x665130 },
503 { 0x0134, 0x665134 },
504 { 0x0138, 0x665138 },
505 { 0x013c, 0x66513c },
506 { 0x0140, 0x665140 },
507 { 0x0144, 0x665144 },
508 { 0x0148, 0x665148 },
509 { 0x014c, 0x66514c },
510 { 0x0150, 0x665150 },
511 { 0x0154, 0x665154 },
512 { 0x0158, 0x665158 },
513 { 0x015c, 0x66515c },
514 { 0x0160, 0x665160 },
515 { 0x0164, 0x665164 },
516 { 0x0168, 0x665168 },
517 { 0x016c, 0x66516c },
518 { 0x0400, 0x665400 },
519 { 0x0408, 0x665408 },
520 { 0x040c, 0x66540c },
521 { 0x0410, 0x665410 },
522 {}
523 }
524};
525
526static const struct nv50_disp_mthd_chan
527nvd0_disp_ovly_mthd_chan = {
528 .name = "Overlay",
529 .addr = 0x001000,
530 .data = {
531 { "Global", 1, &nvd0_disp_ovly_mthd_base },
532 {}
533 }
534};
535
259static int 536static int
260nvd0_disp_ovly_ctor(struct nouveau_object *parent, 537nvd0_disp_ovly_ctor(struct nouveau_object *parent,
261 struct nouveau_object *engine, 538 struct nouveau_object *engine,
@@ -897,19 +1174,22 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
897{ 1174{
898 struct nv50_disp_priv *priv = 1175 struct nv50_disp_priv *priv =
899 container_of(work, struct nv50_disp_priv, supervisor); 1176 container_of(work, struct nv50_disp_priv, supervisor);
1177 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
900 u32 mask[4]; 1178 u32 mask[4];
901 int head; 1179 int head;
902 1180
903 nv_debug(priv, "supervisor %08x\n", priv->super); 1181 nv_debug(priv, "supervisor %d\n", ffs(priv->super));
904 for (head = 0; head < priv->head.nr; head++) { 1182 for (head = 0; head < priv->head.nr; head++) {
905 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800)); 1183 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
906 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]); 1184 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
907 } 1185 }
908 1186
909 if (priv->super & 0x00000001) { 1187 if (priv->super & 0x00000001) {
1188 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
910 for (head = 0; head < priv->head.nr; head++) { 1189 for (head = 0; head < priv->head.nr; head++) {
911 if (!(mask[head] & 0x00001000)) 1190 if (!(mask[head] & 0x00001000))
912 continue; 1191 continue;
1192 nv_debug(priv, "supervisor 1.0 - head %d\n", head);
913 nvd0_disp_intr_unk1_0(priv, head); 1193 nvd0_disp_intr_unk1_0(priv, head);
914 } 1194 }
915 } else 1195 } else
@@ -917,16 +1197,19 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
917 for (head = 0; head < priv->head.nr; head++) { 1197 for (head = 0; head < priv->head.nr; head++) {
918 if (!(mask[head] & 0x00001000)) 1198 if (!(mask[head] & 0x00001000))
919 continue; 1199 continue;
1200 nv_debug(priv, "supervisor 2.0 - head %d\n", head);
920 nvd0_disp_intr_unk2_0(priv, head); 1201 nvd0_disp_intr_unk2_0(priv, head);
921 } 1202 }
922 for (head = 0; head < priv->head.nr; head++) { 1203 for (head = 0; head < priv->head.nr; head++) {
923 if (!(mask[head] & 0x00010000)) 1204 if (!(mask[head] & 0x00010000))
924 continue; 1205 continue;
1206 nv_debug(priv, "supervisor 2.1 - head %d\n", head);
925 nvd0_disp_intr_unk2_1(priv, head); 1207 nvd0_disp_intr_unk2_1(priv, head);
926 } 1208 }
927 for (head = 0; head < priv->head.nr; head++) { 1209 for (head = 0; head < priv->head.nr; head++) {
928 if (!(mask[head] & 0x00001000)) 1210 if (!(mask[head] & 0x00001000))
929 continue; 1211 continue;
1212 nv_debug(priv, "supervisor 2.2 - head %d\n", head);
930 nvd0_disp_intr_unk2_2(priv, head); 1213 nvd0_disp_intr_unk2_2(priv, head);
931 } 1214 }
932 } else 1215 } else
@@ -934,6 +1217,7 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
934 for (head = 0; head < priv->head.nr; head++) { 1217 for (head = 0; head < priv->head.nr; head++) {
935 if (!(mask[head] & 0x00001000)) 1218 if (!(mask[head] & 0x00001000))
936 continue; 1219 continue;
1220 nv_debug(priv, "supervisor 3.0 - head %d\n", head);
937 nvd0_disp_intr_unk4_0(priv, head); 1221 nvd0_disp_intr_unk4_0(priv, head);
938 } 1222 }
939 } 1223 }
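
The supervisor changes are pure observability: the handler now logs which stage fired (ffs(priv->super)) and tags every per-head sub-step. Structurally it is one pending-stage word fanned out across heads, each gated by a bit read back from 0x6101d4 + head * 0x800. A compressed userspace model; the stage semantics in the comments are my interpretation, not documented behaviour.

#include <stdio.h>
#include <stdint.h>

#define NR_HEADS 4

static void stage1(int head) { printf("supervisor 1.0 - head %d\n", head); }
static void stage2(int head) { printf("supervisor 2.0 - head %d\n", head); }
static void stage3(int head) { printf("supervisor 3.0 - head %d\n", head); }

/* 'super' is the pending-stage word, mask[] the per-head 0x6101d4 reads */
static void supervisor(uint32_t super, const uint32_t mask[NR_HEADS])
{
	int head;

	if (super & 0x00000001) {        /* stage 1: tear down (guess) */
		for (head = 0; head < NR_HEADS; head++)
			if (mask[head] & 0x00001000)
				stage1(head);
	} else if (super & 0x00000002) { /* stage 2: reprogram (guess) */
		for (head = 0; head < NR_HEADS; head++)
			if (mask[head] & 0x00001000)
				stage2(head);
	} else if (super & 0x00000004) { /* stage 3: bring up (guess) */
		for (head = 0; head < NR_HEADS; head++)
			if (mask[head] & 0x00001000)
				stage3(head);
	}
}

int main(void)
{
	const uint32_t mask[NR_HEADS] = { 0x1000, 0, 0x1000, 0 };
	supervisor(0x00000002, mask);
	return 0;
}
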
@@ -943,6 +1227,53 @@ nvd0_disp_intr_supervisor(struct work_struct *work)
943 nv_wr32(priv, 0x6101d0, 0x80000000); 1227 nv_wr32(priv, 0x6101d0, 0x80000000);
944} 1228}
945 1229
1230static void
1231nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1232{
1233 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1234 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1235 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1236 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1237
1238 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1239 "0x%08x 0x%08x\n",
1240 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1241
1242 if (chid == 0) {
1243 switch (mthd) {
1244 case 0x0080:
1245 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1246 impl->mthd.core);
1247 break;
1248 default:
1249 break;
1250 }
1251 } else
1252 if (chid <= 4) {
1253 switch (mthd) {
1254 case 0x0080:
1255 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1256 impl->mthd.base);
1257 break;
1258 default:
1259 break;
1260 }
1261 } else
1262 if (chid <= 8) {
1263 switch (mthd) {
1264 case 0x0080:
1265 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
1266 impl->mthd.ovly);
1267 break;
1268 default:
1269 break;
1270 }
1271 }
1272
1273 nv_wr32(priv, 0x61009c, (1 << chid));
1274 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
1275}
1276
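
nvd0_disp_intr_error() routes the faulting channel id onto the right method table: chid 0 is the core channel, 1-4 are the base (sync) channels, and 5-8 the overlays, with the dump fired on method 0x0080 (which I read as the channel's update/commit method). A sketch of just that classification, with the table type left opaque:

#include <stddef.h>

struct mthd_chan; /* opaque here; the real tables are defined above */

struct disp_mthd_tables {
	const struct mthd_chan *core, *base, *ovly;
};

static const struct mthd_chan *
classify_chid(const struct disp_mthd_tables *t, int chid, int *inst)
{
	if (chid == 0) { *inst = 0;        return t->core; }
	if (chid <= 4) { *inst = chid - 1; return t->base; }
	if (chid <= 8) { *inst = chid - 5; return t->ovly; }
	*inst = -1;
	return NULL;   /* cursor and other channels carry no method table */
}

The instance handed back (chid - 0, chid - 1, chid - 5 in the patch) is what selects the 0x1000-strided register block in the tables above.
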
946void 1277void
947nvd0_disp_intr(struct nouveau_subdev *subdev) 1278nvd0_disp_intr(struct nouveau_subdev *subdev)
948{ 1279{
@@ -959,18 +1290,8 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
959 if (intr & 0x00000002) { 1290 if (intr & 0x00000002) {
960 u32 stat = nv_rd32(priv, 0x61009c); 1291 u32 stat = nv_rd32(priv, 0x61009c);
961 int chid = ffs(stat) - 1; 1292 int chid = ffs(stat) - 1;
962 if (chid >= 0) { 1293 if (chid >= 0)
963 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12)); 1294 nvd0_disp_intr_error(priv, chid);
964 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
965 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
966
967 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
968 "0x%08x 0x%08x\n",
969 chid, (mthd & 0x0000ffc), data, mthd, unkn);
970 nv_wr32(priv, 0x61009c, (1 << chid));
971 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
972 }
973
974 intr &= ~0x00000002; 1295 intr &= ~0x00000002;
975 } 1296 }
976 1297
@@ -1035,13 +1356,17 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1035 return 0; 1356 return 0;
1036} 1357}
1037 1358
1038struct nouveau_oclass 1359struct nouveau_oclass *
1039nvd0_disp_oclass = { 1360nvd0_disp_oclass = &(struct nv50_disp_impl) {
1040 .handle = NV_ENGINE(DISP, 0x90), 1361 .base.base.handle = NV_ENGINE(DISP, 0x90),
1041 .ofuncs = &(struct nouveau_ofuncs) { 1362 .base.base.ofuncs = &(struct nouveau_ofuncs) {
1042 .ctor = nvd0_disp_ctor, 1363 .ctor = nvd0_disp_ctor,
1043 .dtor = _nouveau_disp_dtor, 1364 .dtor = _nouveau_disp_dtor,
1044 .init = _nouveau_disp_init, 1365 .init = _nouveau_disp_init,
1045 .fini = _nouveau_disp_fini, 1366 .fini = _nouveau_disp_fini,
1046 }, 1367 },
1047}; 1368 .mthd.core = &nvd0_disp_mast_mthd_chan,
1369 .mthd.base = &nvd0_disp_sync_mthd_chan,
1370 .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
1371 .mthd.prev = -0x020000,
1372}.base.base;
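
The exported class changes shape here: instead of a plain struct nouveau_oclass, the chip file now exports a pointer to the base.base member of a file-scope compound literal, letting it attach implementation-private data (the method tables) without widening the public type. A self-contained model of the pattern; the type names are stand-ins:

#include <stdio.h>

struct oclass     { int handle; };
struct disp_class { struct oclass base; };                 /* intermediate level */
struct disp_impl  { struct disp_class base; const char *core; };

/* a file-scope compound literal has static storage duration (C99 6.5.2.5),
 * so the address of one of its sub-objects is a valid constant initializer */
struct oclass *impl_oclass = &(struct disp_impl) {
	.base.base.handle = 0x90,
	.core = "nvd0_disp_mast_mthd_chan",
}.base.base;

int main(void)
{
	/* consumers downcast: 'base.base' sits at offset 0 of disp_impl */
	struct disp_impl *impl = (struct disp_impl *)impl_oclass;
	printf("handle 0x%x core %s\n", impl_oclass->handle, impl->core);
	return 0;
}
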
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index ab63f32c00b2..44e0b8f34c1a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -29,6 +29,175 @@
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
32/*******************************************************************************
33 * EVO master channel object
34 ******************************************************************************/
35
36static const struct nv50_disp_mthd_list
37nve0_disp_mast_mthd_head = {
38 .mthd = 0x0300,
39 .addr = 0x000300,
40 .data = {
41 { 0x0400, 0x660400 },
42 { 0x0404, 0x660404 },
43 { 0x0408, 0x660408 },
44 { 0x040c, 0x66040c },
45 { 0x0410, 0x660410 },
46 { 0x0414, 0x660414 },
47 { 0x0418, 0x660418 },
48 { 0x041c, 0x66041c },
49 { 0x0420, 0x660420 },
50 { 0x0424, 0x660424 },
51 { 0x0428, 0x660428 },
52 { 0x042c, 0x66042c },
53 { 0x0430, 0x660430 },
54 { 0x0434, 0x660434 },
55 { 0x0438, 0x660438 },
56 { 0x0440, 0x660440 },
57 { 0x0444, 0x660444 },
58 { 0x0448, 0x660448 },
59 { 0x044c, 0x66044c },
60 { 0x0450, 0x660450 },
61 { 0x0454, 0x660454 },
62 { 0x0458, 0x660458 },
63 { 0x045c, 0x66045c },
64 { 0x0460, 0x660460 },
65 { 0x0468, 0x660468 },
66 { 0x046c, 0x66046c },
67 { 0x0470, 0x660470 },
68 { 0x0474, 0x660474 },
69 { 0x047c, 0x66047c },
70 { 0x0480, 0x660480 },
71 { 0x0484, 0x660484 },
72 { 0x0488, 0x660488 },
73 { 0x048c, 0x66048c },
74 { 0x0490, 0x660490 },
75 { 0x0494, 0x660494 },
76 { 0x0498, 0x660498 },
77 { 0x04a0, 0x6604a0 },
78 { 0x04b0, 0x6604b0 },
79 { 0x04b8, 0x6604b8 },
80 { 0x04bc, 0x6604bc },
81 { 0x04c0, 0x6604c0 },
82 { 0x04c4, 0x6604c4 },
83 { 0x04c8, 0x6604c8 },
84 { 0x04d0, 0x6604d0 },
85 { 0x04d4, 0x6604d4 },
86 { 0x04e0, 0x6604e0 },
87 { 0x04e4, 0x6604e4 },
88 { 0x04e8, 0x6604e8 },
89 { 0x04ec, 0x6604ec },
90 { 0x04f0, 0x6604f0 },
91 { 0x04f4, 0x6604f4 },
92 { 0x04f8, 0x6604f8 },
93 { 0x04fc, 0x6604fc },
94 { 0x0500, 0x660500 },
95 { 0x0504, 0x660504 },
96 { 0x0508, 0x660508 },
97 { 0x050c, 0x66050c },
98 { 0x0510, 0x660510 },
99 { 0x0514, 0x660514 },
100 { 0x0518, 0x660518 },
101 { 0x051c, 0x66051c },
102 { 0x0520, 0x660520 },
103 { 0x0524, 0x660524 },
104 { 0x052c, 0x66052c },
105 { 0x0530, 0x660530 },
106 { 0x054c, 0x66054c },
107 { 0x0550, 0x660550 },
108 { 0x0554, 0x660554 },
109 { 0x0558, 0x660558 },
110 { 0x055c, 0x66055c },
111 {}
112 }
113};
114
115const struct nv50_disp_mthd_chan
116nve0_disp_mast_mthd_chan = {
117 .name = "Core",
118 .addr = 0x000000,
119 .data = {
120 { "Global", 1, &nvd0_disp_mast_mthd_base },
121 { "DAC", 3, &nvd0_disp_mast_mthd_dac },
122 { "SOR", 8, &nvd0_disp_mast_mthd_sor },
123 { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
124 { "HEAD", 4, &nve0_disp_mast_mthd_head },
125 {}
126 }
127};
128
129/*******************************************************************************
130 * EVO overlay channel objects
131 ******************************************************************************/
132
133static const struct nv50_disp_mthd_list
134nve0_disp_ovly_mthd_base = {
135 .mthd = 0x0000,
136 .data = {
137 { 0x0080, 0x665080 },
138 { 0x0084, 0x665084 },
139 { 0x0088, 0x665088 },
140 { 0x008c, 0x66508c },
141 { 0x0090, 0x665090 },
142 { 0x0094, 0x665094 },
143 { 0x00a0, 0x6650a0 },
144 { 0x00a4, 0x6650a4 },
145 { 0x00b0, 0x6650b0 },
146 { 0x00b4, 0x6650b4 },
147 { 0x00b8, 0x6650b8 },
148 { 0x00c0, 0x6650c0 },
149 { 0x00c4, 0x6650c4 },
150 { 0x00e0, 0x6650e0 },
151 { 0x00e4, 0x6650e4 },
152 { 0x00e8, 0x6650e8 },
153 { 0x0100, 0x665100 },
154 { 0x0104, 0x665104 },
155 { 0x0108, 0x665108 },
156 { 0x010c, 0x66510c },
157 { 0x0110, 0x665110 },
158 { 0x0118, 0x665118 },
159 { 0x011c, 0x66511c },
160 { 0x0120, 0x665120 },
161 { 0x0124, 0x665124 },
162 { 0x0130, 0x665130 },
163 { 0x0134, 0x665134 },
164 { 0x0138, 0x665138 },
165 { 0x013c, 0x66513c },
166 { 0x0140, 0x665140 },
167 { 0x0144, 0x665144 },
168 { 0x0148, 0x665148 },
169 { 0x014c, 0x66514c },
170 { 0x0150, 0x665150 },
171 { 0x0154, 0x665154 },
172 { 0x0158, 0x665158 },
173 { 0x015c, 0x66515c },
174 { 0x0160, 0x665160 },
175 { 0x0164, 0x665164 },
176 { 0x0168, 0x665168 },
177 { 0x016c, 0x66516c },
178 { 0x0400, 0x665400 },
179 { 0x0404, 0x665404 },
180 { 0x0408, 0x665408 },
181 { 0x040c, 0x66540c },
182 { 0x0410, 0x665410 },
183 {}
184 }
185};
186
187const struct nv50_disp_mthd_chan
188nve0_disp_ovly_mthd_chan = {
189 .name = "Overlay",
190 .addr = 0x001000,
191 .data = {
192 { "Global", 1, &nve0_disp_ovly_mthd_base },
193 {}
194 }
195};
196
197/*******************************************************************************
198 * Base display object
199 ******************************************************************************/
200
32static struct nouveau_oclass 201static struct nouveau_oclass
33nve0_disp_sclass[] = { 202nve0_disp_sclass[] = {
34 { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 203 { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
@@ -45,6 +214,10 @@ nve0_disp_base_oclass[] = {
45 {} 214 {}
46}; 215};
47 216
217/*******************************************************************************
218 * Display engine implementation
219 ******************************************************************************/
220
48static int 221static int
49nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 222nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size, 223 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +250,17 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 return 0; 250 return 0;
78} 251}
79 252
80struct nouveau_oclass 253struct nouveau_oclass *
81nve0_disp_oclass = { 254nve0_disp_oclass = &(struct nv50_disp_impl) {
82 .handle = NV_ENGINE(DISP, 0x91), 255 .base.base.handle = NV_ENGINE(DISP, 0x91),
83 .ofuncs = &(struct nouveau_ofuncs) { 256 .base.base.ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nve0_disp_ctor, 257 .ctor = nve0_disp_ctor,
85 .dtor = _nouveau_disp_dtor, 258 .dtor = _nouveau_disp_dtor,
86 .init = _nouveau_disp_init, 259 .init = _nouveau_disp_init,
87 .fini = _nouveau_disp_fini, 260 .fini = _nouveau_disp_fini,
88 }, 261 },
89}; 262 .mthd.core = &nve0_disp_mast_mthd_chan,
263 .mthd.base = &nvd0_disp_sync_mthd_chan,
264 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
265 .mthd.prev = -0x020000,
266}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 05fee10e0c97..482585d375fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -29,6 +29,10 @@
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
32/*******************************************************************************
33 * Base display object
34 ******************************************************************************/
35
32static struct nouveau_oclass 36static struct nouveau_oclass
33nvf0_disp_sclass[] = { 37nvf0_disp_sclass[] = {
34 { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 38 { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
@@ -45,6 +49,10 @@ nvf0_disp_base_oclass[] = {
45 {} 49 {}
46}; 50};
47 51
52/*******************************************************************************
53 * Display engine implementation
54 ******************************************************************************/
55
48static int 56static int
49nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 57nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size, 58 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,13 +85,17 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 return 0; 85 return 0;
78} 86}
79 87
80struct nouveau_oclass 88struct nouveau_oclass *
81nvf0_disp_oclass = { 89nvf0_disp_oclass = &(struct nv50_disp_impl) {
82 .handle = NV_ENGINE(DISP, 0x92), 90 .base.base.handle = NV_ENGINE(DISP, 0x92),
83 .ofuncs = &(struct nouveau_ofuncs) { 91 .base.base.ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nvf0_disp_ctor, 92 .ctor = nvf0_disp_ctor,
85 .dtor = _nouveau_disp_dtor, 93 .dtor = _nouveau_disp_dtor,
86 .init = _nouveau_disp_init, 94 .init = _nouveau_disp_init,
87 .fini = _nouveau_disp_fini, 95 .fini = _nouveau_disp_fini,
88 }, 96 },
89}; 97 .mthd.core = &nve0_disp_mast_mthd_chan,
98 .mthd.base = &nvd0_disp_sync_mthd_chan,
99 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
100 .mthd.prev = -0x020000,
101}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
new file mode 100644
index 000000000000..cc3c7a4ca747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -0,0 +1,10 @@
1#ifndef __NVKM_DISP_PRIV_H__
2#define __NVKM_DISP_PRIV_H__
3
4#include <engine/disp.h>
5
6struct nouveau_disp_impl {
7 struct nouveau_oclass base;
8};
9
10#endif
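
priv.h is the consumer side of that pattern: because the base class is the first member at every level, code like (void *)nv_object(priv)->oclass in the supervisor and error paths can cast the stored oclass pointer straight back to the implementation struct. A container_of-style spelling makes that layout dependency explicit (sketch, stand-in types):

#include <stddef.h>

struct oclass    { int handle; };
struct disp_impl { struct oclass base; const void *mthd_core; };

#define impl_from_oclass(p) \
	((struct disp_impl *)((char *)(p) - offsetof(struct disp_impl, base)))
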
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 944e73ac485c..1cfb3bb90131 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -53,6 +53,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
53 case NVF0_DISP_MAST_CLASS: 53 case NVF0_DISP_MAST_CLASS:
54 case NVF0_DISP_SYNC_CLASS: 54 case NVF0_DISP_SYNC_CLASS:
55 case NVF0_DISP_OVLY_CLASS: 55 case NVF0_DISP_OVLY_CLASS:
56 case GM107_DISP_MAST_CLASS:
57 case GM107_DISP_SYNC_CLASS:
58 case GM107_DISP_OVLY_CLASS:
56 break; 59 break;
57 default: 60 default:
58 return -EINVAL; 61 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 5e077e4ed7f6..2914646c8709 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -119,7 +119,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
119 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x", 119 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
120 device->chipset, falcon->addr >> 12); 120 device->chipset, falcon->addr >> 12);
121 121
122 ret = request_firmware(&fw, name, &device->pdev->dev); 122 ret = request_firmware(&fw, name, nv_device_base(device));
123 if (ret == 0) { 123 if (ret == 0) {
124 falcon->code.data = vmemdup(fw->data, fw->size); 124 falcon->code.data = vmemdup(fw->data, fw->size);
125 falcon->code.size = fw->size; 125 falcon->code.size = fw->size;
@@ -138,7 +138,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
138 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd", 138 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
139 device->chipset, falcon->addr >> 12); 139 device->chipset, falcon->addr >> 12);
140 140
141 ret = request_firmware(&fw, name, &device->pdev->dev); 141 ret = request_firmware(&fw, name, nv_device_base(device));
142 if (ret) { 142 if (ret) {
143 nv_error(falcon, "unable to load firmware data\n"); 143 nv_error(falcon, "unable to load firmware data\n");
144 return ret; 144 return ret;
@@ -153,7 +153,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
153 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc", 153 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
154 device->chipset, falcon->addr >> 12); 154 device->chipset, falcon->addr >> 12);
155 155
156 ret = request_firmware(&fw, name, &device->pdev->dev); 156 ret = request_firmware(&fw, name, nv_device_base(device));
157 if (ret) { 157 if (ret) {
158 nv_error(falcon, "unable to load firmware code\n"); 158 nv_error(falcon, "unable to load firmware code\n");
159 return ret; 159 return ret;
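
All three firmware loads switch from &device->pdev->dev to nv_device_base(device), which is what lets non-PCI (platform) devices share the falcon path; the same series replaces pci_resource_start() with nv_device_resource_start() in the fifo code below. nv_device_base() itself is outside this hunk, so the sketch is an assumed shape of such a helper, not the driver's definition:

#include <linux/pci.h>
#include <linux/platform_device.h>

/* assumed device wrapper; the real nouveau_device carries far more state */
struct device_sketch {
	struct pci_dev *pdev;                  /* NULL on non-PCI devices */
	struct platform_device *platformdev;
};

static inline struct device *
device_base_sketch(struct device_sketch *device)
{
	return device->pdev ? &device->pdev->dev : &device->platformdev->dev;
}
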
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index d3ec436d9cb5..6f9041ced9a2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -86,7 +86,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
86 } 86 }
87 87
88 /* map fifo control registers */ 88 /* map fifo control registers */
89 chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr + 89 chan->user = ioremap(nv_device_resource_start(device, bar) + addr +
90 (chan->chid * size), size); 90 (chan->chid * size), size);
91 if (!chan->user) 91 if (!chan->user)
92 return -EFAULT; 92 return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b22a33f0702d..fa1e719872b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -41,8 +41,16 @@
41 41
42struct nvc0_fifo_priv { 42struct nvc0_fifo_priv {
43 struct nouveau_fifo base; 43 struct nouveau_fifo base;
44 struct nouveau_gpuobj *playlist[2]; 44
45 int cur_playlist; 45 struct work_struct fault;
46 u64 mask;
47
48 struct {
49 struct nouveau_gpuobj *mem[2];
50 int active;
51 wait_queue_head_t wait;
52 } runlist;
53
46 struct { 54 struct {
47 struct nouveau_gpuobj *mem; 55 struct nouveau_gpuobj *mem;
48 struct nouveau_vma bar; 56 struct nouveau_vma bar;
@@ -58,6 +66,11 @@ struct nvc0_fifo_base {
58 66
59struct nvc0_fifo_chan { 67struct nvc0_fifo_chan {
60 struct nouveau_fifo_chan base; 68 struct nouveau_fifo_chan base;
69 enum {
70 STOPPED,
71 RUNNING,
72 KILLED
73 } state;
61}; 74};
62 75
63/******************************************************************************* 76/*******************************************************************************
@@ -65,29 +78,33 @@ struct nvc0_fifo_chan {
65 ******************************************************************************/ 78 ******************************************************************************/
66 79
67static void 80static void
68nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) 81nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv)
69{ 82{
70 struct nouveau_bar *bar = nouveau_bar(priv); 83 struct nouveau_bar *bar = nouveau_bar(priv);
71 struct nouveau_gpuobj *cur; 84 struct nouveau_gpuobj *cur;
72 int i, p; 85 int i, p;
73 86
74 mutex_lock(&nv_subdev(priv)->mutex); 87 mutex_lock(&nv_subdev(priv)->mutex);
75 cur = priv->playlist[priv->cur_playlist]; 88 cur = priv->runlist.mem[priv->runlist.active];
76 priv->cur_playlist = !priv->cur_playlist; 89 priv->runlist.active = !priv->runlist.active;
77 90
78 for (i = 0, p = 0; i < 128; i++) { 91 for (i = 0, p = 0; i < 128; i++) {
79 if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1)) 92 struct nvc0_fifo_chan *chan = (void *)priv->base.channel[i];
80 continue; 93 if (chan && chan->state == RUNNING) {
81 nv_wo32(cur, p + 0, i); 94 nv_wo32(cur, p + 0, i);
82 nv_wo32(cur, p + 4, 0x00000004); 95 nv_wo32(cur, p + 4, 0x00000004);
83 p += 8; 96 p += 8;
97 }
84 } 98 }
85 bar->flush(bar); 99 bar->flush(bar);
86 100
87 nv_wr32(priv, 0x002270, cur->addr >> 12); 101 nv_wr32(priv, 0x002270, cur->addr >> 12);
88 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); 102 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
89 if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) 103
90 nv_error(priv, "playlist update failed\n"); 104 if (wait_event_timeout(priv->runlist.wait,
105 !(nv_rd32(priv, 0x00227c) & 0x00100000),
106 msecs_to_jiffies(2000)) == 0)
107 nv_error(priv, "runlist update timeout\n");
91 mutex_unlock(&nv_subdev(priv)->mutex); 108 mutex_unlock(&nv_subdev(priv)->mutex);
92} 109}
93 110
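
Two behavioural changes land in runlist_update: membership now comes from the driver's own channel state (so a KILLED channel stays out even though its control registers still exist), and the register busy-wait is replaced by wait_event_timeout() on priv->runlist.wait, woken by the new runlist interrupt. A userspace model of the membership rebuild; the per-entry 0x00000004 word is copied from the patch, its meaning untouched here:

#include <stdio.h>
#include <stdint.h>

enum state { STOPPED, RUNNING, KILLED };

struct chan { enum state state; };

static int build_runlist(struct chan *chans[], int nr, uint32_t *ent)
{
	int i, p = 0;

	for (i = 0; i < nr; i++) {
		if (chans[i] && chans[i]->state == RUNNING) {
			ent[p * 2 + 0] = i;          /* chid */
			ent[p * 2 + 1] = 0x00000004; /* per-entry word, as in the patch */
			p++;
		}
	}
	return p; /* entry count, written alongside the 0x002274 kick */
}

int main(void)
{
	struct chan a = { RUNNING }, b = { KILLED }, c = { RUNNING };
	struct chan *chans[] = { &a, &b, &c };
	uint32_t ent[6];

	printf("%d runlist entries\n", build_runlist(chans, 3, ent));
	return 0;
}
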
@@ -239,30 +256,32 @@ nvc0_fifo_chan_init(struct nouveau_object *object)
239 return ret; 256 return ret;
240 257
241 nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12); 258 nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
242 nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001); 259
243 nvc0_fifo_playlist_update(priv); 260 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
261 nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
262 nvc0_fifo_runlist_update(priv);
263 }
264
244 return 0; 265 return 0;
245} 266}
246 267
268static void nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv);
269
247static int 270static int
248nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend) 271nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
249{ 272{
250 struct nvc0_fifo_priv *priv = (void *)object->engine; 273 struct nvc0_fifo_priv *priv = (void *)object->engine;
251 struct nvc0_fifo_chan *chan = (void *)object; 274 struct nvc0_fifo_chan *chan = (void *)object;
252 u32 chid = chan->base.chid; 275 u32 chid = chan->base.chid;
253 u32 mask, engine;
254 276
255 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); 277 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
256 nvc0_fifo_playlist_update(priv); 278 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
257 mask = nv_rd32(priv, 0x0025a4); 279 nvc0_fifo_runlist_update(priv);
258 for (engine = 0; mask && engine < 16; engine++) {
259 if (!(mask & (1 << engine)))
260 continue;
261 nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
262 mask &= ~(1 << engine);
263 } 280 }
264 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
265 281
282 nvc0_fifo_intr_engine(priv);
283
284 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
266 return nouveau_fifo_channel_fini(&chan->base, suspend); 285 return nouveau_fifo_channel_fini(&chan->base, suspend);
267} 286}
268 287
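
The odd-looking (chan->state = RUNNING) == RUNNING in chan_init/fini folds the state assignment into an always-true test, so init and fini only touch the runlist on a real STOPPED/RUNNING transition; a channel that recovery has marked KILLED is neither resubmitted nor removed twice. A plainer equivalent of the guard (sketch):

enum chan_state { STOPPED, RUNNING, KILLED };

/* returns nonzero when the caller should enable the channel and kick a
 * runlist update; a KILLED channel never re-enters the runlist */
static int chan_start(enum chan_state *state)
{
	if (*state != STOPPED)
		return 0;
	*state = RUNNING;
	return 1;
}
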
@@ -345,11 +364,177 @@ nvc0_fifo_cclass = {
345 * PFIFO engine 364 * PFIFO engine
346 ******************************************************************************/ 365 ******************************************************************************/
347 366
348static const struct nouveau_enum nvc0_fifo_fault_unit[] = { 367static inline int
368nvc0_fifo_engidx(struct nvc0_fifo_priv *priv, u32 engn)
369{
370 switch (engn) {
371 case NVDEV_ENGINE_GR : engn = 0; break;
372 case NVDEV_ENGINE_BSP : engn = 1; break;
373 case NVDEV_ENGINE_PPP : engn = 2; break;
374 case NVDEV_ENGINE_VP : engn = 3; break;
375 case NVDEV_ENGINE_COPY0: engn = 4; break;
376 case NVDEV_ENGINE_COPY1: engn = 5; break;
377 default:
378 return -1;
379 }
380
381 return engn;
382}
383
384static inline struct nouveau_engine *
385nvc0_fifo_engine(struct nvc0_fifo_priv *priv, u32 engn)
386{
387 switch (engn) {
388 case 0: engn = NVDEV_ENGINE_GR; break;
389 case 1: engn = NVDEV_ENGINE_BSP; break;
390 case 2: engn = NVDEV_ENGINE_PPP; break;
391 case 3: engn = NVDEV_ENGINE_VP; break;
392 case 4: engn = NVDEV_ENGINE_COPY0; break;
393 case 5: engn = NVDEV_ENGINE_COPY1; break;
394 default:
395 return NULL;
396 }
397
398 return nouveau_engine(priv, engn);
399}
400
401static void
402nvc0_fifo_recover_work(struct work_struct *work)
403{
404 struct nvc0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
405 struct nouveau_object *engine;
406 unsigned long flags;
407 u32 engn, engm = 0;
408 u64 mask, todo;
409
410 spin_lock_irqsave(&priv->base.lock, flags);
411 mask = priv->mask;
412 priv->mask = 0ULL;
413 spin_unlock_irqrestore(&priv->base.lock, flags);
414
415 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
416 engm |= 1 << nvc0_fifo_engidx(priv, engn);
417 nv_mask(priv, 0x002630, engm, engm);
418
419 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
420 if ((engine = (void *)nouveau_engine(priv, engn))) {
421 nv_ofuncs(engine)->fini(engine, false);
422 WARN_ON(nv_ofuncs(engine)->init(engine));
423 }
424 }
425
426 nvc0_fifo_runlist_update(priv);
427 nv_wr32(priv, 0x00262c, engm);
428 nv_mask(priv, 0x002630, engm, 0x00000000);
429}
430
431static void
432nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
433 struct nvc0_fifo_chan *chan)
434{
435 struct nouveau_object *engobj = nv_object(engine);
436 u32 chid = chan->base.chid;
437 unsigned long flags;
438
439 nv_error(priv, "%s engine fault on channel %d, recovering...\n",
440 nv_subdev(engine)->name, chid);
441
442 nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
443 chan->state = KILLED;
444
445 spin_lock_irqsave(&priv->base.lock, flags);
446 priv->mask |= 1ULL << nv_engidx(engobj);
447 spin_unlock_irqrestore(&priv->base.lock, flags);
448 schedule_work(&priv->fault);
449}
450
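
Recovery is split across contexts: nvc0_fifo_recover() runs under the fifo lock from the interrupt path, so it only kills the channel, records the engine in priv->mask, and schedules the work item; the workqueue then fences the engines via 0x002630, bounces them through fini/init, and rebuilds the runlist. One hedged observation: todo is 64-bit, so the "1 << engn" in the clear step arguably wants 1ULL << engn for engine indices above 31. The userspace model below uses the wide form:

#include <stdio.h>
#include <stdint.h>

static void reset_engine(unsigned engn) { printf("reset engine %u\n", engn); }

static void recover_work(uint64_t mask)
{
	uint64_t todo = mask;

	while (todo) {
		unsigned engn = __builtin_ctzll(todo); /* stand-in for __ffs64() */
		reset_engine(engn);
		todo &= ~(1ULL << engn);               /* wide shift on purpose */
	}
}

int main(void)
{
	recover_work((1ULL << 1) | (1ULL << 40));
	return 0;
}
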
451static int
452nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
453{
454 struct nvc0_fifo_chan *chan = NULL;
455 struct nouveau_handle *bind;
456 unsigned long flags;
457 int ret = -EINVAL;
458
459 spin_lock_irqsave(&priv->base.lock, flags);
460 if (likely(chid >= priv->base.min && chid <= priv->base.max))
461 chan = (void *)priv->base.channel[chid];
462 if (unlikely(!chan))
463 goto out;
464
465 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
466 if (likely(bind)) {
467 if (!mthd || !nv_call(bind->object, mthd, data))
468 ret = 0;
469 nouveau_namedb_put(bind);
470 }
471
472out:
473 spin_unlock_irqrestore(&priv->base.lock, flags);
474 return ret;
475}
476
477static const struct nouveau_enum
478nvc0_fifo_sched_reason[] = {
479 { 0x0a, "CTXSW_TIMEOUT" },
480 {}
481};
482
483static void
484nvc0_fifo_intr_sched_ctxsw(struct nvc0_fifo_priv *priv)
485{
486 struct nouveau_engine *engine;
487 struct nvc0_fifo_chan *chan;
488 u32 engn;
489
490 for (engn = 0; engn < 6; engn++) {
491 u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
492 u32 busy = (stat & 0x80000000);
493 u32 save = (stat & 0x00100000); /* maybe? */
494 u32 unk0 = (stat & 0x00040000);
495 u32 unk1 = (stat & 0x00001000);
496 u32 chid = (stat & 0x0000007f);
497 (void)save;
498
499 if (busy && unk0 && unk1) {
500 if (!(chan = (void *)priv->base.channel[chid]))
501 continue;
502 if (!(engine = nvc0_fifo_engine(priv, engn)))
503 continue;
504 nvc0_fifo_recover(priv, engine, chan);
505 }
506 }
507}
508
509static void
510nvc0_fifo_intr_sched(struct nvc0_fifo_priv *priv)
511{
512 u32 intr = nv_rd32(priv, 0x00254c);
513 u32 code = intr & 0x000000ff;
514 const struct nouveau_enum *en;
515 char enunk[6] = "";
516
517 en = nouveau_enum_find(nvc0_fifo_sched_reason, code);
518 if (!en)
519 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
520
521 nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
522
523 switch (code) {
524 case 0x0a:
525 nvc0_fifo_intr_sched_ctxsw(priv);
526 break;
527 default:
528 break;
529 }
530}
531
532static const struct nouveau_enum
533nvc0_fifo_fault_engine[] = {
349 { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR }, 534 { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
350 { 0x03, "PEEPHOLE" }, 535 { 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
351 { 0x04, "BAR1" }, 536 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
352 { 0x05, "BAR3" }, 537 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
353 { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO }, 538 { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
354 { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP }, 539 { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
355 { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP }, 540 { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
@@ -361,7 +546,8 @@ static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
361 {} 546 {}
362}; 547};
363 548
364static const struct nouveau_enum nvc0_fifo_fault_reason[] = { 549static const struct nouveau_enum
550nvc0_fifo_fault_reason[] = {
365 { 0x00, "PT_NOT_PRESENT" }, 551 { 0x00, "PT_NOT_PRESENT" },
366 { 0x01, "PT_TOO_SHORT" }, 552 { 0x01, "PT_TOO_SHORT" },
367 { 0x02, "PAGE_NOT_PRESENT" }, 553 { 0x02, "PAGE_NOT_PRESENT" },
@@ -374,7 +560,8 @@ static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
374 {} 560 {}
375}; 561};
376 562
377static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = { 563static const struct nouveau_enum
564nvc0_fifo_fault_hubclient[] = {
378 { 0x01, "PCOPY0" }, 565 { 0x01, "PCOPY0" },
379 { 0x02, "PCOPY1" }, 566 { 0x02, "PCOPY1" },
380 { 0x04, "DISPATCH" }, 567 { 0x04, "DISPATCH" },
@@ -392,7 +579,8 @@ static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
392 {} 579 {}
393}; 580};
394 581
395static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = { 582static const struct nouveau_enum
583nvc0_fifo_fault_gpcclient[] = {
396 { 0x01, "TEX" }, 584 { 0x01, "TEX" },
397 { 0x0c, "ESETUP" }, 585 { 0x0c, "ESETUP" },
398 { 0x0e, "CTXCTL" }, 586 { 0x0e, "CTXCTL" },
@@ -400,92 +588,92 @@ static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
400 {} 588 {}
401}; 589};
402 590
403static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
404/* { 0x00008000, "" } seen with null ib push */
405 { 0x00200000, "ILLEGAL_MTHD" },
406 { 0x00800000, "EMPTY_SUBC" },
407 {}
408};
409
410static void 591static void
411nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit) 592nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
412{ 593{
413 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); 594 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
414 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); 595 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
415 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); 596 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
416 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); 597 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
598 u32 gpc = (stat & 0x1f000000) >> 24;
417 u32 client = (stat & 0x00001f00) >> 8; 599 u32 client = (stat & 0x00001f00) >> 8;
418 const struct nouveau_enum *en; 600 u32 write = (stat & 0x00000080);
419 struct nouveau_engine *engine; 601 u32 hub = (stat & 0x00000040);
420 struct nouveau_object *engctx = NULL; 602 u32 reason = (stat & 0x0000000f);
421 603 struct nouveau_object *engctx = NULL, *object;
422 switch (unit) { 604 struct nouveau_engine *engine = NULL;
423 case 3: /* PEEPHOLE */ 605 const struct nouveau_enum *er, *eu, *ec;
424 nv_mask(priv, 0x001718, 0x00000000, 0x00000000); 606 char erunk[6] = "";
425 break; 607 char euunk[6] = "";
426 case 4: /* BAR1 */ 608 char ecunk[6] = "";
427 nv_mask(priv, 0x001704, 0x00000000, 0x00000000); 609 char gpcid[3] = "";
428 break; 610
429 case 5: /* BAR3 */ 611 er = nouveau_enum_find(nvc0_fifo_fault_reason, reason);
430 nv_mask(priv, 0x001714, 0x00000000, 0x00000000); 612 if (!er)
431 break; 613 snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
432 default: 614
433 break; 615 eu = nouveau_enum_find(nvc0_fifo_fault_engine, unit);
616 if (eu) {
617 switch (eu->data2) {
618 case NVDEV_SUBDEV_BAR:
619 nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
620 break;
621 case NVDEV_SUBDEV_INSTMEM:
622 nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
623 break;
624 case NVDEV_ENGINE_IFB:
625 nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
626 break;
627 default:
628 engine = nouveau_engine(priv, eu->data2);
629 if (engine)
630 engctx = nouveau_engctx_get(engine, inst);
631 break;
632 }
633 } else {
634 snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
434 } 635 }
435 636
436 nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ? 637 if (hub) {
437 "write" : "read", (u64)vahi << 32 | valo); 638 ec = nouveau_enum_find(nvc0_fifo_fault_hubclient, client);
438 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
439 pr_cont("] from ");
440 en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
441 if (stat & 0x00000040) {
442 pr_cont("/");
443 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
444 } else { 639 } else {
445 pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24); 640 ec = nouveau_enum_find(nvc0_fifo_fault_gpcclient, client);
446 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client); 641 snprintf(gpcid, sizeof(gpcid), "%d", gpc);
447 } 642 }
448 643
449 if (en && en->data2) { 644 if (!ec)
450 engine = nouveau_engine(priv, en->data2); 645 snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
451 if (engine) 646
452 engctx = nouveau_engctx_get(engine, inst); 647 nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
453 648 "channel 0x%010llx [%s]\n", write ? "write" : "read",
649 (u64)vahi << 32 | valo, er ? er->name : erunk,
650 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
651 ec ? ec->name : ecunk, (u64)inst << 12,
652 nouveau_client_name(engctx));
653
654 object = engctx;
655 while (object) {
656 switch (nv_mclass(object)) {
657 case NVC0_CHANNEL_IND_CLASS:
658 nvc0_fifo_recover(priv, engine, (void *)object);
659 break;
660 }
661 object = object->parent;
454 } 662 }
455 pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
456 nouveau_client_name(engctx));
457 663
458 nouveau_engctx_put(engctx); 664 nouveau_engctx_put(engctx);
459} 665}
460 666
461static int 667static const struct nouveau_bitfield
462nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data) 668nvc0_fifo_pbdma_intr[] = {
463{ 669/* { 0x00008000, "" } seen with null ib push */
464 struct nvc0_fifo_chan *chan = NULL; 670 { 0x00200000, "ILLEGAL_MTHD" },
465 struct nouveau_handle *bind; 671 { 0x00800000, "EMPTY_SUBC" },
466 unsigned long flags; 672 {}
467 int ret = -EINVAL; 673};
468
469 spin_lock_irqsave(&priv->base.lock, flags);
470 if (likely(chid >= priv->base.min && chid <= priv->base.max))
471 chan = (void *)priv->base.channel[chid];
472 if (unlikely(!chan))
473 goto out;
474
475 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
476 if (likely(bind)) {
477 if (!mthd || !nv_call(bind->object, mthd, data))
478 ret = 0;
479 nouveau_namedb_put(bind);
480 }
481
482out:
483 spin_unlock_irqrestore(&priv->base.lock, flags);
484 return ret;
485}
486 674
487static void 675static void
488nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit) 676nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
489{ 677{
490 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); 678 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
491 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); 679 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -501,11 +689,11 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
501 } 689 }
502 690
503 if (show) { 691 if (show) {
504 nv_error(priv, "SUBFIFO%d:", unit); 692 nv_error(priv, "PBDMA%d:", unit);
505 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show); 693 nouveau_bitfield_print(nvc0_fifo_pbdma_intr, show);
506 pr_cont("\n"); 694 pr_cont("\n");
507 nv_error(priv, 695 nv_error(priv,
508 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", 696 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
509 unit, chid, 697 unit, chid,
510 nouveau_client_name_for_fifo_chid(&priv->base, chid), 698 nouveau_client_name_for_fifo_chid(&priv->base, chid),
511 subc, mthd, data); 699 subc, mthd, data);
@@ -516,6 +704,56 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
516} 704}
517 705
518static void 706static void
707nvc0_fifo_intr_runlist(struct nvc0_fifo_priv *priv)
708{
709 u32 intr = nv_rd32(priv, 0x002a00);
710
711 if (intr & 0x10000000) {
712 wake_up(&priv->runlist.wait);
713 nv_wr32(priv, 0x002a00, 0x10000000);
714 intr &= ~0x10000000;
715 }
716
717 if (intr) {
718 nv_error(priv, "RUNLIST 0x%08x\n", intr);
719 nv_wr32(priv, 0x002a00, intr);
720 }
721}
722
723static void
724nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
725{
726 u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
727 u32 inte = nv_rd32(priv, 0x002628);
728 u32 unkn;
729
730 for (unkn = 0; unkn < 8; unkn++) {
731 u32 ints = (intr >> (unkn * 0x04)) & inte;
732 if (ints & 0x1) {
733 nouveau_event_trigger(priv->base.uevent, 0);
734 ints &= ~1;
735 }
736 if (ints) {
737 nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
738 nv_mask(priv, 0x002628, ints, 0);
739 }
740 }
741
742 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
743}
744
745static void
746nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv)
747{
748 u32 mask = nv_rd32(priv, 0x0025a4);
749 while (mask) {
750 u32 unit = __ffs(mask);
751 nvc0_fifo_intr_engine_unit(priv, unit);
752 mask &= ~(1 << unit);
753 }
754}
755
756static void
519nvc0_fifo_intr(struct nouveau_subdev *subdev) 757nvc0_fifo_intr(struct nouveau_subdev *subdev)
520{ 758{
521 struct nvc0_fifo_priv *priv = (void *)subdev; 759 struct nvc0_fifo_priv *priv = (void *)subdev;
@@ -530,8 +768,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
530 } 768 }
531 769
532 if (stat & 0x00000100) { 770 if (stat & 0x00000100) {
533 u32 intr = nv_rd32(priv, 0x00254c); 771 nvc0_fifo_intr_sched(priv);
534 nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
535 nv_wr32(priv, 0x002100, 0x00000100); 772 nv_wr32(priv, 0x002100, 0x00000100);
536 stat &= ~0x00000100; 773 stat &= ~0x00000100;
537 } 774 }
@@ -551,52 +788,41 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
551 } 788 }
552 789
553 if (stat & 0x10000000) { 790 if (stat & 0x10000000) {
554 u32 units = nv_rd32(priv, 0x00259c); 791 u32 mask = nv_rd32(priv, 0x00259c);
555 u32 u = units; 792 while (mask) {
556 793 u32 unit = __ffs(mask);
557 while (u) { 794 nvc0_fifo_intr_fault(priv, unit);
558 int i = ffs(u) - 1; 795 nv_wr32(priv, 0x00259c, (1 << unit));
559 nvc0_fifo_isr_vm_fault(priv, i); 796 mask &= ~(1 << unit);
560 u &= ~(1 << i);
561 } 797 }
562
563 nv_wr32(priv, 0x00259c, units);
564 stat &= ~0x10000000; 798 stat &= ~0x10000000;
565 } 799 }
566 800
567 if (stat & 0x20000000) { 801 if (stat & 0x20000000) {
568 u32 units = nv_rd32(priv, 0x0025a0); 802 u32 mask = nv_rd32(priv, 0x0025a0);
569 u32 u = units; 803 while (mask) {
570 804 u32 unit = __ffs(mask);
571 while (u) { 805 nvc0_fifo_intr_pbdma(priv, unit);
572 int i = ffs(u) - 1; 806 nv_wr32(priv, 0x0025a0, (1 << unit));
573 nvc0_fifo_isr_subfifo_intr(priv, i); 807 mask &= ~(1 << unit);
574 u &= ~(1 << i);
575 } 808 }
576
577 nv_wr32(priv, 0x0025a0, units);
578 stat &= ~0x20000000; 809 stat &= ~0x20000000;
579 } 810 }
580 811
581 if (stat & 0x40000000) { 812 if (stat & 0x40000000) {
582 u32 intr0 = nv_rd32(priv, 0x0025a4); 813 nvc0_fifo_intr_runlist(priv);
583 u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000);
584 nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
585 intr0, intr1);
586 stat &= ~0x40000000; 814 stat &= ~0x40000000;
587 } 815 }
588 816
589 if (stat & 0x80000000) { 817 if (stat & 0x80000000) {
590 u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000); 818 nvc0_fifo_intr_engine(priv);
591 nouveau_event_trigger(priv->base.uevent, 0);
592 nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
593 stat &= ~0x80000000; 819 stat &= ~0x80000000;
594 } 820 }
595 821
596 if (stat) { 822 if (stat) {
597 nv_fatal(priv, "unhandled status 0x%08x\n", stat); 823 nv_error(priv, "INTR 0x%08x\n", stat);
824 nv_mask(priv, 0x002140, stat, 0x00000000);
598 nv_wr32(priv, 0x002100, stat); 825 nv_wr32(priv, 0x002100, stat);
599 nv_wr32(priv, 0x002140, 0);
600 } 826 }
601} 827}
602 828
@@ -627,16 +853,20 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
627 if (ret) 853 if (ret)
628 return ret; 854 return ret;
629 855
856 INIT_WORK(&priv->fault, nvc0_fifo_recover_work);
857
630 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, 858 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
631 &priv->playlist[0]); 859 &priv->runlist.mem[0]);
632 if (ret) 860 if (ret)
633 return ret; 861 return ret;
634 862
635 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, 863 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
636 &priv->playlist[1]); 864 &priv->runlist.mem[1]);
637 if (ret) 865 if (ret)
638 return ret; 866 return ret;
639 867
868 init_waitqueue_head(&priv->runlist.wait);
869
640 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0, 870 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
641 &priv->user.mem); 871 &priv->user.mem);
642 if (ret) 872 if (ret)
@@ -665,8 +895,8 @@ nvc0_fifo_dtor(struct nouveau_object *object)
665 895
666 nouveau_gpuobj_unmap(&priv->user.bar); 896 nouveau_gpuobj_unmap(&priv->user.bar);
667 nouveau_gpuobj_ref(NULL, &priv->user.mem); 897 nouveau_gpuobj_ref(NULL, &priv->user.mem);
668 nouveau_gpuobj_ref(NULL, &priv->playlist[1]); 898 nouveau_gpuobj_ref(NULL, &priv->runlist.mem[0]);
669 nouveau_gpuobj_ref(NULL, &priv->playlist[0]); 899 nouveau_gpuobj_ref(NULL, &priv->runlist.mem[1]);
670 900
671 nouveau_fifo_destroy(&priv->base); 901 nouveau_fifo_destroy(&priv->base);
672} 902}
@@ -685,9 +915,9 @@ nvc0_fifo_init(struct nouveau_object *object)
685 nv_wr32(priv, 0x002204, 0xffffffff); 915 nv_wr32(priv, 0x002204, 0xffffffff);
686 916
687 priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204)); 917 priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
688 nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr); 918 nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
689 919
690 /* assign engines to subfifos */ 920 /* assign engines to PBDMAs */
691 if (priv->spoon_nr >= 3) { 921 if (priv->spoon_nr >= 3) {
692 nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */ 922 nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
693 nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */ 923 nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
@@ -697,7 +927,7 @@ nvc0_fifo_init(struct nouveau_object *object)
697 nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */ 927 nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
698 } 928 }
699 929
700 /* PSUBFIFO[n] */ 930 /* PBDMA[n] */
701 for (i = 0; i < priv->spoon_nr; i++) { 931 for (i = 0; i < priv->spoon_nr; i++) {
702 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 932 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
703 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 933 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
@@ -707,10 +937,9 @@ nvc0_fifo_init(struct nouveau_object *object)
707 nv_mask(priv, 0x002200, 0x00000001, 0x00000001); 937 nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
708 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); 938 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
709 939
710 nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
711 nv_wr32(priv, 0x002100, 0xffffffff); 940 nv_wr32(priv, 0x002100, 0xffffffff);
712 nv_wr32(priv, 0x002140, 0x3fffffff); 941 nv_wr32(priv, 0x002140, 0x7fffffff);
713 nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */ 942 nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
714 return 0; 943 return 0;
715} 944}
716 945
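
The init tail ties the earlier changes together: the unhandled-interrupt path is demoted from nv_fatal to an error that masks the offending source in 0x002140, the enable mask widens from 0x3fffffff to 0x7fffffff (as I read it, so the runlist-done interrupt behind the new waitqueue is actually delivered), and spoon_nr is derived by writing all-ones to 0x002204 and counting the bits that stick. A userspace model of that probe-by-write count:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_002204;

/* hardware keeps only the wired bits of an all-ones write */
static uint32_t enable_write(uint32_t val, uint32_t implemented)
{
	reg_002204 = val & implemented;
	return reg_002204;
}

int main(void)
{
	int spoon_nr = __builtin_popcount(enable_write(0xffffffff, 0x7));
	printf("%d PBDMA unit(s)\n", spoon_nr);
	return 0;
}
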
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 54c1b5b471cd..a9a1a9c9f9f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -60,10 +60,15 @@ static const struct {
60struct nve0_fifo_engn { 60struct nve0_fifo_engn {
61 struct nouveau_gpuobj *runlist[2]; 61 struct nouveau_gpuobj *runlist[2];
62 int cur_runlist; 62 int cur_runlist;
63 wait_queue_head_t wait;
63}; 64};
64 65
65struct nve0_fifo_priv { 66struct nve0_fifo_priv {
66 struct nouveau_fifo base; 67 struct nouveau_fifo base;
68
69 struct work_struct fault;
70 u64 mask;
71
67 struct nve0_fifo_engn engine[FIFO_ENGINE_NR]; 72 struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
68 struct { 73 struct {
69 struct nouveau_gpuobj *mem; 74 struct nouveau_gpuobj *mem;
@@ -81,6 +86,11 @@ struct nve0_fifo_base {
81struct nve0_fifo_chan { 86struct nve0_fifo_chan {
82 struct nouveau_fifo_chan base; 87 struct nouveau_fifo_chan base;
83 u32 engine; 88 u32 engine;
89 enum {
90 STOPPED,
91 RUNNING,
92 KILLED
93 } state;
84}; 94};
85 95
86/******************************************************************************* 96/*******************************************************************************
@@ -93,7 +103,6 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
93 struct nouveau_bar *bar = nouveau_bar(priv); 103 struct nouveau_bar *bar = nouveau_bar(priv);
94 struct nve0_fifo_engn *engn = &priv->engine[engine]; 104 struct nve0_fifo_engn *engn = &priv->engine[engine];
95 struct nouveau_gpuobj *cur; 105 struct nouveau_gpuobj *cur;
96 u32 match = (engine << 16) | 0x00000001;
97 int i, p; 106 int i, p;
98 107
99 mutex_lock(&nv_subdev(priv)->mutex); 108 mutex_lock(&nv_subdev(priv)->mutex);
@@ -101,18 +110,21 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
101 engn->cur_runlist = !engn->cur_runlist; 110 engn->cur_runlist = !engn->cur_runlist;
102 111
103 for (i = 0, p = 0; i < priv->base.max; i++) { 112 for (i = 0, p = 0; i < priv->base.max; i++) {
104 u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001; 113 struct nve0_fifo_chan *chan = (void *)priv->base.channel[i];
105 if (ctrl != match) 114 if (chan && chan->state == RUNNING && chan->engine == engine) {
106 continue; 115 nv_wo32(cur, p + 0, i);
107 nv_wo32(cur, p + 0, i); 116 nv_wo32(cur, p + 4, 0x00000000);
108 nv_wo32(cur, p + 4, 0x00000000); 117 p += 8;
109 p += 8; 118 }
110 } 119 }
111 bar->flush(bar); 120 bar->flush(bar);
112 121
113 nv_wr32(priv, 0x002270, cur->addr >> 12); 122 nv_wr32(priv, 0x002270, cur->addr >> 12);
114 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); 123 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
115 if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) 124
125 if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
126 (engine * 0x08)) & 0x00100000),
127 msecs_to_jiffies(2000)) == 0)
116 nv_error(priv, "runlist %d update timeout\n", engine); 128 nv_error(priv, "runlist %d update timeout\n", engine);
117 mutex_unlock(&nv_subdev(priv)->mutex); 129 mutex_unlock(&nv_subdev(priv)->mutex);
118} 130}
@@ -129,9 +141,11 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
129 141
130 switch (nv_engidx(object->engine)) { 142 switch (nv_engidx(object->engine)) {
131 case NVDEV_ENGINE_SW : 143 case NVDEV_ENGINE_SW :
144 return 0;
132 case NVDEV_ENGINE_COPY0: 145 case NVDEV_ENGINE_COPY0:
133 case NVDEV_ENGINE_COPY1: 146 case NVDEV_ENGINE_COPY1:
134 case NVDEV_ENGINE_COPY2: 147 case NVDEV_ENGINE_COPY2:
148 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
135 return 0; 149 return 0;
136 case NVDEV_ENGINE_GR : addr = 0x0210; break; 150 case NVDEV_ENGINE_GR : addr = 0x0210; break;
137 case NVDEV_ENGINE_BSP : addr = 0x0270; break; 151 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
@@ -279,9 +293,13 @@ nve0_fifo_chan_init(struct nouveau_object *object)
279 293
280 nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); 294 nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
281 nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); 295 nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
282 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); 296
283 nve0_fifo_runlist_update(priv, chan->engine); 297 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
284 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); 298 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
299 nve0_fifo_runlist_update(priv, chan->engine);
300 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
301 }
302
285 return 0; 303 return 0;
286} 304}
287 305
@@ -292,10 +310,12 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
292 struct nve0_fifo_chan *chan = (void *)object; 310 struct nve0_fifo_chan *chan = (void *)object;
293 u32 chid = chan->base.chid; 311 u32 chid = chan->base.chid;
294 312
295 nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); 313 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
296 nve0_fifo_runlist_update(priv, chan->engine); 314 nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
297 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); 315 nve0_fifo_runlist_update(priv, chan->engine);
316 }
298 317
318 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
299 return nouveau_fifo_channel_fini(&chan->base, suspend); 319 return nouveau_fifo_channel_fini(&chan->base, suspend);
300} 320}
301 321
@@ -377,14 +397,211 @@ nve0_fifo_cclass = {
377 * PFIFO engine 397 * PFIFO engine
378 ******************************************************************************/ 398 ******************************************************************************/
379 399
380static const struct nouveau_enum nve0_fifo_sched_reason[] = { 400static inline int
401nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn)
402{
403 switch (engn) {
404 case NVDEV_ENGINE_GR :
405 case NVDEV_ENGINE_COPY2: engn = 0; break;
406 case NVDEV_ENGINE_BSP : engn = 1; break;
407 case NVDEV_ENGINE_PPP : engn = 2; break;
408 case NVDEV_ENGINE_VP : engn = 3; break;
409 case NVDEV_ENGINE_COPY0: engn = 4; break;
410 case NVDEV_ENGINE_COPY1: engn = 5; break;
411 case NVDEV_ENGINE_VENC : engn = 6; break;
412 default:
413 return -1;
414 }
415
416 return engn;
417}
418
419static inline struct nouveau_engine *
420nve0_fifo_engine(struct nve0_fifo_priv *priv, u32 engn)
421{
422 if (engn >= ARRAY_SIZE(fifo_engine))
423 return NULL;
424 return nouveau_engine(priv, fifo_engine[engn].subdev);
425}
426
427static void
428nve0_fifo_recover_work(struct work_struct *work)
429{
430 struct nve0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
431 struct nouveau_object *engine;
432 unsigned long flags;
433 u32 engn, engm = 0;
434 u64 mask, todo;
435
436 spin_lock_irqsave(&priv->base.lock, flags);
437 mask = priv->mask;
438 priv->mask = 0ULL;
439 spin_unlock_irqrestore(&priv->base.lock, flags);
440
441 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
442 engm |= 1 << nve0_fifo_engidx(priv, engn);
443 nv_mask(priv, 0x002630, engm, engm);
444
445 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
446 if ((engine = (void *)nouveau_engine(priv, engn))) {
447 nv_ofuncs(engine)->fini(engine, false);
448 WARN_ON(nv_ofuncs(engine)->init(engine));
449 }
450 nve0_fifo_runlist_update(priv, nve0_fifo_engidx(priv, engn));
451 }
452
453 nv_wr32(priv, 0x00262c, engm);
454 nv_mask(priv, 0x002630, engm, 0x00000000);
455}
456
457static void
458nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
459 struct nve0_fifo_chan *chan)
460{
461 struct nouveau_object *engobj = nv_object(engine);
462 u32 chid = chan->base.chid;
463 unsigned long flags;
464
465 nv_error(priv, "%s engine fault on channel %d, recovering...\n",
466 nv_subdev(engine)->name, chid);
467
468 nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
469 chan->state = KILLED;
470
471 spin_lock_irqsave(&priv->base.lock, flags);
472 priv->mask |= 1ULL << nv_engidx(engobj);
473 spin_unlock_irqrestore(&priv->base.lock, flags);
474 schedule_work(&priv->fault);
475}
476
477static int
478nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
479{
480 struct nve0_fifo_chan *chan = NULL;
481 struct nouveau_handle *bind;
482 unsigned long flags;
483 int ret = -EINVAL;
484
485 spin_lock_irqsave(&priv->base.lock, flags);
486 if (likely(chid >= priv->base.min && chid <= priv->base.max))
487 chan = (void *)priv->base.channel[chid];
488 if (unlikely(!chan))
489 goto out;
490
491 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
492 if (likely(bind)) {
493 if (!mthd || !nv_call(bind->object, mthd, data))
494 ret = 0;
495 nouveau_namedb_put(bind);
496 }
497
498out:
499 spin_unlock_irqrestore(&priv->base.lock, flags);
500 return ret;
501}
502
503static const struct nouveau_enum
504nve0_fifo_bind_reason[] = {
505 { 0x01, "BIND_NOT_UNBOUND" },
506 { 0x02, "SNOOP_WITHOUT_BAR1" },
507 { 0x03, "UNBIND_WHILE_RUNNING" },
508 { 0x05, "INVALID_RUNLIST" },
509 { 0x06, "INVALID_CTX_TGT" },
510 { 0x0b, "UNBIND_WHILE_PARKED" },
511 {}
512};
513
514static void
515nve0_fifo_intr_bind(struct nve0_fifo_priv *priv)
516{
517 u32 intr = nv_rd32(priv, 0x00252c);
518 u32 code = intr & 0x000000ff;
519 const struct nouveau_enum *en;
520 char enunk[6] = "";
521
522 en = nouveau_enum_find(nve0_fifo_bind_reason, code);
523 if (!en)
524 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
525
526 nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
527}
528
529static const struct nouveau_enum
530nve0_fifo_sched_reason[] = {
381 { 0x0a, "CTXSW_TIMEOUT" }, 531 { 0x0a, "CTXSW_TIMEOUT" },
382 {} 532 {}
383}; 533};
384 534
385static const struct nouveau_enum nve0_fifo_fault_engine[] = { 535static void
536nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
537{
538 struct nouveau_engine *engine;
539 struct nve0_fifo_chan *chan;
540 u32 engn;
541
542 for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
543 u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
544 u32 busy = (stat & 0x80000000);
545 u32 next = (stat & 0x07ff0000) >> 16;
546 u32 chsw = (stat & 0x00008000);
547 u32 save = (stat & 0x00004000);
548 u32 load = (stat & 0x00002000);
549 u32 prev = (stat & 0x000007ff);
550 u32 chid = load ? next : prev;
551 (void)save;
552
553 if (busy && chsw) {
554 if (!(chan = (void *)priv->base.channel[chid]))
555 continue;
556 if (!(engine = nve0_fifo_engine(priv, engn)))
557 continue;
558 nve0_fifo_recover(priv, engine, chan);
559 }
560 }
561}
562
563static void
564nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
565{
566 u32 intr = nv_rd32(priv, 0x00254c);
567 u32 code = intr & 0x000000ff;
568 const struct nouveau_enum *en;
569 char enunk[6] = "";
570
571 en = nouveau_enum_find(nve0_fifo_sched_reason, code);
572 if (!en)
573 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
574
575 nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
576
577 switch (code) {
578 case 0x0a:
579 nve0_fifo_intr_sched_ctxsw(priv);
580 break;
581 default:
582 break;
583 }
584}
585
586static void
587nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
588{
589 u32 stat = nv_rd32(priv, 0x00256c);
590 nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
591 nv_wr32(priv, 0x00256c, stat);
592}
593
594static void
595nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
596{
597 u32 stat = nv_rd32(priv, 0x00259c);
598 nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
599}
600
601static const struct nouveau_enum
602nve0_fifo_fault_engine[] = {
603 { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
387 { 0x03, "IFB" },
604 { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
605 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
606 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
607 { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
@@ -402,7 +619,8 @@ static const struct nouveau_enum nve0_fifo_fault_engine[] = {
619 {}
620};
621
405static const struct nouveau_enum nve0_fifo_fault_reason[] = {
622static const struct nouveau_enum
623nve0_fifo_fault_reason[] = {
624 { 0x00, "PDE" },
625 { 0x01, "PDE_SIZE" },
626 { 0x02, "PTE" },
@@ -422,7 +640,8 @@ static const struct nouveau_enum nve0_fifo_fault_reason[] = {
640 {}
641};
642
425static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
643static const struct nouveau_enum
644nve0_fifo_fault_hubclient[] = {
645 { 0x00, "VIP" },
646 { 0x01, "CE0" },
647 { 0x02, "CE1" },
@@ -458,7 +677,8 @@ static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
677 {}
678};
679
461static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
680static const struct nouveau_enum
681nve0_fifo_fault_gpcclient[] = {
682 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
683 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
684 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
@@ -483,6 +703,82 @@ static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
703 {}
704};
705
706static void
707nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
708{
709 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
710 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
711 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
712 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
713 u32 gpc = (stat & 0x1f000000) >> 24;
714 u32 client = (stat & 0x00001f00) >> 8;
715 u32 write = (stat & 0x00000080);
716 u32 hub = (stat & 0x00000040);
717 u32 reason = (stat & 0x0000000f);
718 struct nouveau_object *engctx = NULL, *object;
719 struct nouveau_engine *engine = NULL;
720 const struct nouveau_enum *er, *eu, *ec;
721 char erunk[6] = "";
722 char euunk[6] = "";
723 char ecunk[6] = "";
724 char gpcid[3] = "";
725
726 er = nouveau_enum_find(nve0_fifo_fault_reason, reason);
727 if (!er)
728 snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
729
730 eu = nouveau_enum_find(nve0_fifo_fault_engine, unit);
731 if (eu) {
732 switch (eu->data2) {
733 case NVDEV_SUBDEV_BAR:
734 nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
735 break;
736 case NVDEV_SUBDEV_INSTMEM:
737 nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
738 break;
739 case NVDEV_ENGINE_IFB:
740 nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
741 break;
742 default:
743 engine = nouveau_engine(priv, eu->data2);
744 if (engine)
745 engctx = nouveau_engctx_get(engine, inst);
746 break;
747 }
748 } else {
749 snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
750 }
751
752 if (hub) {
753 ec = nouveau_enum_find(nve0_fifo_fault_hubclient, client);
754 } else {
755 ec = nouveau_enum_find(nve0_fifo_fault_gpcclient, client);
756 snprintf(gpcid, sizeof(gpcid), "%d", gpc);
757 }
758
759 if (!ec)
760 snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
761
762 nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
763 "channel 0x%010llx [%s]\n", write ? "write" : "read",
764 (u64)vahi << 32 | valo, er ? er->name : erunk,
765 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
766 ec ? ec->name : ecunk, (u64)inst << 12,
767 nouveau_client_name(engctx));
768
769 object = engctx;
770 while (object) {
771 switch (nv_mclass(object)) {
772 case NVE0_CHANNEL_IND_CLASS:
773 nve0_fifo_recover(priv, engine, (void *)object);
774 break;
775 }
776 object = object->parent;
777 }
778
779 nouveau_engctx_put(engctx);
780}
781
782static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
783 { 0x00000001, "MEMREQ" },
784 { 0x00000002, "MEMACK_TIMEOUT" },
@@ -518,104 +814,6 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
814};
815
520static void
521nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
522{
523 u32 intr = nv_rd32(priv, 0x00254c);
524 u32 code = intr & 0x000000ff;
525 nv_error(priv, "SCHED_ERROR [");
526 nouveau_enum_print(nve0_fifo_sched_reason, code);
527 pr_cont("]\n");
528}
529
530static void
531nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
532{
533 u32 stat = nv_rd32(priv, 0x00256c);
534 nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
535 nv_wr32(priv, 0x00256c, stat);
536}
537
538static void
539nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
540{
541 u32 stat = nv_rd32(priv, 0x00259c);
542 nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
543}
544
545static void
546nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
547{
548 u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
549 u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
550 u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
551 u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
552 u32 client = (stat & 0x00001f00) >> 8;
553 struct nouveau_engine *engine = NULL;
554 struct nouveau_object *engctx = NULL;
555 const struct nouveau_enum *en;
556 const char *name = "unknown";
557
558 nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
559 "write" : "read", (u64)vahi << 32 | valo);
560 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
561 pr_cont("] from ");
562 en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
563 if (stat & 0x00000040) {
564 pr_cont("/");
565 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
566 } else {
567 pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
568 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
569 }
570
571 if (en && en->data2) {
572 if (en->data2 == NVDEV_SUBDEV_BAR) {
573 nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
574 name = "BAR1";
575 } else
576 if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
577 nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
578 name = "BAR3";
579 } else {
580 engine = nouveau_engine(priv, en->data2);
581 if (engine) {
582 engctx = nouveau_engctx_get(engine, inst);
583 name = nouveau_client_name(engctx);
584 }
585 }
586 }
587 pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
588
589 nouveau_engctx_put(engctx);
590}
591
592static int
593nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
594{
595 struct nve0_fifo_chan *chan = NULL;
596 struct nouveau_handle *bind;
597 unsigned long flags;
598 int ret = -EINVAL;
599
600 spin_lock_irqsave(&priv->base.lock, flags);
601 if (likely(chid >= priv->base.min && chid <= priv->base.max))
602 chan = (void *)priv->base.channel[chid];
603 if (unlikely(!chan))
604 goto out;
605
606 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
607 if (likely(bind)) {
608 if (!mthd || !nv_call(bind->object, mthd, data))
609 ret = 0;
610 nouveau_namedb_put(bind);
611 }
612
613out:
614 spin_unlock_irqrestore(&priv->base.lock, flags);
615 return ret;
616}
617
816static void
817nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
818{
819 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
@@ -647,6 +845,24 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
845}
846
847static void
848nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
849{
850 u32 mask = nv_rd32(priv, 0x002a00);
851 while (mask) {
852 u32 engn = __ffs(mask);
853 wake_up(&priv->engine[engn].wait);
854 nv_wr32(priv, 0x002a00, 1 << engn);
855 mask &= ~(1 << engn);
856 }
857}
858
859static void
860nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
861{
862 nouveau_event_trigger(priv->base.uevent, 0);
863}
864
865static void
866nve0_fifo_intr(struct nouveau_subdev *subdev)
867{
868 struct nve0_fifo_priv *priv = (void *)subdev;
@@ -654,8 +870,7 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
870 u32 stat = nv_rd32(priv, 0x002100) & mask;
871
872 if (stat & 0x00000001) {
657 u32 stat = nv_rd32(priv, 0x00252c);
658 nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
873 nve0_fifo_intr_bind(priv);
874 nv_wr32(priv, 0x002100, 0x00000001);
875 stat &= ~0x00000001;
876 }
@@ -697,55 +912,42 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
912 }
913
914 if (stat & 0x10000000) {
700 u32 units = nv_rd32(priv, 0x00259c);
701 u32 u = units;
702
703 while (u) {
704 int i = ffs(u) - 1;
705 nve0_fifo_intr_fault(priv, i);
706 u &= ~(1 << i);
707 }
708
709 nv_wr32(priv, 0x00259c, units);
915 u32 mask = nv_rd32(priv, 0x00259c);
916 while (mask) {
917 u32 unit = __ffs(mask);
918 nve0_fifo_intr_fault(priv, unit);
919 nv_wr32(priv, 0x00259c, (1 << unit));
920 mask &= ~(1 << unit);
921 }
922 stat &= ~0x10000000;
923 }
924
925 if (stat & 0x20000000) {
926 u32 mask = nv_rd32(priv, 0x0025a0);
715 u32 temp = mask;
716
717 while (temp) {
718 u32 unit = ffs(temp) - 1;
927 while (mask) {
928 u32 unit = __ffs(mask);
929 nve0_fifo_intr_pbdma(priv, unit);
720 temp &= ~(1 << unit);
930 nv_wr32(priv, 0x0025a0, (1 << unit));
931 mask &= ~(1 << unit);
932 }
722
723 nv_wr32(priv, 0x0025a0, mask);
933 stat &= ~0x20000000;
934 }
935
936 if (stat & 0x40000000) {
728 u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
729
730 while (mask) {
731 u32 engn = ffs(mask) - 1;
732 /* runlist event, not currently used */
733 mask &= ~(1 << engn);
734 }
735
937 nve0_fifo_intr_runlist(priv);
938 stat &= ~0x40000000;
939 }
940
941 if (stat & 0x80000000) {
740 nouveau_event_trigger(priv->base.uevent, 0);
942 nve0_fifo_intr_engine(priv);
943 nv_wr32(priv, 0x002100, 0x80000000);
944 stat &= ~0x80000000;
945 }
946
947 if (stat) {
746 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
948 nv_error(priv, "INTR 0x%08x\n", stat);
949 nv_mask(priv, 0x002140, stat, 0x00000000);
950 nv_wr32(priv, 0x002100, stat);
748 nv_wr32(priv, 0x002140, 0);
951 }
952}
953
@@ -802,9 +1004,8 @@ nve0_fifo_init(struct nouveau_object *object)
1004
1005 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
1006
805 nv_wr32(priv, 0x002a00, 0xffffffff);
1007 nv_wr32(priv, 0x002100, 0xffffffff);
807 nv_wr32(priv, 0x002140, 0x3fffffff);
1008 nv_wr32(priv, 0x002140, 0x7fffffff);
1009 return 0;
1010}
1011
@@ -840,6 +1041,8 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1041 if (ret)
1042 return ret;
1043
1044 INIT_WORK(&priv->fault, nve0_fifo_recover_work);
1045
1046 for (i = 0; i < FIFO_ENGINE_NR; i++) {
1047 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
1048 0, &priv->engine[i].runlist[0]);
@@ -850,10 +1053,12 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1053 0, &priv->engine[i].runlist[1]);
1054 if (ret)
1055 return ret;
1056
1057 init_waitqueue_head(&priv->engine[i].wait);
1058 }
1059
855 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
856 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
1060 ret = nouveau_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
1061 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
1062 if (ret)
1063 return ret;
1064
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
new file mode 100644
index 000000000000..1dc37b1ddbfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -0,0 +1,989 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "ctxnvc0.h"
26
27/*******************************************************************************
28 * PGRAPH context register lists
29 ******************************************************************************/
30
31static const struct nvc0_graph_init
32gm107_grctx_init_icmd_0[] = {
33 { 0x001000, 1, 0x01, 0x00000004 },
34 { 0x000039, 3, 0x01, 0x00000000 },
35 { 0x0000a9, 1, 0x01, 0x0000ffff },
36 { 0x000038, 1, 0x01, 0x0fac6881 },
37 { 0x00003d, 1, 0x01, 0x00000001 },
38 { 0x0000e8, 8, 0x01, 0x00000400 },
39 { 0x000078, 8, 0x01, 0x00000300 },
40 { 0x000050, 1, 0x01, 0x00000011 },
41 { 0x000058, 8, 0x01, 0x00000008 },
42 { 0x000208, 8, 0x01, 0x00000001 },
43 { 0x000081, 1, 0x01, 0x00000001 },
44 { 0x000085, 1, 0x01, 0x00000004 },
45 { 0x000088, 1, 0x01, 0x00000400 },
46 { 0x000090, 1, 0x01, 0x00000300 },
47 { 0x000098, 1, 0x01, 0x00001001 },
48 { 0x0000e3, 1, 0x01, 0x00000001 },
49 { 0x0000da, 1, 0x01, 0x00000001 },
50 { 0x0000f8, 1, 0x01, 0x00000003 },
51 { 0x0000fa, 1, 0x01, 0x00000001 },
52 { 0x0000b1, 2, 0x01, 0x00000001 },
53 { 0x00009f, 4, 0x01, 0x0000ffff },
54 { 0x0000a8, 1, 0x01, 0x0000ffff },
55 { 0x0000ad, 1, 0x01, 0x0000013e },
56 { 0x0000e1, 1, 0x01, 0x00000010 },
57 { 0x000290, 16, 0x01, 0x00000000 },
58 { 0x0003b0, 16, 0x01, 0x00000000 },
59 { 0x0002a0, 16, 0x01, 0x00000000 },
60 { 0x000420, 16, 0x01, 0x00000000 },
61 { 0x0002b0, 16, 0x01, 0x00000000 },
62 { 0x000430, 16, 0x01, 0x00000000 },
63 { 0x0002c0, 16, 0x01, 0x00000000 },
64 { 0x0004d0, 16, 0x01, 0x00000000 },
65 { 0x000720, 16, 0x01, 0x00000000 },
66 { 0x0008c0, 16, 0x01, 0x00000000 },
67 { 0x000890, 16, 0x01, 0x00000000 },
68 { 0x0008e0, 16, 0x01, 0x00000000 },
69 { 0x0008a0, 16, 0x01, 0x00000000 },
70 { 0x0008f0, 16, 0x01, 0x00000000 },
71 { 0x00094c, 1, 0x01, 0x000000ff },
72 { 0x00094d, 1, 0x01, 0xffffffff },
73 { 0x00094e, 1, 0x01, 0x00000002 },
74 { 0x0002f2, 2, 0x01, 0x00000001 },
75 { 0x0002f5, 1, 0x01, 0x00000001 },
76 { 0x0002f7, 1, 0x01, 0x00000001 },
77 { 0x000303, 1, 0x01, 0x00000001 },
78 { 0x0002e6, 1, 0x01, 0x00000001 },
79 { 0x000466, 1, 0x01, 0x00000052 },
80 { 0x000301, 1, 0x01, 0x3f800000 },
81 { 0x000304, 1, 0x01, 0x30201000 },
82 { 0x000305, 1, 0x01, 0x70605040 },
83 { 0x000306, 1, 0x01, 0xb8a89888 },
84 { 0x000307, 1, 0x01, 0xf8e8d8c8 },
85 { 0x00030a, 1, 0x01, 0x00ffff00 },
86 { 0x0000de, 1, 0x01, 0x00000001 },
87 { 0x00030b, 1, 0x01, 0x0000001a },
88 { 0x00030c, 1, 0x01, 0x00000001 },
89 { 0x000318, 1, 0x01, 0x00000001 },
90 { 0x000340, 1, 0x01, 0x00000000 },
91 { 0x00037d, 1, 0x01, 0x00000006 },
92 { 0x0003a0, 1, 0x01, 0x00000002 },
93 { 0x0003aa, 1, 0x01, 0x00000001 },
94 { 0x0003a9, 1, 0x01, 0x00000001 },
95 { 0x000380, 1, 0x01, 0x00000001 },
96 { 0x000383, 1, 0x01, 0x00000011 },
97 { 0x000360, 1, 0x01, 0x00000040 },
98 { 0x000366, 2, 0x01, 0x00000000 },
99 { 0x000368, 1, 0x01, 0x00000fff },
100 { 0x000370, 2, 0x01, 0x00000000 },
101 { 0x000372, 1, 0x01, 0x000fffff },
102 { 0x00037a, 1, 0x01, 0x00000012 },
103 { 0x000619, 1, 0x01, 0x00000003 },
104 { 0x000811, 1, 0x01, 0x00000003 },
105 { 0x000812, 1, 0x01, 0x00000004 },
106 { 0x000813, 1, 0x01, 0x00000006 },
107 { 0x000814, 1, 0x01, 0x00000008 },
108 { 0x000815, 1, 0x01, 0x0000000b },
109 { 0x000800, 6, 0x01, 0x00000001 },
110 { 0x000632, 1, 0x01, 0x00000001 },
111 { 0x000633, 1, 0x01, 0x00000002 },
112 { 0x000634, 1, 0x01, 0x00000003 },
113 { 0x000635, 1, 0x01, 0x00000004 },
114 { 0x000654, 1, 0x01, 0x3f800000 },
115 { 0x000657, 1, 0x01, 0x3f800000 },
116 { 0x000655, 2, 0x01, 0x3f800000 },
117 { 0x0006cd, 1, 0x01, 0x3f800000 },
118 { 0x0007f5, 1, 0x01, 0x3f800000 },
119 { 0x0007dc, 1, 0x01, 0x39291909 },
120 { 0x0007dd, 1, 0x01, 0x79695949 },
121 { 0x0007de, 1, 0x01, 0xb9a99989 },
122 { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
123 { 0x0007e8, 1, 0x01, 0x00003210 },
124 { 0x0007e9, 1, 0x01, 0x00007654 },
125 { 0x0007ea, 1, 0x01, 0x00000098 },
126 { 0x0007ec, 1, 0x01, 0x39291909 },
127 { 0x0007ed, 1, 0x01, 0x79695949 },
128 { 0x0007ee, 1, 0x01, 0xb9a99989 },
129 { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
130 { 0x0007f0, 1, 0x01, 0x00003210 },
131 { 0x0007f1, 1, 0x01, 0x00007654 },
132 { 0x0007f2, 1, 0x01, 0x00000098 },
133 { 0x0005a5, 1, 0x01, 0x00000001 },
134 { 0x0005d0, 1, 0x01, 0x20181008 },
135 { 0x0005d1, 1, 0x01, 0x40383028 },
136 { 0x0005d2, 1, 0x01, 0x60585048 },
137 { 0x0005d3, 1, 0x01, 0x80787068 },
138 { 0x000980, 128, 0x01, 0x00000000 },
139 { 0x000468, 1, 0x01, 0x00000004 },
140 { 0x00046c, 1, 0x01, 0x00000001 },
141 { 0x000470, 96, 0x01, 0x00000000 },
142 { 0x000510, 16, 0x01, 0x3f800000 },
143 { 0x000520, 1, 0x01, 0x000002b6 },
144 { 0x000529, 1, 0x01, 0x00000001 },
145 { 0x000530, 16, 0x01, 0xffff0000 },
146 { 0x000550, 32, 0x01, 0xffff0000 },
147 { 0x000585, 1, 0x01, 0x0000003f },
148 { 0x000576, 1, 0x01, 0x00000003 },
149 { 0x00057b, 1, 0x01, 0x00000059 },
150 { 0x000586, 1, 0x01, 0x00000040 },
151 { 0x000582, 2, 0x01, 0x00000080 },
152 { 0x000595, 1, 0x01, 0x00400040 },
153 { 0x000596, 1, 0x01, 0x00000492 },
154 { 0x000597, 1, 0x01, 0x08080203 },
155 { 0x0005ad, 1, 0x01, 0x00000008 },
156 { 0x000598, 1, 0x01, 0x00020001 },
157 { 0x0005c2, 1, 0x01, 0x00000001 },
158 { 0x000638, 2, 0x01, 0x00000001 },
159 { 0x00063a, 1, 0x01, 0x00000002 },
160 { 0x00063b, 2, 0x01, 0x00000001 },
161 { 0x00063d, 1, 0x01, 0x00000002 },
162 { 0x00063e, 1, 0x01, 0x00000001 },
163 { 0x0008b8, 8, 0x01, 0x00000001 },
164 { 0x000900, 8, 0x01, 0x00000001 },
165 { 0x000908, 8, 0x01, 0x00000002 },
166 { 0x000910, 16, 0x01, 0x00000001 },
167 { 0x000920, 8, 0x01, 0x00000002 },
168 { 0x000928, 8, 0x01, 0x00000001 },
169 { 0x000662, 1, 0x01, 0x00000001 },
170 { 0x000648, 9, 0x01, 0x00000001 },
171 { 0x000658, 1, 0x01, 0x0000000f },
172 { 0x0007ff, 1, 0x01, 0x0000000a },
173 { 0x00066a, 1, 0x01, 0x40000000 },
174 { 0x00066b, 1, 0x01, 0x10000000 },
175 { 0x00066c, 2, 0x01, 0xffff0000 },
176 { 0x0007af, 2, 0x01, 0x00000008 },
177 { 0x0007f6, 1, 0x01, 0x00000001 },
178 { 0x0006b2, 1, 0x01, 0x00000055 },
179 { 0x0007ad, 1, 0x01, 0x00000003 },
180 { 0x000971, 1, 0x01, 0x00000008 },
181 { 0x000972, 1, 0x01, 0x00000040 },
182 { 0x000973, 1, 0x01, 0x0000012c },
183 { 0x00097c, 1, 0x01, 0x00000040 },
184 { 0x000975, 1, 0x01, 0x00000020 },
185 { 0x000976, 1, 0x01, 0x00000001 },
186 { 0x000977, 1, 0x01, 0x00000020 },
187 { 0x000978, 1, 0x01, 0x00000001 },
188 { 0x000957, 1, 0x01, 0x00000003 },
189 { 0x00095e, 1, 0x01, 0x20164010 },
190 { 0x00095f, 1, 0x01, 0x00000020 },
191 { 0x000a0d, 1, 0x01, 0x00000006 },
192 { 0x00097d, 1, 0x01, 0x0000000c },
193 { 0x000683, 1, 0x01, 0x00000006 },
194 { 0x000687, 1, 0x01, 0x003fffff },
195 { 0x0006a0, 1, 0x01, 0x00000005 },
196 { 0x000840, 1, 0x01, 0x00400008 },
197 { 0x000841, 1, 0x01, 0x08000080 },
198 { 0x000842, 1, 0x01, 0x00400008 },
199 { 0x000843, 1, 0x01, 0x08000080 },
200 { 0x000818, 8, 0x01, 0x00000000 },
201 { 0x000848, 16, 0x01, 0x00000000 },
202 { 0x000738, 1, 0x01, 0x00000000 },
203 { 0x0006aa, 1, 0x01, 0x00000001 },
204 { 0x0006ab, 1, 0x01, 0x00000002 },
205 { 0x0006ac, 1, 0x01, 0x00000080 },
206 { 0x0006ad, 2, 0x01, 0x00000100 },
207 { 0x0006b1, 1, 0x01, 0x00000011 },
208 { 0x0006bb, 1, 0x01, 0x000000cf },
209 { 0x0006ce, 1, 0x01, 0x2a712488 },
210 { 0x000739, 1, 0x01, 0x4085c000 },
211 { 0x00073a, 1, 0x01, 0x00000080 },
212 { 0x000786, 1, 0x01, 0x80000100 },
213 { 0x00073c, 1, 0x01, 0x00010100 },
214 { 0x00073d, 1, 0x01, 0x02800000 },
215 { 0x000787, 1, 0x01, 0x000000cf },
216 { 0x00078c, 1, 0x01, 0x00000008 },
217 { 0x000792, 1, 0x01, 0x00000001 },
218 { 0x000794, 3, 0x01, 0x00000001 },
219 { 0x000797, 1, 0x01, 0x000000cf },
220 { 0x000836, 1, 0x01, 0x00000001 },
221 { 0x00079a, 1, 0x01, 0x00000002 },
222 { 0x000833, 1, 0x01, 0x04444480 },
223 { 0x0007a1, 1, 0x01, 0x00000001 },
224 { 0x0007a3, 3, 0x01, 0x00000001 },
225 { 0x000831, 1, 0x01, 0x00000004 },
226 { 0x000b07, 1, 0x01, 0x00000002 },
227 { 0x000b08, 2, 0x01, 0x00000100 },
228 { 0x000b0a, 1, 0x01, 0x00000001 },
229 { 0x000a04, 1, 0x01, 0x000000ff },
230 { 0x000a0b, 1, 0x01, 0x00000040 },
231 { 0x00097f, 1, 0x01, 0x00000100 },
232 { 0x000a02, 1, 0x01, 0x00000001 },
233 { 0x000809, 1, 0x01, 0x00000007 },
234 { 0x00c221, 1, 0x01, 0x00000040 },
235 { 0x00c1b0, 8, 0x01, 0x0000000f },
236 { 0x00c1b8, 1, 0x01, 0x0fac6881 },
237 { 0x00c1b9, 1, 0x01, 0x00fac688 },
238 { 0x00c401, 1, 0x01, 0x00000001 },
239 { 0x00c402, 1, 0x01, 0x00010001 },
240 { 0x00c403, 2, 0x01, 0x00000001 },
241 { 0x00c40e, 1, 0x01, 0x00000020 },
242 { 0x01e100, 1, 0x01, 0x00000001 },
243 { 0x001000, 1, 0x01, 0x00000002 },
244 { 0x0006aa, 1, 0x01, 0x00000001 },
245 { 0x0006ad, 2, 0x01, 0x00000100 },
246 { 0x0006b1, 1, 0x01, 0x00000011 },
247 { 0x00078c, 1, 0x01, 0x00000008 },
248 { 0x000792, 1, 0x01, 0x00000001 },
249 { 0x000794, 3, 0x01, 0x00000001 },
250 { 0x000797, 1, 0x01, 0x000000cf },
251 { 0x00079a, 1, 0x01, 0x00000002 },
252 { 0x0007a1, 1, 0x01, 0x00000001 },
253 { 0x0007a3, 3, 0x01, 0x00000001 },
254 { 0x000831, 1, 0x01, 0x00000004 },
255 { 0x01e100, 1, 0x01, 0x00000001 },
256 { 0x001000, 1, 0x01, 0x00000008 },
257 { 0x000039, 3, 0x01, 0x00000000 },
258 { 0x000380, 1, 0x01, 0x00000001 },
259 { 0x000366, 2, 0x01, 0x00000000 },
260 { 0x000368, 1, 0x01, 0x00000fff },
261 { 0x000370, 2, 0x01, 0x00000000 },
262 { 0x000372, 1, 0x01, 0x000fffff },
263 { 0x000813, 1, 0x01, 0x00000006 },
264 { 0x000814, 1, 0x01, 0x00000008 },
265 { 0x000818, 8, 0x01, 0x00000000 },
266 { 0x000848, 16, 0x01, 0x00000000 },
267 { 0x000738, 1, 0x01, 0x00000000 },
268 { 0x000b07, 1, 0x01, 0x00000002 },
269 { 0x000b08, 2, 0x01, 0x00000100 },
270 { 0x000b0a, 1, 0x01, 0x00000001 },
271 { 0x000a04, 1, 0x01, 0x000000ff },
272 { 0x000a0b, 1, 0x01, 0x00000040 },
273 { 0x00097f, 1, 0x01, 0x00000100 },
274 { 0x000a02, 1, 0x01, 0x00000001 },
275 { 0x000809, 1, 0x01, 0x00000007 },
276 { 0x00c221, 1, 0x01, 0x00000040 },
277 { 0x00c401, 1, 0x01, 0x00000001 },
278 { 0x00c402, 1, 0x01, 0x00010001 },
279 { 0x00c403, 2, 0x01, 0x00000001 },
280 { 0x00c40e, 1, 0x01, 0x00000020 },
281 { 0x01e100, 1, 0x01, 0x00000001 },
282 { 0x001000, 1, 0x01, 0x00000001 },
283 { 0x000b07, 1, 0x01, 0x00000002 },
284 { 0x000b08, 2, 0x01, 0x00000100 },
285 { 0x000b0a, 1, 0x01, 0x00000001 },
286 { 0x01e100, 1, 0x01, 0x00000001 },
287 {}
288};
289
290static const struct nvc0_graph_pack
291gm107_grctx_pack_icmd[] = {
292 { gm107_grctx_init_icmd_0 },
293 {}
294};
295
296static const struct nvc0_graph_init
297gm107_grctx_init_b097_0[] = {
298 { 0x000800, 8, 0x40, 0x00000000 },
299 { 0x000804, 8, 0x40, 0x00000000 },
300 { 0x000808, 8, 0x40, 0x00000400 },
301 { 0x00080c, 8, 0x40, 0x00000300 },
302 { 0x000810, 1, 0x04, 0x000000cf },
303 { 0x000850, 7, 0x40, 0x00000000 },
304 { 0x000814, 8, 0x40, 0x00000040 },
305 { 0x000818, 8, 0x40, 0x00000001 },
306 { 0x00081c, 8, 0x40, 0x00000000 },
307 { 0x000820, 8, 0x40, 0x00000000 },
308 { 0x001c00, 16, 0x10, 0x00000000 },
309 { 0x001c04, 16, 0x10, 0x00000000 },
310 { 0x001c08, 16, 0x10, 0x00000000 },
311 { 0x001c0c, 16, 0x10, 0x00000000 },
312 { 0x001d00, 16, 0x10, 0x00000000 },
313 { 0x001d04, 16, 0x10, 0x00000000 },
314 { 0x001d08, 16, 0x10, 0x00000000 },
315 { 0x001d0c, 16, 0x10, 0x00000000 },
316 { 0x001f00, 16, 0x08, 0x00000000 },
317 { 0x001f04, 16, 0x08, 0x00000000 },
318 { 0x001f80, 16, 0x08, 0x00000000 },
319 { 0x001f84, 16, 0x08, 0x00000000 },
320 { 0x002000, 1, 0x04, 0x00000000 },
321 { 0x002040, 1, 0x04, 0x00000011 },
322 { 0x002080, 1, 0x04, 0x00000020 },
323 { 0x0020c0, 1, 0x04, 0x00000030 },
324 { 0x002100, 1, 0x04, 0x00000040 },
325 { 0x002140, 1, 0x04, 0x00000051 },
326 { 0x00200c, 6, 0x40, 0x00000001 },
327 { 0x002010, 1, 0x04, 0x00000000 },
328 { 0x002050, 1, 0x04, 0x00000000 },
329 { 0x002090, 1, 0x04, 0x00000001 },
330 { 0x0020d0, 1, 0x04, 0x00000002 },
331 { 0x002110, 1, 0x04, 0x00000003 },
332 { 0x002150, 1, 0x04, 0x00000004 },
333 { 0x000380, 4, 0x20, 0x00000000 },
334 { 0x000384, 4, 0x20, 0x00000000 },
335 { 0x000388, 4, 0x20, 0x00000000 },
336 { 0x00038c, 4, 0x20, 0x00000000 },
337 { 0x000700, 4, 0x10, 0x00000000 },
338 { 0x000704, 4, 0x10, 0x00000000 },
339 { 0x000708, 4, 0x10, 0x00000000 },
340 { 0x002800, 128, 0x04, 0x00000000 },
341 { 0x000a00, 16, 0x20, 0x00000000 },
342 { 0x000a04, 16, 0x20, 0x00000000 },
343 { 0x000a08, 16, 0x20, 0x00000000 },
344 { 0x000a0c, 16, 0x20, 0x00000000 },
345 { 0x000a10, 16, 0x20, 0x00000000 },
346 { 0x000a14, 16, 0x20, 0x00000000 },
347 { 0x000c00, 16, 0x10, 0x00000000 },
348 { 0x000c04, 16, 0x10, 0x00000000 },
349 { 0x000c08, 16, 0x10, 0x00000000 },
350 { 0x000c0c, 16, 0x10, 0x3f800000 },
351 { 0x000d00, 8, 0x08, 0xffff0000 },
352 { 0x000d04, 8, 0x08, 0xffff0000 },
353 { 0x000e00, 16, 0x10, 0x00000000 },
354 { 0x000e04, 16, 0x10, 0xffff0000 },
355 { 0x000e08, 16, 0x10, 0xffff0000 },
356 { 0x000d40, 4, 0x08, 0x00000000 },
357 { 0x000d44, 4, 0x08, 0x00000000 },
358 { 0x001e00, 8, 0x20, 0x00000001 },
359 { 0x001e04, 8, 0x20, 0x00000001 },
360 { 0x001e08, 8, 0x20, 0x00000002 },
361 { 0x001e0c, 8, 0x20, 0x00000001 },
362 { 0x001e10, 8, 0x20, 0x00000001 },
363 { 0x001e14, 8, 0x20, 0x00000002 },
364 { 0x001e18, 8, 0x20, 0x00000001 },
365 { 0x001480, 8, 0x10, 0x00000000 },
366 { 0x001484, 8, 0x10, 0x00000000 },
367 { 0x001488, 8, 0x10, 0x00000000 },
368 { 0x003400, 128, 0x04, 0x00000000 },
369 { 0x00030c, 1, 0x04, 0x00000001 },
370 { 0x001944, 1, 0x04, 0x00000000 },
371 { 0x001514, 1, 0x04, 0x00000000 },
372 { 0x000d68, 1, 0x04, 0x0000ffff },
373 { 0x00121c, 1, 0x04, 0x0fac6881 },
374 { 0x000fac, 1, 0x04, 0x00000001 },
375 { 0x001538, 1, 0x04, 0x00000001 },
376 { 0x000fe0, 2, 0x04, 0x00000000 },
377 { 0x000fe8, 1, 0x04, 0x00000014 },
378 { 0x000fec, 1, 0x04, 0x00000040 },
379 { 0x000ff0, 1, 0x04, 0x00000000 },
380 { 0x00179c, 1, 0x04, 0x00000000 },
381 { 0x001228, 1, 0x04, 0x00000400 },
382 { 0x00122c, 1, 0x04, 0x00000300 },
383 { 0x001230, 1, 0x04, 0x00010001 },
384 { 0x0007f8, 1, 0x04, 0x00000000 },
385 { 0x0015b4, 1, 0x04, 0x00000001 },
386 { 0x0015cc, 1, 0x04, 0x00000000 },
387 { 0x001534, 1, 0x04, 0x00000000 },
388 { 0x000754, 1, 0x04, 0x00000001 },
389 { 0x000fb0, 1, 0x04, 0x00000000 },
390 { 0x0015d0, 1, 0x04, 0x00000000 },
391 { 0x00153c, 1, 0x04, 0x00000000 },
392 { 0x0016b4, 1, 0x04, 0x00000003 },
393 { 0x000fbc, 4, 0x04, 0x0000ffff },
394 { 0x000df8, 2, 0x04, 0x00000000 },
395 { 0x001948, 1, 0x04, 0x00000000 },
396 { 0x001970, 1, 0x04, 0x00000001 },
397 { 0x00161c, 1, 0x04, 0x000009f0 },
398 { 0x000dcc, 1, 0x04, 0x00000010 },
399 { 0x0015e4, 1, 0x04, 0x00000000 },
400 { 0x001160, 32, 0x04, 0x25e00040 },
401 { 0x001880, 32, 0x04, 0x00000000 },
402 { 0x000f84, 2, 0x04, 0x00000000 },
403 { 0x0017c8, 2, 0x04, 0x00000000 },
404 { 0x0017d0, 1, 0x04, 0x000000ff },
405 { 0x0017d4, 1, 0x04, 0xffffffff },
406 { 0x0017d8, 1, 0x04, 0x00000002 },
407 { 0x0017dc, 1, 0x04, 0x00000000 },
408 { 0x0015f4, 2, 0x04, 0x00000000 },
409 { 0x001434, 2, 0x04, 0x00000000 },
410 { 0x000d74, 1, 0x04, 0x00000000 },
411 { 0x0013a4, 1, 0x04, 0x00000000 },
412 { 0x001318, 1, 0x04, 0x00000001 },
413 { 0x001080, 2, 0x04, 0x00000000 },
414 { 0x001088, 2, 0x04, 0x00000001 },
415 { 0x001090, 1, 0x04, 0x00000000 },
416 { 0x001094, 1, 0x04, 0x00000001 },
417 { 0x001098, 1, 0x04, 0x00000000 },
418 { 0x00109c, 1, 0x04, 0x00000001 },
419 { 0x0010a0, 2, 0x04, 0x00000000 },
420 { 0x001644, 1, 0x04, 0x00000000 },
421 { 0x000748, 1, 0x04, 0x00000000 },
422 { 0x000de8, 1, 0x04, 0x00000000 },
423 { 0x001648, 1, 0x04, 0x00000000 },
424 { 0x0012a4, 1, 0x04, 0x00000000 },
425 { 0x001120, 4, 0x04, 0x00000000 },
426 { 0x001118, 1, 0x04, 0x00000000 },
427 { 0x00164c, 1, 0x04, 0x00000000 },
428 { 0x001658, 1, 0x04, 0x00000000 },
429 { 0x001910, 1, 0x04, 0x00000290 },
430 { 0x001518, 1, 0x04, 0x00000000 },
431 { 0x00165c, 1, 0x04, 0x00000001 },
432 { 0x001520, 1, 0x04, 0x00000000 },
433 { 0x001604, 1, 0x04, 0x00000000 },
434 { 0x001570, 1, 0x04, 0x00000000 },
435 { 0x0013b0, 2, 0x04, 0x3f800000 },
436 { 0x00020c, 1, 0x04, 0x00000000 },
437 { 0x001670, 1, 0x04, 0x30201000 },
438 { 0x001674, 1, 0x04, 0x70605040 },
439 { 0x001678, 1, 0x04, 0xb8a89888 },
440 { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
441 { 0x00166c, 1, 0x04, 0x00000000 },
442 { 0x001680, 1, 0x04, 0x00ffff00 },
443 { 0x0012d0, 1, 0x04, 0x00000003 },
444 { 0x0012d4, 1, 0x04, 0x00000002 },
445 { 0x001684, 2, 0x04, 0x00000000 },
446 { 0x000dac, 2, 0x04, 0x00001b02 },
447 { 0x000db4, 1, 0x04, 0x00000000 },
448 { 0x00168c, 1, 0x04, 0x00000000 },
449 { 0x0015bc, 1, 0x04, 0x00000000 },
450 { 0x00156c, 1, 0x04, 0x00000000 },
451 { 0x00187c, 1, 0x04, 0x00000000 },
452 { 0x001110, 1, 0x04, 0x00000001 },
453 { 0x000dc0, 3, 0x04, 0x00000000 },
454 { 0x000f40, 5, 0x04, 0x00000000 },
455 { 0x001234, 1, 0x04, 0x00000000 },
456 { 0x001690, 1, 0x04, 0x00000000 },
457 { 0x000790, 5, 0x04, 0x00000000 },
458 { 0x00077c, 1, 0x04, 0x00000000 },
459 { 0x001000, 1, 0x04, 0x00000010 },
460 { 0x0010fc, 1, 0x04, 0x00000000 },
461 { 0x001290, 1, 0x04, 0x00000000 },
462 { 0x000218, 1, 0x04, 0x00000010 },
463 { 0x0012d8, 1, 0x04, 0x00000000 },
464 { 0x0012dc, 1, 0x04, 0x00000010 },
465 { 0x000d94, 1, 0x04, 0x00000001 },
466 { 0x00155c, 2, 0x04, 0x00000000 },
467 { 0x001564, 1, 0x04, 0x00000fff },
468 { 0x001574, 2, 0x04, 0x00000000 },
469 { 0x00157c, 1, 0x04, 0x000fffff },
470 { 0x001354, 1, 0x04, 0x00000000 },
471 { 0x001610, 1, 0x04, 0x00000012 },
472 { 0x001608, 2, 0x04, 0x00000000 },
473 { 0x00260c, 1, 0x04, 0x00000000 },
474 { 0x0007ac, 1, 0x04, 0x00000000 },
475 { 0x00162c, 1, 0x04, 0x00000003 },
476 { 0x000210, 1, 0x04, 0x00000000 },
477 { 0x000320, 1, 0x04, 0x00000000 },
478 { 0x000324, 6, 0x04, 0x3f800000 },
479 { 0x000750, 1, 0x04, 0x00000000 },
480 { 0x000760, 1, 0x04, 0x39291909 },
481 { 0x000764, 1, 0x04, 0x79695949 },
482 { 0x000768, 1, 0x04, 0xb9a99989 },
483 { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
484 { 0x000770, 1, 0x04, 0x30201000 },
485 { 0x000774, 1, 0x04, 0x70605040 },
486 { 0x000778, 1, 0x04, 0x00009080 },
487 { 0x000780, 1, 0x04, 0x39291909 },
488 { 0x000784, 1, 0x04, 0x79695949 },
489 { 0x000788, 1, 0x04, 0xb9a99989 },
490 { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
491 { 0x0007d0, 1, 0x04, 0x30201000 },
492 { 0x0007d4, 1, 0x04, 0x70605040 },
493 { 0x0007d8, 1, 0x04, 0x00009080 },
494 { 0x00037c, 1, 0x04, 0x00000001 },
495 { 0x000740, 2, 0x04, 0x00000000 },
496 { 0x002600, 1, 0x04, 0x00000000 },
497 { 0x001918, 1, 0x04, 0x00000000 },
498 { 0x00191c, 1, 0x04, 0x00000900 },
499 { 0x001920, 1, 0x04, 0x00000405 },
500 { 0x001308, 1, 0x04, 0x00000001 },
501 { 0x001924, 1, 0x04, 0x00000000 },
502 { 0x0013ac, 1, 0x04, 0x00000000 },
503 { 0x00192c, 1, 0x04, 0x00000001 },
504 { 0x00193c, 1, 0x04, 0x00002c1c },
505 { 0x000d7c, 1, 0x04, 0x00000000 },
506 { 0x000f8c, 1, 0x04, 0x00000000 },
507 { 0x0002c0, 1, 0x04, 0x00000001 },
508 { 0x001510, 1, 0x04, 0x00000000 },
509 { 0x001940, 1, 0x04, 0x00000000 },
510 { 0x000ff4, 2, 0x04, 0x00000000 },
511 { 0x00194c, 2, 0x04, 0x00000000 },
512 { 0x001968, 1, 0x04, 0x00000000 },
513 { 0x001590, 1, 0x04, 0x0000003f },
514 { 0x0007e8, 4, 0x04, 0x00000000 },
515 { 0x00196c, 1, 0x04, 0x00000011 },
516 { 0x0002e4, 1, 0x04, 0x0000b001 },
517 { 0x00036c, 2, 0x04, 0x00000000 },
518 { 0x00197c, 1, 0x04, 0x00000000 },
519 { 0x000fcc, 2, 0x04, 0x00000000 },
520 { 0x0002d8, 1, 0x04, 0x00000040 },
521 { 0x001980, 1, 0x04, 0x00000080 },
522 { 0x001504, 1, 0x04, 0x00000080 },
523 { 0x001984, 1, 0x04, 0x00000000 },
524 { 0x000f60, 1, 0x04, 0x00000000 },
525 { 0x000f64, 1, 0x04, 0x00400040 },
526 { 0x000f68, 1, 0x04, 0x00002212 },
527 { 0x000f6c, 1, 0x04, 0x08080203 },
528 { 0x001108, 1, 0x04, 0x00000008 },
529 { 0x000f70, 1, 0x04, 0x00080001 },
530 { 0x000ffc, 1, 0x04, 0x00000000 },
531 { 0x000300, 1, 0x04, 0x00000001 },
532 { 0x0013a8, 1, 0x04, 0x00000000 },
533 { 0x0012ec, 1, 0x04, 0x00000000 },
534 { 0x001310, 1, 0x04, 0x00000000 },
535 { 0x001314, 1, 0x04, 0x00000001 },
536 { 0x001380, 1, 0x04, 0x00000000 },
537 { 0x001384, 4, 0x04, 0x00000001 },
538 { 0x001394, 1, 0x04, 0x00000000 },
539 { 0x00139c, 1, 0x04, 0x00000000 },
540 { 0x001398, 1, 0x04, 0x00000000 },
541 { 0x001594, 1, 0x04, 0x00000000 },
542 { 0x001598, 4, 0x04, 0x00000001 },
543 { 0x000f54, 3, 0x04, 0x00000000 },
544 { 0x0019bc, 1, 0x04, 0x00000000 },
545 { 0x000f9c, 2, 0x04, 0x00000000 },
546 { 0x0012cc, 1, 0x04, 0x00000000 },
547 { 0x0012e8, 1, 0x04, 0x00000000 },
548 { 0x00130c, 1, 0x04, 0x00000001 },
549 { 0x001360, 8, 0x04, 0x00000000 },
550 { 0x00133c, 2, 0x04, 0x00000001 },
551 { 0x001344, 1, 0x04, 0x00000002 },
552 { 0x001348, 2, 0x04, 0x00000001 },
553 { 0x001350, 1, 0x04, 0x00000002 },
554 { 0x001358, 1, 0x04, 0x00000001 },
555 { 0x0012e4, 1, 0x04, 0x00000000 },
556 { 0x00131c, 4, 0x04, 0x00000000 },
557 { 0x0019c0, 1, 0x04, 0x00000000 },
558 { 0x001140, 1, 0x04, 0x00000000 },
559 { 0x000dd0, 1, 0x04, 0x00000000 },
560 { 0x000dd4, 1, 0x04, 0x00000001 },
561 { 0x0002f4, 1, 0x04, 0x00000000 },
562 { 0x0019c4, 1, 0x04, 0x00000000 },
563 { 0x0019c8, 1, 0x04, 0x00001500 },
564 { 0x00135c, 1, 0x04, 0x00000000 },
565 { 0x000f90, 1, 0x04, 0x00000000 },
566 { 0x0019e0, 8, 0x04, 0x00000001 },
567 { 0x0019cc, 1, 0x04, 0x00000001 },
568 { 0x0015b8, 1, 0x04, 0x00000000 },
569 { 0x001a00, 1, 0x04, 0x00001111 },
570 { 0x001a04, 7, 0x04, 0x00000000 },
571 { 0x000d6c, 2, 0x04, 0xffff0000 },
572 { 0x0010f8, 1, 0x04, 0x00001010 },
573 { 0x000d80, 5, 0x04, 0x00000000 },
574 { 0x000da0, 1, 0x04, 0x00000000 },
575 { 0x0007a4, 2, 0x04, 0x00000000 },
576 { 0x001508, 1, 0x04, 0x80000000 },
577 { 0x00150c, 1, 0x04, 0x40000000 },
578 { 0x001668, 1, 0x04, 0x00000000 },
579 { 0x000318, 2, 0x04, 0x00000008 },
580 { 0x000d9c, 1, 0x04, 0x00000001 },
581 { 0x000f14, 1, 0x04, 0x00000000 },
582 { 0x000374, 1, 0x04, 0x00000000 },
583 { 0x000378, 1, 0x04, 0x0000000c },
584 { 0x0007dc, 1, 0x04, 0x00000000 },
585 { 0x00074c, 1, 0x04, 0x00000055 },
586 { 0x001420, 1, 0x04, 0x00000003 },
587 { 0x001008, 1, 0x04, 0x00000008 },
588 { 0x00100c, 1, 0x04, 0x00000040 },
589 { 0x001010, 1, 0x04, 0x0000012c },
590 { 0x000d60, 1, 0x04, 0x00000040 },
591 { 0x001018, 1, 0x04, 0x00000020 },
592 { 0x00101c, 1, 0x04, 0x00000001 },
593 { 0x001020, 1, 0x04, 0x00000020 },
594 { 0x001024, 1, 0x04, 0x00000001 },
595 { 0x001444, 3, 0x04, 0x00000000 },
596 { 0x000360, 1, 0x04, 0x20164010 },
597 { 0x000364, 1, 0x04, 0x00000020 },
598 { 0x000368, 1, 0x04, 0x00000000 },
599 { 0x000da8, 1, 0x04, 0x00000030 },
600 { 0x000de4, 1, 0x04, 0x00000000 },
601 { 0x000204, 1, 0x04, 0x00000006 },
602 { 0x0002d0, 1, 0x04, 0x003fffff },
603 { 0x001220, 1, 0x04, 0x00000005 },
604 { 0x000fdc, 1, 0x04, 0x00000000 },
605 { 0x000f98, 1, 0x04, 0x00400008 },
606 { 0x001284, 1, 0x04, 0x08000080 },
607 { 0x001450, 1, 0x04, 0x00400008 },
608 { 0x001454, 1, 0x04, 0x08000080 },
609 { 0x000214, 1, 0x04, 0x00000000 },
610 {}
611};
612
613static const struct nvc0_graph_pack
614gm107_grctx_pack_mthd[] = {
615 { gm107_grctx_init_b097_0, 0xb097 },
616 { nvc0_grctx_init_902d_0, 0x902d },
617 {}
618};
619
620static const struct nvc0_graph_init
621gm107_grctx_init_fe_0[] = {
622 { 0x404004, 8, 0x04, 0x00000000 },
623 { 0x404024, 1, 0x04, 0x0000e000 },
624 { 0x404028, 8, 0x04, 0x00000000 },
625 { 0x4040a8, 8, 0x04, 0x00000000 },
626 { 0x4040c8, 1, 0x04, 0xf800008f },
627 { 0x4040d0, 6, 0x04, 0x00000000 },
628 { 0x4040f8, 1, 0x04, 0x00000000 },
629 { 0x404100, 10, 0x04, 0x00000000 },
630 { 0x404130, 2, 0x04, 0x00000000 },
631 { 0x404150, 1, 0x04, 0x0000002e },
632 { 0x404154, 1, 0x04, 0x00000400 },
633 { 0x404158, 1, 0x04, 0x00000200 },
634 { 0x404164, 1, 0x04, 0x00000045 },
635 { 0x40417c, 2, 0x04, 0x00000000 },
636 { 0x404194, 1, 0x04, 0x01000700 },
637 { 0x4041a0, 4, 0x04, 0x00000000 },
638 { 0x404200, 4, 0x04, 0x00000000 },
639 {}
640};
641
642static const struct nvc0_graph_init
643gm107_grctx_init_ds_0[] = {
644 { 0x405800, 1, 0x04, 0x0f8001bf },
645 { 0x405830, 1, 0x04, 0x0aa01000 },
646 { 0x405834, 1, 0x04, 0x08000000 },
647 { 0x405838, 1, 0x04, 0x00000000 },
648 { 0x405854, 1, 0x04, 0x00000000 },
649 { 0x405870, 4, 0x04, 0x00000001 },
650 { 0x405a00, 2, 0x04, 0x00000000 },
651 { 0x405a18, 1, 0x04, 0x00000000 },
652 { 0x405a1c, 1, 0x04, 0x000000ff },
653 {}
654};
655
656static const struct nvc0_graph_init
657gm107_grctx_init_pd_0[] = {
658 { 0x406020, 1, 0x04, 0x07410001 },
659 { 0x406028, 4, 0x04, 0x00000001 },
660 { 0x4064a8, 1, 0x04, 0x00000000 },
661 { 0x4064ac, 1, 0x04, 0x00003fff },
662 { 0x4064b0, 3, 0x04, 0x00000000 },
663 { 0x4064c0, 1, 0x04, 0x80400280 },
664 { 0x4064c4, 1, 0x04, 0x0400ffff },
665 { 0x4064c8, 1, 0x04, 0x018001ff },
666 { 0x4064cc, 9, 0x04, 0x00000000 },
667 { 0x4064fc, 1, 0x04, 0x0000022a },
668 { 0x406500, 1, 0x04, 0x00000000 },
669 {}
670};
671
672static const struct nvc0_graph_init
673gm107_grctx_init_be_0[] = {
674 { 0x408800, 1, 0x04, 0x32802a3c },
675 { 0x408804, 1, 0x04, 0x00000040 },
676 { 0x408808, 1, 0x04, 0x1003e005 },
677 { 0x408840, 1, 0x04, 0x0000000b },
678 { 0x408900, 1, 0x04, 0xb080b801 },
679 { 0x408904, 1, 0x04, 0x63038001 },
680 { 0x408908, 1, 0x04, 0x02c8102f },
681 { 0x408980, 1, 0x04, 0x0000011d },
682 {}
683};
684
685static const struct nvc0_graph_pack
686gm107_grctx_pack_hub[] = {
687 { nvc0_grctx_init_main_0 },
688 { gm107_grctx_init_fe_0 },
689 { nvf0_grctx_init_pri_0 },
690 { nve4_grctx_init_memfmt_0 },
691 { gm107_grctx_init_ds_0 },
692 { nvf0_grctx_init_cwd_0 },
693 { gm107_grctx_init_pd_0 },
694 { nv108_grctx_init_rstr2d_0 },
695 { nve4_grctx_init_scc_0 },
696 { gm107_grctx_init_be_0 },
697 {}
698};
699
700static const struct nvc0_graph_init
701gm107_grctx_init_gpc_unk_0[] = {
702 { 0x418380, 1, 0x04, 0x00000056 },
703 {}
704};
705
706static const struct nvc0_graph_init
707gm107_grctx_init_gpc_unk_1[] = {
708 { 0x418600, 1, 0x04, 0x0000007f },
709 { 0x418684, 1, 0x04, 0x0000001f },
710 { 0x418700, 1, 0x04, 0x00000002 },
711 { 0x418704, 1, 0x04, 0x00000080 },
712 { 0x418708, 1, 0x04, 0x40000000 },
713 { 0x41870c, 2, 0x04, 0x00000000 },
714 {}
715};
716
717static const struct nvc0_graph_init
718gm107_grctx_init_setup_0[] = {
719 { 0x418800, 1, 0x04, 0x7006863a },
720 { 0x418810, 1, 0x04, 0x00000000 },
721 { 0x418828, 1, 0x04, 0x00000044 },
722 { 0x418830, 1, 0x04, 0x10000001 },
723 { 0x4188d8, 1, 0x04, 0x00000008 },
724 { 0x4188e0, 1, 0x04, 0x01000000 },
725 { 0x4188e8, 5, 0x04, 0x00000000 },
726 { 0x4188fc, 1, 0x04, 0x20100058 },
727 {}
728};
729
730static const struct nvc0_graph_init
731gm107_grctx_init_gpc_unk_2[] = {
732 { 0x418d24, 1, 0x04, 0x00000000 },
733 { 0x418e00, 1, 0x04, 0x90000000 },
734 { 0x418e24, 1, 0x04, 0x00000000 },
735 { 0x418e28, 1, 0x04, 0x00000030 },
736 { 0x418e30, 1, 0x04, 0x00000000 },
737 { 0x418e34, 1, 0x04, 0x00010000 },
738 { 0x418e38, 1, 0x04, 0x00000000 },
739 { 0x418e40, 22, 0x04, 0x00000000 },
740 { 0x418ea0, 2, 0x04, 0x00000000 },
741 {}
742};
743
744static const struct nvc0_graph_pack
745gm107_grctx_pack_gpc[] = {
746 { gm107_grctx_init_gpc_unk_0 },
747 { nv108_grctx_init_prop_0 },
748 { gm107_grctx_init_gpc_unk_1 },
749 { gm107_grctx_init_setup_0 },
750 { nvc0_grctx_init_zcull_0 },
751 { nv108_grctx_init_crstr_0 },
752 { nve4_grctx_init_gpm_0 },
753 { gm107_grctx_init_gpc_unk_2 },
754 { nvc0_grctx_init_gcc_0 },
755 {}
756};
757
758static const struct nvc0_graph_init
759gm107_grctx_init_tex_0[] = {
760 { 0x419a00, 1, 0x04, 0x000300f0 },
761 { 0x419a04, 1, 0x04, 0x00000005 },
762 { 0x419a08, 1, 0x04, 0x00000421 },
763 { 0x419a0c, 1, 0x04, 0x00120000 },
764 { 0x419a10, 1, 0x04, 0x00000000 },
765 { 0x419a14, 1, 0x04, 0x00002200 },
766 { 0x419a1c, 1, 0x04, 0x0000c000 },
767 { 0x419a20, 1, 0x04, 0x20008a00 },
768 { 0x419a30, 1, 0x04, 0x00000001 },
769 { 0x419a3c, 1, 0x04, 0x00000002 },
770 { 0x419ac4, 1, 0x04, 0x00000000 },
771 {}
772};
773
774static const struct nvc0_graph_init
775gm107_grctx_init_mpc_0[] = {
776 { 0x419c00, 1, 0x04, 0x0000001a },
777 { 0x419c04, 1, 0x04, 0x80000006 },
778 { 0x419c08, 1, 0x04, 0x00000002 },
779 { 0x419c20, 1, 0x04, 0x00000000 },
780 { 0x419c24, 1, 0x04, 0x00084210 },
781 { 0x419c28, 1, 0x04, 0x3efbefbe },
782 { 0x419c2c, 1, 0x04, 0x00000000 },
783 { 0x419c34, 1, 0x04, 0x01ff1ff3 },
784 { 0x419c3c, 1, 0x04, 0x00001919 },
785 {}
786};
787
788static const struct nvc0_graph_init
789gm107_grctx_init_l1c_0[] = {
790 { 0x419c84, 1, 0x04, 0x00000020 },
791 {}
792};
793
794static const struct nvc0_graph_init
795gm107_grctx_init_sm_0[] = {
796 { 0x419e04, 3, 0x04, 0x00000000 },
797 { 0x419e10, 1, 0x04, 0x00001c02 },
798 { 0x419e44, 1, 0x04, 0x00d3eff2 },
799 { 0x419e48, 1, 0x04, 0x00000000 },
800 { 0x419e4c, 1, 0x04, 0x0000007f },
801 { 0x419e50, 1, 0x04, 0x00000000 },
802 { 0x419e60, 4, 0x04, 0x00000000 },
803 { 0x419e74, 10, 0x04, 0x00000000 },
804 { 0x419eac, 1, 0x04, 0x0001cf8b },
805 { 0x419eb0, 1, 0x04, 0x00030300 },
806 { 0x419eb8, 1, 0x04, 0x00000000 },
807 { 0x419ef0, 24, 0x04, 0x00000000 },
808 { 0x419f68, 2, 0x04, 0x00000000 },
809 { 0x419f70, 1, 0x04, 0x00000020 },
810 { 0x419f78, 1, 0x04, 0x000003eb },
811 { 0x419f7c, 1, 0x04, 0x00000000 },
812 {}
813};
814
815static const struct nvc0_graph_pack
816gm107_grctx_pack_tpc[] = {
817 { nvd7_grctx_init_pe_0 },
818 { gm107_grctx_init_tex_0 },
819 { gm107_grctx_init_mpc_0 },
820 { gm107_grctx_init_l1c_0 },
821 { gm107_grctx_init_sm_0 },
822 {}
823};
824
825static const struct nvc0_graph_init
826gm107_grctx_init_cbm_0[] = {
827 { 0x41bec0, 1, 0x04, 0x00000000 },
828 { 0x41bec4, 1, 0x04, 0x01050000 },
829 { 0x41bee4, 1, 0x04, 0x00000000 },
830 { 0x41bef0, 1, 0x04, 0x000003ff },
831 { 0x41bef4, 2, 0x04, 0x00000000 },
832 {}
833};
834
835static const struct nvc0_graph_init
836gm107_grctx_init_wwdx_0[] = {
837 { 0x41bf00, 1, 0x04, 0x0a418820 },
838 { 0x41bf04, 1, 0x04, 0x062080e6 },
839 { 0x41bf08, 1, 0x04, 0x020398a4 },
840 { 0x41bf0c, 1, 0x04, 0x0e629062 },
841 { 0x41bf10, 1, 0x04, 0x0a418820 },
842 { 0x41bf14, 1, 0x04, 0x000000e6 },
843 { 0x41bfd0, 1, 0x04, 0x00900103 },
844 { 0x41bfe0, 1, 0x04, 0x80000000 },
845 { 0x41bfe4, 1, 0x04, 0x00000000 },
846 {}
847};
848
849static const struct nvc0_graph_pack
850gm107_grctx_pack_ppc[] = {
851 { nve4_grctx_init_pes_0 },
852 { gm107_grctx_init_cbm_0 },
853 { gm107_grctx_init_wwdx_0 },
854 {}
855};
856
857/*******************************************************************************
858 * PGRAPH context implementation
859 ******************************************************************************/
860
861static void
862gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
863{
864 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
865 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
866 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
867
868 mmio_list(0x40800c, 0x00000000, 8, 1);
869 mmio_list(0x408010, 0x80000000, 0, 0);
870 mmio_list(0x419004, 0x00000000, 8, 1);
871 mmio_list(0x419008, 0x00000000, 0, 0);
872 mmio_list(0x4064cc, 0x80000000, 0, 0);
873 mmio_list(0x418e30, 0x80000000, 0, 0);
874
875 mmio_list(0x408004, 0x00000000, 8, 0);
876 mmio_list(0x408008, 0x80000030, 0, 0);
877 mmio_list(0x418e24, 0x00000000, 8, 0);
878 mmio_list(0x418e28, 0x80000030, 0, 0);
879
880 mmio_list(0x418810, 0x80000000, 12, 2);
881 mmio_list(0x419848, 0x10000000, 12, 2);
882 mmio_list(0x419c2c, 0x10000000, 12, 2);
883
884 mmio_list(0x405830, 0x0aa01000, 0, 0);
885 mmio_list(0x4064c4, 0x0400ffff, 0, 0);
886
887 /*XXX*/
888 mmio_list(0x5030c0, 0x00001540, 0, 0);
889 mmio_list(0x5030f4, 0x00000000, 0, 0);
890 mmio_list(0x5030e4, 0x00002000, 0, 0);
891 mmio_list(0x5030f8, 0x00003fc0, 0, 0);
892 mmio_list(0x418ea0, 0x07151540, 0, 0);
893
894 mmio_list(0x5032c0, 0x00001540, 0, 0);
895 mmio_list(0x5032f4, 0x00001fe0, 0, 0);
896 mmio_list(0x5032e4, 0x00002000, 0, 0);
897 mmio_list(0x5032f8, 0x00006fc0, 0, 0);
898 mmio_list(0x418ea4, 0x07151540, 0, 0);
899}
900
901static void
902gm107_grctx_generate_tpcid(struct nvc0_graph_priv *priv)
903{
904 int gpc, tpc, id;
905
906 for (tpc = 0, id = 0; tpc < 4; tpc++) {
907 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
908 if (tpc < priv->tpc_nr[gpc]) {
909 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
910 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
911 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
912 id++;
913 }
914
915 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
916 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
917 }
918 }
919}
920
921static void
922gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
923{
924 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
925 int i;
926
927 nvc0_graph_mmio(priv, oclass->hub);
928 nvc0_graph_mmio(priv, oclass->gpc);
929 nvc0_graph_mmio(priv, oclass->zcull);
930 nvc0_graph_mmio(priv, oclass->tpc);
931 nvc0_graph_mmio(priv, oclass->ppc);
932
933 nv_wr32(priv, 0x404154, 0x00000000);
934
935 oclass->mods(priv, info);
936 oclass->unkn(priv);
937
938 gm107_grctx_generate_tpcid(priv);
939 nvc0_grctx_generate_r406028(priv);
940 nve4_grctx_generate_r418bb8(priv);
941 nvc0_grctx_generate_r406800(priv);
942
943 nv_wr32(priv, 0x4064d0, 0x00000001);
944 for (i = 1; i < 8; i++)
945 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
946 nv_wr32(priv, 0x406500, 0x00000001);
947
948 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
949
950 if (priv->gpc_nr == 1) {
951 nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
952 nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
953 } else {
954 nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
955 nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
956 }
957
958 nvc0_graph_icmd(priv, oclass->icmd);
959 nv_wr32(priv, 0x404154, 0x00000400);
960 nvc0_graph_mthd(priv, oclass->mthd);
961
962 nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
963 nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
964 nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
965 nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
966}
967
968struct nouveau_oclass *
969gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
970 .base.handle = NV_ENGCTX(GR, 0x08),
971 .base.ofuncs = &(struct nouveau_ofuncs) {
972 .ctor = nvc0_graph_context_ctor,
973 .dtor = nvc0_graph_context_dtor,
974 .init = _nouveau_graph_context_init,
975 .fini = _nouveau_graph_context_fini,
976 .rd32 = _nouveau_graph_context_rd32,
977 .wr32 = _nouveau_graph_context_wr32,
978 },
979 .main = gm107_grctx_generate_main,
980 .mods = gm107_grctx_generate_mods,
981 .unkn = nve4_grctx_generate_unkn,
982 .hub = gm107_grctx_pack_hub,
983 .gpc = gm107_grctx_pack_gpc,
984 .zcull = nvc0_grctx_pack_zcull,
985 .tpc = gm107_grctx_pack_tpc,
986 .ppc = gm107_grctx_pack_ppc,
987 .icmd = gm107_grctx_pack_icmd,
988 .mthd = gm107_grctx_pack_mthd,
989}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index a86bd3352bf8..48351b4d6d6b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -22,10 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nvc0.h"
25#include "ctxnvc0.h"
26
27static struct nvc0_graph_init
28nv108_grctx_init_icmd[] = {
27/*******************************************************************************
28 * PGRAPH context register lists
29 ******************************************************************************/
30
31static const struct nvc0_graph_init
32nv108_grctx_init_icmd_0[] = {
33 { 0x001000, 1, 0x01, 0x00000004 },
34 { 0x000039, 3, 0x01, 0x00000000 },
35 { 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -274,839 +278,14 @@ nv108_grctx_init_icmd[] = {
278 {}
279};
280
281static const struct nvc0_graph_pack
282nv108_grctx_pack_icmd[] = {
283 { nv108_grctx_init_icmd_0 },
277static struct nvc0_graph_init
278nv108_grctx_init_a197[] = {
279 { 0x000800, 1, 0x04, 0x00000000 },
280 { 0x000840, 1, 0x04, 0x00000000 },
281 { 0x000880, 1, 0x04, 0x00000000 },
282 { 0x0008c0, 1, 0x04, 0x00000000 },
283 { 0x000900, 1, 0x04, 0x00000000 },
284 { 0x000940, 1, 0x04, 0x00000000 },
285 { 0x000980, 1, 0x04, 0x00000000 },
286 { 0x0009c0, 1, 0x04, 0x00000000 },
287 { 0x000804, 1, 0x04, 0x00000000 },
288 { 0x000844, 1, 0x04, 0x00000000 },
289 { 0x000884, 1, 0x04, 0x00000000 },
290 { 0x0008c4, 1, 0x04, 0x00000000 },
291 { 0x000904, 1, 0x04, 0x00000000 },
292 { 0x000944, 1, 0x04, 0x00000000 },
293 { 0x000984, 1, 0x04, 0x00000000 },
294 { 0x0009c4, 1, 0x04, 0x00000000 },
295 { 0x000808, 1, 0x04, 0x00000400 },
296 { 0x000848, 1, 0x04, 0x00000400 },
297 { 0x000888, 1, 0x04, 0x00000400 },
298 { 0x0008c8, 1, 0x04, 0x00000400 },
299 { 0x000908, 1, 0x04, 0x00000400 },
300 { 0x000948, 1, 0x04, 0x00000400 },
301 { 0x000988, 1, 0x04, 0x00000400 },
302 { 0x0009c8, 1, 0x04, 0x00000400 },
303 { 0x00080c, 1, 0x04, 0x00000300 },
304 { 0x00084c, 1, 0x04, 0x00000300 },
305 { 0x00088c, 1, 0x04, 0x00000300 },
306 { 0x0008cc, 1, 0x04, 0x00000300 },
307 { 0x00090c, 1, 0x04, 0x00000300 },
308 { 0x00094c, 1, 0x04, 0x00000300 },
309 { 0x00098c, 1, 0x04, 0x00000300 },
310 { 0x0009cc, 1, 0x04, 0x00000300 },
311 { 0x000810, 1, 0x04, 0x000000cf },
312 { 0x000850, 1, 0x04, 0x00000000 },
313 { 0x000890, 1, 0x04, 0x00000000 },
314 { 0x0008d0, 1, 0x04, 0x00000000 },
315 { 0x000910, 1, 0x04, 0x00000000 },
316 { 0x000950, 1, 0x04, 0x00000000 },
317 { 0x000990, 1, 0x04, 0x00000000 },
318 { 0x0009d0, 1, 0x04, 0x00000000 },
319 { 0x000814, 1, 0x04, 0x00000040 },
320 { 0x000854, 1, 0x04, 0x00000040 },
321 { 0x000894, 1, 0x04, 0x00000040 },
322 { 0x0008d4, 1, 0x04, 0x00000040 },
323 { 0x000914, 1, 0x04, 0x00000040 },
324 { 0x000954, 1, 0x04, 0x00000040 },
325 { 0x000994, 1, 0x04, 0x00000040 },
326 { 0x0009d4, 1, 0x04, 0x00000040 },
327 { 0x000818, 1, 0x04, 0x00000001 },
328 { 0x000858, 1, 0x04, 0x00000001 },
329 { 0x000898, 1, 0x04, 0x00000001 },
330 { 0x0008d8, 1, 0x04, 0x00000001 },
331 { 0x000918, 1, 0x04, 0x00000001 },
332 { 0x000958, 1, 0x04, 0x00000001 },
333 { 0x000998, 1, 0x04, 0x00000001 },
334 { 0x0009d8, 1, 0x04, 0x00000001 },
335 { 0x00081c, 1, 0x04, 0x00000000 },
336 { 0x00085c, 1, 0x04, 0x00000000 },
337 { 0x00089c, 1, 0x04, 0x00000000 },
338 { 0x0008dc, 1, 0x04, 0x00000000 },
339 { 0x00091c, 1, 0x04, 0x00000000 },
340 { 0x00095c, 1, 0x04, 0x00000000 },
341 { 0x00099c, 1, 0x04, 0x00000000 },
342 { 0x0009dc, 1, 0x04, 0x00000000 },
343 { 0x000820, 1, 0x04, 0x00000000 },
344 { 0x000860, 1, 0x04, 0x00000000 },
345 { 0x0008a0, 1, 0x04, 0x00000000 },
346 { 0x0008e0, 1, 0x04, 0x00000000 },
347 { 0x000920, 1, 0x04, 0x00000000 },
348 { 0x000960, 1, 0x04, 0x00000000 },
349 { 0x0009a0, 1, 0x04, 0x00000000 },
350 { 0x0009e0, 1, 0x04, 0x00000000 },
351 { 0x001c00, 1, 0x04, 0x00000000 },
352 { 0x001c10, 1, 0x04, 0x00000000 },
353 { 0x001c20, 1, 0x04, 0x00000000 },
354 { 0x001c30, 1, 0x04, 0x00000000 },
355 { 0x001c40, 1, 0x04, 0x00000000 },
356 { 0x001c50, 1, 0x04, 0x00000000 },
357 { 0x001c60, 1, 0x04, 0x00000000 },
358 { 0x001c70, 1, 0x04, 0x00000000 },
359 { 0x001c80, 1, 0x04, 0x00000000 },
360 { 0x001c90, 1, 0x04, 0x00000000 },
361 { 0x001ca0, 1, 0x04, 0x00000000 },
362 { 0x001cb0, 1, 0x04, 0x00000000 },
363 { 0x001cc0, 1, 0x04, 0x00000000 },
364 { 0x001cd0, 1, 0x04, 0x00000000 },
365 { 0x001ce0, 1, 0x04, 0x00000000 },
366 { 0x001cf0, 1, 0x04, 0x00000000 },
367 { 0x001c04, 1, 0x04, 0x00000000 },
368 { 0x001c14, 1, 0x04, 0x00000000 },
369 { 0x001c24, 1, 0x04, 0x00000000 },
370 { 0x001c34, 1, 0x04, 0x00000000 },
371 { 0x001c44, 1, 0x04, 0x00000000 },
372 { 0x001c54, 1, 0x04, 0x00000000 },
373 { 0x001c64, 1, 0x04, 0x00000000 },
374 { 0x001c74, 1, 0x04, 0x00000000 },
375 { 0x001c84, 1, 0x04, 0x00000000 },
376 { 0x001c94, 1, 0x04, 0x00000000 },
377 { 0x001ca4, 1, 0x04, 0x00000000 },
378 { 0x001cb4, 1, 0x04, 0x00000000 },
379 { 0x001cc4, 1, 0x04, 0x00000000 },
380 { 0x001cd4, 1, 0x04, 0x00000000 },
381 { 0x001ce4, 1, 0x04, 0x00000000 },
382 { 0x001cf4, 1, 0x04, 0x00000000 },
383 { 0x001c08, 1, 0x04, 0x00000000 },
384 { 0x001c18, 1, 0x04, 0x00000000 },
385 { 0x001c28, 1, 0x04, 0x00000000 },
386 { 0x001c38, 1, 0x04, 0x00000000 },
387 { 0x001c48, 1, 0x04, 0x00000000 },
388 { 0x001c58, 1, 0x04, 0x00000000 },
389 { 0x001c68, 1, 0x04, 0x00000000 },
390 { 0x001c78, 1, 0x04, 0x00000000 },
391 { 0x001c88, 1, 0x04, 0x00000000 },
392 { 0x001c98, 1, 0x04, 0x00000000 },
393 { 0x001ca8, 1, 0x04, 0x00000000 },
394 { 0x001cb8, 1, 0x04, 0x00000000 },
395 { 0x001cc8, 1, 0x04, 0x00000000 },
396 { 0x001cd8, 1, 0x04, 0x00000000 },
397 { 0x001ce8, 1, 0x04, 0x00000000 },
398 { 0x001cf8, 1, 0x04, 0x00000000 },
399 { 0x001c0c, 1, 0x04, 0x00000000 },
400 { 0x001c1c, 1, 0x04, 0x00000000 },
401 { 0x001c2c, 1, 0x04, 0x00000000 },
402 { 0x001c3c, 1, 0x04, 0x00000000 },
403 { 0x001c4c, 1, 0x04, 0x00000000 },
404 { 0x001c5c, 1, 0x04, 0x00000000 },
405 { 0x001c6c, 1, 0x04, 0x00000000 },
406 { 0x001c7c, 1, 0x04, 0x00000000 },
407 { 0x001c8c, 1, 0x04, 0x00000000 },
408 { 0x001c9c, 1, 0x04, 0x00000000 },
409 { 0x001cac, 1, 0x04, 0x00000000 },
410 { 0x001cbc, 1, 0x04, 0x00000000 },
411 { 0x001ccc, 1, 0x04, 0x00000000 },
412 { 0x001cdc, 1, 0x04, 0x00000000 },
413 { 0x001cec, 1, 0x04, 0x00000000 },
414 { 0x001cfc, 2, 0x04, 0x00000000 },
415 { 0x001d10, 1, 0x04, 0x00000000 },
416 { 0x001d20, 1, 0x04, 0x00000000 },
417 { 0x001d30, 1, 0x04, 0x00000000 },
418 { 0x001d40, 1, 0x04, 0x00000000 },
419 { 0x001d50, 1, 0x04, 0x00000000 },
420 { 0x001d60, 1, 0x04, 0x00000000 },
421 { 0x001d70, 1, 0x04, 0x00000000 },
422 { 0x001d80, 1, 0x04, 0x00000000 },
423 { 0x001d90, 1, 0x04, 0x00000000 },
424 { 0x001da0, 1, 0x04, 0x00000000 },
425 { 0x001db0, 1, 0x04, 0x00000000 },
426 { 0x001dc0, 1, 0x04, 0x00000000 },
427 { 0x001dd0, 1, 0x04, 0x00000000 },
428 { 0x001de0, 1, 0x04, 0x00000000 },
429 { 0x001df0, 1, 0x04, 0x00000000 },
430 { 0x001d04, 1, 0x04, 0x00000000 },
431 { 0x001d14, 1, 0x04, 0x00000000 },
432 { 0x001d24, 1, 0x04, 0x00000000 },
433 { 0x001d34, 1, 0x04, 0x00000000 },
434 { 0x001d44, 1, 0x04, 0x00000000 },
435 { 0x001d54, 1, 0x04, 0x00000000 },
436 { 0x001d64, 1, 0x04, 0x00000000 },
437 { 0x001d74, 1, 0x04, 0x00000000 },
438 { 0x001d84, 1, 0x04, 0x00000000 },
439 { 0x001d94, 1, 0x04, 0x00000000 },
440 { 0x001da4, 1, 0x04, 0x00000000 },
441 { 0x001db4, 1, 0x04, 0x00000000 },
442 { 0x001dc4, 1, 0x04, 0x00000000 },
443 { 0x001dd4, 1, 0x04, 0x00000000 },
444 { 0x001de4, 1, 0x04, 0x00000000 },
445 { 0x001df4, 1, 0x04, 0x00000000 },
446 { 0x001d08, 1, 0x04, 0x00000000 },
447 { 0x001d18, 1, 0x04, 0x00000000 },
448 { 0x001d28, 1, 0x04, 0x00000000 },
449 { 0x001d38, 1, 0x04, 0x00000000 },
450 { 0x001d48, 1, 0x04, 0x00000000 },
451 { 0x001d58, 1, 0x04, 0x00000000 },
452 { 0x001d68, 1, 0x04, 0x00000000 },
453 { 0x001d78, 1, 0x04, 0x00000000 },
454 { 0x001d88, 1, 0x04, 0x00000000 },
455 { 0x001d98, 1, 0x04, 0x00000000 },
456 { 0x001da8, 1, 0x04, 0x00000000 },
457 { 0x001db8, 1, 0x04, 0x00000000 },
458 { 0x001dc8, 1, 0x04, 0x00000000 },
459 { 0x001dd8, 1, 0x04, 0x00000000 },
460 { 0x001de8, 1, 0x04, 0x00000000 },
461 { 0x001df8, 1, 0x04, 0x00000000 },
462 { 0x001d0c, 1, 0x04, 0x00000000 },
463 { 0x001d1c, 1, 0x04, 0x00000000 },
464 { 0x001d2c, 1, 0x04, 0x00000000 },
465 { 0x001d3c, 1, 0x04, 0x00000000 },
466 { 0x001d4c, 1, 0x04, 0x00000000 },
467 { 0x001d5c, 1, 0x04, 0x00000000 },
468 { 0x001d6c, 1, 0x04, 0x00000000 },
469 { 0x001d7c, 1, 0x04, 0x00000000 },
470 { 0x001d8c, 1, 0x04, 0x00000000 },
471 { 0x001d9c, 1, 0x04, 0x00000000 },
472 { 0x001dac, 1, 0x04, 0x00000000 },
473 { 0x001dbc, 1, 0x04, 0x00000000 },
474 { 0x001dcc, 1, 0x04, 0x00000000 },
475 { 0x001ddc, 1, 0x04, 0x00000000 },
476 { 0x001dec, 1, 0x04, 0x00000000 },
477 { 0x001dfc, 1, 0x04, 0x00000000 },
478 { 0x001f00, 1, 0x04, 0x00000000 },
479 { 0x001f08, 1, 0x04, 0x00000000 },
480 { 0x001f10, 1, 0x04, 0x00000000 },
481 { 0x001f18, 1, 0x04, 0x00000000 },
482 { 0x001f20, 1, 0x04, 0x00000000 },
483 { 0x001f28, 1, 0x04, 0x00000000 },
484 { 0x001f30, 1, 0x04, 0x00000000 },
485 { 0x001f38, 1, 0x04, 0x00000000 },
486 { 0x001f40, 1, 0x04, 0x00000000 },
487 { 0x001f48, 1, 0x04, 0x00000000 },
488 { 0x001f50, 1, 0x04, 0x00000000 },
489 { 0x001f58, 1, 0x04, 0x00000000 },
490 { 0x001f60, 1, 0x04, 0x00000000 },
491 { 0x001f68, 1, 0x04, 0x00000000 },
492 { 0x001f70, 1, 0x04, 0x00000000 },
493 { 0x001f78, 1, 0x04, 0x00000000 },
494 { 0x001f04, 1, 0x04, 0x00000000 },
495 { 0x001f0c, 1, 0x04, 0x00000000 },
496 { 0x001f14, 1, 0x04, 0x00000000 },
497 { 0x001f1c, 1, 0x04, 0x00000000 },
498 { 0x001f24, 1, 0x04, 0x00000000 },
499 { 0x001f2c, 1, 0x04, 0x00000000 },
500 { 0x001f34, 1, 0x04, 0x00000000 },
501 { 0x001f3c, 1, 0x04, 0x00000000 },
502 { 0x001f44, 1, 0x04, 0x00000000 },
503 { 0x001f4c, 1, 0x04, 0x00000000 },
504 { 0x001f54, 1, 0x04, 0x00000000 },
505 { 0x001f5c, 1, 0x04, 0x00000000 },
506 { 0x001f64, 1, 0x04, 0x00000000 },
507 { 0x001f6c, 1, 0x04, 0x00000000 },
508 { 0x001f74, 1, 0x04, 0x00000000 },
509 { 0x001f7c, 2, 0x04, 0x00000000 },
510 { 0x001f88, 1, 0x04, 0x00000000 },
511 { 0x001f90, 1, 0x04, 0x00000000 },
512 { 0x001f98, 1, 0x04, 0x00000000 },
513 { 0x001fa0, 1, 0x04, 0x00000000 },
514 { 0x001fa8, 1, 0x04, 0x00000000 },
515 { 0x001fb0, 1, 0x04, 0x00000000 },
516 { 0x001fb8, 1, 0x04, 0x00000000 },
517 { 0x001fc0, 1, 0x04, 0x00000000 },
518 { 0x001fc8, 1, 0x04, 0x00000000 },
519 { 0x001fd0, 1, 0x04, 0x00000000 },
520 { 0x001fd8, 1, 0x04, 0x00000000 },
521 { 0x001fe0, 1, 0x04, 0x00000000 },
522 { 0x001fe8, 1, 0x04, 0x00000000 },
523 { 0x001ff0, 1, 0x04, 0x00000000 },
524 { 0x001ff8, 1, 0x04, 0x00000000 },
525 { 0x001f84, 1, 0x04, 0x00000000 },
526 { 0x001f8c, 1, 0x04, 0x00000000 },
527 { 0x001f94, 1, 0x04, 0x00000000 },
528 { 0x001f9c, 1, 0x04, 0x00000000 },
529 { 0x001fa4, 1, 0x04, 0x00000000 },
530 { 0x001fac, 1, 0x04, 0x00000000 },
531 { 0x001fb4, 1, 0x04, 0x00000000 },
532 { 0x001fbc, 1, 0x04, 0x00000000 },
533 { 0x001fc4, 1, 0x04, 0x00000000 },
534 { 0x001fcc, 1, 0x04, 0x00000000 },
535 { 0x001fd4, 1, 0x04, 0x00000000 },
536 { 0x001fdc, 1, 0x04, 0x00000000 },
537 { 0x001fe4, 1, 0x04, 0x00000000 },
538 { 0x001fec, 1, 0x04, 0x00000000 },
539 { 0x001ff4, 1, 0x04, 0x00000000 },
540 { 0x001ffc, 2, 0x04, 0x00000000 },
541 { 0x002040, 1, 0x04, 0x00000011 },
542 { 0x002080, 1, 0x04, 0x00000020 },
543 { 0x0020c0, 1, 0x04, 0x00000030 },
544 { 0x002100, 1, 0x04, 0x00000040 },
545 { 0x002140, 1, 0x04, 0x00000051 },
546 { 0x00200c, 1, 0x04, 0x00000001 },
547 { 0x00204c, 1, 0x04, 0x00000001 },
548 { 0x00208c, 1, 0x04, 0x00000001 },
549 { 0x0020cc, 1, 0x04, 0x00000001 },
550 { 0x00210c, 1, 0x04, 0x00000001 },
551 { 0x00214c, 1, 0x04, 0x00000001 },
552 { 0x002010, 1, 0x04, 0x00000000 },
553 { 0x002050, 1, 0x04, 0x00000000 },
554 { 0x002090, 1, 0x04, 0x00000001 },
555 { 0x0020d0, 1, 0x04, 0x00000002 },
556 { 0x002110, 1, 0x04, 0x00000003 },
557 { 0x002150, 1, 0x04, 0x00000004 },
558 { 0x000380, 1, 0x04, 0x00000000 },
559 { 0x0003a0, 1, 0x04, 0x00000000 },
560 { 0x0003c0, 1, 0x04, 0x00000000 },
561 { 0x0003e0, 1, 0x04, 0x00000000 },
562 { 0x000384, 1, 0x04, 0x00000000 },
563 { 0x0003a4, 1, 0x04, 0x00000000 },
564 { 0x0003c4, 1, 0x04, 0x00000000 },
565 { 0x0003e4, 1, 0x04, 0x00000000 },
566 { 0x000388, 1, 0x04, 0x00000000 },
567 { 0x0003a8, 1, 0x04, 0x00000000 },
568 { 0x0003c8, 1, 0x04, 0x00000000 },
569 { 0x0003e8, 1, 0x04, 0x00000000 },
570 { 0x00038c, 1, 0x04, 0x00000000 },
571 { 0x0003ac, 1, 0x04, 0x00000000 },
572 { 0x0003cc, 1, 0x04, 0x00000000 },
573 { 0x0003ec, 1, 0x04, 0x00000000 },
574 { 0x000700, 1, 0x04, 0x00000000 },
575 { 0x000710, 1, 0x04, 0x00000000 },
576 { 0x000720, 1, 0x04, 0x00000000 },
577 { 0x000730, 1, 0x04, 0x00000000 },
578 { 0x000704, 1, 0x04, 0x00000000 },
579 { 0x000714, 1, 0x04, 0x00000000 },
580 { 0x000724, 1, 0x04, 0x00000000 },
581 { 0x000734, 1, 0x04, 0x00000000 },
582 { 0x000708, 1, 0x04, 0x00000000 },
583 { 0x000718, 1, 0x04, 0x00000000 },
584 { 0x000728, 1, 0x04, 0x00000000 },
585 { 0x000738, 1, 0x04, 0x00000000 },
586 { 0x002800, 128, 0x04, 0x00000000 },
587 { 0x000a00, 1, 0x04, 0x00000000 },
588 { 0x000a20, 1, 0x04, 0x00000000 },
589 { 0x000a40, 1, 0x04, 0x00000000 },
590 { 0x000a60, 1, 0x04, 0x00000000 },
591 { 0x000a80, 1, 0x04, 0x00000000 },
592 { 0x000aa0, 1, 0x04, 0x00000000 },
593 { 0x000ac0, 1, 0x04, 0x00000000 },
594 { 0x000ae0, 1, 0x04, 0x00000000 },
595 { 0x000b00, 1, 0x04, 0x00000000 },
596 { 0x000b20, 1, 0x04, 0x00000000 },
597 { 0x000b40, 1, 0x04, 0x00000000 },
598 { 0x000b60, 1, 0x04, 0x00000000 },
599 { 0x000b80, 1, 0x04, 0x00000000 },
600 { 0x000ba0, 1, 0x04, 0x00000000 },
601 { 0x000bc0, 1, 0x04, 0x00000000 },
602 { 0x000be0, 1, 0x04, 0x00000000 },
603 { 0x000a04, 1, 0x04, 0x00000000 },
604 { 0x000a24, 1, 0x04, 0x00000000 },
605 { 0x000a44, 1, 0x04, 0x00000000 },
606 { 0x000a64, 1, 0x04, 0x00000000 },
607 { 0x000a84, 1, 0x04, 0x00000000 },
608 { 0x000aa4, 1, 0x04, 0x00000000 },
609 { 0x000ac4, 1, 0x04, 0x00000000 },
610 { 0x000ae4, 1, 0x04, 0x00000000 },
611 { 0x000b04, 1, 0x04, 0x00000000 },
612 { 0x000b24, 1, 0x04, 0x00000000 },
613 { 0x000b44, 1, 0x04, 0x00000000 },
614 { 0x000b64, 1, 0x04, 0x00000000 },
615 { 0x000b84, 1, 0x04, 0x00000000 },
616 { 0x000ba4, 1, 0x04, 0x00000000 },
617 { 0x000bc4, 1, 0x04, 0x00000000 },
618 { 0x000be4, 1, 0x04, 0x00000000 },
619 { 0x000a08, 1, 0x04, 0x00000000 },
620 { 0x000a28, 1, 0x04, 0x00000000 },
621 { 0x000a48, 1, 0x04, 0x00000000 },
622 { 0x000a68, 1, 0x04, 0x00000000 },
623 { 0x000a88, 1, 0x04, 0x00000000 },
624 { 0x000aa8, 1, 0x04, 0x00000000 },
625 { 0x000ac8, 1, 0x04, 0x00000000 },
626 { 0x000ae8, 1, 0x04, 0x00000000 },
627 { 0x000b08, 1, 0x04, 0x00000000 },
628 { 0x000b28, 1, 0x04, 0x00000000 },
629 { 0x000b48, 1, 0x04, 0x00000000 },
630 { 0x000b68, 1, 0x04, 0x00000000 },
631 { 0x000b88, 1, 0x04, 0x00000000 },
632 { 0x000ba8, 1, 0x04, 0x00000000 },
633 { 0x000bc8, 1, 0x04, 0x00000000 },
634 { 0x000be8, 1, 0x04, 0x00000000 },
635 { 0x000a0c, 1, 0x04, 0x00000000 },
636 { 0x000a2c, 1, 0x04, 0x00000000 },
637 { 0x000a4c, 1, 0x04, 0x00000000 },
638 { 0x000a6c, 1, 0x04, 0x00000000 },
639 { 0x000a8c, 1, 0x04, 0x00000000 },
640 { 0x000aac, 1, 0x04, 0x00000000 },
641 { 0x000acc, 1, 0x04, 0x00000000 },
642 { 0x000aec, 1, 0x04, 0x00000000 },
643 { 0x000b0c, 1, 0x04, 0x00000000 },
644 { 0x000b2c, 1, 0x04, 0x00000000 },
645 { 0x000b4c, 1, 0x04, 0x00000000 },
646 { 0x000b6c, 1, 0x04, 0x00000000 },
647 { 0x000b8c, 1, 0x04, 0x00000000 },
648 { 0x000bac, 1, 0x04, 0x00000000 },
649 { 0x000bcc, 1, 0x04, 0x00000000 },
650 { 0x000bec, 1, 0x04, 0x00000000 },
651 { 0x000a10, 1, 0x04, 0x00000000 },
652 { 0x000a30, 1, 0x04, 0x00000000 },
653 { 0x000a50, 1, 0x04, 0x00000000 },
654 { 0x000a70, 1, 0x04, 0x00000000 },
655 { 0x000a90, 1, 0x04, 0x00000000 },
656 { 0x000ab0, 1, 0x04, 0x00000000 },
657 { 0x000ad0, 1, 0x04, 0x00000000 },
658 { 0x000af0, 1, 0x04, 0x00000000 },
659 { 0x000b10, 1, 0x04, 0x00000000 },
660 { 0x000b30, 1, 0x04, 0x00000000 },
661 { 0x000b50, 1, 0x04, 0x00000000 },
662 { 0x000b70, 1, 0x04, 0x00000000 },
663 { 0x000b90, 1, 0x04, 0x00000000 },
664 { 0x000bb0, 1, 0x04, 0x00000000 },
665 { 0x000bd0, 1, 0x04, 0x00000000 },
666 { 0x000bf0, 1, 0x04, 0x00000000 },
667 { 0x000a14, 1, 0x04, 0x00000000 },
668 { 0x000a34, 1, 0x04, 0x00000000 },
669 { 0x000a54, 1, 0x04, 0x00000000 },
670 { 0x000a74, 1, 0x04, 0x00000000 },
671 { 0x000a94, 1, 0x04, 0x00000000 },
672 { 0x000ab4, 1, 0x04, 0x00000000 },
673 { 0x000ad4, 1, 0x04, 0x00000000 },
674 { 0x000af4, 1, 0x04, 0x00000000 },
675 { 0x000b14, 1, 0x04, 0x00000000 },
676 { 0x000b34, 1, 0x04, 0x00000000 },
677 { 0x000b54, 1, 0x04, 0x00000000 },
678 { 0x000b74, 1, 0x04, 0x00000000 },
679 { 0x000b94, 1, 0x04, 0x00000000 },
680 { 0x000bb4, 1, 0x04, 0x00000000 },
681 { 0x000bd4, 1, 0x04, 0x00000000 },
682 { 0x000bf4, 1, 0x04, 0x00000000 },
683 { 0x000c00, 1, 0x04, 0x00000000 },
684 { 0x000c10, 1, 0x04, 0x00000000 },
685 { 0x000c20, 1, 0x04, 0x00000000 },
686 { 0x000c30, 1, 0x04, 0x00000000 },
687 { 0x000c40, 1, 0x04, 0x00000000 },
688 { 0x000c50, 1, 0x04, 0x00000000 },
689 { 0x000c60, 1, 0x04, 0x00000000 },
690 { 0x000c70, 1, 0x04, 0x00000000 },
691 { 0x000c80, 1, 0x04, 0x00000000 },
692 { 0x000c90, 1, 0x04, 0x00000000 },
693 { 0x000ca0, 1, 0x04, 0x00000000 },
694 { 0x000cb0, 1, 0x04, 0x00000000 },
695 { 0x000cc0, 1, 0x04, 0x00000000 },
696 { 0x000cd0, 1, 0x04, 0x00000000 },
697 { 0x000ce0, 1, 0x04, 0x00000000 },
698 { 0x000cf0, 1, 0x04, 0x00000000 },
699 { 0x000c04, 1, 0x04, 0x00000000 },
700 { 0x000c14, 1, 0x04, 0x00000000 },
701 { 0x000c24, 1, 0x04, 0x00000000 },
702 { 0x000c34, 1, 0x04, 0x00000000 },
703 { 0x000c44, 1, 0x04, 0x00000000 },
704 { 0x000c54, 1, 0x04, 0x00000000 },
705 { 0x000c64, 1, 0x04, 0x00000000 },
706 { 0x000c74, 1, 0x04, 0x00000000 },
707 { 0x000c84, 1, 0x04, 0x00000000 },
708 { 0x000c94, 1, 0x04, 0x00000000 },
709 { 0x000ca4, 1, 0x04, 0x00000000 },
710 { 0x000cb4, 1, 0x04, 0x00000000 },
711 { 0x000cc4, 1, 0x04, 0x00000000 },
712 { 0x000cd4, 1, 0x04, 0x00000000 },
713 { 0x000ce4, 1, 0x04, 0x00000000 },
714 { 0x000cf4, 1, 0x04, 0x00000000 },
715 { 0x000c08, 1, 0x04, 0x00000000 },
716 { 0x000c18, 1, 0x04, 0x00000000 },
717 { 0x000c28, 1, 0x04, 0x00000000 },
718 { 0x000c38, 1, 0x04, 0x00000000 },
719 { 0x000c48, 1, 0x04, 0x00000000 },
720 { 0x000c58, 1, 0x04, 0x00000000 },
721 { 0x000c68, 1, 0x04, 0x00000000 },
722 { 0x000c78, 1, 0x04, 0x00000000 },
723 { 0x000c88, 1, 0x04, 0x00000000 },
724 { 0x000c98, 1, 0x04, 0x00000000 },
725 { 0x000ca8, 1, 0x04, 0x00000000 },
726 { 0x000cb8, 1, 0x04, 0x00000000 },
727 { 0x000cc8, 1, 0x04, 0x00000000 },
728 { 0x000cd8, 1, 0x04, 0x00000000 },
729 { 0x000ce8, 1, 0x04, 0x00000000 },
730 { 0x000cf8, 1, 0x04, 0x00000000 },
731 { 0x000c0c, 1, 0x04, 0x3f800000 },
732 { 0x000c1c, 1, 0x04, 0x3f800000 },
733 { 0x000c2c, 1, 0x04, 0x3f800000 },
734 { 0x000c3c, 1, 0x04, 0x3f800000 },
735 { 0x000c4c, 1, 0x04, 0x3f800000 },
736 { 0x000c5c, 1, 0x04, 0x3f800000 },
737 { 0x000c6c, 1, 0x04, 0x3f800000 },
738 { 0x000c7c, 1, 0x04, 0x3f800000 },
739 { 0x000c8c, 1, 0x04, 0x3f800000 },
740 { 0x000c9c, 1, 0x04, 0x3f800000 },
741 { 0x000cac, 1, 0x04, 0x3f800000 },
742 { 0x000cbc, 1, 0x04, 0x3f800000 },
743 { 0x000ccc, 1, 0x04, 0x3f800000 },
744 { 0x000cdc, 1, 0x04, 0x3f800000 },
745 { 0x000cec, 1, 0x04, 0x3f800000 },
746 { 0x000cfc, 1, 0x04, 0x3f800000 },
747 { 0x000d00, 1, 0x04, 0xffff0000 },
748 { 0x000d08, 1, 0x04, 0xffff0000 },
749 { 0x000d10, 1, 0x04, 0xffff0000 },
750 { 0x000d18, 1, 0x04, 0xffff0000 },
751 { 0x000d20, 1, 0x04, 0xffff0000 },
752 { 0x000d28, 1, 0x04, 0xffff0000 },
753 { 0x000d30, 1, 0x04, 0xffff0000 },
754 { 0x000d38, 1, 0x04, 0xffff0000 },
755 { 0x000d04, 1, 0x04, 0xffff0000 },
756 { 0x000d0c, 1, 0x04, 0xffff0000 },
757 { 0x000d14, 1, 0x04, 0xffff0000 },
758 { 0x000d1c, 1, 0x04, 0xffff0000 },
759 { 0x000d24, 1, 0x04, 0xffff0000 },
760 { 0x000d2c, 1, 0x04, 0xffff0000 },
761 { 0x000d34, 1, 0x04, 0xffff0000 },
762 { 0x000d3c, 1, 0x04, 0xffff0000 },
763 { 0x000e00, 1, 0x04, 0x00000000 },
764 { 0x000e10, 1, 0x04, 0x00000000 },
765 { 0x000e20, 1, 0x04, 0x00000000 },
766 { 0x000e30, 1, 0x04, 0x00000000 },
767 { 0x000e40, 1, 0x04, 0x00000000 },
768 { 0x000e50, 1, 0x04, 0x00000000 },
769 { 0x000e60, 1, 0x04, 0x00000000 },
770 { 0x000e70, 1, 0x04, 0x00000000 },
771 { 0x000e80, 1, 0x04, 0x00000000 },
772 { 0x000e90, 1, 0x04, 0x00000000 },
773 { 0x000ea0, 1, 0x04, 0x00000000 },
774 { 0x000eb0, 1, 0x04, 0x00000000 },
775 { 0x000ec0, 1, 0x04, 0x00000000 },
776 { 0x000ed0, 1, 0x04, 0x00000000 },
777 { 0x000ee0, 1, 0x04, 0x00000000 },
778 { 0x000ef0, 1, 0x04, 0x00000000 },
779 { 0x000e04, 1, 0x04, 0xffff0000 },
780 { 0x000e14, 1, 0x04, 0xffff0000 },
781 { 0x000e24, 1, 0x04, 0xffff0000 },
782 { 0x000e34, 1, 0x04, 0xffff0000 },
783 { 0x000e44, 1, 0x04, 0xffff0000 },
784 { 0x000e54, 1, 0x04, 0xffff0000 },
785 { 0x000e64, 1, 0x04, 0xffff0000 },
786 { 0x000e74, 1, 0x04, 0xffff0000 },
787 { 0x000e84, 1, 0x04, 0xffff0000 },
788 { 0x000e94, 1, 0x04, 0xffff0000 },
789 { 0x000ea4, 1, 0x04, 0xffff0000 },
790 { 0x000eb4, 1, 0x04, 0xffff0000 },
791 { 0x000ec4, 1, 0x04, 0xffff0000 },
792 { 0x000ed4, 1, 0x04, 0xffff0000 },
793 { 0x000ee4, 1, 0x04, 0xffff0000 },
794 { 0x000ef4, 1, 0x04, 0xffff0000 },
795 { 0x000e08, 1, 0x04, 0xffff0000 },
796 { 0x000e18, 1, 0x04, 0xffff0000 },
797 { 0x000e28, 1, 0x04, 0xffff0000 },
798 { 0x000e38, 1, 0x04, 0xffff0000 },
799 { 0x000e48, 1, 0x04, 0xffff0000 },
800 { 0x000e58, 1, 0x04, 0xffff0000 },
801 { 0x000e68, 1, 0x04, 0xffff0000 },
802 { 0x000e78, 1, 0x04, 0xffff0000 },
803 { 0x000e88, 1, 0x04, 0xffff0000 },
804 { 0x000e98, 1, 0x04, 0xffff0000 },
805 { 0x000ea8, 1, 0x04, 0xffff0000 },
806 { 0x000eb8, 1, 0x04, 0xffff0000 },
807 { 0x000ec8, 1, 0x04, 0xffff0000 },
808 { 0x000ed8, 1, 0x04, 0xffff0000 },
809 { 0x000ee8, 1, 0x04, 0xffff0000 },
810 { 0x000ef8, 1, 0x04, 0xffff0000 },
811 { 0x000d40, 1, 0x04, 0x00000000 },
812 { 0x000d48, 1, 0x04, 0x00000000 },
813 { 0x000d50, 1, 0x04, 0x00000000 },
814 { 0x000d58, 1, 0x04, 0x00000000 },
815 { 0x000d44, 1, 0x04, 0x00000000 },
816 { 0x000d4c, 1, 0x04, 0x00000000 },
817 { 0x000d54, 1, 0x04, 0x00000000 },
818 { 0x000d5c, 1, 0x04, 0x00000000 },
819 { 0x001e00, 1, 0x04, 0x00000001 },
820 { 0x001e20, 1, 0x04, 0x00000001 },
821 { 0x001e40, 1, 0x04, 0x00000001 },
822 { 0x001e60, 1, 0x04, 0x00000001 },
823 { 0x001e80, 1, 0x04, 0x00000001 },
824 { 0x001ea0, 1, 0x04, 0x00000001 },
825 { 0x001ec0, 1, 0x04, 0x00000001 },
826 { 0x001ee0, 1, 0x04, 0x00000001 },
827 { 0x001e04, 1, 0x04, 0x00000001 },
828 { 0x001e24, 1, 0x04, 0x00000001 },
829 { 0x001e44, 1, 0x04, 0x00000001 },
830 { 0x001e64, 1, 0x04, 0x00000001 },
831 { 0x001e84, 1, 0x04, 0x00000001 },
832 { 0x001ea4, 1, 0x04, 0x00000001 },
833 { 0x001ec4, 1, 0x04, 0x00000001 },
834 { 0x001ee4, 1, 0x04, 0x00000001 },
835 { 0x001e08, 1, 0x04, 0x00000002 },
836 { 0x001e28, 1, 0x04, 0x00000002 },
837 { 0x001e48, 1, 0x04, 0x00000002 },
838 { 0x001e68, 1, 0x04, 0x00000002 },
839 { 0x001e88, 1, 0x04, 0x00000002 },
840 { 0x001ea8, 1, 0x04, 0x00000002 },
841 { 0x001ec8, 1, 0x04, 0x00000002 },
842 { 0x001ee8, 1, 0x04, 0x00000002 },
843 { 0x001e0c, 1, 0x04, 0x00000001 },
844 { 0x001e2c, 1, 0x04, 0x00000001 },
845 { 0x001e4c, 1, 0x04, 0x00000001 },
846 { 0x001e6c, 1, 0x04, 0x00000001 },
847 { 0x001e8c, 1, 0x04, 0x00000001 },
848 { 0x001eac, 1, 0x04, 0x00000001 },
849 { 0x001ecc, 1, 0x04, 0x00000001 },
850 { 0x001eec, 1, 0x04, 0x00000001 },
851 { 0x001e10, 1, 0x04, 0x00000001 },
852 { 0x001e30, 1, 0x04, 0x00000001 },
853 { 0x001e50, 1, 0x04, 0x00000001 },
854 { 0x001e70, 1, 0x04, 0x00000001 },
855 { 0x001e90, 1, 0x04, 0x00000001 },
856 { 0x001eb0, 1, 0x04, 0x00000001 },
857 { 0x001ed0, 1, 0x04, 0x00000001 },
858 { 0x001ef0, 1, 0x04, 0x00000001 },
859 { 0x001e14, 1, 0x04, 0x00000002 },
860 { 0x001e34, 1, 0x04, 0x00000002 },
861 { 0x001e54, 1, 0x04, 0x00000002 },
862 { 0x001e74, 1, 0x04, 0x00000002 },
863 { 0x001e94, 1, 0x04, 0x00000002 },
864 { 0x001eb4, 1, 0x04, 0x00000002 },
865 { 0x001ed4, 1, 0x04, 0x00000002 },
866 { 0x001ef4, 1, 0x04, 0x00000002 },
867 { 0x001e18, 1, 0x04, 0x00000001 },
868 { 0x001e38, 1, 0x04, 0x00000001 },
869 { 0x001e58, 1, 0x04, 0x00000001 },
870 { 0x001e78, 1, 0x04, 0x00000001 },
871 { 0x001e98, 1, 0x04, 0x00000001 },
872 { 0x001eb8, 1, 0x04, 0x00000001 },
873 { 0x001ed8, 1, 0x04, 0x00000001 },
874 { 0x001ef8, 1, 0x04, 0x00000001 },
875 { 0x003400, 128, 0x04, 0x00000000 },
876 { 0x00030c, 1, 0x04, 0x00000001 },
877 { 0x001944, 1, 0x04, 0x00000000 },
878 { 0x001514, 1, 0x04, 0x00000000 },
879 { 0x000d68, 1, 0x04, 0x0000ffff },
880 { 0x00121c, 1, 0x04, 0x0fac6881 },
881 { 0x000fac, 1, 0x04, 0x00000001 },
882 { 0x001538, 1, 0x04, 0x00000001 },
883 { 0x000fe0, 2, 0x04, 0x00000000 },
884 { 0x000fe8, 1, 0x04, 0x00000014 },
885 { 0x000fec, 1, 0x04, 0x00000040 },
886 { 0x000ff0, 1, 0x04, 0x00000000 },
887 { 0x00179c, 1, 0x04, 0x00000000 },
888 { 0x001228, 1, 0x04, 0x00000400 },
889 { 0x00122c, 1, 0x04, 0x00000300 },
890 { 0x001230, 1, 0x04, 0x00010001 },
891 { 0x0007f8, 1, 0x04, 0x00000000 },
892 { 0x0015b4, 1, 0x04, 0x00000001 },
893 { 0x0015cc, 1, 0x04, 0x00000000 },
894 { 0x001534, 1, 0x04, 0x00000000 },
895 { 0x000fb0, 1, 0x04, 0x00000000 },
896 { 0x0015d0, 1, 0x04, 0x00000000 },
897 { 0x00153c, 1, 0x04, 0x00000000 },
898 { 0x0016b4, 1, 0x04, 0x00000003 },
899 { 0x000fbc, 4, 0x04, 0x0000ffff },
900 { 0x000df8, 2, 0x04, 0x00000000 },
901 { 0x001948, 1, 0x04, 0x00000000 },
902 { 0x001970, 1, 0x04, 0x00000001 },
903 { 0x00161c, 1, 0x04, 0x000009f0 },
904 { 0x000dcc, 1, 0x04, 0x00000010 },
905 { 0x00163c, 1, 0x04, 0x00000000 },
906 { 0x0015e4, 1, 0x04, 0x00000000 },
907 { 0x001160, 32, 0x04, 0x25e00040 },
908 { 0x001880, 32, 0x04, 0x00000000 },
909 { 0x000f84, 2, 0x04, 0x00000000 },
910 { 0x0017c8, 2, 0x04, 0x00000000 },
911 { 0x0017d0, 1, 0x04, 0x000000ff },
912 { 0x0017d4, 1, 0x04, 0xffffffff },
913 { 0x0017d8, 1, 0x04, 0x00000002 },
914 { 0x0017dc, 1, 0x04, 0x00000000 },
915 { 0x0015f4, 2, 0x04, 0x00000000 },
916 { 0x001434, 2, 0x04, 0x00000000 },
917 { 0x000d74, 1, 0x04, 0x00000000 },
918 { 0x000dec, 1, 0x04, 0x00000001 },
919 { 0x0013a4, 1, 0x04, 0x00000000 },
920 { 0x001318, 1, 0x04, 0x00000001 },
921 { 0x001644, 1, 0x04, 0x00000000 },
922 { 0x000748, 1, 0x04, 0x00000000 },
923 { 0x000de8, 1, 0x04, 0x00000000 },
924 { 0x001648, 1, 0x04, 0x00000000 },
925 { 0x0012a4, 1, 0x04, 0x00000000 },
926 { 0x001120, 4, 0x04, 0x00000000 },
927 { 0x001118, 1, 0x04, 0x00000000 },
928 { 0x00164c, 1, 0x04, 0x00000000 },
929 { 0x001658, 1, 0x04, 0x00000000 },
930 { 0x001910, 1, 0x04, 0x00000290 },
931 { 0x001518, 1, 0x04, 0x00000000 },
932 { 0x00165c, 1, 0x04, 0x00000001 },
933 { 0x001520, 1, 0x04, 0x00000000 },
934 { 0x001604, 1, 0x04, 0x00000000 },
935 { 0x001570, 1, 0x04, 0x00000000 },
936 { 0x0013b0, 2, 0x04, 0x3f800000 },
937 { 0x00020c, 1, 0x04, 0x00000000 },
938 { 0x001670, 1, 0x04, 0x30201000 },
939 { 0x001674, 1, 0x04, 0x70605040 },
940 { 0x001678, 1, 0x04, 0xb8a89888 },
941 { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
942 { 0x00166c, 1, 0x04, 0x00000000 },
943 { 0x001680, 1, 0x04, 0x00ffff00 },
944 { 0x0012d0, 1, 0x04, 0x00000003 },
945 { 0x0012d4, 1, 0x04, 0x00000002 },
946 { 0x001684, 2, 0x04, 0x00000000 },
947 { 0x000dac, 2, 0x04, 0x00001b02 },
948 { 0x000db4, 1, 0x04, 0x00000000 },
949 { 0x00168c, 1, 0x04, 0x00000000 },
950 { 0x0015bc, 1, 0x04, 0x00000000 },
951 { 0x00156c, 1, 0x04, 0x00000000 },
952 { 0x00187c, 1, 0x04, 0x00000000 },
953 { 0x001110, 1, 0x04, 0x00000001 },
954 { 0x000dc0, 3, 0x04, 0x00000000 },
955 { 0x001234, 1, 0x04, 0x00000000 },
956 { 0x001690, 1, 0x04, 0x00000000 },
957 { 0x0012ac, 1, 0x04, 0x00000001 },
958 { 0x0002c4, 1, 0x04, 0x00000000 },
959 { 0x000790, 5, 0x04, 0x00000000 },
960 { 0x00077c, 1, 0x04, 0x00000000 },
961 { 0x001000, 1, 0x04, 0x00000010 },
962 { 0x0010fc, 1, 0x04, 0x00000000 },
963 { 0x001290, 1, 0x04, 0x00000000 },
964 { 0x000218, 1, 0x04, 0x00000010 },
965 { 0x0012d8, 1, 0x04, 0x00000000 },
966 { 0x0012dc, 1, 0x04, 0x00000010 },
967 { 0x000d94, 1, 0x04, 0x00000001 },
968 { 0x00155c, 2, 0x04, 0x00000000 },
969 { 0x001564, 1, 0x04, 0x00000fff },
970 { 0x001574, 2, 0x04, 0x00000000 },
971 { 0x00157c, 1, 0x04, 0x000fffff },
972 { 0x001354, 1, 0x04, 0x00000000 },
973 { 0x001610, 1, 0x04, 0x00000012 },
974 { 0x001608, 2, 0x04, 0x00000000 },
975 { 0x00260c, 1, 0x04, 0x00000000 },
976 { 0x0007ac, 1, 0x04, 0x00000000 },
977 { 0x00162c, 1, 0x04, 0x00000003 },
978 { 0x000210, 1, 0x04, 0x00000000 },
979 { 0x000320, 1, 0x04, 0x00000000 },
980 { 0x000324, 6, 0x04, 0x3f800000 },
981 { 0x000750, 1, 0x04, 0x00000000 },
982 { 0x000760, 1, 0x04, 0x39291909 },
983 { 0x000764, 1, 0x04, 0x79695949 },
984 { 0x000768, 1, 0x04, 0xb9a99989 },
985 { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
986 { 0x000770, 1, 0x04, 0x30201000 },
987 { 0x000774, 1, 0x04, 0x70605040 },
988 { 0x000778, 1, 0x04, 0x00009080 },
989 { 0x000780, 1, 0x04, 0x39291909 },
990 { 0x000784, 1, 0x04, 0x79695949 },
991 { 0x000788, 1, 0x04, 0xb9a99989 },
992 { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
993 { 0x0007d0, 1, 0x04, 0x30201000 },
994 { 0x0007d4, 1, 0x04, 0x70605040 },
995 { 0x0007d8, 1, 0x04, 0x00009080 },
996 { 0x00037c, 1, 0x04, 0x00000001 },
997 { 0x000740, 2, 0x04, 0x00000000 },
998 { 0x002600, 1, 0x04, 0x00000000 },
999 { 0x001918, 1, 0x04, 0x00000000 },
1000 { 0x00191c, 1, 0x04, 0x00000900 },
1001 { 0x001920, 1, 0x04, 0x00000405 },
1002 { 0x001308, 1, 0x04, 0x00000001 },
1003 { 0x001924, 1, 0x04, 0x00000000 },
1004 { 0x0013ac, 1, 0x04, 0x00000000 },
1005 { 0x00192c, 1, 0x04, 0x00000001 },
1006 { 0x00193c, 1, 0x04, 0x00002c1c },
1007 { 0x000d7c, 1, 0x04, 0x00000000 },
1008 { 0x000f8c, 1, 0x04, 0x00000000 },
1009 { 0x0002c0, 1, 0x04, 0x00000001 },
1010 { 0x001510, 1, 0x04, 0x00000000 },
1011 { 0x001940, 1, 0x04, 0x00000000 },
1012 { 0x000ff4, 2, 0x04, 0x00000000 },
1013 { 0x00194c, 2, 0x04, 0x00000000 },
1014 { 0x001968, 1, 0x04, 0x00000000 },
1015 { 0x001590, 1, 0x04, 0x0000003f },
1016 { 0x0007e8, 4, 0x04, 0x00000000 },
1017 { 0x00196c, 1, 0x04, 0x00000011 },
1018 { 0x0002e4, 1, 0x04, 0x0000b001 },
1019 { 0x00036c, 2, 0x04, 0x00000000 },
1020 { 0x00197c, 1, 0x04, 0x00000000 },
1021 { 0x000fcc, 2, 0x04, 0x00000000 },
1022 { 0x0002d8, 1, 0x04, 0x00000040 },
1023 { 0x001980, 1, 0x04, 0x00000080 },
1024 { 0x001504, 1, 0x04, 0x00000080 },
1025 { 0x001984, 1, 0x04, 0x00000000 },
1026 { 0x000300, 1, 0x04, 0x00000001 },
1027 { 0x0013a8, 1, 0x04, 0x00000000 },
1028 { 0x0012ec, 1, 0x04, 0x00000000 },
1029 { 0x001310, 1, 0x04, 0x00000000 },
1030 { 0x001314, 1, 0x04, 0x00000001 },
1031 { 0x001380, 1, 0x04, 0x00000000 },
1032 { 0x001384, 4, 0x04, 0x00000001 },
1033 { 0x001394, 1, 0x04, 0x00000000 },
1034 { 0x00139c, 1, 0x04, 0x00000000 },
1035 { 0x001398, 1, 0x04, 0x00000000 },
1036 { 0x001594, 1, 0x04, 0x00000000 },
1037 { 0x001598, 4, 0x04, 0x00000001 },
1038 { 0x000f54, 3, 0x04, 0x00000000 },
1039 { 0x0019bc, 1, 0x04, 0x00000000 },
1040 { 0x000f9c, 2, 0x04, 0x00000000 },
1041 { 0x0012cc, 1, 0x04, 0x00000000 },
1042 { 0x0012e8, 1, 0x04, 0x00000000 },
1043 { 0x00130c, 1, 0x04, 0x00000001 },
1044 { 0x001360, 8, 0x04, 0x00000000 },
1045 { 0x00133c, 2, 0x04, 0x00000001 },
1046 { 0x001344, 1, 0x04, 0x00000002 },
1047 { 0x001348, 2, 0x04, 0x00000001 },
1048 { 0x001350, 1, 0x04, 0x00000002 },
1049 { 0x001358, 1, 0x04, 0x00000001 },
1050 { 0x0012e4, 1, 0x04, 0x00000000 },
1051 { 0x00131c, 4, 0x04, 0x00000000 },
1052 { 0x0019c0, 1, 0x04, 0x00000000 },
1053 { 0x001140, 1, 0x04, 0x00000000 },
1054 { 0x0019c4, 1, 0x04, 0x00000000 },
1055 { 0x0019c8, 1, 0x04, 0x00001500 },
1056 { 0x00135c, 1, 0x04, 0x00000000 },
1057 { 0x000f90, 1, 0x04, 0x00000000 },
1058 { 0x0019e0, 8, 0x04, 0x00000001 },
1059 { 0x0019cc, 1, 0x04, 0x00000001 },
1060 { 0x0015b8, 1, 0x04, 0x00000000 },
1061 { 0x001a00, 1, 0x04, 0x00001111 },
1062 { 0x001a04, 7, 0x04, 0x00000000 },
1063 { 0x000d6c, 2, 0x04, 0xffff0000 },
1064 { 0x0010f8, 1, 0x04, 0x00001010 },
1065 { 0x000d80, 5, 0x04, 0x00000000 },
1066 { 0x000da0, 1, 0x04, 0x00000000 },
1067 { 0x0007a4, 2, 0x04, 0x00000000 },
1068 { 0x001508, 1, 0x04, 0x80000000 },
1069 { 0x00150c, 1, 0x04, 0x40000000 },
1070 { 0x001668, 1, 0x04, 0x00000000 },
1071 { 0x000318, 2, 0x04, 0x00000008 },
1072 { 0x000d9c, 1, 0x04, 0x00000001 },
1073 { 0x000ddc, 1, 0x04, 0x00000002 },
1074 { 0x000374, 1, 0x04, 0x00000000 },
1075 { 0x000378, 1, 0x04, 0x00000020 },
1076 { 0x0007dc, 1, 0x04, 0x00000000 },
1077 { 0x00074c, 1, 0x04, 0x00000055 },
1078 { 0x001420, 1, 0x04, 0x00000003 },
1079 { 0x0017bc, 2, 0x04, 0x00000000 },
1080 { 0x0017c4, 1, 0x04, 0x00000001 },
1081 { 0x001008, 1, 0x04, 0x00000008 },
1082 { 0x00100c, 1, 0x04, 0x00000040 },
1083 { 0x001010, 1, 0x04, 0x0000012c },
1084 { 0x000d60, 1, 0x04, 0x00000040 },
1085 { 0x00075c, 1, 0x04, 0x00000003 },
1086 { 0x001018, 1, 0x04, 0x00000020 },
1087 { 0x00101c, 1, 0x04, 0x00000001 },
1088 { 0x001020, 1, 0x04, 0x00000020 },
1089 { 0x001024, 1, 0x04, 0x00000001 },
1090 { 0x001444, 3, 0x04, 0x00000000 },
1091 { 0x000360, 1, 0x04, 0x20164010 },
1092 { 0x000364, 1, 0x04, 0x00000020 },
1093 { 0x000368, 1, 0x04, 0x00000000 },
1094 { 0x000de4, 1, 0x04, 0x00000000 },
1095 { 0x000204, 1, 0x04, 0x00000006 },
1096 { 0x000208, 1, 0x04, 0x00000000 },
1097 { 0x0002cc, 2, 0x04, 0x003fffff },
1098 { 0x001220, 1, 0x04, 0x00000005 },
1099 { 0x000fdc, 1, 0x04, 0x00000000 },
1100 { 0x000f98, 1, 0x04, 0x00400008 },
1101 { 0x001284, 1, 0x04, 0x08000080 },
1102 { 0x001450, 1, 0x04, 0x00400008 },
1103 { 0x001454, 1, 0x04, 0x08000080 },
1104 { 0x000214, 1, 0x04, 0x00000000 },
1105 {} 284 {}
1106 }; 285 };
1107 286
1108 static struct nvc0_graph_init 287 static const struct nvc0_graph_init
1109 nv108_grctx_init_unk40xx[] = { 288 nv108_grctx_init_fe_0[] = {
1110 { 0x404004, 8, 0x04, 0x00000000 }, 289 { 0x404004, 8, 0x04, 0x00000000 },
1111 { 0x404024, 1, 0x04, 0x0000e000 }, 290 { 0x404024, 1, 0x04, 0x0000e000 },
1112 { 0x404028, 8, 0x04, 0x00000000 }, 291 { 0x404028, 8, 0x04, 0x00000000 },
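Each row in the register lists above and below is a four-field initialiser: first register offset, number of registers, byte stride, and the value written. A minimal sketch of the entry layout, with field names assumed from the nouveau sources of this period rather than shown in this hunk:

#include <linux/types.h>

/* Assumed layout of one table row. An entry expands to `count' writes of
 * `data', starting at `addr' and stepping `pitch' bytes, so the row
 * { 0x404004, 8, 0x04, 0x00000000 } zeroes the eight words
 * 0x404004..0x404020. */
struct nvc0_graph_init {
	u32 addr;   /* first register offset */
	u8  count;  /* number of registers written */
	u8  pitch;  /* byte stride between consecutive registers */
	u32 data;   /* value written to every register in the run */
};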
@@ -1132,8 +311,8 @@ nv108_grctx_init_unk40xx[] = {
1132 {} 311 {}
1133 }; 312 };
1134 313
1135 static struct nvc0_graph_init 314 static const struct nvc0_graph_init
1136 nv108_grctx_init_unk58xx[] = { 315 nv108_grctx_init_ds_0[] = {
1137 { 0x405800, 1, 0x04, 0x0f8000bf }, 316 { 0x405800, 1, 0x04, 0x0f8000bf },
1138 { 0x405830, 1, 0x04, 0x02180648 }, 317 { 0x405830, 1, 0x04, 0x02180648 },
1139 { 0x405834, 1, 0x04, 0x08000000 }, 318 { 0x405834, 1, 0x04, 0x08000000 },
@@ -1146,8 +325,10 @@ nv108_grctx_init_unk58xx[] = {
1146 {} 325 {}
1147 }; 326 };
1148 327
1149 static struct nvc0_graph_init 328 static const struct nvc0_graph_init
1150 nv108_grctx_init_unk64xx[] = { 329 nv108_grctx_init_pd_0[] = {
330 { 0x406020, 1, 0x04, 0x034103c1 },
331 { 0x406028, 4, 0x04, 0x00000001 },
1151 { 0x4064a8, 1, 0x04, 0x00000000 }, 332 { 0x4064a8, 1, 0x04, 0x00000000 },
1152 { 0x4064ac, 1, 0x04, 0x00003fff }, 333 { 0x4064ac, 1, 0x04, 0x00003fff },
1153 { 0x4064b0, 3, 0x04, 0x00000000 }, 334 { 0x4064b0, 3, 0x04, 0x00000000 },
@@ -1159,8 +340,8 @@ nv108_grctx_init_unk64xx[] = {
1159 {} 340 {}
1160 }; 341 };
1161 342
1162 static struct nvc0_graph_init 343 const struct nvc0_graph_init
1163 nv108_grctx_init_unk78xx[] = { 344 nv108_grctx_init_rstr2d_0[] = {
1164 { 0x407804, 1, 0x04, 0x00000063 }, 345 { 0x407804, 1, 0x04, 0x00000063 },
1165 { 0x40780c, 1, 0x04, 0x0a418820 }, 346 { 0x40780c, 1, 0x04, 0x0a418820 },
1166 { 0x407810, 1, 0x04, 0x062080e6 }, 347 { 0x407810, 1, 0x04, 0x062080e6 },
@@ -1172,8 +353,8 @@ nv108_grctx_init_unk78xx[] = {
1172 {} 353 {}
1173 }; 354 };
1174 355
1175 static struct nvc0_graph_init 356 static const struct nvc0_graph_init
1176 nv108_grctx_init_unk88xx[] = { 357 nv108_grctx_init_be_0[] = {
1177 { 0x408800, 1, 0x04, 0x32802a3c }, 358 { 0x408800, 1, 0x04, 0x32802a3c },
1178 { 0x408804, 1, 0x04, 0x00000040 }, 359 { 0x408804, 1, 0x04, 0x00000040 },
1179 { 0x408808, 1, 0x04, 0x1003e005 }, 360 { 0x408808, 1, 0x04, 0x1003e005 },
@@ -1185,9 +366,23 @@ nv108_grctx_init_unk88xx[] = {
1185 {} 366 {}
1186 }; 367 };
1187 368
1188 static struct nvc0_graph_init 369 static const struct nvc0_graph_pack
1189 nv108_grctx_init_gpc_0[] = { 370 nv108_grctx_pack_hub[] = {
1190 { 0x418380, 1, 0x04, 0x00000016 }, 371 { nvc0_grctx_init_main_0 },
372 { nv108_grctx_init_fe_0 },
373 { nvf0_grctx_init_pri_0 },
374 { nve4_grctx_init_memfmt_0 },
375 { nv108_grctx_init_ds_0 },
376 { nvf0_grctx_init_cwd_0 },
377 { nv108_grctx_init_pd_0 },
378 { nv108_grctx_init_rstr2d_0 },
379 { nve4_grctx_init_scc_0 },
380 { nv108_grctx_init_be_0 },
381 {}
382 };
383
384 const struct nvc0_graph_init
385 nv108_grctx_init_prop_0[] = {
1191 { 0x418400, 1, 0x04, 0x38005e00 }, 386 { 0x418400, 1, 0x04, 0x38005e00 },
1192 { 0x418404, 1, 0x04, 0x71e0ffff }, 387 { 0x418404, 1, 0x04, 0x71e0ffff },
1193 { 0x41840c, 1, 0x04, 0x00001008 }, 388 { 0x41840c, 1, 0x04, 0x00001008 },
@@ -1196,11 +391,21 @@ nv108_grctx_init_gpc_0[] = {
1196 { 0x418450, 6, 0x04, 0x00000000 }, 391 { 0x418450, 6, 0x04, 0x00000000 },
1197 { 0x418468, 1, 0x04, 0x00000001 }, 392 { 0x418468, 1, 0x04, 0x00000001 },
1198 { 0x41846c, 2, 0x04, 0x00000000 }, 393 { 0x41846c, 2, 0x04, 0x00000000 },
394 {}
395 };
396
397 static const struct nvc0_graph_init
398 nv108_grctx_init_gpc_unk_1[] = {
1199 { 0x418600, 1, 0x04, 0x0000007f }, 399 { 0x418600, 1, 0x04, 0x0000007f },
1200 { 0x418684, 1, 0x04, 0x0000001f }, 400 { 0x418684, 1, 0x04, 0x0000001f },
1201 { 0x418700, 1, 0x04, 0x00000002 }, 401 { 0x418700, 1, 0x04, 0x00000002 },
1202 { 0x418704, 2, 0x04, 0x00000080 }, 402 { 0x418704, 2, 0x04, 0x00000080 },
1203 { 0x41870c, 2, 0x04, 0x00000000 }, 403 { 0x41870c, 2, 0x04, 0x00000000 },
404 {}
405 };
406
407 static const struct nvc0_graph_init
408 nv108_grctx_init_setup_0[] = {
1204 { 0x418800, 1, 0x04, 0x7006863a }, 409 { 0x418800, 1, 0x04, 0x7006863a },
1205 { 0x418808, 1, 0x04, 0x00000000 }, 410 { 0x418808, 1, 0x04, 0x00000000 },
1206 { 0x41880c, 1, 0x04, 0x00000030 }, 411 { 0x41880c, 1, 0x04, 0x00000030 },
@@ -1211,10 +416,11 @@ nv108_grctx_init_gpc_0[] = {
1211 { 0x4188e0, 1, 0x04, 0x01000000 }, 416 { 0x4188e0, 1, 0x04, 0x01000000 },
1212 { 0x4188e8, 5, 0x04, 0x00000000 }, 417 { 0x4188e8, 5, 0x04, 0x00000000 },
1213 { 0x4188fc, 1, 0x04, 0x20100058 }, 418 { 0x4188fc, 1, 0x04, 0x20100058 },
1214 { 0x41891c, 1, 0x04, 0x00ff00ff }, 419 {}
1215 { 0x418924, 1, 0x04, 0x00000000 }, 420 };
1216 { 0x418928, 1, 0x04, 0x00ffff00 }, 421
1217 { 0x41892c, 1, 0x04, 0x0000ff00 }, 422 const struct nvc0_graph_init
423 nv108_grctx_init_crstr_0[] = {
1218 { 0x418b00, 1, 0x04, 0x0000001e }, 424 { 0x418b00, 1, 0x04, 0x0000001e },
1219 { 0x418b08, 1, 0x04, 0x0a418820 }, 425 { 0x418b08, 1, 0x04, 0x0a418820 },
1220 { 0x418b0c, 1, 0x04, 0x062080e6 }, 426 { 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -1223,24 +429,36 @@ nv108_grctx_init_gpc_0[] = {
1223 { 0x418b18, 1, 0x04, 0x0a418820 }, 429 { 0x418b18, 1, 0x04, 0x0a418820 },
1224 { 0x418b1c, 1, 0x04, 0x000000e6 }, 430 { 0x418b1c, 1, 0x04, 0x000000e6 },
1225 { 0x418bb8, 1, 0x04, 0x00000103 }, 431 { 0x418bb8, 1, 0x04, 0x00000103 },
432 {}
433 };
434
435 static const struct nvc0_graph_init
436 nv108_grctx_init_gpm_0[] = {
1226 { 0x418c08, 1, 0x04, 0x00000001 }, 437 { 0x418c08, 1, 0x04, 0x00000001 },
1227 { 0x418c10, 8, 0x04, 0x00000000 }, 438 { 0x418c10, 8, 0x04, 0x00000000 },
1228 { 0x418c40, 1, 0x04, 0xffffffff }, 439 { 0x418c40, 1, 0x04, 0xffffffff },
1229 { 0x418c6c, 1, 0x04, 0x00000001 }, 440 { 0x418c6c, 1, 0x04, 0x00000001 },
1230 { 0x418c80, 1, 0x04, 0x2020000c }, 441 { 0x418c80, 1, 0x04, 0x2020000c },
1231 { 0x418c8c, 1, 0x04, 0x00000001 }, 442 { 0x418c8c, 1, 0x04, 0x00000001 },
1232 { 0x418d24, 1, 0x04, 0x00000000 },
1233 { 0x419000, 1, 0x04, 0x00000780 },
1234 { 0x419004, 2, 0x04, 0x00000000 },
1235 { 0x419014, 1, 0x04, 0x00000004 },
1236 {} 443 {}
1237 }; 444 };
1238 445
1239 static struct nvc0_graph_init 446 static const struct nvc0_graph_pack
1240 nv108_grctx_init_tpc[] = { 447 nv108_grctx_pack_gpc[] = {
1241 { 0x419848, 1, 0x04, 0x00000000 }, 448 { nvc0_grctx_init_gpc_unk_0 },
1242 { 0x419864, 1, 0x04, 0x00000129 }, 449 { nv108_grctx_init_prop_0 },
1243 { 0x419888, 1, 0x04, 0x00000000 }, 450 { nv108_grctx_init_gpc_unk_1 },
451 { nv108_grctx_init_setup_0 },
452 { nvc0_grctx_init_zcull_0 },
453 { nv108_grctx_init_crstr_0 },
454 { nv108_grctx_init_gpm_0 },
455 { nvf0_grctx_init_gpc_unk_2 },
456 { nvc0_grctx_init_gcc_0 },
457 {}
458 };
459
460 static const struct nvc0_graph_init
461 nv108_grctx_init_tex_0[] = {
1244 { 0x419a00, 1, 0x04, 0x000100f0 }, 462 { 0x419a00, 1, 0x04, 0x000100f0 },
1245 { 0x419a04, 1, 0x04, 0x00000001 }, 463 { 0x419a04, 1, 0x04, 0x00000001 },
1246 { 0x419a08, 1, 0x04, 0x00000421 }, 464 { 0x419a08, 1, 0x04, 0x00000421 },
@@ -1251,14 +469,11 @@ nv108_grctx_init_tpc[] = {
1251 { 0x419a20, 1, 0x04, 0x00000800 }, 469 { 0x419a20, 1, 0x04, 0x00000800 },
1252 { 0x419a30, 1, 0x04, 0x00000001 }, 470 { 0x419a30, 1, 0x04, 0x00000001 },
1253 { 0x419ac4, 1, 0x04, 0x0037f440 }, 471 { 0x419ac4, 1, 0x04, 0x0037f440 },
1254 { 0x419c00, 1, 0x04, 0x0000001a }, 472 {}
1255 { 0x419c04, 1, 0x04, 0x80000006 }, 473 };
1256 { 0x419c08, 1, 0x04, 0x00000002 }, 474
1257 { 0x419c20, 1, 0x04, 0x00000000 }, 475 static const struct nvc0_graph_init
1258 { 0x419c24, 1, 0x04, 0x00084210 }, 476 nv108_grctx_init_sm_0[] = {
1259 { 0x419c28, 1, 0x04, 0x3efbefbe },
1260 { 0x419ce8, 1, 0x04, 0x00000000 },
1261 { 0x419cf4, 1, 0x04, 0x00000203 },
1262 { 0x419e04, 1, 0x04, 0x00000000 }, 477 { 0x419e04, 1, 0x04, 0x00000000 },
1263 { 0x419e08, 1, 0x04, 0x0000001d }, 478 { 0x419e08, 1, 0x04, 0x0000001d },
1264 { 0x419e0c, 1, 0x04, 0x00000000 }, 479 { 0x419e0c, 1, 0x04, 0x00000000 },
@@ -1272,7 +487,7 @@ nv108_grctx_init_tpc[] = {
1272 { 0x419e68, 1, 0x04, 0x00000002 }, 487 { 0x419e68, 1, 0x04, 0x00000002 },
1273 { 0x419e6c, 12, 0x04, 0x00000000 }, 488 { 0x419e6c, 12, 0x04, 0x00000000 },
1274 { 0x419eac, 1, 0x04, 0x00001f8f }, 489 { 0x419eac, 1, 0x04, 0x00001f8f },
1275 { 0x419eb0, 1, 0x04, 0x0db00da0 }, 490 { 0x419eb0, 1, 0x04, 0x0db00d2f },
1276 { 0x419eb8, 1, 0x04, 0x00000000 }, 491 { 0x419eb8, 1, 0x04, 0x00000000 },
1277 { 0x419ec8, 1, 0x04, 0x0001304f }, 492 { 0x419ec8, 1, 0x04, 0x0001304f },
1278 { 0x419f30, 4, 0x04, 0x00000000 }, 493 { 0x419f30, 4, 0x04, 0x00000000 },
@@ -1285,25 +500,37 @@ nv108_grctx_init_tpc[] = {
1285 {} 500 {}
1286 }; 501 };
1287 502
1288 static struct nvc0_graph_init 503 static const struct nvc0_graph_pack
1289 nv108_grctx_init_unk[] = { 504 nv108_grctx_pack_tpc[] = {
1290 { 0x41be24, 1, 0x04, 0x00000006 }, 505 { nvd7_grctx_init_pe_0 },
506 { nv108_grctx_init_tex_0 },
507 { nvf0_grctx_init_mpc_0 },
508 { nvf0_grctx_init_l1c_0 },
509 { nv108_grctx_init_sm_0 },
510 {}
511 };
512
513 static const struct nvc0_graph_init
514 nv108_grctx_init_cbm_0[] = {
1291 { 0x41bec0, 1, 0x04, 0x10000000 }, 515 { 0x41bec0, 1, 0x04, 0x10000000 },
1292 { 0x41bec4, 1, 0x04, 0x00037f7f }, 516 { 0x41bec4, 1, 0x04, 0x00037f7f },
1293 { 0x41bee4, 1, 0x04, 0x00000000 }, 517 { 0x41bee4, 1, 0x04, 0x00000000 },
1294 { 0x41bef0, 1, 0x04, 0x000003ff }, 518 { 0x41bef0, 1, 0x04, 0x000003ff },
1295 { 0x41bf00, 1, 0x04, 0x0a418820 },
1296 { 0x41bf04, 1, 0x04, 0x062080e6 },
1297 { 0x41bf08, 1, 0x04, 0x020398a4 },
1298 { 0x41bf0c, 1, 0x04, 0x0e629062 },
1299 { 0x41bf10, 1, 0x04, 0x0a418820 },
1300 { 0x41bf14, 1, 0x04, 0x000000e6 },
1301 { 0x41bfd0, 1, 0x04, 0x00900103 },
1302 { 0x41bfe0, 1, 0x04, 0x00400001 },
1303 { 0x41bfe4, 1, 0x04, 0x00000000 },
1304 {} 519 {}
1305 }; 520 };
1306 521
522 static const struct nvc0_graph_pack
523 nv108_grctx_pack_ppc[] = {
524 { nve4_grctx_init_pes_0 },
525 { nv108_grctx_init_cbm_0 },
526 { nvd7_grctx_init_wwdx_0 },
527 {}
528 };
529
530 /*******************************************************************************
531 * PGRAPH context implementation
532 ******************************************************************************/
533
1307 static void 534 static void
1308 nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 535 nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1309 { 536 {
@@ -1346,47 +573,6 @@ nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1346 mmio_list(0x17e920, 0x00090d08, 0, 0); 573 mmio_list(0x17e920, 0x00090d08, 0, 0);
1347 } 574 }
1348 575
1349 static struct nvc0_graph_init *
1350 nv108_grctx_init_hub[] = {
1351 nvc0_grctx_init_base,
1352 nv108_grctx_init_unk40xx,
1353 nvf0_grctx_init_unk44xx,
1354 nve4_grctx_init_unk46xx,
1355 nve4_grctx_init_unk47xx,
1356 nv108_grctx_init_unk58xx,
1357 nvf0_grctx_init_unk5bxx,
1358 nvf0_grctx_init_unk60xx,
1359 nv108_grctx_init_unk64xx,
1360 nv108_grctx_init_unk78xx,
1361 nve4_grctx_init_unk80xx,
1362 nv108_grctx_init_unk88xx,
1363 NULL
1364 };
1365
1366 struct nvc0_graph_init *
1367 nv108_grctx_init_gpc[] = {
1368 nv108_grctx_init_gpc_0,
1369 nvc0_grctx_init_gpc_1,
1370 nv108_grctx_init_tpc,
1371 nv108_grctx_init_unk,
1372 NULL
1373 };
1374
1375 struct nvc0_graph_init
1376 nv108_grctx_init_mthd_magic[] = {
1377 { 0x3410, 1, 0x04, 0x8e0e2006 },
1378 { 0x3414, 1, 0x04, 0x00000038 },
1379 {}
1380 };
1381
1382 static struct nvc0_graph_mthd
1383 nv108_grctx_init_mthd[] = {
1384 { 0xa197, nv108_grctx_init_a197, },
1385 { 0x902d, nvc0_grctx_init_902d, },
1386 { 0x902d, nv108_grctx_init_mthd_magic, },
1387 {}
1388 };
1389
1390 struct nouveau_oclass * 576 struct nouveau_oclass *
1391 nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { 577 nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
1392 .base.handle = NV_ENGCTX(GR, 0x08), 578 .base.handle = NV_ENGCTX(GR, 0x08),
@@ -1398,11 +584,14 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
1398 .rd32 = _nouveau_graph_context_rd32, 584 .rd32 = _nouveau_graph_context_rd32,
1399 .wr32 = _nouveau_graph_context_wr32, 585 .wr32 = _nouveau_graph_context_wr32,
1400 }, 586 },
1401 .main = nve4_grctx_generate_main, 587 .main = nve4_grctx_generate_main,
1402 .mods = nv108_grctx_generate_mods, 588 .mods = nv108_grctx_generate_mods,
1403 .unkn = nve4_grctx_generate_unkn, 589 .unkn = nve4_grctx_generate_unkn,
1404 .hub = nv108_grctx_init_hub, 590 .hub = nv108_grctx_pack_hub,
1405 .gpc = nv108_grctx_init_gpc, 591 .gpc = nv108_grctx_pack_gpc,
1406 .icmd = nv108_grctx_init_icmd, 592 .zcull = nvc0_grctx_pack_zcull,
1407 .mthd = nv108_grctx_init_mthd, 593 .tpc = nv108_grctx_pack_tpc,
594 .ppc = nv108_grctx_pack_ppc,
595 .icmd = nv108_grctx_pack_icmd,
596 .mthd = nvf0_grctx_pack_mthd,
1408 }.base; 597 }.base;
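The oclass above now hangs register state off `*_pack_*' lists rather than NULL-terminated arrays of init tables. A sketch of the pack indirection this series converts to, with the declaration and iteration helper assumed from the surrounding nouveau headers rather than visible in this diff:

/* Assumed shape of a pack: a terminated list of init tables, optionally
 * tagged with an object class for method packs (e.g. 0x9097). */
struct nvc0_graph_pack {
	const struct nvc0_graph_init *init;
	u32 type;
};

/* Visit every init row of every table in a pack list; both loops stop at
 * the empty terminating entry. */
#define pack_for_each_init(init, pack, head)                                   \
	for (pack = head; pack && pack->init; pack++)                          \
		for (init = pack->init; init && init->count; init++)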
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index fe67415c3e17..833a96508c4e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -22,10 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25 #include "nvc0.h" 25 #include "ctxnvc0.h"
26 26
27 struct nvc0_graph_init 27 /*******************************************************************************
28 nvc0_grctx_init_icmd[] = { 28  * PGRAPH context register lists
29 ******************************************************************************/
30
31 static const struct nvc0_graph_init
32 nvc0_grctx_init_icmd_0[] = {
29 { 0x001000, 1, 0x01, 0x00000004 }, 33 { 0x001000, 1, 0x01, 0x00000004 },
30 { 0x0000a9, 1, 0x01, 0x0000ffff }, 34 { 0x0000a9, 1, 0x01, 0x0000ffff },
31 { 0x000038, 1, 0x01, 0x0fac6881 }, 35 { 0x000038, 1, 0x01, 0x0fac6881 },
@@ -140,8 +144,7 @@ nvc0_grctx_init_icmd[] = {
140 { 0x000586, 1, 0x01, 0x00000040 }, 144 { 0x000586, 1, 0x01, 0x00000040 },
141 { 0x000582, 2, 0x01, 0x00000080 }, 145 { 0x000582, 2, 0x01, 0x00000080 },
142 { 0x0005c2, 1, 0x01, 0x00000001 }, 146 { 0x0005c2, 1, 0x01, 0x00000001 },
143 { 0x000638, 1, 0x01, 0x00000001 }, 147 { 0x000638, 2, 0x01, 0x00000001 },
144 { 0x000639, 1, 0x01, 0x00000001 },
145 { 0x00063a, 1, 0x01, 0x00000002 }, 148 { 0x00063a, 1, 0x01, 0x00000002 },
146 { 0x00063b, 2, 0x01, 0x00000001 }, 149 { 0x00063b, 2, 0x01, 0x00000001 },
147 { 0x00063d, 1, 0x01, 0x00000002 }, 150 { 0x00063d, 1, 0x01, 0x00000002 },
@@ -201,15 +204,13 @@ nvc0_grctx_init_icmd[] = {
201 { 0x000787, 1, 0x01, 0x000000cf }, 204 { 0x000787, 1, 0x01, 0x000000cf },
202 { 0x00078c, 1, 0x01, 0x00000008 }, 205 { 0x00078c, 1, 0x01, 0x00000008 },
203 { 0x000792, 1, 0x01, 0x00000001 }, 206 { 0x000792, 1, 0x01, 0x00000001 },
204 { 0x000794, 1, 0x01, 0x00000001 }, 207 { 0x000794, 3, 0x01, 0x00000001 },
205 { 0x000795, 2, 0x01, 0x00000001 },
206 { 0x000797, 1, 0x01, 0x000000cf }, 208 { 0x000797, 1, 0x01, 0x000000cf },
207 { 0x000836, 1, 0x01, 0x00000001 }, 209 { 0x000836, 1, 0x01, 0x00000001 },
208 { 0x00079a, 1, 0x01, 0x00000002 }, 210 { 0x00079a, 1, 0x01, 0x00000002 },
209 { 0x000833, 1, 0x01, 0x04444480 }, 211 { 0x000833, 1, 0x01, 0x04444480 },
210 { 0x0007a1, 1, 0x01, 0x00000001 }, 212 { 0x0007a1, 1, 0x01, 0x00000001 },
211 { 0x0007a3, 1, 0x01, 0x00000001 }, 213 { 0x0007a3, 3, 0x01, 0x00000001 },
212 { 0x0007a4, 2, 0x01, 0x00000001 },
213 { 0x000831, 1, 0x01, 0x00000004 }, 214 { 0x000831, 1, 0x01, 0x00000004 },
214 { 0x00080c, 1, 0x01, 0x00000002 }, 215 { 0x00080c, 1, 0x01, 0x00000002 },
215 { 0x00080d, 2, 0x01, 0x00000100 }, 216 { 0x00080d, 2, 0x01, 0x00000100 },
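This hunk folds adjacent icmd rows carrying the same value into a single run (for instance { 0x000794, 1, ... } plus { 0x000795, 2, ... } become { 0x000794, 3, ... }). A standalone check, using a hypothetical `row' mirror of the table layout, that the merged row expands to the same method writes:

#include <stdio.h>

/* Hypothetical mirror of a table row: addr, count, pitch, data. */
struct row { unsigned addr, count, pitch, data; };

static void expand(const struct row *r)
{
	for (unsigned i = 0; i < r->count; i++)
		printf("mthd 0x%06x <- 0x%08x\n", r->addr + i * r->pitch, r->data);
}

int main(void)
{
	const struct row before[2] = { { 0x000794, 1, 0x01, 0x00000001 },
	                               { 0x000795, 2, 0x01, 0x00000001 } };
	const struct row merged    =   { 0x000794, 3, 0x01, 0x00000001 };

	expand(&before[0]);	/* 0x000794 */
	expand(&before[1]);	/* 0x000795, 0x000796 */
	expand(&merged);	/* 0x000794, 0x000795, 0x000796 */
	return 0;
}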
@@ -235,14 +236,12 @@ nvc0_grctx_init_icmd[] = {
235 { 0x0006b1, 1, 0x01, 0x00000011 }, 236 { 0x0006b1, 1, 0x01, 0x00000011 },
236 { 0x00078c, 1, 0x01, 0x00000008 }, 237 { 0x00078c, 1, 0x01, 0x00000008 },
237 { 0x000792, 1, 0x01, 0x00000001 }, 238 { 0x000792, 1, 0x01, 0x00000001 },
238 { 0x000794, 1, 0x01, 0x00000001 }, 239 { 0x000794, 3, 0x01, 0x00000001 },
239 { 0x000795, 2, 0x01, 0x00000001 },
240 { 0x000797, 1, 0x01, 0x000000cf }, 240 { 0x000797, 1, 0x01, 0x000000cf },
241 { 0x00079a, 1, 0x01, 0x00000002 }, 241 { 0x00079a, 1, 0x01, 0x00000002 },
242 { 0x000833, 1, 0x01, 0x04444480 }, 242 { 0x000833, 1, 0x01, 0x04444480 },
243 { 0x0007a1, 1, 0x01, 0x00000001 }, 243 { 0x0007a1, 1, 0x01, 0x00000001 },
244 { 0x0007a3, 1, 0x01, 0x00000001 }, 244 { 0x0007a3, 3, 0x01, 0x00000001 },
245 { 0x0007a4, 2, 0x01, 0x00000001 },
246 { 0x000831, 1, 0x01, 0x00000004 }, 245 { 0x000831, 1, 0x01, 0x00000004 },
247 { 0x01e100, 1, 0x01, 0x00000001 }, 246 { 0x01e100, 1, 0x01, 0x00000001 },
248 { 0x001000, 1, 0x01, 0x00000014 }, 247 { 0x001000, 1, 0x01, 0x00000014 },
@@ -267,8 +266,14 @@ nvc0_grctx_init_icmd[] = {
267 {} 266 {}
268 }; 267 };
269 268
270 struct nvc0_graph_init 269 const struct nvc0_graph_pack
271 nvc0_grctx_init_9097[] = { 270 nvc0_grctx_pack_icmd[] = {
271 { nvc0_grctx_init_icmd_0 },
272 {}
273 };
274
275 static const struct nvc0_graph_init
276 nvc0_grctx_init_9097_0[] = {
272 { 0x000800, 8, 0x40, 0x00000000 }, 277 { 0x000800, 8, 0x40, 0x00000000 },
273 { 0x000804, 8, 0x40, 0x00000000 }, 278 { 0x000804, 8, 0x40, 0x00000000 },
274 { 0x000808, 8, 0x40, 0x00000400 }, 279 { 0x000808, 8, 0x40, 0x00000400 },
@@ -516,8 +521,7 @@ nvc0_grctx_init_9097[] = {
516 { 0x001350, 1, 0x04, 0x00000002 }, 521 { 0x001350, 1, 0x04, 0x00000002 },
517 { 0x001358, 1, 0x04, 0x00000001 }, 522 { 0x001358, 1, 0x04, 0x00000001 },
518 { 0x0012e4, 1, 0x04, 0x00000000 }, 523 { 0x0012e4, 1, 0x04, 0x00000000 },
519 { 0x00131c, 1, 0x04, 0x00000000 }, 524 { 0x00131c, 4, 0x04, 0x00000000 },
520 { 0x001320, 3, 0x04, 0x00000000 },
521 { 0x0019c0, 1, 0x04, 0x00000000 }, 525 { 0x0019c0, 1, 0x04, 0x00000000 },
522 { 0x001140, 1, 0x04, 0x00000000 }, 526 { 0x001140, 1, 0x04, 0x00000000 },
523 { 0x0019c4, 1, 0x04, 0x00000000 }, 527 { 0x0019c4, 1, 0x04, 0x00000000 },
@@ -571,8 +575,8 @@ nvc0_grctx_init_9097[] = {
571 {} 575 {}
572 }; 576 };
573 577
574 struct nvc0_graph_init 578 const struct nvc0_graph_init
575 nvc0_grctx_init_902d[] = { 579 nvc0_grctx_init_902d_0[] = {
576 { 0x000200, 1, 0x04, 0x000000cf }, 580 { 0x000200, 1, 0x04, 0x000000cf },
577 { 0x000204, 1, 0x04, 0x00000001 }, 581 { 0x000204, 1, 0x04, 0x00000001 },
578 { 0x000208, 1, 0x04, 0x00000020 }, 582 { 0x000208, 1, 0x04, 0x00000020 },
@@ -590,8 +594,8 @@ nvc0_grctx_init_902d[] = {
590 {} 594 {}
591 }; 595 };
592 596
593 struct nvc0_graph_init 597 const struct nvc0_graph_init
594 nvc0_grctx_init_9039[] = { 598 nvc0_grctx_init_9039_0[] = {
595 { 0x00030c, 3, 0x04, 0x00000000 }, 599 { 0x00030c, 3, 0x04, 0x00000000 },
596 { 0x000320, 1, 0x04, 0x00000000 }, 600 { 0x000320, 1, 0x04, 0x00000000 },
597 { 0x000238, 2, 0x04, 0x00000000 }, 601 { 0x000238, 2, 0x04, 0x00000000 },
@@ -599,8 +603,8 @@ nvc0_grctx_init_9039[] = {
599 {} 603 {}
600 }; 604 };
601 605
602 struct nvc0_graph_init 606 const struct nvc0_graph_init
603 nvc0_grctx_init_90c0[] = { 607 nvc0_grctx_init_90c0_0[] = {
604 { 0x00270c, 8, 0x20, 0x00000000 }, 608 { 0x00270c, 8, 0x20, 0x00000000 },
605 { 0x00030c, 1, 0x04, 0x00000001 }, 609 { 0x00030c, 1, 0x04, 0x00000001 },
606 { 0x001944, 1, 0x04, 0x00000000 }, 610 { 0x001944, 1, 0x04, 0x00000000 },
@@ -617,38 +621,44 @@ nvc0_grctx_init_90c0[] = {
617 {} 621 {}
618 }; 622 };
619 623
620 struct nvc0_graph_init 624 const struct nvc0_graph_pack
621 nvc0_grctx_init_base[] = { 625 nvc0_grctx_pack_mthd[] = {
626 { nvc0_grctx_init_9097_0, 0x9097 },
627 { nvc0_grctx_init_902d_0, 0x902d },
628 { nvc0_grctx_init_9039_0, 0x9039 },
629 { nvc0_grctx_init_90c0_0, 0x90c0 },
630 {}
631 };
632
633 const struct nvc0_graph_init
634 nvc0_grctx_init_main_0[] = {
622 { 0x400204, 2, 0x04, 0x00000000 }, 635 { 0x400204, 2, 0x04, 0x00000000 },
623 {} 636 {}
624 }; 637 };
625 638
626 struct nvc0_graph_init 639 const struct nvc0_graph_init
627 nvc0_grctx_init_unk40xx[] = { 640 nvc0_grctx_init_fe_0[] = {
628 { 0x404004, 10, 0x04, 0x00000000 }, 641 { 0x404004, 11, 0x04, 0x00000000 },
629 { 0x404044, 1, 0x04, 0x00000000 }, 642 { 0x404044, 1, 0x04, 0x00000000 },
630 { 0x404094, 1, 0x04, 0x00000000 }, 643 { 0x404094, 13, 0x04, 0x00000000 },
631 { 0x404098, 12, 0x04, 0x00000000 },
632 { 0x4040c8, 1, 0x04, 0xf0000087 }, 644 { 0x4040c8, 1, 0x04, 0xf0000087 },
633 { 0x4040d0, 6, 0x04, 0x00000000 }, 645 { 0x4040d0, 6, 0x04, 0x00000000 },
634 { 0x4040e8, 1, 0x04, 0x00001000 }, 646 { 0x4040e8, 1, 0x04, 0x00001000 },
635 { 0x4040f8, 1, 0x04, 0x00000000 }, 647 { 0x4040f8, 1, 0x04, 0x00000000 },
636 { 0x404130, 1, 0x04, 0x00000000 }, 648 { 0x404130, 2, 0x04, 0x00000000 },
637 { 0x404134, 1, 0x04, 0x00000000 },
638 { 0x404138, 1, 0x04, 0x20000040 }, 649 { 0x404138, 1, 0x04, 0x20000040 },
639 { 0x404150, 1, 0x04, 0x0000002e }, 650 { 0x404150, 1, 0x04, 0x0000002e },
640 { 0x404154, 1, 0x04, 0x00000400 }, 651 { 0x404154, 1, 0x04, 0x00000400 },
641 { 0x404158, 1, 0x04, 0x00000200 }, 652 { 0x404158, 1, 0x04, 0x00000200 },
642 { 0x404164, 1, 0x04, 0x00000055 }, 653 { 0x404164, 1, 0x04, 0x00000055 },
643 { 0x404168, 1, 0x04, 0x00000000 }, 654 { 0x404168, 1, 0x04, 0x00000000 },
644 { 0x404174, 1, 0x04, 0x00000000 }, 655 { 0x404174, 3, 0x04, 0x00000000 },
645 { 0x404178, 2, 0x04, 0x00000000 },
646 { 0x404200, 8, 0x04, 0x00000000 }, 656 { 0x404200, 8, 0x04, 0x00000000 },
647 {} 657 {}
648 }; 658 };
649 659
650 struct nvc0_graph_init 660 const struct nvc0_graph_init
651 nvc0_grctx_init_unk44xx[] = { 661 nvc0_grctx_init_pri_0[] = {
652 { 0x404404, 14, 0x04, 0x00000000 }, 662 { 0x404404, 14, 0x04, 0x00000000 },
653 { 0x404460, 2, 0x04, 0x00000000 }, 663 { 0x404460, 2, 0x04, 0x00000000 },
654 { 0x404468, 1, 0x04, 0x00ffffff }, 664 { 0x404468, 1, 0x04, 0x00ffffff },
@@ -658,8 +668,8 @@ nvc0_grctx_init_unk44xx[] = {
658 {} 668 {}
659 }; 669 };
660 670
661 struct nvc0_graph_init 671 const struct nvc0_graph_init
662 nvc0_grctx_init_unk46xx[] = { 672 nvc0_grctx_init_memfmt_0[] = {
663 { 0x404604, 1, 0x04, 0x00000015 }, 673 { 0x404604, 1, 0x04, 0x00000015 },
664 { 0x404608, 1, 0x04, 0x00000000 }, 674 { 0x404608, 1, 0x04, 0x00000000 },
665 { 0x40460c, 1, 0x04, 0x00002e00 }, 675 { 0x40460c, 1, 0x04, 0x00002e00 },
@@ -674,19 +684,14 @@ nvc0_grctx_init_unk46xx[] = {
674 { 0x4046a0, 1, 0x04, 0x007f0080 }, 684 { 0x4046a0, 1, 0x04, 0x007f0080 },
675 { 0x4046a4, 18, 0x04, 0x00000000 }, 685 { 0x4046a4, 18, 0x04, 0x00000000 },
676 { 0x4046f0, 2, 0x04, 0x00000000 }, 686 { 0x4046f0, 2, 0x04, 0x00000000 },
677 {}
678 };
679
680 struct nvc0_graph_init
681 nvc0_grctx_init_unk47xx[] = {
682 { 0x404700, 13, 0x04, 0x00000000 }, 687 { 0x404700, 13, 0x04, 0x00000000 },
683 { 0x404734, 1, 0x04, 0x00000100 }, 688 { 0x404734, 1, 0x04, 0x00000100 },
684 { 0x404738, 8, 0x04, 0x00000000 }, 689 { 0x404738, 8, 0x04, 0x00000000 },
685 {} 690 {}
686 }; 691 };
687 692
688 struct nvc0_graph_init 693 static const struct nvc0_graph_init
689 nvc0_grctx_init_unk58xx[] = { 694 nvc0_grctx_init_ds_0[] = {
690 { 0x405800, 1, 0x04, 0x078000bf }, 695 { 0x405800, 1, 0x04, 0x078000bf },
691 { 0x405830, 1, 0x04, 0x02180000 }, 696 { 0x405830, 1, 0x04, 0x02180000 },
692 { 0x405834, 2, 0x04, 0x00000000 }, 697 { 0x405834, 2, 0x04, 0x00000000 },
@@ -697,23 +702,18 @@ nvc0_grctx_init_unk58xx[] = {
697 {} 702 {}
698 }; 703 };
699 704
700 struct nvc0_graph_init 705 static const struct nvc0_graph_init
701 nvc0_grctx_init_unk60xx[] = { 706 nvc0_grctx_init_pd_0[] = {
702 { 0x406020, 1, 0x04, 0x000103c1 }, 707 { 0x406020, 1, 0x04, 0x000103c1 },
703 { 0x406028, 4, 0x04, 0x00000001 }, 708 { 0x406028, 4, 0x04, 0x00000001 },
704 {}
705 };
706
707 struct nvc0_graph_init
708 nvc0_grctx_init_unk64xx[] = {
709 { 0x4064a8, 1, 0x04, 0x00000000 }, 709 { 0x4064a8, 1, 0x04, 0x00000000 },
710 { 0x4064ac, 1, 0x04, 0x00003fff }, 710 { 0x4064ac, 1, 0x04, 0x00003fff },
711 { 0x4064b4, 2, 0x04, 0x00000000 }, 711 { 0x4064b4, 2, 0x04, 0x00000000 },
712 {} 712 {}
713 }; 713 };
714 714
715 struct nvc0_graph_init 715 const struct nvc0_graph_init
716 nvc0_grctx_init_unk78xx[] = { 716 nvc0_grctx_init_rstr2d_0[] = {
717 { 0x407804, 1, 0x04, 0x00000023 }, 717 { 0x407804, 1, 0x04, 0x00000023 },
718 { 0x40780c, 1, 0x04, 0x0a418820 }, 718 { 0x40780c, 1, 0x04, 0x0a418820 },
719 { 0x407810, 1, 0x04, 0x062080e6 }, 719 { 0x407810, 1, 0x04, 0x062080e6 },
@@ -725,8 +725,8 @@ nvc0_grctx_init_unk78xx[] = {
725 {} 725 {}
726 }; 726 };
727 727
728 struct nvc0_graph_init 728 const struct nvc0_graph_init
729 nvc0_grctx_init_unk80xx[] = { 729 nvc0_grctx_init_scc_0[] = {
730 { 0x408000, 2, 0x04, 0x00000000 }, 730 { 0x408000, 2, 0x04, 0x00000000 },
731 { 0x408008, 1, 0x04, 0x00000018 }, 731 { 0x408008, 1, 0x04, 0x00000018 },
732 { 0x40800c, 2, 0x04, 0x00000000 }, 732 { 0x40800c, 2, 0x04, 0x00000000 },
@@ -736,8 +736,8 @@ nvc0_grctx_init_unk80xx[] = {
736 {} 736 {}
737 }; 737 };
738 738
739 struct nvc0_graph_init 739 static const struct nvc0_graph_init
740 nvc0_grctx_init_rop[] = { 740 nvc0_grctx_init_be_0[] = {
741 { 0x408800, 1, 0x04, 0x02802a3c }, 741 { 0x408800, 1, 0x04, 0x02802a3c },
742 { 0x408804, 1, 0x04, 0x00000040 }, 742 { 0x408804, 1, 0x04, 0x00000040 },
743 { 0x408808, 1, 0x04, 0x0003e00d }, 743 { 0x408808, 1, 0x04, 0x0003e00d },
@@ -748,9 +748,28 @@ nvc0_grctx_init_rop[] = {
748 {} 748 {}
749 }; 749 };
750 750
751 struct nvc0_graph_init 751 const struct nvc0_graph_pack
752 nvc0_grctx_init_gpc_0[] = { 752 nvc0_grctx_pack_hub[] = {
753 { nvc0_grctx_init_main_0 },
754 { nvc0_grctx_init_fe_0 },
755 { nvc0_grctx_init_pri_0 },
756 { nvc0_grctx_init_memfmt_0 },
757 { nvc0_grctx_init_ds_0 },
758 { nvc0_grctx_init_pd_0 },
759 { nvc0_grctx_init_rstr2d_0 },
760 { nvc0_grctx_init_scc_0 },
761 { nvc0_grctx_init_be_0 },
762 {}
763 };
764
765 const struct nvc0_graph_init
766 nvc0_grctx_init_gpc_unk_0[] = {
753 { 0x418380, 1, 0x04, 0x00000016 }, 767 { 0x418380, 1, 0x04, 0x00000016 },
768 {}
769 };
770
771 const struct nvc0_graph_init
772 nvc0_grctx_init_prop_0[] = {
754 { 0x418400, 1, 0x04, 0x38004e00 }, 773 { 0x418400, 1, 0x04, 0x38004e00 },
755 { 0x418404, 1, 0x04, 0x71e0ffff }, 774 { 0x418404, 1, 0x04, 0x71e0ffff },
756 { 0x418408, 1, 0x04, 0x00000000 }, 775 { 0x418408, 1, 0x04, 0x00000000 },
@@ -760,6 +779,11 @@ nvc0_grctx_init_gpc_0[] = {
760 { 0x418450, 6, 0x04, 0x00000000 }, 779 { 0x418450, 6, 0x04, 0x00000000 },
761 { 0x418468, 1, 0x04, 0x00000001 }, 780 { 0x418468, 1, 0x04, 0x00000001 },
762 { 0x41846c, 2, 0x04, 0x00000000 }, 781 { 0x41846c, 2, 0x04, 0x00000000 },
782 {}
783 };
784
785 const struct nvc0_graph_init
786 nvc0_grctx_init_gpc_unk_1[] = {
763 { 0x418600, 1, 0x04, 0x0000001f }, 787 { 0x418600, 1, 0x04, 0x0000001f },
764 { 0x418684, 1, 0x04, 0x0000000f }, 788 { 0x418684, 1, 0x04, 0x0000000f },
765 { 0x418700, 1, 0x04, 0x00000002 }, 789 { 0x418700, 1, 0x04, 0x00000002 },
@@ -767,6 +791,11 @@ nvc0_grctx_init_gpc_0[] = {
767 { 0x418708, 1, 0x04, 0x00000000 }, 791 { 0x418708, 1, 0x04, 0x00000000 },
768 { 0x41870c, 1, 0x04, 0x07c80000 }, 792 { 0x41870c, 1, 0x04, 0x07c80000 },
769 { 0x418710, 1, 0x04, 0x00000000 }, 793 { 0x418710, 1, 0x04, 0x00000000 },
794 {}
795 };
796
797 static const struct nvc0_graph_init
798 nvc0_grctx_init_setup_0[] = {
770 { 0x418800, 1, 0x04, 0x0006860a }, 799 { 0x418800, 1, 0x04, 0x0006860a },
771 { 0x418808, 3, 0x04, 0x00000000 }, 800 { 0x418808, 3, 0x04, 0x00000000 },
772 { 0x418828, 1, 0x04, 0x00008442 }, 801 { 0x418828, 1, 0x04, 0x00008442 },
@@ -775,10 +804,20 @@ nvc0_grctx_init_gpc_0[] = {
775 { 0x4188e0, 1, 0x04, 0x01000000 }, 804 { 0x4188e0, 1, 0x04, 0x01000000 },
776 { 0x4188e8, 5, 0x04, 0x00000000 }, 805 { 0x4188e8, 5, 0x04, 0x00000000 },
777 { 0x4188fc, 1, 0x04, 0x00100000 }, 806 { 0x4188fc, 1, 0x04, 0x00100000 },
807 {}
808 };
809
810 const struct nvc0_graph_init
811 nvc0_grctx_init_zcull_0[] = {
778 { 0x41891c, 1, 0x04, 0x00ff00ff }, 812 { 0x41891c, 1, 0x04, 0x00ff00ff },
779 { 0x418924, 1, 0x04, 0x00000000 }, 813 { 0x418924, 1, 0x04, 0x00000000 },
780 { 0x418928, 1, 0x04, 0x00ffff00 }, 814 { 0x418928, 1, 0x04, 0x00ffff00 },
781 { 0x41892c, 1, 0x04, 0x0000ff00 }, 815 { 0x41892c, 1, 0x04, 0x0000ff00 },
816 {}
817 };
818
819 const struct nvc0_graph_init
820 nvc0_grctx_init_crstr_0[] = {
782 { 0x418b00, 1, 0x04, 0x00000000 }, 821 { 0x418b00, 1, 0x04, 0x00000000 },
783 { 0x418b08, 1, 0x04, 0x0a418820 }, 822 { 0x418b08, 1, 0x04, 0x0a418820 },
784 { 0x418b0c, 1, 0x04, 0x062080e6 }, 823 { 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -787,18 +826,41 @@ nvc0_grctx_init_gpc_0[] = {
787 { 0x418b18, 1, 0x04, 0x0a418820 }, 826 { 0x418b18, 1, 0x04, 0x0a418820 },
788 { 0x418b1c, 1, 0x04, 0x000000e6 }, 827 { 0x418b1c, 1, 0x04, 0x000000e6 },
789 { 0x418bb8, 1, 0x04, 0x00000103 }, 828 { 0x418bb8, 1, 0x04, 0x00000103 },
829 {}
830 };
831
832 const struct nvc0_graph_init
833 nvc0_grctx_init_gpm_0[] = {
790 { 0x418c08, 1, 0x04, 0x00000001 }, 834 { 0x418c08, 1, 0x04, 0x00000001 },
791 { 0x418c10, 8, 0x04, 0x00000000 }, 835 { 0x418c10, 8, 0x04, 0x00000000 },
792 { 0x418c80, 1, 0x04, 0x20200004 }, 836 { 0x418c80, 1, 0x04, 0x20200004 },
793 { 0x418c8c, 1, 0x04, 0x00000001 }, 837 { 0x418c8c, 1, 0x04, 0x00000001 },
838 {}
839 };
840
841 const struct nvc0_graph_init
842 nvc0_grctx_init_gcc_0[] = {
794 { 0x419000, 1, 0x04, 0x00000780 }, 843 { 0x419000, 1, 0x04, 0x00000780 },
795 { 0x419004, 2, 0x04, 0x00000000 }, 844 { 0x419004, 2, 0x04, 0x00000000 },
796 { 0x419014, 1, 0x04, 0x00000004 }, 845 { 0x419014, 1, 0x04, 0x00000004 },
797 {} 846 {}
798 }; 847 };
799 848
800 struct nvc0_graph_init 849 const struct nvc0_graph_pack
801 nvc0_grctx_init_gpc_1[] = { 850 nvc0_grctx_pack_gpc[] = {
851 { nvc0_grctx_init_gpc_unk_0 },
852 { nvc0_grctx_init_prop_0 },
853 { nvc0_grctx_init_gpc_unk_1 },
854 { nvc0_grctx_init_setup_0 },
855 { nvc0_grctx_init_zcull_0 },
856 { nvc0_grctx_init_crstr_0 },
857 { nvc0_grctx_init_gpm_0 },
858 { nvc0_grctx_init_gcc_0 },
859 {}
860 };
861
862 static const struct nvc0_graph_init
863 nvc0_grctx_init_zcullr_0[] = {
802 { 0x418a00, 3, 0x04, 0x00000000 }, 864 { 0x418a00, 3, 0x04, 0x00000000 },
803 { 0x418a0c, 1, 0x04, 0x00010000 }, 865 { 0x418a0c, 1, 0x04, 0x00010000 },
804 { 0x418a10, 3, 0x04, 0x00000000 }, 866 { 0x418a10, 3, 0x04, 0x00000000 },
@@ -826,19 +888,35 @@ nvc0_grctx_init_gpc_1[] = {
826 {} 888 {}
827 }; 889 };
828 890
829 struct nvc0_graph_init 891 const struct nvc0_graph_pack
830 nvc0_grctx_init_tpc[] = { 892 nvc0_grctx_pack_zcull[] = {
893 { nvc0_grctx_init_zcullr_0 },
894 {}
895 };
896
897 const struct nvc0_graph_init
898 nvc0_grctx_init_pe_0[] = {
831 { 0x419818, 1, 0x04, 0x00000000 }, 899 { 0x419818, 1, 0x04, 0x00000000 },
832 { 0x41983c, 1, 0x04, 0x00038bc7 }, 900 { 0x41983c, 1, 0x04, 0x00038bc7 },
833 { 0x419848, 1, 0x04, 0x00000000 }, 901 { 0x419848, 1, 0x04, 0x00000000 },
834 { 0x419864, 1, 0x04, 0x0000012a }, 902 { 0x419864, 1, 0x04, 0x0000012a },
835 { 0x419888, 1, 0x04, 0x00000000 }, 903 { 0x419888, 1, 0x04, 0x00000000 },
904 {}
905 };
906
907 static const struct nvc0_graph_init
908 nvc0_grctx_init_tex_0[] = {
836 { 0x419a00, 1, 0x04, 0x000001f0 }, 909 { 0x419a00, 1, 0x04, 0x000001f0 },
837 { 0x419a04, 1, 0x04, 0x00000001 }, 910 { 0x419a04, 1, 0x04, 0x00000001 },
838 { 0x419a08, 1, 0x04, 0x00000023 }, 911 { 0x419a08, 1, 0x04, 0x00000023 },
839 { 0x419a0c, 1, 0x04, 0x00020000 }, 912 { 0x419a0c, 1, 0x04, 0x00020000 },
840 { 0x419a10, 1, 0x04, 0x00000000 }, 913 { 0x419a10, 1, 0x04, 0x00000000 },
841 { 0x419a14, 1, 0x04, 0x00000200 }, 914 { 0x419a14, 1, 0x04, 0x00000200 },
915 {}
916};
917
918const struct nvc0_graph_init
919nvc0_grctx_init_wwdx_0[] = {
842 { 0x419b00, 1, 0x04, 0x0a418820 }, 920 { 0x419b00, 1, 0x04, 0x0a418820 },
843 { 0x419b04, 1, 0x04, 0x062080e6 }, 921 { 0x419b04, 1, 0x04, 0x062080e6 },
844 { 0x419b08, 1, 0x04, 0x020398a4 }, 922 { 0x419b08, 1, 0x04, 0x020398a4 },
@@ -848,15 +926,35 @@ nvc0_grctx_init_tpc[] = {
848 { 0x419bd0, 1, 0x04, 0x00900103 }, 926 { 0x419bd0, 1, 0x04, 0x00900103 },
849 { 0x419be0, 1, 0x04, 0x00000001 }, 927 { 0x419be0, 1, 0x04, 0x00000001 },
850 { 0x419be4, 1, 0x04, 0x00000000 }, 928 { 0x419be4, 1, 0x04, 0x00000000 },
929 {}
930};
931
932const struct nvc0_graph_init
933nvc0_grctx_init_mpc_0[] = {
851 { 0x419c00, 1, 0x04, 0x00000002 }, 934 { 0x419c00, 1, 0x04, 0x00000002 },
852 { 0x419c04, 1, 0x04, 0x00000006 }, 935 { 0x419c04, 1, 0x04, 0x00000006 },
853 { 0x419c08, 1, 0x04, 0x00000002 }, 936 { 0x419c08, 1, 0x04, 0x00000002 },
854 { 0x419c20, 1, 0x04, 0x00000000 }, 937 { 0x419c20, 1, 0x04, 0x00000000 },
938 {}
939};
940
941static const struct nvc0_graph_init
942nvc0_grctx_init_l1c_0[] = {
855 { 0x419cb0, 1, 0x04, 0x00060048 }, 943 { 0x419cb0, 1, 0x04, 0x00060048 },
856 { 0x419ce8, 1, 0x04, 0x00000000 }, 944 { 0x419ce8, 1, 0x04, 0x00000000 },
857 { 0x419cf4, 1, 0x04, 0x00000183 }, 945 { 0x419cf4, 1, 0x04, 0x00000183 },
946 {}
947};
948
949const struct nvc0_graph_init
950nvc0_grctx_init_tpccs_0[] = {
858 { 0x419d20, 1, 0x04, 0x02180000 }, 951 { 0x419d20, 1, 0x04, 0x02180000 },
859 { 0x419d24, 1, 0x04, 0x00001fff }, 952 { 0x419d24, 1, 0x04, 0x00001fff },
953 {}
954};
955
956static const struct nvc0_graph_init
957nvc0_grctx_init_sm_0[] = {
860 { 0x419e04, 3, 0x04, 0x00000000 }, 958 { 0x419e04, 3, 0x04, 0x00000000 },
861 { 0x419e10, 1, 0x04, 0x00000002 }, 959 { 0x419e10, 1, 0x04, 0x00000002 },
862 { 0x419e44, 1, 0x04, 0x001beff2 }, 960 { 0x419e44, 1, 0x04, 0x001beff2 },
@@ -868,6 +966,22 @@ nvc0_grctx_init_tpc[] = {
868 {} 966 {}
869}; 967};
870 968
969const struct nvc0_graph_pack
970nvc0_grctx_pack_tpc[] = {
971 { nvc0_grctx_init_pe_0 },
972 { nvc0_grctx_init_tex_0 },
973 { nvc0_grctx_init_wwdx_0 },
974 { nvc0_grctx_init_mpc_0 },
975 { nvc0_grctx_init_l1c_0 },
976 { nvc0_grctx_init_tpccs_0 },
977 { nvc0_grctx_init_sm_0 },
978 {}
979};
980
981/*******************************************************************************
982 * PGRAPH context implementation
983 ******************************************************************************/
984
871void 985void
872nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 986nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
873{ 987{
@@ -1055,14 +1169,14 @@ void
1055nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 1169nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1056{ 1170{
1057 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 1171 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
1058 int i;
1059 1172
1060 nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 1173 nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
1061 1174
1062 for (i = 0; oclass->hub[i]; i++) 1175 nvc0_graph_mmio(priv, oclass->hub);
1063 nvc0_graph_mmio(priv, oclass->hub[i]); 1176 nvc0_graph_mmio(priv, oclass->gpc);
1064 for (i = 0; oclass->gpc[i]; i++) 1177 nvc0_graph_mmio(priv, oclass->zcull);
1065 nvc0_graph_mmio(priv, oclass->gpc[i]); 1178 nvc0_graph_mmio(priv, oclass->tpc);
1179 nvc0_graph_mmio(priv, oclass->ppc);
1066 1180
1067 nv_wr32(priv, 0x404154, 0x00000000); 1181 nv_wr32(priv, 0x404154, 0x00000000);
1068 1182
@@ -1182,46 +1296,6 @@ done:
1182 return ret; 1296 return ret;
1183} 1297}
1184 1298
1185struct nvc0_graph_init *
1186nvc0_grctx_init_hub[] = {
1187 nvc0_grctx_init_base,
1188 nvc0_grctx_init_unk40xx,
1189 nvc0_grctx_init_unk44xx,
1190 nvc0_grctx_init_unk46xx,
1191 nvc0_grctx_init_unk47xx,
1192 nvc0_grctx_init_unk58xx,
1193 nvc0_grctx_init_unk60xx,
1194 nvc0_grctx_init_unk64xx,
1195 nvc0_grctx_init_unk78xx,
1196 nvc0_grctx_init_unk80xx,
1197 nvc0_grctx_init_rop,
1198 NULL
1199};
1200
1201static struct nvc0_graph_init *
1202nvc0_grctx_init_gpc[] = {
1203 nvc0_grctx_init_gpc_0,
1204 nvc0_grctx_init_gpc_1,
1205 nvc0_grctx_init_tpc,
1206 NULL
1207};
1208
1209struct nvc0_graph_init
1210nvc0_grctx_init_mthd_magic[] = {
1211 { 0x3410, 1, 0x04, 0x00000000 },
1212 {}
1213};
1214
1215struct nvc0_graph_mthd
1216nvc0_grctx_init_mthd[] = {
1217 { 0x9097, nvc0_grctx_init_9097, },
1218 { 0x902d, nvc0_grctx_init_902d, },
1219 { 0x9039, nvc0_grctx_init_9039, },
1220 { 0x90c0, nvc0_grctx_init_90c0, },
1221 { 0x902d, nvc0_grctx_init_mthd_magic, },
1222 {}
1223};
1224
1225struct nouveau_oclass * 1299struct nouveau_oclass *
1226nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) { 1300nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
1227 .base.handle = NV_ENGCTX(GR, 0xc0), 1301 .base.handle = NV_ENGCTX(GR, 0xc0),
@@ -1233,11 +1307,13 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
1233 .rd32 = _nouveau_graph_context_rd32, 1307 .rd32 = _nouveau_graph_context_rd32,
1234 .wr32 = _nouveau_graph_context_wr32, 1308 .wr32 = _nouveau_graph_context_wr32,
1235 }, 1309 },
1236 .main = nvc0_grctx_generate_main, 1310 .main = nvc0_grctx_generate_main,
1237 .mods = nvc0_grctx_generate_mods, 1311 .mods = nvc0_grctx_generate_mods,
1238 .unkn = nvc0_grctx_generate_unkn, 1312 .unkn = nvc0_grctx_generate_unkn,
1239 .hub = nvc0_grctx_init_hub, 1313 .hub = nvc0_grctx_pack_hub,
1240 .gpc = nvc0_grctx_init_gpc, 1314 .gpc = nvc0_grctx_pack_gpc,
1241 .icmd = nvc0_grctx_init_icmd, 1315 .zcull = nvc0_grctx_pack_zcull,
1242 .mthd = nvc0_grctx_init_mthd, 1316 .tpc = nvc0_grctx_pack_tpc,
1317 .icmd = nvc0_grctx_pack_icmd,
1318 .mthd = nvc0_grctx_pack_mthd,
1243}.base; 1319}.base;
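
The net effect of the hunks above is that the old per-list loops in nvc0_grctx_generate_main(), one NULL-terminated array of struct nvc0_graph_init pointers per unit, are replaced by flat nvc0_graph_pack tables, each consumed by a single nvc0_graph_mmio() call. That call is now issued unconditionally even for oclass->ppc, which nvc0 itself never sets, so the walker has to tolerate a NULL pack. The sketch below is a walker consistent with the shapes visible in this diff ({ addr, count, pitch, data } init entries, { init_list, optional class } pack entries); the struct layouts, field names and NULL guard are assumptions for illustration, not code copied from the tree.

    /* Sketch of a pack walker consistent with this diff; layouts assumed. */
    struct nvc0_graph_init {
            u32 addr;       /* first register in the run */
            u8  count;      /* number of registers in the run */
            u8  pitch;      /* byte stride between them */
            u32 data;       /* value written to each */
    };

    struct nvc0_graph_pack {
            const struct nvc0_graph_init *init;
            u32 type;       /* object class for mthd packs, 0 otherwise */
    };

    static void
    example_graph_mmio(struct nvc0_graph_priv *priv,
                       const struct nvc0_graph_pack *pack)
    {
            if (!pack)      /* e.g. oclass->ppc on chipsets without PPC lists */
                    return;

            for (; pack->init; pack++) {    /* outer table ends at { NULL } */
                    const struct nvc0_graph_init *init;

                    /* each init list is terminated by an all-zero {} entry */
                    for (init = pack->init; init->count; init++) {
                            u32 addr = init->addr;
                            int i;

                            for (i = 0; i < init->count; i++) {
                                    nv_wr32(priv, addr, init->data);
                                    addr += init->pitch;
                            }
                    }
            }
    }

The payoff of the flat table form is visible throughout the rest of the series: chipsets can now mix and match individual unit lists (nvc1 reusing nvc0 and nvc4 lists, for instance) instead of duplicating whole monolithic gpc/tpc arrays.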
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
new file mode 100644
index 000000000000..9c815d1f99ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -0,0 +1,170 @@
1#ifndef __NVKM_GRCTX_NVC0_H__
2#define __NVKM_GRCTX_NVC0_H__
3
4#include "nvc0.h"
5
6struct nvc0_grctx {
7 struct nvc0_graph_priv *priv;
8 struct nvc0_graph_data *data;
9 struct nvc0_graph_mmio *mmio;
10 int buffer_nr;
11 u64 buffer[4];
12 u64 addr;
13};
14
15struct nvc0_grctx_oclass {
16 struct nouveau_oclass base;
17 /* main context generation function */
18 void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
19 /* context-specific modify-on-first-load list generation function */
20 void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
21 void (*unkn)(struct nvc0_graph_priv *);
22 /* mmio context data */
23 const struct nvc0_graph_pack *hub;
24 const struct nvc0_graph_pack *gpc;
25 const struct nvc0_graph_pack *zcull;
26 const struct nvc0_graph_pack *tpc;
27 const struct nvc0_graph_pack *ppc;
28 /* indirect context data, generated with icmds/mthds */
29 const struct nvc0_graph_pack *icmd;
30 const struct nvc0_graph_pack *mthd;
31};
32
33#define mmio_data(s,a,p) do { \
34 info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
35 info->addr = info->buffer[info->buffer_nr++] + (s); \
36 info->data->size = (s); \
37 info->data->align = (a); \
38 info->data->access = (p); \
39 info->data++; \
40} while(0)
41
42#define mmio_list(r,d,s,b) do { \
43 info->mmio->addr = (r); \
44 info->mmio->data = (d); \
45 info->mmio->shift = (s); \
46 info->mmio->buffer = (b); \
47 info->mmio++; \
48 nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
49} while(0)
50
51extern struct nouveau_oclass *nvc0_grctx_oclass;
52int nvc0_grctx_generate(struct nvc0_graph_priv *);
53void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
54void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
55void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
56void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
57void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
58void nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *);
59void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
60void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
61
62extern struct nouveau_oclass *nvc1_grctx_oclass;
63void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
64void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
65
66extern struct nouveau_oclass *nvc4_grctx_oclass;
67extern struct nouveau_oclass *nvc8_grctx_oclass;
68extern struct nouveau_oclass *nvd7_grctx_oclass;
69extern struct nouveau_oclass *nvd9_grctx_oclass;
70
71extern struct nouveau_oclass *nve4_grctx_oclass;
72void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
73void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
74void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
75
76extern struct nouveau_oclass *nvf0_grctx_oclass;
77extern struct nouveau_oclass *nv108_grctx_oclass;
78extern struct nouveau_oclass *gm107_grctx_oclass;
79
80/* context init value lists */
81
82extern const struct nvc0_graph_pack nvc0_grctx_pack_icmd[];
83
84extern const struct nvc0_graph_pack nvc0_grctx_pack_mthd[];
85extern const struct nvc0_graph_init nvc0_grctx_init_902d_0[];
86extern const struct nvc0_graph_init nvc0_grctx_init_9039_0[];
87extern const struct nvc0_graph_init nvc0_grctx_init_90c0_0[];
88
89extern const struct nvc0_graph_pack nvc0_grctx_pack_hub[];
90extern const struct nvc0_graph_init nvc0_grctx_init_main_0[];
91extern const struct nvc0_graph_init nvc0_grctx_init_fe_0[];
92extern const struct nvc0_graph_init nvc0_grctx_init_pri_0[];
93extern const struct nvc0_graph_init nvc0_grctx_init_memfmt_0[];
94extern const struct nvc0_graph_init nvc0_grctx_init_rstr2d_0[];
95extern const struct nvc0_graph_init nvc0_grctx_init_scc_0[];
96
97extern const struct nvc0_graph_pack nvc0_grctx_pack_gpc[];
98extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_0[];
99extern const struct nvc0_graph_init nvc0_grctx_init_prop_0[];
100extern const struct nvc0_graph_init nvc0_grctx_init_gpc_unk_1[];
101extern const struct nvc0_graph_init nvc0_grctx_init_zcull_0[];
102extern const struct nvc0_graph_init nvc0_grctx_init_crstr_0[];
103extern const struct nvc0_graph_init nvc0_grctx_init_gpm_0[];
104extern const struct nvc0_graph_init nvc0_grctx_init_gcc_0[];
105
106extern const struct nvc0_graph_pack nvc0_grctx_pack_zcull[];
107
108extern const struct nvc0_graph_pack nvc0_grctx_pack_tpc[];
109extern const struct nvc0_graph_init nvc0_grctx_init_pe_0[];
110extern const struct nvc0_graph_init nvc0_grctx_init_wwdx_0[];
111extern const struct nvc0_graph_init nvc0_grctx_init_mpc_0[];
112extern const struct nvc0_graph_init nvc0_grctx_init_tpccs_0[];
113
114extern const struct nvc0_graph_init nvc4_grctx_init_tex_0[];
115extern const struct nvc0_graph_init nvc4_grctx_init_l1c_0[];
116extern const struct nvc0_graph_init nvc4_grctx_init_sm_0[];
117
118extern const struct nvc0_graph_init nvc1_grctx_init_9097_0[];
119
120extern const struct nvc0_graph_init nvc1_grctx_init_gpm_0[];
121
122extern const struct nvc0_graph_init nvc1_grctx_init_pe_0[];
123extern const struct nvc0_graph_init nvc1_grctx_init_wwdx_0[];
124extern const struct nvc0_graph_init nvc1_grctx_init_tpccs_0[];
125
126extern const struct nvc0_graph_init nvc8_grctx_init_9197_0[];
127extern const struct nvc0_graph_init nvc8_grctx_init_9297_0[];
128
129extern const struct nvc0_graph_pack nvd9_grctx_pack_icmd[];
130
131extern const struct nvc0_graph_pack nvd9_grctx_pack_mthd[];
132
133extern const struct nvc0_graph_init nvd9_grctx_init_fe_0[];
134extern const struct nvc0_graph_init nvd9_grctx_init_be_0[];
135
136extern const struct nvc0_graph_init nvd9_grctx_init_prop_0[];
137extern const struct nvc0_graph_init nvd9_grctx_init_gpc_unk_1[];
138extern const struct nvc0_graph_init nvd9_grctx_init_crstr_0[];
139
140extern const struct nvc0_graph_init nvd9_grctx_init_sm_0[];
141
142extern const struct nvc0_graph_init nvd7_grctx_init_pe_0[];
143
144extern const struct nvc0_graph_init nvd7_grctx_init_wwdx_0[];
145
146extern const struct nvc0_graph_init nve4_grctx_init_memfmt_0[];
147extern const struct nvc0_graph_init nve4_grctx_init_ds_0[];
148extern const struct nvc0_graph_init nve4_grctx_init_scc_0[];
149
150extern const struct nvc0_graph_init nve4_grctx_init_gpm_0[];
151
152extern const struct nvc0_graph_init nve4_grctx_init_pes_0[];
153
154extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
155
156extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
157extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
158
159extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
160
161extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
162extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
163
164extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
165
166extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
167extern const struct nvc0_graph_init nv108_grctx_init_crstr_0[];
168
169
170#endif
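
The mmio_data() and mmio_list() macros above expand in place and touch the info and priv locals directly, so they are only usable inside functions that declare both, i.e. the mods/main generators. A hedged illustration of the intended call pattern follows; the size, alignment, access mode and register offset are invented for the example rather than taken from any chipset's real mods function.

    static void
    example_grctx_generate_mods(struct nvc0_graph_priv *priv,
                                struct nvc0_grctx *info)
    {
            /* carve buffer 0 out of the context image: 0x1000 bytes,
             * 0x100-byte aligned, access mode 0 (values illustrative) */
            mmio_data(0x001000, 0x0100, 0);

            /* record a relocation: register 0x408004 (made up for the
             * example) receives the buffer 0 address shifted right by 8,
             * OR'd with the extra data bits in the second argument */
            mmio_list(0x408004, 0x00000000, 8, 0);
    }

Note that mmio_list() both appends the relocation to info->mmio and performs the initial nv_wr32() immediately, as its body above shows; the recorded list lets the same writes be replayed into each new channel's context.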
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index 71b4283f7fad..24a92c569c0a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -22,10 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "ctxnvc0.h"
26 26
27static struct nvc0_graph_init 27/*******************************************************************************
28nvc1_grctx_init_icmd[] = { 28 * PGRAPH context register lists
29 ******************************************************************************/
30
31static const struct nvc0_graph_init
32nvc1_grctx_init_icmd_0[] = {
29 { 0x001000, 1, 0x01, 0x00000004 }, 33 { 0x001000, 1, 0x01, 0x00000004 },
30 { 0x0000a9, 1, 0x01, 0x0000ffff }, 34 { 0x0000a9, 1, 0x01, 0x0000ffff },
31 { 0x000038, 1, 0x01, 0x0fac6881 }, 35 { 0x000038, 1, 0x01, 0x0fac6881 },
@@ -141,8 +145,7 @@ nvc1_grctx_init_icmd[] = {
141 { 0x000586, 1, 0x01, 0x00000040 }, 145 { 0x000586, 1, 0x01, 0x00000040 },
142 { 0x000582, 2, 0x01, 0x00000080 }, 146 { 0x000582, 2, 0x01, 0x00000080 },
143 { 0x0005c2, 1, 0x01, 0x00000001 }, 147 { 0x0005c2, 1, 0x01, 0x00000001 },
144 { 0x000638, 1, 0x01, 0x00000001 }, 148 { 0x000638, 2, 0x01, 0x00000001 },
145 { 0x000639, 1, 0x01, 0x00000001 },
146 { 0x00063a, 1, 0x01, 0x00000002 }, 149 { 0x00063a, 1, 0x01, 0x00000002 },
147 { 0x00063b, 2, 0x01, 0x00000001 }, 150 { 0x00063b, 2, 0x01, 0x00000001 },
148 { 0x00063d, 1, 0x01, 0x00000002 }, 151 { 0x00063d, 1, 0x01, 0x00000002 },
@@ -202,15 +205,13 @@ nvc1_grctx_init_icmd[] = {
202 { 0x000787, 1, 0x01, 0x000000cf }, 205 { 0x000787, 1, 0x01, 0x000000cf },
203 { 0x00078c, 1, 0x01, 0x00000008 }, 206 { 0x00078c, 1, 0x01, 0x00000008 },
204 { 0x000792, 1, 0x01, 0x00000001 }, 207 { 0x000792, 1, 0x01, 0x00000001 },
205 { 0x000794, 1, 0x01, 0x00000001 }, 208 { 0x000794, 3, 0x01, 0x00000001 },
206 { 0x000795, 2, 0x01, 0x00000001 },
207 { 0x000797, 1, 0x01, 0x000000cf }, 209 { 0x000797, 1, 0x01, 0x000000cf },
208 { 0x000836, 1, 0x01, 0x00000001 }, 210 { 0x000836, 1, 0x01, 0x00000001 },
209 { 0x00079a, 1, 0x01, 0x00000002 }, 211 { 0x00079a, 1, 0x01, 0x00000002 },
210 { 0x000833, 1, 0x01, 0x04444480 }, 212 { 0x000833, 1, 0x01, 0x04444480 },
211 { 0x0007a1, 1, 0x01, 0x00000001 }, 213 { 0x0007a1, 1, 0x01, 0x00000001 },
212 { 0x0007a3, 1, 0x01, 0x00000001 }, 214 { 0x0007a3, 3, 0x01, 0x00000001 },
213 { 0x0007a4, 2, 0x01, 0x00000001 },
214 { 0x000831, 1, 0x01, 0x00000004 }, 215 { 0x000831, 1, 0x01, 0x00000004 },
215 { 0x00080c, 1, 0x01, 0x00000002 }, 216 { 0x00080c, 1, 0x01, 0x00000002 },
216 { 0x00080d, 2, 0x01, 0x00000100 }, 217 { 0x00080d, 2, 0x01, 0x00000100 },
@@ -236,14 +237,12 @@ nvc1_grctx_init_icmd[] = {
236 { 0x0006b1, 1, 0x01, 0x00000011 }, 237 { 0x0006b1, 1, 0x01, 0x00000011 },
237 { 0x00078c, 1, 0x01, 0x00000008 }, 238 { 0x00078c, 1, 0x01, 0x00000008 },
238 { 0x000792, 1, 0x01, 0x00000001 }, 239 { 0x000792, 1, 0x01, 0x00000001 },
239 { 0x000794, 1, 0x01, 0x00000001 }, 240 { 0x000794, 3, 0x01, 0x00000001 },
240 { 0x000795, 2, 0x01, 0x00000001 },
241 { 0x000797, 1, 0x01, 0x000000cf }, 241 { 0x000797, 1, 0x01, 0x000000cf },
242 { 0x00079a, 1, 0x01, 0x00000002 }, 242 { 0x00079a, 1, 0x01, 0x00000002 },
243 { 0x000833, 1, 0x01, 0x04444480 }, 243 { 0x000833, 1, 0x01, 0x04444480 },
244 { 0x0007a1, 1, 0x01, 0x00000001 }, 244 { 0x0007a1, 1, 0x01, 0x00000001 },
245 { 0x0007a3, 1, 0x01, 0x00000001 }, 245 { 0x0007a3, 3, 0x01, 0x00000001 },
246 { 0x0007a4, 2, 0x01, 0x00000001 },
247 { 0x000831, 1, 0x01, 0x00000004 }, 246 { 0x000831, 1, 0x01, 0x00000004 },
248 { 0x01e100, 1, 0x01, 0x00000001 }, 247 { 0x01e100, 1, 0x01, 0x00000001 },
249 { 0x001000, 1, 0x01, 0x00000014 }, 248 { 0x001000, 1, 0x01, 0x00000014 },
@@ -268,8 +267,14 @@ nvc1_grctx_init_icmd[] = {
268 {} 267 {}
269}; 268};
270 269
271struct nvc0_graph_init 270static const struct nvc0_graph_pack
272nvc1_grctx_init_9097[] = { 271nvc1_grctx_pack_icmd[] = {
272 { nvc1_grctx_init_icmd_0 },
273 {}
274};
275
276const struct nvc0_graph_init
277nvc1_grctx_init_9097_0[] = {
273 { 0x000800, 8, 0x40, 0x00000000 }, 278 { 0x000800, 8, 0x40, 0x00000000 },
274 { 0x000804, 8, 0x40, 0x00000000 }, 279 { 0x000804, 8, 0x40, 0x00000000 },
275 { 0x000808, 8, 0x40, 0x00000400 }, 280 { 0x000808, 8, 0x40, 0x00000400 },
@@ -516,8 +521,7 @@ nvc1_grctx_init_9097[] = {
516 { 0x001350, 1, 0x04, 0x00000002 }, 521 { 0x001350, 1, 0x04, 0x00000002 },
517 { 0x001358, 1, 0x04, 0x00000001 }, 522 { 0x001358, 1, 0x04, 0x00000001 },
518 { 0x0012e4, 1, 0x04, 0x00000000 }, 523 { 0x0012e4, 1, 0x04, 0x00000000 },
519 { 0x00131c, 1, 0x04, 0x00000000 }, 524 { 0x00131c, 4, 0x04, 0x00000000 },
520 { 0x001320, 3, 0x04, 0x00000000 },
521 { 0x0019c0, 1, 0x04, 0x00000000 }, 525 { 0x0019c0, 1, 0x04, 0x00000000 },
522 { 0x001140, 1, 0x04, 0x00000000 }, 526 { 0x001140, 1, 0x04, 0x00000000 },
523 { 0x0019c4, 1, 0x04, 0x00000000 }, 527 { 0x0019c4, 1, 0x04, 0x00000000 },
@@ -571,15 +575,25 @@ nvc1_grctx_init_9097[] = {
571 {} 575 {}
572}; 576};
573 577
574static struct nvc0_graph_init 578static const struct nvc0_graph_init
575nvc1_grctx_init_9197[] = { 579nvc1_grctx_init_9197_0[] = {
576 { 0x003400, 128, 0x04, 0x00000000 }, 580 { 0x003400, 128, 0x04, 0x00000000 },
577 { 0x0002e4, 1, 0x04, 0x0000b001 }, 581 { 0x0002e4, 1, 0x04, 0x0000b001 },
578 {} 582 {}
579}; 583};
580 584
581static struct nvc0_graph_init 585static const struct nvc0_graph_pack
582nvc1_grctx_init_unk58xx[] = { 586nvc1_grctx_pack_mthd[] = {
587 { nvc1_grctx_init_9097_0, 0x9097 },
588 { nvc1_grctx_init_9197_0, 0x9197 },
589 { nvc0_grctx_init_902d_0, 0x902d },
590 { nvc0_grctx_init_9039_0, 0x9039 },
591 { nvc0_grctx_init_90c0_0, 0x90c0 },
592 {}
593};
594
595static const struct nvc0_graph_init
596nvc1_grctx_init_ds_0[] = {
583 { 0x405800, 1, 0x04, 0x0f8000bf }, 597 { 0x405800, 1, 0x04, 0x0f8000bf },
584 { 0x405830, 1, 0x04, 0x02180218 }, 598 { 0x405830, 1, 0x04, 0x02180218 },
585 { 0x405834, 2, 0x04, 0x00000000 }, 599 { 0x405834, 2, 0x04, 0x00000000 },
@@ -590,8 +604,20 @@ nvc1_grctx_init_unk58xx[] = {
590 {} 604 {}
591}; 605};
592 606
593static struct nvc0_graph_init 607static const struct nvc0_graph_init
594nvc1_grctx_init_rop[] = { 608nvc1_grctx_init_pd_0[] = {
609 { 0x406020, 1, 0x04, 0x000103c1 },
610 { 0x406028, 4, 0x04, 0x00000001 },
611 { 0x4064a8, 1, 0x04, 0x00000000 },
612 { 0x4064ac, 1, 0x04, 0x00003fff },
613 { 0x4064b4, 2, 0x04, 0x00000000 },
614 { 0x4064c0, 1, 0x04, 0x80140078 },
615 { 0x4064c4, 1, 0x04, 0x0086ffff },
616 {}
617};
618
619static const struct nvc0_graph_init
620nvc1_grctx_init_be_0[] = {
595 { 0x408800, 1, 0x04, 0x02802a3c }, 621 { 0x408800, 1, 0x04, 0x02802a3c },
596 { 0x408804, 1, 0x04, 0x00000040 }, 622 { 0x408804, 1, 0x04, 0x00000040 },
597 { 0x408808, 1, 0x04, 0x1003e005 }, 623 { 0x408808, 1, 0x04, 0x1003e005 },
@@ -602,25 +628,22 @@ nvc1_grctx_init_rop[] = {
602 {} 628 {}
603}; 629};
604 630
605static struct nvc0_graph_init 631static const struct nvc0_graph_pack
606nvc1_grctx_init_gpc_0[] = { 632nvc1_grctx_pack_hub[] = {
607 { 0x418380, 1, 0x04, 0x00000016 }, 633 { nvc0_grctx_init_main_0 },
608 { 0x418400, 1, 0x04, 0x38004e00 }, 634 { nvc0_grctx_init_fe_0 },
609 { 0x418404, 1, 0x04, 0x71e0ffff }, 635 { nvc0_grctx_init_pri_0 },
610 { 0x418408, 1, 0x04, 0x00000000 }, 636 { nvc0_grctx_init_memfmt_0 },
611 { 0x41840c, 1, 0x04, 0x00001008 }, 637 { nvc1_grctx_init_ds_0 },
612 { 0x418410, 1, 0x04, 0x0fff0fff }, 638 { nvc1_grctx_init_pd_0 },
613 { 0x418414, 1, 0x04, 0x00200fff }, 639 { nvc0_grctx_init_rstr2d_0 },
614 { 0x418450, 6, 0x04, 0x00000000 }, 640 { nvc0_grctx_init_scc_0 },
615 { 0x418468, 1, 0x04, 0x00000001 }, 641 { nvc1_grctx_init_be_0 },
616 { 0x41846c, 2, 0x04, 0x00000000 }, 642 {}
617 { 0x418600, 1, 0x04, 0x0000001f }, 643};
618 { 0x418684, 1, 0x04, 0x0000000f }, 644
619 { 0x418700, 1, 0x04, 0x00000002 }, 645static const struct nvc0_graph_init
620 { 0x418704, 1, 0x04, 0x00000080 }, 646nvc1_grctx_init_setup_0[] = {
621 { 0x418708, 1, 0x04, 0x00000000 },
622 { 0x41870c, 1, 0x04, 0x07c80000 },
623 { 0x418710, 1, 0x04, 0x00000000 },
624 { 0x418800, 1, 0x04, 0x0006860a }, 647 { 0x418800, 1, 0x04, 0x0006860a },
625 { 0x418808, 3, 0x04, 0x00000000 }, 648 { 0x418808, 3, 0x04, 0x00000000 },
626 { 0x418828, 1, 0x04, 0x00008442 }, 649 { 0x418828, 1, 0x04, 0x00008442 },
@@ -629,69 +652,44 @@ nvc1_grctx_init_gpc_0[] = {
629 { 0x4188e0, 1, 0x04, 0x01000000 }, 652 { 0x4188e0, 1, 0x04, 0x01000000 },
630 { 0x4188e8, 5, 0x04, 0x00000000 }, 653 { 0x4188e8, 5, 0x04, 0x00000000 },
631 { 0x4188fc, 1, 0x04, 0x00100018 }, 654 { 0x4188fc, 1, 0x04, 0x00100018 },
632 { 0x41891c, 1, 0x04, 0x00ff00ff }, 655 {}
633 { 0x418924, 1, 0x04, 0x00000000 }, 656};
634 { 0x418928, 1, 0x04, 0x00ffff00 }, 657
635 { 0x41892c, 1, 0x04, 0x0000ff00 }, 658const struct nvc0_graph_init
636 { 0x418a00, 3, 0x04, 0x00000000 }, 659nvc1_grctx_init_gpm_0[] = {
637 { 0x418a0c, 1, 0x04, 0x00010000 },
638 { 0x418a10, 3, 0x04, 0x00000000 },
639 { 0x418a20, 3, 0x04, 0x00000000 },
640 { 0x418a2c, 1, 0x04, 0x00010000 },
641 { 0x418a30, 3, 0x04, 0x00000000 },
642 { 0x418a40, 3, 0x04, 0x00000000 },
643 { 0x418a4c, 1, 0x04, 0x00010000 },
644 { 0x418a50, 3, 0x04, 0x00000000 },
645 { 0x418a60, 3, 0x04, 0x00000000 },
646 { 0x418a6c, 1, 0x04, 0x00010000 },
647 { 0x418a70, 3, 0x04, 0x00000000 },
648 { 0x418a80, 3, 0x04, 0x00000000 },
649 { 0x418a8c, 1, 0x04, 0x00010000 },
650 { 0x418a90, 3, 0x04, 0x00000000 },
651 { 0x418aa0, 3, 0x04, 0x00000000 },
652 { 0x418aac, 1, 0x04, 0x00010000 },
653 { 0x418ab0, 3, 0x04, 0x00000000 },
654 { 0x418ac0, 3, 0x04, 0x00000000 },
655 { 0x418acc, 1, 0x04, 0x00010000 },
656 { 0x418ad0, 3, 0x04, 0x00000000 },
657 { 0x418ae0, 3, 0x04, 0x00000000 },
658 { 0x418aec, 1, 0x04, 0x00010000 },
659 { 0x418af0, 3, 0x04, 0x00000000 },
660 { 0x418b00, 1, 0x04, 0x00000000 },
661 { 0x418b08, 1, 0x04, 0x0a418820 },
662 { 0x418b0c, 1, 0x04, 0x062080e6 },
663 { 0x418b10, 1, 0x04, 0x020398a4 },
664 { 0x418b14, 1, 0x04, 0x0e629062 },
665 { 0x418b18, 1, 0x04, 0x0a418820 },
666 { 0x418b1c, 1, 0x04, 0x000000e6 },
667 { 0x418bb8, 1, 0x04, 0x00000103 },
668 { 0x418c08, 1, 0x04, 0x00000001 }, 660 { 0x418c08, 1, 0x04, 0x00000001 },
669 { 0x418c10, 8, 0x04, 0x00000000 }, 661 { 0x418c10, 8, 0x04, 0x00000000 },
670 { 0x418c6c, 1, 0x04, 0x00000001 }, 662 { 0x418c6c, 1, 0x04, 0x00000001 },
671 { 0x418c80, 1, 0x04, 0x20200004 }, 663 { 0x418c80, 1, 0x04, 0x20200004 },
672 { 0x418c8c, 1, 0x04, 0x00000001 }, 664 { 0x418c8c, 1, 0x04, 0x00000001 },
673 { 0x419000, 1, 0x04, 0x00000780 },
674 { 0x419004, 2, 0x04, 0x00000000 },
675 { 0x419014, 1, 0x04, 0x00000004 },
676 {} 665 {}
677}; 666};
678 667
679static struct nvc0_graph_init 668static const struct nvc0_graph_pack
680nvc1_grctx_init_tpc[] = { 669nvc1_grctx_pack_gpc[] = {
670 { nvc0_grctx_init_gpc_unk_0 },
671 { nvc0_grctx_init_prop_0 },
672 { nvc0_grctx_init_gpc_unk_1 },
673 { nvc1_grctx_init_setup_0 },
674 { nvc0_grctx_init_zcull_0 },
675 { nvc0_grctx_init_crstr_0 },
676 { nvc1_grctx_init_gpm_0 },
677 { nvc0_grctx_init_gcc_0 },
678 {}
679};
680
681const struct nvc0_graph_init
682nvc1_grctx_init_pe_0[] = {
681 { 0x419818, 1, 0x04, 0x00000000 }, 683 { 0x419818, 1, 0x04, 0x00000000 },
682 { 0x41983c, 1, 0x04, 0x00038bc7 }, 684 { 0x41983c, 1, 0x04, 0x00038bc7 },
683 { 0x419848, 1, 0x04, 0x00000000 }, 685 { 0x419848, 1, 0x04, 0x00000000 },
684 { 0x419864, 1, 0x04, 0x00000129 }, 686 { 0x419864, 1, 0x04, 0x00000129 },
685 { 0x419888, 1, 0x04, 0x00000000 }, 687 { 0x419888, 1, 0x04, 0x00000000 },
686 { 0x419a00, 1, 0x04, 0x000001f0 }, 688 {}
687 { 0x419a04, 1, 0x04, 0x00000001 }, 689};
688 { 0x419a08, 1, 0x04, 0x00000023 }, 690
689 { 0x419a0c, 1, 0x04, 0x00020000 }, 691const struct nvc0_graph_init
690 { 0x419a10, 1, 0x04, 0x00000000 }, 692nvc1_grctx_init_wwdx_0[] = {
691 { 0x419a14, 1, 0x04, 0x00000200 },
692 { 0x419a1c, 1, 0x04, 0x00000000 },
693 { 0x419a20, 1, 0x04, 0x00000800 },
694 { 0x419ac4, 1, 0x04, 0x0007f440 },
695 { 0x419b00, 1, 0x04, 0x0a418820 }, 693 { 0x419b00, 1, 0x04, 0x0a418820 },
696 { 0x419b04, 1, 0x04, 0x062080e6 }, 694 { 0x419b04, 1, 0x04, 0x062080e6 },
697 { 0x419b08, 1, 0x04, 0x020398a4 }, 695 { 0x419b08, 1, 0x04, 0x020398a4 },
@@ -701,28 +699,33 @@ nvc1_grctx_init_tpc[] = {
701 { 0x419bd0, 1, 0x04, 0x00900103 }, 699 { 0x419bd0, 1, 0x04, 0x00900103 },
702 { 0x419be0, 1, 0x04, 0x00400001 }, 700 { 0x419be0, 1, 0x04, 0x00400001 },
703 { 0x419be4, 1, 0x04, 0x00000000 }, 701 { 0x419be4, 1, 0x04, 0x00000000 },
704 { 0x419c00, 1, 0x04, 0x00000002 }, 702 {}
705 { 0x419c04, 1, 0x04, 0x00000006 }, 703};
706 { 0x419c08, 1, 0x04, 0x00000002 }, 704
707 { 0x419c20, 1, 0x04, 0x00000000 }, 705const struct nvc0_graph_init
708 { 0x419cb0, 1, 0x04, 0x00020048 }, 706nvc1_grctx_init_tpccs_0[] = {
709 { 0x419ce8, 1, 0x04, 0x00000000 },
710 { 0x419cf4, 1, 0x04, 0x00000183 },
711 { 0x419d20, 1, 0x04, 0x12180000 }, 707 { 0x419d20, 1, 0x04, 0x12180000 },
712 { 0x419d24, 1, 0x04, 0x00001fff }, 708 { 0x419d24, 1, 0x04, 0x00001fff },
713 { 0x419d44, 1, 0x04, 0x02180218 }, 709 { 0x419d44, 1, 0x04, 0x02180218 },
714 { 0x419e04, 3, 0x04, 0x00000000 },
715 { 0x419e10, 1, 0x04, 0x00000002 },
716 { 0x419e44, 1, 0x04, 0x001beff2 },
717 { 0x419e48, 1, 0x04, 0x00000000 },
718 { 0x419e4c, 1, 0x04, 0x0000000f },
719 { 0x419e50, 17, 0x04, 0x00000000 },
720 { 0x419e98, 1, 0x04, 0x00000000 },
721 { 0x419ee0, 1, 0x04, 0x00011110 },
722 { 0x419f30, 11, 0x04, 0x00000000 },
723 {} 710 {}
724}; 711};
725 712
713static const struct nvc0_graph_pack
714nvc1_grctx_pack_tpc[] = {
715 { nvc1_grctx_init_pe_0 },
716 { nvc4_grctx_init_tex_0 },
717 { nvc1_grctx_init_wwdx_0 },
718 { nvc0_grctx_init_mpc_0 },
719 { nvc4_grctx_init_l1c_0 },
720 { nvc1_grctx_init_tpccs_0 },
721 { nvc4_grctx_init_sm_0 },
722 {}
723};
724
725/*******************************************************************************
726 * PGRAPH context implementation
727 ******************************************************************************/
728
726void 729void
727nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 730nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
728{ 731{
@@ -771,41 +774,6 @@ nvc1_grctx_generate_unkn(struct nvc0_graph_priv *priv)
771 nv_mask(priv, 0x419c00, 0x00000008, 0x00000008); 774 nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
772} 775}
773 776
774static struct nvc0_graph_init *
775nvc1_grctx_init_hub[] = {
776 nvc0_grctx_init_base,
777 nvc0_grctx_init_unk40xx,
778 nvc0_grctx_init_unk44xx,
779 nvc0_grctx_init_unk46xx,
780 nvc0_grctx_init_unk47xx,
781 nvc1_grctx_init_unk58xx,
782 nvc0_grctx_init_unk60xx,
783 nvc0_grctx_init_unk64xx,
784 nvc0_grctx_init_unk78xx,
785 nvc0_grctx_init_unk80xx,
786 nvc1_grctx_init_rop,
787 NULL
788};
789
790struct nvc0_graph_init *
791nvc1_grctx_init_gpc[] = {
792 nvc1_grctx_init_gpc_0,
793 nvc0_grctx_init_gpc_1,
794 nvc1_grctx_init_tpc,
795 NULL
796};
797
798static struct nvc0_graph_mthd
799nvc1_grctx_init_mthd[] = {
800 { 0x9097, nvc1_grctx_init_9097, },
801 { 0x9197, nvc1_grctx_init_9197, },
802 { 0x902d, nvc0_grctx_init_902d, },
803 { 0x9039, nvc0_grctx_init_9039, },
804 { 0x90c0, nvc0_grctx_init_90c0, },
805 { 0x902d, nvc0_grctx_init_mthd_magic, },
806 {}
807};
808
809struct nouveau_oclass * 777struct nouveau_oclass *
810nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) { 778nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
811 .base.handle = NV_ENGCTX(GR, 0xc1), 779 .base.handle = NV_ENGCTX(GR, 0xc1),
@@ -817,11 +785,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
817 .rd32 = _nouveau_graph_context_rd32, 785 .rd32 = _nouveau_graph_context_rd32,
818 .wr32 = _nouveau_graph_context_wr32, 786 .wr32 = _nouveau_graph_context_wr32,
819 }, 787 },
820 .main = nvc0_grctx_generate_main, 788 .main = nvc0_grctx_generate_main,
821 .mods = nvc1_grctx_generate_mods, 789 .mods = nvc1_grctx_generate_mods,
822 .unkn = nvc1_grctx_generate_unkn, 790 .unkn = nvc1_grctx_generate_unkn,
823 .hub = nvc1_grctx_init_hub, 791 .hub = nvc1_grctx_pack_hub,
824 .gpc = nvc1_grctx_init_gpc, 792 .gpc = nvc1_grctx_pack_gpc,
825 .icmd = nvc1_grctx_init_icmd, 793 .zcull = nvc0_grctx_pack_zcull,
826 .mthd = nvc1_grctx_init_mthd, 794 .tpc = nvc1_grctx_pack_tpc,
795 .icmd = nvc1_grctx_pack_icmd,
796 .mthd = nvc1_grctx_pack_mthd,
827}.base; 797}.base;
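
Worth noting in the mthd conversion above: each pack entry now carries the object class next to its init list, as in { nvc1_grctx_init_9097_0, 0x9097 }, where the old nvc0_graph_mthd table kept the class in a separate field and needed the duplicated 0x902d "mthd_magic" entry. The sketch below shows one plausible consumer of that class field; example_bind_class() and example_send_mthd() are hypothetical stand-ins named for this illustration, only the table shape comes from the diff.

    /* stand-ins for the real submission mechanism, which this diff does
     * not show; they exist only to make the sketch self-contained */
    static void example_bind_class(struct nvc0_graph_priv *priv, u32 class) { }
    static void example_send_mthd(struct nvc0_graph_priv *priv, u32 mthd,
                                  u32 data) { }

    static void
    example_graph_mthd(struct nvc0_graph_priv *priv,
                       const struct nvc0_graph_pack *pack)
    {
            for (; pack->init; pack++) {
                    const struct nvc0_graph_init *init;

                    /* bind pack->type (e.g. 0x9097) as the current
                     * object class before replaying its method list */
                    example_bind_class(priv, pack->type);

                    for (init = pack->init; init->count; init++) {
                            u32 mthd = init->addr;
                            int i;

                            for (i = 0; i < init->count; i++) {
                                    example_send_mthd(priv, mthd, init->data);
                                    mthd += init->pitch;
                            }
                    }
            }
    }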
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
index 8f237b3bd8c6..e11ed5538193 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
@@ -22,15 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "ctxnvc0.h"
26 26
27static struct nvc0_graph_init 27/*******************************************************************************
28nvc3_grctx_init_tpc[] = { 28 * PGRAPH context register lists
29 { 0x419818, 1, 0x04, 0x00000000 }, 29 ******************************************************************************/
30 { 0x41983c, 1, 0x04, 0x00038bc7 }, 30
31 { 0x419848, 1, 0x04, 0x00000000 }, 31const struct nvc0_graph_init
32 { 0x419864, 1, 0x04, 0x0000012a }, 32nvc4_grctx_init_tex_0[] = {
33 { 0x419888, 1, 0x04, 0x00000000 },
34 { 0x419a00, 1, 0x04, 0x000001f0 }, 33 { 0x419a00, 1, 0x04, 0x000001f0 },
35 { 0x419a04, 1, 0x04, 0x00000001 }, 34 { 0x419a04, 1, 0x04, 0x00000001 },
36 { 0x419a08, 1, 0x04, 0x00000023 }, 35 { 0x419a08, 1, 0x04, 0x00000023 },
@@ -40,24 +39,19 @@ nvc3_grctx_init_tpc[] = {
40 { 0x419a1c, 1, 0x04, 0x00000000 }, 39 { 0x419a1c, 1, 0x04, 0x00000000 },
41 { 0x419a20, 1, 0x04, 0x00000800 }, 40 { 0x419a20, 1, 0x04, 0x00000800 },
42 { 0x419ac4, 1, 0x04, 0x0007f440 }, 41 { 0x419ac4, 1, 0x04, 0x0007f440 },
43 { 0x419b00, 1, 0x04, 0x0a418820 }, 42 {}
44 { 0x419b04, 1, 0x04, 0x062080e6 }, 43};
45 { 0x419b08, 1, 0x04, 0x020398a4 }, 44
46 { 0x419b0c, 1, 0x04, 0x0e629062 }, 45const struct nvc0_graph_init
47 { 0x419b10, 1, 0x04, 0x0a418820 }, 46nvc4_grctx_init_l1c_0[] = {
48 { 0x419b14, 1, 0x04, 0x000000e6 },
49 { 0x419bd0, 1, 0x04, 0x00900103 },
50 { 0x419be0, 1, 0x04, 0x00000001 },
51 { 0x419be4, 1, 0x04, 0x00000000 },
52 { 0x419c00, 1, 0x04, 0x00000002 },
53 { 0x419c04, 1, 0x04, 0x00000006 },
54 { 0x419c08, 1, 0x04, 0x00000002 },
55 { 0x419c20, 1, 0x04, 0x00000000 },
56 { 0x419cb0, 1, 0x04, 0x00020048 }, 47 { 0x419cb0, 1, 0x04, 0x00020048 },
57 { 0x419ce8, 1, 0x04, 0x00000000 }, 48 { 0x419ce8, 1, 0x04, 0x00000000 },
58 { 0x419cf4, 1, 0x04, 0x00000183 }, 49 { 0x419cf4, 1, 0x04, 0x00000183 },
59 { 0x419d20, 1, 0x04, 0x02180000 }, 50 {}
60 { 0x419d24, 1, 0x04, 0x00001fff }, 51};
52
53const struct nvc0_graph_init
54nvc4_grctx_init_sm_0[] = {
61 { 0x419e04, 3, 0x04, 0x00000000 }, 55 { 0x419e04, 3, 0x04, 0x00000000 },
62 { 0x419e10, 1, 0x04, 0x00000002 }, 56 { 0x419e10, 1, 0x04, 0x00000002 },
63 { 0x419e44, 1, 0x04, 0x001beff2 }, 57 { 0x419e44, 1, 0x04, 0x001beff2 },
@@ -70,16 +64,24 @@ nvc3_grctx_init_tpc[] = {
70 {} 64 {}
71}; 65};
72 66
73struct nvc0_graph_init * 67static const struct nvc0_graph_pack
74nvc3_grctx_init_gpc[] = { 68nvc4_grctx_pack_tpc[] = {
75 nvc0_grctx_init_gpc_0, 69 { nvc0_grctx_init_pe_0 },
76 nvc0_grctx_init_gpc_1, 70 { nvc4_grctx_init_tex_0 },
77 nvc3_grctx_init_tpc, 71 { nvc0_grctx_init_wwdx_0 },
78 NULL 72 { nvc0_grctx_init_mpc_0 },
73 { nvc4_grctx_init_l1c_0 },
74 { nvc0_grctx_init_tpccs_0 },
75 { nvc4_grctx_init_sm_0 },
76 {}
79}; 77};
80 78
79/*******************************************************************************
80 * PGRAPH context implementation
81 ******************************************************************************/
82
81struct nouveau_oclass * 83struct nouveau_oclass *
82nvc3_grctx_oclass = &(struct nvc0_grctx_oclass) { 84nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
83 .base.handle = NV_ENGCTX(GR, 0xc3), 85 .base.handle = NV_ENGCTX(GR, 0xc3),
84 .base.ofuncs = &(struct nouveau_ofuncs) { 86 .base.ofuncs = &(struct nouveau_ofuncs) {
85 .ctor = nvc0_graph_context_ctor, 87 .ctor = nvc0_graph_context_ctor,
@@ -89,11 +91,13 @@ nvc3_grctx_oclass = &(struct nvc0_grctx_oclass) {
89 .rd32 = _nouveau_graph_context_rd32, 91 .rd32 = _nouveau_graph_context_rd32,
90 .wr32 = _nouveau_graph_context_wr32, 92 .wr32 = _nouveau_graph_context_wr32,
91 }, 93 },
92 .main = nvc0_grctx_generate_main, 94 .main = nvc0_grctx_generate_main,
93 .mods = nvc0_grctx_generate_mods, 95 .mods = nvc0_grctx_generate_mods,
94 .unkn = nvc0_grctx_generate_unkn, 96 .unkn = nvc0_grctx_generate_unkn,
95 .hub = nvc0_grctx_init_hub, 97 .hub = nvc0_grctx_pack_hub,
96 .gpc = nvc3_grctx_init_gpc, 98 .gpc = nvc0_grctx_pack_gpc,
97 .icmd = nvc0_grctx_init_icmd, 99 .zcull = nvc0_grctx_pack_zcull,
98 .mthd = nvc0_grctx_init_mthd, 100 .tpc = nvc4_grctx_pack_tpc,
101 .icmd = nvc0_grctx_pack_icmd,
102 .mthd = nvc0_grctx_pack_mthd,
99}.base; 103}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
index d0d4ce3c4892..feebd58dfe8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
@@ -22,10 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "ctxnvc0.h"
26 26
27static struct nvc0_graph_init 27/*******************************************************************************
28nvc8_grctx_init_icmd[] = { 28 * PGRAPH context register lists
29 ******************************************************************************/
30
31static const struct nvc0_graph_init
32nvc8_grctx_init_icmd_0[] = {
29 { 0x001000, 1, 0x01, 0x00000004 }, 33 { 0x001000, 1, 0x01, 0x00000004 },
30 { 0x0000a9, 1, 0x01, 0x0000ffff }, 34 { 0x0000a9, 1, 0x01, 0x0000ffff },
31 { 0x000038, 1, 0x01, 0x0fac6881 }, 35 { 0x000038, 1, 0x01, 0x0fac6881 },
@@ -141,8 +145,7 @@ nvc8_grctx_init_icmd[] = {
141 { 0x000586, 1, 0x01, 0x00000040 }, 145 { 0x000586, 1, 0x01, 0x00000040 },
142 { 0x000582, 2, 0x01, 0x00000080 }, 146 { 0x000582, 2, 0x01, 0x00000080 },
143 { 0x0005c2, 1, 0x01, 0x00000001 }, 147 { 0x0005c2, 1, 0x01, 0x00000001 },
144 { 0x000638, 1, 0x01, 0x00000001 }, 148 { 0x000638, 2, 0x01, 0x00000001 },
145 { 0x000639, 1, 0x01, 0x00000001 },
146 { 0x00063a, 1, 0x01, 0x00000002 }, 149 { 0x00063a, 1, 0x01, 0x00000002 },
147 { 0x00063b, 2, 0x01, 0x00000001 }, 150 { 0x00063b, 2, 0x01, 0x00000001 },
148 { 0x00063d, 1, 0x01, 0x00000002 }, 151 { 0x00063d, 1, 0x01, 0x00000002 },
@@ -203,15 +206,13 @@ nvc8_grctx_init_icmd[] = {
203 { 0x000787, 1, 0x01, 0x000000cf }, 206 { 0x000787, 1, 0x01, 0x000000cf },
204 { 0x00078c, 1, 0x01, 0x00000008 }, 207 { 0x00078c, 1, 0x01, 0x00000008 },
205 { 0x000792, 1, 0x01, 0x00000001 }, 208 { 0x000792, 1, 0x01, 0x00000001 },
206 { 0x000794, 1, 0x01, 0x00000001 }, 209 { 0x000794, 3, 0x01, 0x00000001 },
207 { 0x000795, 2, 0x01, 0x00000001 },
208 { 0x000797, 1, 0x01, 0x000000cf }, 210 { 0x000797, 1, 0x01, 0x000000cf },
209 { 0x000836, 1, 0x01, 0x00000001 }, 211 { 0x000836, 1, 0x01, 0x00000001 },
210 { 0x00079a, 1, 0x01, 0x00000002 }, 212 { 0x00079a, 1, 0x01, 0x00000002 },
211 { 0x000833, 1, 0x01, 0x04444480 }, 213 { 0x000833, 1, 0x01, 0x04444480 },
212 { 0x0007a1, 1, 0x01, 0x00000001 }, 214 { 0x0007a1, 1, 0x01, 0x00000001 },
213 { 0x0007a3, 1, 0x01, 0x00000001 }, 215 { 0x0007a3, 3, 0x01, 0x00000001 },
214 { 0x0007a4, 2, 0x01, 0x00000001 },
215 { 0x000831, 1, 0x01, 0x00000004 }, 216 { 0x000831, 1, 0x01, 0x00000004 },
216 { 0x00080c, 1, 0x01, 0x00000002 }, 217 { 0x00080c, 1, 0x01, 0x00000002 },
217 { 0x00080d, 2, 0x01, 0x00000100 }, 218 { 0x00080d, 2, 0x01, 0x00000100 },
@@ -237,14 +238,12 @@ nvc8_grctx_init_icmd[] = {
237 { 0x0006b1, 1, 0x01, 0x00000011 }, 238 { 0x0006b1, 1, 0x01, 0x00000011 },
238 { 0x00078c, 1, 0x01, 0x00000008 }, 239 { 0x00078c, 1, 0x01, 0x00000008 },
239 { 0x000792, 1, 0x01, 0x00000001 }, 240 { 0x000792, 1, 0x01, 0x00000001 },
240 { 0x000794, 1, 0x01, 0x00000001 }, 241 { 0x000794, 3, 0x01, 0x00000001 },
241 { 0x000795, 2, 0x01, 0x00000001 },
242 { 0x000797, 1, 0x01, 0x000000cf }, 242 { 0x000797, 1, 0x01, 0x000000cf },
243 { 0x00079a, 1, 0x01, 0x00000002 }, 243 { 0x00079a, 1, 0x01, 0x00000002 },
244 { 0x000833, 1, 0x01, 0x04444480 }, 244 { 0x000833, 1, 0x01, 0x04444480 },
245 { 0x0007a1, 1, 0x01, 0x00000001 }, 245 { 0x0007a1, 1, 0x01, 0x00000001 },
246 { 0x0007a3, 1, 0x01, 0x00000001 }, 246 { 0x0007a3, 3, 0x01, 0x00000001 },
247 { 0x0007a4, 2, 0x01, 0x00000001 },
248 { 0x000831, 1, 0x01, 0x00000004 }, 247 { 0x000831, 1, 0x01, 0x00000004 },
249 { 0x01e100, 1, 0x01, 0x00000001 }, 248 { 0x01e100, 1, 0x01, 0x00000001 },
250 { 0x001000, 1, 0x01, 0x00000014 }, 249 { 0x001000, 1, 0x01, 0x00000014 },
@@ -269,58 +268,20 @@ nvc8_grctx_init_icmd[] = {
269 {} 268 {}
270}; 269};
271 270
272static struct nvc0_graph_init 271static const struct nvc0_graph_pack
273nvc8_grctx_init_tpc[] = { 272nvc8_grctx_pack_icmd[] = {
274 { 0x419818, 1, 0x04, 0x00000000 }, 273 { nvc8_grctx_init_icmd_0 },
275 { 0x41983c, 1, 0x04, 0x00038bc7 },
276 { 0x419848, 1, 0x04, 0x00000000 },
277 { 0x419864, 1, 0x04, 0x0000012a },
278 { 0x419888, 1, 0x04, 0x00000000 },
279 { 0x419a00, 1, 0x04, 0x000001f0 },
280 { 0x419a04, 1, 0x04, 0x00000001 },
281 { 0x419a08, 1, 0x04, 0x00000023 },
282 { 0x419a0c, 1, 0x04, 0x00020000 },
283 { 0x419a10, 1, 0x04, 0x00000000 },
284 { 0x419a14, 1, 0x04, 0x00000200 },
285 { 0x419a1c, 1, 0x04, 0x00000000 },
286 { 0x419a20, 1, 0x04, 0x00000800 },
287 { 0x419b00, 1, 0x04, 0x0a418820 },
288 { 0x419b04, 1, 0x04, 0x062080e6 },
289 { 0x419b08, 1, 0x04, 0x020398a4 },
290 { 0x419b0c, 1, 0x04, 0x0e629062 },
291 { 0x419b10, 1, 0x04, 0x0a418820 },
292 { 0x419b14, 1, 0x04, 0x000000e6 },
293 { 0x419bd0, 1, 0x04, 0x00900103 },
294 { 0x419be0, 1, 0x04, 0x00000001 },
295 { 0x419be4, 1, 0x04, 0x00000000 },
296 { 0x419c00, 1, 0x04, 0x00000002 },
297 { 0x419c04, 1, 0x04, 0x00000006 },
298 { 0x419c08, 1, 0x04, 0x00000002 },
299 { 0x419c20, 1, 0x04, 0x00000000 },
300 { 0x419cb0, 1, 0x04, 0x00060048 },
301 { 0x419ce8, 1, 0x04, 0x00000000 },
302 { 0x419cf4, 1, 0x04, 0x00000183 },
303 { 0x419d20, 1, 0x04, 0x02180000 },
304 { 0x419d24, 1, 0x04, 0x00001fff },
305 { 0x419e04, 3, 0x04, 0x00000000 },
306 { 0x419e10, 1, 0x04, 0x00000002 },
307 { 0x419e44, 1, 0x04, 0x001beff2 },
308 { 0x419e48, 1, 0x04, 0x00000000 },
309 { 0x419e4c, 1, 0x04, 0x0000000f },
310 { 0x419e50, 17, 0x04, 0x00000000 },
311 { 0x419e98, 1, 0x04, 0x00000000 },
312 { 0x419f50, 2, 0x04, 0x00000000 },
313 {} 274 {}
314}; 275};
315 276
316struct nvc0_graph_init 277const struct nvc0_graph_init
317nvc8_grctx_init_9197[] = { 278nvc8_grctx_init_9197_0[] = {
318 { 0x0002e4, 1, 0x04, 0x0000b001 }, 279 { 0x0002e4, 1, 0x04, 0x0000b001 },
319 {} 280 {}
320}; 281};
321 282
322struct nvc0_graph_init 283const struct nvc0_graph_init
323nvc8_grctx_init_9297[] = { 284nvc8_grctx_init_9297_0[] = {
324 { 0x003400, 128, 0x04, 0x00000000 }, 285 { 0x003400, 128, 0x04, 0x00000000 },
325 { 0x00036c, 2, 0x04, 0x00000000 }, 286 { 0x00036c, 2, 0x04, 0x00000000 },
326 { 0x0007a4, 2, 0x04, 0x00000000 }, 287 { 0x0007a4, 2, 0x04, 0x00000000 },
@@ -329,26 +290,47 @@ nvc8_grctx_init_9297[] = {
329 {} 290 {}
330}; 291};
331 292
332static struct nvc0_graph_mthd 293static const struct nvc0_graph_pack
333nvc8_grctx_init_mthd[] = { 294nvc8_grctx_pack_mthd[] = {
334 { 0x9097, nvc1_grctx_init_9097, }, 295 { nvc1_grctx_init_9097_0, 0x9097 },
335 { 0x9197, nvc8_grctx_init_9197, }, 296 { nvc8_grctx_init_9197_0, 0x9197 },
336 { 0x9297, nvc8_grctx_init_9297, }, 297 { nvc8_grctx_init_9297_0, 0x9297 },
337 { 0x902d, nvc0_grctx_init_902d, }, 298 { nvc0_grctx_init_902d_0, 0x902d },
338 { 0x9039, nvc0_grctx_init_9039, }, 299 { nvc0_grctx_init_9039_0, 0x9039 },
339 { 0x90c0, nvc0_grctx_init_90c0, }, 300 { nvc0_grctx_init_90c0_0, 0x90c0 },
340 { 0x902d, nvc0_grctx_init_mthd_magic, }, 301 {}
302};
303
304static const struct nvc0_graph_init
305nvc8_grctx_init_setup_0[] = {
306 { 0x418800, 1, 0x04, 0x0006860a },
307 { 0x418808, 3, 0x04, 0x00000000 },
308 { 0x418828, 1, 0x04, 0x00008442 },
309 { 0x418830, 1, 0x04, 0x00000001 },
310 { 0x4188d8, 1, 0x04, 0x00000008 },
311 { 0x4188e0, 1, 0x04, 0x01000000 },
312 { 0x4188e8, 5, 0x04, 0x00000000 },
313 { 0x4188fc, 1, 0x04, 0x20100000 },
341 {} 314 {}
342}; 315};
343 316
344static struct nvc0_graph_init * 317static const struct nvc0_graph_pack
345nvc8_grctx_init_gpc[] = { 318nvc8_grctx_pack_gpc[] = {
346 nvc0_grctx_init_gpc_0, 319 { nvc0_grctx_init_gpc_unk_0 },
347 nvc0_grctx_init_gpc_1, 320 { nvc0_grctx_init_prop_0 },
348 nvc8_grctx_init_tpc, 321 { nvc0_grctx_init_gpc_unk_1 },
349 NULL 322 { nvc8_grctx_init_setup_0 },
323 { nvc0_grctx_init_zcull_0 },
324 { nvc0_grctx_init_crstr_0 },
325 { nvc0_grctx_init_gpm_0 },
326 { nvc0_grctx_init_gcc_0 },
327 {}
350}; 328};
351 329
330/*******************************************************************************
331 * PGRAPH context implementation
332 ******************************************************************************/
333
352struct nouveau_oclass * 334struct nouveau_oclass *
353nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) { 335nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
354 .base.handle = NV_ENGCTX(GR, 0xc8), 336 .base.handle = NV_ENGCTX(GR, 0xc8),
@@ -360,11 +342,13 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
360 .rd32 = _nouveau_graph_context_rd32, 342 .rd32 = _nouveau_graph_context_rd32,
361 .wr32 = _nouveau_graph_context_wr32, 343 .wr32 = _nouveau_graph_context_wr32,
362 }, 344 },
363 .main = nvc0_grctx_generate_main, 345 .main = nvc0_grctx_generate_main,
364 .mods = nvc0_grctx_generate_mods, 346 .mods = nvc0_grctx_generate_mods,
365 .unkn = nvc0_grctx_generate_unkn, 347 .unkn = nvc0_grctx_generate_unkn,
366 .hub = nvc0_grctx_init_hub, 348 .hub = nvc0_grctx_pack_hub,
367 .gpc = nvc8_grctx_init_gpc, 349 .gpc = nvc8_grctx_pack_gpc,
368 .icmd = nvc8_grctx_init_icmd, 350 .zcull = nvc0_grctx_pack_zcull,
369 .mthd = nvc8_grctx_init_mthd, 351 .tpc = nvc0_grctx_pack_tpc,
352 .icmd = nvc8_grctx_pack_icmd,
353 .mthd = nvc8_grctx_pack_mthd,
370}.base; 354}.base;
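
Several icmd hunks in this file (and the matching nvc1/nvd9 ones) fold adjacent single-register entries into one run, for example { 0x000638, 1, 0x01, 0x00000001 } plus { 0x000639, 1, 0x01, 0x00000001 } becoming { 0x000638, 2, 0x01, 0x00000001 }. With a pitch of 0x01 the merged entry programs exactly the same addresses, and the fold is only valid because both entries carry the same value. The toy program below is a standalone check written for this document, not kernel code; it expands both encodings and asserts they produce identical write sequences.

    #include <assert.h>
    #include <stdint.h>

    struct init { uint32_t addr; uint8_t count; uint8_t pitch; uint32_t data; };

    /* expand one entry into the list of addresses it would write */
    static void expand(const struct init *e, uint32_t *addrs, int *n)
    {
            int i;

            for (i = 0; i < e->count; i++)
                    addrs[(*n)++] = e->addr + (uint32_t)i * e->pitch;
    }

    int main(void)
    {
            const struct init merged   = { 0x000638, 2, 0x01, 0x00000001 };
            const struct init split[2] = { { 0x000638, 1, 0x01, 0x00000001 },
                                           { 0x000639, 1, 0x01, 0x00000001 } };
            uint32_t a[4], b[4];
            int na = 0, nb = 0, i;

            expand(&merged, a, &na);
            expand(&split[0], b, &nb);
            expand(&split[1], b, &nb);

            /* same number of writes, to the same addresses, in order */
            assert(na == nb);
            for (i = 0; i < na; i++)
                    assert(a[i] == b[i]);
            return 0;
    }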
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index c4740d528532..1dbc8d7f2e86 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -22,33 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "ctxnvc0.h"
26 26
27struct nvc0_graph_init 27/*******************************************************************************
28nvd7_grctx_init_unk40xx[] = { 28 * PGRAPH context register lists
29 { 0x404004, 10, 0x04, 0x00000000 }, 29 ******************************************************************************/
30 { 0x404044, 1, 0x04, 0x00000000 },
31 { 0x404094, 1, 0x04, 0x00000000 },
32 { 0x404098, 12, 0x04, 0x00000000 },
33 { 0x4040c8, 1, 0x04, 0xf0000087 },
34 { 0x4040d0, 6, 0x04, 0x00000000 },
35 { 0x4040e8, 1, 0x04, 0x00001000 },
36 { 0x4040f8, 1, 0x04, 0x00000000 },
37 { 0x404130, 1, 0x04, 0x00000000 },
38 { 0x404134, 1, 0x04, 0x00000000 },
39 { 0x404138, 1, 0x04, 0x20000040 },
40 { 0x404150, 1, 0x04, 0x0000002e },
41 { 0x404154, 1, 0x04, 0x00000400 },
42 { 0x404158, 1, 0x04, 0x00000200 },
43 { 0x404164, 1, 0x04, 0x00000055 },
44 { 0x404168, 1, 0x04, 0x00000000 },
45 { 0x404178, 2, 0x04, 0x00000000 },
46 { 0x404200, 8, 0x04, 0x00000000 },
47 {}
48};
49 30
50static struct nvc0_graph_init 31static const struct nvc0_graph_init
51nvd7_grctx_init_unk58xx[] = { 32nvd7_grctx_init_ds_0[] = {
52 { 0x405800, 1, 0x04, 0x0f8000bf }, 33 { 0x405800, 1, 0x04, 0x0f8000bf },
53 { 0x405830, 1, 0x04, 0x02180324 }, 34 { 0x405830, 1, 0x04, 0x02180324 },
54 { 0x405834, 1, 0x04, 0x08000000 }, 35 { 0x405834, 1, 0x04, 0x08000000 },
@@ -60,8 +41,10 @@ nvd7_grctx_init_unk58xx[] = {
60 {} 41 {}
61}; 42};
62 43
63static struct nvc0_graph_init 44static const struct nvc0_graph_init
64nvd7_grctx_init_unk64xx[] = { 45nvd7_grctx_init_pd_0[] = {
46 { 0x406020, 1, 0x04, 0x000103c1 },
47 { 0x406028, 4, 0x04, 0x00000001 },
65 { 0x4064a8, 1, 0x04, 0x00000000 }, 48 { 0x4064a8, 1, 0x04, 0x00000000 },
66 { 0x4064ac, 1, 0x04, 0x00003fff }, 49 { 0x4064ac, 1, 0x04, 0x00003fff },
67 { 0x4064b4, 3, 0x04, 0x00000000 }, 50 { 0x4064b4, 3, 0x04, 0x00000000 },
@@ -71,22 +54,22 @@ nvd7_grctx_init_unk64xx[] = {
71 {} 54 {}
72}; 55};
73 56
74static struct nvc0_graph_init 57static const struct nvc0_graph_pack
75nvd7_grctx_init_gpc_0[] = { 58nvd7_grctx_pack_hub[] = {
76 { 0x418380, 1, 0x04, 0x00000016 }, 59 { nvc0_grctx_init_main_0 },
77 { 0x418400, 1, 0x04, 0x38004e00 }, 60 { nvd9_grctx_init_fe_0 },
78 { 0x418404, 1, 0x04, 0x71e0ffff }, 61 { nvc0_grctx_init_pri_0 },
79 { 0x41840c, 1, 0x04, 0x00001008 }, 62 { nvc0_grctx_init_memfmt_0 },
80 { 0x418410, 1, 0x04, 0x0fff0fff }, 63 { nvd7_grctx_init_ds_0 },
81 { 0x418414, 1, 0x04, 0x02200fff }, 64 { nvd7_grctx_init_pd_0 },
82 { 0x418450, 6, 0x04, 0x00000000 }, 65 { nvc0_grctx_init_rstr2d_0 },
83 { 0x418468, 1, 0x04, 0x00000001 }, 66 { nvc0_grctx_init_scc_0 },
84 { 0x41846c, 2, 0x04, 0x00000000 }, 67 { nvd9_grctx_init_be_0 },
85 { 0x418600, 1, 0x04, 0x0000001f }, 68 {}
86 { 0x418684, 1, 0x04, 0x0000000f }, 69};
87 { 0x418700, 1, 0x04, 0x00000002 }, 70
88 { 0x418704, 1, 0x04, 0x00000080 }, 71static const struct nvc0_graph_init
89 { 0x418708, 3, 0x04, 0x00000000 }, 72nvd7_grctx_init_setup_0[] = {
90 { 0x418800, 1, 0x04, 0x7006860a }, 73 { 0x418800, 1, 0x04, 0x7006860a },
91 { 0x418808, 3, 0x04, 0x00000000 }, 74 { 0x418808, 3, 0x04, 0x00000000 },
92 { 0x418828, 1, 0x04, 0x00008442 }, 75 { 0x418828, 1, 0x04, 0x00008442 },
@@ -95,34 +78,32 @@ nvd7_grctx_init_gpc_0[] = {
95 { 0x4188e0, 1, 0x04, 0x01000000 }, 78 { 0x4188e0, 1, 0x04, 0x01000000 },
96 { 0x4188e8, 5, 0x04, 0x00000000 }, 79 { 0x4188e8, 5, 0x04, 0x00000000 },
97 { 0x4188fc, 1, 0x04, 0x20100018 }, 80 { 0x4188fc, 1, 0x04, 0x20100018 },
98 { 0x41891c, 1, 0x04, 0x00ff00ff },
99 { 0x418924, 1, 0x04, 0x00000000 },
100 { 0x418928, 1, 0x04, 0x00ffff00 },
101 { 0x41892c, 1, 0x04, 0x0000ff00 },
102 { 0x418b00, 1, 0x04, 0x00000006 },
103 { 0x418b08, 1, 0x04, 0x0a418820 },
104 { 0x418b0c, 1, 0x04, 0x062080e6 },
105 { 0x418b10, 1, 0x04, 0x020398a4 },
106 { 0x418b14, 1, 0x04, 0x0e629062 },
107 { 0x418b18, 1, 0x04, 0x0a418820 },
108 { 0x418b1c, 1, 0x04, 0x000000e6 },
109 { 0x418bb8, 1, 0x04, 0x00000103 },
110 { 0x418c08, 1, 0x04, 0x00000001 },
111 { 0x418c10, 8, 0x04, 0x00000000 },
112 { 0x418c6c, 1, 0x04, 0x00000001 },
113 { 0x418c80, 1, 0x04, 0x20200004 },
114 { 0x418c8c, 1, 0x04, 0x00000001 },
115 { 0x419000, 1, 0x04, 0x00000780 },
116 { 0x419004, 2, 0x04, 0x00000000 },
117 { 0x419014, 1, 0x04, 0x00000004 },
118 {} 81 {}
119}; 82};
120 83
121static struct nvc0_graph_init 84static const struct nvc0_graph_pack
122nvd7_grctx_init_tpc[] = { 85nvd7_grctx_pack_gpc[] = {
86 { nvc0_grctx_init_gpc_unk_0 },
87 { nvd9_grctx_init_prop_0 },
88 { nvd9_grctx_init_gpc_unk_1 },
89 { nvd7_grctx_init_setup_0 },
90 { nvc0_grctx_init_zcull_0 },
91 { nvd9_grctx_init_crstr_0 },
92 { nvc1_grctx_init_gpm_0 },
93 { nvc0_grctx_init_gcc_0 },
94 {}
95};
96
97const struct nvc0_graph_init
98nvd7_grctx_init_pe_0[] = {
123 { 0x419848, 1, 0x04, 0x00000000 }, 99 { 0x419848, 1, 0x04, 0x00000000 },
124 { 0x419864, 1, 0x04, 0x00000129 }, 100 { 0x419864, 1, 0x04, 0x00000129 },
125 { 0x419888, 1, 0x04, 0x00000000 }, 101 { 0x419888, 1, 0x04, 0x00000000 },
102 {}
103};
104
105static const struct nvc0_graph_init
106nvd7_grctx_init_tex_0[] = {
126 { 0x419a00, 1, 0x04, 0x000001f0 }, 107 { 0x419a00, 1, 0x04, 0x000001f0 },
127 { 0x419a04, 1, 0x04, 0x00000001 }, 108 { 0x419a04, 1, 0x04, 0x00000001 },
128 { 0x419a08, 1, 0x04, 0x00000023 }, 109 { 0x419a08, 1, 0x04, 0x00000023 },
@@ -132,33 +113,46 @@ nvd7_grctx_init_tpc[] = {
132 { 0x419a1c, 1, 0x04, 0x00008000 }, 113 { 0x419a1c, 1, 0x04, 0x00008000 },
133 { 0x419a20, 1, 0x04, 0x00000800 }, 114 { 0x419a20, 1, 0x04, 0x00000800 },
134 { 0x419ac4, 1, 0x04, 0x0017f440 }, 115 { 0x419ac4, 1, 0x04, 0x0017f440 },
116 {}
117};
118
119static const struct nvc0_graph_init
120nvd7_grctx_init_mpc_0[] = {
135 { 0x419c00, 1, 0x04, 0x0000000a }, 121 { 0x419c00, 1, 0x04, 0x0000000a },
136 { 0x419c04, 1, 0x04, 0x00000006 }, 122 { 0x419c04, 1, 0x04, 0x00000006 },
137 { 0x419c08, 1, 0x04, 0x00000002 }, 123 { 0x419c08, 1, 0x04, 0x00000002 },
138 { 0x419c20, 1, 0x04, 0x00000000 }, 124 { 0x419c20, 1, 0x04, 0x00000000 },
139 { 0x419c24, 1, 0x04, 0x00084210 }, 125 { 0x419c24, 1, 0x04, 0x00084210 },
140 { 0x419c28, 1, 0x04, 0x3efbefbe }, 126 { 0x419c28, 1, 0x04, 0x3efbefbe },
141 { 0x419cb0, 1, 0x04, 0x00020048 },
142 { 0x419ce8, 1, 0x04, 0x00000000 },
143 { 0x419cf4, 1, 0x04, 0x00000183 },
144 { 0x419e04, 3, 0x04, 0x00000000 },
145 { 0x419e10, 1, 0x04, 0x00000002 },
146 { 0x419e44, 1, 0x04, 0x001beff2 },
147 { 0x419e48, 1, 0x04, 0x00000000 },
148 { 0x419e4c, 1, 0x04, 0x0000000f },
149 { 0x419e50, 17, 0x04, 0x00000000 },
150 { 0x419e98, 1, 0x04, 0x00000000 },
151 { 0x419ee0, 1, 0x04, 0x00010110 },
152 { 0x419f30, 11, 0x04, 0x00000000 },
153 {} 127 {}
154}; 128};
155 129
156static struct nvc0_graph_init 130static const struct nvc0_graph_pack
157nvd7_grctx_init_unk[] = { 131nvd7_grctx_pack_tpc[] = {
132 { nvd7_grctx_init_pe_0 },
133 { nvd7_grctx_init_tex_0 },
134 { nvd7_grctx_init_mpc_0 },
135 { nvc4_grctx_init_l1c_0 },
136 { nvd9_grctx_init_sm_0 },
137 {}
138};
139
140static const struct nvc0_graph_init
141nvd7_grctx_init_pes_0[] = {
158 { 0x41be24, 1, 0x04, 0x00000002 }, 142 { 0x41be24, 1, 0x04, 0x00000002 },
143 {}
144};
145
146static const struct nvc0_graph_init
147nvd7_grctx_init_cbm_0[] = {
159 { 0x41bec0, 1, 0x04, 0x12180000 }, 148 { 0x41bec0, 1, 0x04, 0x12180000 },
160 { 0x41bec4, 1, 0x04, 0x00003fff }, 149 { 0x41bec4, 1, 0x04, 0x00003fff },
161 { 0x41bee4, 1, 0x04, 0x03240218 }, 150 { 0x41bee4, 1, 0x04, 0x03240218 },
151 {}
152};
153
154const struct nvc0_graph_init
155nvd7_grctx_init_wwdx_0[] = {
162 { 0x41bf00, 1, 0x04, 0x0a418820 }, 156 { 0x41bf00, 1, 0x04, 0x0a418820 },
163 { 0x41bf04, 1, 0x04, 0x062080e6 }, 157 { 0x41bf04, 1, 0x04, 0x062080e6 },
164 { 0x41bf08, 1, 0x04, 0x020398a4 }, 158 { 0x41bf08, 1, 0x04, 0x020398a4 },
@@ -171,6 +165,18 @@ nvd7_grctx_init_unk[] = {
171 {} 165 {}
172}; 166};
173 167
168static const struct nvc0_graph_pack
169nvd7_grctx_pack_ppc[] = {
170 { nvd7_grctx_init_pes_0 },
171 { nvd7_grctx_init_cbm_0 },
172 { nvd7_grctx_init_wwdx_0 },
173 {}
174};
175
176/*******************************************************************************
177 * PGRAPH context implementation
178 ******************************************************************************/
179
174static void 180static void
175nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 181nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
176{ 182{
@@ -219,10 +225,11 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
219 225
220 nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 226 nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
221 227
222 for (i = 0; oclass->hub[i]; i++) 228 nvc0_graph_mmio(priv, oclass->hub);
223 nvc0_graph_mmio(priv, oclass->hub[i]); 229 nvc0_graph_mmio(priv, oclass->gpc);
224 for (i = 0; oclass->gpc[i]; i++) 230 nvc0_graph_mmio(priv, oclass->zcull);
225 nvc0_graph_mmio(priv, oclass->gpc[i]); 231 nvc0_graph_mmio(priv, oclass->tpc);
232 nvc0_graph_mmio(priv, oclass->ppc);
226 233
227 nv_wr32(priv, 0x404154, 0x00000000); 234 nv_wr32(priv, 0x404154, 0x00000000);
228 235
@@ -244,32 +251,6 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
244 nv_mask(priv, 0x000260, 0x00000001, 0x00000001); 251 nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
245} 252}
246 253
247
248static struct nvc0_graph_init *
249nvd7_grctx_init_hub[] = {
250 nvc0_grctx_init_base,
251 nvd7_grctx_init_unk40xx,
252 nvc0_grctx_init_unk44xx,
253 nvc0_grctx_init_unk46xx,
254 nvc0_grctx_init_unk47xx,
255 nvd7_grctx_init_unk58xx,
256 nvc0_grctx_init_unk60xx,
257 nvd7_grctx_init_unk64xx,
258 nvc0_grctx_init_unk78xx,
259 nvc0_grctx_init_unk80xx,
260 nvd9_grctx_init_rop,
261 NULL
262};
263
264struct nvc0_graph_init *
265nvd7_grctx_init_gpc[] = {
266 nvd7_grctx_init_gpc_0,
267 nvc0_grctx_init_gpc_1,
268 nvd7_grctx_init_tpc,
269 nvd7_grctx_init_unk,
270 NULL
271};
272
273struct nouveau_oclass * 254struct nouveau_oclass *
274nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) { 255nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
275 .base.handle = NV_ENGCTX(GR, 0xd7), 256 .base.handle = NV_ENGCTX(GR, 0xd7),
@@ -281,11 +262,14 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
281 .rd32 = _nouveau_graph_context_rd32, 262 .rd32 = _nouveau_graph_context_rd32,
282 .wr32 = _nouveau_graph_context_wr32, 263 .wr32 = _nouveau_graph_context_wr32,
283 }, 264 },
284 .main = nvd7_grctx_generate_main, 265 .main = nvd7_grctx_generate_main,
285 .mods = nvd7_grctx_generate_mods, 266 .mods = nvd7_grctx_generate_mods,
286 .unkn = nve4_grctx_generate_unkn, 267 .unkn = nve4_grctx_generate_unkn,
287 .hub = nvd7_grctx_init_hub, 268 .hub = nvd7_grctx_pack_hub,
288 .gpc = nvd7_grctx_init_gpc, 269 .gpc = nvd7_grctx_pack_gpc,
289 .icmd = nvd9_grctx_init_icmd, 270 .zcull = nvc0_grctx_pack_zcull,
290 .mthd = nvd9_grctx_init_mthd, 271 .tpc = nvd7_grctx_pack_tpc,
272 .ppc = nvd7_grctx_pack_ppc,
273 .icmd = nvd9_grctx_pack_icmd,
274 .mthd = nvd9_grctx_pack_mthd,
291}.base; 275}.base;
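
Every chipset file in this series exports its context class through the same idiom seen above: a file-scope compound literal of struct nvc0_grctx_oclass whose embedded base member has its address taken and stored in a plain struct nouveau_oclass pointer. A compound literal at file scope has static storage duration, so &(struct nvc0_grctx_oclass){ ... }.base is a valid address-constant initializer in C99. A minimal standalone illustration, with invented names, is:

    #include <stdio.h>

    struct base { const char *name; };
    struct derived { struct base base; int extra; };

    /* file-scope compound literal: static storage duration, so taking
     * the address of its .base member is a constant expression */
    static struct base *obj = &(struct derived) {
            .base.name = "example",
            .extra = 42,
    }.base;

    int main(void)
    {
            printf("%s\n", obj->name);  /* prints "example" */
            return 0;
    }

Callers only ever see the base pointer; the containing structure is recovered later by the object framework via container_of-style arithmetic, which is why exporting the base address alone is sufficient.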
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index a1102cbf2fdc..c665fb7e4660 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -22,38 +22,14 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "ctxnvc0.h"
26 26
27struct nvc0_graph_init 27/*******************************************************************************
28nvd9_grctx_init_90c0[] = { 28 * PGRAPH context register lists
29 { 0x002700, 4, 0x40, 0x00000000 }, 29 ******************************************************************************/
30 { 0x002720, 4, 0x40, 0x00000000 },
31 { 0x002704, 4, 0x40, 0x00000000 },
32 { 0x002724, 4, 0x40, 0x00000000 },
33 { 0x002708, 4, 0x40, 0x00000000 },
34 { 0x002728, 4, 0x40, 0x00000000 },
35 { 0x00270c, 8, 0x20, 0x00000000 },
36 { 0x002710, 4, 0x40, 0x00014000 },
37 { 0x002730, 4, 0x40, 0x00014000 },
38 { 0x002714, 4, 0x40, 0x00000040 },
39 { 0x002734, 4, 0x40, 0x00000040 },
40 { 0x00030c, 1, 0x04, 0x00000001 },
41 { 0x001944, 1, 0x04, 0x00000000 },
42 { 0x000758, 1, 0x04, 0x00000100 },
43 { 0x0002c4, 1, 0x04, 0x00000000 },
44 { 0x000790, 5, 0x04, 0x00000000 },
45 { 0x00077c, 1, 0x04, 0x00000000 },
46 { 0x000204, 3, 0x04, 0x00000000 },
47 { 0x000214, 1, 0x04, 0x00000000 },
48 { 0x00024c, 1, 0x04, 0x00000000 },
49 { 0x000d94, 1, 0x04, 0x00000001 },
50 { 0x001608, 2, 0x04, 0x00000000 },
51 { 0x001664, 1, 0x04, 0x00000000 },
52 {}
53};
54 30
55struct nvc0_graph_init 31static const struct nvc0_graph_init
56nvd9_grctx_init_icmd[] = { 32nvd9_grctx_init_icmd_0[] = {
57 { 0x001000, 1, 0x01, 0x00000004 }, 33 { 0x001000, 1, 0x01, 0x00000004 },
58 { 0x0000a9, 1, 0x01, 0x0000ffff }, 34 { 0x0000a9, 1, 0x01, 0x0000ffff },
59 { 0x000038, 1, 0x01, 0x0fac6881 }, 35 { 0x000038, 1, 0x01, 0x0fac6881 },
@@ -171,8 +147,7 @@ nvd9_grctx_init_icmd[] = {
 	{ 0x000586, 1, 0x01, 0x00000040 },
 	{ 0x000582, 2, 0x01, 0x00000080 },
 	{ 0x0005c2, 1, 0x01, 0x00000001 },
-	{ 0x000638, 1, 0x01, 0x00000001 },
-	{ 0x000639, 1, 0x01, 0x00000001 },
+	{ 0x000638, 2, 0x01, 0x00000001 },
 	{ 0x00063a, 1, 0x01, 0x00000002 },
 	{ 0x00063b, 2, 0x01, 0x00000001 },
 	{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -233,15 +208,13 @@ nvd9_grctx_init_icmd[] = {
 	{ 0x000787, 1, 0x01, 0x000000cf },
 	{ 0x00078c, 1, 0x01, 0x00000008 },
 	{ 0x000792, 1, 0x01, 0x00000001 },
-	{ 0x000794, 1, 0x01, 0x00000001 },
-	{ 0x000795, 2, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
 	{ 0x000797, 1, 0x01, 0x000000cf },
 	{ 0x000836, 1, 0x01, 0x00000001 },
 	{ 0x00079a, 1, 0x01, 0x00000002 },
 	{ 0x000833, 1, 0x01, 0x04444480 },
 	{ 0x0007a1, 1, 0x01, 0x00000001 },
-	{ 0x0007a3, 1, 0x01, 0x00000001 },
-	{ 0x0007a4, 2, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
 	{ 0x000831, 1, 0x01, 0x00000004 },
 	{ 0x00080c, 1, 0x01, 0x00000002 },
 	{ 0x00080d, 2, 0x01, 0x00000100 },
@@ -267,14 +240,12 @@ nvd9_grctx_init_icmd[] = {
 	{ 0x0006b1, 1, 0x01, 0x00000011 },
 	{ 0x00078c, 1, 0x01, 0x00000008 },
 	{ 0x000792, 1, 0x01, 0x00000001 },
-	{ 0x000794, 1, 0x01, 0x00000001 },
-	{ 0x000795, 2, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
 	{ 0x000797, 1, 0x01, 0x000000cf },
 	{ 0x00079a, 1, 0x01, 0x00000002 },
 	{ 0x000833, 1, 0x01, 0x04444480 },
 	{ 0x0007a1, 1, 0x01, 0x00000001 },
-	{ 0x0007a3, 1, 0x01, 0x00000001 },
-	{ 0x0007a4, 2, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
 	{ 0x000831, 1, 0x01, 0x00000004 },
 	{ 0x01e100, 1, 0x01, 0x00000001 },
 	{ 0x001000, 1, 0x01, 0x00000014 },
@@ -299,18 +270,56 @@ nvd9_grctx_init_icmd[] = {
 	{}
 };
 
-struct nvc0_graph_init
-nvd9_grctx_init_unk40xx[] = {
-	{ 0x404004, 11, 0x04, 0x00000000 },
+const struct nvc0_graph_pack
+nvd9_grctx_pack_icmd[] = {
+	{ nvd9_grctx_init_icmd_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_90c0_0[] = {
+	{ 0x002700, 8, 0x20, 0x00000000 },
+	{ 0x002704, 8, 0x20, 0x00000000 },
+	{ 0x002708, 8, 0x20, 0x00000000 },
+	{ 0x00270c, 8, 0x20, 0x00000000 },
+	{ 0x002710, 8, 0x20, 0x00014000 },
+	{ 0x002714, 8, 0x20, 0x00000040 },
+	{ 0x00030c, 1, 0x04, 0x00000001 },
+	{ 0x001944, 1, 0x04, 0x00000000 },
+	{ 0x000758, 1, 0x04, 0x00000100 },
+	{ 0x0002c4, 1, 0x04, 0x00000000 },
+	{ 0x000790, 5, 0x04, 0x00000000 },
+	{ 0x00077c, 1, 0x04, 0x00000000 },
+	{ 0x000204, 3, 0x04, 0x00000000 },
+	{ 0x000214, 1, 0x04, 0x00000000 },
+	{ 0x00024c, 1, 0x04, 0x00000000 },
+	{ 0x000d94, 1, 0x04, 0x00000001 },
+	{ 0x001608, 2, 0x04, 0x00000000 },
+	{ 0x001664, 1, 0x04, 0x00000000 },
+	{}
+};
+
+const struct nvc0_graph_pack
+nvd9_grctx_pack_mthd[] = {
+	{ nvc1_grctx_init_9097_0, 0x9097 },
+	{ nvc8_grctx_init_9197_0, 0x9197 },
+	{ nvc8_grctx_init_9297_0, 0x9297 },
+	{ nvc0_grctx_init_902d_0, 0x902d },
+	{ nvc0_grctx_init_9039_0, 0x9039 },
+	{ nvd9_grctx_init_90c0_0, 0x90c0 },
+	{}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_fe_0[] = {
+	{ 0x404004, 10, 0x04, 0x00000000 },
 	{ 0x404044, 1, 0x04, 0x00000000 },
-	{ 0x404094, 1, 0x04, 0x00000000 },
-	{ 0x404098, 12, 0x04, 0x00000000 },
+	{ 0x404094, 13, 0x04, 0x00000000 },
 	{ 0x4040c8, 1, 0x04, 0xf0000087 },
 	{ 0x4040d0, 6, 0x04, 0x00000000 },
 	{ 0x4040e8, 1, 0x04, 0x00001000 },
 	{ 0x4040f8, 1, 0x04, 0x00000000 },
-	{ 0x404130, 1, 0x04, 0x00000000 },
-	{ 0x404134, 1, 0x04, 0x00000000 },
+	{ 0x404130, 2, 0x04, 0x00000000 },
 	{ 0x404138, 1, 0x04, 0x20000040 },
 	{ 0x404150, 1, 0x04, 0x0000002e },
 	{ 0x404154, 1, 0x04, 0x00000400 },
@@ -322,8 +331,8 @@ nvd9_grctx_init_unk40xx[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nvd9_grctx_init_unk58xx[] = {
+static const struct nvc0_graph_init
+nvd9_grctx_init_ds_0[] = {
 	{ 0x405800, 1, 0x04, 0x0f8000bf },
 	{ 0x405830, 1, 0x04, 0x02180218 },
 	{ 0x405834, 1, 0x04, 0x08000000 },
@@ -335,8 +344,10 @@ nvd9_grctx_init_unk58xx[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nvd9_grctx_init_unk64xx[] = {
+static const struct nvc0_graph_init
+nvd9_grctx_init_pd_0[] = {
+	{ 0x406020, 1, 0x04, 0x000103c1 },
+	{ 0x406028, 4, 0x04, 0x00000001 },
 	{ 0x4064a8, 1, 0x04, 0x00000000 },
 	{ 0x4064ac, 1, 0x04, 0x00003fff },
 	{ 0x4064b4, 3, 0x04, 0x00000000 },
@@ -345,21 +356,34 @@ nvd9_grctx_init_unk64xx[] = {
 	{}
 };
 
-struct nvc0_graph_init
-nvd9_grctx_init_rop[] = {
+const struct nvc0_graph_init
+nvd9_grctx_init_be_0[] = {
 	{ 0x408800, 1, 0x04, 0x02802a3c },
 	{ 0x408804, 1, 0x04, 0x00000040 },
 	{ 0x408808, 1, 0x04, 0x1043e005 },
 	{ 0x408900, 1, 0x04, 0x3080b801 },
-	{ 0x408904, 1, 0x04, 0x1043e005 },
+	{ 0x408904, 1, 0x04, 0x62000001 },
 	{ 0x408908, 1, 0x04, 0x00c8102f },
 	{ 0x408980, 1, 0x04, 0x0000011d },
 	{}
 };
 
-static struct nvc0_graph_init
-nvd9_grctx_init_gpc_0[] = {
-	{ 0x418380, 1, 0x04, 0x00000016 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_hub[] = {
+	{ nvc0_grctx_init_main_0 },
+	{ nvd9_grctx_init_fe_0 },
+	{ nvc0_grctx_init_pri_0 },
+	{ nvc0_grctx_init_memfmt_0 },
+	{ nvd9_grctx_init_ds_0 },
+	{ nvd9_grctx_init_pd_0 },
+	{ nvc0_grctx_init_rstr2d_0 },
+	{ nvc0_grctx_init_scc_0 },
+	{ nvd9_grctx_init_be_0 },
+	{}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_prop_0[] = {
 	{ 0x418400, 1, 0x04, 0x38004e00 },
 	{ 0x418404, 1, 0x04, 0x71e0ffff },
 	{ 0x41840c, 1, 0x04, 0x00001008 },
@@ -368,11 +392,21 @@ nvd9_grctx_init_gpc_0[] = {
 	{ 0x418450, 6, 0x04, 0x00000000 },
 	{ 0x418468, 1, 0x04, 0x00000001 },
 	{ 0x41846c, 2, 0x04, 0x00000000 },
+	{}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_gpc_unk_1[] = {
 	{ 0x418600, 1, 0x04, 0x0000001f },
 	{ 0x418684, 1, 0x04, 0x0000000f },
 	{ 0x418700, 1, 0x04, 0x00000002 },
 	{ 0x418704, 1, 0x04, 0x00000080 },
 	{ 0x418708, 3, 0x04, 0x00000000 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_setup_0[] = {
 	{ 0x418800, 1, 0x04, 0x7006860a },
 	{ 0x418808, 3, 0x04, 0x00000000 },
 	{ 0x418828, 1, 0x04, 0x00008442 },
@@ -381,10 +415,11 @@ nvd9_grctx_init_gpc_0[] = {
 	{ 0x4188e0, 1, 0x04, 0x01000000 },
 	{ 0x4188e8, 5, 0x04, 0x00000000 },
 	{ 0x4188fc, 1, 0x04, 0x20100008 },
-	{ 0x41891c, 1, 0x04, 0x00ff00ff },
-	{ 0x418924, 1, 0x04, 0x00000000 },
-	{ 0x418928, 1, 0x04, 0x00ffff00 },
-	{ 0x41892c, 1, 0x04, 0x0000ff00 },
+	{}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_crstr_0[] = {
 	{ 0x418b00, 1, 0x04, 0x00000006 },
 	{ 0x418b08, 1, 0x04, 0x0a418820 },
 	{ 0x418b0c, 1, 0x04, 0x062080e6 },
@@ -393,24 +428,24 @@ nvd9_grctx_init_gpc_0[] = {
 	{ 0x418b18, 1, 0x04, 0x0a418820 },
 	{ 0x418b1c, 1, 0x04, 0x000000e6 },
 	{ 0x418bb8, 1, 0x04, 0x00000103 },
-	{ 0x418c08, 1, 0x04, 0x00000001 },
-	{ 0x418c10, 8, 0x04, 0x00000000 },
-	{ 0x418c6c, 1, 0x04, 0x00000001 },
-	{ 0x418c80, 1, 0x04, 0x20200004 },
-	{ 0x418c8c, 1, 0x04, 0x00000001 },
-	{ 0x419000, 1, 0x04, 0x00000780 },
-	{ 0x419004, 2, 0x04, 0x00000000 },
-	{ 0x419014, 1, 0x04, 0x00000004 },
 	{}
 };
 
-static struct nvc0_graph_init
-nvd9_grctx_init_tpc[] = {
-	{ 0x419818, 1, 0x04, 0x00000000 },
-	{ 0x41983c, 1, 0x04, 0x00038bc7 },
-	{ 0x419848, 1, 0x04, 0x00000000 },
-	{ 0x419864, 1, 0x04, 0x00000129 },
-	{ 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_gpc[] = {
+	{ nvc0_grctx_init_gpc_unk_0 },
+	{ nvd9_grctx_init_prop_0 },
+	{ nvd9_grctx_init_gpc_unk_1 },
+	{ nvd9_grctx_init_setup_0 },
+	{ nvc0_grctx_init_zcull_0 },
+	{ nvd9_grctx_init_crstr_0 },
+	{ nvc1_grctx_init_gpm_0 },
+	{ nvc0_grctx_init_gcc_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_tex_0[] = {
 	{ 0x419a00, 1, 0x04, 0x000001f0 },
 	{ 0x419a04, 1, 0x04, 0x00000001 },
 	{ 0x419a08, 1, 0x04, 0x00000023 },
@@ -420,27 +455,22 @@ nvd9_grctx_init_tpc[] = {
 	{ 0x419a1c, 1, 0x04, 0x00000000 },
 	{ 0x419a20, 1, 0x04, 0x00000800 },
 	{ 0x419ac4, 1, 0x04, 0x0017f440 },
-	{ 0x419b00, 1, 0x04, 0x0a418820 },
-	{ 0x419b04, 1, 0x04, 0x062080e6 },
-	{ 0x419b08, 1, 0x04, 0x020398a4 },
-	{ 0x419b0c, 1, 0x04, 0x0e629062 },
-	{ 0x419b10, 1, 0x04, 0x0a418820 },
-	{ 0x419b14, 1, 0x04, 0x000000e6 },
-	{ 0x419bd0, 1, 0x04, 0x00900103 },
-	{ 0x419be0, 1, 0x04, 0x00400001 },
-	{ 0x419be4, 1, 0x04, 0x00000000 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvd9_grctx_init_mpc_0[] = {
 	{ 0x419c00, 1, 0x04, 0x0000000a },
 	{ 0x419c04, 1, 0x04, 0x00000006 },
 	{ 0x419c08, 1, 0x04, 0x00000002 },
 	{ 0x419c20, 1, 0x04, 0x00000000 },
 	{ 0x419c24, 1, 0x04, 0x00084210 },
 	{ 0x419c28, 1, 0x04, 0x3cf3cf3c },
-	{ 0x419cb0, 1, 0x04, 0x00020048 },
-	{ 0x419ce8, 1, 0x04, 0x00000000 },
-	{ 0x419cf4, 1, 0x04, 0x00000183 },
-	{ 0x419d20, 1, 0x04, 0x12180000 },
-	{ 0x419d24, 1, 0x04, 0x00001fff },
-	{ 0x419d44, 1, 0x04, 0x02180218 },
+	{}
+};
+
+const struct nvc0_graph_init
+nvd9_grctx_init_sm_0[] = {
 	{ 0x419e04, 3, 0x04, 0x00000000 },
 	{ 0x419e10, 1, 0x04, 0x00000002 },
 	{ 0x419e44, 1, 0x04, 0x001beff2 },
@@ -453,47 +483,21 @@ nvd9_grctx_init_tpc[] = {
 	{}
 };
 
-static struct nvc0_graph_init *
-nvd9_grctx_init_hub[] = {
-	nvc0_grctx_init_base,
-	nvd9_grctx_init_unk40xx,
-	nvc0_grctx_init_unk44xx,
-	nvc0_grctx_init_unk46xx,
-	nvc0_grctx_init_unk47xx,
-	nvd9_grctx_init_unk58xx,
-	nvc0_grctx_init_unk60xx,
-	nvd9_grctx_init_unk64xx,
-	nvc0_grctx_init_unk78xx,
-	nvc0_grctx_init_unk80xx,
-	nvd9_grctx_init_rop,
-	NULL
-};
-
-struct nvc0_graph_init *
-nvd9_grctx_init_gpc[] = {
-	nvd9_grctx_init_gpc_0,
-	nvc0_grctx_init_gpc_1,
-	nvd9_grctx_init_tpc,
-	NULL
-};
-
-struct nvc0_graph_init
-nvd9_grctx_init_mthd_magic[] = {
-	{ 0x3410, 1, 0x04, 0x80002006 },
+static const struct nvc0_graph_pack
+nvd9_grctx_pack_tpc[] = {
+	{ nvc1_grctx_init_pe_0 },
+	{ nvd9_grctx_init_tex_0 },
+	{ nvc1_grctx_init_wwdx_0 },
+	{ nvd9_grctx_init_mpc_0 },
+	{ nvc4_grctx_init_l1c_0 },
+	{ nvc1_grctx_init_tpccs_0 },
+	{ nvd9_grctx_init_sm_0 },
 	{}
 };
 
-struct nvc0_graph_mthd
-nvd9_grctx_init_mthd[] = {
-	{ 0x9097, nvc1_grctx_init_9097, },
-	{ 0x9197, nvc8_grctx_init_9197, },
-	{ 0x9297, nvc8_grctx_init_9297, },
-	{ 0x902d, nvc0_grctx_init_902d, },
-	{ 0x9039, nvc0_grctx_init_9039, },
-	{ 0x90c0, nvd9_grctx_init_90c0, },
-	{ 0x902d, nvd9_grctx_init_mthd_magic, },
-	{}
-};
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
 
 struct nouveau_oclass *
 nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
@@ -506,11 +510,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
 		.rd32 = _nouveau_graph_context_rd32,
 		.wr32 = _nouveau_graph_context_wr32,
 	},
 	.main = nvc0_grctx_generate_main,
 	.mods = nvc1_grctx_generate_mods,
 	.unkn = nvc1_grctx_generate_unkn,
-	.hub = nvd9_grctx_init_hub,
-	.gpc = nvd9_grctx_init_gpc,
-	.icmd = nvd9_grctx_init_icmd,
-	.mthd = nvd9_grctx_init_mthd,
+	.hub = nvd9_grctx_pack_hub,
+	.gpc = nvd9_grctx_pack_gpc,
+	.zcull = nvc0_grctx_pack_zcull,
+	.tpc = nvd9_grctx_pack_tpc,
+	.icmd = nvd9_grctx_pack_icmd,
+	.mthd = nvd9_grctx_pack_mthd,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index e2de73ee5eee..49a14b116a5f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -22,10 +22,14 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "nvc0.h"
+#include "ctxnvc0.h"
 
-struct nvc0_graph_init
-nve4_grctx_init_icmd[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nve4_grctx_init_icmd_0[] = {
 	{ 0x001000, 1, 0x01, 0x00000004 },
 	{ 0x000039, 3, 0x01, 0x00000000 },
 	{ 0x0000a9, 1, 0x01, 0x0000ffff },
@@ -138,8 +142,7 @@ nve4_grctx_init_icmd[] = {
 	{ 0x000586, 1, 0x01, 0x00000040 },
 	{ 0x000582, 2, 0x01, 0x00000080 },
 	{ 0x0005c2, 1, 0x01, 0x00000001 },
-	{ 0x000638, 1, 0x01, 0x00000001 },
-	{ 0x000639, 1, 0x01, 0x00000001 },
+	{ 0x000638, 2, 0x01, 0x00000001 },
 	{ 0x00063a, 1, 0x01, 0x00000002 },
 	{ 0x00063b, 2, 0x01, 0x00000001 },
 	{ 0x00063d, 1, 0x01, 0x00000002 },
@@ -197,15 +200,13 @@ nve4_grctx_init_icmd[] = {
 	{ 0x000787, 1, 0x01, 0x000000cf },
 	{ 0x00078c, 1, 0x01, 0x00000008 },
 	{ 0x000792, 1, 0x01, 0x00000001 },
-	{ 0x000794, 1, 0x01, 0x00000001 },
-	{ 0x000795, 2, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
 	{ 0x000797, 1, 0x01, 0x000000cf },
 	{ 0x000836, 1, 0x01, 0x00000001 },
 	{ 0x00079a, 1, 0x01, 0x00000002 },
 	{ 0x000833, 1, 0x01, 0x04444480 },
 	{ 0x0007a1, 1, 0x01, 0x00000001 },
-	{ 0x0007a3, 1, 0x01, 0x00000001 },
-	{ 0x0007a4, 2, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
 	{ 0x000831, 1, 0x01, 0x00000004 },
 	{ 0x000b07, 1, 0x01, 0x00000002 },
 	{ 0x000b08, 2, 0x01, 0x00000100 },
@@ -231,14 +232,12 @@ nve4_grctx_init_icmd[] = {
 	{ 0x0006b1, 1, 0x01, 0x00000011 },
 	{ 0x00078c, 1, 0x01, 0x00000008 },
 	{ 0x000792, 1, 0x01, 0x00000001 },
-	{ 0x000794, 1, 0x01, 0x00000001 },
-	{ 0x000795, 2, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
 	{ 0x000797, 1, 0x01, 0x000000cf },
 	{ 0x00079a, 1, 0x01, 0x00000002 },
 	{ 0x000833, 1, 0x01, 0x04444480 },
 	{ 0x0007a1, 1, 0x01, 0x00000001 },
-	{ 0x0007a3, 1, 0x01, 0x00000001 },
-	{ 0x0007a4, 2, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
 	{ 0x000831, 1, 0x01, 0x00000004 },
 	{ 0x01e100, 1, 0x01, 0x00000001 },
 	{ 0x001000, 1, 0x01, 0x00000008 },
@@ -273,8 +272,14 @@ nve4_grctx_init_icmd[] = {
 	{}
 };
 
-struct nvc0_graph_init
-nve4_grctx_init_a097[] = {
+static const struct nvc0_graph_pack
+nve4_grctx_pack_icmd[] = {
+	{ nve4_grctx_init_icmd_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_a097_0[] = {
 	{ 0x000800, 8, 0x40, 0x00000000 },
 	{ 0x000804, 8, 0x40, 0x00000000 },
 	{ 0x000808, 8, 0x40, 0x00000400 },
@@ -517,8 +522,7 @@ nve4_grctx_init_a097[] = {
 	{ 0x001350, 1, 0x04, 0x00000002 },
 	{ 0x001358, 1, 0x04, 0x00000001 },
 	{ 0x0012e4, 1, 0x04, 0x00000000 },
-	{ 0x00131c, 1, 0x04, 0x00000000 },
-	{ 0x001320, 3, 0x04, 0x00000000 },
+	{ 0x00131c, 4, 0x04, 0x00000000 },
 	{ 0x0019c0, 1, 0x04, 0x00000000 },
 	{ 0x001140, 1, 0x04, 0x00000000 },
 	{ 0x0019c4, 1, 0x04, 0x00000000 },
@@ -574,19 +578,24 @@ nve4_grctx_init_a097[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_unk40xx[] = {
+static const struct nvc0_graph_pack
+nve4_grctx_pack_mthd[] = {
+	{ nve4_grctx_init_a097_0, 0xa097 },
+	{ nvc0_grctx_init_902d_0, 0x902d },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_fe_0[] = {
 	{ 0x404010, 5, 0x04, 0x00000000 },
 	{ 0x404024, 1, 0x04, 0x0000e000 },
 	{ 0x404028, 1, 0x04, 0x00000000 },
-	{ 0x4040a8, 1, 0x04, 0x00000000 },
-	{ 0x4040ac, 7, 0x04, 0x00000000 },
+	{ 0x4040a8, 8, 0x04, 0x00000000 },
 	{ 0x4040c8, 1, 0x04, 0xf800008f },
 	{ 0x4040d0, 6, 0x04, 0x00000000 },
 	{ 0x4040e8, 1, 0x04, 0x00001000 },
 	{ 0x4040f8, 1, 0x04, 0x00000000 },
-	{ 0x404130, 1, 0x04, 0x00000000 },
-	{ 0x404134, 1, 0x04, 0x00000000 },
+	{ 0x404130, 2, 0x04, 0x00000000 },
 	{ 0x404138, 1, 0x04, 0x20000040 },
 	{ 0x404150, 1, 0x04, 0x0000002e },
 	{ 0x404154, 1, 0x04, 0x00000400 },
@@ -597,8 +606,8 @@ nve4_grctx_init_unk40xx[] = {
 	{}
 };
 
-struct nvc0_graph_init
-nve4_grctx_init_unk46xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_memfmt_0[] = {
 	{ 0x404604, 1, 0x04, 0x00000014 },
 	{ 0x404608, 1, 0x04, 0x00000000 },
 	{ 0x40460c, 1, 0x04, 0x00003fff },
@@ -614,11 +623,6 @@ nve4_grctx_init_unk46xx[] = {
 	{ 0x4046a0, 1, 0x04, 0x007f0080 },
 	{ 0x4046a4, 8, 0x04, 0x00000000 },
 	{ 0x4046c8, 3, 0x04, 0x00000000 },
-	{}
-};
-
-struct nvc0_graph_init
-nve4_grctx_init_unk47xx[] = {
 	{ 0x404700, 3, 0x04, 0x00000000 },
 	{ 0x404718, 7, 0x04, 0x00000000 },
 	{ 0x404734, 1, 0x04, 0x00000100 },
@@ -628,8 +632,8 @@ nve4_grctx_init_unk47xx[] = {
 	{}
 };
 
-struct nvc0_graph_init
-nve4_grctx_init_unk58xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_ds_0[] = {
 	{ 0x405800, 1, 0x04, 0x0f8000bf },
 	{ 0x405830, 1, 0x04, 0x02180648 },
 	{ 0x405834, 1, 0x04, 0x08000000 },
@@ -641,22 +645,17 @@ nve4_grctx_init_unk58xx[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_unk5bxx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_cwd_0[] = {
 	{ 0x405b00, 1, 0x04, 0x00000000 },
 	{ 0x405b10, 1, 0x04, 0x00001000 },
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_unk60xx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_pd_0[] = {
 	{ 0x406020, 1, 0x04, 0x004103c1 },
 	{ 0x406028, 4, 0x04, 0x00000001 },
-	{}
-};
-
-static struct nvc0_graph_init
-nve4_grctx_init_unk64xx[] = {
 	{ 0x4064a8, 1, 0x04, 0x00000000 },
 	{ 0x4064ac, 1, 0x04, 0x00003fff },
 	{ 0x4064b4, 2, 0x04, 0x00000000 },
@@ -668,14 +667,14 @@ nve4_grctx_init_unk64xx[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_unk70xx[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_sked_0[] = {
 	{ 0x407040, 1, 0x04, 0x00000000 },
 	{}
 };
 
-struct nvc0_graph_init
-nve4_grctx_init_unk80xx[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_scc_0[] = {
 	{ 0x408000, 2, 0x04, 0x00000000 },
 	{ 0x408008, 1, 0x04, 0x00000030 },
 	{ 0x40800c, 2, 0x04, 0x00000000 },
@@ -685,8 +684,8 @@ nve4_grctx_init_unk80xx[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_rop[] = {
+static const struct nvc0_graph_init
+nve4_grctx_init_be_0[] = {
 	{ 0x408800, 1, 0x04, 0x02802a3c },
 	{ 0x408804, 1, 0x04, 0x00000040 },
 	{ 0x408808, 1, 0x04, 0x1043e005 },
@@ -698,22 +697,24 @@ nve4_grctx_init_rop[] = {
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_gpc_0[] = {
-	{ 0x418380, 1, 0x04, 0x00000016 },
-	{ 0x418400, 1, 0x04, 0x38004e00 },
-	{ 0x418404, 1, 0x04, 0x71e0ffff },
-	{ 0x41840c, 1, 0x04, 0x00001008 },
-	{ 0x418410, 1, 0x04, 0x0fff0fff },
-	{ 0x418414, 1, 0x04, 0x02200fff },
-	{ 0x418450, 6, 0x04, 0x00000000 },
-	{ 0x418468, 1, 0x04, 0x00000001 },
-	{ 0x41846c, 2, 0x04, 0x00000000 },
-	{ 0x418600, 1, 0x04, 0x0000001f },
-	{ 0x418684, 1, 0x04, 0x0000000f },
-	{ 0x418700, 1, 0x04, 0x00000002 },
-	{ 0x418704, 1, 0x04, 0x00000080 },
-	{ 0x418708, 3, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nve4_grctx_pack_hub[] = {
+	{ nvc0_grctx_init_main_0 },
+	{ nve4_grctx_init_fe_0 },
+	{ nvc0_grctx_init_pri_0 },
+	{ nve4_grctx_init_memfmt_0 },
+	{ nve4_grctx_init_ds_0 },
+	{ nve4_grctx_init_cwd_0 },
+	{ nve4_grctx_init_pd_0 },
+	{ nve4_grctx_init_sked_0 },
+	{ nvc0_grctx_init_rstr2d_0 },
+	{ nve4_grctx_init_scc_0 },
+	{ nve4_grctx_init_be_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_setup_0[] = {
 	{ 0x418800, 1, 0x04, 0x7006860a },
 	{ 0x418808, 3, 0x04, 0x00000000 },
 	{ 0x418828, 1, 0x04, 0x00000044 },
@@ -722,35 +723,35 @@ nve4_grctx_init_gpc_0[] = {
 	{ 0x4188e0, 1, 0x04, 0x01000000 },
 	{ 0x4188e8, 5, 0x04, 0x00000000 },
 	{ 0x4188fc, 1, 0x04, 0x20100018 },
-	{ 0x41891c, 1, 0x04, 0x00ff00ff },
-	{ 0x418924, 1, 0x04, 0x00000000 },
-	{ 0x418928, 1, 0x04, 0x00ffff00 },
-	{ 0x41892c, 1, 0x04, 0x0000ff00 },
-	{ 0x418b00, 1, 0x04, 0x00000006 },
-	{ 0x418b08, 1, 0x04, 0x0a418820 },
-	{ 0x418b0c, 1, 0x04, 0x062080e6 },
-	{ 0x418b10, 1, 0x04, 0x020398a4 },
-	{ 0x418b14, 1, 0x04, 0x0e629062 },
-	{ 0x418b18, 1, 0x04, 0x0a418820 },
-	{ 0x418b1c, 1, 0x04, 0x000000e6 },
-	{ 0x418bb8, 1, 0x04, 0x00000103 },
+	{}
+};
+
+const struct nvc0_graph_init
+nve4_grctx_init_gpm_0[] = {
 	{ 0x418c08, 1, 0x04, 0x00000001 },
 	{ 0x418c10, 8, 0x04, 0x00000000 },
 	{ 0x418c40, 1, 0x04, 0xffffffff },
 	{ 0x418c6c, 1, 0x04, 0x00000001 },
 	{ 0x418c80, 1, 0x04, 0x20200004 },
 	{ 0x418c8c, 1, 0x04, 0x00000001 },
-	{ 0x419000, 1, 0x04, 0x00000780 },
-	{ 0x419004, 2, 0x04, 0x00000000 },
-	{ 0x419014, 1, 0x04, 0x00000004 },
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_tpc[] = {
-	{ 0x419848, 1, 0x04, 0x00000000 },
-	{ 0x419864, 1, 0x04, 0x00000129 },
-	{ 0x419888, 1, 0x04, 0x00000000 },
+static const struct nvc0_graph_pack
+nve4_grctx_pack_gpc[] = {
+	{ nvc0_grctx_init_gpc_unk_0 },
+	{ nvd9_grctx_init_prop_0 },
+	{ nvd9_grctx_init_gpc_unk_1 },
+	{ nve4_grctx_init_setup_0 },
+	{ nvc0_grctx_init_zcull_0 },
+	{ nvd9_grctx_init_crstr_0 },
+	{ nve4_grctx_init_gpm_0 },
+	{ nvc0_grctx_init_gcc_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_tex_0[] = {
 	{ 0x419a00, 1, 0x04, 0x000000f0 },
 	{ 0x419a04, 1, 0x04, 0x00000001 },
 	{ 0x419a08, 1, 0x04, 0x00000021 },
@@ -761,14 +762,29 @@ nve4_grctx_init_tpc[] = {
 	{ 0x419a20, 1, 0x04, 0x00000800 },
 	{ 0x419a30, 1, 0x04, 0x00000001 },
 	{ 0x419ac4, 1, 0x04, 0x0037f440 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_mpc_0[] = {
 	{ 0x419c00, 1, 0x04, 0x0000000a },
 	{ 0x419c04, 1, 0x04, 0x80000006 },
 	{ 0x419c08, 1, 0x04, 0x00000002 },
 	{ 0x419c20, 1, 0x04, 0x00000000 },
 	{ 0x419c24, 1, 0x04, 0x00084210 },
 	{ 0x419c28, 1, 0x04, 0x3efbefbe },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_l1c_0[] = {
 	{ 0x419ce8, 1, 0x04, 0x00000000 },
 	{ 0x419cf4, 1, 0x04, 0x00003203 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_sm_0[] = {
 	{ 0x419e04, 3, 0x04, 0x00000000 },
 	{ 0x419e10, 1, 0x04, 0x00000402 },
 	{ 0x419e44, 1, 0x04, 0x0013eff2 },
@@ -782,28 +798,46 @@ nve4_grctx_init_tpc[] = {
 	{ 0x419f58, 1, 0x04, 0x00000000 },
 	{ 0x419f70, 1, 0x04, 0x00000000 },
 	{ 0x419f78, 1, 0x04, 0x0000000b },
-	{ 0x419f7c, 1, 0x04, 0x0000027a },
+	{ 0x419f7c, 1, 0x04, 0x0000027c },
+	{}
+};
+
+static const struct nvc0_graph_pack
+nve4_grctx_pack_tpc[] = {
+	{ nvd7_grctx_init_pe_0 },
+	{ nve4_grctx_init_tex_0 },
+	{ nve4_grctx_init_mpc_0 },
+	{ nve4_grctx_init_l1c_0 },
+	{ nve4_grctx_init_sm_0 },
 	{}
 };
 
-static struct nvc0_graph_init
-nve4_grctx_init_unk[] = {
+const struct nvc0_graph_init
+nve4_grctx_init_pes_0[] = {
 	{ 0x41be24, 1, 0x04, 0x00000006 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nve4_grctx_init_cbm_0[] = {
 	{ 0x41bec0, 1, 0x04, 0x12180000 },
 	{ 0x41bec4, 1, 0x04, 0x00037f7f },
 	{ 0x41bee4, 1, 0x04, 0x06480430 },
-	{ 0x41bf00, 1, 0x04, 0x0a418820 },
-	{ 0x41bf04, 1, 0x04, 0x062080e6 },
-	{ 0x41bf08, 1, 0x04, 0x020398a4 },
-	{ 0x41bf0c, 1, 0x04, 0x0e629062 },
-	{ 0x41bf10, 1, 0x04, 0x0a418820 },
-	{ 0x41bf14, 1, 0x04, 0x000000e6 },
-	{ 0x41bfd0, 1, 0x04, 0x00900103 },
-	{ 0x41bfe0, 1, 0x04, 0x00400001 },
-	{ 0x41bfe4, 1, 0x04, 0x00000000 },
 	{}
 };
 
+static const struct nvc0_graph_pack
+nve4_grctx_pack_ppc[] = {
+	{ nve4_grctx_init_pes_0 },
+	{ nve4_grctx_init_cbm_0 },
+	{ nvd7_grctx_init_wwdx_0 },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
 static void
 nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
@@ -925,10 +959,11 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 
 	nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
 
-	for (i = 0; oclass->hub[i]; i++)
-		nvc0_graph_mmio(priv, oclass->hub[i]);
-	for (i = 0; oclass->gpc[i]; i++)
-		nvc0_graph_mmio(priv, oclass->gpc[i]);
+	nvc0_graph_mmio(priv, oclass->hub);
+	nvc0_graph_mmio(priv, oclass->gpc);
+	nvc0_graph_mmio(priv, oclass->zcull);
+	nvc0_graph_mmio(priv, oclass->tpc);
+	nvc0_graph_mmio(priv, oclass->ppc);
 
 	nv_wr32(priv, 0x404154, 0x00000000);
 
@@ -962,41 +997,6 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 	nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
 }
 
-static struct nvc0_graph_init *
-nve4_grctx_init_hub[] = {
-	nvc0_grctx_init_base,
-	nve4_grctx_init_unk40xx,
-	nvc0_grctx_init_unk44xx,
-	nve4_grctx_init_unk46xx,
-	nve4_grctx_init_unk47xx,
-	nve4_grctx_init_unk58xx,
-	nve4_grctx_init_unk5bxx,
-	nve4_grctx_init_unk60xx,
-	nve4_grctx_init_unk64xx,
-	nve4_grctx_init_unk70xx,
-	nvc0_grctx_init_unk78xx,
-	nve4_grctx_init_unk80xx,
-	nve4_grctx_init_rop,
-	NULL
-};
-
-struct nvc0_graph_init *
-nve4_grctx_init_gpc[] = {
-	nve4_grctx_init_gpc_0,
-	nvc0_grctx_init_gpc_1,
-	nve4_grctx_init_tpc,
-	nve4_grctx_init_unk,
-	NULL
-};
-
-static struct nvc0_graph_mthd
-nve4_grctx_init_mthd[] = {
-	{ 0xa097, nve4_grctx_init_a097, },
-	{ 0x902d, nvc0_grctx_init_902d, },
-	{ 0x902d, nvc0_grctx_init_mthd_magic, },
-	{}
-};
-
 struct nouveau_oclass *
 nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
 	.base.handle = NV_ENGCTX(GR, 0xe4),
@@ -1008,11 +1008,14 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
 		.rd32 = _nouveau_graph_context_rd32,
 		.wr32 = _nouveau_graph_context_wr32,
 	},
 	.main = nve4_grctx_generate_main,
 	.mods = nve4_grctx_generate_mods,
 	.unkn = nve4_grctx_generate_unkn,
-	.hub = nve4_grctx_init_hub,
-	.gpc = nve4_grctx_init_gpc,
-	.icmd = nve4_grctx_init_icmd,
-	.mthd = nve4_grctx_init_mthd,
+	.hub = nve4_grctx_pack_hub,
+	.gpc = nve4_grctx_pack_gpc,
+	.zcull = nvc0_grctx_pack_zcull,
+	.tpc = nve4_grctx_pack_tpc,
+	.ppc = nve4_grctx_pack_ppc,
+	.icmd = nve4_grctx_pack_icmd,
+	.mthd = nve4_grctx_pack_mthd,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index 44012c3da538..0fab95e49f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -22,10 +22,580 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "nvc0.h"
+#include "ctxnvc0.h"
 
-static struct nvc0_graph_init
-nvf0_grctx_init_unk40xx[] = {
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_icmd_0[] = {
+	{ 0x001000, 1, 0x01, 0x00000004 },
+	{ 0x000039, 3, 0x01, 0x00000000 },
+	{ 0x0000a9, 1, 0x01, 0x0000ffff },
+	{ 0x000038, 1, 0x01, 0x0fac6881 },
+	{ 0x00003d, 1, 0x01, 0x00000001 },
+	{ 0x0000e8, 8, 0x01, 0x00000400 },
+	{ 0x000078, 8, 0x01, 0x00000300 },
+	{ 0x000050, 1, 0x01, 0x00000011 },
+	{ 0x000058, 8, 0x01, 0x00000008 },
+	{ 0x000208, 8, 0x01, 0x00000001 },
+	{ 0x000081, 1, 0x01, 0x00000001 },
+	{ 0x000085, 1, 0x01, 0x00000004 },
+	{ 0x000088, 1, 0x01, 0x00000400 },
+	{ 0x000090, 1, 0x01, 0x00000300 },
+	{ 0x000098, 1, 0x01, 0x00001001 },
+	{ 0x0000e3, 1, 0x01, 0x00000001 },
+	{ 0x0000da, 1, 0x01, 0x00000001 },
+	{ 0x0000f8, 1, 0x01, 0x00000003 },
+	{ 0x0000fa, 1, 0x01, 0x00000001 },
+	{ 0x00009f, 4, 0x01, 0x0000ffff },
+	{ 0x0000b1, 1, 0x01, 0x00000001 },
+	{ 0x0000ad, 1, 0x01, 0x0000013e },
+	{ 0x0000e1, 1, 0x01, 0x00000010 },
+	{ 0x000290, 16, 0x01, 0x00000000 },
+	{ 0x0003b0, 16, 0x01, 0x00000000 },
+	{ 0x0002a0, 16, 0x01, 0x00000000 },
+	{ 0x000420, 16, 0x01, 0x00000000 },
+	{ 0x0002b0, 16, 0x01, 0x00000000 },
+	{ 0x000430, 16, 0x01, 0x00000000 },
+	{ 0x0002c0, 16, 0x01, 0x00000000 },
+	{ 0x0004d0, 16, 0x01, 0x00000000 },
+	{ 0x000720, 16, 0x01, 0x00000000 },
+	{ 0x0008c0, 16, 0x01, 0x00000000 },
+	{ 0x000890, 16, 0x01, 0x00000000 },
+	{ 0x0008e0, 16, 0x01, 0x00000000 },
+	{ 0x0008a0, 16, 0x01, 0x00000000 },
+	{ 0x0008f0, 16, 0x01, 0x00000000 },
+	{ 0x00094c, 1, 0x01, 0x000000ff },
+	{ 0x00094d, 1, 0x01, 0xffffffff },
+	{ 0x00094e, 1, 0x01, 0x00000002 },
+	{ 0x0002ec, 1, 0x01, 0x00000001 },
+	{ 0x0002f2, 2, 0x01, 0x00000001 },
+	{ 0x0002f5, 1, 0x01, 0x00000001 },
+	{ 0x0002f7, 1, 0x01, 0x00000001 },
+	{ 0x000303, 1, 0x01, 0x00000001 },
+	{ 0x0002e6, 1, 0x01, 0x00000001 },
+	{ 0x000466, 1, 0x01, 0x00000052 },
+	{ 0x000301, 1, 0x01, 0x3f800000 },
+	{ 0x000304, 1, 0x01, 0x30201000 },
+	{ 0x000305, 1, 0x01, 0x70605040 },
+	{ 0x000306, 1, 0x01, 0xb8a89888 },
+	{ 0x000307, 1, 0x01, 0xf8e8d8c8 },
+	{ 0x00030a, 1, 0x01, 0x00ffff00 },
+	{ 0x00030b, 1, 0x01, 0x0000001a },
+	{ 0x00030c, 1, 0x01, 0x00000001 },
+	{ 0x000318, 1, 0x01, 0x00000001 },
+	{ 0x000340, 1, 0x01, 0x00000000 },
+	{ 0x000375, 1, 0x01, 0x00000001 },
+	{ 0x00037d, 1, 0x01, 0x00000006 },
+	{ 0x0003a0, 1, 0x01, 0x00000002 },
+	{ 0x0003aa, 1, 0x01, 0x00000001 },
+	{ 0x0003a9, 1, 0x01, 0x00000001 },
+	{ 0x000380, 1, 0x01, 0x00000001 },
+	{ 0x000383, 1, 0x01, 0x00000011 },
+	{ 0x000360, 1, 0x01, 0x00000040 },
+	{ 0x000366, 2, 0x01, 0x00000000 },
+	{ 0x000368, 1, 0x01, 0x00000fff },
+	{ 0x000370, 2, 0x01, 0x00000000 },
+	{ 0x000372, 1, 0x01, 0x000fffff },
+	{ 0x00037a, 1, 0x01, 0x00000012 },
+	{ 0x000619, 1, 0x01, 0x00000003 },
+	{ 0x000811, 1, 0x01, 0x00000003 },
+	{ 0x000812, 1, 0x01, 0x00000004 },
+	{ 0x000813, 1, 0x01, 0x00000006 },
+	{ 0x000814, 1, 0x01, 0x00000008 },
+	{ 0x000815, 1, 0x01, 0x0000000b },
+	{ 0x000800, 6, 0x01, 0x00000001 },
+	{ 0x000632, 1, 0x01, 0x00000001 },
+	{ 0x000633, 1, 0x01, 0x00000002 },
+	{ 0x000634, 1, 0x01, 0x00000003 },
+	{ 0x000635, 1, 0x01, 0x00000004 },
+	{ 0x000654, 1, 0x01, 0x3f800000 },
+	{ 0x000657, 1, 0x01, 0x3f800000 },
+	{ 0x000655, 2, 0x01, 0x3f800000 },
+	{ 0x0006cd, 1, 0x01, 0x3f800000 },
+	{ 0x0007f5, 1, 0x01, 0x3f800000 },
+	{ 0x0007dc, 1, 0x01, 0x39291909 },
+	{ 0x0007dd, 1, 0x01, 0x79695949 },
+	{ 0x0007de, 1, 0x01, 0xb9a99989 },
+	{ 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+	{ 0x0007e8, 1, 0x01, 0x00003210 },
+	{ 0x0007e9, 1, 0x01, 0x00007654 },
+	{ 0x0007ea, 1, 0x01, 0x00000098 },
+	{ 0x0007ec, 1, 0x01, 0x39291909 },
+	{ 0x0007ed, 1, 0x01, 0x79695949 },
+	{ 0x0007ee, 1, 0x01, 0xb9a99989 },
+	{ 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+	{ 0x0007f0, 1, 0x01, 0x00003210 },
+	{ 0x0007f1, 1, 0x01, 0x00007654 },
+	{ 0x0007f2, 1, 0x01, 0x00000098 },
+	{ 0x0005a5, 1, 0x01, 0x00000001 },
+	{ 0x000980, 128, 0x01, 0x00000000 },
+	{ 0x000468, 1, 0x01, 0x00000004 },
+	{ 0x00046c, 1, 0x01, 0x00000001 },
+	{ 0x000470, 96, 0x01, 0x00000000 },
+	{ 0x000510, 16, 0x01, 0x3f800000 },
+	{ 0x000520, 1, 0x01, 0x000002b6 },
+	{ 0x000529, 1, 0x01, 0x00000001 },
+	{ 0x000530, 16, 0x01, 0xffff0000 },
+	{ 0x000585, 1, 0x01, 0x0000003f },
+	{ 0x000576, 1, 0x01, 0x00000003 },
+	{ 0x00057b, 1, 0x01, 0x00000059 },
+	{ 0x000586, 1, 0x01, 0x00000040 },
+	{ 0x000582, 2, 0x01, 0x00000080 },
+	{ 0x0005c2, 1, 0x01, 0x00000001 },
+	{ 0x000638, 2, 0x01, 0x00000001 },
+	{ 0x00063a, 1, 0x01, 0x00000002 },
+	{ 0x00063b, 2, 0x01, 0x00000001 },
+	{ 0x00063d, 1, 0x01, 0x00000002 },
+	{ 0x00063e, 1, 0x01, 0x00000001 },
+	{ 0x0008b8, 8, 0x01, 0x00000001 },
+	{ 0x000900, 8, 0x01, 0x00000001 },
+	{ 0x000908, 8, 0x01, 0x00000002 },
+	{ 0x000910, 16, 0x01, 0x00000001 },
+	{ 0x000920, 8, 0x01, 0x00000002 },
+	{ 0x000928, 8, 0x01, 0x00000001 },
+	{ 0x000662, 1, 0x01, 0x00000001 },
+	{ 0x000648, 9, 0x01, 0x00000001 },
+	{ 0x000658, 1, 0x01, 0x0000000f },
+	{ 0x0007ff, 1, 0x01, 0x0000000a },
+	{ 0x00066a, 1, 0x01, 0x40000000 },
+	{ 0x00066b, 1, 0x01, 0x10000000 },
+	{ 0x00066c, 2, 0x01, 0xffff0000 },
+	{ 0x0007af, 2, 0x01, 0x00000008 },
+	{ 0x0007f6, 1, 0x01, 0x00000001 },
+	{ 0x00080b, 1, 0x01, 0x00000002 },
+	{ 0x0006b2, 1, 0x01, 0x00000055 },
+	{ 0x0007ad, 1, 0x01, 0x00000003 },
+	{ 0x000937, 1, 0x01, 0x00000001 },
+	{ 0x000971, 1, 0x01, 0x00000008 },
+	{ 0x000972, 1, 0x01, 0x00000040 },
+	{ 0x000973, 1, 0x01, 0x0000012c },
+	{ 0x00097c, 1, 0x01, 0x00000040 },
+	{ 0x000979, 1, 0x01, 0x00000003 },
+	{ 0x000975, 1, 0x01, 0x00000020 },
+	{ 0x000976, 1, 0x01, 0x00000001 },
+	{ 0x000977, 1, 0x01, 0x00000020 },
+	{ 0x000978, 1, 0x01, 0x00000001 },
+	{ 0x000957, 1, 0x01, 0x00000003 },
+	{ 0x00095e, 1, 0x01, 0x20164010 },
+	{ 0x00095f, 1, 0x01, 0x00000020 },
+	{ 0x000a0d, 1, 0x01, 0x00000006 },
+	{ 0x00097d, 1, 0x01, 0x00000020 },
+	{ 0x000683, 1, 0x01, 0x00000006 },
+	{ 0x000685, 1, 0x01, 0x003fffff },
+	{ 0x000687, 1, 0x01, 0x003fffff },
+	{ 0x0006a0, 1, 0x01, 0x00000005 },
+	{ 0x000840, 1, 0x01, 0x00400008 },
+	{ 0x000841, 1, 0x01, 0x08000080 },
+	{ 0x000842, 1, 0x01, 0x00400008 },
+	{ 0x000843, 1, 0x01, 0x08000080 },
+	{ 0x0006aa, 1, 0x01, 0x00000001 },
+	{ 0x0006ab, 1, 0x01, 0x00000002 },
+	{ 0x0006ac, 1, 0x01, 0x00000080 },
+	{ 0x0006ad, 2, 0x01, 0x00000100 },
+	{ 0x0006b1, 1, 0x01, 0x00000011 },
+	{ 0x0006bb, 1, 0x01, 0x000000cf },
+	{ 0x0006ce, 1, 0x01, 0x2a712488 },
+	{ 0x000739, 1, 0x01, 0x4085c000 },
+	{ 0x00073a, 1, 0x01, 0x00000080 },
+	{ 0x000786, 1, 0x01, 0x80000100 },
+	{ 0x00073c, 1, 0x01, 0x00010100 },
+	{ 0x00073d, 1, 0x01, 0x02800000 },
+	{ 0x000787, 1, 0x01, 0x000000cf },
+	{ 0x00078c, 1, 0x01, 0x00000008 },
+	{ 0x000792, 1, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
+	{ 0x000797, 1, 0x01, 0x000000cf },
+	{ 0x000836, 1, 0x01, 0x00000001 },
+	{ 0x00079a, 1, 0x01, 0x00000002 },
+	{ 0x000833, 1, 0x01, 0x04444480 },
+	{ 0x0007a1, 1, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
+	{ 0x000831, 1, 0x01, 0x00000004 },
+	{ 0x000b07, 1, 0x01, 0x00000002 },
+	{ 0x000b08, 2, 0x01, 0x00000100 },
+	{ 0x000b0a, 1, 0x01, 0x00000001 },
+	{ 0x000a04, 1, 0x01, 0x000000ff },
+	{ 0x000a0b, 1, 0x01, 0x00000040 },
+	{ 0x00097f, 1, 0x01, 0x00000100 },
+	{ 0x000a02, 1, 0x01, 0x00000001 },
+	{ 0x000809, 1, 0x01, 0x00000007 },
+	{ 0x00c221, 1, 0x01, 0x00000040 },
+	{ 0x00c1b0, 8, 0x01, 0x0000000f },
+	{ 0x00c1b8, 1, 0x01, 0x0fac6881 },
+	{ 0x00c1b9, 1, 0x01, 0x00fac688 },
+	{ 0x00c401, 1, 0x01, 0x00000001 },
+	{ 0x00c402, 1, 0x01, 0x00010001 },
+	{ 0x00c403, 2, 0x01, 0x00000001 },
+	{ 0x00c40e, 1, 0x01, 0x00000020 },
+	{ 0x00c500, 1, 0x01, 0x00000003 },
+	{ 0x01e100, 1, 0x01, 0x00000001 },
+	{ 0x001000, 1, 0x01, 0x00000002 },
+	{ 0x0006aa, 1, 0x01, 0x00000001 },
+	{ 0x0006ad, 2, 0x01, 0x00000100 },
+	{ 0x0006b1, 1, 0x01, 0x00000011 },
+	{ 0x00078c, 1, 0x01, 0x00000008 },
+	{ 0x000792, 1, 0x01, 0x00000001 },
+	{ 0x000794, 3, 0x01, 0x00000001 },
+	{ 0x000797, 1, 0x01, 0x000000cf },
+	{ 0x00079a, 1, 0x01, 0x00000002 },
+	{ 0x000833, 1, 0x01, 0x04444480 },
+	{ 0x0007a1, 1, 0x01, 0x00000001 },
+	{ 0x0007a3, 3, 0x01, 0x00000001 },
+	{ 0x000831, 1, 0x01, 0x00000004 },
+	{ 0x01e100, 1, 0x01, 0x00000001 },
+	{ 0x001000, 1, 0x01, 0x00000008 },
+	{ 0x000039, 3, 0x01, 0x00000000 },
+	{ 0x000380, 1, 0x01, 0x00000001 },
+	{ 0x000366, 2, 0x01, 0x00000000 },
+	{ 0x000368, 1, 0x01, 0x00000fff },
+	{ 0x000370, 2, 0x01, 0x00000000 },
+	{ 0x000372, 1, 0x01, 0x000fffff },
+	{ 0x000813, 1, 0x01, 0x00000006 },
+	{ 0x000814, 1, 0x01, 0x00000008 },
+	{ 0x000957, 1, 0x01, 0x00000003 },
+	{ 0x000b07, 1, 0x01, 0x00000002 },
+	{ 0x000b08, 2, 0x01, 0x00000100 },
+	{ 0x000b0a, 1, 0x01, 0x00000001 },
+	{ 0x000a04, 1, 0x01, 0x000000ff },
+	{ 0x000a0b, 1, 0x01, 0x00000040 },
+	{ 0x00097f, 1, 0x01, 0x00000100 },
+	{ 0x000a02, 1, 0x01, 0x00000001 },
+	{ 0x000809, 1, 0x01, 0x00000007 },
+	{ 0x00c221, 1, 0x01, 0x00000040 },
+	{ 0x00c401, 1, 0x01, 0x00000001 },
+	{ 0x00c402, 1, 0x01, 0x00010001 },
+	{ 0x00c403, 2, 0x01, 0x00000001 },
+	{ 0x00c40e, 1, 0x01, 0x00000020 },
+	{ 0x00c500, 1, 0x01, 0x00000003 },
+	{ 0x01e100, 1, 0x01, 0x00000001 },
+	{ 0x001000, 1, 0x01, 0x00000001 },
+	{ 0x000b07, 1, 0x01, 0x00000002 },
+	{ 0x000b08, 2, 0x01, 0x00000100 },
+	{ 0x000b0a, 1, 0x01, 0x00000001 },
+	{ 0x01e100, 1, 0x01, 0x00000001 },
+	{}
+};
+
+static const struct nvc0_graph_pack
+nvf0_grctx_pack_icmd[] = {
+	{ nvf0_grctx_init_icmd_0 },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_a197_0[] = {
+	{ 0x000800, 8, 0x40, 0x00000000 },
+	{ 0x000804, 8, 0x40, 0x00000000 },
+	{ 0x000808, 8, 0x40, 0x00000400 },
+	{ 0x00080c, 8, 0x40, 0x00000300 },
+	{ 0x000810, 1, 0x04, 0x000000cf },
+	{ 0x000850, 7, 0x40, 0x00000000 },
+	{ 0x000814, 8, 0x40, 0x00000040 },
+	{ 0x000818, 8, 0x40, 0x00000001 },
+	{ 0x00081c, 8, 0x40, 0x00000000 },
+	{ 0x000820, 8, 0x40, 0x00000000 },
+	{ 0x001c00, 16, 0x10, 0x00000000 },
+	{ 0x001c04, 16, 0x10, 0x00000000 },
+	{ 0x001c08, 16, 0x10, 0x00000000 },
+	{ 0x001c0c, 16, 0x10, 0x00000000 },
+	{ 0x001d00, 16, 0x10, 0x00000000 },
+	{ 0x001d04, 16, 0x10, 0x00000000 },
+	{ 0x001d08, 16, 0x10, 0x00000000 },
+	{ 0x001d0c, 16, 0x10, 0x00000000 },
+	{ 0x001f00, 16, 0x08, 0x00000000 },
+	{ 0x001f04, 16, 0x08, 0x00000000 },
+	{ 0x001f80, 16, 0x08, 0x00000000 },
+	{ 0x001f84, 16, 0x08, 0x00000000 },
+	{ 0x002000, 1, 0x04, 0x00000000 },
+	{ 0x002040, 1, 0x04, 0x00000011 },
+	{ 0x002080, 1, 0x04, 0x00000020 },
+	{ 0x0020c0, 1, 0x04, 0x00000030 },
+	{ 0x002100, 1, 0x04, 0x00000040 },
+	{ 0x002140, 1, 0x04, 0x00000051 },
+	{ 0x00200c, 6, 0x40, 0x00000001 },
+	{ 0x002010, 1, 0x04, 0x00000000 },
+	{ 0x002050, 1, 0x04, 0x00000000 },
+	{ 0x002090, 1, 0x04, 0x00000001 },
+	{ 0x0020d0, 1, 0x04, 0x00000002 },
+	{ 0x002110, 1, 0x04, 0x00000003 },
+	{ 0x002150, 1, 0x04, 0x00000004 },
+	{ 0x000380, 4, 0x20, 0x00000000 },
+	{ 0x000384, 4, 0x20, 0x00000000 },
+	{ 0x000388, 4, 0x20, 0x00000000 },
+	{ 0x00038c, 4, 0x20, 0x00000000 },
+	{ 0x000700, 4, 0x10, 0x00000000 },
+	{ 0x000704, 4, 0x10, 0x00000000 },
+	{ 0x000708, 4, 0x10, 0x00000000 },
+	{ 0x002800, 128, 0x04, 0x00000000 },
+	{ 0x000a00, 16, 0x20, 0x00000000 },
+	{ 0x000a04, 16, 0x20, 0x00000000 },
+	{ 0x000a08, 16, 0x20, 0x00000000 },
+	{ 0x000a0c, 16, 0x20, 0x00000000 },
+	{ 0x000a10, 16, 0x20, 0x00000000 },
+	{ 0x000a14, 16, 0x20, 0x00000000 },
+	{ 0x000c00, 16, 0x10, 0x00000000 },
+	{ 0x000c04, 16, 0x10, 0x00000000 },
+	{ 0x000c08, 16, 0x10, 0x00000000 },
+	{ 0x000c0c, 16, 0x10, 0x3f800000 },
+	{ 0x000d00, 8, 0x08, 0xffff0000 },
+	{ 0x000d04, 8, 0x08, 0xffff0000 },
+	{ 0x000e00, 16, 0x10, 0x00000000 },
+	{ 0x000e04, 16, 0x10, 0xffff0000 },
+	{ 0x000e08, 16, 0x10, 0xffff0000 },
+	{ 0x000d40, 4, 0x08, 0x00000000 },
+	{ 0x000d44, 4, 0x08, 0x00000000 },
+	{ 0x001e00, 8, 0x20, 0x00000001 },
+	{ 0x001e04, 8, 0x20, 0x00000001 },
+	{ 0x001e08, 8, 0x20, 0x00000002 },
+	{ 0x001e0c, 8, 0x20, 0x00000001 },
+	{ 0x001e10, 8, 0x20, 0x00000001 },
+	{ 0x001e14, 8, 0x20, 0x00000002 },
+	{ 0x001e18, 8, 0x20, 0x00000001 },
+	{ 0x003400, 128, 0x04, 0x00000000 },
+	{ 0x00030c, 1, 0x04, 0x00000001 },
+	{ 0x001944, 1, 0x04, 0x00000000 },
+	{ 0x001514, 1, 0x04, 0x00000000 },
+	{ 0x000d68, 1, 0x04, 0x0000ffff },
+	{ 0x00121c, 1, 0x04, 0x0fac6881 },
+	{ 0x000fac, 1, 0x04, 0x00000001 },
+	{ 0x001538, 1, 0x04, 0x00000001 },
+	{ 0x000fe0, 2, 0x04, 0x00000000 },
+	{ 0x000fe8, 1, 0x04, 0x00000014 },
+	{ 0x000fec, 1, 0x04, 0x00000040 },
+	{ 0x000ff0, 1, 0x04, 0x00000000 },
+	{ 0x00179c, 1, 0x04, 0x00000000 },
+	{ 0x001228, 1, 0x04, 0x00000400 },
+	{ 0x00122c, 1, 0x04, 0x00000300 },
+	{ 0x001230, 1, 0x04, 0x00010001 },
+	{ 0x0007f8, 1, 0x04, 0x00000000 },
+	{ 0x0015b4, 1, 0x04, 0x00000001 },
+	{ 0x0015cc, 1, 0x04, 0x00000000 },
+	{ 0x001534, 1, 0x04, 0x00000000 },
+	{ 0x000fb0, 1, 0x04, 0x00000000 },
+	{ 0x0015d0, 1, 0x04, 0x00000000 },
+	{ 0x00153c, 1, 0x04, 0x00000000 },
+	{ 0x0016b4, 1, 0x04, 0x00000003 },
+	{ 0x000fbc, 4, 0x04, 0x0000ffff },
+	{ 0x000df8, 2, 0x04, 0x00000000 },
+	{ 0x001948, 1, 0x04, 0x00000000 },
+	{ 0x001970, 1, 0x04, 0x00000001 },
+	{ 0x00161c, 1, 0x04, 0x000009f0 },
+	{ 0x000dcc, 1, 0x04, 0x00000010 },
+	{ 0x00163c, 1, 0x04, 0x00000000 },
+	{ 0x0015e4, 1, 0x04, 0x00000000 },
+	{ 0x001160, 32, 0x04, 0x25e00040 },
+	{ 0x001880, 32, 0x04, 0x00000000 },
+	{ 0x000f84, 2, 0x04, 0x00000000 },
+	{ 0x0017c8, 2, 0x04, 0x00000000 },
+	{ 0x0017d0, 1, 0x04, 0x000000ff },
+	{ 0x0017d4, 1, 0x04, 0xffffffff },
+	{ 0x0017d8, 1, 0x04, 0x00000002 },
+	{ 0x0017dc, 1, 0x04, 0x00000000 },
+	{ 0x0015f4, 2, 0x04, 0x00000000 },
+	{ 0x001434, 2, 0x04, 0x00000000 },
+	{ 0x000d74, 1, 0x04, 0x00000000 },
+	{ 0x000dec, 1, 0x04, 0x00000001 },
+	{ 0x0013a4, 1, 0x04, 0x00000000 },
+	{ 0x001318, 1, 0x04, 0x00000001 },
+	{ 0x001644, 1, 0x04, 0x00000000 },
+	{ 0x000748, 1, 0x04, 0x00000000 },
+	{ 0x000de8, 1, 0x04, 0x00000000 },
+	{ 0x001648, 1, 0x04, 0x00000000 },
+	{ 0x0012a4, 1, 0x04, 0x00000000 },
+	{ 0x001120, 4, 0x04, 0x00000000 },
+	{ 0x001118, 1, 0x04, 0x00000000 },
+	{ 0x00164c, 1, 0x04, 0x00000000 },
+	{ 0x001658, 1, 0x04, 0x00000000 },
+	{ 0x001910, 1, 0x04, 0x00000290 },
+	{ 0x001518, 1, 0x04, 0x00000000 },
+	{ 0x00165c, 1, 0x04, 0x00000001 },
+	{ 0x001520, 1, 0x04, 0x00000000 },
+	{ 0x001604, 1, 0x04, 0x00000000 },
+	{ 0x001570, 1, 0x04, 0x00000000 },
+	{ 0x0013b0, 2, 0x04, 0x3f800000 },
+	{ 0x00020c, 1, 0x04, 0x00000000 },
+	{ 0x001670, 1, 0x04, 0x30201000 },
+	{ 0x001674, 1, 0x04, 0x70605040 },
+	{ 0x001678, 1, 0x04, 0xb8a89888 },
+	{ 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+	{ 0x00166c, 1, 0x04, 0x00000000 },
+	{ 0x001680, 1, 0x04, 0x00ffff00 },
+	{ 0x0012d0, 1, 0x04, 0x00000003 },
+	{ 0x0012d4, 1, 0x04, 0x00000002 },
+	{ 0x001684, 2, 0x04, 0x00000000 },
+	{ 0x000dac, 2, 0x04, 0x00001b02 },
+	{ 0x000db4, 1, 0x04, 0x00000000 },
+	{ 0x00168c, 1, 0x04, 0x00000000 },
+	{ 0x0015bc, 1, 0x04, 0x00000000 },
+	{ 0x00156c, 1, 0x04, 0x00000000 },
+	{ 0x00187c, 1, 0x04, 0x00000000 },
+	{ 0x001110, 1, 0x04, 0x00000001 },
+	{ 0x000dc0, 3, 0x04, 0x00000000 },
+	{ 0x001234, 1, 0x04, 0x00000000 },
+	{ 0x001690, 1, 0x04, 0x00000000 },
+	{ 0x0012ac, 1, 0x04, 0x00000001 },
+	{ 0x0002c4, 1, 0x04, 0x00000000 },
+	{ 0x000790, 5, 0x04, 0x00000000 },
+	{ 0x00077c, 1, 0x04, 0x00000000 },
+	{ 0x001000, 1, 0x04, 0x00000010 },
+	{ 0x0010fc, 1, 0x04, 0x00000000 },
+	{ 0x001290, 1, 0x04, 0x00000000 },
+	{ 0x000218, 1, 0x04, 0x00000010 },
+	{ 0x0012d8, 1, 0x04, 0x00000000 },
+	{ 0x0012dc, 1, 0x04, 0x00000010 },
+	{ 0x000d94, 1, 0x04, 0x00000001 },
+	{ 0x00155c, 2, 0x04, 0x00000000 },
+	{ 0x001564, 1, 0x04, 0x00000fff },
+	{ 0x001574, 2, 0x04, 0x00000000 },
+	{ 0x00157c, 1, 0x04, 0x000fffff },
+	{ 0x001354, 1, 0x04, 0x00000000 },
+	{ 0x001610, 1, 0x04, 0x00000012 },
+	{ 0x001608, 2, 0x04, 0x00000000 },
+	{ 0x00260c, 1, 0x04, 0x00000000 },
+	{ 0x0007ac, 1, 0x04, 0x00000000 },
+	{ 0x00162c, 1, 0x04, 0x00000003 },
+	{ 0x000210, 1, 0x04, 0x00000000 },
+	{ 0x000320, 1, 0x04, 0x00000000 },
+	{ 0x000324, 6, 0x04, 0x3f800000 },
+	{ 0x000750, 1, 0x04, 0x00000000 },
+	{ 0x000760, 1, 0x04, 0x39291909 },
+	{ 0x000764, 1, 0x04, 0x79695949 },
+	{ 0x000768, 1, 0x04, 0xb9a99989 },
+	{ 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+	{ 0x000770, 1, 0x04, 0x30201000 },
+	{ 0x000774, 1, 0x04, 0x70605040 },
+	{ 0x000778, 1, 0x04, 0x00009080 },
+	{ 0x000780, 1, 0x04, 0x39291909 },
+	{ 0x000784, 1, 0x04, 0x79695949 },
+	{ 0x000788, 1, 0x04, 0xb9a99989 },
+	{ 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+	{ 0x0007d0, 1, 0x04, 0x30201000 },
+	{ 0x0007d4, 1, 0x04, 0x70605040 },
+	{ 0x0007d8, 1, 0x04, 0x00009080 },
+	{ 0x00037c, 1, 0x04, 0x00000001 },
+	{ 0x000740, 2, 0x04, 0x00000000 },
+	{ 0x002600, 1, 0x04, 0x00000000 },
+	{ 0x001918, 1, 0x04, 0x00000000 },
+	{ 0x00191c, 1, 0x04, 0x00000900 },
+	{ 0x001920, 1, 0x04, 0x00000405 },
+	{ 0x001308, 1, 0x04, 0x00000001 },
+	{ 0x001924, 1, 0x04, 0x00000000 },
+	{ 0x0013ac, 1, 0x04, 0x00000000 },
+	{ 0x00192c, 1, 0x04, 0x00000001 },
+	{ 0x00193c, 1, 0x04, 0x00002c1c },
+	{ 0x000d7c, 1, 0x04, 0x00000000 },
+	{ 0x000f8c, 1, 0x04, 0x00000000 },
+	{ 0x0002c0, 1, 0x04, 0x00000001 },
+	{ 0x001510, 1, 0x04, 0x00000000 },
+	{ 0x001940, 1, 0x04, 0x00000000 },
+	{ 0x000ff4, 2, 0x04, 0x00000000 },
+	{ 0x00194c, 2, 0x04, 0x00000000 },
+	{ 0x001968, 1, 0x04, 0x00000000 },
+	{ 0x001590, 1, 0x04, 0x0000003f },
+	{ 0x0007e8, 4, 0x04, 0x00000000 },
+	{ 0x00196c, 1, 0x04, 0x00000011 },
+	{ 0x0002e4, 1, 0x04, 0x0000b001 },
+	{ 0x00036c, 2, 0x04, 0x00000000 },
+	{ 0x00197c, 1, 0x04, 0x00000000 },
+	{ 0x000fcc, 2, 0x04, 0x00000000 },
+	{ 0x0002d8, 1, 0x04, 0x00000040 },
+	{ 0x001980, 1, 0x04, 0x00000080 },
+	{ 0x001504, 1, 0x04, 0x00000080 },
+	{ 0x001984, 1, 0x04, 0x00000000 },
+	{ 0x000300, 1, 0x04, 0x00000001 },
+	{ 0x0013a8, 1, 0x04, 0x00000000 },
+	{ 0x0012ec, 1, 0x04, 0x00000000 },
+	{ 0x001310, 1, 0x04, 0x00000000 },
+	{ 0x001314, 1, 0x04, 0x00000001 },
+	{ 0x001380, 1, 0x04, 0x00000000 },
+	{ 0x001384, 4, 0x04, 0x00000001 },
+	{ 0x001394, 1, 0x04, 0x00000000 },
+	{ 0x00139c, 1, 0x04, 0x00000000 },
+	{ 0x001398, 1, 0x04, 0x00000000 },
+	{ 0x001594, 1, 0x04, 0x00000000 },
+	{ 0x001598, 4, 0x04, 0x00000001 },
+	{ 0x000f54, 3, 0x04, 0x00000000 },
+	{ 0x0019bc, 1, 0x04, 0x00000000 },
+	{ 0x000f9c, 2, 0x04, 0x00000000 },
+	{ 0x0012cc, 1, 0x04, 0x00000000 },
+	{ 0x0012e8, 1, 0x04, 0x00000000 },
+	{ 0x00130c, 1, 0x04, 0x00000001 },
+	{ 0x001360, 8, 0x04, 0x00000000 },
+	{ 0x00133c, 2, 0x04, 0x00000001 },
+	{ 0x001344, 1, 0x04, 0x00000002 },
+	{ 0x001348, 2, 0x04, 0x00000001 },
+	{ 0x001350, 1, 0x04, 0x00000002 },
+	{ 0x001358, 1, 0x04, 0x00000001 },
+	{ 0x0012e4, 1, 0x04, 0x00000000 },
+	{ 0x00131c, 4, 0x04, 0x00000000 },
+	{ 0x0019c0, 1, 0x04, 0x00000000 },
+	{ 0x001140, 1, 0x04, 0x00000000 },
+	{ 0x0019c4, 1, 0x04, 0x00000000 },
+	{ 0x0019c8, 1, 0x04, 0x00001500 },
+	{ 0x00135c, 1, 0x04, 0x00000000 },
+	{ 0x000f90, 1, 0x04, 0x00000000 },
+	{ 0x0019e0, 8, 0x04, 0x00000001 },
+	{ 0x0019cc, 1, 0x04, 0x00000001 },
+	{ 0x0015b8, 1, 0x04, 0x00000000 },
+	{ 0x001a00, 1, 0x04, 0x00001111 },
+	{ 0x001a04, 7, 0x04, 0x00000000 },
+	{ 0x000d6c, 2, 0x04, 0xffff0000 },
+	{ 0x0010f8, 1, 0x04, 0x00001010 },
+	{ 0x000d80, 5, 0x04, 0x00000000 },
+	{ 0x000da0, 1, 0x04, 0x00000000 },
+	{ 0x0007a4, 2, 0x04, 0x00000000 },
+	{ 0x001508, 1, 0x04, 0x80000000 },
+	{ 0x00150c, 1, 0x04, 0x40000000 },
+	{ 0x001668, 1, 0x04, 0x00000000 },
+	{ 0x000318, 2, 0x04, 0x00000008 },
+	{ 0x000d9c, 1, 0x04, 0x00000001 },
+	{ 0x000ddc, 1, 0x04, 0x00000002 },
+	{ 0x000374, 1, 0x04, 0x00000000 },
+	{ 0x000378, 1, 0x04, 0x00000020 },
+	{ 0x0007dc, 1, 0x04, 0x00000000 },
+	{ 0x00074c, 1, 0x04, 0x00000055 },
+	{ 0x001420, 1, 0x04, 0x00000003 },
+	{ 0x0017bc, 2, 0x04, 0x00000000 },
+	{ 0x0017c4, 1, 0x04, 0x00000001 },
+	{ 0x001008, 1, 0x04, 0x00000008 },
+	{ 0x00100c, 1, 0x04, 0x00000040 },
+	{ 0x001010, 1, 0x04, 0x0000012c },
+	{ 0x000d60, 1, 0x04, 0x00000040 },
+	{ 0x00075c, 1, 0x04, 0x00000003 },
+	{ 0x001018, 1, 0x04, 0x00000020 },
+	{ 0x00101c, 1, 0x04, 0x00000001 },
+	{ 0x001020, 1, 0x04, 0x00000020 },
+	{ 0x001024, 1, 0x04, 0x00000001 },
+	{ 0x001444, 3, 0x04, 0x00000000 },
+	{ 0x000360, 1, 0x04, 0x20164010 },
+	{ 0x000364, 1, 0x04, 0x00000020 },
+	{ 0x000368, 1, 0x04, 0x00000000 },
+	{ 0x000de4, 1, 0x04, 0x00000000 },
+	{ 0x000204, 1, 0x04, 0x00000006 },
+	{ 0x000208, 1, 0x04, 0x00000000 },
+	{ 0x0002cc, 2, 0x04, 0x003fffff },
+	{ 0x001220, 1, 0x04, 0x00000005 },
+	{ 0x000fdc, 1, 0x04, 0x00000000 },
+	{ 0x000f98, 1, 0x04, 0x00400008 },
+	{ 0x001284, 1, 0x04, 0x08000080 },
+	{ 0x001450, 1, 0x04, 0x00400008 },
+	{ 0x001454, 1, 0x04, 0x08000080 },
+	{ 0x000214, 1, 0x04, 0x00000000 },
+	{}
+};
+
+const struct nvc0_graph_pack
+nvf0_grctx_pack_mthd[] = {
+	{ nvf0_grctx_init_a197_0, 0xa197 },
+	{ nvc0_grctx_init_902d_0, 0x902d },
+	{}
+};
+
+static const struct nvc0_graph_init
+nvf0_grctx_init_fe_0[] = {
 	{ 0x404004, 8, 0x04, 0x00000000 },
 	{ 0x404024, 1, 0x04, 0x0000e000 },
 	{ 0x404028, 8, 0x04, 0x00000000 },
@@ -50,8 +620,8 @@ nvf0_grctx_init_unk40xx[] = {
50 {} 620 {}
51}; 621};
52 622
53struct nvc0_graph_init 623const struct nvc0_graph_init
54nvf0_grctx_init_unk44xx[] = { 624nvf0_grctx_init_pri_0[] = {
55 { 0x404404, 12, 0x04, 0x00000000 }, 625 { 0x404404, 12, 0x04, 0x00000000 },
56 { 0x404438, 1, 0x04, 0x00000000 }, 626 { 0x404438, 1, 0x04, 0x00000000 },
57 { 0x404460, 2, 0x04, 0x00000000 }, 627 { 0x404460, 2, 0x04, 0x00000000 },
@@ -62,23 +632,18 @@ nvf0_grctx_init_unk44xx[] = {
62 {} 632 {}
63}; 633};
64 634
65struct nvc0_graph_init 635const struct nvc0_graph_init
66nvf0_grctx_init_unk5bxx[] = { 636nvf0_grctx_init_cwd_0[] = {
67 { 0x405b00, 1, 0x04, 0x00000000 }, 637 { 0x405b00, 1, 0x04, 0x00000000 },
68 { 0x405b10, 1, 0x04, 0x00001000 }, 638 { 0x405b10, 1, 0x04, 0x00001000 },
69 { 0x405b20, 1, 0x04, 0x04000000 }, 639 { 0x405b20, 1, 0x04, 0x04000000 },
70 {} 640 {}
71}; 641};
72 642
73struct nvc0_graph_init 643static const struct nvc0_graph_init
74nvf0_grctx_init_unk60xx[] = { 644nvf0_grctx_init_pd_0[] = {
75 { 0x406020, 1, 0x04, 0x034103c1 }, 645 { 0x406020, 1, 0x04, 0x034103c1 },
76 { 0x406028, 4, 0x04, 0x00000001 }, 646 { 0x406028, 4, 0x04, 0x00000001 },
77 {}
78};
79
80static struct nvc0_graph_init
81nvf0_grctx_init_unk64xx[] = {
82 { 0x4064a8, 1, 0x04, 0x00000000 }, 647 { 0x4064a8, 1, 0x04, 0x00000000 },
83 { 0x4064ac, 1, 0x04, 0x00003fff }, 648 { 0x4064ac, 1, 0x04, 0x00003fff },
84 { 0x4064b0, 3, 0x04, 0x00000000 }, 649 { 0x4064b0, 3, 0x04, 0x00000000 },
@@ -90,8 +655,8 @@ nvf0_grctx_init_unk64xx[] = {
90 {} 655 {}
91}; 656};
92 657
93static struct nvc0_graph_init 658static const struct nvc0_graph_init
94nvf0_grctx_init_unk88xx[] = { 659nvf0_grctx_init_be_0[] = {
95 { 0x408800, 1, 0x04, 0x12802a3c }, 660 { 0x408800, 1, 0x04, 0x12802a3c },
96 { 0x408804, 1, 0x04, 0x00000040 }, 661 { 0x408804, 1, 0x04, 0x00000040 },
97 { 0x408808, 1, 0x04, 0x1003e005 }, 662 { 0x408808, 1, 0x04, 0x1003e005 },
@@ -103,22 +668,23 @@ nvf0_grctx_init_unk88xx[] = {
103 {} 668 {}
104}; 669};
105 670
106static struct nvc0_graph_init 671static const struct nvc0_graph_pack
107nvf0_grctx_init_gpc_0[] = { 672nvf0_grctx_pack_hub[] = {
108 { 0x418380, 1, 0x04, 0x00000016 }, 673 { nvc0_grctx_init_main_0 },
109 { 0x418400, 1, 0x04, 0x38004e00 }, 674 { nvf0_grctx_init_fe_0 },
110 { 0x418404, 1, 0x04, 0x71e0ffff }, 675 { nvf0_grctx_init_pri_0 },
111 { 0x41840c, 1, 0x04, 0x00001008 }, 676 { nve4_grctx_init_memfmt_0 },
112 { 0x418410, 1, 0x04, 0x0fff0fff }, 677 { nve4_grctx_init_ds_0 },
113 { 0x418414, 1, 0x04, 0x02200fff }, 678 { nvf0_grctx_init_cwd_0 },
114 { 0x418450, 6, 0x04, 0x00000000 }, 679 { nvf0_grctx_init_pd_0 },
115 { 0x418468, 1, 0x04, 0x00000001 }, 680 { nvc0_grctx_init_rstr2d_0 },
116 { 0x41846c, 2, 0x04, 0x00000000 }, 681 { nve4_grctx_init_scc_0 },
117 { 0x418600, 1, 0x04, 0x0000001f }, 682 { nvf0_grctx_init_be_0 },
118 { 0x418684, 1, 0x04, 0x0000000f }, 683 {}
119 { 0x418700, 1, 0x04, 0x00000002 }, 684};
120 { 0x418704, 1, 0x04, 0x00000080 }, 685
121 { 0x418708, 3, 0x04, 0x00000000 }, 686static const struct nvc0_graph_init
687nvf0_grctx_init_setup_0[] = {
122 { 0x418800, 1, 0x04, 0x7006860a }, 688 { 0x418800, 1, 0x04, 0x7006860a },
123 { 0x418808, 1, 0x04, 0x00000000 }, 689 { 0x418808, 1, 0x04, 0x00000000 },
124 { 0x41880c, 1, 0x04, 0x00000030 }, 690 { 0x41880c, 1, 0x04, 0x00000030 },
@@ -129,36 +695,31 @@ nvf0_grctx_init_gpc_0[] = {
129 { 0x4188e0, 1, 0x04, 0x01000000 }, 695 { 0x4188e0, 1, 0x04, 0x01000000 },
130 { 0x4188e8, 5, 0x04, 0x00000000 }, 696 { 0x4188e8, 5, 0x04, 0x00000000 },
131 { 0x4188fc, 1, 0x04, 0x20100018 }, 697 { 0x4188fc, 1, 0x04, 0x20100018 },
132 { 0x41891c, 1, 0x04, 0x00ff00ff }, 698 {}
133 { 0x418924, 1, 0x04, 0x00000000 }, 699};
134 { 0x418928, 1, 0x04, 0x00ffff00 }, 700
135 { 0x41892c, 1, 0x04, 0x0000ff00 }, 701const struct nvc0_graph_init
136 { 0x418b00, 1, 0x04, 0x00000006 }, 702nvf0_grctx_init_gpc_unk_2[] = {
137 { 0x418b08, 1, 0x04, 0x0a418820 },
138 { 0x418b0c, 1, 0x04, 0x062080e6 },
139 { 0x418b10, 1, 0x04, 0x020398a4 },
140 { 0x418b14, 1, 0x04, 0x0e629062 },
141 { 0x418b18, 1, 0x04, 0x0a418820 },
142 { 0x418b1c, 1, 0x04, 0x000000e6 },
143 { 0x418bb8, 1, 0x04, 0x00000103 },
144 { 0x418c08, 1, 0x04, 0x00000001 },
145 { 0x418c10, 8, 0x04, 0x00000000 },
146 { 0x418c40, 1, 0x04, 0xffffffff },
147 { 0x418c6c, 1, 0x04, 0x00000001 },
148 { 0x418c80, 1, 0x04, 0x20200004 },
149 { 0x418c8c, 1, 0x04, 0x00000001 },
150 { 0x418d24, 1, 0x04, 0x00000000 }, 703 { 0x418d24, 1, 0x04, 0x00000000 },
151 { 0x419000, 1, 0x04, 0x00000780 },
152 { 0x419004, 2, 0x04, 0x00000000 },
153 { 0x419014, 1, 0x04, 0x00000004 },
154 {} 704 {}
155}; 705};
156 706
157static struct nvc0_graph_init 707static const struct nvc0_graph_pack
158nvf0_grctx_init_tpc[] = { 708nvf0_grctx_pack_gpc[] = {
159 { 0x419848, 1, 0x04, 0x00000000 }, 709 { nvc0_grctx_init_gpc_unk_0 },
160 { 0x419864, 1, 0x04, 0x00000129 }, 710 { nvd9_grctx_init_prop_0 },
161 { 0x419888, 1, 0x04, 0x00000000 }, 711 { nvd9_grctx_init_gpc_unk_1 },
712 { nvf0_grctx_init_setup_0 },
713 { nvc0_grctx_init_zcull_0 },
714 { nvd9_grctx_init_crstr_0 },
715 { nve4_grctx_init_gpm_0 },
716 { nvf0_grctx_init_gpc_unk_2 },
717 { nvc0_grctx_init_gcc_0 },
718 {}
719};
720
721static const struct nvc0_graph_init
722nvf0_grctx_init_tex_0[] = {
162 { 0x419a00, 1, 0x04, 0x000000f0 }, 723 { 0x419a00, 1, 0x04, 0x000000f0 },
163 { 0x419a04, 1, 0x04, 0x00000001 }, 724 { 0x419a04, 1, 0x04, 0x00000001 },
164 { 0x419a08, 1, 0x04, 0x00000021 }, 725 { 0x419a08, 1, 0x04, 0x00000021 },
@@ -169,14 +730,29 @@ nvf0_grctx_init_tpc[] = {
169 { 0x419a20, 1, 0x04, 0x00020800 }, 730 { 0x419a20, 1, 0x04, 0x00020800 },
170 { 0x419a30, 1, 0x04, 0x00000001 }, 731 { 0x419a30, 1, 0x04, 0x00000001 },
171 { 0x419ac4, 1, 0x04, 0x0037f440 }, 732 { 0x419ac4, 1, 0x04, 0x0037f440 },
733 {}
734};
735
736const struct nvc0_graph_init
737nvf0_grctx_init_mpc_0[] = {
172 { 0x419c00, 1, 0x04, 0x0000001a }, 738 { 0x419c00, 1, 0x04, 0x0000001a },
173 { 0x419c04, 1, 0x04, 0x80000006 }, 739 { 0x419c04, 1, 0x04, 0x80000006 },
174 { 0x419c08, 1, 0x04, 0x00000002 }, 740 { 0x419c08, 1, 0x04, 0x00000002 },
175 { 0x419c20, 1, 0x04, 0x00000000 }, 741 { 0x419c20, 1, 0x04, 0x00000000 },
176 { 0x419c24, 1, 0x04, 0x00084210 }, 742 { 0x419c24, 1, 0x04, 0x00084210 },
177 { 0x419c28, 1, 0x04, 0x3efbefbe }, 743 { 0x419c28, 1, 0x04, 0x3efbefbe },
744 {}
745};
746
747const struct nvc0_graph_init
748nvf0_grctx_init_l1c_0[] = {
178 { 0x419ce8, 1, 0x04, 0x00000000 }, 749 { 0x419ce8, 1, 0x04, 0x00000000 },
179 { 0x419cf4, 1, 0x04, 0x00000203 }, 750 { 0x419cf4, 1, 0x04, 0x00000203 },
751 {}
752};
753
754static const struct nvc0_graph_init
755nvf0_grctx_init_sm_0[] = {
180 { 0x419e04, 1, 0x04, 0x00000000 }, 756 { 0x419e04, 1, 0x04, 0x00000000 },
181 { 0x419e08, 1, 0x04, 0x0000001d }, 757 { 0x419e08, 1, 0x04, 0x0000001d },
182 { 0x419e0c, 1, 0x04, 0x00000000 }, 758 { 0x419e0c, 1, 0x04, 0x00000000 },
@@ -189,8 +765,8 @@ nvf0_grctx_init_tpc[] = {
189 { 0x419e5c, 3, 0x04, 0x00000000 }, 765 { 0x419e5c, 3, 0x04, 0x00000000 },
190 { 0x419e68, 1, 0x04, 0x00000002 }, 766 { 0x419e68, 1, 0x04, 0x00000002 },
191 { 0x419e6c, 12, 0x04, 0x00000000 }, 767 { 0x419e6c, 12, 0x04, 0x00000000 },
192 { 0x419eac, 1, 0x04, 0x00001fcf }, 768 { 0x419eac, 1, 0x04, 0x00001f8f },
193 { 0x419eb0, 1, 0x04, 0x0db00da0 }, 769 { 0x419eb0, 1, 0x04, 0x0db00d2f },
194 { 0x419eb8, 1, 0x04, 0x00000000 }, 770 { 0x419eb8, 1, 0x04, 0x00000000 },
195 { 0x419ec8, 1, 0x04, 0x0001304f }, 771 { 0x419ec8, 1, 0x04, 0x0001304f },
196 { 0x419f30, 4, 0x04, 0x00000000 }, 772 { 0x419f30, 4, 0x04, 0x00000000 },
@@ -203,24 +779,36 @@ nvf0_grctx_init_tpc[] = {
203 {} 779 {}
204}; 780};
205 781
206static struct nvc0_graph_init 782static const struct nvc0_graph_pack
207nvf0_grctx_init_unk[] = { 783nvf0_grctx_pack_tpc[] = {
208 { 0x41be24, 1, 0x04, 0x00000006 }, 784 { nvd7_grctx_init_pe_0 },
785 { nvf0_grctx_init_tex_0 },
786 { nvf0_grctx_init_mpc_0 },
787 { nvf0_grctx_init_l1c_0 },
788 { nvf0_grctx_init_sm_0 },
789 {}
790};
791
792static const struct nvc0_graph_init
793nvf0_grctx_init_cbm_0[] = {
209 { 0x41bec0, 1, 0x04, 0x10000000 }, 794 { 0x41bec0, 1, 0x04, 0x10000000 },
210 { 0x41bec4, 1, 0x04, 0x00037f7f }, 795 { 0x41bec4, 1, 0x04, 0x00037f7f },
211 { 0x41bee4, 1, 0x04, 0x00000000 }, 796 { 0x41bee4, 1, 0x04, 0x00000000 },
212 { 0x41bf00, 1, 0x04, 0x0a418820 },
213 { 0x41bf04, 1, 0x04, 0x062080e6 },
214 { 0x41bf08, 1, 0x04, 0x020398a4 },
215 { 0x41bf0c, 1, 0x04, 0x0e629062 },
216 { 0x41bf10, 1, 0x04, 0x0a418820 },
217 { 0x41bf14, 1, 0x04, 0x000000e6 },
218 { 0x41bfd0, 1, 0x04, 0x00900103 },
219 { 0x41bfe0, 1, 0x04, 0x00400001 },
220 { 0x41bfe4, 1, 0x04, 0x00000000 },
221 {} 797 {}
222}; 798};
223 799
800static const struct nvc0_graph_pack
801nvf0_grctx_pack_ppc[] = {
802 { nve4_grctx_init_pes_0 },
803 { nvf0_grctx_init_cbm_0 },
804 { nvd7_grctx_init_wwdx_0 },
805 {}
806};
807
808/*******************************************************************************
809 * PGRAPH context implementation
810 ******************************************************************************/
811
224static void 812static void
225nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 813nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
226{ 814{
@@ -273,39 +861,6 @@ nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
273 mmio_list(0x17e920, 0x00090a05, 0, 0); 861 mmio_list(0x17e920, 0x00090a05, 0, 0);
274} 862}
275 863
276static struct nvc0_graph_init *
277nvf0_grctx_init_hub[] = {
278 nvc0_grctx_init_base,
279 nvf0_grctx_init_unk40xx,
280 nvf0_grctx_init_unk44xx,
281 nve4_grctx_init_unk46xx,
282 nve4_grctx_init_unk47xx,
283 nve4_grctx_init_unk58xx,
284 nvf0_grctx_init_unk5bxx,
285 nvf0_grctx_init_unk60xx,
286 nvf0_grctx_init_unk64xx,
287 nve4_grctx_init_unk80xx,
288 nvf0_grctx_init_unk88xx,
289 NULL
290};
291
292struct nvc0_graph_init *
293nvf0_grctx_init_gpc[] = {
294 nvf0_grctx_init_gpc_0,
295 nvc0_grctx_init_gpc_1,
296 nvf0_grctx_init_tpc,
297 nvf0_grctx_init_unk,
298 NULL
299};
300
301static struct nvc0_graph_mthd
302nvf0_grctx_init_mthd[] = {
303 { 0xa197, nvc1_grctx_init_9097, },
304 { 0x902d, nvc0_grctx_init_902d, },
305 { 0x902d, nvc0_grctx_init_mthd_magic, },
306 {}
307};
308
309struct nouveau_oclass * 864struct nouveau_oclass *
310nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { 865nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
311 .base.handle = NV_ENGCTX(GR, 0xf0), 866 .base.handle = NV_ENGCTX(GR, 0xf0),
@@ -317,11 +872,14 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
317 .rd32 = _nouveau_graph_context_rd32, 872 .rd32 = _nouveau_graph_context_rd32,
318 .wr32 = _nouveau_graph_context_wr32, 873 .wr32 = _nouveau_graph_context_wr32,
319 }, 874 },
320 .main = nve4_grctx_generate_main, 875 .main = nve4_grctx_generate_main,
321 .mods = nvf0_grctx_generate_mods, 876 .mods = nvf0_grctx_generate_mods,
322 .unkn = nve4_grctx_generate_unkn, 877 .unkn = nve4_grctx_generate_unkn,
323 .hub = nvf0_grctx_init_hub, 878 .hub = nvf0_grctx_pack_hub,
324 .gpc = nvf0_grctx_init_gpc, 879 .gpc = nvf0_grctx_pack_gpc,
325 .icmd = nvc0_grctx_init_icmd, 880 .zcull = nvc0_grctx_pack_zcull,
326 .mthd = nvf0_grctx_init_mthd, 881 .tpc = nvf0_grctx_pack_tpc,
882 .ppc = nvf0_grctx_pack_ppc,
883 .icmd = nvf0_grctx_pack_icmd,
884 .mthd = nvf0_grctx_pack_mthd,
327}.base; 885}.base;
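
A note on the ctxnvf0.c changes above: the old NULL-terminated arrays of init-list pointers are replaced by nvc0_graph_pack tables, where each entry pairs an nvc0_graph_init register list with an optional method class, and both kinds of table end in an empty sentinel. A minimal sketch of the walk this layout implies, with struct field names inferred from the initializers above rather than taken verbatim from the nouveau headers:

#include <linux/types.h>

/* inferred layout: { addr, count, pitch, data } and { list, class } */
struct nvc0_graph_init { u32 addr; u8 count; u8 pitch; u32 data; };
struct nvc0_graph_pack { const struct nvc0_graph_init *init; u32 type; };

static void
mmio_pack_sketch(const struct nvc0_graph_pack *pack,
		 void (*wr32)(u32 addr, u32 data))
{
	for (; pack->init; pack++) {              /* empty {} ends the pack */
		const struct nvc0_graph_init *init = pack->init;
		for (; init->count; init++) {     /* empty {} ends the list */
			u32 addr = init->addr;
			int i;
			/* one entry programs 'count' registers 'pitch' apart */
			for (i = 0; i < init->count; i++, addr += init->pitch)
				wr32(addr, init->data);
		}
	}
}

Grouping the lists this way lets per-chipset tables such as nvf0_grctx_pack_gpc share individual init lists (nvc0_*, nvd9_*, nve4_*) instead of duplicating whole pointer arrays per generation.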
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
index e148961b8075..e37d8106ae1a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
@@ -228,7 +228,7 @@ mmctx_xfer:
228 and $r11 0x1f 228 and $r11 0x1f
229 cmpu b32 $r11 0x10 229 cmpu b32 $r11 0x10
230 bra ne #mmctx_fini_wait 230 bra ne #mmctx_fini_wait
231 mov $r10 2 // DONE_MMCTX 231 mov $r10 5 // DONE_MMCTX
232 call(wait_donez) 232 call(wait_donez)
233 bra #mmctx_done 233 bra #mmctx_done
234 mmctx_stop: 234 mmctx_stop:
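
The com.fuc hunk above bumps the done-flag id handed to wait_donez for DONE_MMCTX from 2 to 5. Since that immediate is encoded directly into the assembled instruction word, the same one-nibble change reappears in every regenerated microcode dump that follows: 0xd021f402 becomes 0xd021f405 in the .fuc.h builds (gpcnvc0, gpcnvd7, gpcnve0, gpcnvf0) and 0x00b87e02 becomes 0x00b87e05 in the .fuc5.h build (gpcnv108), while the new gpcgm107.fuc5.h is assembled with the value 5 from the start.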
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 96cbcea3b2c9..2f7345f7fe07 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -78,7 +78,12 @@ error:
78// 78//
79init: 79init:
80 clear b32 $r0 80 clear b32 $r0
81 mov $sp $r0 81
82 // setup stack
83 nv_iord($r1, NV_PGRAPH_GPCX_GPCCS_CAPS, 0)
84 extr $r1 $r1 9:17
85 shl b32 $r1 8
86 mov $sp $r1
82 87
83 // enable fifo access 88 // enable fifo access
84 mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO 89 mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
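
The gpc.fuc change above stops starting the GPC falcon with $sp = 0 and instead derives the stack pointer from the GPCCS CAPS register. Read literally, the new instructions amount to the helper below; the assumption (not spelled out in the patch) is that bits 17:9 of NV_PGRAPH_GPCX_GPCCS_CAPS report the falcon's data RAM size in 256-byte units, so the stack starts at the top of whatever data RAM the particular unit actually has:

#include <linux/types.h>

/* sketch of the new init sequence, one line per falcon instruction */
static u32 gpccs_initial_sp(u32 caps)	/* caps = NV_PGRAPH_GPCX_GPCCS_CAPS */
{
	u32 size = (caps >> 9) & 0x1ff;	/* extr $r1 $r1 9:17 (9-bit field) */
	return size << 8;		/* shl b32 $r1 8: 256-byte units */
}

Sizing the stack from CAPS rather than a hard-coded value is presumably what allows the same shared init path to serve the new gpcgm107.fuc5 build added below, which #includes this gpc.fuc.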
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5
new file mode 100644
index 000000000000..e730603891d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000002
26
27#define CHIPSET GK208
28#include "macros.fuc"
29
30.section #gm107_grgpc_data
31#define INCLUDE_DATA
32#include "com.fuc"
33#include "gpc.fuc"
34#undef INCLUDE_DATA
35
36.section #gm107_grgpc_code
37#define INCLUDE_CODE
38bra #init
39#include "com.fuc"
40#include "gpc.fuc"
41.align 256
42#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h
new file mode 100644
index 000000000000..6d53b67dd3c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcgm107.fuc5.h
@@ -0,0 +1,473 @@
1uint32_t gm107_grgpc_data[] = {
2/* 0x0000: gpc_mmio_list_head */
3 0x0000006c,
4/* 0x0004: gpc_mmio_list_tail */
5/* 0x0004: tpc_mmio_list_head */
6 0x0000006c,
7/* 0x0008: tpc_mmio_list_tail */
8/* 0x0008: unk_mmio_list_head */
9 0x0000006c,
10/* 0x000c: unk_mmio_list_tail */
11 0x0000006c,
12/* 0x0010: gpc_id */
13 0x00000000,
14/* 0x0014: tpc_count */
15 0x00000000,
16/* 0x0018: tpc_mask */
17 0x00000000,
18/* 0x001c: unk_count */
19 0x00000000,
20/* 0x0020: unk_mask */
21 0x00000000,
22/* 0x0024: cmd_queue */
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41};
42
43uint32_t gm107_grgpc_code[] = {
44 0x03140ef5,
45/* 0x0004: queue_put */
46 0x9800d898,
47 0x86f001d9,
48 0xf489a408,
49 0x020f0b1b,
50 0x0002f87e,
51/* 0x001a: queue_put_next */
52 0x98c400f8,
53 0x0384b607,
54 0xb6008dbb,
55 0x8eb50880,
56 0x018fb500,
57 0xf00190b6,
58 0xd9b50f94,
59/* 0x0037: queue_get */
60 0xf400f801,
61 0xd8980131,
62 0x01d99800,
63 0x0bf489a4,
64 0x0789c421,
65 0xbb0394b6,
66 0x90b6009d,
67 0x009e9808,
68 0xb6019f98,
69 0x84f00180,
70 0x00d8b50f,
71/* 0x0063: queue_get_done */
72 0xf80132f4,
73/* 0x0065: nv_rd32 */
74 0xf0ecb200,
75 0x00801fc9,
76 0x0cf601ca,
77/* 0x0073: nv_rd32_wait */
78 0x8c04bd00,
79 0xcf01ca00,
80 0xccc800cc,
81 0xf61bf41f,
82 0xec7e060a,
83 0x008f0000,
84 0xffcf01cb,
85/* 0x008f: nv_wr32 */
86 0x8000f800,
87 0xf601cc00,
88 0x04bd000f,
89 0xc9f0ecb2,
90 0x1ec9f01f,
91 0x01ca0080,
92 0xbd000cf6,
93/* 0x00a9: nv_wr32_wait */
94 0xca008c04,
95 0x00cccf01,
96 0xf41fccc8,
97 0x00f8f61b,
98/* 0x00b8: wait_donez */
99 0x99f094bd,
100 0x37008000,
101 0x0009f602,
102 0x008004bd,
103 0x0af60206,
104/* 0x00cf: wait_donez_ne */
105 0x8804bd00,
106 0xcf010000,
107 0x8aff0088,
108 0xf61bf488,
109 0x99f094bd,
110 0x17008000,
111 0x0009f602,
112 0x00f804bd,
113/* 0x00ec: wait_doneo */
114 0x99f094bd,
115 0x37008000,
116 0x0009f602,
117 0x008004bd,
118 0x0af60206,
119/* 0x0103: wait_doneo_e */
120 0x8804bd00,
121 0xcf010000,
122 0x8aff0088,
123 0xf60bf488,
124 0x99f094bd,
125 0x17008000,
126 0x0009f602,
127 0x00f804bd,
128/* 0x0120: mmctx_size */
129/* 0x0122: nv_mmctx_size_loop */
130 0xe89894bd,
131 0x1a85b600,
132 0xb60180b6,
133 0x98bb0284,
134 0x04e0b600,
135 0x1bf4efa4,
136 0xf89fb2ec,
137/* 0x013d: mmctx_xfer */
138 0xf094bd00,
139 0x00800199,
140 0x09f60237,
141 0xbd04bd00,
142 0x05bbfd94,
143 0x800f0bf4,
144 0xf601c400,
145 0x04bd000b,
146/* 0x015f: mmctx_base_disabled */
147 0xfd0099f0,
148 0x0bf405ee,
149 0xc6008018,
150 0x000ef601,
151 0x008004bd,
152 0x0ff601c7,
153 0xf004bd00,
154/* 0x017a: mmctx_multi_disabled */
155 0xabc80199,
156 0x10b4b600,
157 0xc80cb9f0,
158 0xe4b601ae,
159 0x05befd11,
160 0x01c50080,
161 0xbd000bf6,
162/* 0x0195: mmctx_exec_loop */
163/* 0x0195: mmctx_wait_free */
164 0xc5008e04,
165 0x00eecf01,
166 0xf41fe4f0,
167 0xce98f60b,
168 0x05e9fd00,
169 0x01c80080,
170 0xbd000ef6,
171 0x04c0b604,
172 0x1bf4cda4,
173 0x02abc8df,
174/* 0x01bf: mmctx_fini_wait */
175 0x8b1c1bf4,
176 0xcf01c500,
177 0xb4f000bb,
178 0x10b4b01f,
179 0x0af31bf4,
180 0x00b87e05,
181 0x250ef400,
182/* 0x01d8: mmctx_stop */
183 0xb600abc8,
184 0xb9f010b4,
185 0x12b9f00c,
186 0x01c50080,
187 0xbd000bf6,
188/* 0x01ed: mmctx_stop_wait */
189 0xc5008b04,
190 0x00bbcf01,
191 0xf412bbc8,
192/* 0x01fa: mmctx_done */
193 0x94bdf61b,
194 0x800199f0,
195 0xf6021700,
196 0x04bd0009,
197/* 0x020a: strand_wait */
198 0xa0f900f8,
199 0xb87e020a,
200 0xa0fc0000,
201/* 0x0216: strand_pre */
202 0x0c0900f8,
203 0x024afc80,
204 0xbd0009f6,
205 0x020a7e04,
206/* 0x0227: strand_post */
207 0x0900f800,
208 0x4afc800d,
209 0x0009f602,
210 0x0a7e04bd,
211 0x00f80002,
212/* 0x0238: strand_set */
213 0xfc800f0c,
214 0x0cf6024f,
215 0x0c04bd00,
216 0x4afc800b,
217 0x000cf602,
218 0xfc8004bd,
219 0x0ef6024f,
220 0x0c04bd00,
221 0x4afc800a,
222 0x000cf602,
223 0x0a7e04bd,
224 0x00f80002,
225/* 0x0268: strand_ctx_init */
226 0x99f094bd,
227 0x37008003,
228 0x0009f602,
229 0x167e04bd,
230 0x030e0002,
231 0x0002387e,
232 0xfc80c4bd,
233 0x0cf60247,
234 0x0c04bd00,
235 0x4afc8001,
236 0x000cf602,
237 0x0a7e04bd,
238 0x0c920002,
239 0x46fc8001,
240 0x000cf602,
241 0x020c04bd,
242 0x024afc80,
243 0xbd000cf6,
244 0x020a7e04,
245 0x02277e00,
246 0x42008800,
247 0x20008902,
248 0x0099cf02,
249/* 0x02c7: ctx_init_strand_loop */
250 0xf608fe95,
251 0x8ef6008e,
252 0x808acf40,
253 0xb606a5b6,
254 0xeabb01a0,
255 0x0480b600,
256 0xf40192b6,
257 0xe4b6e81b,
258 0xf2efbc08,
259 0x99f094bd,
260 0x17008003,
261 0x0009f602,
262 0x00f804bd,
263/* 0x02f8: error */
264 0xffb2e0f9,
265 0x4098148e,
266 0x00008f7e,
267 0xffb2010f,
268 0x409c1c8e,
269 0x00008f7e,
270 0x00f8e0fc,
271/* 0x0314: init */
272 0x004104bd,
273 0x0011cf42,
274 0x010911e7,
275 0xfe0814b6,
276 0x02020014,
277 0xf6120040,
278 0x04bd0002,
279 0xfe047241,
280 0x00400010,
281 0x0000f607,
282 0x040204bd,
283 0xf6040040,
284 0x04bd0002,
285 0x821031f4,
286 0xcf018200,
287 0x01030022,
288 0xbb1f24f0,
289 0x32b60432,
290 0x0502b501,
291 0x820603b5,
292 0xcf018600,
293 0x02b50022,
294 0x0c308e04,
295 0xbd24bd50,
296/* 0x0377: init_unk_loop */
297 0x7e44bd34,
298 0xb0000065,
299 0x0bf400f6,
300 0xbb010f0e,
301 0x4ffd04f2,
302 0x0130b605,
303/* 0x038c: init_unk_next */
304 0xb60120b6,
305 0x26b004e0,
306 0xe21bf402,
307/* 0x0398: init_unk_done */
308 0xb50703b5,
309 0x00820804,
310 0x22cf0201,
311 0x9534bd00,
312 0x00800825,
313 0x05f601c0,
314 0x8004bd00,
315 0xf601c100,
316 0x04bd0005,
317 0x98000e98,
318 0x207e010f,
319 0x2fbb0001,
320 0x003fbb00,
321 0x98010e98,
322 0x207e020f,
323 0x0e980001,
324 0x00effd05,
325 0xbb002ebb,
326 0x0e98003e,
327 0x030f9802,
328 0x0001207e,
329 0xfd070e98,
330 0x2ebb00ef,
331 0x003ebb00,
332 0x800235b6,
333 0xf601d300,
334 0x04bd0003,
335 0xb60825b6,
336 0x20b60635,
337 0x0130b601,
338 0xb60824b6,
339 0x2fb20834,
340 0x0002687e,
341 0x80003fbb,
342 0xf6020100,
343 0x04bd0003,
344 0x29f024bd,
345 0x3000801f,
346 0x0002f602,
347/* 0x0436: main */
348 0x31f404bd,
349 0x0028f400,
350 0x377e240d,
351 0x01f40000,
352 0x04e4b0f4,
353 0xfe1d18f4,
354 0x06020181,
355 0x12fd20bd,
356 0x01e4b604,
357 0xfe051efd,
358 0x097e0018,
359 0x0ef40005,
360/* 0x0465: main_not_ctx_xfer */
361 0x10ef94d4,
362 0x7e01f5f0,
363 0xf40002f8,
364/* 0x0472: ih */
365 0x80f9c70e,
366 0xf90188fe,
367 0xf990f980,
368 0xf9b0f9a0,
369 0xf9e0f9d0,
370 0x4a04bdf0,
371 0xaacf0200,
372 0x04abc400,
373 0x0d1f0bf4,
374 0x1a004e24,
375 0x4f00eecf,
376 0xffcf1900,
377 0x00047e00,
378 0x40010e00,
379 0x0ef61d00,
380/* 0x04af: ih_no_fifo */
381 0x4004bd00,
382 0x0af60100,
383 0xfc04bd00,
384 0xfce0fcf0,
385 0xfcb0fcd0,
386 0xfc90fca0,
387 0x0088fe80,
388 0x32f480fc,
389/* 0x04cf: hub_barrier_done */
390 0x0f01f800,
391 0x040e9801,
392 0xb204febb,
393 0x94188eff,
394 0x008f7e40,
395/* 0x04e3: ctx_redswitch */
396 0x0f00f800,
397 0x85008020,
398 0x000ff601,
399 0x080e04bd,
400/* 0x04f0: ctx_redswitch_delay */
401 0xf401e2b6,
402 0xf5f1fd1b,
403 0xf5f10800,
404 0x00800200,
405 0x0ff60185,
406 0xf804bd00,
407/* 0x0509: ctx_xfer */
408 0x81008000,
409 0x000ff602,
410 0x11f404bd,
411 0x04e37e07,
412/* 0x0519: ctx_xfer_not_load */
413 0x02167e00,
414 0x8024bd00,
415 0xf60247fc,
416 0x04bd0002,
417 0xb6012cf0,
418 0xfc800320,
419 0x02f6024a,
420 0xf004bd00,
421 0xa5f001ac,
422 0x00008b02,
423 0x040c9850,
424 0xbb0fc4b6,
425 0x0c9800bc,
426 0x010d9800,
427 0x3d7e000e,
428 0xacf00001,
429 0x40008b01,
430 0x040c9850,
431 0xbb0fc4b6,
432 0x0c9800bc,
433 0x020d9801,
434 0x4e060f98,
435 0x3d7e0800,
436 0xacf00001,
437 0x04a5f001,
438 0x5030008b,
439 0xb6040c98,
440 0xbcbb0fc4,
441 0x020c9800,
442 0x98030d98,
443 0x004e080f,
444 0x013d7e02,
445 0x020a7e00,
446 0x0601f400,
447/* 0x05a3: ctx_xfer_post */
448 0x7e0712f4,
449/* 0x05a7: ctx_xfer_done */
450 0x7e000227,
451 0xf80004cf,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
index 27dc1280dc10..31922707794f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
@@ -177,7 +177,7 @@ uint32_t nv108_grgpc_code[] = {
177 0xb4f000bb, 177 0xb4f000bb,
178 0x10b4b01f, 178 0x10b4b01f,
179 0x0af31bf4, 179 0x0af31bf4,
180 0x00b87e02, 180 0x00b87e05,
181 0x250ef400, 181 0x250ef400,
182/* 0x01d8: mmctx_stop */ 182/* 0x01d8: mmctx_stop */
183 0xb600abc8, 183 0xb600abc8,
@@ -269,186 +269,186 @@ uint32_t nv108_grgpc_code[] = {
269 0x00008f7e, 269 0x00008f7e,
270 0x00f8e0fc, 270 0x00f8e0fc,
271/* 0x0314: init */ 271/* 0x0314: init */
272 0x04fe04bd, 272 0x004104bd,
273 0x40020200, 273 0x0011cf42,
274 0x02f61200, 274 0x010911e7,
275 0x4104bd00, 275 0xfe0814b6,
276 0x10fe0465, 276 0x02020014,
277 0x07004000, 277 0xf6120040,
278 0xbd0000f6, 278 0x04bd0002,
279 0x40040204, 279 0xfe047241,
280 0x02f60400, 280 0x00400010,
281 0xf404bd00, 281 0x0000f607,
282 0x00821031, 282 0x040204bd,
283 0x22cf0182, 283 0xf6040040,
284 0xf0010300, 284 0x04bd0002,
285 0x32bb1f24, 285 0x821031f4,
286 0x0132b604, 286 0xcf018200,
287 0xb50502b5, 287 0x01030022,
288 0x00820603, 288 0xbb1f24f0,
289 0x22cf0186, 289 0x32b60432,
290 0x0402b500, 290 0x0502b501,
291 0x500c308e, 291 0x820603b5,
292 0x34bd24bd, 292 0xcf018600,
293/* 0x036a: init_unk_loop */ 293 0x02b50022,
294 0x657e44bd, 294 0x0c308e04,
295 0xf6b00000, 295 0xbd24bd50,
296 0x0e0bf400, 296/* 0x0377: init_unk_loop */
297 0xf2bb010f, 297 0x7e44bd34,
298 0x054ffd04, 298 0xb0000065,
299/* 0x037f: init_unk_next */ 299 0x0bf400f6,
300 0xb60130b6, 300 0xbb010f0e,
301 0xe0b60120, 301 0x4ffd04f2,
302 0x0126b004, 302 0x0130b605,
303/* 0x038b: init_unk_done */ 303/* 0x038c: init_unk_next */
304 0xb5e21bf4, 304 0xb60120b6,
305 0x04b50703, 305 0x26b004e0,
306 0x01008208, 306 0xe21bf401,
307 0x0022cf02, 307/* 0x0398: init_unk_done */
308 0x259534bd, 308 0xb50703b5,
309 0xc0008008, 309 0x00820804,
310 0x0005f601, 310 0x22cf0201,
311 0x008004bd, 311 0x9534bd00,
312 0x05f601c1, 312 0x00800825,
313 0x9804bd00, 313 0x05f601c0,
314 0x0f98000e, 314 0x8004bd00,
315 0x01207e01, 315 0xf601c100,
316 0x002fbb00, 316 0x04bd0005,
317 0x98003fbb, 317 0x98000e98,
318 0x0f98010e, 318 0x207e010f,
319 0x01207e02, 319 0x2fbb0001,
320 0x050e9800, 320 0x003fbb00,
321 0xbb00effd, 321 0x98010e98,
322 0x3ebb002e, 322 0x207e020f,
323 0x020e9800, 323 0x0e980001,
324 0x7e030f98, 324 0x00effd05,
325 0x98000120, 325 0xbb002ebb,
326 0xeffd070e, 326 0x0e98003e,
327 0x002ebb00, 327 0x030f9802,
328 0xb6003ebb, 328 0x0001207e,
329 0x00800235, 329 0xfd070e98,
330 0x03f601d3, 330 0x2ebb00ef,
331 0xb604bd00, 331 0x003ebb00,
332 0x35b60825, 332 0x800235b6,
333 0x0120b606, 333 0xf601d300,
334 0xb60130b6, 334 0x04bd0003,
335 0x34b60824, 335 0xb60825b6,
336 0x7e2fb208, 336 0x20b60635,
337 0xbb000268, 337 0x0130b601,
338 0x0080003f, 338 0xb60824b6,
339 0x03f60201, 339 0x2fb20834,
340 0xbd04bd00, 340 0x0002687e,
341 0x1f29f024, 341 0x80003fbb,
342 0x02300080, 342 0xf6020100,
343 0xbd0002f6, 343 0x04bd0003,
344/* 0x0429: main */ 344 0x29f024bd,
345 0x0031f404, 345 0x3000801f,
346 0x0d0028f4, 346 0x0002f602,
347 0x00377e24, 347/* 0x0436: main */
348 0xf401f400, 348 0x31f404bd,
349 0xf404e4b0, 349 0x0028f400,
350 0x81fe1d18, 350 0x377e240d,
351 0xbd060201, 351 0x01f40000,
352 0x0412fd20, 352 0x04e4b0f4,
353 0xfd01e4b6, 353 0xfe1d18f4,
354 0x18fe051e, 354 0x06020181,
355 0x04fc7e00, 355 0x12fd20bd,
356 0xd40ef400, 356 0x01e4b604,
357/* 0x0458: main_not_ctx_xfer */ 357 0xfe051efd,
358 0xf010ef94, 358 0x097e0018,
359 0xf87e01f5, 359 0x0ef40005,
360 0x0ef40002, 360/* 0x0465: main_not_ctx_xfer */
361/* 0x0465: ih */ 361 0x10ef94d4,
362 0xfe80f9c7, 362 0x7e01f5f0,
363 0x80f90188, 363 0xf40002f8,
364 0xa0f990f9, 364/* 0x0472: ih */
365 0xd0f9b0f9, 365 0x80f9c70e,
366 0xf0f9e0f9, 366 0xf90188fe,
367 0x004a04bd, 367 0xf990f980,
368 0x00aacf02, 368 0xf9b0f9a0,
369 0xf404abc4, 369 0xf9e0f9d0,
370 0x240d1f0b, 370 0x4a04bdf0,
371 0xcf1a004e, 371 0xaacf0200,
372 0x004f00ee, 372 0x04abc400,
373 0x00ffcf19, 373 0x0d1f0bf4,
374 0x0000047e, 374 0x1a004e24,
375 0x0040010e, 375 0x4f00eecf,
376 0x000ef61d, 376 0xffcf1900,
377/* 0x04a2: ih_no_fifo */ 377 0x00047e00,
378 0x004004bd, 378 0x40010e00,
379 0x000af601, 379 0x0ef61d00,
380 0xf0fc04bd, 380/* 0x04af: ih_no_fifo */
381 0xd0fce0fc, 381 0x4004bd00,
382 0xa0fcb0fc, 382 0x0af60100,
383 0x80fc90fc, 383 0xfc04bd00,
384 0xfc0088fe, 384 0xfce0fcf0,
385 0x0032f480, 385 0xfcb0fcd0,
386/* 0x04c2: hub_barrier_done */ 386 0xfc90fca0,
387 0x010f01f8, 387 0x0088fe80,
388 0xbb040e98, 388 0x32f480fc,
389 0xffb204fe, 389/* 0x04cf: hub_barrier_done */
390 0x4094188e, 390 0x0f01f800,
391 0x00008f7e, 391 0x040e9801,
392/* 0x04d6: ctx_redswitch */ 392 0xb204febb,
393 0x200f00f8, 393 0x94188eff,
394 0x01850080, 394 0x008f7e40,
395 0xbd000ff6, 395/* 0x04e3: ctx_redswitch */
396/* 0x04e3: ctx_redswitch_delay */ 396 0x0f00f800,
397 0xb6080e04, 397 0x85008020,
398 0x1bf401e2,
399 0x00f5f1fd,
400 0x00f5f108,
401 0x85008002,
402 0x000ff601, 398 0x000ff601,
403 0x00f804bd, 399 0x080e04bd,
404/* 0x04fc: ctx_xfer */ 400/* 0x04f0: ctx_redswitch_delay */
405 0x02810080, 401 0xf401e2b6,
406 0xbd000ff6, 402 0xf5f1fd1b,
407 0x0711f404, 403 0xf5f10800,
408 0x0004d67e, 404 0x00800200,
409/* 0x050c: ctx_xfer_not_load */ 405 0x0ff60185,
410 0x0002167e, 406 0xf804bd00,
411 0xfc8024bd, 407/* 0x0509: ctx_xfer */
412 0x02f60247, 408 0x81008000,
409 0x000ff602,
410 0x11f404bd,
411 0x04e37e07,
412/* 0x0519: ctx_xfer_not_load */
413 0x02167e00,
414 0x8024bd00,
415 0xf60247fc,
416 0x04bd0002,
417 0xb6012cf0,
418 0xfc800320,
419 0x02f6024a,
413 0xf004bd00, 420 0xf004bd00,
414 0x20b6012c, 421 0xa5f001ac,
415 0x4afc8003, 422 0x00008b02,
416 0x0002f602, 423 0x040c9850,
417 0xacf004bd, 424 0xbb0fc4b6,
418 0x02a5f001, 425 0x0c9800bc,
419 0x5000008b, 426 0x010d9800,
427 0x3d7e000e,
428 0xacf00001,
429 0x40008b01,
430 0x040c9850,
431 0xbb0fc4b6,
432 0x0c9800bc,
433 0x020d9801,
434 0x4e060f98,
435 0x3d7e0800,
436 0xacf00001,
437 0x04a5f001,
438 0x5030008b,
420 0xb6040c98, 439 0xb6040c98,
421 0xbcbb0fc4, 440 0xbcbb0fc4,
422 0x000c9800, 441 0x020c9800,
423 0x0e010d98, 442 0x98030d98,
424 0x013d7e00, 443 0x004e080f,
425 0x01acf000, 444 0x013d7e02,
426 0x5040008b, 445 0x020a7e00,
427 0xb6040c98, 446 0x0601f400,
428 0xbcbb0fc4, 447/* 0x05a3: ctx_xfer_post */
429 0x010c9800, 448 0x7e0712f4,
430 0x98020d98, 449/* 0x05a7: ctx_xfer_done */
431 0x004e060f, 450 0x7e000227,
432 0x013d7e08, 451 0xf80004cf,
433 0x01acf000,
434 0x8b04a5f0,
435 0x98503000,
436 0xc4b6040c,
437 0x00bcbb0f,
438 0x98020c98,
439 0x0f98030d,
440 0x02004e08,
441 0x00013d7e,
442 0x00020a7e,
443 0xf40601f4,
444/* 0x0596: ctx_xfer_post */
445 0x277e0712,
446/* 0x059a: ctx_xfer_done */
447 0xc27e0002,
448 0x00f80004,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000, 452 0x00000000,
453 0x00000000, 453 0x00000000,
454 0x00000000, 454 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index 0e7b01efae8d..325cc7b7b2fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -192,7 +192,7 @@ uint32_t nvc0_grgpc_code[] = {
192 0x1fb4f000, 192 0x1fb4f000,
193 0xf410b4b0, 193 0xf410b4b0,
194 0xa7f0f01b, 194 0xa7f0f01b,
195 0xd021f402, 195 0xd021f405,
196/* 0x0223: mmctx_stop */ 196/* 0x0223: mmctx_stop */
197 0xc82b0ef4, 197 0xc82b0ef4,
198 0xb4b600ab, 198 0xb4b600ab,
@@ -300,182 +300,182 @@ uint32_t nvc0_grgpc_code[] = {
300 0x21f440e3, 300 0x21f440e3,
301 0xf8e0fc9d, 301 0xf8e0fc9d,
302/* 0x03a1: init */ 302/* 0x03a1: init */
303 0xfe04bd00, 303 0xf104bd00,
304 0x27f00004, 304 0xf0420017,
305 0x0007f102, 305 0x11cf0013,
306 0x0003f012, 306 0x0911e700,
307 0xbd0002d0, 307 0x0814b601,
308 0xd517f104, 308 0xf00014fe,
309 0x0010fe04, 309 0x07f10227,
310 0x070007f1, 310 0x03f01200,
311 0x0002d000,
312 0x17f104bd,
313 0x10fe04e6,
314 0x0007f100,
315 0x0003f007,
316 0xbd0000d0,
317 0x0427f004,
318 0x040007f1,
311 0xd00003f0, 319 0xd00003f0,
312 0x04bd0000,
313 0xf10427f0,
314 0xf0040007,
315 0x02d00003,
316 0xf404bd00,
317 0x27f11031,
318 0x23f08200,
319 0x0022cf01,
320 0xf00137f0,
321 0x32bb1f24,
322 0x0132b604,
323 0x80050280,
324 0x27f10603,
325 0x23f08600,
326 0x0022cf01,
327 0xf1040280,
328 0xf0010027,
329 0x22cf0223,
330 0x9534bd00,
331 0x07f10825,
332 0x03f0c000,
333 0x0005d001,
334 0x07f104bd,
335 0x03f0c100,
336 0x0005d001,
337 0x0e9804bd,
338 0x010f9800,
339 0x015021f5,
340 0xbb002fbb,
341 0x0e98003f,
342 0x020f9801,
343 0x015021f5,
344 0xfd050e98,
345 0x2ebb00ef,
346 0x003ebb00,
347 0xf10235b6,
348 0xf0d30007,
349 0x03d00103,
350 0xb604bd00,
351 0x35b60825,
352 0x0120b606,
353 0xb60130b6,
354 0x34b60824,
355 0x022fb908,
356 0x02d321f5,
357 0xf1003fbb,
358 0xf0010007,
359 0x03d00203,
360 0xbd04bd00,
361 0x1f29f024,
362 0x080007f1,
363 0xd00203f0,
364 0x04bd0002, 320 0x04bd0002,
365/* 0x0498: main */ 321 0xf11031f4,
366 0xf40031f4, 322 0xf0820027,
367 0xd7f00028, 323 0x22cf0123,
368 0x3921f41c, 324 0x0137f000,
369 0xb0f401f4, 325 0xbb1f24f0,
370 0x18f404e4, 326 0x32b60432,
371 0x0181fe1e, 327 0x05028001,
372 0xbd0627f0, 328 0xf1060380,
373 0x0412fd20, 329 0xf0860027,
374 0xfd01e4b6, 330 0x22cf0123,
375 0x18fe051e, 331 0x04028000,
376 0x8d21f500, 332 0x010027f1,
377 0xd30ef405, 333 0xcf0223f0,
378/* 0x04c8: main_not_ctx_xfer */ 334 0x34bd0022,
379 0xf010ef94, 335 0xf1082595,
380 0x21f501f5, 336 0xf0c00007,
381 0x0ef4037e, 337 0x05d00103,
382/* 0x04d5: ih */
383 0xfe80f9c6,
384 0x80f90188,
385 0xa0f990f9,
386 0xd0f9b0f9,
387 0xf0f9e0f9,
388 0xa7f104bd,
389 0xa3f00200,
390 0x00aacf00,
391 0xf404abc4,
392 0xd7f02c0b,
393 0x00e7f11c,
394 0x00e3f01a,
395 0xf100eecf,
396 0xf01900f7,
397 0xffcf00f3,
398 0x0421f400,
399 0xf101e7f0,
400 0xf01d0007,
401 0x0ed00003,
402/* 0x0523: ih_no_fifo */
403 0xf104bd00, 338 0xf104bd00,
404 0xf0010007, 339 0xf0c10007,
405 0x0ad00003, 340 0x05d00103,
406 0xfc04bd00, 341 0x9804bd00,
407 0xfce0fcf0, 342 0x0f98000e,
408 0xfcb0fcd0, 343 0x5021f501,
409 0xfc90fca0, 344 0x002fbb01,
410 0x0088fe80, 345 0x98003fbb,
411 0x32f480fc, 346 0x0f98010e,
412/* 0x0547: hub_barrier_done */ 347 0x5021f502,
413 0xf001f800, 348 0x050e9801,
414 0x0e9801f7, 349 0xbb00effd,
415 0x04febb04, 350 0x3ebb002e,
416 0xf102ffb9, 351 0x0235b600,
417 0xf09418e7, 352 0xd30007f1,
418 0x21f440e3, 353 0xd00103f0,
419/* 0x055f: ctx_redswitch */ 354 0x04bd0003,
420 0xf000f89d, 355 0xb60825b6,
421 0x07f120f7, 356 0x20b60635,
422 0x03f08500, 357 0x0130b601,
423 0x000fd001, 358 0xb60824b6,
424 0xe7f004bd, 359 0x2fb90834,
425/* 0x0571: ctx_redswitch_delay */ 360 0xd321f502,
426 0x01e2b608, 361 0x003fbb02,
427 0xf1fd1bf4, 362 0x010007f1,
428 0xf10800f5, 363 0xd00203f0,
429 0xf10200f5, 364 0x04bd0003,
365 0x29f024bd,
366 0x0007f11f,
367 0x0203f008,
368 0xbd0002d0,
369/* 0x04a9: main */
370 0x0031f404,
371 0xf00028f4,
372 0x21f41cd7,
373 0xf401f439,
374 0xf404e4b0,
375 0x81fe1e18,
376 0x0627f001,
377 0x12fd20bd,
378 0x01e4b604,
379 0xfe051efd,
380 0x21f50018,
381 0x0ef4059e,
382/* 0x04d9: main_not_ctx_xfer */
383 0x10ef94d3,
384 0xf501f5f0,
385 0xf4037e21,
386/* 0x04e6: ih */
387 0x80f9c60e,
388 0xf90188fe,
389 0xf990f980,
390 0xf9b0f9a0,
391 0xf9e0f9d0,
392 0xf104bdf0,
393 0xf00200a7,
394 0xaacf00a3,
395 0x04abc400,
396 0xf02c0bf4,
397 0xe7f11cd7,
398 0xe3f01a00,
399 0x00eecf00,
400 0x1900f7f1,
401 0xcf00f3f0,
402 0x21f400ff,
403 0x01e7f004,
404 0x1d0007f1,
405 0xd00003f0,
406 0x04bd000e,
407/* 0x0534: ih_no_fifo */
408 0x010007f1,
409 0xd00003f0,
410 0x04bd000a,
411 0xe0fcf0fc,
412 0xb0fcd0fc,
413 0x90fca0fc,
414 0x88fe80fc,
415 0xf480fc00,
416 0x01f80032,
417/* 0x0558: hub_barrier_done */
418 0x9801f7f0,
419 0xfebb040e,
420 0x02ffb904,
421 0x9418e7f1,
422 0xf440e3f0,
423 0x00f89d21,
424/* 0x0570: ctx_redswitch */
425 0xf120f7f0,
430 0xf0850007, 426 0xf0850007,
431 0x0fd00103, 427 0x0fd00103,
432 0xf804bd00, 428 0xf004bd00,
433/* 0x058d: ctx_xfer */ 429/* 0x0582: ctx_redswitch_delay */
434 0x0007f100, 430 0xe2b608e7,
435 0x0203f081, 431 0xfd1bf401,
436 0xbd000fd0, 432 0x0800f5f1,
437 0x0711f404, 433 0x0200f5f1,
438 0x055f21f5, 434 0x850007f1,
439/* 0x05a0: ctx_xfer_not_load */ 435 0xd00103f0,
440 0x026a21f5, 436 0x04bd000f,
441 0x07f124bd, 437/* 0x059e: ctx_xfer */
442 0x03f047fc, 438 0x07f100f8,
443 0x0002d002, 439 0x03f08100,
444 0x2cf004bd, 440 0x000fd002,
445 0x0320b601, 441 0x11f404bd,
446 0x4afc07f1, 442 0x7021f507,
447 0xd00203f0, 443/* 0x05b1: ctx_xfer_not_load */
448 0x04bd0002, 444 0x6a21f505,
445 0xf124bd02,
446 0xf047fc07,
447 0x02d00203,
448 0xf004bd00,
449 0x20b6012c,
450 0xfc07f103,
451 0x0203f04a,
452 0xbd0002d0,
453 0x01acf004,
454 0xf102a5f0,
455 0xf00000b7,
456 0x0c9850b3,
457 0x0fc4b604,
458 0x9800bcbb,
459 0x0d98000c,
460 0x00e7f001,
461 0x016f21f5,
449 0xf001acf0, 462 0xf001acf0,
450 0xb7f102a5, 463 0xb7f104a5,
451 0xb3f00000, 464 0xb3f04000,
452 0x040c9850, 465 0x040c9850,
453 0xbb0fc4b6, 466 0xbb0fc4b6,
454 0x0c9800bc, 467 0x0c9800bc,
455 0x010d9800, 468 0x020d9801,
456 0xf500e7f0, 469 0xf1060f98,
457 0xf0016f21, 470 0xf50800e7,
458 0xa5f001ac, 471 0xf5016f21,
459 0x00b7f104, 472 0xf4025e21,
460 0x50b3f040, 473 0x12f40601,
461 0xb6040c98, 474/* 0x0629: ctx_xfer_post */
462 0xbcbb0fc4, 475 0x7f21f507,
463 0x010c9800, 476/* 0x062d: ctx_xfer_done */
464 0x98020d98, 477 0x5821f502,
465 0xe7f1060f, 478 0x0000f805,
466 0x21f50800,
467 0x21f5016f,
468 0x01f4025e,
469 0x0712f406,
470/* 0x0618: ctx_xfer_post */
471 0x027f21f5,
472/* 0x061c: ctx_xfer_done */
473 0x054721f5,
474 0x000000f8,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000, 479 0x00000000,
480 0x00000000, 480 0x00000000,
481 0x00000000, 481 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
index 84dd32db28a0..d1504a4059c6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
@@ -196,7 +196,7 @@ uint32_t nvd7_grgpc_code[] = {
196 0x1fb4f000, 196 0x1fb4f000,
197 0xf410b4b0, 197 0xf410b4b0,
198 0xa7f0f01b, 198 0xa7f0f01b,
199 0xd021f402, 199 0xd021f405,
200/* 0x0223: mmctx_stop */ 200/* 0x0223: mmctx_stop */
201 0xc82b0ef4, 201 0xc82b0ef4,
202 0xb4b600ab, 202 0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nvd7_grgpc_code[] = {
304 0x21f440e3, 304 0x21f440e3,
305 0xf8e0fc9d, 305 0xf8e0fc9d,
306/* 0x03a1: init */ 306/* 0x03a1: init */
307 0xfe04bd00, 307 0xf104bd00,
308 0x27f00004, 308 0xf0420017,
309 0x0007f102, 309 0x11cf0013,
310 0x0003f012, 310 0x0911e700,
311 0xbd0002d0, 311 0x0814b601,
312 0x1f17f104, 312 0xf00014fe,
313 0x0010fe05, 313 0x07f10227,
314 0x070007f1, 314 0x03f01200,
315 0x0002d000,
316 0x17f104bd,
317 0x10fe0530,
318 0x0007f100,
319 0x0003f007,
320 0xbd0000d0,
321 0x0427f004,
322 0x040007f1,
315 0xd00003f0, 323 0xd00003f0,
316 0x04bd0000, 324 0x04bd0002,
317 0xf10427f0, 325 0xf11031f4,
318 0xf0040007, 326 0xf0820027,
319 0x02d00003, 327 0x22cf0123,
328 0x0137f000,
329 0xbb1f24f0,
330 0x32b60432,
331 0x05028001,
332 0xf1060380,
333 0xf0860027,
334 0x22cf0123,
335 0x04028000,
336 0x0c30e7f1,
337 0xbd50e3f0,
338 0xbd34bd24,
339/* 0x0421: init_unk_loop */
340 0x6821f444,
341 0xf400f6b0,
342 0xf7f00f0b,
343 0x04f2bb01,
344 0xb6054ffd,
345/* 0x0436: init_unk_next */
346 0x20b60130,
347 0x04e0b601,
348 0xf40126b0,
349/* 0x0442: init_unk_done */
350 0x0380e21b,
351 0x08048007,
352 0x010027f1,
353 0xcf0223f0,
354 0x34bd0022,
355 0xf1082595,
356 0xf0c00007,
357 0x05d00103,
358 0xf104bd00,
359 0xf0c10007,
360 0x05d00103,
361 0x9804bd00,
362 0x0f98000e,
363 0x5021f501,
364 0x002fbb01,
365 0x98003fbb,
366 0x0f98010e,
367 0x5021f502,
368 0x050e9801,
369 0xbb00effd,
370 0x3ebb002e,
371 0x020e9800,
372 0xf5030f98,
373 0x98015021,
374 0xeffd070e,
375 0x002ebb00,
376 0xb6003ebb,
377 0x07f10235,
378 0x03f0d300,
379 0x0003d001,
380 0x25b604bd,
381 0x0635b608,
382 0xb60120b6,
383 0x24b60130,
384 0x0834b608,
385 0xf5022fb9,
386 0xbb02d321,
387 0x07f1003f,
388 0x03f00100,
389 0x0003d002,
390 0x24bd04bd,
391 0xf11f29f0,
392 0xf0080007,
393 0x02d00203,
394/* 0x04f3: main */
320 0xf404bd00, 395 0xf404bd00,
321 0x27f11031, 396 0x28f40031,
322 0x23f08200, 397 0x24d7f000,
323 0x0022cf01, 398 0xf43921f4,
324 0xf00137f0, 399 0xe4b0f401,
325 0x32bb1f24, 400 0x1e18f404,
326 0x0132b604, 401 0xf00181fe,
327 0x80050280, 402 0x20bd0627,
328 0x27f10603, 403 0xb60412fd,
329 0x23f08600, 404 0x1efd01e4,
330 0x0022cf01, 405 0x0018fe05,
331 0xf1040280, 406 0x05e821f5,
332 0xf00c30e7, 407/* 0x0523: main_not_ctx_xfer */
333 0x24bd50e3, 408 0x94d30ef4,
334 0x44bd34bd, 409 0xf5f010ef,
335/* 0x0410: init_unk_loop */ 410 0x7e21f501,
336 0xb06821f4, 411 0xc60ef403,
337 0x0bf400f6, 412/* 0x0530: ih */
338 0x01f7f00f, 413 0x88fe80f9,
339 0xfd04f2bb, 414 0xf980f901,
340 0x30b6054f, 415 0xf9a0f990,
341/* 0x0425: init_unk_next */ 416 0xf9d0f9b0,
342 0x0120b601, 417 0xbdf0f9e0,
343 0xb004e0b6, 418 0x00a7f104,
344 0x1bf40126, 419 0x00a3f002,
345/* 0x0431: init_unk_done */ 420 0xc400aacf,
346 0x070380e2, 421 0x0bf404ab,
347 0xf1080480, 422 0x24d7f02c,
348 0xf0010027, 423 0x1a00e7f1,
349 0x22cf0223, 424 0xcf00e3f0,
350 0x9534bd00, 425 0xf7f100ee,
351 0x07f10825, 426 0xf3f01900,
352 0x03f0c000, 427 0x00ffcf00,
353 0x0005d001, 428 0xf00421f4,
429 0x07f101e7,
430 0x03f01d00,
431 0x000ed000,
432/* 0x057e: ih_no_fifo */
354 0x07f104bd, 433 0x07f104bd,
355 0x03f0c100, 434 0x03f00100,
356 0x0005d001, 435 0x000ad000,
357 0x0e9804bd, 436 0xf0fc04bd,
358 0x010f9800, 437 0xd0fce0fc,
359 0x015021f5, 438 0xa0fcb0fc,
360 0xbb002fbb, 439 0x80fc90fc,
361 0x0e98003f, 440 0xfc0088fe,
362 0x020f9801, 441 0x0032f480,
363 0x015021f5, 442/* 0x05a2: hub_barrier_done */
364 0xfd050e98, 443 0xf7f001f8,
365 0x2ebb00ef, 444 0x040e9801,
366 0x003ebb00, 445 0xb904febb,
367 0x98020e98, 446 0xe7f102ff,
368 0x21f5030f, 447 0xe3f09418,
369 0x0e980150, 448 0x9d21f440,
370 0x00effd07, 449/* 0x05ba: ctx_redswitch */
371 0xbb002ebb, 450 0xf7f000f8,
372 0x35b6003e, 451 0x0007f120,
373 0x0007f102,
374 0x0103f0d3,
375 0xbd0003d0,
376 0x0825b604,
377 0xb60635b6,
378 0x30b60120,
379 0x0824b601,
380 0xb90834b6,
381 0x21f5022f,
382 0x3fbb02d3,
383 0x0007f100,
384 0x0203f001,
385 0xbd0003d0,
386 0xf024bd04,
387 0x07f11f29,
388 0x03f00800,
389 0x0002d002,
390/* 0x04e2: main */
391 0x31f404bd,
392 0x0028f400,
393 0xf424d7f0,
394 0x01f43921,
395 0x04e4b0f4,
396 0xfe1e18f4,
397 0x27f00181,
398 0xfd20bd06,
399 0xe4b60412,
400 0x051efd01,
401 0xf50018fe,
402 0xf405d721,
403/* 0x0512: main_not_ctx_xfer */
404 0xef94d30e,
405 0x01f5f010,
406 0x037e21f5,
407/* 0x051f: ih */
408 0xf9c60ef4,
409 0x0188fe80,
410 0x90f980f9,
411 0xb0f9a0f9,
412 0xe0f9d0f9,
413 0x04bdf0f9,
414 0x0200a7f1,
415 0xcf00a3f0,
416 0xabc400aa,
417 0x2c0bf404,
418 0xf124d7f0,
419 0xf01a00e7,
420 0xeecf00e3,
421 0x00f7f100,
422 0x00f3f019,
423 0xf400ffcf,
424 0xe7f00421,
425 0x0007f101,
426 0x0003f01d,
427 0xbd000ed0,
428/* 0x056d: ih_no_fifo */
429 0x0007f104,
430 0x0003f001,
431 0xbd000ad0,
432 0xfcf0fc04,
433 0xfcd0fce0,
434 0xfca0fcb0,
435 0xfe80fc90,
436 0x80fc0088,
437 0xf80032f4,
438/* 0x0591: hub_barrier_done */
439 0x01f7f001,
440 0xbb040e98,
441 0xffb904fe,
442 0x18e7f102,
443 0x40e3f094,
444 0xf89d21f4,
445/* 0x05a9: ctx_redswitch */
446 0x20f7f000,
447 0x850007f1,
448 0xd00103f0,
449 0x04bd000f,
450/* 0x05bb: ctx_redswitch_delay */
451 0xb608e7f0,
452 0x1bf401e2,
453 0x00f5f1fd,
454 0x00f5f108,
455 0x0007f102,
456 0x0103f085, 452 0x0103f085,
457 0xbd000fd0, 453 0xbd000fd0,
458/* 0x05d7: ctx_xfer */ 454 0x08e7f004,
459 0xf100f804, 455/* 0x05cc: ctx_redswitch_delay */
460 0xf0810007, 456 0xf401e2b6,
461 0x0fd00203, 457 0xf5f1fd1b,
462 0xf404bd00, 458 0xf5f10800,
463 0x21f50711, 459 0x07f10200,
464/* 0x05ea: ctx_xfer_not_load */ 460 0x03f08500,
465 0x21f505a9, 461 0x000fd001,
466 0x24bd026a, 462 0x00f804bd,
467 0x47fc07f1, 463/* 0x05e8: ctx_xfer */
464 0x810007f1,
468 0xd00203f0, 465 0xd00203f0,
469 0x04bd0002, 466 0x04bd000f,
470 0xb6012cf0, 467 0xf50711f4,
471 0x07f10320, 468/* 0x05fb: ctx_xfer_not_load */
472 0x03f04afc, 469 0xf505ba21,
473 0x0002d002, 470 0xbd026a21,
474 0xacf004bd, 471 0xfc07f124,
475 0x02a5f001, 472 0x0203f047,
476 0x0000b7f1, 473 0xbd0002d0,
477 0x9850b3f0, 474 0x012cf004,
478 0xc4b6040c, 475 0xf10320b6,
479 0x00bcbb0f, 476 0xf04afc07,
480 0x98000c98, 477 0x02d00203,
481 0xe7f0010d, 478 0xf004bd00,
482 0x6f21f500, 479 0xa5f001ac,
483 0x01acf001, 480 0x00b7f102,
484 0x4000b7f1, 481 0x50b3f000,
482 0xb6040c98,
483 0xbcbb0fc4,
484 0x000c9800,
485 0xf0010d98,
486 0x21f500e7,
487 0xacf0016f,
488 0x00b7f101,
489 0x50b3f040,
490 0xb6040c98,
491 0xbcbb0fc4,
492 0x010c9800,
493 0x98020d98,
494 0xe7f1060f,
495 0x21f50800,
496 0xacf0016f,
497 0x04a5f001,
498 0x3000b7f1,
485 0x9850b3f0, 499 0x9850b3f0,
486 0xc4b6040c, 500 0xc4b6040c,
487 0x00bcbb0f, 501 0x00bcbb0f,
488 0x98010c98, 502 0x98020c98,
489 0x0f98020d, 503 0x0f98030d,
490 0x00e7f106, 504 0x00e7f108,
491 0x6f21f508, 505 0x6f21f502,
492 0x01acf001, 506 0x5e21f501,
493 0xf104a5f0, 507 0x0601f402,
494 0xf03000b7, 508/* 0x0697: ctx_xfer_post */
495 0x0c9850b3, 509 0xf50712f4,
496 0x0fc4b604, 510/* 0x069b: ctx_xfer_done */
497 0x9800bcbb, 511 0xf5027f21,
498 0x0d98020c, 512 0xf805a221,
499 0x080f9803,
500 0x0200e7f1,
501 0x016f21f5,
502 0x025e21f5,
503 0xf40601f4,
504/* 0x0686: ctx_xfer_post */
505 0x21f50712,
506/* 0x068a: ctx_xfer_done */
507 0x21f5027f,
508 0x00f80591,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000, 513 0x00000000,
514 0x00000000, 514 0x00000000,
515 0x00000000, 515 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index b6da800ee9c2..855b220378f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -196,7 +196,7 @@ uint32_t nve0_grgpc_code[] = {
196 0x1fb4f000, 196 0x1fb4f000,
197 0xf410b4b0, 197 0xf410b4b0,
198 0xa7f0f01b, 198 0xa7f0f01b,
199 0xd021f402, 199 0xd021f405,
200/* 0x0223: mmctx_stop */ 200/* 0x0223: mmctx_stop */
201 0xc82b0ef4, 201 0xc82b0ef4,
202 0xb4b600ab, 202 0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nve0_grgpc_code[] = {
304 0x21f440e3, 304 0x21f440e3,
305 0xf8e0fc9d, 305 0xf8e0fc9d,
306/* 0x03a1: init */ 306/* 0x03a1: init */
307 0xfe04bd00, 307 0xf104bd00,
308 0x27f00004, 308 0xf0420017,
309 0x0007f102, 309 0x11cf0013,
310 0x0003f012, 310 0x0911e700,
311 0xbd0002d0, 311 0x0814b601,
312 0x1f17f104, 312 0xf00014fe,
313 0x0010fe05, 313 0x07f10227,
314 0x070007f1, 314 0x03f01200,
315 0x0002d000,
316 0x17f104bd,
317 0x10fe0530,
318 0x0007f100,
319 0x0003f007,
320 0xbd0000d0,
321 0x0427f004,
322 0x040007f1,
315 0xd00003f0, 323 0xd00003f0,
316 0x04bd0000, 324 0x04bd0002,
317 0xf10427f0, 325 0xf11031f4,
318 0xf0040007, 326 0xf0820027,
319 0x02d00003, 327 0x22cf0123,
328 0x0137f000,
329 0xbb1f24f0,
330 0x32b60432,
331 0x05028001,
332 0xf1060380,
333 0xf0860027,
334 0x22cf0123,
335 0x04028000,
336 0x0c30e7f1,
337 0xbd50e3f0,
338 0xbd34bd24,
339/* 0x0421: init_unk_loop */
340 0x6821f444,
341 0xf400f6b0,
342 0xf7f00f0b,
343 0x04f2bb01,
344 0xb6054ffd,
345/* 0x0436: init_unk_next */
346 0x20b60130,
347 0x04e0b601,
348 0xf40126b0,
349/* 0x0442: init_unk_done */
350 0x0380e21b,
351 0x08048007,
352 0x010027f1,
353 0xcf0223f0,
354 0x34bd0022,
355 0xf1082595,
356 0xf0c00007,
357 0x05d00103,
358 0xf104bd00,
359 0xf0c10007,
360 0x05d00103,
361 0x9804bd00,
362 0x0f98000e,
363 0x5021f501,
364 0x002fbb01,
365 0x98003fbb,
366 0x0f98010e,
367 0x5021f502,
368 0x050e9801,
369 0xbb00effd,
370 0x3ebb002e,
371 0x020e9800,
372 0xf5030f98,
373 0x98015021,
374 0xeffd070e,
375 0x002ebb00,
376 0xb6003ebb,
377 0x07f10235,
378 0x03f0d300,
379 0x0003d001,
380 0x25b604bd,
381 0x0635b608,
382 0xb60120b6,
383 0x24b60130,
384 0x0834b608,
385 0xf5022fb9,
386 0xbb02d321,
387 0x07f1003f,
388 0x03f00100,
389 0x0003d002,
390 0x24bd04bd,
391 0xf11f29f0,
392 0xf0080007,
393 0x02d00203,
394/* 0x04f3: main */
320 0xf404bd00, 395 0xf404bd00,
321 0x27f11031, 396 0x28f40031,
322 0x23f08200, 397 0x24d7f000,
323 0x0022cf01, 398 0xf43921f4,
324 0xf00137f0, 399 0xe4b0f401,
325 0x32bb1f24, 400 0x1e18f404,
326 0x0132b604, 401 0xf00181fe,
327 0x80050280, 402 0x20bd0627,
328 0x27f10603, 403 0xb60412fd,
329 0x23f08600, 404 0x1efd01e4,
330 0x0022cf01, 405 0x0018fe05,
331 0xf1040280, 406 0x05e821f5,
332 0xf00c30e7, 407/* 0x0523: main_not_ctx_xfer */
333 0x24bd50e3, 408 0x94d30ef4,
334 0x44bd34bd, 409 0xf5f010ef,
335/* 0x0410: init_unk_loop */ 410 0x7e21f501,
336 0xb06821f4, 411 0xc60ef403,
337 0x0bf400f6, 412/* 0x0530: ih */
338 0x01f7f00f, 413 0x88fe80f9,
339 0xfd04f2bb, 414 0xf980f901,
340 0x30b6054f, 415 0xf9a0f990,
341/* 0x0425: init_unk_next */ 416 0xf9d0f9b0,
342 0x0120b601, 417 0xbdf0f9e0,
343 0xb004e0b6, 418 0x00a7f104,
344 0x1bf40126, 419 0x00a3f002,
345/* 0x0431: init_unk_done */ 420 0xc400aacf,
346 0x070380e2, 421 0x0bf404ab,
347 0xf1080480, 422 0x24d7f02c,
348 0xf0010027, 423 0x1a00e7f1,
349 0x22cf0223, 424 0xcf00e3f0,
350 0x9534bd00, 425 0xf7f100ee,
351 0x07f10825, 426 0xf3f01900,
352 0x03f0c000, 427 0x00ffcf00,
353 0x0005d001, 428 0xf00421f4,
429 0x07f101e7,
430 0x03f01d00,
431 0x000ed000,
432/* 0x057e: ih_no_fifo */
354 0x07f104bd, 433 0x07f104bd,
355 0x03f0c100, 434 0x03f00100,
356 0x0005d001, 435 0x000ad000,
357 0x0e9804bd, 436 0xf0fc04bd,
358 0x010f9800, 437 0xd0fce0fc,
359 0x015021f5, 438 0xa0fcb0fc,
360 0xbb002fbb, 439 0x80fc90fc,
361 0x0e98003f, 440 0xfc0088fe,
362 0x020f9801, 441 0x0032f480,
363 0x015021f5, 442/* 0x05a2: hub_barrier_done */
364 0xfd050e98, 443 0xf7f001f8,
365 0x2ebb00ef, 444 0x040e9801,
366 0x003ebb00, 445 0xb904febb,
367 0x98020e98, 446 0xe7f102ff,
368 0x21f5030f, 447 0xe3f09418,
369 0x0e980150, 448 0x9d21f440,
370 0x00effd07, 449/* 0x05ba: ctx_redswitch */
371 0xbb002ebb, 450 0xf7f000f8,
372 0x35b6003e, 451 0x0007f120,
373 0x0007f102,
374 0x0103f0d3,
375 0xbd0003d0,
376 0x0825b604,
377 0xb60635b6,
378 0x30b60120,
379 0x0824b601,
380 0xb90834b6,
381 0x21f5022f,
382 0x3fbb02d3,
383 0x0007f100,
384 0x0203f001,
385 0xbd0003d0,
386 0xf024bd04,
387 0x07f11f29,
388 0x03f00800,
389 0x0002d002,
390/* 0x04e2: main */
391 0x31f404bd,
392 0x0028f400,
393 0xf424d7f0,
394 0x01f43921,
395 0x04e4b0f4,
396 0xfe1e18f4,
397 0x27f00181,
398 0xfd20bd06,
399 0xe4b60412,
400 0x051efd01,
401 0xf50018fe,
402 0xf405d721,
403/* 0x0512: main_not_ctx_xfer */
404 0xef94d30e,
405 0x01f5f010,
406 0x037e21f5,
407/* 0x051f: ih */
408 0xf9c60ef4,
409 0x0188fe80,
410 0x90f980f9,
411 0xb0f9a0f9,
412 0xe0f9d0f9,
413 0x04bdf0f9,
414 0x0200a7f1,
415 0xcf00a3f0,
416 0xabc400aa,
417 0x2c0bf404,
418 0xf124d7f0,
419 0xf01a00e7,
420 0xeecf00e3,
421 0x00f7f100,
422 0x00f3f019,
423 0xf400ffcf,
424 0xe7f00421,
425 0x0007f101,
426 0x0003f01d,
427 0xbd000ed0,
428/* 0x056d: ih_no_fifo */
429 0x0007f104,
430 0x0003f001,
431 0xbd000ad0,
432 0xfcf0fc04,
433 0xfcd0fce0,
434 0xfca0fcb0,
435 0xfe80fc90,
436 0x80fc0088,
437 0xf80032f4,
438/* 0x0591: hub_barrier_done */
439 0x01f7f001,
440 0xbb040e98,
441 0xffb904fe,
442 0x18e7f102,
443 0x40e3f094,
444 0xf89d21f4,
445/* 0x05a9: ctx_redswitch */
446 0x20f7f000,
447 0x850007f1,
448 0xd00103f0,
449 0x04bd000f,
450/* 0x05bb: ctx_redswitch_delay */
451 0xb608e7f0,
452 0x1bf401e2,
453 0x00f5f1fd,
454 0x00f5f108,
455 0x0007f102,
456 0x0103f085, 452 0x0103f085,
457 0xbd000fd0, 453 0xbd000fd0,
458/* 0x05d7: ctx_xfer */ 454 0x08e7f004,
459 0xf100f804, 455/* 0x05cc: ctx_redswitch_delay */
460 0xf0810007, 456 0xf401e2b6,
461 0x0fd00203, 457 0xf5f1fd1b,
462 0xf404bd00, 458 0xf5f10800,
463 0x21f50711, 459 0x07f10200,
464/* 0x05ea: ctx_xfer_not_load */ 460 0x03f08500,
465 0x21f505a9, 461 0x000fd001,
466 0x24bd026a, 462 0x00f804bd,
467 0x47fc07f1, 463/* 0x05e8: ctx_xfer */
464 0x810007f1,
468 0xd00203f0, 465 0xd00203f0,
469 0x04bd0002, 466 0x04bd000f,
470 0xb6012cf0, 467 0xf50711f4,
471 0x07f10320, 468/* 0x05fb: ctx_xfer_not_load */
472 0x03f04afc, 469 0xf505ba21,
473 0x0002d002, 470 0xbd026a21,
474 0xacf004bd, 471 0xfc07f124,
475 0x02a5f001, 472 0x0203f047,
476 0x0000b7f1, 473 0xbd0002d0,
477 0x9850b3f0, 474 0x012cf004,
478 0xc4b6040c, 475 0xf10320b6,
479 0x00bcbb0f, 476 0xf04afc07,
480 0x98000c98, 477 0x02d00203,
481 0xe7f0010d, 478 0xf004bd00,
482 0x6f21f500, 479 0xa5f001ac,
483 0x01acf001, 480 0x00b7f102,
484 0x4000b7f1, 481 0x50b3f000,
482 0xb6040c98,
483 0xbcbb0fc4,
484 0x000c9800,
485 0xf0010d98,
486 0x21f500e7,
487 0xacf0016f,
488 0x00b7f101,
489 0x50b3f040,
490 0xb6040c98,
491 0xbcbb0fc4,
492 0x010c9800,
493 0x98020d98,
494 0xe7f1060f,
495 0x21f50800,
496 0xacf0016f,
497 0x04a5f001,
498 0x3000b7f1,
485 0x9850b3f0, 499 0x9850b3f0,
486 0xc4b6040c, 500 0xc4b6040c,
487 0x00bcbb0f, 501 0x00bcbb0f,
488 0x98010c98, 502 0x98020c98,
489 0x0f98020d, 503 0x0f98030d,
490 0x00e7f106, 504 0x00e7f108,
491 0x6f21f508, 505 0x6f21f502,
492 0x01acf001, 506 0x5e21f501,
493 0xf104a5f0, 507 0x0601f402,
494 0xf03000b7, 508/* 0x0697: ctx_xfer_post */
495 0x0c9850b3, 509 0xf50712f4,
496 0x0fc4b604, 510/* 0x069b: ctx_xfer_done */
497 0x9800bcbb, 511 0xf5027f21,
498 0x0d98020c, 512 0xf805a221,
499 0x080f9803,
500 0x0200e7f1,
501 0x016f21f5,
502 0x025e21f5,
503 0xf40601f4,
504/* 0x0686: ctx_xfer_post */
505 0x21f50712,
506/* 0x068a: ctx_xfer_done */
507 0x21f5027f,
508 0x00f80591,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000, 513 0x00000000,
514 0x00000000, 514 0x00000000,
515 0x00000000, 515 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
index 6316ebaf5d9a..1b803197d28b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
@@ -196,7 +196,7 @@ uint32_t nvf0_grgpc_code[] = {
196 0x1fb4f000, 196 0x1fb4f000,
197 0xf410b4b0, 197 0xf410b4b0,
198 0xa7f0f01b, 198 0xa7f0f01b,
199 0xd021f402, 199 0xd021f405,
200/* 0x0223: mmctx_stop */ 200/* 0x0223: mmctx_stop */
201 0xc82b0ef4, 201 0xc82b0ef4,
202 0xb4b600ab, 202 0xb4b600ab,
@@ -304,212 +304,212 @@ uint32_t nvf0_grgpc_code[] = {
304 0x21f440e3, 304 0x21f440e3,
305 0xf8e0fc9d, 305 0xf8e0fc9d,
306/* 0x03a1: init */ 306/* 0x03a1: init */
307 0xfe04bd00, 307 0xf104bd00,
308 0x27f00004, 308 0xf0420017,
309 0x0007f102, 309 0x11cf0013,
310 0x0003f012, 310 0x0911e700,
311 0xbd0002d0, 311 0x0814b601,
312 0x1f17f104, 312 0xf00014fe,
313 0x0010fe05, 313 0x07f10227,
314 0x070007f1, 314 0x03f01200,
315 0x0002d000,
316 0x17f104bd,
317 0x10fe0530,
318 0x0007f100,
319 0x0003f007,
320 0xbd0000d0,
321 0x0427f004,
322 0x040007f1,
315 0xd00003f0, 323 0xd00003f0,
316 0x04bd0000, 324 0x04bd0002,
317 0xf10427f0, 325 0xf11031f4,
318 0xf0040007, 326 0xf0820027,
319 0x02d00003, 327 0x22cf0123,
328 0x0137f000,
329 0xbb1f24f0,
330 0x32b60432,
331 0x05028001,
332 0xf1060380,
333 0xf0860027,
334 0x22cf0123,
335 0x04028000,
336 0x0c30e7f1,
337 0xbd50e3f0,
338 0xbd34bd24,
339/* 0x0421: init_unk_loop */
340 0x6821f444,
341 0xf400f6b0,
342 0xf7f00f0b,
343 0x04f2bb01,
344 0xb6054ffd,
345/* 0x0436: init_unk_next */
346 0x20b60130,
347 0x04e0b601,
348 0xf40226b0,
349/* 0x0442: init_unk_done */
350 0x0380e21b,
351 0x08048007,
352 0x010027f1,
353 0xcf0223f0,
354 0x34bd0022,
355 0xf1082595,
356 0xf0c00007,
357 0x05d00103,
358 0xf104bd00,
359 0xf0c10007,
360 0x05d00103,
361 0x9804bd00,
362 0x0f98000e,
363 0x5021f501,
364 0x002fbb01,
365 0x98003fbb,
366 0x0f98010e,
367 0x5021f502,
368 0x050e9801,
369 0xbb00effd,
370 0x3ebb002e,
371 0x020e9800,
372 0xf5030f98,
373 0x98015021,
374 0xeffd070e,
375 0x002ebb00,
376 0xb6003ebb,
377 0x07f10235,
378 0x03f0d300,
379 0x0003d001,
380 0x25b604bd,
381 0x0635b608,
382 0xb60120b6,
383 0x24b60130,
384 0x0834b608,
385 0xf5022fb9,
386 0xbb02d321,
387 0x07f1003f,
388 0x03f00100,
389 0x0003d002,
390 0x24bd04bd,
391 0xf11f29f0,
392 0xf0300007,
393 0x02d00203,
394/* 0x04f3: main */
320 0xf404bd00, 395 0xf404bd00,
321 0x27f11031, 396 0x28f40031,
322 0x23f08200, 397 0x24d7f000,
323 0x0022cf01, 398 0xf43921f4,
324 0xf00137f0, 399 0xe4b0f401,
325 0x32bb1f24, 400 0x1e18f404,
326 0x0132b604, 401 0xf00181fe,
327 0x80050280, 402 0x20bd0627,
328 0x27f10603, 403 0xb60412fd,
329 0x23f08600, 404 0x1efd01e4,
330 0x0022cf01, 405 0x0018fe05,
331 0xf1040280, 406 0x05e821f5,
332 0xf00c30e7, 407/* 0x0523: main_not_ctx_xfer */
333 0x24bd50e3, 408 0x94d30ef4,
334 0x44bd34bd, 409 0xf5f010ef,
335/* 0x0410: init_unk_loop */ 410 0x7e21f501,
336 0xb06821f4, 411 0xc60ef403,
337 0x0bf400f6, 412/* 0x0530: ih */
338 0x01f7f00f, 413 0x88fe80f9,
339 0xfd04f2bb, 414 0xf980f901,
340 0x30b6054f, 415 0xf9a0f990,
341/* 0x0425: init_unk_next */ 416 0xf9d0f9b0,
342 0x0120b601, 417 0xbdf0f9e0,
343 0xb004e0b6, 418 0x00a7f104,
344 0x1bf40226, 419 0x00a3f002,
345/* 0x0431: init_unk_done */ 420 0xc400aacf,
346 0x070380e2, 421 0x0bf404ab,
347 0xf1080480, 422 0x24d7f02c,
348 0xf0010027, 423 0x1a00e7f1,
349 0x22cf0223, 424 0xcf00e3f0,
350 0x9534bd00, 425 0xf7f100ee,
351 0x07f10825, 426 0xf3f01900,
352 0x03f0c000, 427 0x00ffcf00,
353 0x0005d001, 428 0xf00421f4,
429 0x07f101e7,
430 0x03f01d00,
431 0x000ed000,
432/* 0x057e: ih_no_fifo */
354 0x07f104bd, 433 0x07f104bd,
355 0x03f0c100, 434 0x03f00100,
356 0x0005d001, 435 0x000ad000,
357 0x0e9804bd, 436 0xf0fc04bd,
358 0x010f9800, 437 0xd0fce0fc,
359 0x015021f5, 438 0xa0fcb0fc,
360 0xbb002fbb, 439 0x80fc90fc,
361 0x0e98003f, 440 0xfc0088fe,
362 0x020f9801, 441 0x0032f480,
363 0x015021f5, 442/* 0x05a2: hub_barrier_done */
364 0xfd050e98, 443 0xf7f001f8,
365 0x2ebb00ef, 444 0x040e9801,
366 0x003ebb00, 445 0xb904febb,
367 0x98020e98, 446 0xe7f102ff,
368 0x21f5030f, 447 0xe3f09418,
369 0x0e980150, 448 0x9d21f440,
370 0x00effd07, 449/* 0x05ba: ctx_redswitch */
371 0xbb002ebb, 450 0xf7f000f8,
372 0x35b6003e, 451 0x0007f120,
373 0x0007f102,
374 0x0103f0d3,
375 0xbd0003d0,
376 0x0825b604,
377 0xb60635b6,
378 0x30b60120,
379 0x0824b601,
380 0xb90834b6,
381 0x21f5022f,
382 0x3fbb02d3,
383 0x0007f100,
384 0x0203f001,
385 0xbd0003d0,
386 0xf024bd04,
387 0x07f11f29,
388 0x03f03000,
389 0x0002d002,
390/* 0x04e2: main */
391 0x31f404bd,
392 0x0028f400,
393 0xf424d7f0,
394 0x01f43921,
395 0x04e4b0f4,
396 0xfe1e18f4,
397 0x27f00181,
398 0xfd20bd06,
399 0xe4b60412,
400 0x051efd01,
401 0xf50018fe,
402 0xf405d721,
403/* 0x0512: main_not_ctx_xfer */
404 0xef94d30e,
405 0x01f5f010,
406 0x037e21f5,
407/* 0x051f: ih */
408 0xf9c60ef4,
409 0x0188fe80,
410 0x90f980f9,
411 0xb0f9a0f9,
412 0xe0f9d0f9,
413 0x04bdf0f9,
414 0x0200a7f1,
415 0xcf00a3f0,
416 0xabc400aa,
417 0x2c0bf404,
418 0xf124d7f0,
419 0xf01a00e7,
420 0xeecf00e3,
421 0x00f7f100,
422 0x00f3f019,
423 0xf400ffcf,
424 0xe7f00421,
425 0x0007f101,
426 0x0003f01d,
427 0xbd000ed0,
428/* 0x056d: ih_no_fifo */
429 0x0007f104,
430 0x0003f001,
431 0xbd000ad0,
432 0xfcf0fc04,
433 0xfcd0fce0,
434 0xfca0fcb0,
435 0xfe80fc90,
436 0x80fc0088,
437 0xf80032f4,
438/* 0x0591: hub_barrier_done */
439 0x01f7f001,
440 0xbb040e98,
441 0xffb904fe,
442 0x18e7f102,
443 0x40e3f094,
444 0xf89d21f4,
445/* 0x05a9: ctx_redswitch */
446 0x20f7f000,
447 0x850007f1,
448 0xd00103f0,
449 0x04bd000f,
450/* 0x05bb: ctx_redswitch_delay */
451 0xb608e7f0,
452 0x1bf401e2,
453 0x00f5f1fd,
454 0x00f5f108,
455 0x0007f102,
456 0x0103f085, 452 0x0103f085,
457 0xbd000fd0, 453 0xbd000fd0,
458/* 0x05d7: ctx_xfer */ 454 0x08e7f004,
459 0xf100f804, 455/* 0x05cc: ctx_redswitch_delay */
460 0xf0810007, 456 0xf401e2b6,
461 0x0fd00203, 457 0xf5f1fd1b,
462 0xf404bd00, 458 0xf5f10800,
463 0x21f50711, 459 0x07f10200,
464/* 0x05ea: ctx_xfer_not_load */ 460 0x03f08500,
465 0x21f505a9, 461 0x000fd001,
466 0x24bd026a, 462 0x00f804bd,
467 0x47fc07f1, 463/* 0x05e8: ctx_xfer */
464 0x810007f1,
468 0xd00203f0, 465 0xd00203f0,
469 0x04bd0002, 466 0x04bd000f,
470 0xb6012cf0, 467 0xf50711f4,
471 0x07f10320, 468/* 0x05fb: ctx_xfer_not_load */
472 0x03f04afc, 469 0xf505ba21,
473 0x0002d002, 470 0xbd026a21,
474 0xacf004bd, 471 0xfc07f124,
475 0x02a5f001, 472 0x0203f047,
476 0x0000b7f1, 473 0xbd0002d0,
477 0x9850b3f0, 474 0x012cf004,
478 0xc4b6040c, 475 0xf10320b6,
479 0x00bcbb0f, 476 0xf04afc07,
480 0x98000c98, 477 0x02d00203,
481 0xe7f0010d, 478 0xf004bd00,
482 0x6f21f500, 479 0xa5f001ac,
483 0x01acf001, 480 0x00b7f102,
484 0x4000b7f1, 481 0x50b3f000,
482 0xb6040c98,
483 0xbcbb0fc4,
484 0x000c9800,
485 0xf0010d98,
486 0x21f500e7,
487 0xacf0016f,
488 0x00b7f101,
489 0x50b3f040,
490 0xb6040c98,
491 0xbcbb0fc4,
492 0x010c9800,
493 0x98020d98,
494 0xe7f1060f,
495 0x21f50800,
496 0xacf0016f,
497 0x04a5f001,
498 0x3000b7f1,
485 0x9850b3f0, 499 0x9850b3f0,
486 0xc4b6040c, 500 0xc4b6040c,
487 0x00bcbb0f, 501 0x00bcbb0f,
488 0x98010c98, 502 0x98020c98,
489 0x0f98020d, 503 0x0f98030d,
490 0x00e7f106, 504 0x00e7f108,
491 0x6f21f508, 505 0x6f21f502,
492 0x01acf001, 506 0x5e21f501,
493 0xf104a5f0, 507 0x0601f402,
494 0xf03000b7, 508/* 0x0697: ctx_xfer_post */
495 0x0c9850b3, 509 0xf50712f4,
496 0x0fc4b604, 510/* 0x069b: ctx_xfer_done */
497 0x9800bcbb, 511 0xf5027f21,
498 0x0d98020c, 512 0xf805a221,
499 0x080f9803,
500 0x0200e7f1,
501 0x016f21f5,
502 0x025e21f5,
503 0xf40601f4,
504/* 0x0686: ctx_xfer_post */
505 0x21f50712,
506/* 0x068a: ctx_xfer_done */
507 0x21f5027f,
508 0x00f80591,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000, 513 0x00000000,
514 0x00000000, 514 0x00000000,
515 0x00000000, 515 0x00000000,
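
Note that the functional change in this header is a single word, 0xd021f402 becoming 0xd021f405 in the code immediately before mmctx_stop (the same spot the new gm107 image labels mmctx_fini_wait). Because every hub and gpc variant is assembled from the shared com.fuc, the identical one-word flip (0x00b87e02 to 0x00b87e05 in the fuc5 builds) repeats in each generated header below. A standalone check for spotting such deltas between two generated arrays, using toy data in place of the full images:

	#include <stdio.h>
	#include <stdint.h>

	/* Toy stand-ins for two revisions of a generated fuc image. */
	static const uint32_t old_img[] = {
		0x1fb4f000, 0xf410b4b0, 0xa7f0f01b, 0xd021f402,
	};
	static const uint32_t new_img[] = {
		0x1fb4f000, 0xf410b4b0, 0xa7f0f01b, 0xd021f405,
	};

	int main(void)
	{
		size_t i, n = sizeof(old_img) / sizeof(old_img[0]);

		for (i = 0; i < n; i++)
			if (old_img[i] != new_img[i])
				printf("word %zu: 0x%08x -> 0x%08x\n",
				       i, old_img[i], new_img[i]);
		return 0;
	}
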
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5
new file mode 100644
index 000000000000..27591b3086a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#define CHIPSET GK208
26#include "macros.fuc"
27
28.section #gm107_grhub_data
29#define INCLUDE_DATA
30#include "com.fuc"
31#include "hub.fuc"
32#undef INCLUDE_DATA
33
34.section #gm107_grhub_code
35#define INCLUDE_CODE
36bra #init
37#include "com.fuc"
38#include "hub.fuc"
39.align 256
40#undef INCLUDE_CODE
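
The wrapper above is the entire per-chipset source: CHIPSET selects register layouts in macros.fuc (note the GM107 hub build reuses the GK208 layout), the two sections pull the shared com.fuc/hub.fuc bodies in under INCLUDE_DATA/INCLUDE_CODE, and the leading `bra #init` makes the first code word jump to the shared entry point, with .align 256 padding the image out. By analogy, a GPC-side wrapper would look like the sketch below; the actual gpcgm107.fuc5 used later in this series may well differ:

	#define CHIPSET GK208
	#include "macros.fuc"

	.section #gm107_grgpc_data
	#define INCLUDE_DATA
	#include "com.fuc"
	#include "gpc.fuc"
	#undef INCLUDE_DATA

	.section #gm107_grgpc_code
	#define INCLUDE_CODE
	bra #init
	#include "com.fuc"
	#include "gpc.fuc"
	.align 256
	#undef INCLUDE_CODE
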
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
new file mode 100644
index 000000000000..214dd16ec566
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
@@ -0,0 +1,916 @@
1uint32_t gm107_grhub_data[] = {
2/* 0x0000: hub_mmio_list_head */
3 0x00000300,
4/* 0x0004: hub_mmio_list_tail */
5 0x00000304,
6/* 0x0008: gpc_count */
7 0x00000000,
8/* 0x000c: rop_count */
9 0x00000000,
10/* 0x0010: cmd_queue */
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29/* 0x0058: ctx_current */
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x00000000,
71 0x00000000,
72/* 0x0100: chan_data */
73/* 0x0100: chan_mmio_count */
74 0x00000000,
75/* 0x0104: chan_mmio_address */
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139/* 0x0200: xfer_data */
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204/* 0x0300: hub_mmio_list_base */
205 0x0417e91c,
206};
207
208uint32_t gm107_grhub_code[] = {
209 0x030e0ef5,
210/* 0x0004: queue_put */
211 0x9800d898,
212 0x86f001d9,
213 0xf489a408,
214 0x020f0b1b,
215 0x0002f87e,
216/* 0x001a: queue_put_next */
217 0x98c400f8,
218 0x0384b607,
219 0xb6008dbb,
220 0x8eb50880,
221 0x018fb500,
222 0xf00190b6,
223 0xd9b50f94,
224/* 0x0037: queue_get */
225 0xf400f801,
226 0xd8980131,
227 0x01d99800,
228 0x0bf489a4,
229 0x0789c421,
230 0xbb0394b6,
231 0x90b6009d,
232 0x009e9808,
233 0xb6019f98,
234 0x84f00180,
235 0x00d8b50f,
236/* 0x0063: queue_get_done */
237 0xf80132f4,
238/* 0x0065: nv_rd32 */
239 0xf0ecb200,
240 0x00801fc9,
241 0x0cf601ca,
242/* 0x0073: nv_rd32_wait */
243 0x8c04bd00,
244 0xcf01ca00,
245 0xccc800cc,
246 0xf61bf41f,
247 0xec7e060a,
248 0x008f0000,
249 0xffcf01cb,
250/* 0x008f: nv_wr32 */
251 0x8000f800,
252 0xf601cc00,
253 0x04bd000f,
254 0xc9f0ecb2,
255 0x1ec9f01f,
256 0x01ca0080,
257 0xbd000cf6,
258/* 0x00a9: nv_wr32_wait */
259 0xca008c04,
260 0x00cccf01,
261 0xf41fccc8,
262 0x00f8f61b,
263/* 0x00b8: wait_donez */
264 0x99f094bd,
265 0x37008000,
266 0x0009f602,
267 0x008004bd,
268 0x0af60206,
269/* 0x00cf: wait_donez_ne */
270 0x8804bd00,
271 0xcf010000,
272 0x8aff0088,
273 0xf61bf488,
274 0x99f094bd,
275 0x17008000,
276 0x0009f602,
277 0x00f804bd,
278/* 0x00ec: wait_doneo */
279 0x99f094bd,
280 0x37008000,
281 0x0009f602,
282 0x008004bd,
283 0x0af60206,
284/* 0x0103: wait_doneo_e */
285 0x8804bd00,
286 0xcf010000,
287 0x8aff0088,
288 0xf60bf488,
289 0x99f094bd,
290 0x17008000,
291 0x0009f602,
292 0x00f804bd,
293/* 0x0120: mmctx_size */
294/* 0x0122: nv_mmctx_size_loop */
295 0xe89894bd,
296 0x1a85b600,
297 0xb60180b6,
298 0x98bb0284,
299 0x04e0b600,
300 0x1bf4efa4,
301 0xf89fb2ec,
302/* 0x013d: mmctx_xfer */
303 0xf094bd00,
304 0x00800199,
305 0x09f60237,
306 0xbd04bd00,
307 0x05bbfd94,
308 0x800f0bf4,
309 0xf601c400,
310 0x04bd000b,
311/* 0x015f: mmctx_base_disabled */
312 0xfd0099f0,
313 0x0bf405ee,
314 0xc6008018,
315 0x000ef601,
316 0x008004bd,
317 0x0ff601c7,
318 0xf004bd00,
319/* 0x017a: mmctx_multi_disabled */
320 0xabc80199,
321 0x10b4b600,
322 0xc80cb9f0,
323 0xe4b601ae,
324 0x05befd11,
325 0x01c50080,
326 0xbd000bf6,
327/* 0x0195: mmctx_exec_loop */
328/* 0x0195: mmctx_wait_free */
329 0xc5008e04,
330 0x00eecf01,
331 0xf41fe4f0,
332 0xce98f60b,
333 0x05e9fd00,
334 0x01c80080,
335 0xbd000ef6,
336 0x04c0b604,
337 0x1bf4cda4,
338 0x02abc8df,
339/* 0x01bf: mmctx_fini_wait */
340 0x8b1c1bf4,
341 0xcf01c500,
342 0xb4f000bb,
343 0x10b4b01f,
344 0x0af31bf4,
345 0x00b87e05,
346 0x250ef400,
347/* 0x01d8: mmctx_stop */
348 0xb600abc8,
349 0xb9f010b4,
350 0x12b9f00c,
351 0x01c50080,
352 0xbd000bf6,
353/* 0x01ed: mmctx_stop_wait */
354 0xc5008b04,
355 0x00bbcf01,
356 0xf412bbc8,
357/* 0x01fa: mmctx_done */
358 0x94bdf61b,
359 0x800199f0,
360 0xf6021700,
361 0x04bd0009,
362/* 0x020a: strand_wait */
363 0xa0f900f8,
364 0xb87e020a,
365 0xa0fc0000,
366/* 0x0216: strand_pre */
367 0x0c0900f8,
368 0x024afc80,
369 0xbd0009f6,
370 0x020a7e04,
371/* 0x0227: strand_post */
372 0x0900f800,
373 0x4afc800d,
374 0x0009f602,
375 0x0a7e04bd,
376 0x00f80002,
377/* 0x0238: strand_set */
378 0xfc800f0c,
379 0x0cf6024f,
380 0x0c04bd00,
381 0x4afc800b,
382 0x000cf602,
383 0xfc8004bd,
384 0x0ef6024f,
385 0x0c04bd00,
386 0x4afc800a,
387 0x000cf602,
388 0x0a7e04bd,
389 0x00f80002,
390/* 0x0268: strand_ctx_init */
391 0x99f094bd,
392 0x37008003,
393 0x0009f602,
394 0x167e04bd,
395 0x030e0002,
396 0x0002387e,
397 0xfc80c4bd,
398 0x0cf60247,
399 0x0c04bd00,
400 0x4afc8001,
401 0x000cf602,
402 0x0a7e04bd,
403 0x0c920002,
404 0x46fc8001,
405 0x000cf602,
406 0x020c04bd,
407 0x024afc80,
408 0xbd000cf6,
409 0x020a7e04,
410 0x02277e00,
411 0x42008800,
412 0x20008902,
413 0x0099cf02,
414/* 0x02c7: ctx_init_strand_loop */
415 0xf608fe95,
416 0x8ef6008e,
417 0x808acf40,
418 0xb606a5b6,
419 0xeabb01a0,
420 0x0480b600,
421 0xf40192b6,
422 0xe4b6e81b,
423 0xf2efbc08,
424 0x99f094bd,
425 0x17008003,
426 0x0009f602,
427 0x00f804bd,
428/* 0x02f8: error */
429 0x02050080,
430 0xbd000ff6,
431 0x80010f04,
432 0xf6030700,
433 0x04bd000f,
434/* 0x030e: init */
435 0x04bd00f8,
436 0x410007fe,
437 0x11cf4200,
438 0x0911e700,
439 0x0814b601,
440 0x020014fe,
441 0x12004002,
442 0xbd0002f6,
443 0x05c94104,
444 0xbd0010fe,
445 0x07004024,
446 0xbd0002f6,
447 0x20034204,
448 0x01010080,
449 0xbd0002f6,
450 0x20044204,
451 0x01010480,
452 0xbd0002f6,
453 0x200b4204,
454 0x01010880,
455 0xbd0002f6,
456 0x200c4204,
457 0x01011c80,
458 0xbd0002f6,
459 0x01039204,
460 0x03090080,
461 0xbd0003f6,
462 0x87044204,
463 0xf6040040,
464 0x04bd0002,
465 0x00400402,
466 0x0002f603,
467 0x31f404bd,
468 0x96048e10,
469 0x00657e40,
470 0xc7feb200,
471 0x01b590f1,
472 0x1ff4f003,
473 0x01020fb5,
474 0x041fbb01,
475 0x800112b6,
476 0xf6010300,
477 0x04bd0001,
478 0x01040080,
479 0xbd0001f6,
480 0x01004104,
481 0x627e020f,
482 0x717e0006,
483 0x100f0006,
484 0x0006b37e,
485 0x98000e98,
486 0x207e010f,
487 0x14950001,
488 0xc0008008,
489 0x0004f601,
490 0x008004bd,
491 0x04f601c1,
492 0xb704bd00,
493 0xbb130030,
494 0xf5b6001f,
495 0xd3008002,
496 0x000ff601,
497 0x15b604bd,
498 0x0110b608,
499 0xb20814b6,
500 0x02687e1f,
501 0x001fbb00,
502 0x84020398,
503/* 0x041f: init_gpc */
504 0xb8502000,
505 0x0008044e,
506 0x8f7e1fb2,
507 0x4eb80000,
508 0xbd00010c,
509 0x008f7ef4,
510 0x044eb800,
511 0x8f7e0001,
512 0x4eb80000,
513 0x0f000100,
514 0x008f7e02,
515 0x004eb800,
516/* 0x044e: init_gpc_wait */
517 0x657e0008,
518 0xffc80000,
519 0xf90bf41f,
520 0x08044eb8,
521 0x00657e00,
522 0x001fbb00,
523 0x800040b7,
524 0xf40132b6,
525 0x000fb41b,
526 0x0006b37e,
527 0x627e000f,
528 0x00800006,
529 0x01f60201,
530 0xbd04bd00,
531 0x1f19f014,
532 0x02300080,
533 0xbd0001f6,
534/* 0x0491: main */
535 0x0031f404,
536 0x0d0028f4,
537 0x00377e10,
538 0xf401f400,
539 0x4001e4b1,
540 0x00c71bf5,
541 0x99f094bd,
542 0x37008004,
543 0x0009f602,
544 0x008104bd,
545 0x11cf02c0,
546 0xc1008200,
547 0x0022cf02,
548 0xf41f13c8,
549 0x23c8770b,
550 0x550bf41f,
551 0x12b220f9,
552 0x99f094bd,
553 0x37008007,
554 0x0009f602,
555 0x32f404bd,
556 0x0231f401,
557 0x0008367e,
558 0x99f094bd,
559 0x17008007,
560 0x0009f602,
561 0x20fc04bd,
562 0x99f094bd,
563 0x37008006,
564 0x0009f602,
565 0x31f404bd,
566 0x08367e01,
567 0xf094bd00,
568 0x00800699,
569 0x09f60217,
570 0xf404bd00,
571/* 0x0522: chsw_prev_no_next */
572 0x20f92f0e,
573 0x32f412b2,
574 0x0232f401,
575 0x0008367e,
576 0x008020fc,
577 0x02f602c0,
578 0xf404bd00,
579/* 0x053e: chsw_no_prev */
580 0x23c8130e,
581 0x0d0bf41f,
582 0xf40131f4,
583 0x367e0232,
584/* 0x054e: chsw_done */
585 0x01020008,
586 0x02c30080,
587 0xbd0002f6,
588 0xf094bd04,
589 0x00800499,
590 0x09f60217,
591 0xf504bd00,
592/* 0x056b: main_not_ctx_switch */
593 0xb0ff2a0e,
594 0x1bf401e4,
595 0x7ef2b20c,
596 0xf40007d6,
597/* 0x057a: main_not_ctx_chan */
598 0xe4b0400e,
599 0x2c1bf402,
600 0x99f094bd,
601 0x37008007,
602 0x0009f602,
603 0x32f404bd,
604 0x0232f401,
605 0x0008367e,
606 0x99f094bd,
607 0x17008007,
608 0x0009f602,
609 0x0ef404bd,
610/* 0x05a9: main_not_ctx_save */
611 0x10ef9411,
612 0x7e01f5f0,
613 0xf50002f8,
614/* 0x05b7: main_done */
615 0xbdfede0e,
616 0x1f29f024,
617 0x02300080,
618 0xbd0002f6,
619 0xcc0ef504,
620/* 0x05c9: ih */
621 0xfe80f9fe,
622 0x80f90188,
623 0xa0f990f9,
624 0xd0f9b0f9,
625 0xf0f9e0f9,
626 0x004a04bd,
627 0x00aacf02,
628 0xf404abc4,
629 0x100d230b,
630 0xcf1a004e,
631 0x004f00ee,
632 0x00ffcf19,
633 0x0000047e,
634 0x0400b0b7,
635 0x0040010e,
636 0x000ef61d,
637/* 0x060a: ih_no_fifo */
638 0xabe404bd,
639 0x0bf40100,
640 0x4e100d0c,
641 0x047e4001,
642/* 0x061a: ih_no_ctxsw */
643 0xabe40000,
644 0x0bf40400,
645 0x01004b10,
646 0x448ebfb2,
647 0x8f7e4001,
648/* 0x062e: ih_no_fwmthd */
649 0x044b0000,
650 0xffb0bd01,
651 0x0bf4b4ab,
652 0x0700800c,
653 0x000bf603,
654/* 0x0642: ih_no_other */
655 0x004004bd,
656 0x000af601,
657 0xf0fc04bd,
658 0xd0fce0fc,
659 0xa0fcb0fc,
660 0x80fc90fc,
661 0xfc0088fe,
662 0x0032f480,
663/* 0x0662: ctx_4170s */
664 0xf5f001f8,
665 0x8effb210,
666 0x7e404170,
667 0xf800008f,
668/* 0x0671: ctx_4170w */
669 0x41708e00,
670 0x00657e40,
671 0xf0ffb200,
672 0x1bf410f4,
673/* 0x0683: ctx_redswitch */
674 0x4e00f8f3,
675 0xe5f00200,
676 0x20e5f040,
677 0x8010e5f0,
678 0xf6018500,
679 0x04bd000e,
680/* 0x069a: ctx_redswitch_delay */
681 0xf2b6080f,
682 0xfd1bf401,
683 0x0400e5f1,
684 0x0100e5f1,
685 0x01850080,
686 0xbd000ef6,
687/* 0x06b3: ctx_86c */
688 0x8000f804,
689 0xf6022300,
690 0x04bd000f,
691 0x148effb2,
692 0x8f7e408a,
693 0xffb20000,
694 0x41a88c8e,
695 0x00008f7e,
696/* 0x06d2: ctx_mem */
697 0x008000f8,
698 0x0ff60284,
699/* 0x06db: ctx_mem_wait */
700 0x8f04bd00,
701 0xcf028400,
702 0xfffd00ff,
703 0xf61bf405,
704/* 0x06ea: ctx_load */
705 0x94bd00f8,
706 0x800599f0,
707 0xf6023700,
708 0x04bd0009,
709 0xb87e0c0a,
710 0xf4bd0000,
711 0x02890080,
712 0xbd000ff6,
713 0xc1008004,
714 0x0002f602,
715 0x008004bd,
716 0x02f60283,
717 0x0f04bd00,
718 0x06d27e07,
719 0xc0008000,
720 0x0002f602,
721 0x0bfe04bd,
722 0x1f2af000,
723 0xb60424b6,
724 0x94bd0220,
725 0x800899f0,
726 0xf6023700,
727 0x04bd0009,
728 0x02810080,
729 0xbd0002f6,
730 0x0000d204,
731 0x25f08000,
732 0x88008002,
733 0x0002f602,
734 0x100104bd,
735 0xf0020042,
736 0x12fa0223,
737 0xbd03f805,
738 0x0899f094,
739 0x02170080,
740 0xbd0009f6,
741 0x81019804,
742 0x981814b6,
743 0x25b68002,
744 0x0512fd08,
745 0xbd1601b5,
746 0x0999f094,
747 0x02370080,
748 0xbd0009f6,
749 0x81008004,
750 0x0001f602,
751 0x010204bd,
752 0x02880080,
753 0xbd0002f6,
754 0x01004104,
755 0xfa0613f0,
756 0x03f80501,
757 0x99f094bd,
758 0x17008009,
759 0x0009f602,
760 0x94bd04bd,
761 0x800599f0,
762 0xf6021700,
763 0x04bd0009,
764/* 0x07d6: ctx_chan */
765 0xea7e00f8,
766 0x0c0a0006,
767 0x0000b87e,
768 0xd27e050f,
769 0x00f80006,
770/* 0x07e8: ctx_mmio_exec */
771 0x80410398,
772 0xf6028100,
773 0x04bd0003,
774/* 0x07f6: ctx_mmio_loop */
775 0x34c434bd,
776 0x0e1bf4ff,
777 0xf0020045,
778 0x35fa0653,
779/* 0x0807: ctx_mmio_pull */
780 0x9803f805,
781 0x4f98804e,
782 0x008f7e81,
783 0x0830b600,
784 0xf40112b6,
785/* 0x081a: ctx_mmio_done */
786 0x0398df1b,
787 0x81008016,
788 0x0003f602,
789 0x00b504bd,
790 0x01004140,
791 0xfa0613f0,
792 0x03f80601,
793/* 0x0836: ctx_xfer */
794 0x040e00f8,
795 0x03020080,
796 0xbd000ef6,
797/* 0x0841: ctx_xfer_idle */
798 0x00008e04,
799 0x00eecf03,
800 0x2000e4f1,
801 0xf4f51bf4,
802 0x02f40611,
803/* 0x0855: ctx_xfer_pre */
804 0x7e100f0c,
805 0xf40006b3,
806/* 0x085e: ctx_xfer_pre_load */
807 0x020f1b11,
808 0x0006627e,
809 0x0006717e,
810 0x0006837e,
811 0x627ef4bd,
812 0xea7e0006,
813/* 0x0876: ctx_xfer_exec */
814 0x01980006,
815 0x8024bd16,
816 0xf6010500,
817 0x04bd0002,
818 0x008e1fb2,
819 0x8f7e41a5,
820 0xfcf00000,
821 0x022cf001,
822 0xfd0124b6,
823 0xffb205f2,
824 0x41a5048e,
825 0x00008f7e,
826 0x0002167e,
827 0xfc8024bd,
828 0x02f60247,
829 0xf004bd00,
830 0x20b6012c,
831 0x4afc8003,
832 0x0002f602,
833 0xacf004bd,
834 0x06a5f001,
835 0x0c98000b,
836 0x010d9800,
837 0x3d7e000e,
838 0x080a0001,
839 0x0000ec7e,
840 0x00020a7e,
841 0x0a1201f4,
842 0x00b87e0c,
843 0x7e050f00,
844 0xf40006d2,
845/* 0x08f2: ctx_xfer_post */
846 0x020f2d02,
847 0x0006627e,
848 0xb37ef4bd,
849 0x277e0006,
850 0x717e0002,
851 0xf4bd0006,
852 0x0006627e,
853 0x981011f4,
854 0x11fd4001,
855 0x070bf405,
856 0x0007e87e,
857/* 0x091c: ctx_xfer_no_post_mmio */
858/* 0x091c: ctx_xfer_done */
859 0x000000f8,
860 0x00000000,
861 0x00000000,
862 0x00000000,
863 0x00000000,
864 0x00000000,
865 0x00000000,
866 0x00000000,
867 0x00000000,
868 0x00000000,
869 0x00000000,
870 0x00000000,
871 0x00000000,
872 0x00000000,
873 0x00000000,
874 0x00000000,
875 0x00000000,
876 0x00000000,
877 0x00000000,
878 0x00000000,
879 0x00000000,
880 0x00000000,
881 0x00000000,
882 0x00000000,
883 0x00000000,
884 0x00000000,
885 0x00000000,
886 0x00000000,
887 0x00000000,
888 0x00000000,
889 0x00000000,
890 0x00000000,
891 0x00000000,
892 0x00000000,
893 0x00000000,
894 0x00000000,
895 0x00000000,
896 0x00000000,
897 0x00000000,
898 0x00000000,
899 0x00000000,
900 0x00000000,
901 0x00000000,
902 0x00000000,
903 0x00000000,
904 0x00000000,
905 0x00000000,
906 0x00000000,
907 0x00000000,
908 0x00000000,
909 0x00000000,
910 0x00000000,
911 0x00000000,
912 0x00000000,
913 0x00000000,
914 0x00000000,
915 0x00000000,
916};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
index 4750984bf380..64dfd75192bf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -342,7 +342,7 @@ uint32_t nv108_grhub_code[] = {
342 0xb4f000bb, 342 0xb4f000bb,
343 0x10b4b01f, 343 0x10b4b01f,
344 0x0af31bf4, 344 0x0af31bf4,
345 0x00b87e02, 345 0x00b87e05,
346 0x250ef400, 346 0x250ef400,
347/* 0x01d8: mmctx_stop */ 347/* 0x01d8: mmctx_stop */
348 0xb600abc8, 348 0xb600abc8,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 132f684b1946..f8f7b278a13f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvc0_grhub_code[] = {
361 0x1fb4f000, 361 0x1fb4f000,
362 0xf410b4b0, 362 0xf410b4b0,
363 0xa7f0f01b, 363 0xa7f0f01b,
364 0xd021f402, 364 0xd021f405,
365/* 0x0223: mmctx_stop */ 365/* 0x0223: mmctx_stop */
366 0xc82b0ef4, 366 0xc82b0ef4,
367 0xb4b600ab, 367 0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index 84af82418987..624215a005b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvd7_grhub_code[] = {
361 0x1fb4f000, 361 0x1fb4f000,
362 0xf410b4b0, 362 0xf410b4b0,
363 0xa7f0f01b, 363 0xa7f0f01b,
364 0xd021f402, 364 0xd021f405,
365/* 0x0223: mmctx_stop */ 365/* 0x0223: mmctx_stop */
366 0xc82b0ef4, 366 0xc82b0ef4,
367 0xb4b600ab, 367 0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index 1c179bdd48cc..6547b3dfc7ed 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nve0_grhub_code[] = {
361 0x1fb4f000, 361 0x1fb4f000,
362 0xf410b4b0, 362 0xf410b4b0,
363 0xa7f0f01b, 363 0xa7f0f01b,
364 0xd021f402, 364 0xd021f405,
365/* 0x0223: mmctx_stop */ 365/* 0x0223: mmctx_stop */
366 0xc82b0ef4, 366 0xc82b0ef4,
367 0xb4b600ab, 367 0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index 229c0ae37228..a5aee5a4302f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -361,7 +361,7 @@ uint32_t nvf0_grhub_code[] = {
361 0x1fb4f000, 361 0x1fb4f000,
362 0xf410b4b0, 362 0xf410b4b0,
363 0xa7f0f01b, 363 0xa7f0f01b,
364 0xd021f402, 364 0xd021f405,
365/* 0x0223: mmctx_stop */ 365/* 0x0223: mmctx_stop */
366 0xc82b0ef4, 366 0xc82b0ef4,
367 0xb4b600ab, 367 0xb4b600ab,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index 6ffe28307dbd..a47d49db5232 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -132,6 +132,7 @@
132#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068 132#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068
133#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074 133#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074
134#define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608 134#define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608
135#define NV_PGRAPH_GPCX_GPCCS_CAPS 0x41a108
135#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614 136#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614
136#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800 137#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800
137#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200 138#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
new file mode 100644
index 000000000000..21c5f31d607f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
@@ -0,0 +1,465 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/P0260.h>
27
28#include "nvc0.h"
29#include "ctxnvc0.h"
30
31/*******************************************************************************
32 * Graphics object classes
33 ******************************************************************************/
34
35static struct nouveau_oclass
36gm107_graph_sclass[] = {
37 { 0x902d, &nouveau_object_ofuncs },
38 { 0xa140, &nouveau_object_ofuncs },
39 { 0xb097, &nouveau_object_ofuncs },
40 { 0xb0c0, &nouveau_object_ofuncs },
41 {}
42};
43
44/*******************************************************************************
45 * PGRAPH register lists
46 ******************************************************************************/
47
48static const struct nvc0_graph_init
49gm107_graph_init_main_0[] = {
50 { 0x400080, 1, 0x04, 0x003003c2 },
51 { 0x400088, 1, 0x04, 0x0001bfe7 },
52 { 0x40008c, 1, 0x04, 0x00060000 },
53 { 0x400090, 1, 0x04, 0x00000030 },
54 { 0x40013c, 1, 0x04, 0x003901f3 },
55 { 0x400140, 1, 0x04, 0x00000100 },
56 { 0x400144, 1, 0x04, 0x00000000 },
57 { 0x400148, 1, 0x04, 0x00000110 },
58 { 0x400138, 1, 0x04, 0x00000000 },
59 { 0x400130, 2, 0x04, 0x00000000 },
60 { 0x400124, 1, 0x04, 0x00000002 },
61 {}
62};
63
64static const struct nvc0_graph_init
65gm107_graph_init_ds_0[] = {
66 { 0x405844, 1, 0x04, 0x00ffffff },
67 { 0x405850, 1, 0x04, 0x00000000 },
68 { 0x405900, 1, 0x04, 0x00000000 },
69 { 0x405908, 1, 0x04, 0x00000000 },
70 {}
71};
72
73static const struct nvc0_graph_init
74gm107_graph_init_scc_0[] = {
75 { 0x40803c, 1, 0x04, 0x00000010 },
76 {}
77};
78
79static const struct nvc0_graph_init
80gm107_graph_init_sked_0[] = {
81 { 0x407010, 1, 0x04, 0x00000000 },
82 { 0x407040, 1, 0x04, 0x40440424 },
83 { 0x407048, 1, 0x04, 0x0000000a },
84 {}
85};
86
87static const struct nvc0_graph_init
88gm107_graph_init_prop_0[] = {
89 { 0x418408, 1, 0x04, 0x00000000 },
90 { 0x4184a0, 1, 0x04, 0x00000000 },
91 {}
92};
93
94static const struct nvc0_graph_init
95gm107_graph_init_setup_1[] = {
96 { 0x4188c8, 2, 0x04, 0x00000000 },
97 { 0x4188d0, 1, 0x04, 0x00010000 },
98 { 0x4188d4, 1, 0x04, 0x00010201 },
99 {}
100};
101
102static const struct nvc0_graph_init
103gm107_graph_init_zcull_0[] = {
104 { 0x418910, 1, 0x04, 0x00010001 },
105 { 0x418914, 1, 0x04, 0x00000301 },
106 { 0x418918, 1, 0x04, 0x00800000 },
107 { 0x418930, 2, 0x04, 0x00000000 },
108 { 0x418980, 1, 0x04, 0x77777770 },
109 { 0x418984, 3, 0x04, 0x77777777 },
110 {}
111};
112
113static const struct nvc0_graph_init
114gm107_graph_init_gpc_unk_1[] = {
115 { 0x418d00, 1, 0x04, 0x00000000 },
116 { 0x418f00, 1, 0x04, 0x00000400 },
117 { 0x418f08, 1, 0x04, 0x00000000 },
118 { 0x418e08, 1, 0x04, 0x00000000 },
119 {}
120};
121
122static const struct nvc0_graph_init
123gm107_graph_init_tpccs_0[] = {
124 { 0x419dc4, 1, 0x04, 0x00000000 },
125 { 0x419dc8, 1, 0x04, 0x00000501 },
126 { 0x419dd0, 1, 0x04, 0x00000000 },
127 { 0x419dd4, 1, 0x04, 0x00000100 },
128 { 0x419dd8, 1, 0x04, 0x00000001 },
129 { 0x419ddc, 1, 0x04, 0x00000002 },
130 { 0x419de0, 1, 0x04, 0x00000001 },
131 { 0x419d0c, 1, 0x04, 0x00000000 },
132 { 0x419d10, 1, 0x04, 0x00000014 },
133 {}
134};
135
136static const struct nvc0_graph_init
137gm107_graph_init_tex_0[] = {
138 { 0x419ab0, 1, 0x04, 0x00000000 },
139 { 0x419ab8, 1, 0x04, 0x000000e7 },
140 { 0x419abc, 1, 0x04, 0x00000000 },
141 { 0x419acc, 1, 0x04, 0x000000ff },
142 { 0x419ac0, 1, 0x04, 0x00000000 },
143 { 0x419aa8, 2, 0x04, 0x00000000 },
144 { 0x419ad0, 2, 0x04, 0x00000000 },
145 { 0x419ae0, 2, 0x04, 0x00000000 },
146 { 0x419af0, 4, 0x04, 0x00000000 },
147 {}
148};
149
150static const struct nvc0_graph_init
151gm107_graph_init_pe_0[] = {
152 { 0x419900, 1, 0x04, 0x000000ff },
153 { 0x41980c, 1, 0x04, 0x00000010 },
154 { 0x419844, 1, 0x04, 0x00000000 },
155 { 0x419838, 1, 0x04, 0x000000ff },
156 { 0x419850, 1, 0x04, 0x00000004 },
157 { 0x419854, 2, 0x04, 0x00000000 },
158 { 0x419894, 3, 0x04, 0x00100401 },
159 {}
160};
161
162static const struct nvc0_graph_init
163gm107_graph_init_l1c_0[] = {
164 { 0x419c98, 1, 0x04, 0x00000000 },
165 { 0x419cc0, 2, 0x04, 0x00000000 },
166 {}
167};
168
169static const struct nvc0_graph_init
170gm107_graph_init_sm_0[] = {
171 { 0x419e30, 1, 0x04, 0x000000ff },
172 { 0x419e00, 1, 0x04, 0x00000000 },
173 { 0x419ea0, 1, 0x04, 0x00000000 },
174 { 0x419ee4, 1, 0x04, 0x00000000 },
175 { 0x419ea4, 1, 0x04, 0x00000100 },
176 { 0x419ea8, 1, 0x04, 0x01000000 },
177 { 0x419ee8, 1, 0x04, 0x00000091 },
178 { 0x419eb4, 1, 0x04, 0x00000000 },
179 { 0x419ebc, 2, 0x04, 0x00000000 },
180 { 0x419edc, 1, 0x04, 0x000c1810 },
181 { 0x419ed8, 1, 0x04, 0x00000000 },
182 { 0x419ee0, 1, 0x04, 0x00000000 },
183 { 0x419f74, 1, 0x04, 0x00005155 },
184 { 0x419f80, 4, 0x04, 0x00000000 },
185 {}
186};
187
188static const struct nvc0_graph_init
189gm107_graph_init_l1c_1[] = {
190 { 0x419ccc, 2, 0x04, 0x00000000 },
191 { 0x419c80, 1, 0x04, 0x3f006022 },
192 { 0x419c88, 1, 0x04, 0x00000000 },
193 {}
194};
195
196static const struct nvc0_graph_init
197gm107_graph_init_pes_0[] = {
198 { 0x41be50, 1, 0x04, 0x000000ff },
199 { 0x41be04, 1, 0x04, 0x00000000 },
200 { 0x41be08, 1, 0x04, 0x00000004 },
201 { 0x41be0c, 1, 0x04, 0x00000008 },
202 { 0x41be10, 1, 0x04, 0x0e3b8bc7 },
203 { 0x41be14, 2, 0x04, 0x00000000 },
204 { 0x41be3c, 5, 0x04, 0x00100401 },
205 {}
206};
207
208static const struct nvc0_graph_init
209gm107_graph_init_wwdx_0[] = {
210 { 0x41bfd4, 1, 0x04, 0x00800000 },
211 { 0x41bfdc, 1, 0x04, 0x00000000 },
212 {}
213};
214
215static const struct nvc0_graph_init
216gm107_graph_init_cbm_0[] = {
217 { 0x41becc, 1, 0x04, 0x00000000 },
218 {}
219};
220
221static const struct nvc0_graph_init
222gm107_graph_init_be_0[] = {
223 { 0x408890, 1, 0x04, 0x000000ff },
224 { 0x40880c, 1, 0x04, 0x00000000 },
225 { 0x408850, 1, 0x04, 0x00000004 },
226 { 0x408878, 1, 0x04, 0x00c81603 },
227 { 0x40887c, 1, 0x04, 0x80543432 },
228 { 0x408880, 1, 0x04, 0x0010581e },
229 { 0x408884, 1, 0x04, 0x00001205 },
230 { 0x408974, 1, 0x04, 0x000000ff },
231 { 0x408910, 9, 0x04, 0x00000000 },
232 { 0x408950, 1, 0x04, 0x00000000 },
233 { 0x408954, 1, 0x04, 0x0000ffff },
234 { 0x408958, 1, 0x04, 0x00000034 },
235 { 0x40895c, 1, 0x04, 0x8531a003 },
236 { 0x408960, 1, 0x04, 0x0561985a },
237 { 0x408964, 1, 0x04, 0x04e15c4f },
238 { 0x408968, 1, 0x04, 0x02808833 },
239 { 0x40896c, 1, 0x04, 0x01f02438 },
240 { 0x408970, 1, 0x04, 0x00012c00 },
241 { 0x408984, 1, 0x04, 0x00000000 },
242 { 0x408988, 1, 0x04, 0x08040201 },
243 { 0x40898c, 1, 0x04, 0x80402010 },
244 {}
245};
246
247static const struct nvc0_graph_init
248gm107_graph_init_sm_1[] = {
249 { 0x419e5c, 1, 0x04, 0x00000000 },
250 { 0x419e58, 1, 0x04, 0x00000000 },
251 {}
252};
253
254static const struct nvc0_graph_pack
255gm107_graph_pack_mmio[] = {
256 { gm107_graph_init_main_0 },
257 { nvf0_graph_init_fe_0 },
258 { nvc0_graph_init_pri_0 },
259 { nvc0_graph_init_rstr2d_0 },
260 { nvc0_graph_init_pd_0 },
261 { gm107_graph_init_ds_0 },
262 { gm107_graph_init_scc_0 },
263 { gm107_graph_init_sked_0 },
264 { nvf0_graph_init_cwd_0 },
265 { gm107_graph_init_prop_0 },
266 { nv108_graph_init_gpc_unk_0 },
267 { nvc0_graph_init_setup_0 },
268 { nvc0_graph_init_crstr_0 },
269 { gm107_graph_init_setup_1 },
270 { gm107_graph_init_zcull_0 },
271 { nvc0_graph_init_gpm_0 },
272 { gm107_graph_init_gpc_unk_1 },
273 { nvc0_graph_init_gcc_0 },
274 { gm107_graph_init_tpccs_0 },
275 { gm107_graph_init_tex_0 },
276 { gm107_graph_init_pe_0 },
277 { gm107_graph_init_l1c_0 },
278 { nvc0_graph_init_mpc_0 },
279 { gm107_graph_init_sm_0 },
280 { gm107_graph_init_l1c_1 },
281 { gm107_graph_init_pes_0 },
282 { gm107_graph_init_wwdx_0 },
283 { gm107_graph_init_cbm_0 },
284 { gm107_graph_init_be_0 },
285 { gm107_graph_init_sm_1 },
286 {}
287};
288
289/*******************************************************************************
290 * PGRAPH engine/subdev functions
291 ******************************************************************************/
292
293static void
294gm107_graph_init_bios(struct nvc0_graph_priv *priv)
295{
296 static const struct {
297 u32 ctrl;
298 u32 data;
299 } regs[] = {
300 { 0x419ed8, 0x419ee0 },
301 { 0x419ad0, 0x419ad4 },
302 { 0x419ae0, 0x419ae4 },
303 { 0x419af0, 0x419af4 },
304 { 0x419af8, 0x419afc },
305 };
306 struct nouveau_bios *bios = nouveau_bios(priv);
307 struct nvbios_P0260E infoE;
308 struct nvbios_P0260X infoX;
309 int E = -1, X;
310 u8 ver, hdr;
311
312 while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
313 if (X = -1, E < ARRAY_SIZE(regs)) {
314 nv_wr32(priv, regs[E].ctrl, infoE.data);
315 while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
316 nv_wr32(priv, regs[E].data, infoX.data);
317 }
318 }
319}
320
321int
322gm107_graph_init(struct nouveau_object *object)
323{
324 struct nvc0_graph_oclass *oclass = (void *)object->oclass;
325 struct nvc0_graph_priv *priv = (void *)object;
326 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
327 u32 data[TPC_MAX / 8] = {};
328 u8 tpcnr[GPC_MAX];
329 int gpc, tpc, ppc, rop;
330 int ret, i;
331
332 ret = nouveau_graph_init(&priv->base);
333 if (ret)
334 return ret;
335
336 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
337 nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
338 nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
339 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
340 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
341
342 nvc0_graph_mmio(priv, oclass->mmio);
343
344 gm107_graph_init_bios(priv);
345
346 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
347
348 memset(data, 0x00, sizeof(data));
349 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
350 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
351 do {
352 gpc = (gpc + 1) % priv->gpc_nr;
353 } while (!tpcnr[gpc]);
354 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
355
356 data[i / 8] |= tpc << ((i % 8) * 4);
357 }
358
359 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
360 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
361 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
362 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
363
364 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
365 nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
366 priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
367 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
368 priv->tpc_total);
369 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
370 }
371
372 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
373 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
374
375 nv_wr32(priv, 0x400500, 0x00010001);
376
377 nv_wr32(priv, 0x400100, 0xffffffff);
378 nv_wr32(priv, 0x40013c, 0xffffffff);
379 nv_wr32(priv, 0x400124, 0x00000002);
380 nv_wr32(priv, 0x409c24, 0x000e0000);
381
382 nv_wr32(priv, 0x404000, 0xc0000000);
383 nv_wr32(priv, 0x404600, 0xc0000000);
384 nv_wr32(priv, 0x408030, 0xc0000000);
385 nv_wr32(priv, 0x404490, 0xc0000000);
386 nv_wr32(priv, 0x406018, 0xc0000000);
387 nv_wr32(priv, 0x407020, 0x40000000);
388 nv_wr32(priv, 0x405840, 0xc0000000);
389 nv_wr32(priv, 0x405844, 0x00ffffff);
390 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
391
392 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
393 for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
394 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
395 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
396 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
397 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
398 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
399 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
400 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
401 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
402 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
403 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
404 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
405 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
406 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
407 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
408 }
409 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
410 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
411 }
412
413 for (rop = 0; rop < priv->rop_nr; rop++) {
414 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
415 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
416 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
417 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
418 }
419
420 nv_wr32(priv, 0x400108, 0xffffffff);
421 nv_wr32(priv, 0x400138, 0xffffffff);
422 nv_wr32(priv, 0x400118, 0xffffffff);
423 nv_wr32(priv, 0x400130, 0xffffffff);
424 nv_wr32(priv, 0x40011c, 0xffffffff);
425 nv_wr32(priv, 0x400134, 0xffffffff);
426
427 nv_wr32(priv, 0x400054, 0x2c350f63);
428 return nvc0_graph_init_ctxctl(priv);
429}
430
431#include "fuc/hubgm107.fuc5.h"
432
433static struct nvc0_graph_ucode
434gm107_graph_fecs_ucode = {
435 .code.data = gm107_grhub_code,
436 .code.size = sizeof(gm107_grhub_code),
437 .data.data = gm107_grhub_data,
438 .data.size = sizeof(gm107_grhub_data),
439};
440
441#include "fuc/gpcgm107.fuc5.h"
442
443static struct nvc0_graph_ucode
444gm107_graph_gpccs_ucode = {
445 .code.data = gm107_grgpc_code,
446 .code.size = sizeof(gm107_grgpc_code),
447 .data.data = gm107_grgpc_data,
448 .data.size = sizeof(gm107_grgpc_data),
449};
450
451struct nouveau_oclass *
452gm107_graph_oclass = &(struct nvc0_graph_oclass) {
453 .base.handle = NV_ENGINE(GR, 0x07),
454 .base.ofuncs = &(struct nouveau_ofuncs) {
455 .ctor = nvc0_graph_ctor,
456 .dtor = nvc0_graph_dtor,
457 .init = gm107_graph_init,
458 .fini = _nouveau_graph_fini,
459 },
460 .cclass = &gm107_grctx_oclass,
461 .sclass = gm107_graph_sclass,
462 .mmio = gm107_graph_pack_mmio,
463 .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL,
464 .gpccs.ucode = &gm107_graph_gpccs_ucode,
465}.base;
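
gm107.c shows the shape of the table-driven init this series converts the graph engines to: each nvc0_graph_init entry reads as { addr, count, pitch, data }, i.e. write `data` to `count` registers starting at `addr` and stepping by `pitch`, and an nvc0_graph_pack array strings the per-unit lists together so variants can share entries (gm107 mixes its own tables with nvc0/nvf0/nv108 ones). A minimal sketch of the walker, assuming empty terminator entries end both tables; the real iterator is nvc0_graph_mmio() with the pack helpers from ctxnvc0.h and may differ in detail:

	/* Hedged sketch of consuming the pack tables; illustrative only. */
	static void
	example_mmio_walk(struct nvc0_graph_priv *priv,
			  const struct nvc0_graph_pack *pack)
	{
		for (; pack->init; pack++) {
			const struct nvc0_graph_init *init;

			for (init = pack->init; init->count; init++) {
				u32 addr = init->addr;
				int i;

				for (i = 0; i < init->count; i++, addr += init->pitch)
					nv_wr32(priv, addr, init->data);
			}
		}
	}

One stylistic trap worth flagging in gm107_graph_init_bios(): `if (X = -1, E < ARRAY_SIZE(regs))` is the comma operator, not a mistyped `==`; it deliberately resets X to -1 on every outer iteration before testing E, so the inner ++X walk over the P0260X entries restarts from the beginning each time.
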
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
index e1af65ead379..00ea1a089822 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -38,11 +39,11 @@ nv108_graph_sclass[] = {
38}; 39};
39 40
40/******************************************************************************* 41/*******************************************************************************
41 * PGRAPH engine/subdev functions 42 * PGRAPH register lists
42 ******************************************************************************/ 43 ******************************************************************************/
43 44
44static struct nvc0_graph_init 45static const struct nvc0_graph_init
45nv108_graph_init_regs[] = { 46nv108_graph_init_main_0[] = {
46 { 0x400080, 1, 0x04, 0x003083c2 }, 47 { 0x400080, 1, 0x04, 0x003083c2 },
47 { 0x400088, 1, 0x04, 0x0001bfe7 }, 48 { 0x400088, 1, 0x04, 0x0001bfe7 },
48 { 0x40008c, 1, 0x04, 0x00000000 }, 49 { 0x40008c, 1, 0x04, 0x00000000 },
@@ -57,66 +58,46 @@ nv108_graph_init_regs[] = {
57 {} 58 {}
58}; 59};
59 60
60struct nvc0_graph_init 61static const struct nvc0_graph_init
61nv108_graph_init_unk58xx[] = { 62nv108_graph_init_ds_0[] = {
62 { 0x405844, 1, 0x04, 0x00ffffff }, 63 { 0x405844, 1, 0x04, 0x00ffffff },
63 { 0x405850, 1, 0x04, 0x00000000 }, 64 { 0x405850, 1, 0x04, 0x00000000 },
64 { 0x405900, 1, 0x04, 0x00000000 }, 65 { 0x405900, 1, 0x04, 0x00000000 },
65 { 0x405908, 1, 0x04, 0x00000000 }, 66 { 0x405908, 1, 0x04, 0x00000000 },
66 { 0x405928, 1, 0x04, 0x00000000 }, 67 { 0x405928, 2, 0x04, 0x00000000 },
67 { 0x40592c, 1, 0x04, 0x00000000 },
68 {} 68 {}
69}; 69};
70 70
71static struct nvc0_graph_init 71const struct nvc0_graph_init
72nv108_graph_init_gpc[] = { 72nv108_graph_init_gpc_unk_0[] = {
73 { 0x418408, 1, 0x04, 0x00000000 },
74 { 0x4184a0, 3, 0x04, 0x00000000 },
75 { 0x418604, 1, 0x04, 0x00000000 }, 73 { 0x418604, 1, 0x04, 0x00000000 },
76 { 0x418680, 1, 0x04, 0x00000000 }, 74 { 0x418680, 1, 0x04, 0x00000000 },
77 { 0x418714, 1, 0x04, 0x00000000 }, 75 { 0x418714, 1, 0x04, 0x00000000 },
78 { 0x418384, 2, 0x04, 0x00000000 }, 76 { 0x418384, 2, 0x04, 0x00000000 },
79 { 0x418814, 3, 0x04, 0x00000000 }, 77 {}
80 { 0x418b04, 1, 0x04, 0x00000000 }, 78};
79
80static const struct nvc0_graph_init
81nv108_graph_init_setup_1[] = {
81 { 0x4188c8, 2, 0x04, 0x00000000 }, 82 { 0x4188c8, 2, 0x04, 0x00000000 },
82 { 0x4188d0, 1, 0x04, 0x00010000 }, 83 { 0x4188d0, 1, 0x04, 0x00010000 },
83 { 0x4188d4, 1, 0x04, 0x00000201 }, 84 { 0x4188d4, 1, 0x04, 0x00000201 },
84 { 0x418910, 1, 0x04, 0x00010001 },
85 { 0x418914, 1, 0x04, 0x00000301 },
86 { 0x418918, 1, 0x04, 0x00800000 },
87 { 0x418980, 1, 0x04, 0x77777770 },
88 { 0x418984, 3, 0x04, 0x77777777 },
89 { 0x418c04, 1, 0x04, 0x00000000 },
90 { 0x418c64, 2, 0x04, 0x00000000 },
91 { 0x418c88, 1, 0x04, 0x00000000 },
92 { 0x418cb4, 2, 0x04, 0x00000000 },
93 { 0x418d00, 1, 0x04, 0x00000000 },
94 { 0x418d28, 2, 0x04, 0x00000000 },
95 { 0x418f00, 1, 0x04, 0x00000400 },
96 { 0x418f08, 1, 0x04, 0x00000000 },
97 { 0x418f20, 2, 0x04, 0x00000000 },
98 { 0x418e00, 1, 0x04, 0x00000000 },
99 { 0x418e08, 1, 0x04, 0x00000000 },
100 { 0x418e1c, 2, 0x04, 0x00000000 },
101 { 0x41900c, 1, 0x04, 0x00000000 },
102 { 0x419018, 1, 0x04, 0x00000000 },
103 {} 85 {}
104}; 86};
105 87
106static struct nvc0_graph_init 88static const struct nvc0_graph_init
107nv108_graph_init_tpc[] = { 89nv108_graph_init_tex_0[] = {
108 { 0x419d0c, 1, 0x04, 0x00000000 },
109 { 0x419d10, 1, 0x04, 0x00000014 },
110 { 0x419ab0, 1, 0x04, 0x00000000 }, 90 { 0x419ab0, 1, 0x04, 0x00000000 },
111 { 0x419ac8, 1, 0x04, 0x00000000 }, 91 { 0x419ac8, 1, 0x04, 0x00000000 },
112 { 0x419ab8, 1, 0x04, 0x000000e7 }, 92 { 0x419ab8, 1, 0x04, 0x000000e7 },
113 { 0x419abc, 2, 0x04, 0x00000000 }, 93 { 0x419abc, 2, 0x04, 0x00000000 },
114 { 0x419ab4, 1, 0x04, 0x00000000 }, 94 { 0x419ab4, 1, 0x04, 0x00000000 },
115 { 0x419aa8, 2, 0x04, 0x00000000 }, 95 { 0x419aa8, 2, 0x04, 0x00000000 },
116 { 0x41980c, 1, 0x04, 0x00000010 }, 96 {}
117 { 0x419844, 1, 0x04, 0x00000000 }, 97};
118 { 0x419850, 1, 0x04, 0x00000004 }, 98
119 { 0x419854, 2, 0x04, 0x00000000 }, 99static const struct nvc0_graph_init
100nv108_graph_init_l1c_0[] = {
120 { 0x419c98, 1, 0x04, 0x00000000 }, 101 { 0x419c98, 1, 0x04, 0x00000000 },
121 { 0x419ca8, 1, 0x04, 0x00000000 }, 102 { 0x419ca8, 1, 0x04, 0x00000000 },
122 { 0x419cb0, 1, 0x04, 0x01000000 }, 103 { 0x419cb0, 1, 0x04, 0x01000000 },
@@ -127,22 +108,47 @@ nv108_graph_init_tpc[] = {
127 { 0x419cc0, 2, 0x04, 0x00000000 }, 108 { 0x419cc0, 2, 0x04, 0x00000000 },
128 { 0x419c80, 1, 0x04, 0x00000230 }, 109 { 0x419c80, 1, 0x04, 0x00000230 },
129 { 0x419ccc, 2, 0x04, 0x00000000 }, 110 { 0x419ccc, 2, 0x04, 0x00000000 },
130 { 0x419c0c, 1, 0x04, 0x00000000 },
131 { 0x419e00, 1, 0x04, 0x00000080 },
132 { 0x419ea0, 1, 0x04, 0x00000000 },
133 { 0x419ee4, 1, 0x04, 0x00000000 },
134 { 0x419ea4, 1, 0x04, 0x00000100 },
135 { 0x419ea8, 1, 0x04, 0x00000000 },
136 { 0x419eb4, 1, 0x04, 0x00000000 },
137 { 0x419ebc, 2, 0x04, 0x00000000 },
138 { 0x419edc, 1, 0x04, 0x00000000 },
139 { 0x419f00, 1, 0x04, 0x00000000 },
140 { 0x419ed0, 1, 0x04, 0x00003234 },
141 { 0x419f74, 1, 0x04, 0x00015555 },
142 { 0x419f80, 4, 0x04, 0x00000000 },
143 {} 111 {}
144}; 112};
145 113
114static const struct nvc0_graph_pack
115nv108_graph_pack_mmio[] = {
116 { nv108_graph_init_main_0 },
117 { nvf0_graph_init_fe_0 },
118 { nvc0_graph_init_pri_0 },
119 { nvc0_graph_init_rstr2d_0 },
120 { nvd9_graph_init_pd_0 },
121 { nv108_graph_init_ds_0 },
122 { nvc0_graph_init_scc_0 },
123 { nvf0_graph_init_sked_0 },
124 { nvf0_graph_init_cwd_0 },
125 { nvd9_graph_init_prop_0 },
126 { nv108_graph_init_gpc_unk_0 },
127 { nvc0_graph_init_setup_0 },
128 { nvc0_graph_init_crstr_0 },
129 { nv108_graph_init_setup_1 },
130 { nvc0_graph_init_zcull_0 },
131 { nvd9_graph_init_gpm_0 },
132 { nvf0_graph_init_gpc_unk_1 },
133 { nvc0_graph_init_gcc_0 },
134 { nve4_graph_init_tpccs_0 },
135 { nv108_graph_init_tex_0 },
136 { nve4_graph_init_pe_0 },
137 { nv108_graph_init_l1c_0 },
138 { nvc0_graph_init_mpc_0 },
139 { nvf0_graph_init_sm_0 },
140 { nvd7_graph_init_pes_0 },
141 { nvd7_graph_init_wwdx_0 },
142 { nvd7_graph_init_cbm_0 },
143 { nve4_graph_init_be_0 },
144 { nvc0_graph_init_fe_1 },
145 {}
146};
147
148/*******************************************************************************
149 * PGRAPH engine/subdev functions
150 ******************************************************************************/
151
146static int 152static int
147nv108_graph_fini(struct nouveau_object *object, bool suspend) 153nv108_graph_fini(struct nouveau_object *object, bool suspend)
148{ 154{
@@ -180,25 +186,6 @@ nv108_graph_fini(struct nouveau_object *object, bool suspend)
180 return nouveau_graph_fini(&priv->base, suspend); 186 return nouveau_graph_fini(&priv->base, suspend);
181} 187}
182 188
183static struct nvc0_graph_init *
184nv108_graph_init_mmio[] = {
185 nv108_graph_init_regs,
186 nvf0_graph_init_unk40xx,
187 nvc0_graph_init_unk44xx,
188 nvc0_graph_init_unk78xx,
189 nvc0_graph_init_unk60xx,
190 nvd9_graph_init_unk64xx,
191 nv108_graph_init_unk58xx,
192 nvc0_graph_init_unk80xx,
193 nvf0_graph_init_unk70xx,
194 nvf0_graph_init_unk5bxx,
195 nv108_graph_init_gpc,
196 nv108_graph_init_tpc,
197 nve4_graph_init_unk,
198 nve4_graph_init_unk88xx,
199 NULL
200};
201
202#include "fuc/hubnv108.fuc5.h" 189#include "fuc/hubnv108.fuc5.h"
203 190
204static struct nvc0_graph_ucode 191static struct nvc0_graph_ucode
@@ -230,7 +217,7 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) {
230 }, 217 },
231 .cclass = &nv108_grctx_oclass, 218 .cclass = &nv108_grctx_oclass,
232 .sclass = nv108_graph_sclass, 219 .sclass = nv108_graph_sclass,
233 .mmio = nv108_graph_init_mmio, 220 .mmio = nv108_graph_pack_mmio,
234 .fecs.ucode = &nv108_graph_fecs_ucode, 221 .fecs.ucode = &nv108_graph_fecs_ucode,
235 .gpccs.ucode = &nv108_graph_gpccs_ucode, 222 .gpccs.ucode = &nv108_graph_gpccs_ucode,
236}.base; 223}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index b24559315903..d145e080899a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -349,7 +349,7 @@ nv20_graph_init(struct nouveau_object *object)
349 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp); 349 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
350 350
351 /* begin RAM config */ 351 /* begin RAM config */
352 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1; 352 vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
353 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); 353 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
354 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); 354 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
355 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); 355 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 193a5de1b482..6477fbf6a550 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -484,7 +484,7 @@ nv40_graph_init(struct nouveau_object *object)
484 engine->tile_prog(engine, i); 484 engine->tile_prog(engine, i);
485 485
486 /* begin RAM config */ 486 /* begin RAM config */
487 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1; 487 vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
488 switch (nv_device(priv)->chipset) { 488 switch (nv_device(priv)->chipset) {
489 case 0x40: 489 case 0x40:
490 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); 490 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
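
The nv20/nv40 hunks swap pci_resource_len() for nv_device_resource_len(), dropping the assumption that a nouveau device always hangs off a PCI bus; a platform-attached GPU has no pdev to query. A plausible shape for such a wrapper, with the field and fallback below being assumptions rather than the real helper (which lives in the device code and may differ):

	/* Hedged sketch only; names are illustrative. */
	resource_size_t
	example_device_resource_len(struct nouveau_device *device,
				    unsigned int bar)
	{
		if (device->pdev)	/* PCI-attached: defer to the PCI core */
			return pci_resource_len(device->pdev, bar);
		/* assumption: platform devices expose resources directly */
		return resource_size(&device->platformdev->resource[bar]);
	}
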
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 7a367c402978..2c7809e1a09b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -197,34 +197,35 @@ static const struct nouveau_bitfield nv50_pgraph_status[] = {
197 { 0x00000080, "UNK7" }, 197 { 0x00000080, "UNK7" },
198 { 0x00000100, "CTXPROG" }, 198 { 0x00000100, "CTXPROG" },
199 { 0x00000200, "VFETCH" }, 199 { 0x00000200, "VFETCH" },
200 { 0x00000400, "CCACHE_UNK4" }, 200 { 0x00000400, "CCACHE_PREGEOM" },
201 { 0x00000800, "STRMOUT_GSCHED_UNK5" }, 201 { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
202 { 0x00001000, "UNK14XX" }, 202 { 0x00001000, "VCLIP" },
203 { 0x00002000, "UNK24XX_CSCHED" }, 203 { 0x00002000, "RATTR_APLANE" },
204 { 0x00004000, "UNK1CXX" }, 204 { 0x00004000, "TRAST" },
205 { 0x00008000, "CLIPID" }, 205 { 0x00008000, "CLIPID" },
206 { 0x00010000, "ZCULL" }, 206 { 0x00010000, "ZCULL" },
207 { 0x00020000, "ENG2D" }, 207 { 0x00020000, "ENG2D" },
208 { 0x00040000, "UNK34XX" }, 208 { 0x00040000, "RMASK" },
209 { 0x00080000, "TPRAST" }, 209 { 0x00080000, "TPC_RAST" },
210 { 0x00100000, "TPROP" }, 210 { 0x00100000, "TPC_PROP" },
211 { 0x00200000, "TEX" }, 211 { 0x00200000, "TPC_TEX" },
212 { 0x00400000, "TPVP" }, 212 { 0x00400000, "TPC_GEOM" },
213 { 0x00800000, "MP" }, 213 { 0x00800000, "TPC_MP" },
214 { 0x01000000, "ROP" }, 214 { 0x01000000, "ROP" },
215 {} 215 {}
216}; 216};
217 217
218static const char *const nv50_pgraph_vstatus_0[] = { 218static const char *const nv50_pgraph_vstatus_0[] = {
219 "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL 219 "VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
220 NULL
220}; 221};
221 222
222static const char *const nv50_pgraph_vstatus_1[] = { 223static const char *const nv50_pgraph_vstatus_1[] = {
223 "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL 224 "TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
224}; 225};
225 226
226static const char *const nv50_pgraph_vstatus_2[] = { 227static const char *const nv50_pgraph_vstatus_2[] = {
227 "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX", 228 "RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
228 "ROP", NULL 229 "ROP", NULL
229}; 230};
230 231
@@ -329,6 +330,15 @@ static const struct nouveau_bitfield nv50_mpc_traps[] = {
329 {} 330 {}
330}; 331};
331 332
333static const struct nouveau_bitfield nv50_tex_traps[] = {
334 { 0x00000001, "" }, /* any bit set? */
335 { 0x00000002, "FAULT" },
336 { 0x00000004, "STORAGE_TYPE_MISMATCH" },
337 { 0x00000008, "LINEAR_MISMATCH" },
338 { 0x00000020, "WRONG_MEMTYPE" },
339 {}
340};
341
332static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = { 342static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
333 { 0x00000001, "NOTIFY" }, 343 { 0x00000001, "NOTIFY" },
334 { 0x00000002, "IN" }, 344 { 0x00000002, "IN" },
@@ -531,6 +541,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
531 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) 541 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
532 nv_error(priv, "\t0x%08x: 0x%08x\n", r, 542 nv_error(priv, "\t0x%08x: 0x%08x\n", r,
533 nv_rd32(priv, r)); 543 nv_rd32(priv, r));
544 if (ustatus) {
545 nv_error(priv, "%s - TP%d:", name, i);
546 nouveau_bitfield_print(nv50_tex_traps,
547 ustatus);
548 pr_cont("\n");
549 ustatus = 0;
550 }
534 } 551 }
535 break; 552 break;
536 case 7: /* MP error */ 553 case 7: /* MP error */
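
The nv50 renames replace UNK* placeholders with decoded unit names, and the new nv50_tex_traps table routes TEX trap status through the same nouveau_bitfield machinery the driver already uses: a { mask, name } table plus a printer that emits the names of whichever bits are set. A minimal sketch of that printer, assuming exactly those two fields (the real nouveau_bitfield_print() lives in core's enum helpers):

	/* Hedged reimplementation for illustration only. */
	static void
	example_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
	{
		for (; bf->name; bf++) {
			if (value & bf->mask) {
				pr_cont(" %s", bf->name);
				value &= ~bf->mask;
			}
		}
		if (value)
			pr_cont(" (unknown bits 0x%08x)", value);
	}
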
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index a73ab209ea88..f3c7329da0a0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -146,11 +147,11 @@ nvc0_graph_context_dtor(struct nouveau_object *object)
146} 147}
147 148
148/******************************************************************************* 149/*******************************************************************************
149 * PGRAPH engine/subdev functions 150 * PGRAPH register lists
150 ******************************************************************************/ 151 ******************************************************************************/
151 152
152struct nvc0_graph_init 153const struct nvc0_graph_init
153nvc0_graph_init_regs[] = { 154nvc0_graph_init_main_0[] = {
154 { 0x400080, 1, 0x04, 0x003083c2 }, 155 { 0x400080, 1, 0x04, 0x003083c2 },
155 { 0x400088, 1, 0x04, 0x00006fe7 }, 156 { 0x400088, 1, 0x04, 0x00006fe7 },
156 { 0x40008c, 1, 0x04, 0x00000000 }, 157 { 0x40008c, 1, 0x04, 0x00000000 },
@@ -165,95 +166,170 @@ nvc0_graph_init_regs[] = {
165 {} 166 {}
166}; 167};
167 168
168struct nvc0_graph_init 169const struct nvc0_graph_init
169nvc0_graph_init_unk40xx[] = { 170nvc0_graph_init_fe_0[] = {
170 { 0x40415c, 1, 0x04, 0x00000000 }, 171 { 0x40415c, 1, 0x04, 0x00000000 },
171 { 0x404170, 1, 0x04, 0x00000000 }, 172 { 0x404170, 1, 0x04, 0x00000000 },
172 {} 173 {}
173}; 174};
174 175
175struct nvc0_graph_init 176const struct nvc0_graph_init
176nvc0_graph_init_unk44xx[] = { 177nvc0_graph_init_pri_0[] = {
177 { 0x404488, 2, 0x04, 0x00000000 }, 178 { 0x404488, 2, 0x04, 0x00000000 },
178 {} 179 {}
179}; 180};
180 181
181struct nvc0_graph_init 182const struct nvc0_graph_init
182nvc0_graph_init_unk78xx[] = { 183nvc0_graph_init_rstr2d_0[] = {
183 { 0x407808, 1, 0x04, 0x00000000 }, 184 { 0x407808, 1, 0x04, 0x00000000 },
184 {} 185 {}
185}; 186};
186 187
187struct nvc0_graph_init 188const struct nvc0_graph_init
188nvc0_graph_init_unk60xx[] = { 189nvc0_graph_init_pd_0[] = {
189 { 0x406024, 1, 0x04, 0x00000000 }, 190 { 0x406024, 1, 0x04, 0x00000000 },
190 {} 191 {}
191}; 192};
192 193
193struct nvc0_graph_init 194const struct nvc0_graph_init
194nvc0_graph_init_unk58xx[] = { 195nvc0_graph_init_ds_0[] = {
195 { 0x405844, 1, 0x04, 0x00ffffff }, 196 { 0x405844, 1, 0x04, 0x00ffffff },
196 { 0x405850, 1, 0x04, 0x00000000 }, 197 { 0x405850, 1, 0x04, 0x00000000 },
197 { 0x405908, 1, 0x04, 0x00000000 }, 198 { 0x405908, 1, 0x04, 0x00000000 },
198 {} 199 {}
199}; 200};
200 201
201struct nvc0_graph_init 202const struct nvc0_graph_init
202nvc0_graph_init_unk80xx[] = { 203nvc0_graph_init_scc_0[] = {
203 { 0x40803c, 1, 0x04, 0x00000000 }, 204 { 0x40803c, 1, 0x04, 0x00000000 },
204 {} 205 {}
205}; 206};
206 207
207struct nvc0_graph_init 208const struct nvc0_graph_init
208nvc0_graph_init_gpc[] = { 209nvc0_graph_init_prop_0[] = {
209 { 0x4184a0, 1, 0x04, 0x00000000 }, 210 { 0x4184a0, 1, 0x04, 0x00000000 },
211 {}
212};
213
214const struct nvc0_graph_init
215nvc0_graph_init_gpc_unk_0[] = {
210 { 0x418604, 1, 0x04, 0x00000000 }, 216 { 0x418604, 1, 0x04, 0x00000000 },
211 { 0x418680, 1, 0x04, 0x00000000 }, 217 { 0x418680, 1, 0x04, 0x00000000 },
212 { 0x418714, 1, 0x04, 0x80000000 }, 218 { 0x418714, 1, 0x04, 0x80000000 },
213 { 0x418384, 1, 0x04, 0x00000000 }, 219 { 0x418384, 1, 0x04, 0x00000000 },
220 {}
221};
222
223const struct nvc0_graph_init
224nvc0_graph_init_setup_0[] = {
214 { 0x418814, 3, 0x04, 0x00000000 }, 225 { 0x418814, 3, 0x04, 0x00000000 },
226 {}
227};
228
229const struct nvc0_graph_init
230nvc0_graph_init_crstr_0[] = {
215 { 0x418b04, 1, 0x04, 0x00000000 }, 231 { 0x418b04, 1, 0x04, 0x00000000 },
232 {}
233};
234
235const struct nvc0_graph_init
236nvc0_graph_init_setup_1[] = {
216 { 0x4188c8, 1, 0x04, 0x80000000 }, 237 { 0x4188c8, 1, 0x04, 0x80000000 },
217 { 0x4188cc, 1, 0x04, 0x00000000 }, 238 { 0x4188cc, 1, 0x04, 0x00000000 },
218 { 0x4188d0, 1, 0x04, 0x00010000 }, 239 { 0x4188d0, 1, 0x04, 0x00010000 },
219 { 0x4188d4, 1, 0x04, 0x00000001 }, 240 { 0x4188d4, 1, 0x04, 0x00000001 },
241 {}
242};
243
244const struct nvc0_graph_init
245nvc0_graph_init_zcull_0[] = {
220 { 0x418910, 1, 0x04, 0x00010001 }, 246 { 0x418910, 1, 0x04, 0x00010001 },
221 { 0x418914, 1, 0x04, 0x00000301 }, 247 { 0x418914, 1, 0x04, 0x00000301 },
222 { 0x418918, 1, 0x04, 0x00800000 }, 248 { 0x418918, 1, 0x04, 0x00800000 },
223 { 0x418980, 1, 0x04, 0x77777770 }, 249 { 0x418980, 1, 0x04, 0x77777770 },
224 { 0x418984, 3, 0x04, 0x77777777 }, 250 { 0x418984, 3, 0x04, 0x77777777 },
251 {}
252};
253
254const struct nvc0_graph_init
255nvc0_graph_init_gpm_0[] = {
225 { 0x418c04, 1, 0x04, 0x00000000 }, 256 { 0x418c04, 1, 0x04, 0x00000000 },
226 { 0x418c88, 1, 0x04, 0x00000000 }, 257 { 0x418c88, 1, 0x04, 0x00000000 },
258 {}
259};
260
261const struct nvc0_graph_init
262nvc0_graph_init_gpc_unk_1[] = {
227 { 0x418d00, 1, 0x04, 0x00000000 }, 263 { 0x418d00, 1, 0x04, 0x00000000 },
228 { 0x418f08, 1, 0x04, 0x00000000 }, 264 { 0x418f08, 1, 0x04, 0x00000000 },
229 { 0x418e00, 1, 0x04, 0x00000050 }, 265 { 0x418e00, 1, 0x04, 0x00000050 },
230 { 0x418e08, 1, 0x04, 0x00000000 }, 266 { 0x418e08, 1, 0x04, 0x00000000 },
267 {}
268};
269
270const struct nvc0_graph_init
271nvc0_graph_init_gcc_0[] = {
231 { 0x41900c, 1, 0x04, 0x00000000 }, 272 { 0x41900c, 1, 0x04, 0x00000000 },
232 { 0x419018, 1, 0x04, 0x00000000 }, 273 { 0x419018, 1, 0x04, 0x00000000 },
233 {} 274 {}
234}; 275};
235 276
236static struct nvc0_graph_init 277const struct nvc0_graph_init
237nvc0_graph_init_tpc[] = { 278nvc0_graph_init_tpccs_0[] = {
238 { 0x419d08, 2, 0x04, 0x00000000 }, 279 { 0x419d08, 2, 0x04, 0x00000000 },
239 { 0x419d10, 1, 0x04, 0x00000014 }, 280 { 0x419d10, 1, 0x04, 0x00000014 },
281 {}
282};
283
284const struct nvc0_graph_init
285nvc0_graph_init_tex_0[] = {
240 { 0x419ab0, 1, 0x04, 0x00000000 }, 286 { 0x419ab0, 1, 0x04, 0x00000000 },
241 { 0x419ab8, 1, 0x04, 0x000000e7 }, 287 { 0x419ab8, 1, 0x04, 0x000000e7 },
242 { 0x419abc, 2, 0x04, 0x00000000 }, 288 { 0x419abc, 2, 0x04, 0x00000000 },
289 {}
290};
291
292const struct nvc0_graph_init
293nvc0_graph_init_pe_0[] = {
243 { 0x41980c, 3, 0x04, 0x00000000 }, 294 { 0x41980c, 3, 0x04, 0x00000000 },
244 { 0x419844, 1, 0x04, 0x00000000 }, 295 { 0x419844, 1, 0x04, 0x00000000 },
245 { 0x41984c, 1, 0x04, 0x00005bc5 }, 296 { 0x41984c, 1, 0x04, 0x00005bc5 },
246 { 0x419850, 4, 0x04, 0x00000000 }, 297 { 0x419850, 4, 0x04, 0x00000000 },
298 {}
299};
300
301const struct nvc0_graph_init
302nvc0_graph_init_l1c_0[] = {
247 { 0x419c98, 1, 0x04, 0x00000000 }, 303 { 0x419c98, 1, 0x04, 0x00000000 },
248 { 0x419ca8, 1, 0x04, 0x80000000 }, 304 { 0x419ca8, 1, 0x04, 0x80000000 },
249 { 0x419cb4, 1, 0x04, 0x00000000 }, 305 { 0x419cb4, 1, 0x04, 0x00000000 },
250 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 306 { 0x419cb8, 1, 0x04, 0x00008bf4 },
251 { 0x419cbc, 1, 0x04, 0x28137606 }, 307 { 0x419cbc, 1, 0x04, 0x28137606 },
252 { 0x419cc0, 2, 0x04, 0x00000000 }, 308 { 0x419cc0, 2, 0x04, 0x00000000 },
309 {}
310};
311
312const struct nvc0_graph_init
313nvc0_graph_init_wwdx_0[] = {
253 { 0x419bd4, 1, 0x04, 0x00800000 }, 314 { 0x419bd4, 1, 0x04, 0x00800000 },
254 { 0x419bdc, 1, 0x04, 0x00000000 }, 315 { 0x419bdc, 1, 0x04, 0x00000000 },
316 {}
317};
318
319const struct nvc0_graph_init
320nvc0_graph_init_tpccs_1[] = {
255 { 0x419d2c, 1, 0x04, 0x00000000 }, 321 { 0x419d2c, 1, 0x04, 0x00000000 },
322 {}
323};
324
325const struct nvc0_graph_init
326nvc0_graph_init_mpc_0[] = {
256 { 0x419c0c, 1, 0x04, 0x00000000 }, 327 { 0x419c0c, 1, 0x04, 0x00000000 },
328 {}
329};
330
331static const struct nvc0_graph_init
332nvc0_graph_init_sm_0[] = {
257 { 0x419e00, 1, 0x04, 0x00000000 }, 333 { 0x419e00, 1, 0x04, 0x00000000 },
258 { 0x419ea0, 1, 0x04, 0x00000000 }, 334 { 0x419ea0, 1, 0x04, 0x00000000 },
259 { 0x419ea4, 1, 0x04, 0x00000100 }, 335 { 0x419ea4, 1, 0x04, 0x00000100 },
@@ -270,8 +346,8 @@ nvc0_graph_init_tpc[] = {
270 {} 346 {}
271}; 347};
272 348
273struct nvc0_graph_init 349const struct nvc0_graph_init
274nvc0_graph_init_unk88xx[] = { 350nvc0_graph_init_be_0[] = {
275 { 0x40880c, 1, 0x04, 0x00000000 }, 351 { 0x40880c, 1, 0x04, 0x00000000 },
276 { 0x408910, 9, 0x04, 0x00000000 }, 352 { 0x408910, 9, 0x04, 0x00000000 },
277 { 0x408950, 1, 0x04, 0x00000000 }, 353 { 0x408950, 1, 0x04, 0x00000000 },
@@ -282,18 +358,64 @@ nvc0_graph_init_unk88xx[] = {
282 {} 358 {}
283}; 359};
284 360
285struct nvc0_graph_init 361const struct nvc0_graph_init
286nvc0_graph_tpc_0[] = { 362nvc0_graph_init_fe_1[] = {
287 { 0x50405c, 1, 0x04, 0x00000001 }, 363 { 0x4040f0, 1, 0x04, 0x00000000 },
288 {} 364 {}
289}; 365};
290 366
367const struct nvc0_graph_init
368nvc0_graph_init_pe_1[] = {
369 { 0x419880, 1, 0x04, 0x00000002 },
370 {}
371};
372
373static const struct nvc0_graph_pack
374nvc0_graph_pack_mmio[] = {
375 { nvc0_graph_init_main_0 },
376 { nvc0_graph_init_fe_0 },
377 { nvc0_graph_init_pri_0 },
378 { nvc0_graph_init_rstr2d_0 },
379 { nvc0_graph_init_pd_0 },
380 { nvc0_graph_init_ds_0 },
381 { nvc0_graph_init_scc_0 },
382 { nvc0_graph_init_prop_0 },
383 { nvc0_graph_init_gpc_unk_0 },
384 { nvc0_graph_init_setup_0 },
385 { nvc0_graph_init_crstr_0 },
386 { nvc0_graph_init_setup_1 },
387 { nvc0_graph_init_zcull_0 },
388 { nvc0_graph_init_gpm_0 },
389 { nvc0_graph_init_gpc_unk_1 },
390 { nvc0_graph_init_gcc_0 },
391 { nvc0_graph_init_tpccs_0 },
392 { nvc0_graph_init_tex_0 },
393 { nvc0_graph_init_pe_0 },
394 { nvc0_graph_init_l1c_0 },
395 { nvc0_graph_init_wwdx_0 },
396 { nvc0_graph_init_tpccs_1 },
397 { nvc0_graph_init_mpc_0 },
398 { nvc0_graph_init_sm_0 },
399 { nvc0_graph_init_be_0 },
400 { nvc0_graph_init_fe_1 },
401 { nvc0_graph_init_pe_1 },
402 {}
403};
404
405/*******************************************************************************
406 * PGRAPH engine/subdev functions
407 ******************************************************************************/
408
291void 409void
292nvc0_graph_mmio(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init) 410nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
293{ 411{
294 for (; init && init->count; init++) { 412 const struct nvc0_graph_pack *pack;
295 u32 addr = init->addr, i; 413 const struct nvc0_graph_init *init;
296 for (i = 0; i < init->count; i++) { 414
415 pack_for_each_init(init, pack, p) {
416 u32 next = init->addr + init->count * init->pitch;
417 u32 addr = init->addr;
418 while (addr < next) {
297 nv_wr32(priv, addr, init->data); 419 nv_wr32(priv, addr, init->data);
298 addr += init->pitch; 420 addr += init->pitch;
299 } 421 }
@@ -301,49 +423,53 @@ nvc0_graph_mmio(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init)
301} 423}
302 424
303void 425void
304nvc0_graph_icmd(struct nvc0_graph_priv *priv, struct nvc0_graph_init *init) 426nvc0_graph_icmd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
305{ 427{
306 u32 addr, data; 428 const struct nvc0_graph_pack *pack;
307 int i, j; 429 const struct nvc0_graph_init *init;
430 u32 data = 0;
308 431
309 nv_wr32(priv, 0x400208, 0x80000000); 432 nv_wr32(priv, 0x400208, 0x80000000);
310 for (i = 0; init->count; init++, i++) { 433
311 if (!i || data != init->data) { 434 pack_for_each_init(init, pack, p) {
435 u32 next = init->addr + init->count * init->pitch;
436 u32 addr = init->addr;
437
438 if ((pack == p && init == p->init) || data != init->data) {
312 nv_wr32(priv, 0x400204, init->data); 439 nv_wr32(priv, 0x400204, init->data);
313 data = init->data; 440 data = init->data;
314 } 441 }
315 442
316 addr = init->addr; 443 while (addr < next) {
317 for (j = 0; j < init->count; j++) {
318 nv_wr32(priv, 0x400200, addr); 444 nv_wr32(priv, 0x400200, addr);
445 nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
319 addr += init->pitch; 446 addr += init->pitch;
320 while (nv_rd32(priv, 0x400700) & 0x00000002) {}
321 } 447 }
322 } 448 }
449
323 nv_wr32(priv, 0x400208, 0x00000000); 450 nv_wr32(priv, 0x400208, 0x00000000);
324} 451}
325 452
326void 453void
327nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds) 454nvc0_graph_mthd(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
328{ 455{
329 struct nvc0_graph_mthd *mthd; 456 const struct nvc0_graph_pack *pack;
330 struct nvc0_graph_init *init; 457 const struct nvc0_graph_init *init;
331 int i = 0, j; 458 u32 data = 0;
332 u32 data;
333
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) {
337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data;
340 }
341 459
342 addr = (addr & 0x8000ffff) | (init->addr << 14); 460 pack_for_each_init(init, pack, p) {
343 for (j = 0; j < init->count; j++) { 461 u32 ctrl = 0x80000000 | pack->type;
344 nv_wr32(priv, 0x404488, addr); 462 u32 next = init->addr + init->count * init->pitch;
345 addr += init->pitch << 14; 463 u32 addr = init->addr;
346 } 464
465 if ((pack == p && init == p->init) || data != init->data) {
466 nv_wr32(priv, 0x40448c, init->data);
467 data = init->data;
468 }
469
470 while (addr < next) {
471 nv_wr32(priv, 0x404488, ctrl | (addr << 14));
472 addr += init->pitch;
347 } 473 }
348 } 474 }
349} 475}
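
One behavioural detail in the nvc0_graph_icmd() rewrite above: the open-coded spin `while (nv_rd32(priv, 0x400700) & 0x00000002) {}` becomes nv_wait(priv, 0x400700, 0x00000002, 0x00000000), nouveau's bounded poll for (register & mask) == value. A rough stand-alone model of that contract; the simulated register and the plain iteration cap are illustrative, not how the driver's timer-based timeout actually works:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x00000002;		/* "busy" bit starts set */

static uint32_t mmio_rd32(uint32_t addr)
{
	static int reads;
	(void)addr;
	if (++reads > 3)			/* model the hardware going idle */
		fake_reg &= ~0x00000002;
	return fake_reg;
}

/* wait until (reg & mask) == value, giving up after a bounded spin */
static bool wait_mask(uint32_t addr, uint32_t mask, uint32_t value)
{
	for (unsigned int tries = 0; tries < 1000000; tries++)
		if ((mmio_rd32(addr) & mask) == value)
			return true;
	return false;	/* caller decides how to report the timeout */
}

int main(void)
{
	printf("went idle: %s\n",
	       wait_mask(0x400700, 0x00000002, 0x00000000) ? "yes" : "no");
	return 0;
}
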
@@ -772,11 +898,12 @@ nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
772 898
773static void 899static void
774nvc0_graph_init_csdata(struct nvc0_graph_priv *priv, 900nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
775 struct nvc0_graph_init *init, 901 const struct nvc0_graph_pack *pack,
776 u32 falcon, u32 starstar, u32 base) 902 u32 falcon, u32 starstar, u32 base)
777{ 903{
778 u32 addr = init->addr; 904 const struct nvc0_graph_pack *iter;
779 u32 next = addr; 905 const struct nvc0_graph_init *init;
906 u32 addr = ~0, prev = ~0, xfer = 0;
780 u32 star, temp; 907 u32 star, temp;
781 908
782 nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar); 909 nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
@@ -786,22 +913,28 @@ nvc0_graph_init_csdata(struct nvc0_graph_priv *priv,
786 star = temp; 913 star = temp;
787 nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star); 914 nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
788 915
789 do { 916 pack_for_each_init(init, iter, pack) {
790 if (init->addr != next) { 917 u32 head = init->addr - base;
791 while (addr < next) { 918 u32 tail = head + init->count * init->pitch;
792 u32 nr = min((int)(next - addr) / 4, 32); 919 while (head < tail) {
793 nv_wr32(priv, falcon + 0x01c4, 920 if (head != prev + 4 || xfer >= 32) {
794 ((nr - 1) << 26) | (addr - base)); 921 if (xfer) {
795 addr += nr * 4; 922 u32 data = ((--xfer << 26) | addr);
796 star += 4; 923 nv_wr32(priv, falcon + 0x01c4, data);
924 star += 4;
925 }
926 addr = head;
927 xfer = 0;
797 } 928 }
798 addr = next = init->addr; 929 prev = head;
930 xfer = xfer + 1;
931 head = head + init->pitch;
799 } 932 }
800 next += init->count * 4; 933 }
801 } while ((init++)->count);
802 934
935 nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
803 nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar); 936 nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
804 nv_wr32(priv, falcon + 0x01c4, star); 937 nv_wr32(priv, falcon + 0x01c4, star + 4);
805} 938}
806 939
807int 940int
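
The rewritten nvc0_graph_init_csdata() above stops streaming init entries one register at a time: it folds runs of consecutive offsets (4 bytes apart, at most 32 long) into single control-store words of the form ((count - 1) << 26) | start_offset. A stand-alone model of just that run-folding, with emit() standing in for the nv_wr32(priv, falcon + 0x01c4, ...) write and a hand-picked offset list as input:

#include <stdint.h>
#include <stdio.h>

static void emit(uint32_t xfer, uint32_t addr)
{
	/* xfer is (count - 1), exactly as in the (--xfer << 26) above */
	printf("cs word 0x%08x  (%u regs from offset 0x%03x)\n",
	       (xfer << 26) | addr, xfer + 1, addr);
}

int main(void)
{
	/* offsets relative to 'base': a run of three, a gap, a run of two */
	static const uint32_t heads[] = { 0x000, 0x004, 0x008, 0x100, 0x104 };
	uint32_t addr = ~0u, prev = ~0u, xfer = 0;

	for (unsigned int i = 0; i < sizeof(heads) / sizeof(heads[0]); i++) {
		uint32_t head = heads[i];
		if (head != prev + 4 || xfer >= 32) {	/* run broken? */
			if (xfer)
				emit(xfer - 1, addr);
			addr = head;
			xfer = 0;
		}
		prev = head;
		xfer++;
	}
	emit(xfer - 1, addr);	/* flush the final run */
	return 0;
}

Run on the list above, this prints one word covering three registers at offset 0x000 and one covering two at 0x100.
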
@@ -809,7 +942,6 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
809{ 942{
810 struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass; 943 struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
811 struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; 944 struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
812 struct nvc0_graph_init *init;
813 u32 r000260; 945 u32 r000260;
814 int i; 946 int i;
815 947
@@ -919,10 +1051,6 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
919 nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]); 1051 nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
920 } 1052 }
921 1053
922 for (i = 0; (init = cclass->hub[i]); i++) {
923 nvc0_graph_init_csdata(priv, init, 0x409000, 0x000, 0x000000);
924 }
925
926 /* load GPC microcode */ 1054 /* load GPC microcode */
927 nv_wr32(priv, 0x41a1c0, 0x01000000); 1055 nv_wr32(priv, 0x41a1c0, 0x01000000);
928 for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++) 1056 for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
@@ -936,12 +1064,11 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
936 } 1064 }
937 nv_wr32(priv, 0x000260, r000260); 1065 nv_wr32(priv, 0x000260, r000260);
938 1066
939 if ((init = cclass->gpc[0])) 1067 /* load register lists */
940 nvc0_graph_init_csdata(priv, init, 0x41a000, 0x000, 0x418000); 1068 nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
941 if ((init = cclass->gpc[2])) 1069 nvc0_graph_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
942 nvc0_graph_init_csdata(priv, init, 0x41a000, 0x004, 0x419800); 1070 nvc0_graph_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
943 if ((init = cclass->gpc[3])) 1071 nvc0_graph_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
944 nvc0_graph_init_csdata(priv, init, 0x41a000, 0x008, 0x41be00);
945 1072
946 /* start HUB ucode running, it'll init the GPCs */ 1073 /* start HUB ucode running, it'll init the GPCs */
947 nv_wr32(priv, 0x40910c, 0x00000000); 1074 nv_wr32(priv, 0x40910c, 0x00000000);
@@ -988,8 +1115,7 @@ nvc0_graph_init(struct nouveau_object *object)
988 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); 1115 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
989 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); 1116 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
990 1117
991 for (i = 0; oclass->mmio[i]; i++) 1118 nvc0_graph_mmio(priv, oclass->mmio);
992 nvc0_graph_mmio(priv, oclass->mmio[i]);
993 1119
994 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 1120 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
995 for (i = 0, gpc = -1; i < priv->tpc_total; i++) { 1121 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
@@ -1091,10 +1217,10 @@ nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
1091 int ret; 1217 int ret;
1092 1218
1093 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); 1219 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
1094 ret = request_firmware(&fw, f, &device->pdev->dev); 1220 ret = request_firmware(&fw, f, nv_device_base(device));
1095 if (ret) { 1221 if (ret) {
1096 snprintf(f, sizeof(f), "nouveau/%s", fwname); 1222 snprintf(f, sizeof(f), "nouveau/%s", fwname);
1097 ret = request_firmware(&fw, f, &device->pdev->dev); 1223 ret = request_firmware(&fw, f, nv_device_base(device));
1098 if (ret) { 1224 if (ret) {
1099 nv_error(priv, "failed to load %s\n", fwname); 1225 nv_error(priv, "failed to load %s\n", fwname);
1100 return ret; 1226 return ret;
@@ -1220,22 +1346,6 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1220 return 0; 1346 return 0;
1221} 1347}
1222 1348
1223struct nvc0_graph_init *
1224nvc0_graph_init_mmio[] = {
1225 nvc0_graph_init_regs,
1226 nvc0_graph_init_unk40xx,
1227 nvc0_graph_init_unk44xx,
1228 nvc0_graph_init_unk78xx,
1229 nvc0_graph_init_unk60xx,
1230 nvc0_graph_init_unk58xx,
1231 nvc0_graph_init_unk80xx,
1232 nvc0_graph_init_gpc,
1233 nvc0_graph_init_tpc,
1234 nvc0_graph_init_unk88xx,
1235 nvc0_graph_tpc_0,
1236 NULL
1237};
1238
1239#include "fuc/hubnvc0.fuc.h" 1349#include "fuc/hubnvc0.fuc.h"
1240 1350
1241struct nvc0_graph_ucode 1351struct nvc0_graph_ucode
@@ -1267,7 +1377,7 @@ nvc0_graph_oclass = &(struct nvc0_graph_oclass) {
1267 }, 1377 },
1268 .cclass = &nvc0_grctx_oclass, 1378 .cclass = &nvc0_grctx_oclass,
1269 .sclass = nvc0_graph_sclass, 1379 .sclass = nvc0_graph_sclass,
1270 .mmio = nvc0_graph_init_mmio, 1380 .mmio = nvc0_graph_pack_mmio,
1271 .fecs.ucode = &nvc0_graph_fecs_ucode, 1381 .fecs.ucode = &nvc0_graph_fecs_ucode,
1272 .gpccs.ucode = &nvc0_graph_gpccs_ucode, 1382 .gpccs.ucode = &nvc0_graph_gpccs_ucode,
1273}.base; 1383}.base;
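
Taken together, the new tables form a two-level, zero-terminated structure: a pack array whose entries each point at an init list, and init entries that each expand into count writes starting at addr and stepping by pitch. A compilable sketch of the walk nvc0_graph_mmio() now performs; the struct names and the printf stub are stand-ins for the driver's nvc0_graph_* types and nv_wr32():

#include <stdint.h>
#include <stdio.h>

struct graph_init { uint32_t addr; uint8_t count; uint8_t pitch; uint32_t data; };
struct graph_pack { const struct graph_init *init; uint32_t type; };

/* same shape as the macro added to nvc0.h below */
#define pack_for_each_init(init, pack, head)			\
	for (pack = head; pack && pack->init; pack++)		\
		for (init = pack->init; init && init->count; init++)

static void wr32(uint32_t addr, uint32_t data)
{
	printf("wr 0x%06x <- 0x%08x\n", addr, data);
}

static const struct graph_init init_fe_0[] = {
	{ 0x40415c, 1, 0x04, 0x00000000 },
	{ 0x404170, 1, 0x04, 0x00000000 },
	{}
};

static const struct graph_init init_pri_0[] = {
	{ 0x404488, 2, 0x04, 0x00000000 },	/* count 2: expands to two writes */
	{}
};

static const struct graph_pack pack_mmio[] = {
	{ init_fe_0 },
	{ init_pri_0 },
	{}					/* NULL init terminates the packs */
};

int main(void)
{
	const struct graph_pack *pack;
	const struct graph_init *init;

	pack_for_each_init(init, pack, pack_mmio) {
		uint32_t next = init->addr + init->count * init->pitch;
		uint32_t addr = init->addr;
		while (addr < next) {
			wr32(addr, init->data);
			addr += init->pitch;
		}
	}
	return 0;
}

The `(pack == p && init == p->init)` test in the icmd/mthd variants is simply "first entry of the first pack", so the data register is rewritten only on the very first write or when the value actually changes.
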
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index b0ab6de270b2..90d44616c876 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -45,6 +45,7 @@
45#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r)) 45#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
46#define GPC_BCAST(r) (0x418000 + (r)) 46#define GPC_BCAST(r) (0x418000 + (r))
47#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r)) 47#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
48#define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
48#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) 49#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
49 50
50struct nvc0_graph_data { 51struct nvc0_graph_data {
@@ -102,8 +103,6 @@ struct nvc0_graph_chan {
102 } data[4]; 103 } data[4];
103}; 104};
104 105
105int nvc0_grctx_generate(struct nvc0_graph_priv *);
106
107int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *, 106int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
108 struct nouveau_oclass *, void *, u32, 107 struct nouveau_oclass *, void *, u32,
109 struct nouveau_object **); 108 struct nouveau_object **);
@@ -130,34 +129,14 @@ struct nvc0_graph_init {
130 u32 data; 129 u32 data;
131}; 130};
132 131
133struct nvc0_graph_mthd { 132struct nvc0_graph_pack {
134 u16 oclass; 133 const struct nvc0_graph_init *init;
135 struct nvc0_graph_init *init; 134 u32 type;
136};
137
138struct nvc0_grctx {
139 struct nvc0_graph_priv *priv;
140 struct nvc0_graph_data *data;
141 struct nvc0_graph_mmio *mmio;
142 int buffer_nr;
143 u64 buffer[4];
144 u64 addr;
145}; 135};
146 136
147struct nvc0_grctx_oclass { 137#define pack_for_each_init(init, pack, head) \
148 struct nouveau_oclass base; 138 for (pack = head; pack && pack->init; pack++) \
149 /* main context generation function */ 139 for (init = pack->init; init && init->count; init++)
150 void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
151 /* context-specific modify-on-first-load list generation function */
152 void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
153 void (*unkn)(struct nvc0_graph_priv *);
154 /* mmio context data */
155 struct nvc0_graph_init **hub;
156 struct nvc0_graph_init **gpc;
157 /* indirect context data, generated with icmds/mthds */
158 struct nvc0_graph_init *icmd;
159 struct nvc0_graph_mthd *mthd;
160};
161 140
162struct nvc0_graph_ucode { 141struct nvc0_graph_ucode {
163 struct nvc0_graph_fuc code; 142 struct nvc0_graph_fuc code;
@@ -171,7 +150,7 @@ struct nvc0_graph_oclass {
171 struct nouveau_oclass base; 150 struct nouveau_oclass base;
172 struct nouveau_oclass **cclass; 151 struct nouveau_oclass **cclass;
173 struct nouveau_oclass *sclass; 152 struct nouveau_oclass *sclass;
174 struct nvc0_graph_init **mmio; 153 const struct nvc0_graph_pack *mmio;
175 struct { 154 struct {
176 struct nvc0_graph_ucode *ucode; 155 struct nvc0_graph_ucode *ucode;
177 } fecs; 156 } fecs;
@@ -180,119 +159,72 @@ struct nvc0_graph_oclass {
180 } gpccs; 159 } gpccs;
181}; 160};
182 161
183void nvc0_graph_mmio(struct nvc0_graph_priv *, struct nvc0_graph_init *); 162void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
184void nvc0_graph_icmd(struct nvc0_graph_priv *, struct nvc0_graph_init *); 163void nvc0_graph_icmd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
185void nvc0_graph_mthd(struct nvc0_graph_priv *, struct nvc0_graph_mthd *); 164void nvc0_graph_mthd(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
186int nvc0_graph_init_ctxctl(struct nvc0_graph_priv *); 165int nvc0_graph_init_ctxctl(struct nvc0_graph_priv *);
187 166
188extern struct nvc0_graph_init nvc0_graph_init_regs[]; 167/* register init value lists */
189extern struct nvc0_graph_init nvc0_graph_init_unk40xx[]; 168
190extern struct nvc0_graph_init nvc0_graph_init_unk44xx[]; 169extern const struct nvc0_graph_init nvc0_graph_init_main_0[];
191extern struct nvc0_graph_init nvc0_graph_init_unk78xx[]; 170extern const struct nvc0_graph_init nvc0_graph_init_fe_0[];
192extern struct nvc0_graph_init nvc0_graph_init_unk60xx[]; 171extern const struct nvc0_graph_init nvc0_graph_init_pri_0[];
193extern struct nvc0_graph_init nvc0_graph_init_unk58xx[]; 172extern const struct nvc0_graph_init nvc0_graph_init_rstr2d_0[];
194extern struct nvc0_graph_init nvc0_graph_init_unk80xx[]; 173extern const struct nvc0_graph_init nvc0_graph_init_pd_0[];
195extern struct nvc0_graph_init nvc0_graph_init_gpc[]; 174extern const struct nvc0_graph_init nvc0_graph_init_ds_0[];
196extern struct nvc0_graph_init nvc0_graph_init_unk88xx[]; 175extern const struct nvc0_graph_init nvc0_graph_init_scc_0[];
197extern struct nvc0_graph_init nvc0_graph_tpc_0[]; 176extern const struct nvc0_graph_init nvc0_graph_init_prop_0[];
198 177extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_0[];
199extern struct nvc0_graph_init nvc3_graph_init_unk58xx[]; 178extern const struct nvc0_graph_init nvc0_graph_init_setup_0[];
200 179extern const struct nvc0_graph_init nvc0_graph_init_crstr_0[];
201extern struct nvc0_graph_init nvd9_graph_init_unk58xx[]; 180extern const struct nvc0_graph_init nvc0_graph_init_setup_1[];
202extern struct nvc0_graph_init nvd9_graph_init_unk64xx[]; 181extern const struct nvc0_graph_init nvc0_graph_init_zcull_0[];
203 182extern const struct nvc0_graph_init nvc0_graph_init_gpm_0[];
204extern struct nvc0_graph_init nve4_graph_init_regs[]; 183extern const struct nvc0_graph_init nvc0_graph_init_gpc_unk_1[];
205extern struct nvc0_graph_init nve4_graph_init_unk[]; 184extern const struct nvc0_graph_init nvc0_graph_init_gcc_0[];
206extern struct nvc0_graph_init nve4_graph_init_unk88xx[]; 185extern const struct nvc0_graph_init nvc0_graph_init_tpccs_0[];
207 186extern const struct nvc0_graph_init nvc0_graph_init_tex_0[];
208extern struct nvc0_graph_init nvf0_graph_init_unk40xx[]; 187extern const struct nvc0_graph_init nvc0_graph_init_pe_0[];
209extern struct nvc0_graph_init nvf0_graph_init_unk70xx[]; 188extern const struct nvc0_graph_init nvc0_graph_init_l1c_0[];
210extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[]; 189extern const struct nvc0_graph_init nvc0_graph_init_wwdx_0[];
211extern struct nvc0_graph_init nvf0_graph_init_tpc[]; 190extern const struct nvc0_graph_init nvc0_graph_init_tpccs_1[];
212 191extern const struct nvc0_graph_init nvc0_graph_init_mpc_0[];
213int nvc0_grctx_generate(struct nvc0_graph_priv *); 192extern const struct nvc0_graph_init nvc0_graph_init_be_0[];
214void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); 193extern const struct nvc0_graph_init nvc0_graph_init_fe_1[];
215void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); 194extern const struct nvc0_graph_init nvc0_graph_init_pe_1[];
216void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *); 195
217void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *); 196extern const struct nvc0_graph_init nvc4_graph_init_ds_0[];
218void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *); 197extern const struct nvc0_graph_init nvc4_graph_init_tex_0[];
219void nvc0_grctx_generate_r4060a8(struct nvc0_graph_priv *); 198extern const struct nvc0_graph_init nvc4_graph_init_sm_0[];
220void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *); 199
221void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); 200extern const struct nvc0_graph_init nvc1_graph_init_gpc_unk_0[];
222void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *); 201extern const struct nvc0_graph_init nvc1_graph_init_setup_1[];
223 202
224extern struct nouveau_oclass *nvc0_grctx_oclass; 203extern const struct nvc0_graph_init nvd9_graph_init_pd_0[];
225extern struct nvc0_graph_init *nvc0_grctx_init_hub[]; 204extern const struct nvc0_graph_init nvd9_graph_init_ds_0[];
226extern struct nvc0_graph_init nvc0_grctx_init_base[]; 205extern const struct nvc0_graph_init nvd9_graph_init_prop_0[];
227extern struct nvc0_graph_init nvc0_grctx_init_unk40xx[]; 206extern const struct nvc0_graph_init nvd9_graph_init_gpm_0[];
228extern struct nvc0_graph_init nvc0_grctx_init_unk44xx[]; 207extern const struct nvc0_graph_init nvd9_graph_init_gpc_unk_1[];
229extern struct nvc0_graph_init nvc0_grctx_init_unk46xx[]; 208extern const struct nvc0_graph_init nvd9_graph_init_tex_0[];
230extern struct nvc0_graph_init nvc0_grctx_init_unk47xx[]; 209extern const struct nvc0_graph_init nvd9_graph_init_sm_0[];
231extern struct nvc0_graph_init nvc0_grctx_init_unk60xx[]; 210extern const struct nvc0_graph_init nvd9_graph_init_fe_1[];
232extern struct nvc0_graph_init nvc0_grctx_init_unk64xx[]; 211
233extern struct nvc0_graph_init nvc0_grctx_init_unk78xx[]; 212extern const struct nvc0_graph_init nvd7_graph_init_pes_0[];
234extern struct nvc0_graph_init nvc0_grctx_init_unk80xx[]; 213extern const struct nvc0_graph_init nvd7_graph_init_wwdx_0[];
235extern struct nvc0_graph_init nvc0_grctx_init_gpc_0[]; 214extern const struct nvc0_graph_init nvd7_graph_init_cbm_0[];
236extern struct nvc0_graph_init nvc0_grctx_init_gpc_1[]; 215
237extern struct nvc0_graph_init nvc0_grctx_init_tpc[]; 216extern const struct nvc0_graph_init nve4_graph_init_main_0[];
238extern struct nvc0_graph_init nvc0_grctx_init_icmd[]; 217extern const struct nvc0_graph_init nve4_graph_init_tpccs_0[];
239extern struct nvc0_graph_init nvd9_grctx_init_icmd[]; // 218extern const struct nvc0_graph_init nve4_graph_init_pe_0[];
240 219extern const struct nvc0_graph_init nve4_graph_init_be_0[];
241extern struct nvc0_graph_mthd nvc0_grctx_init_mthd[]; 220
242extern struct nvc0_graph_init nvc0_grctx_init_902d[]; 221extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
243extern struct nvc0_graph_init nvc0_grctx_init_9039[]; 222extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
244extern struct nvc0_graph_init nvc0_grctx_init_90c0[]; 223extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
245extern struct nvc0_graph_init nvc0_grctx_init_mthd_magic[]; 224extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
246 225extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
247void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); 226
248void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *); 227extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
249extern struct nouveau_oclass *nvc1_grctx_oclass; 228
250extern struct nvc0_graph_init nvc1_grctx_init_9097[];
251
252extern struct nouveau_oclass *nvc3_grctx_oclass;
253
254extern struct nouveau_oclass *nvc8_grctx_oclass;
255extern struct nvc0_graph_init nvc8_grctx_init_9197[];
256extern struct nvc0_graph_init nvc8_grctx_init_9297[];
257
258extern struct nouveau_oclass *nvd7_grctx_oclass;
259
260extern struct nouveau_oclass *nvd9_grctx_oclass;
261extern struct nvc0_graph_init nvd9_grctx_init_rop[];
262extern struct nvc0_graph_mthd nvd9_grctx_init_mthd[];
263
264void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
265void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
266extern struct nouveau_oclass *nve4_grctx_oclass;
267extern struct nvc0_graph_init nve4_grctx_init_unk46xx[];
268extern struct nvc0_graph_init nve4_grctx_init_unk47xx[];
269extern struct nvc0_graph_init nve4_grctx_init_unk58xx[];
270extern struct nvc0_graph_init nve4_grctx_init_unk80xx[];
271extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
272
273extern struct nouveau_oclass *nvf0_grctx_oclass;
274extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
275extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
276extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
277
278extern struct nouveau_oclass *nv108_grctx_oclass;
279
280#define mmio_data(s,a,p) do { \
281 info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
282 info->addr = info->buffer[info->buffer_nr++] + (s); \
283 info->data->size = (s); \
284 info->data->align = (a); \
285 info->data->access = (p); \
286 info->data++; \
287} while(0)
288
289#define mmio_list(r,d,s,b) do { \
290 info->mmio->addr = (r); \
291 info->mmio->data = (d); \
292 info->mmio->shift = (s); \
293 info->mmio->buffer = (b); \
294 info->mmio++; \
295 nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
296} while(0)
297 229
298#endif 230#endif
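
The PPC_UNIT macro added above slots between the existing GPC and TPC strides; all three are plain address arithmetic. A quick worked example using the same macro bodies (the chosen unit indices and register offsets are arbitrary):

#include <stdio.h>

#define GPC_UNIT(t, r)    (0x500000 + (t) * 0x8000 + (r))
#define PPC_UNIT(t, m, r) (0x503000 + (t) * 0x8000 + (m) * 0x200 + (r))
#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))

int main(void)
{
	printf("GPC1       reg 0x408: 0x%06x\n", GPC_UNIT(1, 0x408));    /* 0x508408 */
	printf("GPC0, PPC1 reg 0x010: 0x%06x\n", PPC_UNIT(0, 1, 0x010)); /* 0x503210 */
	printf("GPC2, TPC3 reg 0x020: 0x%06x\n", TPC_UNIT(2, 3, 0x020)); /* 0x515820 */
	return 0;
}

Each GPC occupies a 0x8000 window; within it, PPC units step by 0x200 from offset 0x3000 and TPC units by 0x800 from offset 0x4000.
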
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
index bc4a469b86cb..30cab0b2eba1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -39,94 +40,82 @@ nvc1_graph_sclass[] = {
39}; 40};
40 41
41/******************************************************************************* 42/*******************************************************************************
42 * PGRAPH engine/subdev functions 43 * PGRAPH register lists
43 ******************************************************************************/ 44 ******************************************************************************/
44 45
45static struct nvc0_graph_init 46const struct nvc0_graph_init
46nvc1_graph_init_gpc[] = { 47nvc1_graph_init_gpc_unk_0[] = {
47 { 0x4184a0, 1, 0x04, 0x00000000 },
48 { 0x418604, 1, 0x04, 0x00000000 }, 48 { 0x418604, 1, 0x04, 0x00000000 },
49 { 0x418680, 1, 0x04, 0x00000000 }, 49 { 0x418680, 1, 0x04, 0x00000000 },
50 { 0x418714, 1, 0x04, 0x00000000 }, 50 { 0x418714, 1, 0x04, 0x00000000 },
51 { 0x418384, 1, 0x04, 0x00000000 }, 51 { 0x418384, 1, 0x04, 0x00000000 },
52 { 0x418814, 3, 0x04, 0x00000000 }, 52 {}
53 { 0x418b04, 1, 0x04, 0x00000000 }, 53};
54
55const struct nvc0_graph_init
56nvc1_graph_init_setup_1[] = {
54 { 0x4188c8, 2, 0x04, 0x00000000 }, 57 { 0x4188c8, 2, 0x04, 0x00000000 },
55 { 0x4188d0, 1, 0x04, 0x00010000 }, 58 { 0x4188d0, 1, 0x04, 0x00010000 },
56 { 0x4188d4, 1, 0x04, 0x00000001 }, 59 { 0x4188d4, 1, 0x04, 0x00000001 },
57 { 0x418910, 1, 0x04, 0x00010001 }, 60 {}
58 { 0x418914, 1, 0x04, 0x00000301 }, 61};
59 { 0x418918, 1, 0x04, 0x00800000 }, 62
60 { 0x418980, 1, 0x04, 0x77777770 }, 63static const struct nvc0_graph_init
61 { 0x418984, 3, 0x04, 0x77777777 }, 64nvc1_graph_init_gpc_unk_1[] = {
62 { 0x418c04, 1, 0x04, 0x00000000 },
63 { 0x418c88, 1, 0x04, 0x00000000 },
64 { 0x418d00, 1, 0x04, 0x00000000 }, 65 { 0x418d00, 1, 0x04, 0x00000000 },
65 { 0x418f08, 1, 0x04, 0x00000000 }, 66 { 0x418f08, 1, 0x04, 0x00000000 },
66 { 0x418e00, 1, 0x04, 0x00000003 }, 67 { 0x418e00, 1, 0x04, 0x00000003 },
67 { 0x418e08, 1, 0x04, 0x00000000 }, 68 { 0x418e08, 1, 0x04, 0x00000000 },
68 { 0x41900c, 1, 0x04, 0x00000000 },
69 { 0x419018, 1, 0x04, 0x00000000 },
70 {} 69 {}
71}; 70};
72 71
73static struct nvc0_graph_init 72static const struct nvc0_graph_init
74nvc1_graph_init_tpc[] = { 73nvc1_graph_init_pe_0[] = {
75 { 0x419d08, 2, 0x04, 0x00000000 }, 74 { 0x41980c, 1, 0x04, 0x00000010 },
76 { 0x419d10, 1, 0x04, 0x00000014 }, 75 { 0x419810, 1, 0x04, 0x00000000 },
77 { 0x419ab0, 1, 0x04, 0x00000000 },
78 { 0x419ac8, 1, 0x04, 0x00000000 },
79 { 0x419ab8, 1, 0x04, 0x000000e7 },
80 { 0x419abc, 2, 0x04, 0x00000000 },
81 { 0x41980c, 2, 0x04, 0x00000000 },
82 { 0x419814, 1, 0x04, 0x00000004 }, 76 { 0x419814, 1, 0x04, 0x00000004 },
83 { 0x419844, 1, 0x04, 0x00000000 }, 77 { 0x419844, 1, 0x04, 0x00000000 },
84 { 0x41984c, 1, 0x04, 0x00005bc5 }, 78 { 0x41984c, 1, 0x04, 0x00005bc5 },
85 { 0x419850, 4, 0x04, 0x00000000 }, 79 { 0x419850, 4, 0x04, 0x00000000 },
86 { 0x419880, 1, 0x04, 0x00000002 }, 80 { 0x419880, 1, 0x04, 0x00000002 },
87 { 0x419c98, 1, 0x04, 0x00000000 },
88 { 0x419ca8, 1, 0x04, 0x80000000 },
89 { 0x419cb4, 1, 0x04, 0x00000000 },
90 { 0x419cb8, 1, 0x04, 0x00008bf4 },
91 { 0x419cbc, 1, 0x04, 0x28137606 },
92 { 0x419cc0, 2, 0x04, 0x00000000 },
93 { 0x419bd4, 1, 0x04, 0x00800000 },
94 { 0x419bdc, 1, 0x04, 0x00000000 },
95 { 0x419d2c, 1, 0x04, 0x00000000 },
96 { 0x419c0c, 1, 0x04, 0x00000000 },
97 { 0x419e00, 1, 0x04, 0x00000000 },
98 { 0x419ea0, 1, 0x04, 0x00000000 },
99 { 0x419ea4, 1, 0x04, 0x00000100 },
100 { 0x419ea8, 1, 0x04, 0x00001100 },
101 { 0x419eac, 1, 0x04, 0x11100702 },
102 { 0x419eb0, 1, 0x04, 0x00000003 },
103 { 0x419eb4, 4, 0x04, 0x00000000 },
104 { 0x419ec8, 1, 0x04, 0x0e063818 },
105 { 0x419ecc, 1, 0x04, 0x0e060e06 },
106 { 0x419ed0, 1, 0x04, 0x00003818 },
107 { 0x419ed4, 1, 0x04, 0x011104f1 },
108 { 0x419edc, 1, 0x04, 0x00000000 },
109 { 0x419f00, 1, 0x04, 0x00000000 },
110 { 0x419f2c, 1, 0x04, 0x00000000 },
111 {} 81 {}
112}; 82};
113 83
114struct nvc0_graph_init * 84static const struct nvc0_graph_pack
115nvc1_graph_init_mmio[] = { 85nvc1_graph_pack_mmio[] = {
116 nvc0_graph_init_regs, 86 { nvc0_graph_init_main_0 },
117 nvc0_graph_init_unk40xx, 87 { nvc0_graph_init_fe_0 },
118 nvc0_graph_init_unk44xx, 88 { nvc0_graph_init_pri_0 },
119 nvc0_graph_init_unk78xx, 89 { nvc0_graph_init_rstr2d_0 },
120 nvc0_graph_init_unk60xx, 90 { nvc0_graph_init_pd_0 },
121 nvc3_graph_init_unk58xx, 91 { nvc4_graph_init_ds_0 },
122 nvc0_graph_init_unk80xx, 92 { nvc0_graph_init_scc_0 },
123 nvc1_graph_init_gpc, 93 { nvc0_graph_init_prop_0 },
124 nvc1_graph_init_tpc, 94 { nvc1_graph_init_gpc_unk_0 },
125 nvc0_graph_init_unk88xx, 95 { nvc0_graph_init_setup_0 },
126 nvc0_graph_tpc_0, 96 { nvc0_graph_init_crstr_0 },
127 NULL 97 { nvc1_graph_init_setup_1 },
98 { nvc0_graph_init_zcull_0 },
99 { nvc0_graph_init_gpm_0 },
100 { nvc1_graph_init_gpc_unk_1 },
101 { nvc0_graph_init_gcc_0 },
102 { nvc0_graph_init_tpccs_0 },
103 { nvc4_graph_init_tex_0 },
104 { nvc1_graph_init_pe_0 },
105 { nvc0_graph_init_l1c_0 },
106 { nvc0_graph_init_wwdx_0 },
107 { nvc0_graph_init_tpccs_1 },
108 { nvc0_graph_init_mpc_0 },
109 { nvc4_graph_init_sm_0 },
110 { nvc0_graph_init_be_0 },
111 { nvc0_graph_init_fe_1 },
112 {}
128}; 113};
129 114
115/*******************************************************************************
116 * PGRAPH engine/subdev functions
117 ******************************************************************************/
118
130struct nouveau_oclass * 119struct nouveau_oclass *
131nvc1_graph_oclass = &(struct nvc0_graph_oclass) { 120nvc1_graph_oclass = &(struct nvc0_graph_oclass) {
132 .base.handle = NV_ENGINE(GR, 0xc1), 121 .base.handle = NV_ENGINE(GR, 0xc1),
@@ -138,7 +127,7 @@ nvc1_graph_oclass = &(struct nvc0_graph_oclass) {
138 }, 127 },
139 .cclass = &nvc1_grctx_oclass, 128 .cclass = &nvc1_grctx_oclass,
140 .sclass = nvc1_graph_sclass, 129 .sclass = nvc1_graph_sclass,
141 .mmio = nvc1_graph_init_mmio, 130 .mmio = nvc1_graph_pack_mmio,
142 .fecs.ucode = &nvc0_graph_fecs_ucode, 131 .fecs.ucode = &nvc0_graph_fecs_ucode,
143 .gpccs.ucode = &nvc0_graph_gpccs_ucode, 132 .gpccs.ucode = &nvc0_graph_gpccs_ucode,
144}.base; 133}.base;
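
nvc1's pack table above shows the sharing model the whole series moves to: most entries point at the common nvc0_* lists, and only the units whose power-on defaults differ (here gpc_unk_0, setup_1, pe_0, plus the nvc4 ds/tex/sm lists) get chipset-local replacements. A compilable sketch of that override pattern, with illustrative names and register values borrowed from the lists above:

#include <stdint.h>
#include <stdio.h>

struct graph_init { uint32_t addr; uint8_t count; uint8_t pitch; uint32_t data; };
struct graph_pack { const struct graph_init *init; uint32_t type; };

/* shared list, referenced by every chipset's table */
static const struct graph_init base_l1c_0[] = {
	{ 0x419c98, 1, 0x04, 0x00000000 },
	{}
};

/* base vs. override for one unit: same register, different default */
static const struct graph_init base_pe_0[] = {
	{ 0x41984c, 1, 0x04, 0x00005bc5 },
	{}
};
static const struct graph_init chip_pe_0[] = {
	{ 0x41984c, 1, 0x04, 0x0000a918 },
	{}
};

static const struct graph_pack base_mmio[] = { { base_pe_0 }, { base_l1c_0 }, {} };
static const struct graph_pack chip_mmio[] = { { chip_pe_0 }, { base_l1c_0 }, {} };

static void dump(const char *tag, const struct graph_pack *p)
{
	for (; p->init; p++)
		for (const struct graph_init *i = p->init; i->count; i++)
			printf("%s: 0x%06x = 0x%08x\n", tag, i->addr, i->data);
}

int main(void)
{
	dump("base", base_mmio);	/* shared list printed by both walks */
	dump("chip", chip_mmio);	/* only the pe_0 value differs */
	return 0;
}
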
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c
index d44b3b3ee800..e82e70c53132 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc4.c
@@ -23,13 +23,14 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * PGRAPH engine/subdev functions 29 * PGRAPH register lists
29 ******************************************************************************/ 30 ******************************************************************************/
30 31
31struct nvc0_graph_init 32const struct nvc0_graph_init
32nvc3_graph_init_unk58xx[] = { 33nvc4_graph_init_ds_0[] = {
33 { 0x405844, 1, 0x04, 0x00ffffff }, 34 { 0x405844, 1, 0x04, 0x00ffffff },
34 { 0x405850, 1, 0x04, 0x00000000 }, 35 { 0x405850, 1, 0x04, 0x00000000 },
35 { 0x405900, 1, 0x04, 0x00002834 }, 36 { 0x405900, 1, 0x04, 0x00002834 },
@@ -37,29 +38,27 @@ nvc3_graph_init_unk58xx[] = {
37 {} 38 {}
38}; 39};
39 40
40static struct nvc0_graph_init 41const struct nvc0_graph_init
41nvc3_graph_init_tpc[] = { 42nvc4_graph_init_tex_0[] = {
42 { 0x419d08, 2, 0x04, 0x00000000 },
43 { 0x419d10, 1, 0x04, 0x00000014 },
44 { 0x419ab0, 1, 0x04, 0x00000000 }, 43 { 0x419ab0, 1, 0x04, 0x00000000 },
45 { 0x419ac8, 1, 0x04, 0x00000000 }, 44 { 0x419ac8, 1, 0x04, 0x00000000 },
46 { 0x419ab8, 1, 0x04, 0x000000e7 }, 45 { 0x419ab8, 1, 0x04, 0x000000e7 },
47 { 0x419abc, 2, 0x04, 0x00000000 }, 46 { 0x419abc, 2, 0x04, 0x00000000 },
47 {}
48};
49
50static const struct nvc0_graph_init
51nvc4_graph_init_pe_0[] = {
48 { 0x41980c, 3, 0x04, 0x00000000 }, 52 { 0x41980c, 3, 0x04, 0x00000000 },
49 { 0x419844, 1, 0x04, 0x00000000 }, 53 { 0x419844, 1, 0x04, 0x00000000 },
50 { 0x41984c, 1, 0x04, 0x00005bc5 }, 54 { 0x41984c, 1, 0x04, 0x00005bc5 },
51 { 0x419850, 4, 0x04, 0x00000000 }, 55 { 0x419850, 4, 0x04, 0x00000000 },
52 { 0x419880, 1, 0x04, 0x00000002 }, 56 { 0x419880, 1, 0x04, 0x00000002 },
53 { 0x419c98, 1, 0x04, 0x00000000 }, 57 {}
54 { 0x419ca8, 1, 0x04, 0x80000000 }, 58};
55 { 0x419cb4, 1, 0x04, 0x00000000 }, 59
56 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 60const struct nvc0_graph_init
57 { 0x419cbc, 1, 0x04, 0x28137606 }, 61nvc4_graph_init_sm_0[] = {
58 { 0x419cc0, 2, 0x04, 0x00000000 },
59 { 0x419bd4, 1, 0x04, 0x00800000 },
60 { 0x419bdc, 1, 0x04, 0x00000000 },
61 { 0x419d2c, 1, 0x04, 0x00000000 },
62 { 0x419c0c, 1, 0x04, 0x00000000 },
63 { 0x419e00, 1, 0x04, 0x00000000 }, 62 { 0x419e00, 1, 0x04, 0x00000000 },
64 { 0x419ea0, 1, 0x04, 0x00000000 }, 63 { 0x419ea0, 1, 0x04, 0x00000000 },
65 { 0x419ea4, 1, 0x04, 0x00000100 }, 64 { 0x419ea4, 1, 0x04, 0x00000100 },
@@ -77,24 +76,43 @@ nvc3_graph_init_tpc[] = {
77 {} 76 {}
78}; 77};
79 78
80static struct nvc0_graph_init * 79static const struct nvc0_graph_pack
81nvc3_graph_init_mmio[] = { 80nvc4_graph_pack_mmio[] = {
82 nvc0_graph_init_regs, 81 { nvc0_graph_init_main_0 },
83 nvc0_graph_init_unk40xx, 82 { nvc0_graph_init_fe_0 },
84 nvc0_graph_init_unk44xx, 83 { nvc0_graph_init_pri_0 },
85 nvc0_graph_init_unk78xx, 84 { nvc0_graph_init_rstr2d_0 },
86 nvc0_graph_init_unk60xx, 85 { nvc0_graph_init_pd_0 },
87 nvc3_graph_init_unk58xx, 86 { nvc4_graph_init_ds_0 },
88 nvc0_graph_init_unk80xx, 87 { nvc0_graph_init_scc_0 },
89 nvc0_graph_init_gpc, 88 { nvc0_graph_init_prop_0 },
90 nvc3_graph_init_tpc, 89 { nvc0_graph_init_gpc_unk_0 },
91 nvc0_graph_init_unk88xx, 90 { nvc0_graph_init_setup_0 },
92 nvc0_graph_tpc_0, 91 { nvc0_graph_init_crstr_0 },
93 NULL 92 { nvc0_graph_init_setup_1 },
93 { nvc0_graph_init_zcull_0 },
94 { nvc0_graph_init_gpm_0 },
95 { nvc0_graph_init_gpc_unk_1 },
96 { nvc0_graph_init_gcc_0 },
97 { nvc0_graph_init_tpccs_0 },
98 { nvc4_graph_init_tex_0 },
99 { nvc4_graph_init_pe_0 },
100 { nvc0_graph_init_l1c_0 },
101 { nvc0_graph_init_wwdx_0 },
102 { nvc0_graph_init_tpccs_1 },
103 { nvc0_graph_init_mpc_0 },
104 { nvc4_graph_init_sm_0 },
105 { nvc0_graph_init_be_0 },
106 { nvc0_graph_init_fe_1 },
107 {}
94}; 108};
95 109
110/*******************************************************************************
111 * PGRAPH engine/subdev functions
112 ******************************************************************************/
113
96struct nouveau_oclass * 114struct nouveau_oclass *
97nvc3_graph_oclass = &(struct nvc0_graph_oclass) { 115nvc4_graph_oclass = &(struct nvc0_graph_oclass) {
98 .base.handle = NV_ENGINE(GR, 0xc3), 116 .base.handle = NV_ENGINE(GR, 0xc3),
99 .base.ofuncs = &(struct nouveau_ofuncs) { 117 .base.ofuncs = &(struct nouveau_ofuncs) {
100 .ctor = nvc0_graph_ctor, 118 .ctor = nvc0_graph_ctor,
@@ -102,9 +120,9 @@ nvc3_graph_oclass = &(struct nvc0_graph_oclass) {
102 .init = nvc0_graph_init, 120 .init = nvc0_graph_init,
103 .fini = _nouveau_graph_fini, 121 .fini = _nouveau_graph_fini,
104 }, 122 },
105 .cclass = &nvc3_grctx_oclass, 123 .cclass = &nvc4_grctx_oclass,
106 .sclass = nvc0_graph_sclass, 124 .sclass = nvc0_graph_sclass,
107 .mmio = nvc3_graph_init_mmio, 125 .mmio = nvc4_graph_pack_mmio,
108 .fecs.ucode = &nvc0_graph_fecs_ucode, 126 .fecs.ucode = &nvc0_graph_fecs_ucode,
109 .gpccs.ucode = &nvc0_graph_gpccs_ucode, 127 .gpccs.ucode = &nvc0_graph_gpccs_ucode,
110}.base; 128}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
index 02845e567314..a6bf783e1256 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -40,58 +41,11 @@ nvc8_graph_sclass[] = {
40}; 41};
41 42
42/******************************************************************************* 43/*******************************************************************************
43 * PGRAPH engine/subdev functions 44 * PGRAPH register lists
44 ******************************************************************************/ 45 ******************************************************************************/
45 46
46static struct nvc0_graph_init 47static const struct nvc0_graph_init
47nvc8_graph_init_gpc[] = { 48nvc8_graph_init_sm_0[] = {
48 { 0x4184a0, 1, 0x04, 0x00000000 },
49 { 0x418604, 1, 0x04, 0x00000000 },
50 { 0x418680, 1, 0x04, 0x00000000 },
51 { 0x418714, 1, 0x04, 0x80000000 },
52 { 0x418384, 1, 0x04, 0x00000000 },
53 { 0x418814, 3, 0x04, 0x00000000 },
54 { 0x418b04, 1, 0x04, 0x00000000 },
55 { 0x4188c8, 2, 0x04, 0x00000000 },
56 { 0x4188d0, 1, 0x04, 0x00010000 },
57 { 0x4188d4, 1, 0x04, 0x00000001 },
58 { 0x418910, 1, 0x04, 0x00010001 },
59 { 0x418914, 1, 0x04, 0x00000301 },
60 { 0x418918, 1, 0x04, 0x00800000 },
61 { 0x418980, 1, 0x04, 0x77777770 },
62 { 0x418984, 3, 0x04, 0x77777777 },
63 { 0x418c04, 1, 0x04, 0x00000000 },
64 { 0x418c88, 1, 0x04, 0x00000000 },
65 { 0x418d00, 1, 0x04, 0x00000000 },
66 { 0x418f08, 1, 0x04, 0x00000000 },
67 { 0x418e00, 1, 0x04, 0x00000050 },
68 { 0x418e08, 1, 0x04, 0x00000000 },
69 { 0x41900c, 1, 0x04, 0x00000000 },
70 { 0x419018, 1, 0x04, 0x00000000 },
71 {}
72};
73
74static struct nvc0_graph_init
75nvc8_graph_init_tpc[] = {
76 { 0x419d08, 2, 0x04, 0x00000000 },
77 { 0x419d10, 1, 0x04, 0x00000014 },
78 { 0x419ab0, 1, 0x04, 0x00000000 },
79 { 0x419ab8, 1, 0x04, 0x000000e7 },
80 { 0x419abc, 2, 0x04, 0x00000000 },
81 { 0x41980c, 3, 0x04, 0x00000000 },
82 { 0x419844, 1, 0x04, 0x00000000 },
83 { 0x41984c, 1, 0x04, 0x00005bc5 },
84 { 0x419850, 4, 0x04, 0x00000000 },
85 { 0x419c98, 1, 0x04, 0x00000000 },
86 { 0x419ca8, 1, 0x04, 0x80000000 },
87 { 0x419cb4, 1, 0x04, 0x00000000 },
88 { 0x419cb8, 1, 0x04, 0x00008bf4 },
89 { 0x419cbc, 1, 0x04, 0x28137606 },
90 { 0x419cc0, 2, 0x04, 0x00000000 },
91 { 0x419bd4, 1, 0x04, 0x00800000 },
92 { 0x419bdc, 1, 0x04, 0x00000000 },
93 { 0x419d2c, 1, 0x04, 0x00000000 },
94 { 0x419c0c, 1, 0x04, 0x00000000 },
95 { 0x419e00, 1, 0x04, 0x00000000 }, 49 { 0x419e00, 1, 0x04, 0x00000000 },
96 { 0x419ea0, 1, 0x04, 0x00000000 }, 50 { 0x419ea0, 1, 0x04, 0x00000000 },
97 { 0x419ea4, 1, 0x04, 0x00000100 }, 51 { 0x419ea4, 1, 0x04, 0x00000100 },
@@ -108,22 +62,42 @@ nvc8_graph_init_tpc[] = {
108 {} 62 {}
109}; 63};
110 64
111static struct nvc0_graph_init * 65static const struct nvc0_graph_pack
112nvc8_graph_init_mmio[] = { 66nvc8_graph_pack_mmio[] = {
113 nvc0_graph_init_regs, 67 { nvc0_graph_init_main_0 },
114 nvc0_graph_init_unk40xx, 68 { nvc0_graph_init_fe_0 },
115 nvc0_graph_init_unk44xx, 69 { nvc0_graph_init_pri_0 },
116 nvc0_graph_init_unk78xx, 70 { nvc0_graph_init_rstr2d_0 },
117 nvc0_graph_init_unk60xx, 71 { nvc0_graph_init_pd_0 },
118 nvc0_graph_init_unk58xx, 72 { nvc0_graph_init_ds_0 },
119 nvc0_graph_init_unk80xx, 73 { nvc0_graph_init_scc_0 },
120 nvc8_graph_init_gpc, 74 { nvc0_graph_init_prop_0 },
121 nvc8_graph_init_tpc, 75 { nvc0_graph_init_gpc_unk_0 },
122 nvc0_graph_init_unk88xx, 76 { nvc0_graph_init_setup_0 },
123 nvc0_graph_tpc_0, 77 { nvc0_graph_init_crstr_0 },
124 NULL 78 { nvc1_graph_init_setup_1 },
79 { nvc0_graph_init_zcull_0 },
80 { nvc0_graph_init_gpm_0 },
81 { nvc0_graph_init_gpc_unk_1 },
82 { nvc0_graph_init_gcc_0 },
83 { nvc0_graph_init_tpccs_0 },
84 { nvc0_graph_init_tex_0 },
85 { nvc0_graph_init_pe_0 },
86 { nvc0_graph_init_l1c_0 },
87 { nvc0_graph_init_wwdx_0 },
88 { nvc0_graph_init_tpccs_1 },
89 { nvc0_graph_init_mpc_0 },
90 { nvc8_graph_init_sm_0 },
91 { nvc0_graph_init_be_0 },
92 { nvc0_graph_init_fe_1 },
93 { nvc0_graph_init_pe_1 },
94 {}
125}; 95};
126 96
97/*******************************************************************************
98 * PGRAPH engine/subdev functions
99 ******************************************************************************/
100
127struct nouveau_oclass * 101struct nouveau_oclass *
128nvc8_graph_oclass = &(struct nvc0_graph_oclass) { 102nvc8_graph_oclass = &(struct nvc0_graph_oclass) {
129 .base.handle = NV_ENGINE(GR, 0xc8), 103 .base.handle = NV_ENGINE(GR, 0xc8),
@@ -135,7 +109,7 @@ nvc8_graph_oclass = &(struct nvc0_graph_oclass) {
135 }, 109 },
136 .cclass = &nvc8_grctx_oclass, 110 .cclass = &nvc8_grctx_oclass,
137 .sclass = nvc8_graph_sclass, 111 .sclass = nvc8_graph_sclass,
138 .mmio = nvc8_graph_init_mmio, 112 .mmio = nvc8_graph_pack_mmio,
139 .fecs.ucode = &nvc0_graph_fecs_ucode, 113 .fecs.ucode = &nvc0_graph_fecs_ucode,
140 .gpccs.ucode = &nvc0_graph_gpccs_ucode, 114 .gpccs.ucode = &nvc0_graph_gpccs_ucode,
141}.base; 115}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
index 5052d7ab4d72..2a6a94e2a041 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
@@ -23,6 +23,77 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
27
28/*******************************************************************************
29 * PGRAPH register lists
30 ******************************************************************************/
31
32static const struct nvc0_graph_init
33nvd7_graph_init_pe_0[] = {
34 { 0x41980c, 1, 0x04, 0x00000010 },
35 { 0x419844, 1, 0x04, 0x00000000 },
36 { 0x41984c, 1, 0x04, 0x00005bc8 },
37 { 0x419850, 3, 0x04, 0x00000000 },
38 {}
39};
40
41const struct nvc0_graph_init
42nvd7_graph_init_pes_0[] = {
43 { 0x41be04, 1, 0x04, 0x00000000 },
44 { 0x41be08, 1, 0x04, 0x00000004 },
45 { 0x41be0c, 1, 0x04, 0x00000000 },
46 { 0x41be10, 1, 0x04, 0x003b8bc7 },
47 { 0x41be14, 2, 0x04, 0x00000000 },
48 {}
49};
50
51const struct nvc0_graph_init
52nvd7_graph_init_wwdx_0[] = {
53 { 0x41bfd4, 1, 0x04, 0x00800000 },
54 { 0x41bfdc, 1, 0x04, 0x00000000 },
55 { 0x41bff8, 2, 0x04, 0x00000000 },
56 {}
57};
58
59const struct nvc0_graph_init
60nvd7_graph_init_cbm_0[] = {
61 { 0x41becc, 1, 0x04, 0x00000000 },
62 { 0x41bee8, 2, 0x04, 0x00000000 },
63 {}
64};
65
66static const struct nvc0_graph_pack
67nvd7_graph_pack_mmio[] = {
68 { nvc0_graph_init_main_0 },
69 { nvc0_graph_init_fe_0 },
70 { nvc0_graph_init_pri_0 },
71 { nvc0_graph_init_rstr2d_0 },
72 { nvd9_graph_init_pd_0 },
73 { nvd9_graph_init_ds_0 },
74 { nvc0_graph_init_scc_0 },
75 { nvd9_graph_init_prop_0 },
76 { nvc1_graph_init_gpc_unk_0 },
77 { nvc0_graph_init_setup_0 },
78 { nvc0_graph_init_crstr_0 },
79 { nvc1_graph_init_setup_1 },
80 { nvc0_graph_init_zcull_0 },
81 { nvd9_graph_init_gpm_0 },
82 { nvd9_graph_init_gpc_unk_1 },
83 { nvc0_graph_init_gcc_0 },
84 { nvc0_graph_init_tpccs_0 },
85 { nvd9_graph_init_tex_0 },
86 { nvd7_graph_init_pe_0 },
87 { nvc0_graph_init_l1c_0 },
88 { nvc0_graph_init_mpc_0 },
89 { nvd9_graph_init_sm_0 },
90 { nvd7_graph_init_pes_0 },
91 { nvd7_graph_init_wwdx_0 },
92 { nvd7_graph_init_cbm_0 },
93 { nvc0_graph_init_be_0 },
94 { nvd9_graph_init_fe_1 },
95 {}
96};
26 97
27/******************************************************************************* 98/*******************************************************************************
28 * PGRAPH engine/subdev functions 99 * PGRAPH engine/subdev functions
@@ -48,108 +119,6 @@ nvd7_graph_gpccs_ucode = {
48 .data.size = sizeof(nvd7_grgpc_data), 119 .data.size = sizeof(nvd7_grgpc_data),
49}; 120};
50 121
51static struct nvc0_graph_init
52nvd7_graph_init_gpc[] = {
53 { 0x418408, 1, 0x04, 0x00000000 },
54 { 0x4184a0, 1, 0x04, 0x00000000 },
55 { 0x4184a4, 2, 0x04, 0x00000000 },
56 { 0x418604, 1, 0x04, 0x00000000 },
57 { 0x418680, 1, 0x04, 0x00000000 },
58 { 0x418714, 1, 0x04, 0x00000000 },
59 { 0x418384, 1, 0x04, 0x00000000 },
60 { 0x418814, 3, 0x04, 0x00000000 },
61 { 0x418b04, 1, 0x04, 0x00000000 },
62 { 0x4188c8, 2, 0x04, 0x00000000 },
63 { 0x4188d0, 1, 0x04, 0x00010000 },
64 { 0x4188d4, 1, 0x04, 0x00000001 },
65 { 0x418910, 1, 0x04, 0x00010001 },
66 { 0x418914, 1, 0x04, 0x00000301 },
67 { 0x418918, 1, 0x04, 0x00800000 },
68 { 0x418980, 1, 0x04, 0x77777770 },
69 { 0x418984, 3, 0x04, 0x77777777 },
70 { 0x418c04, 1, 0x04, 0x00000000 },
71 { 0x418c64, 1, 0x04, 0x00000000 },
72 { 0x418c68, 1, 0x04, 0x00000000 },
73 { 0x418c88, 1, 0x04, 0x00000000 },
74 { 0x418cb4, 2, 0x04, 0x00000000 },
75 { 0x418d00, 1, 0x04, 0x00000000 },
76 { 0x418d28, 1, 0x04, 0x00000000 },
77 { 0x418f00, 1, 0x04, 0x00000000 },
78 { 0x418f08, 1, 0x04, 0x00000000 },
79 { 0x418f20, 2, 0x04, 0x00000000 },
80 { 0x418e00, 1, 0x04, 0x00000003 },
81 { 0x418e08, 1, 0x04, 0x00000000 },
82 { 0x418e1c, 1, 0x04, 0x00000000 },
83 { 0x418e20, 1, 0x04, 0x00000000 },
84 { 0x41900c, 1, 0x04, 0x00000000 },
85 { 0x419018, 1, 0x04, 0x00000000 },
86 {}
87};
88
89static struct nvc0_graph_init
90nvd7_graph_init_tpc[] = {
91 { 0x419d08, 2, 0x04, 0x00000000 },
92 { 0x419d10, 1, 0x04, 0x00000014 },
93 { 0x419ab0, 1, 0x04, 0x00000000 },
94 { 0x419ac8, 1, 0x04, 0x00000000 },
95 { 0x419ab8, 1, 0x04, 0x000000e7 },
96 { 0x419abc, 2, 0x04, 0x00000000 },
97 { 0x419ab4, 1, 0x04, 0x00000000 },
98 { 0x41980c, 1, 0x04, 0x00000010 },
99 { 0x419844, 1, 0x04, 0x00000000 },
100 { 0x41984c, 1, 0x04, 0x00005bc8 },
101 { 0x419850, 2, 0x04, 0x00000000 },
102 { 0x419c98, 1, 0x04, 0x00000000 },
103 { 0x419ca8, 1, 0x04, 0x80000000 },
104 { 0x419cb4, 1, 0x04, 0x00000000 },
105 { 0x419cb8, 1, 0x04, 0x00008bf4 },
106 { 0x419cbc, 1, 0x04, 0x28137606 },
107 { 0x419cc0, 2, 0x04, 0x00000000 },
108 { 0x419c0c, 1, 0x04, 0x00000000 },
109 { 0x419e00, 1, 0x04, 0x00000000 },
110 { 0x419ea0, 1, 0x04, 0x00000000 },
111 { 0x419ea4, 1, 0x04, 0x00000100 },
112 { 0x419ea8, 1, 0x04, 0x02001100 },
113 { 0x419eac, 1, 0x04, 0x11100702 },
114 { 0x419eb0, 1, 0x04, 0x00000003 },
115 { 0x419eb4, 4, 0x04, 0x00000000 },
116 { 0x419ec8, 1, 0x04, 0x0e063818 },
117 { 0x419ecc, 1, 0x04, 0x0e060e06 },
118 { 0x419ed0, 1, 0x04, 0x00003818 },
119 { 0x419ed4, 1, 0x04, 0x011104f1 },
120 { 0x419edc, 1, 0x04, 0x00000000 },
121 { 0x419f00, 1, 0x04, 0x00000000 },
122 { 0x419f2c, 1, 0x04, 0x00000000 },
123 {}
124};
125
126static struct nvc0_graph_init
127nvd7_graph_init_tpc_0[] = {
128 { 0x40402c, 1, 0x04, 0x00000000 },
129 { 0x4040f0, 1, 0x04, 0x00000000 },
130 { 0x404174, 1, 0x04, 0x00000000 },
131 { 0x503018, 1, 0x04, 0x00000001 },
132 {}
133};
134
135static struct nvc0_graph_init *
136nvd7_graph_init_mmio[] = {
137 nvc0_graph_init_regs,
138 nvc0_graph_init_unk40xx,
139 nvc0_graph_init_unk44xx,
140 nvc0_graph_init_unk78xx,
141 nvc0_graph_init_unk60xx,
142 nvd9_graph_init_unk64xx,
143 nvd9_graph_init_unk58xx,
144 nvc0_graph_init_unk80xx,
145 nvd7_graph_init_gpc,
146 nvd7_graph_init_tpc,
147 nve4_graph_init_unk,
148 nvc0_graph_init_unk88xx,
149 nvd7_graph_init_tpc_0,
150 NULL
151};
152
153struct nouveau_oclass * 122struct nouveau_oclass *
154nvd7_graph_oclass = &(struct nvc0_graph_oclass) { 123nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
155 .base.handle = NV_ENGINE(GR, 0xd7), 124 .base.handle = NV_ENGINE(GR, 0xd7),
@@ -161,7 +130,7 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
161 }, 130 },
162 .cclass = &nvd7_grctx_oclass, 131 .cclass = &nvd7_grctx_oclass,
163 .sclass = nvc8_graph_sclass, 132 .sclass = nvc8_graph_sclass,
164 .mmio = nvd7_graph_init_mmio, 133 .mmio = nvd7_graph_pack_mmio,
165 .fecs.ucode = &nvd7_graph_fecs_ucode, 134 .fecs.ucode = &nvd7_graph_fecs_ucode,
166 .gpccs.ucode = &nvd7_graph_gpccs_ucode, 135 .gpccs.ucode = &nvd7_graph_gpccs_ucode,
167}.base; 136}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
index 652098e0df3f..00fdf202fb92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd9.c
@@ -23,76 +23,70 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * PGRAPH engine/subdev functions 29 * PGRAPH register lists
29 ******************************************************************************/ 30 ******************************************************************************/
30 31
31struct nvc0_graph_init 32const struct nvc0_graph_init
32nvd9_graph_init_unk64xx[] = { 33nvd9_graph_init_pd_0[] = {
34 { 0x406024, 1, 0x04, 0x00000000 },
33 { 0x4064f0, 3, 0x04, 0x00000000 }, 35 { 0x4064f0, 3, 0x04, 0x00000000 },
34 {} 36 {}
35}; 37};
36 38
37struct nvc0_graph_init 39const struct nvc0_graph_init
38nvd9_graph_init_unk58xx[] = { 40nvd9_graph_init_ds_0[] = {
39 { 0x405844, 1, 0x04, 0x00ffffff }, 41 { 0x405844, 1, 0x04, 0x00ffffff },
40 { 0x405850, 1, 0x04, 0x00000000 }, 42 { 0x405850, 1, 0x04, 0x00000000 },
41 { 0x405900, 1, 0x04, 0x00002834 }, 43 { 0x405900, 1, 0x04, 0x00002834 },
42 { 0x405908, 1, 0x04, 0x00000000 }, 44 { 0x405908, 1, 0x04, 0x00000000 },
43 { 0x405928, 1, 0x04, 0x00000000 }, 45 { 0x405928, 2, 0x04, 0x00000000 },
44 { 0x40592c, 1, 0x04, 0x00000000 },
45 {} 46 {}
46}; 47};
47 48
48static struct nvc0_graph_init 49const struct nvc0_graph_init
49nvd9_graph_init_gpc[] = { 50nvd9_graph_init_prop_0[] = {
50 { 0x418408, 1, 0x04, 0x00000000 }, 51 { 0x418408, 1, 0x04, 0x00000000 },
51 { 0x4184a0, 1, 0x04, 0x00000000 }, 52 { 0x4184a0, 3, 0x04, 0x00000000 },
52 { 0x4184a4, 2, 0x04, 0x00000000 }, 53 {}
53 { 0x418604, 1, 0x04, 0x00000000 }, 54};
54 { 0x418680, 1, 0x04, 0x00000000 }, 55
55 { 0x418714, 1, 0x04, 0x00000000 }, 56const struct nvc0_graph_init
56 { 0x418384, 1, 0x04, 0x00000000 }, 57nvd9_graph_init_gpm_0[] = {
57 { 0x418814, 3, 0x04, 0x00000000 },
58 { 0x418b04, 1, 0x04, 0x00000000 },
59 { 0x4188c8, 2, 0x04, 0x00000000 },
60 { 0x4188d0, 1, 0x04, 0x00010000 },
61 { 0x4188d4, 1, 0x04, 0x00000001 },
62 { 0x418910, 1, 0x04, 0x00010001 },
63 { 0x418914, 1, 0x04, 0x00000301 },
64 { 0x418918, 1, 0x04, 0x00800000 },
65 { 0x418980, 1, 0x04, 0x77777770 },
66 { 0x418984, 3, 0x04, 0x77777777 },
67 { 0x418c04, 1, 0x04, 0x00000000 }, 58 { 0x418c04, 1, 0x04, 0x00000000 },
68 { 0x418c64, 1, 0x04, 0x00000000 }, 59 { 0x418c64, 2, 0x04, 0x00000000 },
69 { 0x418c68, 1, 0x04, 0x00000000 },
70 { 0x418c88, 1, 0x04, 0x00000000 }, 60 { 0x418c88, 1, 0x04, 0x00000000 },
71 { 0x418cb4, 2, 0x04, 0x00000000 }, 61 { 0x418cb4, 2, 0x04, 0x00000000 },
62 {}
63};
64
65const struct nvc0_graph_init
66nvd9_graph_init_gpc_unk_1[] = {
72 { 0x418d00, 1, 0x04, 0x00000000 }, 67 { 0x418d00, 1, 0x04, 0x00000000 },
73 { 0x418d28, 1, 0x04, 0x00000000 }, 68 { 0x418d28, 2, 0x04, 0x00000000 },
74 { 0x418d2c, 1, 0x04, 0x00000000 },
75 { 0x418f00, 1, 0x04, 0x00000000 }, 69 { 0x418f00, 1, 0x04, 0x00000000 },
76 { 0x418f08, 1, 0x04, 0x00000000 }, 70 { 0x418f08, 1, 0x04, 0x00000000 },
77 { 0x418f20, 2, 0x04, 0x00000000 }, 71 { 0x418f20, 2, 0x04, 0x00000000 },
78 { 0x418e00, 1, 0x04, 0x00000003 }, 72 { 0x418e00, 1, 0x04, 0x00000003 },
79 { 0x418e08, 1, 0x04, 0x00000000 }, 73 { 0x418e08, 1, 0x04, 0x00000000 },
80 { 0x418e1c, 1, 0x04, 0x00000000 }, 74 { 0x418e1c, 2, 0x04, 0x00000000 },
81 { 0x418e20, 1, 0x04, 0x00000000 },
82 { 0x41900c, 1, 0x04, 0x00000000 },
83 { 0x419018, 1, 0x04, 0x00000000 },
84 {} 75 {}
85}; 76};
86 77
87static struct nvc0_graph_init 78const struct nvc0_graph_init
88nvd9_graph_init_tpc[] = { 79nvd9_graph_init_tex_0[] = {
89 { 0x419d08, 2, 0x04, 0x00000000 },
90 { 0x419d10, 1, 0x04, 0x00000014 },
91 { 0x419ab0, 1, 0x04, 0x00000000 }, 80 { 0x419ab0, 1, 0x04, 0x00000000 },
92 { 0x419ac8, 1, 0x04, 0x00000000 }, 81 { 0x419ac8, 1, 0x04, 0x00000000 },
93 { 0x419ab8, 1, 0x04, 0x000000e7 }, 82 { 0x419ab8, 1, 0x04, 0x000000e7 },
94 { 0x419abc, 2, 0x04, 0x00000000 }, 83 { 0x419abc, 2, 0x04, 0x00000000 },
95 { 0x419ab4, 1, 0x04, 0x00000000 }, 84 { 0x419ab4, 1, 0x04, 0x00000000 },
85 {}
86};
87
88static const struct nvc0_graph_init
89nvd9_graph_init_pe_0[] = {
96 { 0x41980c, 1, 0x04, 0x00000010 }, 90 { 0x41980c, 1, 0x04, 0x00000010 },
97 { 0x419810, 1, 0x04, 0x00000000 }, 91 { 0x419810, 1, 0x04, 0x00000000 },
98 { 0x419814, 1, 0x04, 0x00000004 }, 92 { 0x419814, 1, 0x04, 0x00000004 },
@@ -100,20 +94,26 @@ nvd9_graph_init_tpc[] = {
100 { 0x41984c, 1, 0x04, 0x0000a918 }, 94 { 0x41984c, 1, 0x04, 0x0000a918 },
101 { 0x419850, 4, 0x04, 0x00000000 }, 95 { 0x419850, 4, 0x04, 0x00000000 },
102 { 0x419880, 1, 0x04, 0x00000002 }, 96 { 0x419880, 1, 0x04, 0x00000002 },
103 { 0x419c98, 1, 0x04, 0x00000000 }, 97 {}
104 { 0x419ca8, 1, 0x04, 0x80000000 }, 98};
105 { 0x419cb4, 1, 0x04, 0x00000000 }, 99
106 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 100static const struct nvc0_graph_init
107 { 0x419cbc, 1, 0x04, 0x28137606 }, 101nvd9_graph_init_wwdx_0[] = {
108 { 0x419cc0, 2, 0x04, 0x00000000 },
109 { 0x419bd4, 1, 0x04, 0x00800000 }, 102 { 0x419bd4, 1, 0x04, 0x00800000 },
110 { 0x419bdc, 1, 0x04, 0x00000000 }, 103 { 0x419bdc, 1, 0x04, 0x00000000 },
111 { 0x419bf8, 1, 0x04, 0x00000000 }, 104 { 0x419bf8, 2, 0x04, 0x00000000 },
112 { 0x419bfc, 1, 0x04, 0x00000000 }, 105 {}
106};
107
108static const struct nvc0_graph_init
109nvd9_graph_init_tpccs_1[] = {
113 { 0x419d2c, 1, 0x04, 0x00000000 }, 110 { 0x419d2c, 1, 0x04, 0x00000000 },
114 { 0x419d48, 1, 0x04, 0x00000000 }, 111 { 0x419d48, 2, 0x04, 0x00000000 },
115 { 0x419d4c, 1, 0x04, 0x00000000 }, 112 {}
116 { 0x419c0c, 1, 0x04, 0x00000000 }, 113};
114
115const struct nvc0_graph_init
116nvd9_graph_init_sm_0[] = {
117 { 0x419e00, 1, 0x04, 0x00000000 }, 117 { 0x419e00, 1, 0x04, 0x00000000 },
118 { 0x419ea0, 1, 0x04, 0x00000000 }, 118 { 0x419ea0, 1, 0x04, 0x00000000 },
119 { 0x419ea4, 1, 0x04, 0x00000100 }, 119 { 0x419ea4, 1, 0x04, 0x00000100 },
@@ -131,23 +131,49 @@ nvd9_graph_init_tpc[] = {
131 {} 131 {}
132}; 132};
133 133
134static struct nvc0_graph_init * 134const struct nvc0_graph_init
135nvd9_graph_init_mmio[] = { 135nvd9_graph_init_fe_1[] = {
136 nvc0_graph_init_regs, 136 { 0x40402c, 1, 0x04, 0x00000000 },
137 nvc0_graph_init_unk40xx, 137 { 0x4040f0, 1, 0x04, 0x00000000 },
138 nvc0_graph_init_unk44xx, 138 { 0x404174, 1, 0x04, 0x00000000 },
139 nvc0_graph_init_unk78xx, 139 {}
140 nvc0_graph_init_unk60xx,
141 nvd9_graph_init_unk64xx,
142 nvd9_graph_init_unk58xx,
143 nvc0_graph_init_unk80xx,
144 nvd9_graph_init_gpc,
145 nvd9_graph_init_tpc,
146 nvc0_graph_init_unk88xx,
147	nvc0_graph_init_tpc_0,
148 NULL
149}; 140};
150 141
142static const struct nvc0_graph_pack
143nvd9_graph_pack_mmio[] = {
144 { nvc0_graph_init_main_0 },
145 { nvc0_graph_init_fe_0 },
146 { nvc0_graph_init_pri_0 },
147 { nvc0_graph_init_rstr2d_0 },
148 { nvd9_graph_init_pd_0 },
149 { nvd9_graph_init_ds_0 },
150 { nvc0_graph_init_scc_0 },
151 { nvd9_graph_init_prop_0 },
152 { nvc1_graph_init_gpc_unk_0 },
153 { nvc0_graph_init_setup_0 },
154 { nvc0_graph_init_crstr_0 },
155 { nvc1_graph_init_setup_1 },
156 { nvc0_graph_init_zcull_0 },
157 { nvd9_graph_init_gpm_0 },
158 { nvd9_graph_init_gpc_unk_1 },
159 { nvc0_graph_init_gcc_0 },
160 { nvc0_graph_init_tpccs_0 },
161 { nvd9_graph_init_tex_0 },
162 { nvd9_graph_init_pe_0 },
163 { nvc0_graph_init_l1c_0 },
164 { nvd9_graph_init_wwdx_0 },
165 { nvd9_graph_init_tpccs_1 },
166 { nvc0_graph_init_mpc_0 },
167 { nvd9_graph_init_sm_0 },
168 { nvc0_graph_init_be_0 },
169 { nvd9_graph_init_fe_1 },
170 {}
171};
172
173/*******************************************************************************
174 * PGRAPH engine/subdev functions
175 ******************************************************************************/
176
151struct nouveau_oclass * 177struct nouveau_oclass *
152nvd9_graph_oclass = &(struct nvc0_graph_oclass) { 178nvd9_graph_oclass = &(struct nvc0_graph_oclass) {
153 .base.handle = NV_ENGINE(GR, 0xd9), 179 .base.handle = NV_ENGINE(GR, 0xd9),
@@ -159,7 +185,7 @@ nvd9_graph_oclass = &(struct nvc0_graph_oclass) {
159 }, 185 },
160 .cclass = &nvd9_grctx_oclass, 186 .cclass = &nvd9_grctx_oclass,
161 .sclass = nvc8_graph_sclass, 187 .sclass = nvc8_graph_sclass,
162 .mmio = nvd9_graph_init_mmio, 188 .mmio = nvd9_graph_pack_mmio,
163 .fecs.ucode = &nvc0_graph_fecs_ucode, 189 .fecs.ucode = &nvc0_graph_fecs_ucode,
164 .gpccs.ucode = &nvc0_graph_gpccs_ucode, 190 .gpccs.ucode = &nvc0_graph_gpccs_ucode,
165}.base; 191}.base;
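
The nvd9 hunk shows the shape of the new scheme: per-chipset arrays of struct nvc0_graph_init * are replaced by a struct nvc0_graph_pack array whose entries wrap named, const-qualified register lists shared across chipsets (nvd9_graph_init_pd_0, for instance, is reused by the nve4 and nvf0 packs below). A plausible sketch of the walker that lets .mmio = ..._pack_mmio replace the old per-list loop; the real body lives in nvc0.c and is not part of this hunk, so treat the details as assumptions:

/* sketch: walk a NULL-terminated pack of empty-terminated init lists.
 * matches the call sites in this diff; the actual implementation in
 * nvc0.c may differ in detail. */
void
nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
	for (; p && p->init; p++) {				/* each pack entry */
		const struct nvc0_graph_init *init;
		for (init = p->init; init->count; init++) {	/* each row */
			u32 addr = init->addr;
			u32 i;
			for (i = 0; i < init->count; i++, addr += init->pitch)
				nv_wr32(priv, addr, init->data);
		}
	}
}

This is why the nve4.c hunk below can drop the "for (i = 0; oclass->mmio[i]; i++)" loop in favour of a single nvc0_graph_mmio(priv, oclass->mmio) call.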
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index 05ec09c88517..f7c011217175 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -38,11 +39,11 @@ nve4_graph_sclass[] = {
38}; 39};
39 40
40/******************************************************************************* 41/*******************************************************************************
41 * PGRAPH engine/subdev functions 42 * PGRAPH register lists
42 ******************************************************************************/ 43 ******************************************************************************/
43 44
44struct nvc0_graph_init 45const struct nvc0_graph_init
45nve4_graph_init_regs[] = { 46nve4_graph_init_main_0[] = {
46 { 0x400080, 1, 0x04, 0x003083c2 }, 47 { 0x400080, 1, 0x04, 0x003083c2 },
47 { 0x400088, 1, 0x04, 0x0001ffe7 }, 48 { 0x400088, 1, 0x04, 0x0001ffe7 },
48 { 0x40008c, 1, 0x04, 0x00000000 }, 49 { 0x40008c, 1, 0x04, 0x00000000 },
@@ -57,81 +58,59 @@ nve4_graph_init_regs[] = {
57 {} 58 {}
58}; 59};
59 60
60static struct nvc0_graph_init 61static const struct nvc0_graph_init
61nve4_graph_init_unk58xx[] = { 62nve4_graph_init_ds_0[] = {
62 { 0x405844, 1, 0x04, 0x00ffffff }, 63 { 0x405844, 1, 0x04, 0x00ffffff },
63 { 0x405850, 1, 0x04, 0x00000000 }, 64 { 0x405850, 1, 0x04, 0x00000000 },
64 { 0x405900, 1, 0x04, 0x0000ff34 }, 65 { 0x405900, 1, 0x04, 0x0000ff34 },
65 { 0x405908, 1, 0x04, 0x00000000 }, 66 { 0x405908, 1, 0x04, 0x00000000 },
66 { 0x405928, 1, 0x04, 0x00000000 }, 67 { 0x405928, 2, 0x04, 0x00000000 },
67 { 0x40592c, 1, 0x04, 0x00000000 },
68 {} 68 {}
69}; 69};
70 70
71static struct nvc0_graph_init 71static const struct nvc0_graph_init
72nve4_graph_init_unk70xx[] = { 72nve4_graph_init_sked_0[] = {
73 { 0x407010, 1, 0x04, 0x00000000 }, 73 { 0x407010, 1, 0x04, 0x00000000 },
74 {} 74 {}
75}; 75};
76 76
77struct nvc0_graph_init 77static const struct nvc0_graph_init
78nve4_graph_init_unk5bxx[] = { 78nve4_graph_init_cwd_0[] = {
79 { 0x405b50, 1, 0x04, 0x00000000 }, 79 { 0x405b50, 1, 0x04, 0x00000000 },
80 {} 80 {}
81}; 81};
82 82
83static struct nvc0_graph_init 83static const struct nvc0_graph_init
84nve4_graph_init_gpc[] = { 84nve4_graph_init_gpc_unk_1[] = {
85 { 0x418408, 1, 0x04, 0x00000000 },
86 { 0x4184a0, 1, 0x04, 0x00000000 },
87 { 0x4184a4, 2, 0x04, 0x00000000 },
88 { 0x418604, 1, 0x04, 0x00000000 },
89 { 0x418680, 1, 0x04, 0x00000000 },
90 { 0x418714, 1, 0x04, 0x00000000 },
91 { 0x418384, 1, 0x04, 0x00000000 },
92 { 0x418814, 3, 0x04, 0x00000000 },
93 { 0x418b04, 1, 0x04, 0x00000000 },
94 { 0x4188c8, 2, 0x04, 0x00000000 },
95 { 0x4188d0, 1, 0x04, 0x00010000 },
96 { 0x4188d4, 1, 0x04, 0x00000001 },
97 { 0x418910, 1, 0x04, 0x00010001 },
98 { 0x418914, 1, 0x04, 0x00000301 },
99 { 0x418918, 1, 0x04, 0x00800000 },
100 { 0x418980, 1, 0x04, 0x77777770 },
101 { 0x418984, 3, 0x04, 0x77777777 },
102 { 0x418c04, 1, 0x04, 0x00000000 },
103 { 0x418c64, 1, 0x04, 0x00000000 },
104 { 0x418c68, 1, 0x04, 0x00000000 },
105 { 0x418c88, 1, 0x04, 0x00000000 },
106 { 0x418cb4, 2, 0x04, 0x00000000 },
107 { 0x418d00, 1, 0x04, 0x00000000 }, 85 { 0x418d00, 1, 0x04, 0x00000000 },
108 { 0x418d28, 1, 0x04, 0x00000000 }, 86 { 0x418d28, 2, 0x04, 0x00000000 },
109 { 0x418d2c, 1, 0x04, 0x00000000 },
110 { 0x418f00, 1, 0x04, 0x00000000 }, 87 { 0x418f00, 1, 0x04, 0x00000000 },
111 { 0x418f08, 1, 0x04, 0x00000000 }, 88 { 0x418f08, 1, 0x04, 0x00000000 },
112 { 0x418f20, 2, 0x04, 0x00000000 }, 89 { 0x418f20, 2, 0x04, 0x00000000 },
113 { 0x418e00, 1, 0x04, 0x00000060 }, 90 { 0x418e00, 1, 0x04, 0x00000060 },
114 { 0x418e08, 1, 0x04, 0x00000000 }, 91 { 0x418e08, 1, 0x04, 0x00000000 },
115 { 0x418e1c, 1, 0x04, 0x00000000 }, 92 { 0x418e1c, 2, 0x04, 0x00000000 },
116 { 0x418e20, 1, 0x04, 0x00000000 },
117 { 0x41900c, 1, 0x04, 0x00000000 },
118 { 0x419018, 1, 0x04, 0x00000000 },
119 {} 93 {}
120}; 94};
121 95
122static struct nvc0_graph_init 96const struct nvc0_graph_init
123nve4_graph_init_tpc[] = { 97nve4_graph_init_tpccs_0[] = {
124 { 0x419d0c, 1, 0x04, 0x00000000 }, 98 { 0x419d0c, 1, 0x04, 0x00000000 },
125 { 0x419d10, 1, 0x04, 0x00000014 }, 99 { 0x419d10, 1, 0x04, 0x00000014 },
126 { 0x419ab0, 1, 0x04, 0x00000000 }, 100 {}
127 { 0x419ac8, 1, 0x04, 0x00000000 }, 101};
128 { 0x419ab8, 1, 0x04, 0x000000e7 }, 102
129 { 0x419abc, 2, 0x04, 0x00000000 }, 103const struct nvc0_graph_init
130 { 0x419ab4, 1, 0x04, 0x00000000 }, 104nve4_graph_init_pe_0[] = {
131 { 0x41980c, 1, 0x04, 0x00000010 }, 105 { 0x41980c, 1, 0x04, 0x00000010 },
132 { 0x419844, 1, 0x04, 0x00000000 }, 106 { 0x419844, 1, 0x04, 0x00000000 },
133 { 0x419850, 1, 0x04, 0x00000004 }, 107 { 0x419850, 1, 0x04, 0x00000004 },
134 { 0x419854, 2, 0x04, 0x00000000 }, 108 { 0x419854, 2, 0x04, 0x00000000 },
109 {}
110};
111
112static const struct nvc0_graph_init
113nve4_graph_init_l1c_0[] = {
135 { 0x419c98, 1, 0x04, 0x00000000 }, 114 { 0x419c98, 1, 0x04, 0x00000000 },
136 { 0x419ca8, 1, 0x04, 0x00000000 }, 115 { 0x419ca8, 1, 0x04, 0x00000000 },
137 { 0x419cb0, 1, 0x04, 0x01000000 }, 116 { 0x419cb0, 1, 0x04, 0x01000000 },
@@ -141,39 +120,25 @@ nve4_graph_init_tpc[] = {
141 { 0x419cbc, 1, 0x04, 0x28137646 }, 120 { 0x419cbc, 1, 0x04, 0x28137646 },
142 { 0x419cc0, 2, 0x04, 0x00000000 }, 121 { 0x419cc0, 2, 0x04, 0x00000000 },
143 { 0x419c80, 1, 0x04, 0x00020232 }, 122 { 0x419c80, 1, 0x04, 0x00020232 },
144 { 0x419c0c, 1, 0x04, 0x00000000 }, 123 {}
124};
125
126static const struct nvc0_graph_init
127nve4_graph_init_sm_0[] = {
145 { 0x419e00, 1, 0x04, 0x00000000 }, 128 { 0x419e00, 1, 0x04, 0x00000000 },
146 { 0x419ea0, 1, 0x04, 0x00000000 }, 129 { 0x419ea0, 1, 0x04, 0x00000000 },
147 { 0x419ee4, 1, 0x04, 0x00000000 }, 130 { 0x419ee4, 1, 0x04, 0x00000000 },
148 { 0x419ea4, 1, 0x04, 0x00000100 }, 131 { 0x419ea4, 1, 0x04, 0x00000100 },
149 { 0x419ea8, 1, 0x04, 0x00000000 }, 132 { 0x419ea8, 1, 0x04, 0x00000000 },
150 { 0x419eb4, 1, 0x04, 0x00000000 }, 133 { 0x419eb4, 4, 0x04, 0x00000000 },
151 { 0x419eb8, 3, 0x04, 0x00000000 },
152 { 0x419edc, 1, 0x04, 0x00000000 }, 134 { 0x419edc, 1, 0x04, 0x00000000 },
153 { 0x419f00, 1, 0x04, 0x00000000 }, 135 { 0x419f00, 1, 0x04, 0x00000000 },
154 { 0x419f74, 1, 0x04, 0x00000555 }, 136 { 0x419f74, 1, 0x04, 0x00000555 },
155 {} 137 {}
156}; 138};
157 139
158struct nvc0_graph_init 140const struct nvc0_graph_init
159nve4_graph_init_unk[] = { 141nve4_graph_init_be_0[] = {
160 { 0x41be04, 1, 0x04, 0x00000000 },
161 { 0x41be08, 1, 0x04, 0x00000004 },
162 { 0x41be0c, 1, 0x04, 0x00000000 },
163 { 0x41be10, 1, 0x04, 0x003b8bc7 },
164 { 0x41be14, 2, 0x04, 0x00000000 },
165 { 0x41bfd4, 1, 0x04, 0x00800000 },
166 { 0x41bfdc, 1, 0x04, 0x00000000 },
167 { 0x41bff8, 1, 0x04, 0x00000000 },
168 { 0x41bffc, 1, 0x04, 0x00000000 },
169 { 0x41becc, 1, 0x04, 0x00000000 },
170 { 0x41bee8, 1, 0x04, 0x00000000 },
171 { 0x41beec, 1, 0x04, 0x00000000 },
172 {}
173};
174
175struct nvc0_graph_init
176nve4_graph_init_unk88xx[] = {
177 { 0x40880c, 1, 0x04, 0x00000000 }, 142 { 0x40880c, 1, 0x04, 0x00000000 },
178 { 0x408850, 1, 0x04, 0x00000004 }, 143 { 0x408850, 1, 0x04, 0x00000004 },
179 { 0x408910, 9, 0x04, 0x00000000 }, 144 { 0x408910, 9, 0x04, 0x00000000 },
@@ -186,6 +151,67 @@ nve4_graph_init_unk88xx[] = {
186 {} 151 {}
187}; 152};
188 153
154static const struct nvc0_graph_pack
155nve4_graph_pack_mmio[] = {
156 { nve4_graph_init_main_0 },
157 { nvc0_graph_init_fe_0 },
158 { nvc0_graph_init_pri_0 },
159 { nvc0_graph_init_rstr2d_0 },
160 { nvd9_graph_init_pd_0 },
161 { nve4_graph_init_ds_0 },
162 { nvc0_graph_init_scc_0 },
163 { nve4_graph_init_sked_0 },
164 { nve4_graph_init_cwd_0 },
165 { nvd9_graph_init_prop_0 },
166 { nvc1_graph_init_gpc_unk_0 },
167 { nvc0_graph_init_setup_0 },
168 { nvc0_graph_init_crstr_0 },
169 { nvc1_graph_init_setup_1 },
170 { nvc0_graph_init_zcull_0 },
171 { nvd9_graph_init_gpm_0 },
172 { nve4_graph_init_gpc_unk_1 },
173 { nvc0_graph_init_gcc_0 },
174 { nve4_graph_init_tpccs_0 },
175 { nvd9_graph_init_tex_0 },
176 { nve4_graph_init_pe_0 },
177 { nve4_graph_init_l1c_0 },
178 { nvc0_graph_init_mpc_0 },
179 { nve4_graph_init_sm_0 },
180 { nvd7_graph_init_pes_0 },
181 { nvd7_graph_init_wwdx_0 },
182 { nvd7_graph_init_cbm_0 },
183 { nve4_graph_init_be_0 },
184 { nvc0_graph_init_fe_1 },
185 {}
186};
187
188/*******************************************************************************
189 * PGRAPH engine/subdev functions
190 ******************************************************************************/
191
192static int
193nve4_graph_fini(struct nouveau_object *object, bool suspend)
194{
195 struct nvc0_graph_priv *priv = (void *)object;
196
197 /*XXX: this is a nasty hack to power on gr on certain boards
198 * where it's disabled by therm, somehow. ideally it'd
199 * be nice to know when we should be doing this, and why,
200 * but, it's yet to be determined. for now we test for
201 * the particular mmio error that occurs in the situation,
202 * and then bash therm in the way nvidia do.
203 */
204 nv_mask(priv, 0x000200, 0x08001000, 0x08001000);
205 nv_rd32(priv, 0x000200);
206 if (nv_rd32(priv, 0x400700) == 0xbadf1000) {
207 nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
208 nv_rd32(priv, 0x000200);
209 nv_mask(priv, 0x020004, 0xc0000000, 0x40000000);
210 }
211
212 return nouveau_graph_fini(&priv->base, suspend);
213}
214
189int 215int
190nve4_graph_init(struct nouveau_object *object) 216nve4_graph_init(struct nouveau_object *object)
191{ 217{
@@ -210,8 +236,7 @@ nve4_graph_init(struct nouveau_object *object)
210 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); 236 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
211 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); 237 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
212 238
213 for (i = 0; oclass->mmio[i]; i++) 239 nvc0_graph_mmio(priv, oclass->mmio);
214 nvc0_graph_mmio(priv, oclass->mmio[i]);
215 240
216 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001); 241 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
217 242
@@ -298,25 +323,6 @@ nve4_graph_init(struct nouveau_object *object)
298 return nvc0_graph_init_ctxctl(priv); 323 return nvc0_graph_init_ctxctl(priv);
299} 324}
300 325
301static struct nvc0_graph_init *
302nve4_graph_init_mmio[] = {
303 nve4_graph_init_regs,
304 nvc0_graph_init_unk40xx,
305 nvc0_graph_init_unk44xx,
306 nvc0_graph_init_unk78xx,
307 nvc0_graph_init_unk60xx,
308 nvd9_graph_init_unk64xx,
309 nve4_graph_init_unk58xx,
310 nvc0_graph_init_unk80xx,
311 nve4_graph_init_unk70xx,
312 nve4_graph_init_unk5bxx,
313 nve4_graph_init_gpc,
314 nve4_graph_init_tpc,
315 nve4_graph_init_unk,
316 nve4_graph_init_unk88xx,
317 NULL
318};
319
320#include "fuc/hubnve0.fuc.h" 326#include "fuc/hubnve0.fuc.h"
321 327
322static struct nvc0_graph_ucode 328static struct nvc0_graph_ucode
@@ -344,11 +350,11 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) {
344 .ctor = nvc0_graph_ctor, 350 .ctor = nvc0_graph_ctor,
345 .dtor = nvc0_graph_dtor, 351 .dtor = nvc0_graph_dtor,
346 .init = nve4_graph_init, 352 .init = nve4_graph_init,
347 .fini = _nouveau_graph_fini, 353 .fini = nve4_graph_fini,
348 }, 354 },
349 .cclass = &nve4_grctx_oclass, 355 .cclass = &nve4_grctx_oclass,
350 .sclass = nve4_graph_sclass, 356 .sclass = nve4_graph_sclass,
351 .mmio = nve4_graph_init_mmio, 357 .mmio = nve4_graph_pack_mmio,
352 .fecs.ucode = &nve4_graph_fecs_ucode, 358 .fecs.ucode = &nve4_graph_fecs_ucode,
353 .gpccs.ucode = &nve4_graph_gpccs_ucode, 359 .gpccs.ucode = &nve4_graph_gpccs_ucode,
354}.base; 360}.base;
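
The new nve4_graph_fini() leans on nv_mask() for its read-modify-write pokes at 0x000200 and 0x020004. For reference, nouveau's nv_mask() behaves like the following sketch (a standard RMW helper, shown only to make the therm workaround above easier to read):

/* sketch of nouveau's nv_mask(): read, clear the masked bits,
 * or-in the new data, write back, return the old value. */
static u32
nv_mask_sketch(void *obj, u32 addr, u32 mask, u32 data)
{
	u32 old = nv_rd32(obj, addr);
	nv_wr32(obj, addr, (old & ~mask) | data);
	return old;
}

So nv_mask(priv, 0x000200, 0x08001000, 0x08001000) forces both bits on to power the units back up, and the follow-up nv_rd32() of the same register appears to act as a posting read before the 0x400700 == 0xbadf1000 error check.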
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index b1acb9939d95..c96762122b9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include "nvc0.h" 25#include "nvc0.h"
26#include "ctxnvc0.h"
26 27
27/******************************************************************************* 28/*******************************************************************************
28 * Graphics object classes 29 * Graphics object classes
@@ -38,86 +39,57 @@ nvf0_graph_sclass[] = {
38}; 39};
39 40
40/******************************************************************************* 41/*******************************************************************************
41 * PGRAPH engine/subdev functions 42 * PGRAPH register lists
42 ******************************************************************************/ 43 ******************************************************************************/
43 44
44struct nvc0_graph_init 45const struct nvc0_graph_init
45nvf0_graph_init_unk40xx[] = { 46nvf0_graph_init_fe_0[] = {
46 { 0x40415c, 1, 0x04, 0x00000000 }, 47 { 0x40415c, 1, 0x04, 0x00000000 },
47 { 0x404170, 1, 0x04, 0x00000000 }, 48 { 0x404170, 1, 0x04, 0x00000000 },
48 { 0x4041b4, 1, 0x04, 0x00000000 }, 49 { 0x4041b4, 1, 0x04, 0x00000000 },
49 {} 50 {}
50}; 51};
51 52
52static struct nvc0_graph_init 53static const struct nvc0_graph_init
53nvf0_graph_init_unk58xx[] = { 54nvf0_graph_init_ds_0[] = {
54 { 0x405844, 1, 0x04, 0x00ffffff }, 55 { 0x405844, 1, 0x04, 0x00ffffff },
55 { 0x405850, 1, 0x04, 0x00000000 }, 56 { 0x405850, 1, 0x04, 0x00000000 },
56 { 0x405900, 1, 0x04, 0x0000ff00 }, 57 { 0x405900, 1, 0x04, 0x0000ff00 },
57 { 0x405908, 1, 0x04, 0x00000000 }, 58 { 0x405908, 1, 0x04, 0x00000000 },
58 { 0x405928, 1, 0x04, 0x00000000 }, 59 { 0x405928, 2, 0x04, 0x00000000 },
59 { 0x40592c, 1, 0x04, 0x00000000 },
60 {} 60 {}
61}; 61};
62 62
63struct nvc0_graph_init 63const struct nvc0_graph_init
64nvf0_graph_init_unk70xx[] = { 64nvf0_graph_init_sked_0[] = {
65 { 0x407010, 1, 0x04, 0x00000000 }, 65 { 0x407010, 1, 0x04, 0x00000000 },
66 { 0x407040, 1, 0x04, 0x80440424 }, 66 { 0x407040, 1, 0x04, 0x80440424 },
67 { 0x407048, 1, 0x04, 0x0000000a }, 67 { 0x407048, 1, 0x04, 0x0000000a },
68 {} 68 {}
69}; 69};
70 70
71struct nvc0_graph_init 71const struct nvc0_graph_init
72nvf0_graph_init_unk5bxx[] = { 72nvf0_graph_init_cwd_0[] = {
73 { 0x405b44, 1, 0x04, 0x00000000 }, 73 { 0x405b44, 1, 0x04, 0x00000000 },
74 { 0x405b50, 1, 0x04, 0x00000000 }, 74 { 0x405b50, 1, 0x04, 0x00000000 },
75 {} 75 {}
76}; 76};
77 77
78static struct nvc0_graph_init 78const struct nvc0_graph_init
79nvf0_graph_init_gpc[] = { 79nvf0_graph_init_gpc_unk_1[] = {
80 { 0x418408, 1, 0x04, 0x00000000 },
81 { 0x4184a0, 1, 0x04, 0x00000000 },
82 { 0x4184a4, 2, 0x04, 0x00000000 },
83 { 0x418604, 1, 0x04, 0x00000000 },
84 { 0x418680, 1, 0x04, 0x00000000 },
85 { 0x418714, 1, 0x04, 0x00000000 },
86 { 0x418384, 1, 0x04, 0x00000000 },
87 { 0x418814, 3, 0x04, 0x00000000 },
88 { 0x418b04, 1, 0x04, 0x00000000 },
89 { 0x4188c8, 2, 0x04, 0x00000000 },
90 { 0x4188d0, 1, 0x04, 0x00010000 },
91 { 0x4188d4, 1, 0x04, 0x00000001 },
92 { 0x418910, 1, 0x04, 0x00010001 },
93 { 0x418914, 1, 0x04, 0x00000301 },
94 { 0x418918, 1, 0x04, 0x00800000 },
95 { 0x418980, 1, 0x04, 0x77777770 },
96 { 0x418984, 3, 0x04, 0x77777777 },
97 { 0x418c04, 1, 0x04, 0x00000000 },
98 { 0x418c64, 1, 0x04, 0x00000000 },
99 { 0x418c68, 1, 0x04, 0x00000000 },
100 { 0x418c88, 1, 0x04, 0x00000000 },
101 { 0x418cb4, 2, 0x04, 0x00000000 },
102 { 0x418d00, 1, 0x04, 0x00000000 }, 80 { 0x418d00, 1, 0x04, 0x00000000 },
103 { 0x418d28, 1, 0x04, 0x00000000 }, 81 { 0x418d28, 2, 0x04, 0x00000000 },
104 { 0x418d2c, 1, 0x04, 0x00000000 },
105 { 0x418f00, 1, 0x04, 0x00000400 }, 82 { 0x418f00, 1, 0x04, 0x00000400 },
106 { 0x418f08, 1, 0x04, 0x00000000 }, 83 { 0x418f08, 1, 0x04, 0x00000000 },
107 { 0x418f20, 1, 0x04, 0x00000000 }, 84 { 0x418f20, 2, 0x04, 0x00000000 },
108 { 0x418f24, 1, 0x04, 0x00000000 },
109 { 0x418e00, 1, 0x04, 0x00000000 }, 85 { 0x418e00, 1, 0x04, 0x00000000 },
110 { 0x418e08, 1, 0x04, 0x00000000 }, 86 { 0x418e08, 1, 0x04, 0x00000000 },
111 { 0x418e1c, 2, 0x04, 0x00000000 }, 87 { 0x418e1c, 2, 0x04, 0x00000000 },
112 { 0x41900c, 1, 0x04, 0x00000000 },
113 { 0x419018, 1, 0x04, 0x00000000 },
114 {} 88 {}
115}; 89};
116 90
117struct nvc0_graph_init 91static const struct nvc0_graph_init
118nvf0_graph_init_tpc[] = { 92nvf0_graph_init_tex_0[] = {
119 { 0x419d0c, 1, 0x04, 0x00000000 },
120 { 0x419d10, 1, 0x04, 0x00000014 },
121 { 0x419ab0, 1, 0x04, 0x00000000 }, 93 { 0x419ab0, 1, 0x04, 0x00000000 },
122 { 0x419ac8, 1, 0x04, 0x00000000 }, 94 { 0x419ac8, 1, 0x04, 0x00000000 },
123 { 0x419ab8, 1, 0x04, 0x000000e7 }, 95 { 0x419ab8, 1, 0x04, 0x000000e7 },
@@ -125,10 +97,11 @@ nvf0_graph_init_tpc[] = {
125 { 0x419abc, 2, 0x04, 0x00000000 }, 97 { 0x419abc, 2, 0x04, 0x00000000 },
126 { 0x419ab4, 1, 0x04, 0x00000000 }, 98 { 0x419ab4, 1, 0x04, 0x00000000 },
127 { 0x419aa8, 2, 0x04, 0x00000000 }, 99 { 0x419aa8, 2, 0x04, 0x00000000 },
128 { 0x41980c, 1, 0x04, 0x00000010 }, 100 {}
129 { 0x419844, 1, 0x04, 0x00000000 }, 101};
130 { 0x419850, 1, 0x04, 0x00000004 }, 102
131 { 0x419854, 2, 0x04, 0x00000000 }, 103static const struct nvc0_graph_init
104nvf0_graph_init_l1c_0[] = {
132 { 0x419c98, 1, 0x04, 0x00000000 }, 105 { 0x419c98, 1, 0x04, 0x00000000 },
133 { 0x419ca8, 1, 0x04, 0x00000000 }, 106 { 0x419ca8, 1, 0x04, 0x00000000 },
134 { 0x419cb0, 1, 0x04, 0x01000000 }, 107 { 0x419cb0, 1, 0x04, 0x01000000 },
@@ -139,7 +112,11 @@ nvf0_graph_init_tpc[] = {
139 { 0x419cc0, 2, 0x04, 0x00000000 }, 112 { 0x419cc0, 2, 0x04, 0x00000000 },
140 { 0x419c80, 1, 0x04, 0x00020230 }, 113 { 0x419c80, 1, 0x04, 0x00020230 },
141 { 0x419ccc, 2, 0x04, 0x00000000 }, 114 { 0x419ccc, 2, 0x04, 0x00000000 },
142 { 0x419c0c, 1, 0x04, 0x00000000 }, 115 {}
116};
117
118const struct nvc0_graph_init
119nvf0_graph_init_sm_0[] = {
143 { 0x419e00, 1, 0x04, 0x00000080 }, 120 { 0x419e00, 1, 0x04, 0x00000080 },
144 { 0x419ea0, 1, 0x04, 0x00000000 }, 121 { 0x419ea0, 1, 0x04, 0x00000000 },
145 { 0x419ee4, 1, 0x04, 0x00000000 }, 122 { 0x419ee4, 1, 0x04, 0x00000000 },
@@ -155,6 +132,44 @@ nvf0_graph_init_tpc[] = {
155 {} 132 {}
156}; 133};
157 134
135static const struct nvc0_graph_pack
136nvf0_graph_pack_mmio[] = {
137 { nve4_graph_init_main_0 },
138 { nvf0_graph_init_fe_0 },
139 { nvc0_graph_init_pri_0 },
140 { nvc0_graph_init_rstr2d_0 },
141 { nvd9_graph_init_pd_0 },
142 { nvf0_graph_init_ds_0 },
143 { nvc0_graph_init_scc_0 },
144 { nvf0_graph_init_sked_0 },
145 { nvf0_graph_init_cwd_0 },
146 { nvd9_graph_init_prop_0 },
147 { nvc1_graph_init_gpc_unk_0 },
148 { nvc0_graph_init_setup_0 },
149 { nvc0_graph_init_crstr_0 },
150 { nvc1_graph_init_setup_1 },
151 { nvc0_graph_init_zcull_0 },
152 { nvd9_graph_init_gpm_0 },
153 { nvf0_graph_init_gpc_unk_1 },
154 { nvc0_graph_init_gcc_0 },
155 { nve4_graph_init_tpccs_0 },
156 { nvf0_graph_init_tex_0 },
157 { nve4_graph_init_pe_0 },
158 { nvf0_graph_init_l1c_0 },
159 { nvc0_graph_init_mpc_0 },
160 { nvf0_graph_init_sm_0 },
161 { nvd7_graph_init_pes_0 },
162 { nvd7_graph_init_wwdx_0 },
163 { nvd7_graph_init_cbm_0 },
164 { nve4_graph_init_be_0 },
165 { nvc0_graph_init_fe_1 },
166 {}
167};
168
169/*******************************************************************************
170 * PGRAPH engine/subdev functions
171 ******************************************************************************/
172
158static int 173static int
159nvf0_graph_fini(struct nouveau_object *object, bool suspend) 174nvf0_graph_fini(struct nouveau_object *object, bool suspend)
160{ 175{
@@ -192,25 +207,6 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
192 return nouveau_graph_fini(&priv->base, suspend); 207 return nouveau_graph_fini(&priv->base, suspend);
193} 208}
194 209
195static struct nvc0_graph_init *
196nvf0_graph_init_mmio[] = {
197 nve4_graph_init_regs,
198 nvf0_graph_init_unk40xx,
199 nvc0_graph_init_unk44xx,
200 nvc0_graph_init_unk78xx,
201 nvc0_graph_init_unk60xx,
202 nvd9_graph_init_unk64xx,
203 nvf0_graph_init_unk58xx,
204 nvc0_graph_init_unk80xx,
205 nvf0_graph_init_unk70xx,
206 nvf0_graph_init_unk5bxx,
207 nvf0_graph_init_gpc,
208 nvf0_graph_init_tpc,
209 nve4_graph_init_unk,
210 nve4_graph_init_unk88xx,
211 NULL
212};
213
214#include "fuc/hubnvf0.fuc.h" 210#include "fuc/hubnvf0.fuc.h"
215 211
216static struct nvc0_graph_ucode 212static struct nvc0_graph_ucode
@@ -242,7 +238,7 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
242 }, 238 },
243 .cclass = &nvf0_grctx_oclass, 239 .cclass = &nvf0_grctx_oclass,
244 .sclass = nvf0_graph_sclass, 240 .sclass = nvf0_graph_sclass,
245 .mmio = nvf0_graph_init_mmio, 241 .mmio = nvf0_graph_pack_mmio,
246 .fecs.ucode = &nvf0_graph_fecs_ucode, 242 .fecs.ucode = &nvf0_graph_fecs_ucode,
247 .gpccs.ucode = &nvf0_graph_gpccs_ucode, 243 .gpccs.ucode = &nvf0_graph_gpccs_ucode,
248}.base; 244}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 5f6ede7c4892..92384759d2f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -112,7 +112,7 @@ _nouveau_xtensa_init(struct nouveau_object *object)
112 snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x", 112 snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
113 xtensa->addr >> 12); 113 xtensa->addr >> 12);
114 114
115 ret = request_firmware(&fw, name, &device->pdev->dev); 115 ret = request_firmware(&fw, name, nv_device_base(device));
116 if (ret) { 116 if (ret) {
117 nv_warn(xtensa, "unable to load firmware %s\n", name); 117 nv_warn(xtensa, "unable to load firmware %s\n", name);
118 return ret; 118 return ret;
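
The xtensa change is one of many call sites converted from &device->pdev->dev to nv_device_base(device), so firmware loading keeps working when the GPU sits on a platform bus rather than PCI. A usage sketch under that assumption (the firmware name below is illustrative, and device is the struct nouveau_device * being initialised):

/* sketch: request_firmware() needs a struct device *, which
 * nv_device_base() now supplies for PCI and platform alike. */
const struct firmware *fw;
int ret = request_firmware(&fw, "nouveau/nv84_xuc067", nv_device_base(device));
if (ret == 0) {
	/* ... upload fw->data / fw->size to the engine ... */
	release_firmware(fw);
}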
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index e71a4325e670..9c0cd73462d9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -258,6 +258,7 @@ struct nv04_display_scanoutpos {
258 * 9070: NVD0_DISP 258 * 9070: NVD0_DISP
259 * 9170: NVE0_DISP 259 * 9170: NVE0_DISP
260 * 9270: NVF0_DISP 260 * 9270: NVF0_DISP
261 * 9470: GM107_DISP
261 */ 262 */
262 263
263#define NV50_DISP_CLASS 0x00005070 264#define NV50_DISP_CLASS 0x00005070
@@ -268,6 +269,7 @@ struct nv04_display_scanoutpos {
268#define NVD0_DISP_CLASS 0x00009070 269#define NVD0_DISP_CLASS 0x00009070
269#define NVE0_DISP_CLASS 0x00009170 270#define NVE0_DISP_CLASS 0x00009170
270#define NVF0_DISP_CLASS 0x00009270 271#define NVF0_DISP_CLASS 0x00009270
272#define GM107_DISP_CLASS 0x00009470
271 273
272#define NV50_DISP_MTHD 0x00000000 274#define NV50_DISP_MTHD 0x00000000
273#define NV50_DISP_MTHD_HEAD 0x00000003 275#define NV50_DISP_MTHD_HEAD 0x00000003
@@ -342,6 +344,7 @@ struct nv50_display_class {
342 * 907a: NVD0_DISP_CURS 344 * 907a: NVD0_DISP_CURS
343 * 917a: NVE0_DISP_CURS 345 * 917a: NVE0_DISP_CURS
344 * 927a: NVF0_DISP_CURS 346 * 927a: NVF0_DISP_CURS
347 * 947a: GM107_DISP_CURS
345 */ 348 */
346 349
347#define NV50_DISP_CURS_CLASS 0x0000507a 350#define NV50_DISP_CURS_CLASS 0x0000507a
@@ -352,6 +355,7 @@ struct nv50_display_class {
352#define NVD0_DISP_CURS_CLASS 0x0000907a 355#define NVD0_DISP_CURS_CLASS 0x0000907a
353#define NVE0_DISP_CURS_CLASS 0x0000917a 356#define NVE0_DISP_CURS_CLASS 0x0000917a
354#define NVF0_DISP_CURS_CLASS 0x0000927a 357#define NVF0_DISP_CURS_CLASS 0x0000927a
358#define GM107_DISP_CURS_CLASS 0x0000947a
355 359
356struct nv50_display_curs_class { 360struct nv50_display_curs_class {
357 u32 head; 361 u32 head;
@@ -365,6 +369,7 @@ struct nv50_display_curs_class {
365 * 907b: NVD0_DISP_OIMM 369 * 907b: NVD0_DISP_OIMM
366 * 917b: NVE0_DISP_OIMM 370 * 917b: NVE0_DISP_OIMM
367 * 927b: NVF0_DISP_OIMM 371 * 927b: NVF0_DISP_OIMM
372 * 947b: GM107_DISP_OIMM
368 */ 373 */
369 374
370#define NV50_DISP_OIMM_CLASS 0x0000507b 375#define NV50_DISP_OIMM_CLASS 0x0000507b
@@ -375,6 +380,7 @@ struct nv50_display_curs_class {
375#define NVD0_DISP_OIMM_CLASS 0x0000907b 380#define NVD0_DISP_OIMM_CLASS 0x0000907b
376#define NVE0_DISP_OIMM_CLASS 0x0000917b 381#define NVE0_DISP_OIMM_CLASS 0x0000917b
377#define NVF0_DISP_OIMM_CLASS 0x0000927b 382#define NVF0_DISP_OIMM_CLASS 0x0000927b
383#define GM107_DISP_OIMM_CLASS 0x0000947b
378 384
379struct nv50_display_oimm_class { 385struct nv50_display_oimm_class {
380 u32 head; 386 u32 head;
@@ -388,6 +394,7 @@ struct nv50_display_oimm_class {
388 * 907c: NVD0_DISP_SYNC 394 * 907c: NVD0_DISP_SYNC
389 * 917c: NVE0_DISP_SYNC 395 * 917c: NVE0_DISP_SYNC
390 * 927c: NVF0_DISP_SYNC 396 * 927c: NVF0_DISP_SYNC
397 * 947c: GM107_DISP_SYNC
391 */ 398 */
392 399
393#define NV50_DISP_SYNC_CLASS 0x0000507c 400#define NV50_DISP_SYNC_CLASS 0x0000507c
@@ -398,6 +405,7 @@ struct nv50_display_oimm_class {
398#define NVD0_DISP_SYNC_CLASS 0x0000907c 405#define NVD0_DISP_SYNC_CLASS 0x0000907c
399#define NVE0_DISP_SYNC_CLASS 0x0000917c 406#define NVE0_DISP_SYNC_CLASS 0x0000917c
400#define NVF0_DISP_SYNC_CLASS 0x0000927c 407#define NVF0_DISP_SYNC_CLASS 0x0000927c
408#define GM107_DISP_SYNC_CLASS 0x0000947c
401 409
402struct nv50_display_sync_class { 410struct nv50_display_sync_class {
403 u32 pushbuf; 411 u32 pushbuf;
@@ -412,6 +420,7 @@ struct nv50_display_sync_class {
412 * 907d: NVD0_DISP_MAST 420 * 907d: NVD0_DISP_MAST
413 * 917d: NVE0_DISP_MAST 421 * 917d: NVE0_DISP_MAST
414 * 927d: NVF0_DISP_MAST 422 * 927d: NVF0_DISP_MAST
423 * 947d: GM107_DISP_MAST
415 */ 424 */
416 425
417#define NV50_DISP_MAST_CLASS 0x0000507d 426#define NV50_DISP_MAST_CLASS 0x0000507d
@@ -422,6 +431,7 @@ struct nv50_display_sync_class {
422#define NVD0_DISP_MAST_CLASS 0x0000907d 431#define NVD0_DISP_MAST_CLASS 0x0000907d
423#define NVE0_DISP_MAST_CLASS 0x0000917d 432#define NVE0_DISP_MAST_CLASS 0x0000917d
424#define NVF0_DISP_MAST_CLASS 0x0000927d 433#define NVF0_DISP_MAST_CLASS 0x0000927d
434#define GM107_DISP_MAST_CLASS 0x0000947d
425 435
426struct nv50_display_mast_class { 436struct nv50_display_mast_class {
427 u32 pushbuf; 437 u32 pushbuf;
@@ -435,6 +445,7 @@ struct nv50_display_mast_class {
435 * 907e: NVD0_DISP_OVLY 445 * 907e: NVD0_DISP_OVLY
436 * 917e: NVE0_DISP_OVLY 446 * 917e: NVE0_DISP_OVLY
437 * 927e: NVF0_DISP_OVLY 447 * 927e: NVF0_DISP_OVLY
448 * 947e: GM107_DISP_OVLY
438 */ 449 */
439 450
440#define NV50_DISP_OVLY_CLASS 0x0000507e 451#define NV50_DISP_OVLY_CLASS 0x0000507e
@@ -445,6 +456,7 @@ struct nv50_display_mast_class {
445#define NVD0_DISP_OVLY_CLASS 0x0000907e 456#define NVD0_DISP_OVLY_CLASS 0x0000907e
446#define NVE0_DISP_OVLY_CLASS 0x0000917e 457#define NVE0_DISP_OVLY_CLASS 0x0000917e
447#define NVF0_DISP_OVLY_CLASS 0x0000927e 458#define NVF0_DISP_OVLY_CLASS 0x0000927e
459#define GM107_DISP_OVLY_CLASS 0x0000947e
448 460
449struct nv50_display_ovly_class { 461struct nv50_display_ovly_class {
450 u32 pushbuf; 462 u32 pushbuf;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 7b8ea221b00d..a8a9a9cf16cb 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -40,6 +40,7 @@ enum nv_subdev_type {
40 40
41 NVDEV_ENGINE_FIRST, 41 NVDEV_ENGINE_FIRST,
42 NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST, 42 NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
43 NVDEV_ENGINE_IFB,
43 NVDEV_ENGINE_FIFO, 44 NVDEV_ENGINE_FIFO,
44 NVDEV_ENGINE_SW, 45 NVDEV_ENGINE_SW,
45 NVDEV_ENGINE_GR, 46 NVDEV_ENGINE_GR,
@@ -65,6 +66,7 @@ struct nouveau_device {
65 struct list_head head; 66 struct list_head head;
66 67
67 struct pci_dev *pdev; 68 struct pci_dev *pdev;
69 struct platform_device *platformdev;
68 u64 handle; 70 u64 handle;
69 71
70 const char *cfgopt; 72 const char *cfgopt;
@@ -84,6 +86,7 @@ struct nouveau_device {
84 NV_C0 = 0xc0, 86 NV_C0 = 0xc0,
85 NV_D0 = 0xd0, 87 NV_D0 = 0xd0,
86 NV_E0 = 0xe0, 88 NV_E0 = 0xe0,
89 GM100 = 0x110,
87 } card_type; 90 } card_type;
88 u32 chipset; 91 u32 chipset;
89 u32 crystal; 92 u32 crystal;
@@ -140,4 +143,32 @@ nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
140 device->pdev->subsystem_device == sub; 143 device->pdev->subsystem_device == sub;
141} 144}
142 145
146static inline bool
147nv_device_is_pci(struct nouveau_device *device)
148{
149 return device->pdev != NULL;
150}
151
152static inline struct device *
153nv_device_base(struct nouveau_device *device)
154{
155 return nv_device_is_pci(device) ? &device->pdev->dev :
156 &device->platformdev->dev;
157}
158
159resource_size_t
160nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
161
162resource_size_t
163nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
164
165dma_addr_t
166nv_device_map_page(struct nouveau_device *device, struct page *page);
167
168void
169nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
170
171int
172nv_device_get_irq(struct nouveau_device *device, bool stall);
173
143#endif 174#endif
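
The new accessors abstract away which bus the GPU hangs off: nv_device_base() picks &pdev->dev or &platformdev->dev, and the nv_device_resource_*() functions presumably do the equivalent dispatch for BAR lookups. Their bodies are not in this hunk, so the following is a sketch of the obvious shape, not the actual implementation:

/* sketch: plausible implementation of one of the resource helpers
 * declared above; the platform branch in particular is an assumption. */
resource_size_t
nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
{
	if (nv_device_is_pci(device)) {
		return pci_resource_start(device->pdev, bar);
	} else {
		struct resource *res =
			platform_get_resource(device->platformdev,
					      IORESOURCE_MEM, bar);
		return res ? res->start : 0;
	}
}

The bar/base.c and bar/nvc0.c hunks below convert their pci_resource_start()/pci_resource_len() calls to exactly these helpers.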
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
index 8897e0886085..f5b5fd8e1fc9 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/namedb.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -33,7 +33,7 @@ nv_namedb(void *obj)
33 33
34int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *, 34int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, u32 pclass, 35 struct nouveau_oclass *, u32 pclass,
36 struct nouveau_oclass *, u32 engcls, 36 struct nouveau_oclass *, u64 engcls,
37 int size, void **); 37 int size, void **);
38 38
39int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *, 39int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h
index b3dd2c4c2f1e..672d3c8f4145 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -3,11 +3,20 @@
3 3
4#include <core/device.h> 4#include <core/device.h>
5 5
6#define nouveau_device_create(p,n,s,c,d,u) \ 6struct platform_device;
7 nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
8 7
9int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname, 8enum nv_bus_type {
10 const char *cfg, const char *dbg, int, void **); 9 NOUVEAU_BUS_PCI,
10 NOUVEAU_BUS_PLATFORM,
11};
12
13#define nouveau_device_create(p,t,n,s,c,d,u) \
14 nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d), \
15 sizeof(**u), (void **)u)
16
17int nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
18 const char *sname, const char *cfg, const char *dbg,
19 int, void **);
11 20
12int nv04_identify(struct nouveau_device *); 21int nv04_identify(struct nouveau_device *);
13int nv10_identify(struct nouveau_device *); 22int nv10_identify(struct nouveau_device *);
@@ -17,6 +26,7 @@ int nv40_identify(struct nouveau_device *);
17int nv50_identify(struct nouveau_device *); 26int nv50_identify(struct nouveau_device *);
18int nvc0_identify(struct nouveau_device *); 27int nvc0_identify(struct nouveau_device *);
19int nve0_identify(struct nouveau_device *); 28int nve0_identify(struct nouveau_device *);
29int gm100_identify(struct nouveau_device *);
20 30
21struct nouveau_device *nouveau_device_find(u64 name); 31struct nouveau_device *nouveau_device_find(u64 name);
22 32
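
With the bus type threaded through nouveau_device_create_(), the first argument degrades to void * and the caller states which bus it is. A hypothetical PCI-side call (the name variable and the config/debug strings are placeholders, not lifted from nouveau_drm.c):

/* sketch: probe-time device creation for the PCI case. */
struct nouveau_device *device;
u64 name = 0x0123456789abULL;	/* placeholder unique device name */
int ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, name,
				pci_name(pdev), config, debug, &device);

A platform driver would pass its struct platform_device * with NOUVEAU_BUS_PLATFORM instead.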
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 4b21fabfbddb..fd0c68804de3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -36,14 +36,15 @@ void _nouveau_disp_dtor(struct nouveau_object *);
36#define _nouveau_disp_init _nouveau_engine_init 36#define _nouveau_disp_init _nouveau_engine_init
37#define _nouveau_disp_fini _nouveau_engine_fini 37#define _nouveau_disp_fini _nouveau_engine_fini
38 38
39extern struct nouveau_oclass nv04_disp_oclass; 39extern struct nouveau_oclass *nv04_disp_oclass;
40extern struct nouveau_oclass nv50_disp_oclass; 40extern struct nouveau_oclass *nv50_disp_oclass;
41extern struct nouveau_oclass nv84_disp_oclass; 41extern struct nouveau_oclass *nv84_disp_oclass;
42extern struct nouveau_oclass nva0_disp_oclass; 42extern struct nouveau_oclass *nva0_disp_oclass;
43extern struct nouveau_oclass nv94_disp_oclass; 43extern struct nouveau_oclass *nv94_disp_oclass;
44extern struct nouveau_oclass nva3_disp_oclass; 44extern struct nouveau_oclass *nva3_disp_oclass;
45extern struct nouveau_oclass nvd0_disp_oclass; 45extern struct nouveau_oclass *nvd0_disp_oclass;
46extern struct nouveau_oclass nve0_disp_oclass; 46extern struct nouveau_oclass *nve0_disp_oclass;
47extern struct nouveau_oclass nvf0_disp_oclass; 47extern struct nouveau_oclass *nvf0_disp_oclass;
48extern struct nouveau_oclass *gm107_disp_oclass;
48 49
49#endif 50#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 97705618de97..871edfdf3d5b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -63,13 +63,14 @@ extern struct nouveau_oclass nv40_graph_oclass;
63extern struct nouveau_oclass nv50_graph_oclass; 63extern struct nouveau_oclass nv50_graph_oclass;
64extern struct nouveau_oclass *nvc0_graph_oclass; 64extern struct nouveau_oclass *nvc0_graph_oclass;
65extern struct nouveau_oclass *nvc1_graph_oclass; 65extern struct nouveau_oclass *nvc1_graph_oclass;
66extern struct nouveau_oclass *nvc3_graph_oclass; 66extern struct nouveau_oclass *nvc4_graph_oclass;
67extern struct nouveau_oclass *nvc8_graph_oclass; 67extern struct nouveau_oclass *nvc8_graph_oclass;
68extern struct nouveau_oclass *nvd7_graph_oclass; 68extern struct nouveau_oclass *nvd7_graph_oclass;
69extern struct nouveau_oclass *nvd9_graph_oclass; 69extern struct nouveau_oclass *nvd9_graph_oclass;
70extern struct nouveau_oclass *nve4_graph_oclass; 70extern struct nouveau_oclass *nve4_graph_oclass;
71extern struct nouveau_oclass *nvf0_graph_oclass; 71extern struct nouveau_oclass *nvf0_graph_oclass;
72extern struct nouveau_oclass *nv108_graph_oclass; 72extern struct nouveau_oclass *nv108_graph_oclass;
73extern struct nouveau_oclass *gm107_graph_oclass;
73 74
74extern const struct nouveau_bitfield nv04_graph_nsource[]; 75extern const struct nouveau_bitfield nv04_graph_nsource[];
75extern struct nouveau_ofuncs nv04_graph_ofuncs; 76extern struct nouveau_ofuncs nv04_graph_ofuncs;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
new file mode 100644
index 000000000000..bba01ab1e049
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/P0260.h
@@ -0,0 +1,23 @@
1#ifndef __NVBIOS_P0260_H__
2#define __NVBIOS_P0260_H__
3
4u32 nvbios_P0260Te(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
6
7struct nvbios_P0260E {
8 u32 data;
9};
10
11u32 nvbios_P0260Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
12u32 nvbios_P0260Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
13 struct nvbios_P0260E *);
14
15struct nvbios_P0260X {
16 u32 data;
17};
18
19u32 nvbios_P0260Xe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
20u32 nvbios_P0260Xp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
21 struct nvbios_P0260X *);
22
23#endif
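
The function names follow the pattern used by nouveau's other BIOS table parsers: Te returns the table plus its header geometry, Ee/Xe return a pointer to the idx'th (extended) entry, and Ep/Xp additionally parse the entry into the info struct. Assuming that reading, which the P0260.c implementation later in this diff bears out, iterating the table looks like:

/* sketch: walk every parsed entry of the P0260 table. */
u8 ver, len;
struct nvbios_P0260E info;
int idx = 0;
u32 data;
while ((data = nvbios_P0260Ep(bios, idx++, &ver, &len, &info))) {
	/* info.data carries the 32-bit payload of entry idx-1 */
}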
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
index c1270548fd0d..a32feb3f3fb6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -16,6 +16,7 @@ enum dcb_connector_type {
16 DCB_CONNECTOR_eDP = 0x47, 16 DCB_CONNECTOR_eDP = 0x47,
17 DCB_CONNECTOR_HDMI_0 = 0x60, 17 DCB_CONNECTOR_HDMI_0 = 0x60,
18 DCB_CONNECTOR_HDMI_1 = 0x61, 18 DCB_CONNECTOR_HDMI_1 = 0x61,
19 DCB_CONNECTOR_HDMI_C = 0x63,
19 DCB_CONNECTOR_DMS59_DP0 = 0x64, 20 DCB_CONNECTOR_DMS59_DP0 = 0x64,
20 DCB_CONNECTOR_DMS59_DP1 = 0x65, 21 DCB_CONNECTOR_DMS59_DP1 = 0x65,
21 DCB_CONNECTOR_NONE = 0xff 22 DCB_CONNECTOR_NONE = 0xff
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index c5e6d1e6ac1d..c086ac6d677d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -61,6 +61,6 @@ struct nvbios_ramcfg {
61}; 61};
62 62
63u8 nvbios_ramcfg_count(struct nouveau_bios *); 63u8 nvbios_ramcfg_count(struct nouveau_bios *);
64u8 nvbios_ramcfg_index(struct nouveau_bios *); 64u8 nvbios_ramcfg_index(struct nouveau_subdev *);
65 65
66#endif 66#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index 083541dbe9c8..8dc5051df55d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -31,6 +31,12 @@ struct nouveau_therm_trip_point {
31 int hysteresis; 31 int hysteresis;
32}; 32};
33 33
34enum nvbios_therm_fan_mode {
35 NVBIOS_THERM_FAN_TRIP = 0,
36 NVBIOS_THERM_FAN_LINEAR = 1,
37 NVBIOS_THERM_FAN_OTHER = 2,
38};
39
34struct nvbios_therm_fan { 40struct nvbios_therm_fan {
35 u16 pwm_freq; 41 u16 pwm_freq;
36 42
@@ -40,6 +46,7 @@ struct nvbios_therm_fan {
40 u16 bump_period; 46 u16 bump_period;
41 u16 slow_down_period; 47 u16 slow_down_period;
42 48
49 enum nvbios_therm_fan_mode fan_mode;
43 struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX]; 50 struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
44 u8 nr_fan_trip; 51 u8 nr_fan_trip;
45 u8 linear_min_temp; 52 u8 linear_min_temp;
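
The new fan_mode enum makes the fan-control strategy explicit rather than implied by which table fields happen to be populated. A sketch of how a therm consumer might dispatch on it (field usage is inferred from the struct above; the actual policy code lives elsewhere in the driver):

/* sketch: pick a duty-cycle strategy from the parsed fan table. */
switch (fan->fan_mode) {
case NVBIOS_THERM_FAN_TRIP:
	/* walk fan->trip[0..nr_fan_trip-1] trip points */
	break;
case NVBIOS_THERM_FAN_LINEAR:
	/* interpolate duty upward from linear_min_temp */
	break;
case NVBIOS_THERM_FAN_OTHER:
	/* no usable curve in the table; fall back to defaults */
	break;
}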
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index ed1ac68c38b3..e292271a84e4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -9,6 +9,7 @@ struct nouveau_devinit {
9 bool post; 9 bool post;
10 void (*meminit)(struct nouveau_devinit *); 10 void (*meminit)(struct nouveau_devinit *);
11 int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); 11 int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
12 u32 (*mmio)(struct nouveau_devinit *, u32 addr);
12}; 13};
13 14
14static inline struct nouveau_devinit * 15static inline struct nouveau_devinit *
@@ -28,5 +29,6 @@ extern struct nouveau_oclass *nv98_devinit_oclass;
28extern struct nouveau_oclass *nva3_devinit_oclass; 29extern struct nouveau_oclass *nva3_devinit_oclass;
29extern struct nouveau_oclass *nvaf_devinit_oclass; 30extern struct nouveau_oclass *nvaf_devinit_oclass;
30extern struct nouveau_oclass *nvc0_devinit_oclass; 31extern struct nouveau_oclass *nvc0_devinit_oclass;
32extern struct nouveau_oclass *gm107_devinit_oclass;
31 33
32#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index d7ecafbae1ca..58c7ccdebb01 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -105,6 +105,7 @@ extern struct nouveau_oclass *nvaa_fb_oclass;
105extern struct nouveau_oclass *nvaf_fb_oclass; 105extern struct nouveau_oclass *nvaf_fb_oclass;
106extern struct nouveau_oclass *nvc0_fb_oclass; 106extern struct nouveau_oclass *nvc0_fb_oclass;
107extern struct nouveau_oclass *nve0_fb_oclass; 107extern struct nouveau_oclass *nve0_fb_oclass;
108extern struct nouveau_oclass *gm107_fb_oclass;
108 109
109#include <subdev/bios/ramcfg.h> 110#include <subdev/bios/ramcfg.h>
110 111
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
index a1985ed3d58d..c9c1950b7743 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -35,6 +35,7 @@ nouveau_ltcg(void *obj)
35#define _nouveau_ltcg_init _nouveau_subdev_init 35#define _nouveau_ltcg_init _nouveau_subdev_init
36#define _nouveau_ltcg_fini _nouveau_subdev_fini 36#define _nouveau_ltcg_fini _nouveau_subdev_fini
37 37
38extern struct nouveau_oclass nvc0_ltcg_oclass; 38extern struct nouveau_oclass *gf100_ltcg_oclass;
39extern struct nouveau_oclass *gm107_ltcg_oclass;
39 40
40#endif 41#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 3c6738edd127..72b176831be6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -12,6 +12,7 @@ struct nouveau_mc_intr {
12struct nouveau_mc { 12struct nouveau_mc {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 bool use_msi; 14 bool use_msi;
15 unsigned int irq;
15}; 16};
16 17
17static inline struct nouveau_mc * 18static inline struct nouveau_mc *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 69891d4a3fe7..d4a68179e586 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -31,7 +31,7 @@ struct nouveau_therm {
31 int (*pwm_ctrl)(struct nouveau_therm *, int line, bool); 31 int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
32 int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *); 32 int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
33 int (*pwm_set)(struct nouveau_therm *, int line, u32, u32); 33 int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
34 int (*pwm_clock)(struct nouveau_therm *); 34 int (*pwm_clock)(struct nouveau_therm *, int line);
35 35
36 int (*fan_get)(struct nouveau_therm *); 36 int (*fan_get)(struct nouveau_therm *);
37 int (*fan_set)(struct nouveau_therm *, int); 37 int (*fan_set)(struct nouveau_therm *, int);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index 9ab70dfe5b02..db9be803a874 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -59,5 +59,6 @@ int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
59 struct nouveau_oclass *, int size, void **); 59 struct nouveau_oclass *, int size, void **);
60 60
61extern struct nouveau_oclass nv04_timer_oclass; 61extern struct nouveau_oclass nv04_timer_oclass;
62extern struct nouveau_oclass gk20a_timer_oclass;
62 63
63#endif 64#endif
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index 191e739f30d1..d0ced94ca54c 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -5,6 +5,7 @@
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/platform_device.h>
8#include <linux/printk.h> 9#include <linux/printk.h>
9#include <linux/bitops.h> 10#include <linux/bitops.h>
10#include <linux/firmware.h> 11#include <linux/firmware.h>
@@ -23,17 +24,6 @@
23 24
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25 26
26static inline int
27ffsll(u64 mask)
28{
29 int i;
30 for (i = 0; i < 64; i++) {
31 if (mask & (1ULL << i))
32 return i + 1;
33 }
34 return 0;
35}
36
37#ifndef ioread32_native 27#ifndef ioread32_native
38#ifdef __BIG_ENDIAN 28#ifdef __BIG_ENDIAN
39#define ioread16_native ioread16be 29#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index 7098ddd54678..bdf594116f3f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -118,8 +118,8 @@ nouveau_bar_create_(struct nouveau_object *parent,
118 if (ret) 118 if (ret)
119 return ret; 119 return ret;
120 120
121 bar->iomem = ioremap(pci_resource_start(device->pdev, 3), 121 bar->iomem = ioremap(nv_device_resource_start(device, 3),
122 pci_resource_len(device->pdev, 3)); 122 nv_device_resource_len(device, 3));
123 return 0; 123 return 0;
124} 124}
125 125
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index 090d594a21b3..f748ba49dfc8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -139,7 +139,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
139 139
140 /* BAR3 */ 140 /* BAR3 */
141 start = 0x0100000000ULL; 141 start = 0x0100000000ULL;
142 limit = start + pci_resource_len(device->pdev, 3); 142 limit = start + nv_device_resource_len(device, 3);
143 143
144 ret = nouveau_vm_new(device, start, limit, start, &vm); 144 ret = nouveau_vm_new(device, start, limit, start, &vm);
145 if (ret) 145 if (ret)
@@ -173,7 +173,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 173
174 /* BAR1 */ 174 /* BAR1 */
175 start = 0x0000000000ULL; 175 start = 0x0000000000ULL;
176 limit = start + pci_resource_len(device->pdev, 1); 176 limit = start + nv_device_resource_len(device, 1);
177 177
178 ret = nouveau_vm_new(device, start, limit--, start, &vm); 178 ret = nouveau_vm_new(device, start, limit--, start, &vm);
179 if (ret) 179 if (ret)
@@ -231,7 +231,7 @@ static int
231nv50_bar_init(struct nouveau_object *object) 231nv50_bar_init(struct nouveau_object *object)
232{ 232{
233 struct nv50_bar_priv *priv = (void *)object; 233 struct nv50_bar_priv *priv = (void *)object;
234 int ret; 234 int ret, i;
235 235
236 ret = nouveau_bar_init(&priv->base); 236 ret = nouveau_bar_init(&priv->base);
237 if (ret) 237 if (ret)
@@ -249,6 +249,8 @@ nv50_bar_init(struct nouveau_object *object)
249 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12); 249 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
250 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4); 250 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
251 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4); 251 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
252 for (i = 0; i < 8; i++)
253 nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
252 return 0; 254 return 0;
253} 255}
254 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index bac5e754de35..3f30db62e656 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -84,7 +84,6 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
84 struct nouveau_object **pobject) 84 struct nouveau_object **pobject)
85{ 85{
86 struct nouveau_device *device = nv_device(parent); 86 struct nouveau_device *device = nv_device(parent);
87 struct pci_dev *pdev = device->pdev;
88 struct nvc0_bar_priv *priv; 87 struct nvc0_bar_priv *priv;
89 struct nouveau_gpuobj *mem; 88 struct nouveau_gpuobj *mem;
90 struct nouveau_vm *vm; 89 struct nouveau_vm *vm;
@@ -107,14 +106,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
107 if (ret) 106 if (ret)
108 return ret; 107 return ret;
109 108
110 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm); 109 ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 3), 0, &vm);
111 if (ret) 110 if (ret)
112 return ret; 111 return ret;
113 112
114 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); 113 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
115 114
116 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 115 ret = nouveau_gpuobj_new(nv_object(priv), NULL,
117 (pci_resource_len(pdev, 3) >> 12) * 8, 116 (nv_device_resource_len(device, 3) >> 12) * 8,
118 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 117 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
119 &vm->pgt[0].obj[0]); 118 &vm->pgt[0].obj[0]);
120 vm->pgt[0].refcount[0] = 1; 119 vm->pgt[0].refcount[0] = 1;
@@ -128,8 +127,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
128 127
129 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr)); 128 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
130 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr)); 129 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
131 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1)); 130 nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 3) - 1));
132 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1)); 131 nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 3) - 1));
133 132
134 /* BAR1 */ 133 /* BAR1 */
135 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0, 134 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
@@ -143,7 +142,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 if (ret) 142 if (ret)
144 return ret; 143 return ret;
145 144
146 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm); 145 ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 1), 0, &vm);
147 if (ret) 146 if (ret)
148 return ret; 147 return ret;
149 148
@@ -156,8 +155,8 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
156 155
157 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr)); 156 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
158 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr)); 157 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
159 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1)); 158 nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 1) - 1));
160 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1)); 159 nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 1) - 1));
161 160
162 priv->base.alloc = nouveau_bar_alloc; 161 priv->base.alloc = nouveau_bar_alloc;
163 priv->base.kmap = nvc0_bar_kmap; 162 priv->base.kmap = nvc0_bar_kmap;
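The conversions above swap direct pci_resource_len() calls for nv_device_resource_len(), removing the last struct pci_dev dependency from this constructor so the same BAR setup can serve non-PCI (platform) devices. The helper body is not part of this hunk; a minimal sketch of the dispatch it implies, assuming a nv_device_is_pci() predicate and a platformdev member (both assumptions here, not taken from the hunks above):

	/* Sketch only: dispatch a BAR length query by bus type. */
	resource_size_t
	nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
	{
		struct resource *res;

		if (nv_device_is_pci(device))		/* assumed predicate */
			return pci_resource_len(device->pdev, bar);

		res = platform_get_resource(device->platformdev,
					    IORESOURCE_MEM, bar);
		return res ? resource_size(res) : 0;
	}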
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c
new file mode 100644
index 000000000000..199f4e5f7488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/P0260.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/ramcfg.h>
28#include <subdev/bios/P0260.h>
29
30u32
31nvbios_P0260Te(struct nouveau_bios *bios,
32 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
33{
34 struct bit_entry bit_P;
35 u32 data = 0x00000000;
36
37 if (!bit_entry(bios, 'P', &bit_P)) {
38 if (bit_P.version == 2 && bit_P.length > 0x63)
39 data = nv_ro32(bios, bit_P.offset + 0x60);
40 if (data) {
41 *ver = nv_ro08(bios, data + 0);
42 switch (*ver) {
43 case 0x10:
44 *hdr = nv_ro08(bios, data + 1);
45 *cnt = nv_ro08(bios, data + 2);
46 *len = 4;
47 *xnr = nv_ro08(bios, data + 3);
48 *xsz = 4;
49 return data;
50 default:
51 break;
52 }
53 }
54 }
55
56 return 0x00000000;
57}
58
59u32
60nvbios_P0260Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
61{
62 u8 hdr, cnt, xnr, xsz;
63 u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, len, &xnr, &xsz);
64 if (data && idx < cnt)
65 return data + hdr + (idx * *len);
66 return 0x00000000;
67}
68
69u32
70nvbios_P0260Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
71 struct nvbios_P0260E *info)
72{
73 u32 data = nvbios_P0260Ee(bios, idx, ver, len);
74 memset(info, 0x00, sizeof(*info));
75 switch (!!data * *ver) {
76 case 0x10:
77 info->data = nv_ro32(bios, data);
78 return data;
79 default:
80 break;
81 }
82 return 0x00000000;
83}
84
85u32
86nvbios_P0260Xe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *xsz)
87{
88 u8 hdr, cnt, len, xnr;
89 u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, &len, &xnr, xsz);
90 if (data && idx < xnr)
91 return data + hdr + (cnt * len) + (idx * *xsz);
92 return 0x00000000;
93}
94
95u32
96nvbios_P0260Xp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
97 struct nvbios_P0260X *info)
98{
99 u32 data = nvbios_P0260Xe(bios, idx, ver, hdr);
100 memset(info, 0x00, sizeof(*info));
101 switch (!!data * *ver) {
102 case 0x10:
103 info->data = nv_ro32(bios, data);
104 return data;
105 default:
106 break;
107 }
108 return 0x00000000;
109}
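All four accessors above share one pattern: nvbios_P0260Te() locates the table through BIT entry 'P' and returns its geometry, the *Ee/*Xe functions turn an index into an entry offset, and the *Ep/*Xp wrappers parse an entry. The "switch (!!data * *ver)" trick collapses to case 0 whenever the lookup failed, so a single default handles both "no table" and "unknown version". A hedged usage sketch for walking the base entries:

	/* Sketch: nvbios_P0260Ep() returns 0 once idx reaches the count. */
	static void
	dump_P0260(struct nouveau_bios *bios)
	{
		struct nvbios_P0260E entry;
		u8 ver, len;
		int idx = 0;

		while (nvbios_P0260Ep(bios, idx, &ver, &len, &entry)) {
			nv_info(bios, "P0260E[%d] = 0x%08x\n", idx, entry.data);
			idx++;
		}
	}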
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index ef0c9c4a8cc3..e9df94f96d78 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -90,10 +90,26 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
90 int i; 90 int i;
91 91
92 if (device->card_type >= NV_50) { 92 if (device->card_type >= NV_50) {
93 if ( device->card_type < NV_C0 || 93 if (device->card_type >= NV_C0 && device->card_type < GM100) {
94 !(nv_rd32(bios, 0x022500) & 0x00000001)) 94 if (nv_rd32(bios, 0x022500) & 0x00000001)
95 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8; 95 return;
96 } else
97 if (device->card_type >= GM100) {
98 if (nv_rd32(bios, 0x021c04) & 0x00000001)
99 return;
100 }
101
102 addr = nv_rd32(bios, 0x619f04);
103 if (!(addr & 0x00000008)) {
104 nv_debug(bios, "... not enabled\n");
105 return;
106 }
107 if ( (addr & 0x00000003) != 1) {
108 nv_debug(bios, "... not in vram\n");
109 return;
110 }
96 111
112 addr = (u64)(addr >> 8) << 8;
97 if (!addr) { 113 if (!addr) {
98 addr = (u64)nv_rd32(bios, 0x001700) << 16; 114 addr = (u64)nv_rd32(bios, 0x001700) << 16;
99 addr += 0xf0000; 115 addr += 0xf0000;
@@ -141,6 +157,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
141 pcireg = 0x001850; 157 pcireg = 0x001850;
142 access = nv_mask(bios, pcireg, 0x00000001, 0x00000000); 158 access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
143 159
160 /* WARNING: PROM accesses should always be 32-bit aligned. Other
161 * access sizes work on most chipsets but do not on Kepler chipsets.
162 */
163
144 /* bail if no rom signature, with a workaround for a PROM reading 164 /* bail if no rom signature, with a workaround for a PROM reading
145 * issue on some chipsets. the first read after a period of 165 * issue on some chipsets. the first read after a period of
146 * inactivity returns the wrong result, so retry the first header 166 * inactivity returns the wrong result, so retry the first header
@@ -148,31 +168,32 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
148 */ 168 */
149 i = 16; 169 i = 16;
150 do { 170 do {
151 if (nv_rd08(bios, 0x300000) == 0x55) 171 if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
152 break; 172 break;
153 } while (i--); 173 } while (i--);
154 174
155 if (!i || nv_rd08(bios, 0x300001) != 0xaa) 175 if (!i)
156 goto out;
157
158 /* additional check (see note below) - read PCI record header */
159 pcir = nv_rd08(bios, 0x300018) |
160 nv_rd08(bios, 0x300019) << 8;
161 if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
162 nv_rd08(bios, 0x300001 + pcir) != 'C' ||
163 nv_rd08(bios, 0x300002 + pcir) != 'I' ||
164 nv_rd08(bios, 0x300003 + pcir) != 'R')
165 goto out; 176 goto out;
166 177
167 /* read entire bios image to system memory */ 178 /* read entire bios image to system memory */
168 bios->size = nv_rd08(bios, 0x300002) * 512; 179 bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
169 if (!bios->size) 180 if (!bios->size)
170 goto out; 181 goto out;
171 182
172 bios->data = kmalloc(bios->size, GFP_KERNEL); 183 bios->data = kmalloc(bios->size, GFP_KERNEL);
173 if (bios->data) { 184 if (bios->data) {
174 for (i = 0; i < bios->size; i++) 185 for (i = 0; i < bios->size; i+=4)
175 nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i)); 186 nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
187 }
188
189 /* check the PCI record header */
190 pcir = nv_ro16(bios, 0x0018);
191 if (bios->data[pcir + 0] != 'P' ||
192 bios->data[pcir + 1] != 'C' ||
193 bios->data[pcir + 2] != 'I' ||
194 bios->data[pcir + 3] != 'R') {
195 bios->size = 0;
196 kfree(bios->data);
176 } 197 }
177 198
178out: 199out:
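The rework above reads the PROM only through 32-bit accesses, matching the new warning, and moves the PCIR signature check after the copy, where the shadowed image can be inspected bytewise. The 0xaa55 comparison works because an option ROM begins with the bytes 0x55, 0xaa: a little-endian 32-bit read puts them in the low half-word, and the third header byte (the image size in 512-byte units) is what "(nv_rd32(bios, 0x300000) >> 16) & 0xff" extracts. A sketch of the signature test in isolation:

	/* Sketch, assuming little-endian 32-bit reads as elsewhere here. */
	static bool
	prom_sig_ok(struct nouveau_bios *bios)
	{
		u32 word = nv_rd32(bios, 0x300000); /* 0x55, 0xaa, size, ... */

		return (word & 0xffff) == 0xaa55;
	}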
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 2d9b9d7a7992..88606bfaf847 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -142,9 +142,36 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
142 if (*ver >= 0x40) { 142 if (*ver >= 0x40) {
143 u32 conf = nv_ro32(bios, dcb + 0x04); 143 u32 conf = nv_ro32(bios, dcb + 0x04);
144 switch (outp->type) { 144 switch (outp->type) {
145 case DCB_OUTPUT_DP:
146 switch (conf & 0x00e00000) {
147 case 0x00000000:
148 outp->dpconf.link_bw = 0x06;
149 break;
150 case 0x00200000:
151 outp->dpconf.link_bw = 0x0a;
152 break;
153 case 0x00400000:
154 default:
155 outp->dpconf.link_bw = 0x14;
156 break;
157 }
158
159 switch (conf & 0x0f000000) {
160 case 0x0f000000:
161 outp->dpconf.link_nr = 4;
162 break;
163 case 0x03000000:
164 outp->dpconf.link_nr = 2;
165 break;
166 case 0x01000000:
167 default:
168 outp->dpconf.link_nr = 1;
169 break;
170 }
171
172 /* fall-through... */
145 case DCB_OUTPUT_TMDS: 173 case DCB_OUTPUT_TMDS:
146 case DCB_OUTPUT_LVDS: 174 case DCB_OUTPUT_LVDS:
147 case DCB_OUTPUT_DP:
148 outp->link = (conf & 0x00000030) >> 4; 175 outp->link = (conf & 0x00000030) >> 4;
149 outp->sorconf.link = outp->link; /*XXX*/ 176 outp->sorconf.link = outp->link; /*XXX*/
150 outp->extdev = 0x00; 177 outp->extdev = 0x00;
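For DCB 4.x DP outputs, the hunk above decodes bits 21-23 of the config dword into dpconf.link_bw and bits 24-27 into dpconf.link_nr, then falls through so DP shares the link/extdev parsing with TMDS and LVDS. The link_bw values are the standard DPCD LINK_BW codes, i.e. the per-lane rate in units of 0.27 Gb/s, which a small helper (illustrative, not from this patch) makes explicit:

	/* 0x06 -> 1.62 Gb/s, 0x0a -> 2.7 Gb/s, 0x14 -> 5.4 Gb/s per lane */
	static u32
	dp_link_rate_kbps(u8 link_bw)
	{
		return (u32)link_bw * 270000;
	}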
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index de201baeb053..acaeaf79e3f0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -118,6 +118,8 @@ init_conn(struct nvbios_init *init)
118static inline u32 118static inline u32
119init_nvreg(struct nvbios_init *init, u32 reg) 119init_nvreg(struct nvbios_init *init, u32 reg)
120{ 120{
121 struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
122
121 /* C51 (at least) sometimes has the lower bits set which the VBIOS 123 /* C51 (at least) sometimes has the lower bits set which the VBIOS
122 * interprets to mean that access needs to go through certain IO 124 * interprets to mean that access needs to go through certain IO
123 * ports instead. The NVIDIA binary driver has been seen to access 125 * ports instead. The NVIDIA binary driver has been seen to access
@@ -147,6 +149,9 @@ init_nvreg(struct nvbios_init *init, u32 reg)
147 149
148 if (reg & ~0x00fffffc) 150 if (reg & ~0x00fffffc)
149 warn("unknown bits in register 0x%08x\n", reg); 151 warn("unknown bits in register 0x%08x\n", reg);
152
153 if (devinit->mmio)
154 reg = devinit->mmio(devinit, reg);
150 return reg; 155 return reg;
151} 156}
152 157
@@ -154,7 +159,7 @@ static u32
154init_rd32(struct nvbios_init *init, u32 reg) 159init_rd32(struct nvbios_init *init, u32 reg)
155{ 160{
156 reg = init_nvreg(init, reg); 161 reg = init_nvreg(init, reg);
157 if (init_exec(init)) 162 if (reg != ~0 && init_exec(init))
158 return nv_rd32(init->subdev, reg); 163 return nv_rd32(init->subdev, reg);
159 return 0x00000000; 164 return 0x00000000;
160} 165}
@@ -163,7 +168,7 @@ static void
163init_wr32(struct nvbios_init *init, u32 reg, u32 val) 168init_wr32(struct nvbios_init *init, u32 reg, u32 val)
164{ 169{
165 reg = init_nvreg(init, reg); 170 reg = init_nvreg(init, reg);
166 if (init_exec(init)) 171 if (reg != ~0 && init_exec(init))
167 nv_wr32(init->subdev, reg, val); 172 nv_wr32(init->subdev, reg, val);
168} 173}
169 174
@@ -171,7 +176,7 @@ static u32
171init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val) 176init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
172{ 177{
173 reg = init_nvreg(init, reg); 178 reg = init_nvreg(init, reg);
174 if (init_exec(init)) { 179 if (reg != ~0 && init_exec(init)) {
175 u32 tmp = nv_rd32(init->subdev, reg); 180 u32 tmp = nv_rd32(init->subdev, reg);
176 nv_wr32(init->subdev, reg, (tmp & ~mask) | val); 181 nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
177 return tmp; 182 return tmp;
@@ -410,7 +415,7 @@ init_ram_restrict(struct nvbios_init *init)
410 * in case *not* re-reading the strap causes similar breakage. 415 * in case *not* re-reading the strap causes similar breakage.
411 */ 416 */
412 if (!init->ramcfg || init->bios->version.major < 0x70) 417 if (!init->ramcfg || init->bios->version.major < 0x70)
413 init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios); 418 init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->subdev);
414 return (init->ramcfg & 0x7fffffff); 419 return (init->ramcfg & 0x7fffffff);
415} 420}
416 421
@@ -845,9 +850,8 @@ init_idx_addr_latched(struct nvbios_init *init)
845 u32 data = nv_ro32(bios, init->offset + 13); 850 u32 data = nv_ro32(bios, init->offset + 13);
846 u8 count = nv_ro08(bios, init->offset + 17); 851 u8 count = nv_ro08(bios, init->offset + 17);
847 852
848 trace("INDEX_ADDRESS_LATCHED\t" 853 trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
849 "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n", 854 trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
850 creg, dreg, mask, data);
851 init->offset += 18; 855 init->offset += 18;
852 856
853 while (count--) { 857 while (count--) {
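With the devinit->mmio hook wired into init_nvreg(), a chipset backend can now veto or remap a register before any of the accessors touch it, and all three (init_rd32, init_wr32, init_mask) treat a return of ~0 as "do not access". The sentinel cannot alias a real register, since addresses with bits outside 0x00fffffc already trigger the "unknown bits" warning above. A hypothetical hook, purely for illustration:

	/* Hypothetical example: hide a range that is fused off. */
	static u32
	example_devinit_mmio(struct nouveau_devinit *devinit, u32 addr)
	{
		if (addr >= 0x110000 && addr <= 0x110f9c)
			return ~0;	/* init_rd32/wr32/mask will skip it */
		return addr;
	}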
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
index 991aedda999b..6c401f70ab99 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -27,9 +27,9 @@
27#include <subdev/bios/ramcfg.h> 27#include <subdev/bios/ramcfg.h>
28 28
29static u8 29static u8
30nvbios_ramcfg_strap(struct nouveau_bios *bios) 30nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
31{ 31{
32 return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2; 32 return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
33} 33}
34 34
35u8 35u8
@@ -48,9 +48,10 @@ nvbios_ramcfg_count(struct nouveau_bios *bios)
48} 48}
49 49
50u8 50u8
51nvbios_ramcfg_index(struct nouveau_bios *bios) 51nvbios_ramcfg_index(struct nouveau_subdev *subdev)
52{ 52{
53 u8 strap = nvbios_ramcfg_strap(bios); 53 struct nouveau_bios *bios = nouveau_bios(subdev);
54 u8 strap = nvbios_ramcfg_strap(subdev);
54 u32 xlat = 0x00000000; 55 u32 xlat = 0x00000000;
55 struct bit_entry bit_M; 56 struct bit_entry bit_M;
56 57
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 22ac6dbd6c8f..d15854094078 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -164,6 +164,7 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
164 164
165 i = 0; 165 i = 0;
166 fan->nr_fan_trip = 0; 166 fan->nr_fan_trip = 0;
167 fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
167 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { 168 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
168 s16 value = nv_ro16(bios, entry + 1); 169 s16 value = nv_ro16(bios, entry + 1);
169 170
@@ -174,6 +175,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
174 break; 175 break;
175 case 0x24: 176 case 0x24:
176 fan->nr_fan_trip++; 177 fan->nr_fan_trip++;
178 if (fan->fan_mode > NVBIOS_THERM_FAN_TRIP)
179 fan->fan_mode = NVBIOS_THERM_FAN_TRIP;
177 cur_trip = &fan->trip[fan->nr_fan_trip - 1]; 180 cur_trip = &fan->trip[fan->nr_fan_trip - 1];
178 cur_trip->hysteresis = value & 0xf; 181 cur_trip->hysteresis = value & 0xf;
179 cur_trip->temp = (value & 0xff0) >> 4; 182 cur_trip->temp = (value & 0xff0) >> 4;
@@ -194,11 +197,19 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
194 fan->slow_down_period = value; 197 fan->slow_down_period = value;
195 break; 198 break;
196 case 0x46: 199 case 0x46:
200 if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
201 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
197 fan->linear_min_temp = nv_ro08(bios, entry + 1); 202 fan->linear_min_temp = nv_ro08(bios, entry + 1);
198 fan->linear_max_temp = nv_ro08(bios, entry + 2); 203 fan->linear_max_temp = nv_ro08(bios, entry + 2);
199 break; 204 break;
200 } 205 }
201 } 206 }
202 207
208 /* starting from fermi, fan management is always linear */
209 if (nv_device(bios)->card_type >= NV_C0 &&
210 fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
211 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
212 }
213
203 return 0; 214 return 0;
204} 215}
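nvbios_therm_fan_parse() above starts fan_mode at NVBIOS_THERM_FAN_OTHER and only ever lowers it ("if (fan->fan_mode > X) fan->fan_mode = X"), so the numeric order of the enum encodes precedence: a trip table, once seen, can no longer be displaced by linear entries, and the Fermi fixup at the end only fires when neither was found. A sketch of the ordering this logic implies (the values are inferred from the comparisons, not quoted from the header):

	/* Lower value = higher precedence for the downgrade-only logic. */
	enum nvbios_therm_fan_mode {
		NVBIOS_THERM_FAN_TRIP = 0,	/* strongest: trip tables */
		NVBIOS_THERM_FAN_LINEAR = 1,	/* beats only "other" */
		NVBIOS_THERM_FAN_OTHER = 2,	/* initial/unparsed state */
	};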
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 8fa34e8152c2..239acfe876c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -96,5 +96,6 @@ nouveau_devinit_create_(struct nouveau_object *parent,
96 devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); 96 devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
97 devinit->meminit = impl->meminit; 97 devinit->meminit = impl->meminit;
98 devinit->pll_set = impl->pll_set; 98 devinit->pll_set = impl->pll_set;
99 devinit->mmio = impl->mmio;
99 return 0; 100 return 0;
100} 101}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
index 6b56a0f4cb40..4fe49cf4c99a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -24,6 +24,8 @@
24 * 24 *
25 */ 25 */
26 26
27#include <core/device.h>
28
27#define NV04_PFB_BOOT_0 0x00100000 29#define NV04_PFB_BOOT_0 0x00100000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 30# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000 31# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
@@ -60,10 +62,10 @@
60# define NV10_PFB_REFCTRL_VALID_1 (1 << 31) 62# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
61 63
62static inline struct io_mapping * 64static inline struct io_mapping *
63fbmem_init(struct pci_dev *pdev) 65fbmem_init(struct nouveau_device *dev)
64{ 66{
65 return io_mapping_create_wc(pci_resource_start(pdev, 1), 67 return io_mapping_create_wc(nv_device_resource_start(dev, 1),
66 pci_resource_len(pdev, 1)); 68 nv_device_resource_len(dev, 1));
67} 69}
68 70
69static inline void 71static inline void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
new file mode 100644
index 000000000000..c69bc7f54e37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27static u64
28gm107_devinit_disable(struct nouveau_devinit *devinit)
29{
30 struct nv50_devinit_priv *priv = (void *)devinit;
31 u32 r021c00 = nv_rd32(priv, 0x021c00);
32 u32 r021c04 = nv_rd32(priv, 0x021c04);
33 u64 disable = 0ULL;
34
35 if (r021c00 & 0x00000001)
36 disable |= (1ULL << NVDEV_ENGINE_COPY0);
37 if (r021c00 & 0x00000004)
38 disable |= (1ULL << NVDEV_ENGINE_COPY2);
39 if (r021c04 & 0x00000001)
40 disable |= (1ULL << NVDEV_ENGINE_DISP);
41
42 return disable;
43}
44
45struct nouveau_oclass *
46gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
47 .base.handle = NV_SUBDEV(DEVINIT, 0x07),
48 .base.ofuncs = &(struct nouveau_ofuncs) {
49 .ctor = nv50_devinit_ctor,
50 .dtor = _nouveau_devinit_dtor,
51 .init = nv50_devinit_init,
52 .fini = _nouveau_devinit_fini,
53 },
54 .pll_set = nvc0_devinit_pll_set,
55 .disable = gm107_devinit_disable,
56}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 7037eae46e44..052ad690b468 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -38,7 +38,7 @@ nv04_devinit_meminit(struct nouveau_devinit *devinit)
38 int i; 38 int i;
39 39
40 /* Map the framebuffer aperture */ 40 /* Map the framebuffer aperture */
41 fb = fbmem_init(nv_device(priv)->pdev); 41 fb = fbmem_init(nv_device(priv));
42 if (!fb) { 42 if (!fb) {
43 nv_error(priv, "failed to map fb\n"); 43 nv_error(priv, "failed to map fb\n");
44 return; 44 return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index 98b7e6780dc7..4a19c10e5178 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -53,7 +53,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
53 int i, v; 53 int i, v;
54 54
55 /* Map the framebuffer aperture */ 55 /* Map the framebuffer aperture */
56 fb = fbmem_init(nv_device(priv)->pdev); 56 fb = fbmem_init(nv_device(priv));
57 if (!fb) { 57 if (!fb) {
58 nv_error(priv, "failed to map fb\n"); 58 nv_error(priv, "failed to map fb\n");
59 return; 59 return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 32b3d2131a7f..3b8d657da279 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -46,7 +46,7 @@ nv10_devinit_meminit(struct nouveau_devinit *devinit)
46 mem_width_count = 2; 46 mem_width_count = 2;
47 47
48 /* Map the framebuffer aperture */ 48 /* Map the framebuffer aperture */
49 fb = fbmem_init(nv_device(priv)->pdev); 49 fb = fbmem_init(nv_device(priv));
50 if (!fb) { 50 if (!fb) {
51 nv_error(priv, "failed to map fb\n"); 51 nv_error(priv, "failed to map fb\n");
52 return; 52 return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 4689ba303b0b..04bc9732644c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -37,7 +37,7 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
37 struct io_mapping *fb; 37 struct io_mapping *fb;
38 38
39 /* Map the framebuffer aperture */ 39 /* Map the framebuffer aperture */
40 fb = fbmem_init(nv_device(priv)->pdev); 40 fb = fbmem_init(nv_device(priv));
41 if (!fb) { 41 if (!fb) {
42 nv_error(priv, "failed to map fb\n"); 42 nv_error(priv, "failed to map fb\n");
43 return; 43 return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
index 141c27e9f182..51d5076333ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -5,6 +5,7 @@
5 5
6struct nv50_devinit_priv { 6struct nv50_devinit_priv {
7 struct nouveau_devinit base; 7 struct nouveau_devinit base;
8 u32 r001540;
8}; 9};
9 10
10int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *, 11int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
@@ -15,4 +16,6 @@ int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
15 16
16int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32); 17int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
17 18
19int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
20
18#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 6dedf1dad7f7..006cf348bda7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -81,6 +81,55 @@ nva3_devinit_disable(struct nouveau_devinit *devinit)
81 return disable; 81 return disable;
82} 82}
83 83
84static u32
85nva3_devinit_mmio_part[] = {
86 0x100720, 0x1008bc, 4,
87 0x100a20, 0x100adc, 4,
88 0x100d80, 0x100ddc, 4,
89 0x110000, 0x110f9c, 4,
90 0x111000, 0x11103c, 8,
91 0x111080, 0x1110fc, 4,
92 0x111120, 0x1111fc, 4,
93 0x111300, 0x1114bc, 4,
94 0,
95};
96
97static u32
98nva3_devinit_mmio(struct nouveau_devinit *devinit, u32 addr)
99{
100 struct nv50_devinit_priv *priv = (void *)devinit;
101 u32 *mmio = nva3_devinit_mmio_part;
102
103 /* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
104 * instructions which touch registers that may not even exist on
105 * some configurations (Quadro 400), which causes the register
106 * interface to screw up for some amount of time after attempting to
107 * write to one of these, and results in all sorts of things going
108 * horribly wrong.
109 *
110 * the binary driver avoids touching these registers at all; however,
111 * the video bios doesn't care and does what the scripts say. it's
112 * presumed that the io-port access to priv registers isn't affected
113 * by the screw-up bug mentioned above.
114 *
115 * really, a new opcode should've been invented to handle these
116 * requirements, but whatever, it's too late for that now.
117 */
118 while (mmio[0]) {
119 if (addr >= mmio[0] && addr <= mmio[1]) {
120 u32 part = (addr / mmio[2]) & 7;
121 if (!priv->r001540)
122 priv->r001540 = nv_rd32(priv, 0x001540);
123 if (part >= hweight8((priv->r001540 >> 16) & 0xff))
124 return ~0;
125 return addr;
126 }
127 mmio += 3;
128 }
129
130 return addr;
131}
132
84struct nouveau_oclass * 133struct nouveau_oclass *
85nva3_devinit_oclass = &(struct nouveau_devinit_impl) { 134nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
86 .base.handle = NV_SUBDEV(DEVINIT, 0xa3), 135 .base.handle = NV_SUBDEV(DEVINIT, 0xa3),
@@ -92,4 +141,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
92 }, 141 },
93 .pll_set = nva3_devinit_pll_set, 142 .pll_set = nva3_devinit_pll_set,
94 .disable = nva3_devinit_disable, 143 .disable = nva3_devinit_disable,
144 .mmio = nva3_devinit_mmio,
95}.base; 145}.base;
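nva3_devinit_mmio() walks {first, last, stride} triples: registers inside a range are per-partition with the given stride, 0x001540 bits 16-23 report which partitions exist, and any access addressing a partition beyond hweight8() of that field is rejected as ~0. A worked example, assuming a board whose partition field reads 0x03 (two partitions):

	/* addr 0x100728 lies in 0x100720..0x1008bc, stride 4:
	 *   part = (0x100728 / 4) & 7 = 0x401ca & 7 = 2
	 *   hweight8(0x03) = 2 partitions present
	 *   part >= 2, so the function returns ~0 and init_rd32()/
	 *   init_wr32() skip the access entirely
	 */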
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index fa7e63766b1b..30c765747eea 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -24,7 +24,7 @@
24 24
25#include "nv50.h" 25#include "nv50.h"
26 26
27static int 27int
28nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) 28nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
29{ 29{
30 struct nv50_devinit_priv *priv = (void *)devinit; 30 struct nv50_devinit_priv *priv = (void *)devinit;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index 822a2fbf44a5..f0e8683ad840 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -11,6 +11,7 @@ struct nouveau_devinit_impl {
11 void (*meminit)(struct nouveau_devinit *); 11 void (*meminit)(struct nouveau_devinit *);
12 int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); 12 int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
13 u64 (*disable)(struct nouveau_devinit *); 13 u64 (*disable)(struct nouveau_devinit *);
14 u32 (*mmio)(struct nouveau_devinit *, u32);
14}; 15};
15 16
16#define nouveau_devinit_create(p,e,o,d) \ 17#define nouveau_devinit_create(p,e,o,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c
new file mode 100644
index 000000000000..c4840aedc2dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gm107.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27struct nouveau_oclass *
28gm107_fb_oclass = &(struct nouveau_fb_impl) {
29 .base.handle = NV_SUBDEV(FB, 0x07),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nvc0_fb_ctor,
32 .dtor = nvc0_fb_dtor,
33 .init = nvc0_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .memtype = nvc0_fb_memtype_valid,
37 .ram = &gm107_ram_oclass,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index cbc7f00c1278..1fc55c1e91a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,10 +250,8 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
250 250
251 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 251 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
252 if (priv->r100c08_page) { 252 if (priv->r100c08_page) {
253 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, 253 priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
254 0, PAGE_SIZE, 254 if (!priv->r100c08)
255 PCI_DMA_BIDIRECTIONAL);
256 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
257 nv_warn(priv, "failed 0x100c08 page map\n"); 255 nv_warn(priv, "failed 0x100c08 page map\n");
258 } else { 256 } else {
259 nv_warn(priv, "failed 0x100c08 page alloc\n"); 257 nv_warn(priv, "failed 0x100c08 page alloc\n");
@@ -270,8 +268,7 @@ nv50_fb_dtor(struct nouveau_object *object)
270 struct nv50_fb_priv *priv = (void *)object; 268 struct nv50_fb_priv *priv = (void *)object;
271 269
272 if (priv->r100c08_page) { 270 if (priv->r100c08_page) {
273 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, 271 nv_device_unmap_page(device, priv->r100c08);
274 PCI_DMA_BIDIRECTIONAL);
275 __free_page(priv->r100c08_page); 272 __free_page(priv->r100c08_page);
276 } 273 }
277 274
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 45470e1f0385..0670ae33ee45 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,8 +70,7 @@ nvc0_fb_dtor(struct nouveau_object *object)
70 struct nvc0_fb_priv *priv = (void *)object; 70 struct nvc0_fb_priv *priv = (void *)object;
71 71
72 if (priv->r100c10_page) { 72 if (priv->r100c10_page) {
73 pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE, 73 nv_device_unmap_page(device, priv->r100c10);
74 PCI_DMA_BIDIRECTIONAL);
75 __free_page(priv->r100c10_page); 74 __free_page(priv->r100c10_page);
76 } 75 }
77 76
@@ -94,10 +93,8 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
94 93
95 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 94 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
96 if (priv->r100c10_page) { 95 if (priv->r100c10_page) {
97 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 96 priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
98 0, PAGE_SIZE, 97 if (!priv->r100c10)
99 PCI_DMA_BIDIRECTIONAL);
100 if (pci_dma_mapping_error(device->pdev, priv->r100c10))
101 return -EFAULT; 98 return -EFAULT;
102 } 99 }
103 100
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
index 9e1931eb746f..705a06d755ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -18,12 +18,14 @@ int nvc0_fb_init(struct nouveau_object *);
18bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32); 18bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
19 19
20 20
21#define nvc0_ram_create(p,e,o,d) \ 21#define nvc0_ram_create(p,e,o,m,d) \
22 nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d) 22 nvc0_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
23int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *, 23int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
24 struct nouveau_oclass *, int, void **); 24 struct nouveau_oclass *, u32, int, void **);
25int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32, 25int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
26 struct nouveau_mem **); 26 struct nouveau_mem **);
27void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **); 27void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
28 28
29int nve0_ram_init(struct nouveau_object*);
30
29#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index edaf95dee612..da74c889aed4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -32,6 +32,7 @@ extern struct nouveau_oclass nva3_ram_oclass;
32extern struct nouveau_oclass nvaa_ram_oclass; 32extern struct nouveau_oclass nvaa_ram_oclass;
33extern struct nouveau_oclass nvc0_ram_oclass; 33extern struct nouveau_oclass nvc0_ram_oclass;
34extern struct nouveau_oclass nve0_ram_oclass; 34extern struct nouveau_oclass nve0_ram_oclass;
35extern struct nouveau_oclass gm107_ram_oclass;
35 36
36int nouveau_sddr3_calc(struct nouveau_ram *ram); 37int nouveau_sddr3_calc(struct nouveau_ram *ram);
37int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts); 38int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c
new file mode 100644
index 000000000000..4c6363595c79
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgm107.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27struct gm107_ram {
28 struct nouveau_ram base;
29};
30
31static int
32gm107_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct gm107_ram *ram;
37 int ret;
38
39 ret = nvc0_ram_create(parent, engine, oclass, 0x021c14, &ram);
40 *pobject = nv_object(ram);
41 if (ret)
42 return ret;
43
44 return 0;
45}
46
47struct nouveau_oclass
48gm107_ram_oclass = {
49 .handle = 0,
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = gm107_ram_ctor,
52 .dtor = _nouveau_ram_dtor,
53 .init = nve0_ram_init,
54 .fini = _nouveau_ram_fini,
55 }
56};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index c7fdb3a9e88b..ef91b6e893af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -91,7 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
91 } while (perfE.memory < freq); 91 } while (perfE.memory < freq);
92 92
93 /* locate specific data set for the attached memory */ 93 /* locate specific data set for the attached memory */
94 strap = nvbios_ramcfg_index(bios); 94 strap = nvbios_ramcfg_index(nv_subdev(pfb));
95 if (strap >= cnt) { 95 if (strap >= cnt) {
96 nv_error(pfb, "invalid ramcfg strap\n"); 96 nv_error(pfb, "invalid ramcfg strap\n");
97 return -EINVAL; 97 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index f4ae8aa46a25..6eb97f16fbda 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -98,7 +98,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
98 } 98 }
99 99
100 /* locate specific data set for the attached memory */ 100 /* locate specific data set for the attached memory */
101 strap = nvbios_ramcfg_index(bios); 101 strap = nvbios_ramcfg_index(nv_subdev(pfb));
102 if (strap >= cnt) { 102 if (strap >= cnt) {
103 nv_error(pfb, "invalid ramcfg strap\n"); 103 nv_error(pfb, "invalid ramcfg strap\n");
104 return -EINVAL; 104 return -EINVAL;
@@ -335,21 +335,23 @@ nva3_ram_init(struct nouveau_object *object)
335 /* prepare for ddr link training, and load training patterns */ 335 /* prepare for ddr link training, and load training patterns */
336 switch (ram->base.type) { 336 switch (ram->base.type) {
337 case NV_MEM_TYPE_DDR3: { 337 case NV_MEM_TYPE_DDR3: {
338 static const u32 pattern[16] = { 338 if (nv_device(pfb)->chipset == 0xa8) {
339 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee, 339 static const u32 pattern[16] = {
340 0x00000000, 0x11111111, 0x44444444, 0xdddddddd, 340 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
341 0x33333333, 0x55555555, 0x77777777, 0x66666666, 341 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
342 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb, 342 0x33333333, 0x55555555, 0x77777777, 0x66666666,
343 }; 343 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
344 344 };
345 nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/ 345
346 nv_wr32(pfb, 0x1005a8, 0x0000ffff); 346 nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
347 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001); 347 nv_wr32(pfb, 0x1005a8, 0x0000ffff);
348 for (i = 0; i < 0x30; i++) { 348 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
349 nv_wr32(pfb, 0x10f8c0, (i << 8) | i); 349 for (i = 0; i < 0x30; i++) {
350 nv_wr32(pfb, 0x10f8e0, (i << 8) | i); 350 nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
351 nv_wr32(pfb, 0x10f900, pattern[i % 16]); 351 nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
352 nv_wr32(pfb, 0x10f920, pattern[i % 16]); 352 nv_wr32(pfb, 0x10f900, pattern[i % 16]);
353 nv_wr32(pfb, 0x10f920, pattern[i % 16]);
354 }
353 } 355 }
354 } 356 }
355 break; 357 break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 0391b824ee76..8edc92224c84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -152,7 +152,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
152 } 152 }
153 153
154 /* locate specific data set for the attached memory */ 154 /* locate specific data set for the attached memory */
155 strap = nvbios_ramcfg_index(bios); 155 strap = nvbios_ramcfg_index(nv_subdev(pfb));
156 if (strap >= cnt) { 156 if (strap >= cnt) {
157 nv_error(pfb, "invalid ramcfg strap\n"); 157 nv_error(pfb, "invalid ramcfg strap\n");
158 return -EINVAL; 158 return -EINVAL;
@@ -505,7 +505,8 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
505 505
506int 506int
507nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine, 507nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
508 struct nouveau_oclass *oclass, int size, void **pobject) 508 struct nouveau_oclass *oclass, u32 maskaddr, int size,
509 void **pobject)
509{ 510{
510 struct nouveau_fb *pfb = nouveau_fb(parent); 511 struct nouveau_fb *pfb = nouveau_fb(parent);
511 struct nouveau_bios *bios = nouveau_bios(pfb); 512 struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -513,7 +514,7 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
513 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 514 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
514 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 515 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
515 u32 parts = nv_rd32(pfb, 0x022438); 516 u32 parts = nv_rd32(pfb, 0x022438);
516 u32 pmask = nv_rd32(pfb, 0x022554); 517 u32 pmask = nv_rd32(pfb, maskaddr);
517 u32 bsize = nv_rd32(pfb, 0x10f20c); 518 u32 bsize = nv_rd32(pfb, 0x10f20c);
518 u32 offset, length; 519 u32 offset, length;
519 bool uniform = true; 520 bool uniform = true;
@@ -630,7 +631,7 @@ nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
630 struct nvc0_ram *ram; 631 struct nvc0_ram *ram;
631 int ret; 632 int ret;
632 633
633 ret = nvc0_ram_create(parent, engine, oclass, &ram); 634 ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
634 *pobject = nv_object(ram); 635 *pobject = nv_object(ram);
635 if (ret) 636 if (ret)
636 return ret; 637 return ret;
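nvc0_ram_create_() now takes the address of the register holding the disabled-partition mask instead of hard-coding 0x022554, because Maxwell keeps it elsewhere. The call sites, collected from the hunks here and in ramgm107.c/ramnve0.c:

	/* maskaddr per call site (from the hunks in this series): */
	nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);  /* nvc0  */
	nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);  /* nve0  */
	nvc0_ram_create(parent, engine, oclass, 0x021c14, &ram);  /* gm107 */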
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 3257c522a021..16752192cf87 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -950,10 +950,11 @@ nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
950 } 950 }
951 951
952 /* locate specific data set for the attached memory */ 952 /* locate specific data set for the attached memory */
953 strap = nvbios_ramcfg_index(nv_subdev(pfb));
953 ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data, 954 ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
954 ram->base.rammap.version, 955 ram->base.rammap.version,
955 ram->base.rammap.size, cnt, len, 956 ram->base.rammap.size,
956 nvbios_ramcfg_index(bios), 957 cnt, len, strap,
957 &ram->base.ramcfg.version, 958 &ram->base.ramcfg.version,
958 &ram->base.ramcfg.size, 959 &ram->base.ramcfg.size,
959 &data->bios); 960 &data->bios);
@@ -1123,7 +1124,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
1123 ram_exec(fuc, false); 1124 ram_exec(fuc, false);
1124} 1125}
1125 1126
1126static int 1127int
1127nve0_ram_init(struct nouveau_object *object) 1128nve0_ram_init(struct nouveau_object *object)
1128{ 1129{
1129 struct nouveau_fb *pfb = (void *)object->parent; 1130 struct nouveau_fb *pfb = (void *)object->parent;
@@ -1226,7 +1227,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1226 int ret, i; 1227 int ret, i;
1227 u32 tmp; 1228 u32 tmp;
1228 1229
1229 ret = nvc0_ram_create(parent, engine, oclass, &ram); 1230 ret = nvc0_ram_create(parent, engine, oclass, 0x022554, &ram);
1230 *pobject = nv_object(ram); 1231 *pobject = nv_object(ram);
1231 if (ret) 1232 if (ret)
1232 return ret; 1233 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index c4c1d415e7fe..2ef774731629 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -46,7 +46,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
46 u8 unk0 = !!(data & 0x02000000); 46 u8 unk0 = !!(data & 0x02000000);
47 u8 unk1 = !!(data & 0x04000000); 47 u8 unk1 = !!(data & 0x04000000);
48 u32 val = (unk1 << 16) | unk0; 48 u32 val = (unk1 << 16) | unk0;
49 u32 reg = regs[line >> 4]; line &= 0x0f; 49 u32 reg = regs[line >> 4];
50 u32 lsh = line & 0x0f;
50 51
51 if ( func == DCB_GPIO_UNUSED || 52 if ( func == DCB_GPIO_UNUSED ||
52 (match != DCB_GPIO_UNUSED && match != func)) 53 (match != DCB_GPIO_UNUSED && match != func))
@@ -54,7 +55,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
54 55
55 gpio->set(gpio, 0, func, line, defs); 56 gpio->set(gpio, 0, func, line, defs);
56 57
57 nv_mask(priv, reg, 0x00010001 << line, val << line); 58 nv_mask(priv, reg, 0x00010001 << lsh, val << lsh);
58 } 59 }
59} 60}
60 61
@@ -79,7 +80,7 @@ nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
79 if (nv50_gpio_location(line, &reg, &shift)) 80 if (nv50_gpio_location(line, &reg, &shift))
80 return -EINVAL; 81 return -EINVAL;
81 82
82 nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift); 83 nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
83 return 0; 84 return 0;
84} 85}
85 86
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index c33c03d2f4af..378e05b88e6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -111,7 +111,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
111 snprintf(port->adapter.name, sizeof(port->adapter.name), 111 snprintf(port->adapter.name, sizeof(port->adapter.name),
112 "nouveau-%s-%d", device->name, index); 112 "nouveau-%s-%d", device->name, index);
113 port->adapter.owner = THIS_MODULE; 113 port->adapter.owner = THIS_MODULE;
114 port->adapter.dev.parent = &device->pdev->dev; 114 port->adapter.dev.parent = nv_device_base(device);
115 port->index = index; 115 port->index = index;
116 port->func = func; 116 port->func = func;
117 i2c_set_adapdata(&port->adapter, i2c); 117 i2c_set_adapdata(&port->adapter, i2c);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index ec0b9661d614..8803809f9fc5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -50,7 +50,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_object **pobject) 50 struct nouveau_object **pobject)
51{ 51{
52 struct nouveau_device *device = nv_device(parent); 52 struct nouveau_device *device = nv_device(parent);
53 struct pci_dev *pdev = device->pdev;
54 struct nv04_instmem_priv *priv; 53 struct nv04_instmem_priv *priv;
55 int ret, bar, vs; 54 int ret, bar, vs;
56 55
@@ -60,13 +59,13 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 return ret; 59 return ret;
61 60
62 /* map bar */ 61 /* map bar */
63 if (pci_resource_len(pdev, 2)) 62 if (nv_device_resource_len(device, 2))
64 bar = 2; 63 bar = 2;
65 else 64 else
66 bar = 3; 65 bar = 3;
67 66
68 priv->iomem = ioremap(pci_resource_start(pdev, bar), 67 priv->iomem = ioremap(nv_device_resource_start(device, bar),
69 pci_resource_len(pdev, bar)); 68 nv_device_resource_len(device, bar));
70 if (!priv->iomem) { 69 if (!priv->iomem) {
71 nv_error(priv, "unable to map PRAMIN BAR\n"); 70 nv_error(priv, "unable to map PRAMIN BAR\n");
72 return -EFAULT; 71 return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
index cce65cc56514..f2f3338a967a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
@@ -22,44 +22,35 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/ltcg.h>
26#include <subdev/fb.h> 25#include <subdev/fb.h>
27#include <subdev/timer.h> 26#include <subdev/timer.h>
28 27
29struct nvc0_ltcg_priv { 28#include "gf100.h"
30 struct nouveau_ltcg base;
31 u32 part_nr;
32 u32 subp_nr;
33 u32 num_tags;
34 u32 tag_base;
35 struct nouveau_mm tags;
36 struct nouveau_mm_node *tag_ram;
37};
38 29
39static void 30static void
40nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp) 31gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
41{ 32{
42 u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400); 33 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
43 u32 stat = nv_rd32(priv, subp_base + 0x020); 34 u32 stat = nv_rd32(priv, base + 0x020);
44 35
45 if (stat) { 36 if (stat) {
46 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat); 37 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
47 nv_wr32(priv, subp_base + 0x020, stat); 38 nv_wr32(priv, base + 0x020, stat);
48 } 39 }
49} 40}
50 41
51static void 42static void
52nvc0_ltcg_intr(struct nouveau_subdev *subdev) 43gf100_ltcg_intr(struct nouveau_subdev *subdev)
53{ 44{
54 struct nvc0_ltcg_priv *priv = (void *)subdev; 45 struct gf100_ltcg_priv *priv = (void *)subdev;
55 u32 units; 46 u32 mask;
56 47
57 units = nv_rd32(priv, 0x00017c); 48 mask = nv_rd32(priv, 0x00017c);
58 while (units) { 49 while (mask) {
59 u32 subp, unit = ffs(units) - 1; 50 u32 lts, ltc = __ffs(mask);
60 for (subp = 0; subp < priv->subp_nr; subp++) 51 for (lts = 0; lts < priv->lts_nr; lts++)
61 nvc0_ltcg_subp_isr(priv, unit, subp); 52 gf100_ltcg_lts_isr(priv, ltc, lts);
62 units &= ~(1 << unit); 53 mask &= ~(1 << ltc);
63 } 54 }
64 55
65 /* we do something horribly wrong and upset PMFB a lot, so mask off 56 /* we do something horribly wrong and upset PMFB a lot, so mask off
@@ -68,11 +59,11 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
68 nv_mask(priv, 0x000640, 0x02000000, 0x00000000); 59 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
69} 60}
70 61
71static int 62int
72nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n, 63gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
73 struct nouveau_mm_node **pnode) 64 struct nouveau_mm_node **pnode)
74{ 65{
75 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 66 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
76 int ret; 67 int ret;
77 68
78 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); 69 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
@@ -82,18 +73,18 @@ nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
82 return ret; 73 return ret;
83} 74}
84 75
85static void 76void
86nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode) 77gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
87{ 78{
88 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 79 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
89 80
90 nouveau_mm_free(&priv->tags, pnode); 81 nouveau_mm_free(&priv->tags, pnode);
91} 82}
92 83
93static void 84static void
94nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) 85gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
95{ 86{
96 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 87 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
97 u32 last = first + count - 1; 88 u32 last = first + count - 1;
98 int p, i; 89 int p, i;
99 90
@@ -104,16 +95,16 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
104 nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */ 95 nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
105 96
106 /* wait until it's finished with clearing */ 97 /* wait until it's finished with clearing */
107 for (p = 0; p < priv->part_nr; ++p) { 98 for (p = 0; p < priv->ltc_nr; ++p) {
108 for (i = 0; i < priv->subp_nr; ++i) 99 for (i = 0; i < priv->lts_nr; ++i)
109 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); 100 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
110 } 101 }
111} 102}
112 103
113/* TODO: Figure out tag memory details and drop the over-cautious allocation. 104/* TODO: Figure out tag memory details and drop the over-cautious allocation.
114 */ 105 */
115static int 106int
116nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) 107gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
117{ 108{
118 u32 tag_size, tag_margin, tag_align; 109 u32 tag_size, tag_margin, tag_align;
119 int ret; 110 int ret;
@@ -124,7 +115,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
124 priv->num_tags = 1 << 17; /* we have 17 bits in PTE */ 115 priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
125 priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */ 116 priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
126 117
127 tag_align = priv->part_nr * 0x800; 118 tag_align = priv->ltc_nr * 0x800;
128 tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align; 119 tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
129 120
130 /* 4 part 4 sub: 0x2000 bytes for 56 tags */ 121 /* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -157,11 +148,11 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
157} 148}
158 149
159static int 150static int
160nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 151gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
161 struct nouveau_oclass *oclass, void *data, u32 size, 152 struct nouveau_oclass *oclass, void *data, u32 size,
162 struct nouveau_object **pobject) 153 struct nouveau_object **pobject)
163{ 154{
164 struct nvc0_ltcg_priv *priv; 155 struct gf100_ltcg_priv *priv;
165 struct nouveau_fb *pfb = nouveau_fb(parent); 156 struct nouveau_fb *pfb = nouveau_fb(parent);
166 u32 parts, mask; 157 u32 parts, mask;
167 int ret, i; 158 int ret, i;
@@ -175,27 +166,27 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
175 mask = nv_rd32(priv, 0x022554); 166 mask = nv_rd32(priv, 0x022554);
176 for (i = 0; i < parts; i++) { 167 for (i = 0; i < parts; i++) {
177 if (!(mask & (1 << i))) 168 if (!(mask & (1 << i)))
178 priv->part_nr++; 169 priv->ltc_nr++;
179 } 170 }
180 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; 171 priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
181 172
182 ret = nvc0_ltcg_init_tag_ram(pfb, priv); 173 ret = gf100_ltcg_init_tag_ram(pfb, priv);
183 if (ret) 174 if (ret)
184 return ret; 175 return ret;
185 176
186 priv->base.tags_alloc = nvc0_ltcg_tags_alloc; 177 priv->base.tags_alloc = gf100_ltcg_tags_alloc;
187 priv->base.tags_free = nvc0_ltcg_tags_free; 178 priv->base.tags_free = gf100_ltcg_tags_free;
188 priv->base.tags_clear = nvc0_ltcg_tags_clear; 179 priv->base.tags_clear = gf100_ltcg_tags_clear;
189 180
190 nv_subdev(priv)->intr = nvc0_ltcg_intr; 181 nv_subdev(priv)->intr = gf100_ltcg_intr;
191 return 0; 182 return 0;
192} 183}
193 184
194static void 185void
195nvc0_ltcg_dtor(struct nouveau_object *object) 186gf100_ltcg_dtor(struct nouveau_object *object)
196{ 187{
197 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; 188 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
198 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 189 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
199 struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent); 190 struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
200 191
201 nouveau_mm_fini(&priv->tags); 192 nouveau_mm_fini(&priv->tags);
@@ -205,10 +196,10 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
205} 196}
206 197
207static int 198static int
208nvc0_ltcg_init(struct nouveau_object *object) 199gf100_ltcg_init(struct nouveau_object *object)
209{ 200{
210 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; 201 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
211 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; 202 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
212 int ret; 203 int ret;
213 204
214 ret = nouveau_ltcg_init(ltcg); 205 ret = nouveau_ltcg_init(ltcg);
@@ -216,20 +207,20 @@ nvc0_ltcg_init(struct nouveau_object *object)
216 return ret; 207 return ret;
217 208
218 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 209 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
219 nv_wr32(priv, 0x17e8d8, priv->part_nr); 210 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
220 if (nv_device(ltcg)->card_type >= NV_E0) 211 if (nv_device(ltcg)->card_type >= NV_E0)
221 nv_wr32(priv, 0x17e000, priv->part_nr); 212 nv_wr32(priv, 0x17e000, priv->ltc_nr);
222 nv_wr32(priv, 0x17e8d4, priv->tag_base); 213 nv_wr32(priv, 0x17e8d4, priv->tag_base);
223 return 0; 214 return 0;
224} 215}
225 216
226struct nouveau_oclass 217struct nouveau_oclass *
227nvc0_ltcg_oclass = { 218gf100_ltcg_oclass = &(struct nouveau_oclass) {
228 .handle = NV_SUBDEV(LTCG, 0xc0), 219 .handle = NV_SUBDEV(LTCG, 0xc0),
229 .ofuncs = &(struct nouveau_ofuncs) { 220 .ofuncs = &(struct nouveau_ofuncs) {
230 .ctor = nvc0_ltcg_ctor, 221 .ctor = gf100_ltcg_ctor,
231 .dtor = nvc0_ltcg_dtor, 222 .dtor = gf100_ltcg_dtor,
232 .init = nvc0_ltcg_init, 223 .init = gf100_ltcg_init,
233 .fini = _nouveau_ltcg_fini, 224 .fini = _nouveau_ltcg_fini,
234 }, 225 },
235}; 226};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
new file mode 100644
index 000000000000..87b10b8412ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
@@ -0,0 +1,21 @@
1#ifndef __NVKM_LTCG_PRIV_GF100_H__
2#define __NVKM_LTCG_PRIV_GF100_H__
3
4#include <subdev/ltcg.h>
5
6struct gf100_ltcg_priv {
7 struct nouveau_ltcg base;
8 u32 ltc_nr;
9 u32 lts_nr;
10 u32 num_tags;
11 u32 tag_base;
12 struct nouveau_mm tags;
13 struct nouveau_mm_node *tag_ram;
14};
15
16void gf100_ltcg_dtor(struct nouveau_object *);
17int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *);
18int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **);
19void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **);
20
21#endif
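
The new gf100.h above exports the gf100 constructor pieces, tag-RAM helpers and destructor so other chips can reuse them; the gm107 implementation that follows keeps gf100_ltcg_dtor and the tag helpers and overrides only the per-chip hooks. A self-contained sketch of that method-table pattern (all names here are illustrative, not the nouveau API):

    #include <stdio.h>

    struct ofuncs {
        void (*dtor)(void);
        int  (*init)(void);
    };

    static void gf100_dtor(void) { puts("shared gf100 dtor"); }
    static int  gm107_init(void) { puts("gm107-specific init"); return 0; }

    /* gm107 overrides init but reuses the destructor shared via the header */
    static const struct ofuncs gm107_ofuncs = {
        .dtor = gf100_dtor,
        .init = gm107_init,
    };

    int main(void)
    {
        gm107_ofuncs.init();
        gm107_ofuncs.dtor();
        return 0;
    }
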
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
new file mode 100644
index 000000000000..e79d0e81de40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
@@ -0,0 +1,142 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26#include <subdev/timer.h>
27
28#include "gf100.h"
29
30static void
31gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
32{
33 u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
34 u32 stat = nv_rd32(priv, base + 0x00c);
35
36 if (stat) {
37 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
38 nv_wr32(priv, base + 0x00c, stat);
39 }
40}
41
42static void
43gm107_ltcg_intr(struct nouveau_subdev *subdev)
44{
45 struct gf100_ltcg_priv *priv = (void *)subdev;
46 u32 mask;
47
48 mask = nv_rd32(priv, 0x00017c);
49 while (mask) {
50 u32 lts, ltc = __ffs(mask);
51 for (lts = 0; lts < priv->lts_nr; lts++)
52 gm107_ltcg_lts_isr(priv, ltc, lts);
53 mask &= ~(1 << ltc);
54 }
55
56 /* we do something horribly wrong and upset PMFB a lot, so mask off
57 * interrupts from it after the first one until it's fixed
58 */
59 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
60}
61
62static void
63gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
64{
65 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
66 u32 last = first + count - 1;
67 int p, i;
68
69 BUG_ON((first > last) || (last >= priv->num_tags));
70
71 nv_wr32(priv, 0x17e270, first);
72 nv_wr32(priv, 0x17e274, last);
73 nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */
74
75 /* wait until it's finished with clearing */
76 for (p = 0; p < priv->ltc_nr; ++p) {
77 for (i = 0; i < priv->lts_nr; ++i)
78 nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0);
79 }
80}
81
82static int
83gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject)
86{
87 struct gf100_ltcg_priv *priv;
88 struct nouveau_fb *pfb = nouveau_fb(parent);
89 u32 parts, mask;
90 int ret, i;
91
92 ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
93 *pobject = nv_object(priv);
94 if (ret)
95 return ret;
96
97 parts = nv_rd32(priv, 0x022438);
98 mask = nv_rd32(priv, 0x021c14);
99 for (i = 0; i < parts; i++) {
100 if (!(mask & (1 << i)))
101 priv->ltc_nr++;
102 }
103 priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
104
105 ret = gf100_ltcg_init_tag_ram(pfb, priv);
106 if (ret)
107 return ret;
108
109 priv->base.tags_alloc = gf100_ltcg_tags_alloc;
110 priv->base.tags_free = gf100_ltcg_tags_free;
111 priv->base.tags_clear = gm107_ltcg_tags_clear;
112
113 nv_subdev(priv)->intr = gm107_ltcg_intr;
114 return 0;
115}
116
117static int
118gm107_ltcg_init(struct nouveau_object *object)
119{
120 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
121 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
122 int ret;
123
124 ret = nouveau_ltcg_init(ltcg);
125 if (ret)
126 return ret;
127
128 nv_wr32(priv, 0x17e27c, priv->ltc_nr);
129 nv_wr32(priv, 0x17e278, priv->tag_base);
130 return 0;
131}
132
133struct nouveau_oclass *
134gm107_ltcg_oclass = &(struct nouveau_oclass) {
135 .handle = NV_SUBDEV(LTCG, 0xff),
136 .ofuncs = &(struct nouveau_ofuncs) {
137 .ctor = gm107_ltcg_ctor,
138 .dtor = gf100_ltcg_dtor,
139 .init = gm107_ltcg_init,
140 .fini = _nouveau_ltcg_fini,
141 },
142};
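
gm107_ltcg_intr() above services pending LTC units by repeatedly taking the lowest set bit of the status mask and clearing it once handled. A userspace sketch of the same loop, with GCC's __builtin_ctz standing in for the kernel's __ffs (the mask value is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mask = 0x25;                  /* LTCs 0, 2 and 5 pending */

        while (mask) {
            int ltc = __builtin_ctz(mask);     /* index of lowest set bit */
            printf("servicing LTC%d\n", ltc);
            mask &= ~(1u << ltc);              /* clear the handled bit */
        }
        return 0;
    }
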
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index b4b9943773bc..8a5555192fa5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -93,7 +93,7 @@ _nouveau_mc_dtor(struct nouveau_object *object)
93{ 93{
94 struct nouveau_device *device = nv_device(object); 94 struct nouveau_device *device = nv_device(object);
95 struct nouveau_mc *pmc = (void *)object; 95 struct nouveau_mc *pmc = (void *)object;
96 free_irq(device->pdev->irq, pmc); 96 free_irq(pmc->irq, pmc);
97 if (pmc->use_msi) 97 if (pmc->use_msi)
98 pci_disable_msi(device->pdev); 98 pci_disable_msi(device->pdev);
99 nouveau_subdev_destroy(&pmc->base); 99 nouveau_subdev_destroy(&pmc->base);
@@ -114,33 +114,44 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
114 if (ret) 114 if (ret)
115 return ret; 115 return ret;
116 116
117 switch (device->pdev->device & 0x0ff0) { 117 if (nv_device_is_pci(device))
118 case 0x00f0: 118 switch (device->pdev->device & 0x0ff0) {
119 case 0x02e0: 119 case 0x00f0:
120 /* BR02? NFI how these would be handled yet exactly */ 120 case 0x02e0:
121 break; 121 /* BR02? NFI how these would be handled yet exactly */
122 default:
123 switch (device->chipset) {
 124 case 0xaa: break; /* reported broken, nv also disables it */

125 default:
126 pmc->use_msi = true;
127 break; 122 break;
123 default:
124 switch (device->chipset) {
125 case 0xaa:
 126 /* reported broken, nv also disables it */
127 break;
128 default:
129 pmc->use_msi = true;
130 break;
128 } 131 }
129 }
130 132
131 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi); 133 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI",
132 if (pmc->use_msi && oclass->msi_rearm) { 134 pmc->use_msi);
133 pmc->use_msi = pci_enable_msi(device->pdev) == 0; 135
134 if (pmc->use_msi) { 136 if (pmc->use_msi && oclass->msi_rearm) {
135 nv_info(pmc, "MSI interrupts enabled\n"); 137 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
136 oclass->msi_rearm(pmc); 138 if (pmc->use_msi) {
139 nv_info(pmc, "MSI interrupts enabled\n");
140 oclass->msi_rearm(pmc);
141 }
142 } else {
143 pmc->use_msi = false;
137 } 144 }
138 } else {
139 pmc->use_msi = false;
140 } 145 }
141 146
142 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 147 ret = nv_device_get_irq(device, true);
143 IRQF_SHARED, "nouveau", pmc); 148 if (ret < 0)
149 return ret;
150 pmc->irq = ret;
151
152 ret = request_irq(pmc->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau",
153 pmc);
154
144 if (ret < 0) 155 if (ret < 0)
145 return ret; 156 return ret;
146 157
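
The mc/base.c hunk above stops assuming a PCI device: MSI setup is attempted only when nv_device_is_pci() holds, and the interrupt line now comes from nv_device_get_irq() so platform (non-PCI) devices work too. A condensed sketch of the resulting flow — nv_device_is_pci() and nv_device_get_irq() follow the diff, while try_enable_msi() is a hypothetical stand-in for the MSI block:

    /* Sketch only -- condensed from the hunk above, not a drop-in. */
    static int mc_setup_irq(struct nouveau_device *device,
                            struct nouveau_mc *pmc)
    {
        int irq;

        if (nv_device_is_pci(device))
            try_enable_msi(device, pmc);  /* hypothetical: the MSI block */

        irq = nv_device_get_irq(device, true);  /* PCI or platform IRQ */
        if (irq < 0)
            return irq;
        pmc->irq = irq;

        return request_irq(pmc->irq, nouveau_mc_intr, IRQF_SHARED,
                           "nouveau", pmc);
    }
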
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 13c5af88a601..51fcf7960417 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -96,7 +96,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
96 acpi_handle handle; 96 acpi_handle handle;
97 int rev; 97 int rev;
98 98
99 handle = ACPI_HANDLE(&device->pdev->dev); 99 handle = ACPI_HANDLE(nv_device_base(device));
100 if (!handle) 100 if (!handle)
101 return false; 101 return false;
102 102
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 80e584a1bd1c..9ad01da6eacb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -110,16 +110,18 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
110 poll = false; 110 poll = false;
111 break; 111 break;
112 case NOUVEAU_THERM_CTRL_AUTO: 112 case NOUVEAU_THERM_CTRL_AUTO:
113 if (priv->fan->bios.nr_fan_trip) { 113 switch(priv->fan->bios.fan_mode) {
114 case NVBIOS_THERM_FAN_TRIP:
114 duty = nouveau_therm_update_trip(therm); 115 duty = nouveau_therm_update_trip(therm);
115 } else 116 break;
116 if (priv->fan->bios.linear_min_temp || 117 case NVBIOS_THERM_FAN_LINEAR:
117 priv->fan->bios.linear_max_temp) {
118 duty = nouveau_therm_update_linear(therm); 118 duty = nouveau_therm_update_linear(therm);
119 } else { 119 break;
120 case NVBIOS_THERM_FAN_OTHER:
120 if (priv->cstate) 121 if (priv->cstate)
121 duty = priv->cstate; 122 duty = priv->cstate;
122 poll = false; 123 poll = false;
124 break;
123 } 125 }
124 immd = false; 126 immd = false;
125 break; 127 break;
@@ -179,7 +181,7 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
179 181
180 /* do not allow automatic fan management if the thermal sensor is 182 /* do not allow automatic fan management if the thermal sensor is
181 * not available */ 183 * not available */
182 if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0) 184 if (mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
183 return -EINVAL; 185 return -EINVAL;
184 186
185 if (priv->mode == mode) 187 if (priv->mode == mode)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 95f6129eeede..016990a8252c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -54,8 +54,10 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
54 54
55 /* check that we're not already at the target duty cycle */ 55 /* check that we're not already at the target duty cycle */
56 duty = fan->get(therm); 56 duty = fan->get(therm);
57 if (duty == target) 57 if (duty == target) {
58 goto done; 58 spin_unlock_irqrestore(&fan->lock, flags);
59 return 0;
60 }
59 61
60 /* smooth out the fanspeed increase/decrease */ 62 /* smooth out the fanspeed increase/decrease */
61 if (!immediate && duty >= 0) { 63 if (!immediate && duty >= 0) {
@@ -73,8 +75,15 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
73 75
74 nv_debug(therm, "FAN update: %d\n", duty); 76 nv_debug(therm, "FAN update: %d\n", duty);
75 ret = fan->set(therm, duty); 77 ret = fan->set(therm, duty);
76 if (ret) 78 if (ret) {
77 goto done; 79 spin_unlock_irqrestore(&fan->lock, flags);
80 return ret;
81 }
82
83 /* fan speed updated, drop the fan lock before grabbing the
84 * alarm-scheduling lock and risking a deadlock
85 */
86 spin_unlock_irqrestore(&fan->lock, flags);
78 87
79 /* schedule next fan update, if not at target speed already */ 88 /* schedule next fan update, if not at target speed already */
80 if (list_empty(&fan->alarm.head) && target != duty) { 89 if (list_empty(&fan->alarm.head) && target != duty) {
@@ -92,8 +101,6 @@ nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
92 ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm); 101 ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
93 } 102 }
94 103
95done:
96 spin_unlock_irqrestore(&fan->lock, flags);
97 return ret; 104 return ret;
98} 105}
99 106
@@ -185,11 +192,8 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
185 priv->fan->bios.max_duty = 100; 192 priv->fan->bios.max_duty = 100;
186 priv->fan->bios.bump_period = 500; 193 priv->fan->bios.bump_period = 500;
187 priv->fan->bios.slow_down_period = 2000; 194 priv->fan->bios.slow_down_period = 2000;
188/*XXX: talk to mupuf */
189#if 0
190 priv->fan->bios.linear_min_temp = 40; 195 priv->fan->bios.linear_min_temp = 40;
191 priv->fan->bios.linear_max_temp = 85; 196 priv->fan->bios.linear_max_temp = 85;
192#endif
193} 197}
194 198
195static void 199static void
@@ -235,7 +239,8 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
235 /* attempt to locate a drivable fan, and determine control method */ 239 /* attempt to locate a drivable fan, and determine control method */
236 ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func); 240 ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
237 if (ret == 0) { 241 if (ret == 0) {
 238 if (func.log[0] &amp; DCB_GPIO_LOG_DIR_IN) { 242 /* FIXME: is this really the place to perform such checks? */
243 if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
239 nv_debug(therm, "GPIO_FAN is in input mode\n"); 244 nv_debug(therm, "GPIO_FAN is in input mode\n");
240 ret = -EINVAL; 245 ret = -EINVAL;
241 } else { 246 } else {
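
The fan.c change above removes the shared "done:" unlock so that fan->lock is released on every path before ptimer->alarm() is called, since the alarm path takes the alarm-scheduling lock; holding the two locks in opposite orders on two CPUs is the classic AB/BA deadlock. A small pthread sketch of the safe ordering (illustrative names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t fan_lock   = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t alarm_lock = PTHREAD_MUTEX_INITIALIZER;

    static void fan_update(void)
    {
        pthread_mutex_lock(&fan_lock);
        /* ... compute and apply the new duty cycle ... */
        pthread_mutex_unlock(&fan_lock);   /* drop lock A before taking B */

        pthread_mutex_lock(&alarm_lock);   /* never held together with A */
        /* ... schedule the next update ... */
        pthread_mutex_unlock(&alarm_lock);
    }

    int main(void)
    {
        fan_update();
        puts("locks are never nested, so no AB/BA deadlock is possible");
        return 0;
    }
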
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
index 5f71db8e8992..9a5c07340263 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -67,7 +67,7 @@ nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
67 if (priv->base.bios.pwm_freq) { 67 if (priv->base.bios.pwm_freq) {
68 divs = 1; 68 divs = 1;
69 if (therm->pwm_clock) 69 if (therm->pwm_clock)
70 divs = therm->pwm_clock(therm); 70 divs = therm->pwm_clock(therm, priv->func.line);
71 divs /= priv->base.bios.pwm_freq; 71 divs /= priv->base.bios.pwm_freq;
72 } 72 }
73 73
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 8cf7597a2182..321db927d638 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -93,7 +93,7 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
93} 93}
94 94
95int 95int
96nv50_fan_pwm_clock(struct nouveau_therm *therm) 96nv50_fan_pwm_clock(struct nouveau_therm *therm, int line)
97{ 97{
98 int chipset = nv_device(therm)->chipset; 98 int chipset = nv_device(therm)->chipset;
99 int crystal = nv_device(therm)->crystal; 99 int crystal = nv_device(therm)->crystal;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 4dd4f81ae873..43fec17ea540 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -32,10 +32,12 @@ static int
32pwm_info(struct nouveau_therm *therm, int line) 32pwm_info(struct nouveau_therm *therm, int line)
33{ 33{
34 u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04)); 34 u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
35
35 switch (gpio & 0x000000c0) { 36 switch (gpio & 0x000000c0) {
36 case 0x00000000: /* normal mode, possibly pwm forced off by us */ 37 case 0x00000000: /* normal mode, possibly pwm forced off by us */
37 case 0x00000040: /* nvio special */ 38 case 0x00000040: /* nvio special */
38 switch (gpio & 0x0000001f) { 39 switch (gpio & 0x0000001f) {
40 case 0x00: return 2;
39 case 0x19: return 1; 41 case 0x19: return 1;
40 case 0x1c: return 0; 42 case 0x1c: return 0;
41 default: 43 default:
@@ -56,8 +58,9 @@ nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
56 int indx = pwm_info(therm, line); 58 int indx = pwm_info(therm, line);
57 if (indx < 0) 59 if (indx < 0)
58 return indx; 60 return indx;
59 61 else if (indx < 2)
60 nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data); 62 nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
63 /* nothing to do for indx == 2, it seems hardwired to PTHERM */
61 return 0; 64 return 0;
62} 65}
63 66
@@ -67,10 +70,15 @@ nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
67 int indx = pwm_info(therm, line); 70 int indx = pwm_info(therm, line);
68 if (indx < 0) 71 if (indx < 0)
69 return indx; 72 return indx;
70 73 else if (indx < 2) {
71 if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) { 74 if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
72 *divs = nv_rd32(therm, 0x00e114 + (indx * 8)); 75 *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
73 *duty = nv_rd32(therm, 0x00e118 + (indx * 8)); 76 *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
77 return 0;
78 }
79 } else if (indx == 2) {
80 *divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
81 *duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
74 return 0; 82 return 0;
75 } 83 }
76 84
@@ -83,16 +91,26 @@ nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
83 int indx = pwm_info(therm, line); 91 int indx = pwm_info(therm, line);
84 if (indx < 0) 92 if (indx < 0)
85 return indx; 93 return indx;
86 94 else if (indx < 2) {
87 nv_wr32(therm, 0x00e114 + (indx * 8), divs); 95 nv_wr32(therm, 0x00e114 + (indx * 8), divs);
88 nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000); 96 nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
97 } else if (indx == 2) {
98 nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
99 nv_wr32(therm, 0x0200dc, duty | 0x40000000);
100 }
89 return 0; 101 return 0;
90} 102}
91 103
92static int 104static int
93nvd0_fan_pwm_clock(struct nouveau_therm *therm) 105nvd0_fan_pwm_clock(struct nouveau_therm *therm, int line)
94{ 106{
95 return (nv_device(therm)->crystal * 1000) / 20; 107 int indx = pwm_info(therm, line);
108 if (indx < 0)
109 return 0;
110 else if (indx < 2)
111 return (nv_device(therm)->crystal * 1000) / 20;
112 else
113 return nv_device(therm)->crystal * 1000 / 10;
96} 114}
97 115
98static int 116static int
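
With pwm_clock() now taking the GPIO line, fanpwm.c (earlier in this series of hunks) derives the PWM period as the source clock divided by the VBIOS PWM frequency; the duty cycle is then a fraction of that period. A worked sketch of the arithmetic — the percent-to-duty scaling is an assumption for illustration, and the constants are made up:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t crystal_khz = 27000;                   /* 27 MHz reference */
        uint32_t pwm_clock   = crystal_khz * 1000 / 20; /* nvd0, indx < 2 */
        uint32_t pwm_freq    = 25000;                   /* VBIOS target, Hz */
        uint32_t percent     = 40;

        uint32_t divs = pwm_clock / pwm_freq;           /* counter period */
        uint32_t duty = divs * percent / 100;           /* on-time, ticks */

        printf("divs=%u duty=%u\n", divs, duty);        /* divs=54 duty=21 */
        return 0;
    }
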
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 96f8f95693ce..916fca5c7816 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -143,7 +143,7 @@ void nv40_therm_intr(struct nouveau_subdev *);
143int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool); 143int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
144int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *); 144int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); 145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
146int nv50_fan_pwm_clock(struct nouveau_therm *); 146int nv50_fan_pwm_clock(struct nouveau_therm *, int);
147int nv84_temp_get(struct nouveau_therm *therm); 147int nv84_temp_get(struct nouveau_therm *therm);
148int nv84_therm_fini(struct nouveau_object *object, bool suspend); 148int nv84_therm_fini(struct nouveau_object *object, bool suspend);
149 149
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c
new file mode 100644
index 000000000000..37484db1f7fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27static int
28gk20a_timer_init(struct nouveau_object *object)
29{
30 struct nv04_timer_priv *priv = (void *)object;
31 u32 hi = upper_32_bits(priv->suspend_time);
32 u32 lo = lower_32_bits(priv->suspend_time);
33 int ret;
34
35 ret = nouveau_timer_init(&priv->base);
36 if (ret)
37 return ret;
38
39 nv_debug(priv, "time low : 0x%08x\n", lo);
40 nv_debug(priv, "time high : 0x%08x\n", hi);
41
42 /* restore the time before suspend */
43 nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
44 nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
45 return 0;
46}
47
48struct nouveau_oclass
49gk20a_timer_oclass = {
50 .handle = NV_SUBDEV(TIMER, 0xff),
51 .ofuncs = &(struct nouveau_ofuncs) {
52 .ctor = nv04_timer_ctor,
53 .dtor = nv04_timer_dtor,
54 .init = gk20a_timer_init,
55 .fini = nv04_timer_fini,
56 }
57};
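
gk20a_timer_init() above restores the 64-bit timestamp saved at suspend by writing its halves into two 32-bit registers. The hi/lo split is just shifts and truncation, as upper_32_bits()/lower_32_bits() do in the kernel:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t suspend_time = 0x123456789abcdef0ULL;  /* made-up value */
        uint32_t hi = (uint32_t)(suspend_time >> 32);   /* upper_32_bits() */
        uint32_t lo = (uint32_t)suspend_time;           /* lower_32_bits() */

        printf("TIME_1=0x%08x TIME_0=0x%08x\n", hi, lo);
        return 0;
    }
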
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c0bdd10358d7..240ed0b983a9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -22,22 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/timer.h> 25#include "nv04.h"
26
27#define NV04_PTIMER_INTR_0 0x009100
28#define NV04_PTIMER_INTR_EN_0 0x009140
29#define NV04_PTIMER_NUMERATOR 0x009200
30#define NV04_PTIMER_DENOMINATOR 0x009210
31#define NV04_PTIMER_TIME_0 0x009400
32#define NV04_PTIMER_TIME_1 0x009410
33#define NV04_PTIMER_ALARM_0 0x009420
34
35struct nv04_timer_priv {
36 struct nouveau_timer base;
37 struct list_head alarms;
38 spinlock_t lock;
39 u64 suspend_time;
40};
41 26
42static u64 27static u64
43nv04_timer_read(struct nouveau_timer *ptimer) 28nv04_timer_read(struct nouveau_timer *ptimer)
@@ -142,35 +127,14 @@ nv04_timer_intr(struct nouveau_subdev *subdev)
142 } 127 }
143} 128}
144 129
145static int 130int
146nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 131nv04_timer_fini(struct nouveau_object *object, bool suspend)
147 struct nouveau_oclass *oclass, void *data, u32 size,
148 struct nouveau_object **pobject)
149{
150 struct nv04_timer_priv *priv;
151 int ret;
152
153 ret = nouveau_timer_create(parent, engine, oclass, &priv);
154 *pobject = nv_object(priv);
155 if (ret)
156 return ret;
157
158 priv->base.base.intr = nv04_timer_intr;
159 priv->base.read = nv04_timer_read;
160 priv->base.alarm = nv04_timer_alarm;
161 priv->base.alarm_cancel = nv04_timer_alarm_cancel;
162 priv->suspend_time = 0;
163
164 INIT_LIST_HEAD(&priv->alarms);
165 spin_lock_init(&priv->lock);
166 return 0;
167}
168
169static void
170nv04_timer_dtor(struct nouveau_object *object)
171{ 132{
172 struct nv04_timer_priv *priv = (void *)object; 133 struct nv04_timer_priv *priv = (void *)object;
173 return nouveau_timer_destroy(&priv->base); 134 if (suspend)
135 priv->suspend_time = nv04_timer_read(&priv->base);
136 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
137 return nouveau_timer_fini(&priv->base, suspend);
174} 138}
175 139
176static int 140static int
@@ -257,14 +221,35 @@ nv04_timer_init(struct nouveau_object *object)
257 return 0; 221 return 0;
258} 222}
259 223
260static int 224void
261nv04_timer_fini(struct nouveau_object *object, bool suspend) 225nv04_timer_dtor(struct nouveau_object *object)
262{ 226{
263 struct nv04_timer_priv *priv = (void *)object; 227 struct nv04_timer_priv *priv = (void *)object;
264 if (suspend) 228 return nouveau_timer_destroy(&priv->base);
265 priv->suspend_time = nv04_timer_read(&priv->base); 229}
266 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); 230
267 return nouveau_timer_fini(&priv->base, suspend); 231int
232nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
233 struct nouveau_oclass *oclass, void *data, u32 size,
234 struct nouveau_object **pobject)
235{
236 struct nv04_timer_priv *priv;
237 int ret;
238
239 ret = nouveau_timer_create(parent, engine, oclass, &priv);
240 *pobject = nv_object(priv);
241 if (ret)
242 return ret;
243
244 priv->base.base.intr = nv04_timer_intr;
245 priv->base.read = nv04_timer_read;
246 priv->base.alarm = nv04_timer_alarm;
247 priv->base.alarm_cancel = nv04_timer_alarm_cancel;
248 priv->suspend_time = 0;
249
250 INIT_LIST_HEAD(&priv->alarms);
251 spin_lock_init(&priv->lock);
252 return 0;
268} 253}
269 254
270struct nouveau_oclass 255struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h
new file mode 100644
index 000000000000..4bc152697c37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h
@@ -0,0 +1,27 @@
1#ifndef __NVKM_TIMER_NV04_H__
2#define __NVKM_TIMER_NV04_H__
3
4#include "priv.h"
5
6#define NV04_PTIMER_INTR_0 0x009100
7#define NV04_PTIMER_INTR_EN_0 0x009140
8#define NV04_PTIMER_NUMERATOR 0x009200
9#define NV04_PTIMER_DENOMINATOR 0x009210
10#define NV04_PTIMER_TIME_0 0x009400
11#define NV04_PTIMER_TIME_1 0x009410
12#define NV04_PTIMER_ALARM_0 0x009420
13
14struct nv04_timer_priv {
15 struct nouveau_timer base;
16 struct list_head alarms;
17 spinlock_t lock;
18 u64 suspend_time;
19};
20
21int nv04_timer_ctor(struct nouveau_object *, struct nouveau_object *,
22 struct nouveau_oclass *, void *, u32,
23 struct nouveau_object **);
24void nv04_timer_dtor(struct nouveau_object *);
25int nv04_timer_fini(struct nouveau_object *, bool);
26
27#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h
new file mode 100644
index 000000000000..799dae3f2300
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/priv.h
@@ -0,0 +1,6 @@
1#ifndef __NVKM_TIMER_PRIV_H__
2#define __NVKM_TIMER_PRIV_H__
3
4#include <subdev/timer.h>
5
6#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0e3270c3ffd2..41be3424c906 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -239,7 +239,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
239 struct drm_device *dev = crtc->dev; 239 struct drm_device *dev = crtc->dev;
240 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 240 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
241 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; 241 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
242 struct drm_framebuffer *fb = crtc->fb; 242 struct drm_framebuffer *fb = crtc->primary->fb;
243 243
244 /* Calculate our timings */ 244 /* Calculate our timings */
245 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; 245 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
@@ -574,7 +574,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
574 regp->CRTC[NV_CIO_CRE_86] = 0x1; 574 regp->CRTC[NV_CIO_CRE_86] = 0x1;
575 } 575 }
576 576
577 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8; 577 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->primary->fb->depth + 1) / 8;
578 /* Enable slaved mode (called MODE_TV in nv4ref.h) */ 578 /* Enable slaved mode (called MODE_TV in nv4ref.h) */
579 if (lvds_output || tmds_output || tv_output) 579 if (lvds_output || tmds_output || tv_output)
580 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7); 580 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
@@ -588,7 +588,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
588 regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | 588 regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
589 NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL | 589 NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
590 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; 590 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
591 if (crtc->fb->depth == 16) 591 if (crtc->primary->fb->depth == 16)
592 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 592 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
593 if (nv_device(drm->device)->chipset >= 0x11) 593 if (nv_device(drm->device)->chipset >= 0x11)
594 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; 594 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
@@ -609,7 +609,7 @@ static int
609nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) 609nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
610{ 610{
611 struct nv04_display *disp = nv04_display(crtc->dev); 611 struct nv04_display *disp = nv04_display(crtc->dev);
612 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); 612 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
613 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 613 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
614 int ret; 614 int ret;
615 615
@@ -808,7 +808,7 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
808 * mark the lut values as dirty by setting depth==0, and it'll be 808 * mark the lut values as dirty by setting depth==0, and it'll be
809 * uploaded on the first mode_set_base() 809 * uploaded on the first mode_set_base()
810 */ 810 */
811 if (!nv_crtc->base.fb) { 811 if (!nv_crtc->base.primary->fb) {
812 nv_crtc->lut.depth = 0; 812 nv_crtc->lut.depth = 0;
813 return; 813 return;
814 } 814 }
@@ -832,7 +832,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
832 NV_DEBUG(drm, "index %d\n", nv_crtc->index); 832 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
833 833
834 /* no fb bound */ 834 /* no fb bound */
835 if (!atomic && !crtc->fb) { 835 if (!atomic && !crtc->primary->fb) {
836 NV_DEBUG(drm, "No FB bound\n"); 836 NV_DEBUG(drm, "No FB bound\n");
837 return 0; 837 return 0;
838 } 838 }
@@ -844,8 +844,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
844 drm_fb = passed_fb; 844 drm_fb = passed_fb;
845 fb = nouveau_framebuffer(passed_fb); 845 fb = nouveau_framebuffer(passed_fb);
846 } else { 846 } else {
847 drm_fb = crtc->fb; 847 drm_fb = crtc->primary->fb;
848 fb = nouveau_framebuffer(crtc->fb); 848 fb = nouveau_framebuffer(crtc->primary->fb);
849 } 849 }
850 850
851 nv_crtc->fb.offset = fb->nvbo->bo.offset; 851 nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -857,9 +857,9 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
857 857
858 /* Update the framebuffer format. */ 858 /* Update the framebuffer format. */
859 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3; 859 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
860 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8; 860 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->primary->fb->depth + 1) / 8;
861 regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 861 regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
862 if (crtc->fb->depth == 16) 862 if (crtc->primary->fb->depth == 16)
863 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 863 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
864 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX); 864 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
865 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, 865 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
@@ -1048,7 +1048,7 @@ nouveau_crtc_set_config(struct drm_mode_set *set)
1048 1048
1049 /* get a pm reference here */ 1049 /* get a pm reference here */
1050 ret = pm_runtime_get_sync(dev->dev); 1050 ret = pm_runtime_get_sync(dev->dev);
1051 if (ret < 0) 1051 if (ret < 0 && ret != -EACCES)
1052 return ret; 1052 return ret;
1053 1053
1054 ret = drm_crtc_helper_set_config(set); 1054 ret = drm_crtc_helper_set_config(set);
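
The "ret < 0 && ret != -EACCES" test above, repeated in several hunks below, reflects that pm_runtime_get_sync() returns -EACCES when runtime PM is disabled on the device — meaning it is permanently powered rather than failed — so that case is treated as success. A sketch of the convention (not a helper from the driver):

    #include <linux/pm_runtime.h>

    static int nouveau_power_get(struct device *dev)
    {
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0 && ret != -EACCES)   /* a real failure */
            return ret;
        return 0;                        /* powered, or runtime PM disabled */
    }
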
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7fdc51e2a571..a2d669b4acf2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -415,7 +415,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
415 /* Output property. */ 415 /* Output property. */
416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || 416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO && 417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) { 418 encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
419 if (nv_device(drm->device)->chipset == 0x11) 419 if (nv_device(drm->device)->chipset == 0x11)
420 regp->dither = savep->dither | 0x00010000; 420 regp->dither = savep->dither | 0x00010000;
421 else { 421 else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 900fae01793e..b13f441c6431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -97,6 +97,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
97 case NV_C0: 97 case NV_C0:
98 case NV_D0: 98 case NV_D0:
99 case NV_E0: 99 case NV_E0:
100 case GM100:
100 return 0x906e; 101 return 0x906e;
101 } 102 }
102 103
@@ -139,7 +140,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
139 140
140 /* destroy channel object, all children will be killed too */ 141 /* destroy channel object, all children will be killed too */
141 if (chan->chan) { 142 if (chan->chan) {
142 abi16->handles &= ~(1 << (chan->chan->handle & 0xffff)); 143 abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff));
143 nouveau_channel_del(&chan->chan); 144 nouveau_channel_del(&chan->chan);
144 } 145 }
145 146
@@ -179,12 +180,21 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
179 getparam->value = device->chipset; 180 getparam->value = device->chipset;
180 break; 181 break;
181 case NOUVEAU_GETPARAM_PCI_VENDOR: 182 case NOUVEAU_GETPARAM_PCI_VENDOR:
182 getparam->value = dev->pdev->vendor; 183 if (nv_device_is_pci(device))
184 getparam->value = dev->pdev->vendor;
185 else
186 getparam->value = 0;
183 break; 187 break;
184 case NOUVEAU_GETPARAM_PCI_DEVICE: 188 case NOUVEAU_GETPARAM_PCI_DEVICE:
185 getparam->value = dev->pdev->device; 189 if (nv_device_is_pci(device))
190 getparam->value = dev->pdev->device;
191 else
192 getparam->value = 0;
186 break; 193 break;
187 case NOUVEAU_GETPARAM_BUS_TYPE: 194 case NOUVEAU_GETPARAM_BUS_TYPE:
195 if (!nv_device_is_pci(device))
196 getparam->value = 3;
197 else
188 if (drm_pci_device_is_agp(dev)) 198 if (drm_pci_device_is_agp(dev))
189 getparam->value = 0; 199 getparam->value = 0;
190 else 200 else
@@ -270,8 +280,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
270 return nouveau_abi16_put(abi16, -EINVAL); 280 return nouveau_abi16_put(abi16, -EINVAL);
271 281
272 /* allocate "abi16 channel" data and make up a handle for it */ 282 /* allocate "abi16 channel" data and make up a handle for it */
273 init->channel = ffsll(~abi16->handles); 283 init->channel = __ffs64(~abi16->handles);
274 if (!init->channel--) 284 if (~abi16->handles == 0)
275 return nouveau_abi16_put(abi16, -ENOSPC); 285 return nouveau_abi16_put(abi16, -ENOSPC);
276 286
277 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 287 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
@@ -280,7 +290,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
280 290
281 INIT_LIST_HEAD(&chan->notifiers); 291 INIT_LIST_HEAD(&chan->notifiers);
282 list_add(&chan->head, &abi16->channels); 292 list_add(&chan->head, &abi16->channels);
283 abi16->handles |= (1 << init->channel); 293 abi16->handles |= (1ULL << init->channel);
284 294
285 /* create channel object and initialise dma and fence management */ 295 /* create channel object and initialise dma and fence management */
286 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | 296 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
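
The abi16 hunks above fix a 32-bit truncation: with handles being a 64-bit bitmap, "1 << n" overflows for channels 32-63, so the shifts become 1ULL and the free-slot search uses __ffs64 with an explicit full-bitmap check. A userspace sketch of the allocator, using __builtin_ctzll in place of __ffs64:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t handles;

    static int alloc_id(void)
    {
        if (~handles == 0)
            return -1;                       /* all 64 slots in use */
        int id = __builtin_ctzll(~handles);  /* lowest clear bit (__ffs64) */
        handles |= 1ULL << id;               /* ULL keeps ids >= 32 correct */
        return id;
    }

    int main(void)
    {
        handles = ~0ULL >> 1;                /* only slot 63 free */
        printf("allocated id %d\n", alloc_id());  /* prints 63 */
        return 0;
    }
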
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 2953c4e91e1a..51666daddb94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -75,7 +75,7 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
75{ 75{
76 struct drm_device *dev = drm->dev; 76 struct drm_device *dev = drm->dev;
77 77
78 if (!drm_pci_device_is_agp(dev) || !dev->agp) 78 if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
79 return false; 79 return false;
80 80
81 if (drm->agp.stat == UNKNOWN) { 81 if (drm->agp.stat == UNKNOWN) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 4c3feaaa1037..8268a4ccac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1474,9 +1474,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1474 case 0: 1474 case 0:
1475 entry->dpconf.link_bw = 162000; 1475 entry->dpconf.link_bw = 162000;
1476 break; 1476 break;
1477 default: 1477 case 1:
1478 entry->dpconf.link_bw = 270000; 1478 entry->dpconf.link_bw = 270000;
1479 break; 1479 break;
1480 default:
1481 entry->dpconf.link_bw = 540000;
1482 break;
1480 } 1483 }
1481 switch ((conf & 0x0f000000) >> 24) { 1484 switch ((conf & 0x0f000000) >> 24) {
1482 case 0xf: 1485 case 0xf:
@@ -2069,6 +2072,10 @@ nouveau_bios_init(struct drm_device *dev)
2069 struct nvbios *bios = &drm->vbios; 2072 struct nvbios *bios = &drm->vbios;
2070 int ret; 2073 int ret;
2071 2074
2075 /* only relevant for PCI devices */
2076 if (!dev->pdev)
2077 return 0;
2078
2072 if (!NVInitVBIOS(dev)) 2079 if (!NVInitVBIOS(dev))
2073 return -ENODEV; 2080 return -ENODEV;
2074 2081
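
parse_dcb20_entry() above extends the DCB link-rate decode: field value 0 stays 1.62 GHz, 1 is now explicitly 2.70 GHz, and anything newer defaults to 5.40 GHz (HBR2). The decode as a standalone function, in the same 10 kHz units as the hunk:

    #include <stdio.h>

    static int dp_link_bw(unsigned field)
    {
        switch (field) {
        case 0:  return 162000;   /* RBR,  1.62 GHz */
        case 1:  return 270000;   /* HBR,  2.70 GHz */
        default: return 540000;   /* HBR2, 5.40 GHz and anything newer */
        }
    }

    int main(void)
    {
        for (unsigned v = 0; v < 3; v++)
            printf("field %u -> %d\n", v, dp_link_bw(v));
        return 0;
    }
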
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4aed1714b9ab..b6dc85c614be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1255,7 +1255,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1255 /* fallthrough, tiled memory */ 1255 /* fallthrough, tiled memory */
1256 case TTM_PL_VRAM: 1256 case TTM_PL_VRAM:
1257 mem->bus.offset = mem->start << PAGE_SHIFT; 1257 mem->bus.offset = mem->start << PAGE_SHIFT;
1258 mem->bus.base = pci_resource_start(dev->pdev, 1); 1258 mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
1259 mem->bus.is_iomem = true; 1259 mem->bus.is_iomem = true;
1260 if (nv_device(drm->device)->card_type >= NV_50) { 1260 if (nv_device(drm->device)->card_type >= NV_50) {
1261 struct nouveau_bar *bar = nouveau_bar(drm->device); 1261 struct nouveau_bar *bar = nouveau_bar(drm->device);
@@ -1293,7 +1293,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1293 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1293 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1294 struct nouveau_bo *nvbo = nouveau_bo(bo); 1294 struct nouveau_bo *nvbo = nouveau_bo(bo);
1295 struct nouveau_device *device = nv_device(drm->device); 1295 struct nouveau_device *device = nv_device(drm->device);
1296 u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT; 1296 u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
1297 int ret; 1297 int ret;
1298 1298
1299 /* as long as the bo isn't in vram, and isn't tiled, we've got 1299 /* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1331,6 +1331,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1331{ 1331{
1332 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1332 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1333 struct nouveau_drm *drm; 1333 struct nouveau_drm *drm;
1334 struct nouveau_device *device;
1334 struct drm_device *dev; 1335 struct drm_device *dev;
1335 unsigned i; 1336 unsigned i;
1336 int r; 1337 int r;
@@ -1348,6 +1349,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1348 } 1349 }
1349 1350
1350 drm = nouveau_bdev(ttm->bdev); 1351 drm = nouveau_bdev(ttm->bdev);
1352 device = nv_device(drm->device);
1351 dev = drm->dev; 1353 dev = drm->dev;
1352 1354
1353#if __OS_HAS_AGP 1355#if __OS_HAS_AGP
@@ -1368,13 +1370,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1368 } 1370 }
1369 1371
1370 for (i = 0; i < ttm->num_pages; i++) { 1372 for (i = 0; i < ttm->num_pages; i++) {
1371 ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i], 1373 ttm_dma->dma_address[i] = nv_device_map_page(device,
1372 0, PAGE_SIZE, 1374 ttm->pages[i]);
1373 PCI_DMA_BIDIRECTIONAL); 1375 if (!ttm_dma->dma_address[i]) {
1374 if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
1375 while (--i) { 1376 while (--i) {
1376 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], 1377 nv_device_unmap_page(device,
1377 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 1378 ttm_dma->dma_address[i]);
1378 ttm_dma->dma_address[i] = 0; 1379 ttm_dma->dma_address[i] = 0;
1379 } 1380 }
1380 ttm_pool_unpopulate(ttm); 1381 ttm_pool_unpopulate(ttm);
@@ -1389,6 +1390,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1389{ 1390{
1390 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1391 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1391 struct nouveau_drm *drm; 1392 struct nouveau_drm *drm;
1393 struct nouveau_device *device;
1392 struct drm_device *dev; 1394 struct drm_device *dev;
1393 unsigned i; 1395 unsigned i;
1394 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1396 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1397,6 +1399,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1397 return; 1399 return;
1398 1400
1399 drm = nouveau_bdev(ttm->bdev); 1401 drm = nouveau_bdev(ttm->bdev);
1402 device = nv_device(drm->device);
1400 dev = drm->dev; 1403 dev = drm->dev;
1401 1404
1402#if __OS_HAS_AGP 1405#if __OS_HAS_AGP
@@ -1415,8 +1418,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1415 1418
1416 for (i = 0; i < ttm->num_pages; i++) { 1419 for (i = 0; i < ttm->num_pages; i++) {
1417 if (ttm_dma->dma_address[i]) { 1420 if (ttm_dma->dma_address[i]) {
1418 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], 1421 nv_device_unmap_page(device, ttm_dma->dma_address[i]);
1419 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1420 } 1422 }
1421 } 1423 }
1422 1424
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cc5152be2cf1..ccb6b452d6d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -154,7 +154,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
154 * nfi why this exists, it came from the -nv ddx. 154 * nfi why this exists, it came from the -nv ddx.
155 */ 155 */
156 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR; 156 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
157 args.start = pci_resource_start(device->pdev, 1); 157 args.start = nv_device_resource_start(device, 1);
158 args.limit = args.start + limit; 158 args.limit = args.start + limit;
159 } else { 159 } else {
160 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; 160 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1674882d60d5..d07ce028af51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -255,7 +255,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
255 } 255 }
256 256
257 ret = pm_runtime_get_sync(connector->dev->dev); 257 ret = pm_runtime_get_sync(connector->dev->dev);
258 if (ret < 0) 258 if (ret < 0 && ret != -EACCES)
259 return conn_status; 259 return conn_status;
260 260
261 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 261 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
@@ -960,7 +960,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
960 case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort; 960 case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
961 case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP; 961 case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
962 case DCB_CONNECTOR_HDMI_0 : 962 case DCB_CONNECTOR_HDMI_0 :
963 case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA; 963 case DCB_CONNECTOR_HDMI_1 :
964 case DCB_CONNECTOR_HDMI_C : return DRM_MODE_CONNECTOR_HDMIA;
964 default: 965 default:
965 break; 966 break;
966 } 967 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24011596af43..3ff030dc1ee3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -105,7 +105,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
105 if (retry) ndelay(crtc->linedur_ns); 105 if (retry) ndelay(crtc->linedur_ns);
106 } while (retry--); 106 } while (retry--);
107 107
108 *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline); 108 *hpos = args.hline;
109 *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline); 109 *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
110 if (stime) *stime = ns_to_ktime(args.time[0]); 110 if (stime) *stime = ns_to_ktime(args.time[0]);
111 if (etime) *etime = ns_to_ktime(args.time[1]); 111 if (etime) *etime = ns_to_ktime(args.time[1]);
@@ -419,6 +419,7 @@ int
419nouveau_display_create(struct drm_device *dev) 419nouveau_display_create(struct drm_device *dev)
420{ 420{
421 struct nouveau_drm *drm = nouveau_drm(dev); 421 struct nouveau_drm *drm = nouveau_drm(dev);
422 struct nouveau_device *device = nouveau_dev(dev);
422 struct nouveau_display *disp; 423 struct nouveau_display *disp;
423 int ret, gen; 424 int ret, gen;
424 425
@@ -459,7 +460,7 @@ nouveau_display_create(struct drm_device *dev)
459 } 460 }
460 461
461 dev->mode_config.funcs = &nouveau_mode_config_funcs; 462 dev->mode_config.funcs = &nouveau_mode_config_funcs;
462 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); 463 dev->mode_config.fb_base = nv_device_resource_start(device, 1);
463 464
464 dev->mode_config.min_width = 0; 465 dev->mode_config.min_width = 0;
465 dev->mode_config.min_height = 0; 466 dev->mode_config.min_height = 0;
@@ -488,6 +489,7 @@ nouveau_display_create(struct drm_device *dev)
488 489
489 if (drm->vbios.dcb.entries) { 490 if (drm->vbios.dcb.entries) {
490 static const u16 oclass[] = { 491 static const u16 oclass[] = {
492 GM107_DISP_CLASS,
491 NVF0_DISP_CLASS, 493 NVF0_DISP_CLASS,
492 NVE0_DISP_CLASS, 494 NVE0_DISP_CLASS,
493 NVD0_DISP_CLASS, 495 NVD0_DISP_CLASS,
@@ -569,7 +571,7 @@ nouveau_display_suspend(struct drm_device *dev)
569 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 571 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
570 struct nouveau_framebuffer *nouveau_fb; 572 struct nouveau_framebuffer *nouveau_fb;
571 573
572 nouveau_fb = nouveau_framebuffer(crtc->fb); 574 nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
573 if (!nouveau_fb || !nouveau_fb->nvbo) 575 if (!nouveau_fb || !nouveau_fb->nvbo)
574 continue; 576 continue;
575 577
@@ -596,7 +598,7 @@ nouveau_display_repin(struct drm_device *dev)
596 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 598 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
597 struct nouveau_framebuffer *nouveau_fb; 599 struct nouveau_framebuffer *nouveau_fb;
598 600
599 nouveau_fb = nouveau_framebuffer(crtc->fb); 601 nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
600 if (!nouveau_fb || !nouveau_fb->nvbo) 602 if (!nouveau_fb || !nouveau_fb->nvbo)
601 continue; 603 continue;
602 604
@@ -693,7 +695,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
693 const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1; 695 const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
694 struct drm_device *dev = crtc->dev; 696 struct drm_device *dev = crtc->dev;
695 struct nouveau_drm *drm = nouveau_drm(dev); 697 struct nouveau_drm *drm = nouveau_drm(dev);
696 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; 698 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
697 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; 699 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
698 struct nouveau_page_flip_state *s; 700 struct nouveau_page_flip_state *s;
699 struct nouveau_channel *chan = drm->channel; 701 struct nouveau_channel *chan = drm->channel;
@@ -767,7 +769,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
767 goto fail_unreserve; 769 goto fail_unreserve;
768 770
769 /* Update the crtc struct and cleanup */ 771 /* Update the crtc struct and cleanup */
770 crtc->fb = fb; 772 crtc->primary->fb = fb;
771 773
772 nouveau_bo_fence(old_bo, fence); 774 nouveau_bo_fence(old_bo, fence);
773 ttm_bo_unreserve(&old_bo->bo); 775 ttm_bo_unreserve(&old_bo->bo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 4ee702ac8907..ddd83756b9a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -33,6 +33,7 @@
33#include <core/client.h> 33#include <core/client.h>
34#include <core/gpuobj.h> 34#include <core/gpuobj.h>
35#include <core/class.h> 35#include <core/class.h>
36#include <core/option.h>
36 37
37#include <engine/device.h> 38#include <engine/device.h>
38#include <engine/disp.h> 39#include <engine/disp.h>
@@ -81,7 +82,7 @@ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
81static struct drm_driver driver; 82static struct drm_driver driver;
82 83
83static u64 84static u64
84nouveau_name(struct pci_dev *pdev) 85nouveau_pci_name(struct pci_dev *pdev)
85{ 86{
86 u64 name = (u64)pci_domain_nr(pdev->bus) << 32; 87 u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
87 name |= pdev->bus->number << 16; 88 name |= pdev->bus->number << 16;
@@ -89,15 +90,30 @@ nouveau_name(struct pci_dev *pdev)
89 return name | PCI_FUNC(pdev->devfn); 90 return name | PCI_FUNC(pdev->devfn);
90} 91}
91 92
93static u64
94nouveau_platform_name(struct platform_device *platformdev)
95{
96 return platformdev->id;
97}
98
99static u64
100nouveau_name(struct drm_device *dev)
101{
102 if (dev->pdev)
103 return nouveau_pci_name(dev->pdev);
104 else
105 return nouveau_platform_name(dev->platformdev);
106}
107
92static int 108static int
93nouveau_cli_create(struct pci_dev *pdev, const char *name, 109nouveau_cli_create(u64 name, const char *sname,
94 int size, void **pcli) 110 int size, void **pcli)
95{ 111{
96 struct nouveau_cli *cli; 112 struct nouveau_cli *cli;
97 int ret; 113 int ret;
98 114
99 *pcli = NULL; 115 *pcli = NULL;
100 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, 116 ret = nouveau_client_create_(sname, name, nouveau_config,
101 nouveau_debug, size, pcli); 117 nouveau_debug, size, pcli);
102 cli = *pcli; 118 cli = *pcli;
103 if (ret) { 119 if (ret) {
@@ -281,7 +297,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
281 remove_conflicting_framebuffers(aper, "nouveaufb", boot); 297 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
282 kfree(aper); 298 kfree(aper);
283 299
284 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), 300 ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
301 nouveau_pci_name(pdev), pci_name(pdev),
285 nouveau_config, nouveau_debug, &device); 302 nouveau_config, nouveau_debug, &device);
286 if (ret) 303 if (ret)
287 return ret; 304 return ret;
@@ -300,22 +317,27 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
300#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 317#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
301 318
302static void 319static void
303nouveau_get_hdmi_dev(struct drm_device *dev) 320nouveau_get_hdmi_dev(struct nouveau_drm *drm)
304{ 321{
305 struct nouveau_drm *drm = dev->dev_private; 322 struct pci_dev *pdev = drm->dev->pdev;
306 struct pci_dev *pdev = dev->pdev; 323
324 if (!pdev) {
325 DRM_INFO("not a PCI device; no HDMI\n");
326 drm->hdmi_device = NULL;
327 return;
328 }
307 329
308 /* subfunction one is a hdmi audio device? */ 330 /* subfunction one is a hdmi audio device? */
309 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number, 331 drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
310 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1)); 332 PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
311 333
312 if (!drm->hdmi_device) { 334 if (!drm->hdmi_device) {
313 DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1); 335 NV_DEBUG(drm, "hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
314 return; 336 return;
315 } 337 }
316 338
317 if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) { 339 if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
318 DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class); 340 NV_DEBUG(drm, "possible hdmi device not audio %d\n", drm->hdmi_device->class);
319 pci_dev_put(drm->hdmi_device); 341 pci_dev_put(drm->hdmi_device);
320 drm->hdmi_device = NULL; 342 drm->hdmi_device = NULL;
321 return; 343 return;
@@ -330,22 +352,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
330 struct nouveau_drm *drm; 352 struct nouveau_drm *drm;
331 int ret; 353 int ret;
332 354
333 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); 355 ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
356 (void **)&drm);
334 if (ret) 357 if (ret)
335 return ret; 358 return ret;
336 359
337 dev->dev_private = drm; 360 dev->dev_private = drm;
338 drm->dev = dev; 361 drm->dev = dev;
362 nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM");
339 363
340 INIT_LIST_HEAD(&drm->clients); 364 INIT_LIST_HEAD(&drm->clients);
341 spin_lock_init(&drm->tile.lock); 365 spin_lock_init(&drm->tile.lock);
342 366
343 nouveau_get_hdmi_dev(dev); 367 nouveau_get_hdmi_dev(drm);
344 368
345 /* make sure AGP controller is in a consistent state before we 369 /* make sure AGP controller is in a consistent state before we
346 * (possibly) execute vbios init tables (see nouveau_agp.h) 370 * (possibly) execute vbios init tables (see nouveau_agp.h)
347 */ 371 */
348 if (drm_pci_device_is_agp(dev) && dev->agp) { 372 if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
349 /* dummy device object, doesn't init anything, but allows 373 /* dummy device object, doesn't init anything, but allows
350 * agp code access to registers 374 * agp code access to registers
351 */ 375 */
@@ -486,13 +510,13 @@ nouveau_drm_remove(struct pci_dev *pdev)
486} 510}
487 511
488static int 512static int
489nouveau_do_suspend(struct drm_device *dev) 513nouveau_do_suspend(struct drm_device *dev, bool runtime)
490{ 514{
491 struct nouveau_drm *drm = nouveau_drm(dev); 515 struct nouveau_drm *drm = nouveau_drm(dev);
492 struct nouveau_cli *cli; 516 struct nouveau_cli *cli;
493 int ret; 517 int ret;
494 518
495 if (dev->mode_config.num_crtc) { 519 if (dev->mode_config.num_crtc && !runtime) {
496 NV_INFO(drm, "suspending display...\n"); 520 NV_INFO(drm, "suspending display...\n");
497 ret = nouveau_display_suspend(dev); 521 ret = nouveau_display_suspend(dev);
498 if (ret) 522 if (ret)
@@ -566,7 +590,7 @@ int nouveau_pmops_suspend(struct device *dev)
566 if (drm_dev->mode_config.num_crtc) 590 if (drm_dev->mode_config.num_crtc)
567 nouveau_fbcon_set_suspend(drm_dev, 1); 591 nouveau_fbcon_set_suspend(drm_dev, 1);
568 592
569 ret = nouveau_do_suspend(drm_dev); 593 ret = nouveau_do_suspend(drm_dev, false);
570 if (ret) 594 if (ret)
571 return ret; 595 return ret;
572 596
@@ -646,7 +670,7 @@ static int nouveau_pmops_freeze(struct device *dev)
646 if (drm_dev->mode_config.num_crtc) 670 if (drm_dev->mode_config.num_crtc)
647 nouveau_fbcon_set_suspend(drm_dev, 1); 671 nouveau_fbcon_set_suspend(drm_dev, 1);
648 672
649 ret = nouveau_do_suspend(drm_dev); 673 ret = nouveau_do_suspend(drm_dev, false);
650 return ret; 674 return ret;
651} 675}
652 676
@@ -671,7 +695,6 @@ static int nouveau_pmops_thaw(struct device *dev)
 static int
 nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 {
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 	char name[32], tmpname[TASK_COMM_LEN];
@@ -679,13 +702,15 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	/* need to bring up power immediately if opening device */
 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0)
+	if (ret < 0 && ret != -EACCES)
 		return ret;
 
 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-	ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
+	ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
+				 (void **)&cli);
+
 	if (ret)
 		goto out_suspend;
 
@@ -762,7 +787,7 @@ long nouveau_drm_ioctl(struct file *filp,
 	dev = file_priv->minor->dev;
 
 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0)
+	if (ret < 0 && ret != -EACCES)
 		return ret;
 
 	ret = drm_ioctl(filp, cmd, arg);
@@ -882,7 +907,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
-	ret = nouveau_do_suspend(drm_dev);
+	ret = nouveau_do_suspend(drm_dev, true);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3cold);
@@ -908,8 +933,6 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev);
-	if (drm_dev->mode_config.num_crtc)
-		nouveau_display_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
@@ -980,6 +1003,25 @@ nouveau_drm_pci_driver = {
 	.driver.pm = &nouveau_pm_ops,
 };
 
+int nouveau_drm_platform_probe(struct platform_device *pdev)
+{
+	struct nouveau_device *device;
+	int ret;
+
+	ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM,
+				    nouveau_platform_name(pdev),
+				    dev_name(&pdev->dev), nouveau_config,
+				    nouveau_debug, &device);
+
+	ret = drm_platform_init(&driver, pdev);
+	if (ret) {
+		nouveau_object_ref(NULL, (struct nouveau_object **)&device);
+		return ret;
+	}
+
+	return ret;
+}
+
 static int __init
 nouveau_drm_init(void)
 {
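The two pm_runtime_get_sync() call sites changed above now tolerate -EACCES. When runtime PM is disabled for a device (for example on machines without the Optimus/vga_switcheroo path), pm_runtime_get_sync() fails with -EACCES even though the GPU is fully powered, so that value must not abort an open or an ioctl. A minimal sketch of the idiom:

	int ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;	/* real failure to power up the device */
	/* -EACCES: runtime PM is disabled, the device is already powered */
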
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 23ca7a517246..7efbafaf7c1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -161,10 +161,7 @@ int nouveau_pmops_resume(struct device *);
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
 #define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
 #define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
-#define NV_DEBUG(cli, fmt, args...) do { \
-	if (drm_debug & DRM_UT_DRIVER) \
-		nv_info((cli), fmt, ##args); \
-} while (0)
+#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args)
 
 extern int nouveau_modeset;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 7903e0ed3c75..64a42cfd3717 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -528,10 +528,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	if (drm->fbcon) {
 		console_lock();
-		if (state == 0)
+		if (state == 1)
 			nouveau_fbcon_save_disable_accel(dev);
 		fb_set_suspend(drm->fbcon->helper.fbdev, state);
-		if (state == 1)
+		if (state == 0)
 			nouveau_fbcon_restore_accel(dev);
 		console_unlock();
 	}
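For fb_set_suspend() a nonzero state means "suspend", so the hunk above fixes an inverted test: acceleration has to be saved and disabled before the framebuffer is suspended (state == 1) and restored only after it is resumed (state == 0). A sketch of the corrected ordering, with `suspending` standing in for `state != 0`:

	if (suspending)
		nouveau_fbcon_save_disable_accel(dev);	/* quiesce accel first */
	fb_set_suspend(drm->fbcon->helper.fbdev, suspending);
	if (!suspending)
		nouveau_fbcon_restore_accel(dev);	/* re-enable accel last */
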
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 27c3fd89e8ce..c90c0dc0afe8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -228,8 +228,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
-
 	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
 		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 4aff04fa483c..19fd767bab10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -383,8 +383,9 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
 	long value;
 	int ret;
 
-	if (strict_strtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 10, &value);
+	if (ret)
+		return ret;
 
 	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
 	if (ret)
@@ -587,18 +588,14 @@ nouveau_hwmon_init(struct drm_device *dev)
 
 	/* set the default attributes */
 	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
-	if (ret) {
-		if (ret)
-			goto error;
-	}
+	if (ret)
+		goto error;
 
 	/* if the card has a working thermal sensor */
 	if (therm->temp_get(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
-		if (ret) {
-			if (ret)
-				goto error;
-		}
+		if (ret)
+			goto error;
 	}
 
 	/* if the card has a pwm fan */
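strict_strtol() was removed from the kernel; its replacement kstrtol() returns 0 on success and a negative errno on failure (-EINVAL for malformed input, -ERANGE for overflow), so the return value should be propagated rather than compared against a single error code, as the first hwmon hunk now does:

	long value;
	int ret = kstrtol(buf, 10, &value);	/* 0 on success, -EINVAL/-ERANGE on error */
	if (ret)
		return ret;
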
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 89201a17ce75..75dda2b07176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -30,7 +30,7 @@
 static inline struct drm_device *
 drm_device(struct device *d)
 {
-	return pci_get_drvdata(to_pci_dev(d));
+	return dev_get_drvdata(d);
 }
 
 #define snappendf(p,r,f,a...) do { \
@@ -132,9 +132,10 @@ nouveau_sysfs_fini(struct drm_device *dev)
 {
 	struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 
 	if (sysfs->ctrl) {
-		device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
+		device_remove_file(nv_device_base(device), &dev_attr_pstate);
 		nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
 	}
 
@@ -146,6 +147,7 @@ int
 nouveau_sysfs_init(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_sysfs *sysfs;
 	int ret;
 
@@ -156,7 +158,7 @@ nouveau_sysfs_init(struct drm_device *dev)
 	ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
 				 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
 	if (ret == 0)
-		device_create_file(&dev->pdev->dev, &dev_attr_pstate);
+		device_create_file(nv_device_base(device), &dev_attr_pstate);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d45d50da978f..ab0228f640a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -354,21 +354,26 @@ int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
+	struct nouveau_device *device = nv_device(drm->device);
 	u32 bits;
 	int ret;
 
 	bits = nouveau_vmmgr(drm->device)->dma_bits;
-	if ( drm->agp.stat == ENABLED ||
-	     !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
-		bits = 32;
-
-	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
-	if (ret)
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+	if (nv_device_is_pci(device)) {
+		if (drm->agp.stat == ENABLED ||
+		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
+			bits = 32;
+
+		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+		if (ret)
+			return ret;
+
+		ret = pci_set_consistent_dma_mask(dev->pdev,
+						  DMA_BIT_MASK(bits));
+		if (ret)
+			pci_set_consistent_dma_mask(dev->pdev,
+						    DMA_BIT_MASK(32));
+	}
 
 	ret = nouveau_ttm_global_init(drm);
 	if (ret)
@@ -376,7 +381,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
 	ret = ttm_bo_device_init(&drm->ttm.bdev,
 				 drm->ttm.bo_global_ref.ref.object,
-				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 &nouveau_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
 				 bits <= 32 ? true : false);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
@@ -394,8 +401,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		return ret;
 	}
 
-	drm->ttm.mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 1),
-					 pci_resource_len(dev->pdev, 1));
+	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
+					 nv_device_resource_len(device, 1));
 
 	/* GART init */
 	if (drm->agp.stat != ENABLED) {
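Two things happen in the TTM hunks above: the PCI DMA-mask setup is skipped entirely for non-PCI (platform) GPUs, and ttm_bo_device_init() now takes the address space used for mmap, because the per-device dev_mapping pointer (whose assignment was also dropped from nouveau_gem_ioctl_new() earlier) is gone. All DRM mmap offsets of a device are now backed by one shared anonymous inode, so dev->anon_inode->i_mapping is the mapping TTM must invalidate when a buffer moves. A sketch of that invalidation, where bo_offset and bo_size are illustrative placeholders:

	/* sketch: drop CPU mappings of a moved buffer (offset/size illustrative) */
	unmap_mapping_range(dev->anon_inode->i_mapping, bo_offset, bo_size, 1);
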
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 471347edc27e..fb84da3cb50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -84,6 +84,11 @@ nouveau_vga_init(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
 	bool runtime = false;
+
+	/* only relevant for PCI devices */
+	if (!dev->pdev)
+		return;
+
 	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
 
 	if (nouveau_runtime_pm == 1)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dccafc6e9db..58af547b0b93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -651,7 +651,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 	nv_connector = nouveau_crtc_connector_get(nv_crtc);
 	connector = &nv_connector->base;
 	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+		if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
 			mode = DITHERING_MODE_DYNAMIC2X2;
 	} else {
 		mode = nv_connector->dithering_mode;
@@ -785,7 +785,8 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 
 	if (update) {
 		nv50_display_flip_stop(crtc);
-		nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+		nv50_display_flip_next(crtc, crtc->primary->fb,
+				       NULL, 1);
 	}
 }
 
@@ -1028,7 +1029,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
 	}
 
 	nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
-	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+	nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
 }
 
 static bool
@@ -1042,7 +1043,7 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
 static int
 nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 {
-	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
 	struct nv50_head *head = nv50_head(crtc);
 	int ret;
 
@@ -1139,7 +1140,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
 	nv50_crtc_set_dither(nv_crtc, false);
 	nv50_crtc_set_scale(nv_crtc, false);
 	nv50_crtc_set_color_vibrance(nv_crtc, false);
-	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+	nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
 	return 0;
 }
 
@@ -1151,7 +1152,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ret;
 
-	if (!crtc->fb) {
+	if (!crtc->primary->fb) {
 		NV_DEBUG(drm, "No FB bound\n");
 		return 0;
 	}
@@ -1161,8 +1162,8 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 
 	nv50_display_flip_stop(crtc);
-	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
-	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+	nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
+	nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 4313bb0a49a6..355157e4f78d 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -245,7 +245,7 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
 	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
 	omap_crtc->full_update = true;
 
-	return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+	return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16,
@@ -273,7 +273,7 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_plane *plane = omap_crtc->plane;
 	struct drm_display_mode *mode = &crtc->mode;
 
-	return omap_plane_mode_set(plane, crtc, crtc->fb,
+	return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16,
@@ -308,14 +308,14 @@ static void page_flip_worker(struct work_struct *work)
 	struct drm_gem_object *bo;
 
 	mutex_lock(&crtc->mutex);
-	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			crtc->x << 16, crtc->y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16,
 			vblank_cb, crtc);
 	mutex_unlock(&crtc->mutex);
 
-	bo = omap_framebuffer_bo(crtc->fb, 0);
+	bo = omap_framebuffer_bo(crtc->primary->fb, 0);
 	drm_gem_object_unreference_unlocked(bo);
 }
 
@@ -336,9 +336,10 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct drm_plane *primary = crtc->primary;
 	struct drm_gem_object *bo;
 
-	DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+	DBG("%d -> %d (event=%p)", primary->fb ? primary->fb->base.id : -1,
 			fb->base.id, event);
 
 	if (omap_crtc->old_fb) {
@@ -347,7 +348,7 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
 	}
 
 	omap_crtc->event = event;
-	crtc->fb = fb;
+	primary->fb = fb;
 
 	/*
 	 * Hold a reference temporarily until the crtc is updated
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f466c4aaee94..d2b8c49bfb4a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -306,13 +306,14 @@ struct drm_connector *omap_framebuffer_get_next_connector(
 	struct drm_connector *connector = from;
 
 	if (!from)
-		return list_first_entry(connector_list, typeof(*from), head);
+		return list_first_entry_or_null(connector_list, typeof(*from),
+						head);
 
 	list_for_each_entry_from(connector, connector_list, head) {
 		if (connector != from) {
 			struct drm_encoder *encoder = connector->encoder;
 			struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
-			if (crtc && crtc->fb == fb)
+			if (crtc && crtc->primary->fb == fb)
 				return connector;
 
 		}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 5aec3e81fe24..c8d972763889 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -153,24 +153,24 @@ static struct {
 static void evict_entry(struct drm_gem_object *obj,
 		enum tiler_fmt fmt, struct usergart_entry *entry)
 {
-	if (obj->dev->dev_mapping) {
-		struct omap_gem_object *omap_obj = to_omap_bo(obj);
-		int n = usergart[fmt].height;
-		size_t size = PAGE_SIZE * n;
-		loff_t off = mmap_offset(obj) +
-				(entry->obj_pgoff << PAGE_SHIFT);
-		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
-		if (m > 1) {
-			int i;
-			/* if stride > than PAGE_SIZE then sparse mapping: */
-			for (i = n; i > 0; i--) {
-				unmap_mapping_range(obj->dev->dev_mapping,
-						off, PAGE_SIZE, 1);
-				off += PAGE_SIZE * m;
-			}
-		} else {
-			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
-		}
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int n = usergart[fmt].height;
+	size_t size = PAGE_SIZE * n;
+	loff_t off = mmap_offset(obj) +
+			(entry->obj_pgoff << PAGE_SHIFT);
+	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+	if (m > 1) {
+		int i;
+		/* if stride > than PAGE_SIZE then sparse mapping: */
+		for (i = n; i > 0; i--) {
+			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+					off, PAGE_SIZE, 1);
+			off += PAGE_SIZE * m;
+		}
+	} else {
+		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+				off, size, 1);
 	}
 
 	entry->obj = NULL;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e0f13d1bc84..4ec874da5668 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -16,4 +16,18 @@ config DRM_PANEL_SIMPLE
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
+config DRM_PANEL_LD9040
+	tristate "LD9040 RGB/SPI panel"
+	depends on DRM && DRM_PANEL
+	depends on OF
+	select SPI
+	select VIDEOMODE_HELPERS
+
+config DRM_PANEL_S6E8AA0
+	tristate "S6E8AA0 DSI video mode panel"
+	depends on DRM && DRM_PANEL
+	depends on OF
+	select DRM_MIPI_DSI
+	select VIDEOMODE_HELPERS
+
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index af9dfa235b94..8b929212fad7 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1 +1,3 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
+obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
new file mode 100644
index 000000000000..1f1f8371a199
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -0,0 +1,376 @@
1/*
2 * ld9040 AMOLED LCD drm_panel driver.
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * Derived from drivers/video/backlight/ld9040.c
6 *
7 * Andrzej Hajda <a.hajda@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <drm/drmP.h>
15#include <drm/drm_panel.h>
16
17#include <linux/gpio/consumer.h>
18#include <linux/regulator/consumer.h>
19#include <linux/spi/spi.h>
20
21#include <video/mipi_display.h>
22#include <video/of_videomode.h>
23#include <video/videomode.h>
24
25/* Manufacturer Command Set */
26#define MCS_MANPWR 0xb0
27#define MCS_ELVSS_ON 0xb1
28#define MCS_USER_SETTING 0xf0
29#define MCS_DISPCTL 0xf2
30#define MCS_GTCON 0xf7
31#define MCS_PANEL_CONDITION 0xf8
32#define MCS_GAMMA_SET1 0xf9
33#define MCS_GAMMA_CTRL 0xfb
34
35/* array of gamma tables for gamma value 2.2 */
36static u8 const ld9040_gammas[25][22] = {
37 { 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
38 0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
39 { 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
40 0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
41 { 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
42 0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
43 { 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
44 0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
45 { 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
46 0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
47 { 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
48 0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
49 { 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
50 0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
51 { 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
52 0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
53 { 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
54 0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
55 { 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
56 0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
57 { 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
58 0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
59 { 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
60 0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
61 { 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
62 0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
63 { 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
64 0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
65 { 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
66 0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
67 { 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
68 0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
69 { 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
70 0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
71 { 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
72 0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
73 { 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
74 0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
75 { 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
76 0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
77 { 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
78 0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
79 { 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
80 0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
81 { 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
82 0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
83 { 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
84 0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
85 { 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
86 0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
87};
88
89struct ld9040 {
90 struct device *dev;
91 struct drm_panel panel;
92
93 struct regulator_bulk_data supplies[2];
94 struct gpio_desc *reset_gpio;
95 u32 power_on_delay;
96 u32 reset_delay;
97 struct videomode vm;
98 u32 width_mm;
99 u32 height_mm;
100
101 int brightness;
102
103 /* This field is tested by functions directly accessing bus before
104 * transfer, transfer is skipped if it is set. In case of transfer
105 * failure or unexpected response the field is set to error value.
106 * Such construct allows to eliminate many checks in higher level
107 * functions.
108 */
109 int error;
110};
111
112#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
113
114static int ld9040_clear_error(struct ld9040 *ctx)
115{
116 int ret = ctx->error;
117
118 ctx->error = 0;
119 return ret;
120}
121
122static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
123{
124 struct spi_device *spi = to_spi_device(ctx->dev);
125 struct spi_transfer xfer = {
126 .len = 2,
127 .tx_buf = &data,
128 };
129 struct spi_message msg;
130
131 spi_message_init(&msg);
132 spi_message_add_tail(&xfer, &msg);
133
134 return spi_sync(spi, &msg);
135}
136
137static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
138{
139 int ret = 0;
140
141 if (ctx->error < 0 || len == 0)
142 return;
143
144 dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", len, data);
145 ret = ld9040_spi_write_word(ctx, *data);
146
147 while (!ret && --len) {
148 ++data;
149 ret = ld9040_spi_write_word(ctx, *data | 0x100);
150 }
151
152 if (ret) {
153 dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
154 data);
155 ctx->error = ret;
156 }
157
158 usleep_range(300, 310);
159}
160
161#define ld9040_dcs_write_seq_static(ctx, seq...) \
162({\
163 static const u8 d[] = { seq };\
164 ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
165})
166
167static void ld9040_brightness_set(struct ld9040 *ctx)
168{
169 ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
170 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
171
172 ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
173}
174
175static void ld9040_init(struct ld9040 *ctx)
176{
177 ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
178 ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
179 0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
180 0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
181 0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
182 ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
183 0x02, 0x08, 0x08, 0x10, 0x10);
184 ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
185 ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
186 ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
187 ld9040_brightness_set(ctx);
188 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
189 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
190}
191
192static int ld9040_power_on(struct ld9040 *ctx)
193{
194 int ret;
195
196 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
197 if (ret < 0)
198 return ret;
199
200 msleep(ctx->power_on_delay);
201 gpiod_set_value(ctx->reset_gpio, 0);
202 msleep(ctx->reset_delay);
203 gpiod_set_value(ctx->reset_gpio, 1);
204 msleep(ctx->reset_delay);
205
206 return 0;
207}
208
209static int ld9040_power_off(struct ld9040 *ctx)
210{
211 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
212}
213
214static int ld9040_disable(struct drm_panel *panel)
215{
216 struct ld9040 *ctx = panel_to_ld9040(panel);
217
218 msleep(120);
219 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
220 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
221 msleep(40);
222
223 ld9040_clear_error(ctx);
224
225 return ld9040_power_off(ctx);
226}
227
228static int ld9040_enable(struct drm_panel *panel)
229{
230 struct ld9040 *ctx = panel_to_ld9040(panel);
231 int ret;
232
233 ret = ld9040_power_on(ctx);
234 if (ret < 0)
235 return ret;
236
237 ld9040_init(ctx);
238
239 ret = ld9040_clear_error(ctx);
240
241 if (ret < 0)
242 ld9040_disable(panel);
243
244 return ret;
245}
246
247static int ld9040_get_modes(struct drm_panel *panel)
248{
249 struct drm_connector *connector = panel->connector;
250 struct ld9040 *ctx = panel_to_ld9040(panel);
251 struct drm_display_mode *mode;
252
253 mode = drm_mode_create(connector->dev);
254 if (!mode) {
255 DRM_ERROR("failed to create a new display mode\n");
256 return 0;
257 }
258
259 drm_display_mode_from_videomode(&ctx->vm, mode);
260 mode->width_mm = ctx->width_mm;
261 mode->height_mm = ctx->height_mm;
262 connector->display_info.width_mm = mode->width_mm;
263 connector->display_info.height_mm = mode->height_mm;
264
265 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
266 drm_mode_probed_add(connector, mode);
267
268 return 1;
269}
270
271static const struct drm_panel_funcs ld9040_drm_funcs = {
272 .disable = ld9040_disable,
273 .enable = ld9040_enable,
274 .get_modes = ld9040_get_modes,
275};
276
277static int ld9040_parse_dt(struct ld9040 *ctx)
278{
279 struct device *dev = ctx->dev;
280 struct device_node *np = dev->of_node;
281 int ret;
282
283 ret = of_get_videomode(np, &ctx->vm, 0);
284 if (ret < 0)
285 return ret;
286
287 of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
288 of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
289 of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
290 of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
291
292 return 0;
293}
294
295static int ld9040_probe(struct spi_device *spi)
296{
297 struct device *dev = &spi->dev;
298 struct ld9040 *ctx;
299 int ret;
300
301 ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
302 if (!ctx)
303 return -ENOMEM;
304
305 spi_set_drvdata(spi, ctx);
306
307 ctx->dev = dev;
308 ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
309
310 ret = ld9040_parse_dt(ctx);
311 if (ret < 0)
312 return ret;
313
314 ctx->supplies[0].supply = "vdd3";
315 ctx->supplies[1].supply = "vci";
316 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
317 ctx->supplies);
318 if (ret < 0)
319 return ret;
320
321 ctx->reset_gpio = devm_gpiod_get(dev, "reset");
322 if (IS_ERR(ctx->reset_gpio)) {
323 dev_err(dev, "cannot get reset-gpios %ld\n",
324 PTR_ERR(ctx->reset_gpio));
325 return PTR_ERR(ctx->reset_gpio);
326 }
327 ret = gpiod_direction_output(ctx->reset_gpio, 1);
328 if (ret < 0) {
329 dev_err(dev, "cannot configure reset-gpios %d\n", ret);
330 return ret;
331 }
332
333 spi->bits_per_word = 9;
334 ret = spi_setup(spi);
335 if (ret < 0) {
336 dev_err(dev, "spi setup failed.\n");
337 return ret;
338 }
339
340 drm_panel_init(&ctx->panel);
341 ctx->panel.dev = dev;
342 ctx->panel.funcs = &ld9040_drm_funcs;
343
344 return drm_panel_add(&ctx->panel);
345}
346
347static int ld9040_remove(struct spi_device *spi)
348{
349 struct ld9040 *ctx = spi_get_drvdata(spi);
350
351 ld9040_power_off(ctx);
352 drm_panel_remove(&ctx->panel);
353
354 return 0;
355}
356
357static struct of_device_id ld9040_of_match[] = {
358 { .compatible = "samsung,ld9040" },
359 { }
360};
361MODULE_DEVICE_TABLE(of, ld9040_of_match);
362
363static struct spi_driver ld9040_driver = {
364 .probe = ld9040_probe,
365 .remove = ld9040_remove,
366 .driver = {
367 .name = "ld9040",
368 .owner = THIS_MODULE,
369 .of_match_table = ld9040_of_match,
370 },
371};
372module_spi_driver(ld9040_driver);
373
374MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
375MODULE_DESCRIPTION("ld9040 LCD Driver");
376MODULE_LICENSE("GPL v2");
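The ld9040 is driven over a 3-wire serial bus with 9-bit words (note spi->bits_per_word = 9 in the probe above): bit 8 carries the data/command flag, 0 for the leading command byte and 1 for every following parameter byte, which is why ld9040_dcs_write() sends the first byte raw and ORs 0x100 into the rest. A sketch of the framing, with ld9040_word an illustrative helper:

	/* sketch: 9-bit LD9040 word, data/command flag in bit 8 */
	static u16 ld9040_word(u8 byte, bool is_param)
	{
		return byte | (is_param ? 0x100 : 0);	/* 0 = command, 1 = parameter */
	}
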
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
new file mode 100644
index 000000000000..35941d2412b8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -0,0 +1,1069 @@
1/*
2 * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd
5 *
6 * Inki Dae, <inki.dae@samsung.com>
7 * Donghwa Lee, <dh09.lee@samsung.com>
8 * Joongmock Shin <jmock.shin@samsung.com>
9 * Eunchul Kim <chulspro.kim@samsung.com>
10 * Tomasz Figa <t.figa@samsung.com>
11 * Andrzej Hajda <a.hajda@samsung.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16*/
17
18#include <drm/drmP.h>
19#include <drm/drm_mipi_dsi.h>
20#include <drm/drm_panel.h>
21
22#include <linux/gpio/consumer.h>
23#include <linux/regulator/consumer.h>
24
25#include <video/mipi_display.h>
26#include <video/of_videomode.h>
27#include <video/videomode.h>
28
29#define LDI_MTP_LENGTH 24
30#define GAMMA_LEVEL_NUM 25
31#define GAMMA_TABLE_LEN 26
32
33#define PANELCTL_SS_MASK (1 << 5)
34#define PANELCTL_SS_1_800 (0 << 5)
35#define PANELCTL_SS_800_1 (1 << 5)
36#define PANELCTL_GTCON_MASK (7 << 2)
37#define PANELCTL_GTCON_110 (6 << 2)
38#define PANELCTL_GTCON_111 (7 << 2)
39
40#define PANELCTL_CLK1_CON_MASK (7 << 3)
41#define PANELCTL_CLK1_000 (0 << 3)
42#define PANELCTL_CLK1_001 (1 << 3)
43#define PANELCTL_CLK2_CON_MASK (7 << 0)
44#define PANELCTL_CLK2_000 (0 << 0)
45#define PANELCTL_CLK2_001 (1 << 0)
46
47#define PANELCTL_INT1_CON_MASK (7 << 3)
48#define PANELCTL_INT1_000 (0 << 3)
49#define PANELCTL_INT1_001 (1 << 3)
50#define PANELCTL_INT2_CON_MASK (7 << 0)
51#define PANELCTL_INT2_000 (0 << 0)
52#define PANELCTL_INT2_001 (1 << 0)
53
54#define PANELCTL_BICTL_CON_MASK (7 << 3)
55#define PANELCTL_BICTL_000 (0 << 3)
56#define PANELCTL_BICTL_001 (1 << 3)
57#define PANELCTL_BICTLB_CON_MASK (7 << 0)
58#define PANELCTL_BICTLB_000 (0 << 0)
59#define PANELCTL_BICTLB_001 (1 << 0)
60
61#define PANELCTL_EM_CLK1_CON_MASK (7 << 3)
62#define PANELCTL_EM_CLK1_110 (6 << 3)
63#define PANELCTL_EM_CLK1_111 (7 << 3)
64#define PANELCTL_EM_CLK1B_CON_MASK (7 << 0)
65#define PANELCTL_EM_CLK1B_110 (6 << 0)
66#define PANELCTL_EM_CLK1B_111 (7 << 0)
67
68#define PANELCTL_EM_CLK2_CON_MASK (7 << 3)
69#define PANELCTL_EM_CLK2_110 (6 << 3)
70#define PANELCTL_EM_CLK2_111 (7 << 3)
71#define PANELCTL_EM_CLK2B_CON_MASK (7 << 0)
72#define PANELCTL_EM_CLK2B_110 (6 << 0)
73#define PANELCTL_EM_CLK2B_111 (7 << 0)
74
75#define PANELCTL_EM_INT1_CON_MASK (7 << 3)
76#define PANELCTL_EM_INT1_000 (0 << 3)
77#define PANELCTL_EM_INT1_001 (1 << 3)
78#define PANELCTL_EM_INT2_CON_MASK (7 << 0)
79#define PANELCTL_EM_INT2_000 (0 << 0)
80#define PANELCTL_EM_INT2_001 (1 << 0)
81
82#define AID_DISABLE (0x4)
83#define AID_1 (0x5)
84#define AID_2 (0x6)
85#define AID_3 (0x7)
86
87typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
88
89struct s6e8aa0_variant {
90 u8 version;
91 const s6e8aa0_gamma_table *gamma_tables;
92};
93
94struct s6e8aa0 {
95 struct device *dev;
96 struct drm_panel panel;
97
98 struct regulator_bulk_data supplies[2];
99 struct gpio_desc *reset_gpio;
100 u32 power_on_delay;
101 u32 reset_delay;
102 u32 init_delay;
103 bool flip_horizontal;
104 bool flip_vertical;
105 struct videomode vm;
106 u32 width_mm;
107 u32 height_mm;
108
109 u8 version;
110 u8 id;
111 const struct s6e8aa0_variant *variant;
112 int brightness;
113
114 /* This field is tested by functions directly accessing DSI bus before
115 * transfer, transfer is skipped if it is set. In case of transfer
116 * failure or unexpected response the field is set to error value.
117 * Such construct allows to eliminate many checks in higher level
118 * functions.
119 */
120 int error;
121};
122
123#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
124
125static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
126{
127 int ret = ctx->error;
128
129 ctx->error = 0;
130 return ret;
131}
132
133static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
134{
135 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
136 int ret;
137
138 if (ctx->error < 0)
139 return;
140
141 ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
142 if (ret < 0) {
143 dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
144 data);
145 ctx->error = ret;
146 }
147}
148
149static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
150{
151 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
152 int ret;
153
154 if (ctx->error < 0)
155 return ctx->error;
156
157 ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
158 if (ret < 0) {
159 dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
160 ctx->error = ret;
161 }
162
163 return ret;
164}
165
166#define s6e8aa0_dcs_write_seq(ctx, seq...) \
167({\
168 const u8 d[] = { seq };\
169 BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
170 s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
171})
172
173#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
174({\
175 static const u8 d[] = { seq };\
176 s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
177})
178
179static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
180{
181 s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
182}
183
184static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
185{
186 static const u8 aids[] = {
187 0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
188 };
189 u8 aid = aids[ctx->id >> 5];
190 u8 cfg = 0x3d;
191 u8 clk_con = 0xc8;
192 u8 int_con = 0x08;
193 u8 bictl_con = 0x48;
194 u8 em_clk1_con = 0xff;
195 u8 em_clk2_con = 0xff;
196 u8 em_int_con = 0xc8;
197
198 if (ctx->flip_vertical) {
199 /* GTCON */
200 cfg &= ~(PANELCTL_GTCON_MASK);
201 cfg |= (PANELCTL_GTCON_110);
202 }
203
204 if (ctx->flip_horizontal) {
205 /* SS */
206 cfg &= ~(PANELCTL_SS_MASK);
207 cfg |= (PANELCTL_SS_1_800);
208 }
209
210 if (ctx->flip_horizontal || ctx->flip_vertical) {
211 /* CLK1,2_CON */
212 clk_con &= ~(PANELCTL_CLK1_CON_MASK |
213 PANELCTL_CLK2_CON_MASK);
214 clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
215
216 /* INT1,2_CON */
217 int_con &= ~(PANELCTL_INT1_CON_MASK |
218 PANELCTL_INT2_CON_MASK);
219 int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
220
221 /* BICTL,B_CON */
222 bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
223 PANELCTL_BICTLB_CON_MASK);
224 bictl_con |= (PANELCTL_BICTL_000 |
225 PANELCTL_BICTLB_001);
226
227 /* EM_CLK1,1B_CON */
228 em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
229 PANELCTL_EM_CLK1B_CON_MASK);
230 em_clk1_con |= (PANELCTL_EM_CLK1_110 |
231 PANELCTL_EM_CLK1B_110);
232
233 /* EM_CLK2,2B_CON */
234 em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
235 PANELCTL_EM_CLK2B_CON_MASK);
236 em_clk2_con |= (PANELCTL_EM_CLK2_110 |
237 PANELCTL_EM_CLK2B_110);
238
239 /* EM_INT1,2_CON */
240 em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
241 PANELCTL_EM_INT2_CON_MASK);
242 em_int_con |= (PANELCTL_EM_INT1_000 |
243 PANELCTL_EM_INT2_001);
244 }
245
246 s6e8aa0_dcs_write_seq(ctx,
247 0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
248 0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
249 0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
250 0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
251 bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
252 em_int_con);
253}
254
255static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
256{
257 if (ctx->version < 142)
258 s6e8aa0_dcs_write_seq_static(ctx,
259 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
260 0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
261 0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
262 0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
263 0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
264 );
265 else
266 s6e8aa0_panel_cond_set_v142(ctx);
267}
268
269static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
270{
271 s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
272}
273
274static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
275{
276 s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
277}
278
279static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
280{
281 static const u8 pent32[] = {
282 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
283 };
284
285 static const u8 pent142[] = {
286 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
287 };
288
289 if (ctx->version < 142)
290 s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
291 else
292 s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
293}
294
295static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
296{
297 static const u8 pwr142[] = {
298 0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
299 };
300
301 static const u8 pwr32[] = {
302 0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
303 };
304
305 if (ctx->version < 142)
306 s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
307 else
308 s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
309}
310
311static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
312{
313 u8 id = ctx->id ? 0 : 0x95;
314
315 s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
316}
317
318static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
319{
320 u8 br;
321
322 switch (ctx->brightness) {
323 case 0 ... 6: /* 30cd ~ 100cd */
324 br = 0xdf;
325 break;
326 case 7 ... 11: /* 120cd ~ 150cd */
327 br = 0xdd;
328 break;
329 case 12 ... 15: /* 180cd ~ 210cd */
330 default:
331 br = 0xd9;
332 break;
333 case 16 ... 24: /* 240cd ~ 300cd */
334 br = 0xd0;
335 break;
336 }
337
338 s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
339 0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
340}
341
342static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
343{
344 if (ctx->version < 142)
345 s6e8aa0_dcs_write_seq_static(ctx,
346 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
347 0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
348 else
349 s6e8aa0_elvss_nvm_set_v142(ctx);
350};
351
352static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
353{
354 s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
355}
356
357static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
358 {
359 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
360 0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
361 0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
362 0x00, 0x70,
363 }, {
364 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
365 0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
366 0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
367 0x00, 0x7d,
368 }, {
369 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
370 0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
371 0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
372 0x00, 0x8f,
373 }, {
374 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
375 0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
376 0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
377 0x00, 0x9e,
378 }, {
379 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
380 0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
381 0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
382 0x00, 0xa4,
383 }, {
384 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
385 0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
386 0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
387 0x00, 0xaa,
388 }, {
389 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
390 0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
391 0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
392 0x00, 0xaf,
393 }, {
394 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
395 0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
396 0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
397 0x00, 0xb9,
398 }, {
399 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
400 0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
401 0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
402 0x00, 0xbf,
403 }, {
404 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
405 0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
406 0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
407 0x00, 0xc3,
408 }, {
409 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
410 0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
411 0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
412 0x00, 0xc8,
413 }, {
414 0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
415 0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
416 0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
417 0x00, 0xcc,
418 }, {
419 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
420 0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
421 0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
422 0x00, 0xcf,
423 }, {
424 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
425 0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
426 0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
427 0x00, 0xd4,
428 }, {
429 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
430 0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
431 0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
432 0x00, 0xd8,
433 }, {
434 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
435 0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
436 0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
437 0x00, 0xdc,
438 }, {
439 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
440 0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
441 0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
442 0x00, 0xdf,
443 }, {
444 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
445 0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
446 0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
447 0x00, 0xe2,
448 }, {
449 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
450 0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
451 0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
452 0x00, 0xe6,
453 }, {
454 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
455 0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
456 0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
457 0x00, 0xe9,
458 }, {
459 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
460 0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
461 0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
462 0x00, 0xec,
463 }, {
464 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
465 0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
466 0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
467 0x00, 0xf0,
468 }, {
469 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
470 0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
471 0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
472 0x00, 0xf3,
473 }, {
474 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
475 0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
476 0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
477 0x00, 0xf6,
478 }, {
479 0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
480 0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
481 0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
482 0x00, 0xfc,
483 },
484};
485
486static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
487 {
488 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
489 0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
490 0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
491 0x00, 0x5f,
492 }, {
493 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
494 0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
495 0xc1, 0xd2, 0xd1, 0xce, 0x00, 0x53, 0x00, 0x46,
496 0x00, 0x67,
497 }, {
498 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
499 0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
500 0xbd, 0xd2, 0xd2, 0xce, 0x00, 0x59, 0x00, 0x4b,
501 0x00, 0x6e,
502 }, {
503 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
504 0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
505 0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
506 0x00, 0x75,
507 }, {
508 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
509 0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
510 0xb9, 0xd0, 0xd1, 0xcd, 0x00, 0x63, 0x00, 0x54,
511 0x00, 0x7a,
512 }, {
513 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
514 0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
515 0xb9, 0xce, 0xce, 0xc9, 0x00, 0x68, 0x00, 0x59,
516 0x00, 0x81,
517 }, {
518 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
519 0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
520 0xb8, 0xcc, 0xcd, 0xc7, 0x00, 0x6c, 0x00, 0x5c,
521 0x00, 0x86,
522 }, {
523 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
524 0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
525 0xb5, 0xca, 0xcc, 0xc5, 0x00, 0x74, 0x00, 0x63,
526 0x00, 0x90,
527 }, {
528 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
529 0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
530 0xb4, 0xca, 0xcb, 0xc5, 0x00, 0x77, 0x00, 0x66,
531 0x00, 0x94,
532 }, {
533 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
534 0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
535 0xb3, 0xc9, 0xca, 0xc3, 0x00, 0x7b, 0x00, 0x69,
536 0x00, 0x99,
537
538 }, {
539 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
540 0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
541 0xb2, 0xca, 0xcb, 0xc4, 0x00, 0x7e, 0x00, 0x6c,
542 0x00, 0x9d,
543 }, {
544 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
545 0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
546 0xb0, 0xc7, 0xc9, 0xc1, 0x00, 0x84, 0x00, 0x71,
547 0x00, 0xa5,
548 }, {
549 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
550 0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
551 0xaf, 0xc7, 0xc9, 0xc1, 0x00, 0x87, 0x00, 0x73,
552 0x00, 0xa8,
553 }, {
554 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
555 0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
556 0xaf, 0xc5, 0xc7, 0xbf, 0x00, 0x8a, 0x00, 0x76,
557 0x00, 0xac,
558 }, {
559 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
560 0xcE, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
561 0xaF, 0xc5, 0xc7, 0xbf, 0x00, 0x8c, 0x00, 0x78,
562 0x00, 0xaf,
563 }, {
564 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
565 0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
566 0xae, 0xc5, 0xc6, 0xbe, 0x00, 0x91, 0x00, 0x7d,
567 0x00, 0xb6,
568 }, {
569 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
570 0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
571 0xad, 0xc3, 0xc4, 0xbb, 0x00, 0x94, 0x00, 0x7f,
572 0x00, 0xba,
573 }, {
574 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
575 0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
576 0xac, 0xc3, 0xc5, 0xbc, 0x00, 0x96, 0x00, 0x81,
577 0x00, 0xbd,
578 }, {
579 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
580 0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
581 0xab, 0xc2, 0xc4, 0xbb, 0x00, 0x99, 0x00, 0x83,
582 0x00, 0xc0,
583 }, {
584 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
585 0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
586 0xab, 0xc1, 0xc4, 0xba, 0x00, 0x9b, 0x00, 0x85,
587 0x00, 0xc3,
588 }, {
589 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
590 0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
591 0xab, 0xc1, 0xc2, 0xb9, 0x00, 0x9D, 0x00, 0x87,
592 0x00, 0xc6,
593 }, {
594 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
595 0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
596 0xab, 0xbe, 0xc0, 0xb7, 0x00, 0xa1, 0x00, 0x8a,
597 0x00, 0xca,
598 }, {
599 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
600 0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
601 0xa9, 0xbe, 0xc1, 0xb7, 0x00, 0xa3, 0x00, 0x8b,
602 0x00, 0xce,
603 }, {
604 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
605 0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
606 0xa9, 0xbd, 0xc0, 0xb6, 0x00, 0xa5, 0x00, 0x8d,
607 0x00, 0xd0,
608 }, {
609 0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
610 0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
611 0xa8, 0xbe, 0xc0, 0xb7, 0x00, 0xa8, 0x00, 0x90,
612 0x00, 0xd3,
613 }
614};
615
616static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
617 {
618 0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
619 0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
620 0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
621 0x00, 0x58,
622 }, {
623 0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
624 0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
625 0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
626 0x00, 0x64,
627 }, {
628 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
629 0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
630 0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
631 0x00, 0x74,
632 }, {
633 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
634 0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
635 0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
636 0x00, 0x80,
637 }, {
638 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
639 0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
640 0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
641 0x00, 0x85,
642 }, {
643 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
644 0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
645 0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
646 0x00, 0x8a,
647 }, {
648 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
649 0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
650 0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
651 0x00, 0x8e,
652 }, {
653 0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
654 0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
655 0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
656 0x00, 0x96,
657 }, {
658 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
659 0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
660 0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
661 0x00, 0x9b,
662 }, {
663 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
664 0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
665 0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
666 0x00, 0x9e,
667 }, {
668 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
669 0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
670 0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
671 0x00, 0xa2,
672 }, {
673 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
674 0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
675 0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
676 0x00, 0xa5,
677 }, {
678 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
679 0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
680 0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
681 0x00, 0xa8,
682 }, {
683 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
684 0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
685 0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
686 0x00, 0xac,
687 }, {
688 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
689 0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
690 0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
691 0x00, 0xaf,
692 }, {
693 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
694 0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
695 0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
696 0x00, 0xb2,
697 }, {
698 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
699 0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
700 0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
701 0x00, 0xb5,
702 }, {
703 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
704 0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
705 0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
706 0x00, 0xb8,
707 }, {
708 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
709 0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
710 0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
711 0x00, 0xbb,
712 }, {
713 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
714 0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
715 0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
716 0x00, 0xbd,
717 }, {
718 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
719 0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
720 0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
721 0x00, 0xc0,
722 }, {
723 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
724 0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
725 0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
726 0x00, 0xc3,
727 }, {
728 0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
729 0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
730 0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
731 0x00, 0xc5,
732 }, {
733 0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
734 0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
735 0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
736 0x00, 0xc8,
737 }, {
738 0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
739 0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
740 0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
741 0x00, 0xcc,
742 },
743};
744
745static const struct s6e8aa0_variant s6e8aa0_variants[] = {
746 {
747 .version = 32,
748 .gamma_tables = s6e8aa0_gamma_tables_v32,
749 }, {
750 .version = 96,
751 .gamma_tables = s6e8aa0_gamma_tables_v96,
752 }, {
753 .version = 142,
754 .gamma_tables = s6e8aa0_gamma_tables_v142,
755 }, {
756 .version = 210,
757 .gamma_tables = s6e8aa0_gamma_tables_v142,
758 }
759};
760
761static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
762{
763 const u8 *gamma;
764
765 if (ctx->error)
766 return;
767
768 gamma = ctx->variant->gamma_tables[ctx->brightness];
769
770 if (ctx->version >= 142)
771 s6e8aa0_elvss_nvm_set(ctx);
772
773 s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
774
775 /* update gamma table. */
776 s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
777}
778
779static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
780{
781 s6e8aa0_apply_level_1_key(ctx);
782 s6e8aa0_apply_level_2_key(ctx);
783 msleep(20);
784
785 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
786 msleep(40);
787
788 s6e8aa0_panel_cond_set(ctx);
789 s6e8aa0_display_condition_set(ctx);
790 s6e8aa0_brightness_set(ctx);
791 s6e8aa0_etc_source_control(ctx);
792 s6e8aa0_etc_pentile_control(ctx);
793 s6e8aa0_elvss_nvm_set(ctx);
794 s6e8aa0_etc_power_control(ctx);
795 s6e8aa0_etc_elvss_control(ctx);
796 msleep(ctx->init_delay);
797}
798
799static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
800 int size)
801{
802 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
803 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
804 u8 buf[] = {size, 0};
805 struct mipi_dsi_msg msg = {
806 .channel = dsi->channel,
807 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
808 .tx_len = sizeof(buf),
809 .tx_buf = buf
810 };
811 int ret;
812
813 if (ctx->error < 0)
814 return;
815
816 if (!ops || !ops->transfer)
817 ret = -EIO;
818 else
819 ret = ops->transfer(dsi->host, &msg);
820
821 if (ret < 0) {
822 dev_err(ctx->dev,
823 "error %d setting maximum return packet size to %d\n",
824 ret, size);
825 ctx->error = ret;
826 }
827}
828
829static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
830{
831 u8 id[3];
832 int ret, i;
833
834 ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
835	if (ret < (int)ARRAY_SIZE(id) || id[0] == 0x00) { /* ret may be < 0 */
836 dev_err(ctx->dev, "read id failed\n");
837 ctx->error = -EIO;
838 return;
839 }
840
841	dev_info(ctx->dev, "ID: 0x%02x, 0x%02x, 0x%02x\n", id[0], id[1], id[2]);
842
843 for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
844 if (id[1] == s6e8aa0_variants[i].version)
845 break;
846 }
847 if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
848 dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
849		ctx->error = -EINVAL;
		return;	/* avoid indexing past the variant table below */
850	}
851
852 ctx->variant = &s6e8aa0_variants[i];
853 ctx->version = id[1];
854 ctx->id = id[2];
855}
856
857static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
858{
859 s6e8aa0_set_maximum_return_packet_size(ctx, 3);
860 s6e8aa0_read_mtp_id(ctx);
861 s6e8aa0_panel_init(ctx);
862 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
863}
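
/*
 * Editor's note, not part of the original commit: the helpers above rely on
 * an error-latching idiom -- the first failed DCS transfer is stored in
 * ctx->error and every later helper returns early, so a whole init sequence
 * can be issued unconditionally and checked once afterwards (as
 * s6e8aa0_enable() does below). A minimal standalone sketch of the pattern,
 * with hypothetical names:
 */
struct latched_ctx {
	int error;		/* 0 until the first failure, then sticky */
};

static void latched_step(struct latched_ctx *ctx, int (*xfer)(void))
{
	if (ctx->error < 0)
		return;		/* an earlier step failed: skip silently */

	ctx->error = xfer();	/* first negative return code sticks */
}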
864
865static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
866{
867 int ret;
868
869 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
870 if (ret < 0)
871 return ret;
872
873 msleep(ctx->power_on_delay);
874
875 gpiod_set_value(ctx->reset_gpio, 0);
876 usleep_range(10000, 11000);
877 gpiod_set_value(ctx->reset_gpio, 1);
878
879 msleep(ctx->reset_delay);
880
881 return 0;
882}
883
884static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
885{
886 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
887}
888
889static int s6e8aa0_disable(struct drm_panel *panel)
890{
891 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
892
893 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
894 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
895 msleep(40);
896
897 s6e8aa0_clear_error(ctx);
898
899 return s6e8aa0_power_off(ctx);
900}
901
902static int s6e8aa0_enable(struct drm_panel *panel)
903{
904 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
905 int ret;
906
907 ret = s6e8aa0_power_on(ctx);
908 if (ret < 0)
909 return ret;
910
911 s6e8aa0_set_sequence(ctx);
912 ret = ctx->error;
913
914 if (ret < 0)
915 s6e8aa0_disable(panel);
916
917 return ret;
918}
919
920static int s6e8aa0_get_modes(struct drm_panel *panel)
921{
922 struct drm_connector *connector = panel->connector;
923 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
924 struct drm_display_mode *mode;
925
926 mode = drm_mode_create(connector->dev);
927 if (!mode) {
928 DRM_ERROR("failed to create a new display mode\n");
929 return 0;
930 }
931
932 drm_display_mode_from_videomode(&ctx->vm, mode);
933 mode->width_mm = ctx->width_mm;
934 mode->height_mm = ctx->height_mm;
935 connector->display_info.width_mm = mode->width_mm;
936 connector->display_info.height_mm = mode->height_mm;
937
938 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
939 drm_mode_probed_add(connector, mode);
940
941 return 1;
942}
943
944static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
945 .disable = s6e8aa0_disable,
946 .enable = s6e8aa0_enable,
947 .get_modes = s6e8aa0_get_modes,
948};
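
/*
 * Editor's note, not part of the original commit: display controller
 * drivers are expected to reach the callbacks above through the drm_panel
 * API rather than calling them directly. A minimal sketch, assuming
 * "panel_node" is the DT node the encoder points at (names below are
 * illustrative):
 */
static int example_bind_panel(struct device_node *panel_node,
			      struct drm_connector *connector)
{
	struct drm_panel *panel = of_drm_find_panel(panel_node);
	int err;

	if (!panel)
		return -EPROBE_DEFER;	/* panel driver not bound yet */

	err = drm_panel_attach(panel, connector);
	if (err < 0)
		return err;

	return drm_panel_enable(panel);	/* ends up in s6e8aa0_enable() */
}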
949
950static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
951{
952 struct device *dev = ctx->dev;
953 struct device_node *np = dev->of_node;
954 int ret;
955
956 ret = of_get_videomode(np, &ctx->vm, 0);
957 if (ret < 0)
958 return ret;
959
960 of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
961 of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
962 of_property_read_u32(np, "init-delay", &ctx->init_delay);
963 of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
964 of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
965
966 ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
967 ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
968
969 return 0;
970}
971
972static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
973{
974 struct device *dev = &dsi->dev;
975 struct s6e8aa0 *ctx;
976 int ret;
977
978 ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
979 if (!ctx)
980 return -ENOMEM;
981
982 mipi_dsi_set_drvdata(dsi, ctx);
983
984 ctx->dev = dev;
985
986 dsi->lanes = 4;
987 dsi->format = MIPI_DSI_FMT_RGB888;
988 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
989 | MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
990 | MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
991 | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
992
993 ret = s6e8aa0_parse_dt(ctx);
994 if (ret < 0)
995 return ret;
996
997 ctx->supplies[0].supply = "vdd3";
998 ctx->supplies[1].supply = "vci";
999 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
1000 ctx->supplies);
1001 if (ret < 0) {
1002 dev_err(dev, "failed to get regulators: %d\n", ret);
1003 return ret;
1004 }
1005
1006 ctx->reset_gpio = devm_gpiod_get(dev, "reset");
1007 if (IS_ERR(ctx->reset_gpio)) {
1008 dev_err(dev, "cannot get reset-gpios %ld\n",
1009 PTR_ERR(ctx->reset_gpio));
1010 return PTR_ERR(ctx->reset_gpio);
1011 }
1012 ret = gpiod_direction_output(ctx->reset_gpio, 1);
1013 if (ret < 0) {
1014 dev_err(dev, "cannot configure reset-gpios %d\n", ret);
1015 return ret;
1016 }
1017
1018 ctx->brightness = GAMMA_LEVEL_NUM - 1;
1019
1020 drm_panel_init(&ctx->panel);
1021 ctx->panel.dev = dev;
1022 ctx->panel.funcs = &s6e8aa0_drm_funcs;
1023
1024 ret = drm_panel_add(&ctx->panel);
1025 if (ret < 0)
1026 return ret;
1027
1028 ret = mipi_dsi_attach(dsi);
1029 if (ret < 0)
1030 drm_panel_remove(&ctx->panel);
1031
1032 return ret;
1033}
1034
1035static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
1036{
1037 struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
1038
1039 mipi_dsi_detach(dsi);
1040 drm_panel_remove(&ctx->panel);
1041
1042 return 0;
1043}
1044
1045static const struct of_device_id s6e8aa0_of_match[] = {
1046 { .compatible = "samsung,s6e8aa0" },
1047 { }
1048};
1049MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
1050
1051static struct mipi_dsi_driver s6e8aa0_driver = {
1052 .probe = s6e8aa0_probe,
1053 .remove = s6e8aa0_remove,
1054 .driver = {
1055 .name = "panel_s6e8aa0",
1056 .owner = THIS_MODULE,
1057 .of_match_table = s6e8aa0_of_match,
1058 },
1059};
1060module_mipi_dsi_driver(s6e8aa0_driver);
1061
1062MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
1063MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
1064MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
1065MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
1066MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
1067MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
1068MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
1069MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 59d52ca2c67f..309f29e9234a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -22,9 +22,8 @@
22 */ 22 */
23 23
24#include <linux/backlight.h> 24#include <linux/backlight.h>
25#include <linux/gpio.h> 25#include <linux/gpio/consumer.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/of_gpio.h>
28#include <linux/of_platform.h> 27#include <linux/of_platform.h>
29#include <linux/platform_device.h> 28#include <linux/platform_device.h>
30#include <linux/regulator/consumer.h> 29#include <linux/regulator/consumer.h>
@@ -44,9 +43,6 @@ struct panel_desc {
44 } size; 43 } size;
45}; 44};
46 45
47/* TODO: convert to gpiod_*() API once it's been merged */
48#define GPIO_ACTIVE_LOW (1 << 0)
49
50struct panel_simple { 46struct panel_simple {
51 struct drm_panel base; 47 struct drm_panel base;
52 bool enabled; 48 bool enabled;
@@ -57,8 +53,7 @@ struct panel_simple {
57 struct regulator *supply; 53 struct regulator *supply;
58 struct i2c_adapter *ddc; 54 struct i2c_adapter *ddc;
59 55
60 unsigned long enable_gpio_flags; 56 struct gpio_desc *enable_gpio;
61 int enable_gpio;
62}; 57};
63 58
64static inline struct panel_simple *to_panel_simple(struct drm_panel *panel) 59static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
@@ -110,12 +105,8 @@ static int panel_simple_disable(struct drm_panel *panel)
110 backlight_update_status(p->backlight); 105 backlight_update_status(p->backlight);
111 } 106 }
112 107
113 if (gpio_is_valid(p->enable_gpio)) { 108 if (p->enable_gpio)
114 if (p->enable_gpio_flags & GPIO_ACTIVE_LOW) 109 gpiod_set_value_cansleep(p->enable_gpio, 0);
115 gpio_set_value(p->enable_gpio, 1);
116 else
117 gpio_set_value(p->enable_gpio, 0);
118 }
119 110
120 regulator_disable(p->supply); 111 regulator_disable(p->supply);
121 p->enabled = false; 112 p->enabled = false;
@@ -137,12 +128,8 @@ static int panel_simple_enable(struct drm_panel *panel)
137 return err; 128 return err;
138 } 129 }
139 130
140 if (gpio_is_valid(p->enable_gpio)) { 131 if (p->enable_gpio)
141 if (p->enable_gpio_flags & GPIO_ACTIVE_LOW) 132 gpiod_set_value_cansleep(p->enable_gpio, 1);
142 gpio_set_value(p->enable_gpio, 0);
143 else
144 gpio_set_value(p->enable_gpio, 1);
145 }
146 133
147 if (p->backlight) { 134 if (p->backlight) {
148 p->backlight->props.power = FB_BLANK_UNBLANK; 135 p->backlight->props.power = FB_BLANK_UNBLANK;
@@ -185,7 +172,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
185{ 172{
186 struct device_node *backlight, *ddc; 173 struct device_node *backlight, *ddc;
187 struct panel_simple *panel; 174 struct panel_simple *panel;
188 enum of_gpio_flags flags;
189 int err; 175 int err;
190 176
191 panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL); 177 panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -199,29 +185,20 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
199 if (IS_ERR(panel->supply)) 185 if (IS_ERR(panel->supply))
200 return PTR_ERR(panel->supply); 186 return PTR_ERR(panel->supply);
201 187
202 panel->enable_gpio = of_get_named_gpio_flags(dev->of_node, 188 panel->enable_gpio = devm_gpiod_get(dev, "enable");
203 "enable-gpios", 0, 189 if (IS_ERR(panel->enable_gpio)) {
204 &flags); 190 err = PTR_ERR(panel->enable_gpio);
205 if (gpio_is_valid(panel->enable_gpio)) { 191 if (err != -ENOENT) {
206 unsigned int value; 192 dev_err(dev, "failed to request GPIO: %d\n", err);
207
208 if (flags & OF_GPIO_ACTIVE_LOW)
209 panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
210
211 err = gpio_request(panel->enable_gpio, "enable");
212 if (err < 0) {
213 dev_err(dev, "failed to request GPIO#%u: %d\n",
214 panel->enable_gpio, err);
215 return err; 193 return err;
216 } 194 }
217 195
218 value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0; 196 panel->enable_gpio = NULL;
219 197 } else {
220 err = gpio_direction_output(panel->enable_gpio, value); 198 err = gpiod_direction_output(panel->enable_gpio, 0);
221 if (err < 0) { 199 if (err < 0) {
222 dev_err(dev, "failed to setup GPIO%u: %d\n", 200 dev_err(dev, "failed to setup GPIO: %d\n", err);
223 panel->enable_gpio, err); 201 return err;
224 goto free_gpio;
225 } 202 }
226 } 203 }
227 204
@@ -230,10 +207,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
230 panel->backlight = of_find_backlight_by_node(backlight); 207 panel->backlight = of_find_backlight_by_node(backlight);
231 of_node_put(backlight); 208 of_node_put(backlight);
232 209
233 if (!panel->backlight) { 210 if (!panel->backlight)
234 err = -EPROBE_DEFER; 211 return -EPROBE_DEFER;
235 goto free_gpio;
236 }
237 } 212 }
238 213
239 ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0); 214 ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
@@ -265,9 +240,6 @@ free_ddc:
265free_backlight: 240free_backlight:
266 if (panel->backlight) 241 if (panel->backlight)
267 put_device(&panel->backlight->dev); 242 put_device(&panel->backlight->dev);
268free_gpio:
269 if (gpio_is_valid(panel->enable_gpio))
270 gpio_free(panel->enable_gpio);
271 243
272 return err; 244 return err;
273} 245}
@@ -287,11 +259,6 @@ static int panel_simple_remove(struct device *dev)
287 if (panel->backlight) 259 if (panel->backlight)
288 put_device(&panel->backlight->dev); 260 put_device(&panel->backlight->dev);
289 261
290 if (gpio_is_valid(panel->enable_gpio))
291 gpio_free(panel->enable_gpio);
292
293 regulator_disable(panel->supply);
294
295 return 0; 262 return 0;
296} 263}
297 264
@@ -361,6 +328,28 @@ static const struct panel_desc chunghwa_claa101wb01 = {
361 }, 328 },
362}; 329};
363 330
331static const struct drm_display_mode lg_lp129qe_mode = {
332 .clock = 285250,
333 .hdisplay = 2560,
334 .hsync_start = 2560 + 48,
335 .hsync_end = 2560 + 48 + 32,
336 .htotal = 2560 + 48 + 32 + 80,
337 .vdisplay = 1700,
338 .vsync_start = 1700 + 3,
339 .vsync_end = 1700 + 3 + 10,
340 .vtotal = 1700 + 3 + 10 + 36,
341 .vrefresh = 60,
342};
343
344static const struct panel_desc lg_lp129qe = {
345 .modes = &lg_lp129qe_mode,
346 .num_modes = 1,
347 .size = {
348 .width = 272,
349 .height = 181,
350 },
351};
352
364static const struct drm_display_mode samsung_ltn101nt05_mode = { 353static const struct drm_display_mode samsung_ltn101nt05_mode = {
365 .clock = 54030, 354 .clock = 54030,
366 .hdisplay = 1024, 355 .hdisplay = 1024,
@@ -394,6 +383,9 @@ static const struct of_device_id platform_of_match[] = {
394 .compatible = "chunghwa,claa101wb01", 383 .compatible = "chunghwa,claa101wb01",
395 .data = &chunghwa_claa101wb01 384 .data = &chunghwa_claa101wb01
396 }, { 385 }, {
386 .compatible = "lg,lp129qe",
387 .data = &lg_lp129qe,
388 }, {
397 .compatible = "samsung,ltn101nt05", 389 .compatible = "samsung,ltn101nt05",
398 .data = &samsung_ltn101nt05, 390 .data = &samsung_ltn101nt05,
399 }, { 391 }, {
@@ -433,10 +425,65 @@ static struct platform_driver panel_simple_platform_driver = {
433struct panel_desc_dsi { 425struct panel_desc_dsi {
434 struct panel_desc desc; 426 struct panel_desc desc;
435 427
428 unsigned long flags;
436 enum mipi_dsi_pixel_format format; 429 enum mipi_dsi_pixel_format format;
437 unsigned int lanes; 430 unsigned int lanes;
438}; 431};
439 432
433static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
434 .clock = 71000,
435 .hdisplay = 800,
436 .hsync_start = 800 + 32,
437 .hsync_end = 800 + 32 + 1,
438 .htotal = 800 + 32 + 1 + 57,
439 .vdisplay = 1280,
440 .vsync_start = 1280 + 28,
441 .vsync_end = 1280 + 28 + 1,
442 .vtotal = 1280 + 28 + 1 + 14,
443 .vrefresh = 60,
444};
445
446static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
447 .desc = {
448 .modes = &lg_ld070wx3_sl01_mode,
449 .num_modes = 1,
450 .size = {
451 .width = 94,
452 .height = 151,
453 },
454 },
455 .flags = MIPI_DSI_MODE_VIDEO,
456 .format = MIPI_DSI_FMT_RGB888,
457 .lanes = 4,
458};
459
460static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
461 .clock = 67000,
462 .hdisplay = 720,
463 .hsync_start = 720 + 12,
464 .hsync_end = 720 + 12 + 4,
465 .htotal = 720 + 12 + 4 + 112,
466 .vdisplay = 1280,
467 .vsync_start = 1280 + 8,
468 .vsync_end = 1280 + 8 + 4,
469 .vtotal = 1280 + 8 + 4 + 12,
470 .vrefresh = 60,
471};
472
473static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
474 .desc = {
475 .modes = &lg_lh500wx1_sd03_mode,
476 .num_modes = 1,
477 .size = {
478 .width = 62,
479 .height = 110,
480 },
481 },
482 .flags = MIPI_DSI_MODE_VIDEO,
483 .format = MIPI_DSI_FMT_RGB888,
484 .lanes = 4,
485};
486
440static const struct drm_display_mode panasonic_vvx10f004b00_mode = { 487static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
441 .clock = 157200, 488 .clock = 157200,
442 .hdisplay = 1920, 489 .hdisplay = 1920,
@@ -459,12 +506,19 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
459 .height = 136, 506 .height = 136,
460 }, 507 },
461 }, 508 },
509 .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
462 .format = MIPI_DSI_FMT_RGB888, 510 .format = MIPI_DSI_FMT_RGB888,
463 .lanes = 4, 511 .lanes = 4,
464}; 512};
465 513
466static const struct of_device_id dsi_of_match[] = { 514static const struct of_device_id dsi_of_match[] = {
467 { 515 {
516 .compatible = "lg,ld070wx3-sl01",
517 .data = &lg_ld070wx3_sl01
518 }, {
519 .compatible = "lg,lh500wx1-sd03",
520 .data = &lg_lh500wx1_sd03
521 }, {
468 .compatible = "panasonic,vvx10f004b00", 522 .compatible = "panasonic,vvx10f004b00",
469 .data = &panasonic_vvx10f004b00 523 .data = &panasonic_vvx10f004b00
470 }, { 524 }, {
@@ -489,6 +543,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
489 if (err < 0) 543 if (err < 0)
490 return err; 544 return err;
491 545
546 dsi->mode_flags = desc->flags;
492 dsi->format = desc->format; 547 dsi->format = desc->format;
493 dsi->lanes = desc->lanes; 548 dsi->lanes = desc->lanes;
494 549
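
The panel-simple conversion above is a straight move from the integer-based gpio_* API to GPIO descriptors: devm_gpiod_get() returns -ENOENT when no "enable-gpios" property exists, which the driver maps to a NULL descriptor so the enable/disable paths can gate on a plain pointer test, and active-low polarity is now handled by gpiolib instead of the hand-rolled GPIO_ACTIVE_LOW flag. A minimal sketch of that optional-GPIO idiom, using the two-argument devm_gpiod_get() of this kernel (function and variable names are illustrative):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_optional_enable_gpio(struct device *dev,
					    struct gpio_desc **out)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "enable");

	if (IS_ERR(gpiod)) {
		int err = PTR_ERR(gpiod);

		if (err != -ENOENT)	/* real failure, e.g. -EPROBE_DEFER */
			return err;
		gpiod = NULL;		/* property absent: GPIO is optional */
	} else {
		/* park the line in its inactive state until enable time */
		int err = gpiod_direction_output(gpiod, 0);

		if (err < 0)
			return err;
	}

	*out = gpiod;
	return 0;
}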
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 798bde2e5881..41bdd174657e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -527,7 +527,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
527 bool recreate_primary = false; 527 bool recreate_primary = false;
528 int ret; 528 int ret;
529 int surf_id; 529 int surf_id;
530 if (!crtc->fb) { 530 if (!crtc->primary->fb) {
531 DRM_DEBUG_KMS("No FB bound\n"); 531 DRM_DEBUG_KMS("No FB bound\n");
532 return 0; 532 return 0;
533 } 533 }
@@ -536,7 +536,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
536 qfb = to_qxl_framebuffer(old_fb); 536 qfb = to_qxl_framebuffer(old_fb);
537 old_bo = gem_to_qxl_bo(qfb->obj); 537 old_bo = gem_to_qxl_bo(qfb->obj);
538 } 538 }
539 qfb = to_qxl_framebuffer(crtc->fb); 539 qfb = to_qxl_framebuffer(crtc->primary->fb);
540 bo = gem_to_qxl_bo(qfb->obj); 540 bo = gem_to_qxl_bo(qfb->obj);
541 if (!m) 541 if (!m)
542 /* and do we care? */ 542 /* and do we care? */
@@ -609,14 +609,14 @@ static void qxl_crtc_disable(struct drm_crtc *crtc)
609 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 609 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
610 struct drm_device *dev = crtc->dev; 610 struct drm_device *dev = crtc->dev;
611 struct qxl_device *qdev = dev->dev_private; 611 struct qxl_device *qdev = dev->dev_private;
612 if (crtc->fb) { 612 if (crtc->primary->fb) {
613 struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->fb); 613 struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->primary->fb);
614 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); 614 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
615 int ret; 615 int ret;
616 ret = qxl_bo_reserve(bo, false); 616 ret = qxl_bo_reserve(bo, false);
617 qxl_bo_unpin(bo); 617 qxl_bo_unpin(bo);
618 qxl_bo_unreserve(bo); 618 qxl_bo_unreserve(bo);
619 crtc->fb = NULL; 619 crtc->primary->fb = NULL;
620 } 620 }
621 621
622 qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0); 622 qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);
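
The qxl hunks above are part of this merge's primary-plane work: the scanout framebuffer no longer hangs off struct drm_crtc itself but off the CRTC's primary plane, so every crtc->fb access becomes crtc->primary->fb. A one-line sketch of the new access pattern (helper name is illustrative):

#include <drm/drm_crtc.h>

/* Fetch the framebuffer currently bound to a CRTC, if any. */
static struct drm_framebuffer *example_crtc_fb(struct drm_crtc *crtc)
{
	return crtc->primary ? crtc->primary->fb : NULL;
}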
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 8691c76c5ef0..b95f144f0b49 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -82,8 +82,6 @@ int qxl_bo_create(struct qxl_device *qdev,
82 enum ttm_bo_type type; 82 enum ttm_bo_type type;
83 int r; 83 int r;
84 84
85 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
86 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
87 if (kernel) 85 if (kernel)
88 type = ttm_bo_type_kernel; 86 type = ttm_bo_type_kernel;
89 else 87 else
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 821ab7b9409b..14e776f1d14e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -349,7 +349,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
349 qxl_fence_add_release_locked(&qbo->fence, release->id); 349 qxl_fence_add_release_locked(&qbo->fence, release->id);
350 350
351 ttm_bo_add_to_lru(bo); 351 ttm_bo_add_to_lru(bo);
352 ww_mutex_unlock(&bo->resv->lock); 352 __ttm_bo_unreserve(bo);
353 entry->reserved = false; 353 entry->reserved = false;
354 } 354 }
355 spin_unlock(&bdev->fence_lock); 355 spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index c7e7e6590c2b..d52c27527b9a 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -433,6 +433,7 @@ static int qxl_sync_obj_flush(void *sync_obj)
433 433
434static void qxl_sync_obj_unref(void **sync_obj) 434static void qxl_sync_obj_unref(void **sync_obj)
435{ 435{
436 *sync_obj = NULL;
436} 437}
437 438
438static void *qxl_sync_obj_ref(void *sync_obj) 439static void *qxl_sync_obj_ref(void *sync_obj)
@@ -493,7 +494,9 @@ int qxl_ttm_init(struct qxl_device *qdev)
493 /* No others user of address space so set it to 0 */ 494 /* No others user of address space so set it to 0 */
494 r = ttm_bo_device_init(&qdev->mman.bdev, 495 r = ttm_bo_device_init(&qdev->mman.bdev,
495 qdev->mman.bo_global_ref.ref.object, 496 qdev->mman.bo_global_ref.ref.object,
496 &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0); 497 &qxl_bo_driver,
498 qdev->ddev->anon_inode->i_mapping,
499 DRM_FILE_PAGE_OFFSET, 0);
497 if (r) { 500 if (r) {
498 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 501 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
499 return r; 502 return r;
@@ -518,8 +521,6 @@ int qxl_ttm_init(struct qxl_device *qdev)
518 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); 521 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
519 DRM_INFO("qxl: %uM of Surface memory size\n", 522 DRM_INFO("qxl: %uM of Surface memory size\n",
520 (unsigned)qdev->surfaceram_size / (1024 * 1024)); 523 (unsigned)qdev->surfaceram_size / (1024 * 1024));
521 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
522 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
523 r = qxl_ttm_debugfs_init(qdev); 524 r = qxl_ttm_debugfs_init(qdev);
524 if (r) { 525 if (r) {
525 DRM_ERROR("Failed to init debugfs\n"); 526 DRM_ERROR("Failed to init debugfs\n");
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 306364a1ecda..09433534dc47 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o 83 ci_dpm.o dce6_afmt.o radeon_vm.o
84 84
85# add async DMA block 85# add async DMA block
86radeon-y += \ 86radeon-y += \
@@ -99,6 +99,12 @@ radeon-y += \
99 uvd_v3_1.o \ 99 uvd_v3_1.o \
100 uvd_v4_2.o 100 uvd_v4_2.o
101 101
102# add VCE block
103radeon-y += \
104 radeon_vce.o \
105 vce_v1_0.o \
106	vce_v2_0.o
107
102radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 108radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
103radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 109radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
104radeon-$(CONFIG_ACPI) += radeon_acpi.o 110radeon-$(CONFIG_ACPI) += radeon_acpi.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index daa4dd375ab1..fb187c78978f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1106,7 +1106,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1106 int r; 1106 int r;
1107 1107
1108 /* no fb bound */ 1108 /* no fb bound */
1109 if (!atomic && !crtc->fb) { 1109 if (!atomic && !crtc->primary->fb) {
1110 DRM_DEBUG_KMS("No FB bound\n"); 1110 DRM_DEBUG_KMS("No FB bound\n");
1111 return 0; 1111 return 0;
1112 } 1112 }
@@ -1116,8 +1116,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1116 target_fb = fb; 1116 target_fb = fb;
1117 } 1117 }
1118 else { 1118 else {
1119 radeon_fb = to_radeon_framebuffer(crtc->fb); 1119 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1120 target_fb = crtc->fb; 1120 target_fb = crtc->primary->fb;
1121 } 1121 }
1122 1122
1123 /* If atomic, assume fb object is pinned & idle & fenced and 1123 /* If atomic, assume fb object is pinned & idle & fenced and
@@ -1316,7 +1316,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1316 /* set pageflip to happen anywhere in vblank interval */ 1316 /* set pageflip to happen anywhere in vblank interval */
1317 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1317 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
1318 1318
1319 if (!atomic && fb && fb != crtc->fb) { 1319 if (!atomic && fb && fb != crtc->primary->fb) {
1320 radeon_fb = to_radeon_framebuffer(fb); 1320 radeon_fb = to_radeon_framebuffer(fb);
1321 rbo = gem_to_radeon_bo(radeon_fb->obj); 1321 rbo = gem_to_radeon_bo(radeon_fb->obj);
1322 r = radeon_bo_reserve(rbo, false); 1322 r = radeon_bo_reserve(rbo, false);
@@ -1350,7 +1350,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1350 int r; 1350 int r;
1351 1351
1352 /* no fb bound */ 1352 /* no fb bound */
1353 if (!atomic && !crtc->fb) { 1353 if (!atomic && !crtc->primary->fb) {
1354 DRM_DEBUG_KMS("No FB bound\n"); 1354 DRM_DEBUG_KMS("No FB bound\n");
1355 return 0; 1355 return 0;
1356 } 1356 }
@@ -1360,8 +1360,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1360 target_fb = fb; 1360 target_fb = fb;
1361 } 1361 }
1362 else { 1362 else {
1363 radeon_fb = to_radeon_framebuffer(crtc->fb); 1363 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1364 target_fb = crtc->fb; 1364 target_fb = crtc->primary->fb;
1365 } 1365 }
1366 1366
1367 obj = radeon_fb->obj; 1367 obj = radeon_fb->obj;
@@ -1485,7 +1485,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1485 /* set pageflip to happen anywhere in vblank interval */ 1485 /* set pageflip to happen anywhere in vblank interval */
1486 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1486 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
1487 1487
1488 if (!atomic && fb && fb != crtc->fb) { 1488 if (!atomic && fb && fb != crtc->primary->fb) {
1489 radeon_fb = to_radeon_framebuffer(fb); 1489 radeon_fb = to_radeon_framebuffer(fb);
1490 rbo = gem_to_radeon_bo(radeon_fb->obj); 1490 rbo = gem_to_radeon_bo(radeon_fb->obj);
1491 r = radeon_bo_reserve(rbo, false); 1491 r = radeon_bo_reserve(rbo, false);
@@ -1972,12 +1972,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1972 int i; 1972 int i;
1973 1973
1974 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1974 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1975 if (crtc->fb) { 1975 if (crtc->primary->fb) {
1976 int r; 1976 int r;
1977 struct radeon_framebuffer *radeon_fb; 1977 struct radeon_framebuffer *radeon_fb;
1978 struct radeon_bo *rbo; 1978 struct radeon_bo *rbo;
1979 1979
1980 radeon_fb = to_radeon_framebuffer(crtc->fb); 1980 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1981 rbo = gem_to_radeon_bo(radeon_fb->obj); 1981 rbo = gem_to_radeon_bo(radeon_fb->obj);
1982 r = radeon_bo_reserve(rbo, false); 1982 r = radeon_bo_reserve(rbo, false);
1983 if (unlikely(r)) 1983 if (unlikely(r))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4ad7643fce5f..8b0ab170cef9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -142,101 +142,69 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
142 return recv_bytes; 142 return recv_bytes;
143} 143}
144 144
145static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, 145#define HEADER_SIZE 4
146 u16 address, u8 *send, u8 send_bytes, u8 delay)
147{
148 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
149 int ret;
150 u8 msg[20];
151 int msg_bytes = send_bytes + 4;
152 u8 ack;
153 unsigned retry;
154
155 if (send_bytes > 16)
156 return -1;
157 146
158 msg[0] = address; 147static ssize_t
159 msg[1] = address >> 8; 148radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
160 msg[2] = DP_AUX_NATIVE_WRITE << 4;
161 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
162 memcpy(&msg[4], send, send_bytes);
163
164 for (retry = 0; retry < 7; retry++) {
165 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
166 msg, msg_bytes, NULL, 0, delay, &ack);
167 if (ret == -EBUSY)
168 continue;
169 else if (ret < 0)
170 return ret;
171 ack >>= 4;
172 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
173 return send_bytes;
174 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
175 usleep_range(400, 500);
176 else
177 return -EIO;
178 }
179
180 return -EIO;
181}
182
183static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
184 u16 address, u8 *recv, int recv_bytes, u8 delay)
185{ 149{
186 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 150 struct radeon_i2c_chan *chan =
187 u8 msg[4]; 151 container_of(aux, struct radeon_i2c_chan, aux);
188 int msg_bytes = 4;
189 u8 ack;
190 int ret; 152 int ret;
191 unsigned retry; 153 u8 tx_buf[20];
192 154 size_t tx_size;
193 msg[0] = address; 155 u8 ack, delay = 0;
194 msg[1] = address >> 8; 156
195 msg[2] = DP_AUX_NATIVE_READ << 4; 157 if (WARN_ON(msg->size > 16))
196 msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 158 return -E2BIG;
197 159
198 for (retry = 0; retry < 7; retry++) { 160 tx_buf[0] = msg->address & 0xff;
199 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 161 tx_buf[1] = msg->address >> 8;
200 msg, msg_bytes, recv, recv_bytes, delay, &ack); 162 tx_buf[2] = msg->request << 4;
201 if (ret == -EBUSY) 163 tx_buf[3] = msg->size - 1;
202 continue; 164
203 else if (ret < 0) 165 switch (msg->request & ~DP_AUX_I2C_MOT) {
204 return ret; 166 case DP_AUX_NATIVE_WRITE:
205 ack >>= 4; 167 case DP_AUX_I2C_WRITE:
206 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) 168 tx_size = HEADER_SIZE + msg->size;
207 return ret; 169 tx_buf[3] |= tx_size << 4;
208 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 170 memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
209 usleep_range(400, 500); 171 ret = radeon_process_aux_ch(chan,
210 else if (ret == 0) 172 tx_buf, tx_size, NULL, 0, delay, &ack);
211 return -EPROTO; 173 if (ret >= 0)
212 else 174 /* Return payload size. */
213 return -EIO; 175 ret = msg->size;
176 break;
177 case DP_AUX_NATIVE_READ:
178 case DP_AUX_I2C_READ:
179 tx_size = HEADER_SIZE;
180 tx_buf[3] |= tx_size << 4;
181 ret = radeon_process_aux_ch(chan,
182 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
183 break;
184 default:
185 ret = -EINVAL;
186 break;
214 } 187 }
215 188
216 return -EIO; 189 if (ret > 0)
217} 190 msg->reply = ack >> 4;
218 191
219static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, 192 return ret;
220 u16 reg, u8 val)
221{
222 radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
223} 193}
224 194
225static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector, 195void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
226 u16 reg)
227{ 196{
228 u8 val = 0; 197 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
229
230 radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
231 198
232 return val; 199 dig_connector->dp_i2c_bus->aux.dev = radeon_connector->base.kdev;
200 dig_connector->dp_i2c_bus->aux.transfer = radeon_dp_aux_transfer;
233} 201}
234 202
235int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 203int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
236 u8 write_byte, u8 *read_byte) 204 u8 write_byte, u8 *read_byte)
237{ 205{
238 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 206 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
239 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; 207 struct radeon_i2c_chan *auxch = i2c_get_adapdata(adapter);
240 u16 address = algo_data->address; 208 u16 address = algo_data->address;
241 u8 msg[5]; 209 u8 msg[5];
242 u8 reply[2]; 210 u8 reply[2];
@@ -246,34 +214,30 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
246 int ret; 214 int ret;
247 u8 ack; 215 u8 ack;
248 216
249 /* Set up the command byte */ 217 /* Set up the address */
250 if (mode & MODE_I2C_READ)
251 msg[2] = DP_AUX_I2C_READ << 4;
252 else
253 msg[2] = DP_AUX_I2C_WRITE << 4;
254
255 if (!(mode & MODE_I2C_STOP))
256 msg[2] |= DP_AUX_I2C_MOT << 4;
257
258 msg[0] = address; 218 msg[0] = address;
259 msg[1] = address >> 8; 219 msg[1] = address >> 8;
260 220
261 switch (mode) { 221 /* Set up the command byte */
262 case MODE_I2C_WRITE: 222 if (mode & MODE_I2C_READ) {
223 msg[2] = DP_AUX_I2C_READ << 4;
224 msg_bytes = 4;
225 msg[3] = msg_bytes << 4;
226 } else {
227 msg[2] = DP_AUX_I2C_WRITE << 4;
263 msg_bytes = 5; 228 msg_bytes = 5;
264 msg[3] = msg_bytes << 4; 229 msg[3] = msg_bytes << 4;
265 msg[4] = write_byte; 230 msg[4] = write_byte;
266 break;
267 case MODE_I2C_READ:
268 msg_bytes = 4;
269 msg[3] = msg_bytes << 4;
270 break;
271 default:
272 msg_bytes = 4;
273 msg[3] = 3 << 4;
274 break;
275 } 231 }
276 232
233 /* special handling for start/stop */
234 if (mode & (MODE_I2C_START | MODE_I2C_STOP))
235 msg[3] = 3 << 4;
236
237 /* Set MOT bit for all but stop */
238 if ((mode & MODE_I2C_STOP) == 0)
239 msg[2] |= DP_AUX_I2C_MOT << 4;
240
277 for (retry = 0; retry < 7; retry++) { 241 for (retry = 0; retry < 7; retry++) {
278 ret = radeon_process_aux_ch(auxch, 242 ret = radeon_process_aux_ch(auxch,
279 msg, msg_bytes, reply, reply_bytes, 0, &ack); 243 msg, msg_bytes, reply, reply_bytes, 0, &ack);
@@ -472,11 +436,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
472 if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 436 if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
473 return; 437 return;
474 438
475 if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0)) 439 if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_SINK_OUI, buf, 3))
476 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 440 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
477 buf[0], buf[1], buf[2]); 441 buf[0], buf[1], buf[2]);
478 442
479 if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0)) 443 if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_BRANCH_OUI, buf, 3))
480 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 444 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
481 buf[0], buf[1], buf[2]); 445 buf[0], buf[1], buf[2]);
482} 446}
@@ -487,8 +451,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
487 u8 msg[DP_DPCD_SIZE]; 451 u8 msg[DP_DPCD_SIZE];
488 int ret, i; 452 int ret, i;
489 453
490 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 454 ret = drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_DPCD_REV, msg,
491 DP_DPCD_SIZE, 0); 455 DP_DPCD_SIZE);
492 if (ret > 0) { 456 if (ret > 0) {
493 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 457 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
494 DRM_DEBUG_KMS("DPCD: "); 458 DRM_DEBUG_KMS("DPCD: ");
@@ -510,6 +474,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
510 struct drm_device *dev = encoder->dev; 474 struct drm_device *dev = encoder->dev;
511 struct radeon_device *rdev = dev->dev_private; 475 struct radeon_device *rdev = dev->dev_private;
512 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 476 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
477 struct radeon_connector_atom_dig *dig_connector;
513 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 478 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
514 u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); 479 u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
515 u8 tmp; 480 u8 tmp;
@@ -517,9 +482,15 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
517 if (!ASIC_IS_DCE4(rdev)) 482 if (!ASIC_IS_DCE4(rdev))
518 return panel_mode; 483 return panel_mode;
519 484
485 if (!radeon_connector->con_priv)
486 return panel_mode;
487
488 dig_connector = radeon_connector->con_priv;
489
520 if (dp_bridge != ENCODER_OBJECT_ID_NONE) { 490 if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
521 /* DP bridge chips */ 491 /* DP bridge chips */
522 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 492 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
493 DP_EDP_CONFIGURATION_CAP, &tmp);
523 if (tmp & 1) 494 if (tmp & 1)
524 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 495 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
525 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || 496 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
@@ -529,7 +500,8 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
529 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 500 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
530 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 501 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
531 /* eDP */ 502 /* eDP */
532 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 503 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
504 DP_EDP_CONFIGURATION_CAP, &tmp);
533 if (tmp & 1) 505 if (tmp & 1)
534 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 506 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
535 } 507 }
@@ -577,37 +549,42 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
577 return MODE_OK; 549 return MODE_OK;
578} 550}
579 551
580static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
581 u8 link_status[DP_LINK_STATUS_SIZE])
582{
583 int ret;
584 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
585 link_status, DP_LINK_STATUS_SIZE, 100);
586 if (ret <= 0) {
587 return false;
588 }
589
590 DRM_DEBUG_KMS("link status %6ph\n", link_status);
591 return true;
592}
593
594bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) 552bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
595{ 553{
596 u8 link_status[DP_LINK_STATUS_SIZE]; 554 u8 link_status[DP_LINK_STATUS_SIZE];
597 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 555 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
598 556
599 if (!radeon_dp_get_link_status(radeon_connector, link_status)) 557 if (drm_dp_dpcd_read_link_status(&dig->dp_i2c_bus->aux, link_status) <= 0)
600 return false; 558 return false;
601 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) 559 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
602 return false; 560 return false;
603 return true; 561 return true;
604} 562}
605 563
564void radeon_dp_set_rx_power_state(struct drm_connector *connector,
565 u8 power_state)
566{
567 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
568 struct radeon_connector_atom_dig *dig_connector;
569
570 if (!radeon_connector->con_priv)
571 return;
572
573 dig_connector = radeon_connector->con_priv;
574
575 /* power up/down the sink */
576 if (dig_connector->dpcd[0] >= 0x11) {
577 drm_dp_dpcd_writeb(&dig_connector->dp_i2c_bus->aux,
578 DP_SET_POWER, power_state);
579 usleep_range(1000, 2000);
580 }
581}
582
583
606struct radeon_dp_link_train_info { 584struct radeon_dp_link_train_info {
607 struct radeon_device *rdev; 585 struct radeon_device *rdev;
608 struct drm_encoder *encoder; 586 struct drm_encoder *encoder;
609 struct drm_connector *connector; 587 struct drm_connector *connector;
610 struct radeon_connector *radeon_connector;
611 int enc_id; 588 int enc_id;
612 int dp_clock; 589 int dp_clock;
613 int dp_lane_count; 590 int dp_lane_count;
@@ -617,6 +594,7 @@ struct radeon_dp_link_train_info {
617 u8 link_status[DP_LINK_STATUS_SIZE]; 594 u8 link_status[DP_LINK_STATUS_SIZE];
618 u8 tries; 595 u8 tries;
619 bool use_dpencoder; 596 bool use_dpencoder;
597 struct drm_dp_aux *aux;
620}; 598};
621 599
622static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) 600static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -627,8 +605,8 @@ static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
627 0, dp_info->train_set[0]); /* sets all lanes at once */ 605 0, dp_info->train_set[0]); /* sets all lanes at once */
628 606
629 /* set the vs/emph on the sink */ 607 /* set the vs/emph on the sink */
630 radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET, 608 drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
631 dp_info->train_set, dp_info->dp_lane_count, 0); 609 dp_info->train_set, dp_info->dp_lane_count);
632} 610}
633 611
634static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) 612static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
@@ -663,7 +641,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
663 } 641 }
664 642
665 /* enable training pattern on the sink */ 643 /* enable training pattern on the sink */
666 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp); 644 drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
667} 645}
668 646
669static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) 647static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
@@ -673,34 +651,30 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
673 u8 tmp; 651 u8 tmp;
674 652
675 /* power up the sink */ 653 /* power up the sink */
676 if (dp_info->dpcd[0] >= 0x11) { 654 radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
677 radeon_write_dpcd_reg(dp_info->radeon_connector,
678 DP_SET_POWER, DP_SET_POWER_D0);
679 usleep_range(1000, 2000);
680 }
681 655
682 /* possibly enable downspread on the sink */ 656 /* possibly enable downspread on the sink */
683 if (dp_info->dpcd[3] & 0x1) 657 if (dp_info->dpcd[3] & 0x1)
684 radeon_write_dpcd_reg(dp_info->radeon_connector, 658 drm_dp_dpcd_writeb(dp_info->aux,
685 DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); 659 DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
686 else 660 else
687 radeon_write_dpcd_reg(dp_info->radeon_connector, 661 drm_dp_dpcd_writeb(dp_info->aux,
688 DP_DOWNSPREAD_CTRL, 0); 662 DP_DOWNSPREAD_CTRL, 0);
689 663
690 if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && 664 if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
691 (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { 665 (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
692 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1); 666 drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
693 } 667 }
694 668
695 /* set the lane count on the sink */ 669 /* set the lane count on the sink */
696 tmp = dp_info->dp_lane_count; 670 tmp = dp_info->dp_lane_count;
697 if (drm_dp_enhanced_frame_cap(dp_info->dpcd)) 671 if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
698 tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 672 tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
699 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); 673 drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
700 674
701 /* set the link rate on the sink */ 675 /* set the link rate on the sink */
702 tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); 676 tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
703 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); 677 drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
704 678
705 /* start training on the source */ 679 /* start training on the source */
706 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) 680 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
@@ -711,9 +685,9 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
711 dp_info->dp_clock, dp_info->enc_id, 0); 685 dp_info->dp_clock, dp_info->enc_id, 0);
712 686
713 /* disable the training pattern on the sink */ 687 /* disable the training pattern on the sink */
714 radeon_write_dpcd_reg(dp_info->radeon_connector, 688 drm_dp_dpcd_writeb(dp_info->aux,
715 DP_TRAINING_PATTERN_SET, 689 DP_TRAINING_PATTERN_SET,
716 DP_TRAINING_PATTERN_DISABLE); 690 DP_TRAINING_PATTERN_DISABLE);
717 691
718 return 0; 692 return 0;
719} 693}
@@ -723,9 +697,9 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
723 udelay(400); 697 udelay(400);
724 698
725 /* disable the training pattern on the sink */ 699 /* disable the training pattern on the sink */
726 radeon_write_dpcd_reg(dp_info->radeon_connector, 700 drm_dp_dpcd_writeb(dp_info->aux,
727 DP_TRAINING_PATTERN_SET, 701 DP_TRAINING_PATTERN_SET,
728 DP_TRAINING_PATTERN_DISABLE); 702 DP_TRAINING_PATTERN_DISABLE);
729 703
730 /* disable the training pattern on the source */ 704 /* disable the training pattern on the source */
731 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) 705 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
@@ -757,7 +731,8 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
757 while (1) { 731 while (1) {
758 drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); 732 drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
759 733
760 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { 734 if (drm_dp_dpcd_read_link_status(dp_info->aux,
735 dp_info->link_status) <= 0) {
761 DRM_ERROR("displayport link status failed\n"); 736 DRM_ERROR("displayport link status failed\n");
762 break; 737 break;
763 } 738 }
@@ -819,7 +794,8 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
819 while (1) { 794 while (1) {
820 drm_dp_link_train_channel_eq_delay(dp_info->dpcd); 795 drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
821 796
822 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { 797 if (drm_dp_dpcd_read_link_status(dp_info->aux,
798 dp_info->link_status) <= 0) {
823 DRM_ERROR("displayport link status failed\n"); 799 DRM_ERROR("displayport link status failed\n");
824 break; 800 break;
825 } 801 }
@@ -902,7 +878,7 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
902 else 878 else
903 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; 879 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
904 880
905 tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); 881 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux, DP_MAX_LANE_COUNT, &tmp);
906 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 882 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
907 dp_info.tp3_supported = true; 883 dp_info.tp3_supported = true;
908 else 884 else
@@ -912,9 +888,9 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
912 dp_info.rdev = rdev; 888 dp_info.rdev = rdev;
913 dp_info.encoder = encoder; 889 dp_info.encoder = encoder;
914 dp_info.connector = connector; 890 dp_info.connector = connector;
915 dp_info.radeon_connector = radeon_connector;
916 dp_info.dp_lane_count = dig_connector->dp_lane_count; 891 dp_info.dp_lane_count = dig_connector->dp_lane_count;
917 dp_info.dp_clock = dig_connector->dp_clock; 892 dp_info.dp_clock = dig_connector->dp_clock;
893 dp_info.aux = &dig_connector->dp_i2c_bus->aux;
918 894
919 if (radeon_dp_link_train_init(&dp_info)) 895 if (radeon_dp_link_train_init(&dp_info))
920 goto done; 896 goto done;
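
The atombios_dp.c rewrite above drops radeon's private AUX read/write wrappers in favor of the shared drm_dp_aux infrastructure this merge introduces: once radeon_dp_aux_init() has filled in .dev and .transfer, all DPCD traffic goes through drm_dp_dpcd_read()/_readb()/_writeb() and friends. A minimal sketch of a caller, equivalent in spirit to the new radeon_dp_needs_link_train() (function name is illustrative):

#include <drm/drm_dp_helper.h>

static bool example_needs_link_train(struct drm_dp_aux *aux, int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* one native AUX read covering DP_LANE0_1_STATUS onwards */
	if (drm_dp_dpcd_read_link_status(aux, link_status) <= 0)
		return false;	/* AUX failure: nothing we can train */

	return !drm_dp_channel_eq_ok(link_status, lane_count);
}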
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 607dc14d195e..e6eb5097597f 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1633,10 +1633,16 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1633 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1633 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1634 struct radeon_connector *radeon_connector = NULL; 1634 struct radeon_connector *radeon_connector = NULL;
1635 struct radeon_connector_atom_dig *radeon_dig_connector = NULL; 1635 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
1636 bool travis_quirk = false;
1636 1637
1637 if (connector) { 1638 if (connector) {
1638 radeon_connector = to_radeon_connector(connector); 1639 radeon_connector = to_radeon_connector(connector);
1639 radeon_dig_connector = radeon_connector->con_priv; 1640 radeon_dig_connector = radeon_connector->con_priv;
1641 if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
1642 ENCODER_OBJECT_ID_TRAVIS) &&
1643 (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
1644 !ASIC_IS_DCE5(rdev))
1645 travis_quirk = true;
1640 } 1646 }
1641 1647
1642 switch (mode) { 1648 switch (mode) {
@@ -1657,17 +1663,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1657 atombios_external_encoder_setup(encoder, ext_encoder, 1663 atombios_external_encoder_setup(encoder, ext_encoder,
1658 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); 1664 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1659 } 1665 }
1660 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1661 } else if (ASIC_IS_DCE4(rdev)) { 1666 } else if (ASIC_IS_DCE4(rdev)) {
1662 /* setup and enable the encoder */ 1667 /* setup and enable the encoder */
1663 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); 1668 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1664 /* enable the transmitter */
1665 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1666 } else { 1669 } else {
1667 /* setup and enable the encoder and transmitter */ 1670 /* setup and enable the encoder and transmitter */
1668 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1671 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1669 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1672 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1670 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1671 } 1673 }
1672 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1674 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1673 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1675 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1675,68 +1677,56 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 							       ATOM_TRANSMITTER_ACTION_POWER_ON);
 				radeon_dig_connector->edp_on = true;
 			}
+		}
+		/* enable the transmitter */
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			/* DP_SET_POWER_D0 is set in radeon_dp_link_train */
 			radeon_dp_link_train(encoder, connector);
 			if (ASIC_IS_DCE4(rdev))
 				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
 		}
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		if (ext_encoder)
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
 		if (ASIC_IS_DCE4(rdev)) {
+			if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+		}
+		if (ext_encoder)
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) &&
+		    connector && !travis_quirk)
+			radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+		if (ASIC_IS_DCE4(rdev)) {
 			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		} else {
 			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_transmitter_setup(encoder,
+						       ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
-			if (ASIC_IS_DCE4(rdev))
-				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+			if (travis_quirk)
+				radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
 			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
 				atombios_set_edp_panel_power(connector,
 							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
 				radeon_dig_connector->edp_on = false;
 			}
 		}
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
-		break;
-	}
-}
-
-static void
-radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
-			     struct drm_encoder *ext_encoder,
-			     int mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	default:
-		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
-		} else
-			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
-		} else
-			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
 		break;
 	}
 }
@@ -1747,7 +1737,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
 	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
 		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
@@ -1807,9 +1796,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 		return;
 	}
 
-	if (ext_encoder)
-		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
-
 	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
 }
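
Note: the atombios_encoders.c hunks above fold the external-encoder DPMS handling into radeon_atom_encoder_dpms_dig() and reorder the sequence. On power-up the transmitter is enabled once, after eDP panel power, with DP link training afterwards; on power-down the backlight is cut and the DP receiver is put into D3 before the transmitter is disabled, except when the Travis DP-to-LVDS bridge quirk applies, in which case the receiver is put to sleep only after the transmitter is down. A minimal standalone sketch of that off-path ordering, with stub functions standing in for the ATOM helpers (illustrative only, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the ATOM/DP helpers named in the diff. */
static void backlight_off(void)   { puts("LCD_BLOFF"); }
static void rx_power_d3(void)     { puts("DP_SET_POWER_D3"); }
static void transmitter_off(void) { puts("TRANSMITTER_DISABLE"); }

/* Off-path ordering as rearranged by the hunks: the DP receiver is
 * put to sleep before the transmitter, unless the Travis bridge
 * quirk applies, in which case it is done afterwards. */
static void dpms_off(bool is_dp, bool travis_quirk)
{
	backlight_off();
	if (is_dp && !travis_quirk)
		rx_power_d3();
	transmitter_off();
	if (is_dp && travis_quirk)
		rx_power_d3();
}

int main(void)
{
	dpms_off(true, false);	/* normal DP panel */
	dpms_off(true, true);	/* Travis DP->LVDS bridge */
	return 0;
}
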
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index ea103ccdf4bd..f81d7ca134db 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2601,6 +2601,10 @@ int btc_dpm_init(struct radeon_device *rdev)
 	pi->min_vddc_in_table = 0;
 	pi->max_vddc_in_table = 0;
 
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = rv7xx_parse_power_table(rdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8d49104ca6c2..cad89a977527 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -172,6 +172,8 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
 extern int ci_mc_load_microcode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+			  u32 block, bool enable);
 
 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
 					 struct atom_voltage_table_entry *voltage_table,
@@ -746,6 +748,14 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	int i;
 
+	if (rps->vce_active) {
+		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
 	    ci_dpm_vblank_too_short(rdev))
 		disable_mclk_switching = true;
@@ -804,6 +814,13 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
 		sclk = ps->performance_levels[0].sclk;
 	}
 
+	if (rps->vce_active) {
+		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
+			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
+	}
+
 	ps->performance_levels[0].sclk = sclk;
 	ps->performance_levels[0].mclk = mclk;
 
@@ -3468,7 +3485,6 @@ static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
 		0 : -EINVAL;
 }
 
-#if 0
 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
@@ -3501,6 +3517,7 @@ static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
 		0 : -EINVAL;
 }
 
+#if 0
 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
@@ -3587,7 +3604,6 @@ static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 	return ci_enable_uvd_dpm(rdev, !gate);
 }
 
-#if 0
 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
 {
 	u8 i;
@@ -3608,15 +3624,15 @@ static int ci_update_vce_dpm(struct radeon_device *rdev,
 			     struct radeon_ps *radeon_current_state)
 {
 	struct ci_power_info *pi = ci_get_pi(rdev);
-	bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
-	bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
 	int ret = 0;
 	u32 tmp;
 
-	if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
-		if (new_vce_clock_non_zero) {
-			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+	if (radeon_current_state->evclk != radeon_new_state->evclk) {
+		if (radeon_new_state->evclk) {
+			/* turn the clocks on when encoding */
+			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
 
+			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
 			tmp = RREG32_SMC(DPM_TABLE_475);
 			tmp &= ~VceBootLevel_MASK;
 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
@@ -3624,12 +3640,16 @@ static int ci_update_vce_dpm(struct radeon_device *rdev,
 
 		ret = ci_enable_vce_dpm(rdev, true);
 	} else {
+		/* turn the clocks off when not encoding */
+		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+
 		ret = ci_enable_vce_dpm(rdev, false);
 	}
 	}
 	return ret;
 }
 
+#if 0
 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
 {
 	return ci_enable_samu_dpm(rdev, gate);
@@ -4752,13 +4772,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
 		return ret;
 	}
-#if 0
+
 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
 	if (ret) {
 		DRM_ERROR("ci_update_vce_dpm failed\n");
 		return ret;
 	}
-#endif
+
 	ret = ci_update_sclk_t(rdev);
 	if (ret) {
 		DRM_ERROR("ci_update_sclk_t failed\n");
@@ -4959,9 +4979,6 @@ static int ci_parse_power_table(struct radeon_device *rdev)
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
 	power_state_offset = (u8 *)state_array->states;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		u8 *idx;
 		power_state = (union pplib_power_state *)power_state_offset;
@@ -4998,6 +5015,21 @@ static int ci_parse_power_table(struct radeon_device *rdev)
 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
 	}
 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
 	return 0;
 }
 
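
Note: the new loop fills rdev->pm.dpm.vce_states[] by reassembling each clock from a little-endian 16-bit low word and an 8-bit high byte. A tiny self-contained check of that shift-or reconstruction (the struct here is a hypothetical stand-in for the pplib layout):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the pplib clock-info fields: a clock is
 * stored as a 16-bit low word plus an 8-bit high byte. */
struct clock_halves {
	uint16_t low;	/* e.g. usEngineClockLow, already cpu-endian here */
	uint8_t  high;	/* e.g. ucEngineClockHigh */
};

static uint32_t assemble_clock(struct clock_halves c)
{
	/* Same shift-or as the diff: low | (high << 16). */
	return (uint32_t)c.low | ((uint32_t)c.high << 16);
}

int main(void)
{
	/* 0x054321 split as high = 0x05, low = 0x4321 */
	struct clock_halves c = { .low = 0x4321, .high = 0x05 };
	assert(assemble_clock(c) == 0x054321);
	return 0;
}
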
@@ -5077,17 +5109,25 @@ int ci_dpm_init(struct radeon_device *rdev)
 		ci_dpm_fini(rdev);
 		return ret;
 	}
-	ret = ci_parse_power_table(rdev);
+
+	ret = r600_get_platform_caps(rdev);
 	if (ret) {
 		ci_dpm_fini(rdev);
 		return ret;
 	}
+
 	ret = r600_parse_extended_power_table(rdev);
 	if (ret) {
 		ci_dpm_fini(rdev);
 		return ret;
 	}
 
+	ret = ci_parse_power_table(rdev);
+	if (ret) {
+		ci_dpm_fini(rdev);
+		return ret;
+	}
+
 	pi->dll_default_on = false;
 	pi->sram_end = SMC_RAM_END;
 
@@ -5120,6 +5160,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	pi->caps_sclk_throttle_low_notification = false;
 
 	pi->caps_uvd_dpm = true;
+	pi->caps_vce_dpm = true;
 
 	ci_get_leakage_voltages(rdev);
 	ci_patch_dependency_tables_with_leakage(rdev);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index bbb17841a9e5..745143c2358f 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -75,6 +75,7 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
 static void cik_rlc_stop(struct radeon_device *rdev);
 static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
@@ -1095,7 +1096,7 @@ static const u32 spectre_golden_registers[] =
 	0x8a14, 0xf000003f, 0x00000007,
 	0x8b24, 0xffffffff, 0x00ffffff,
 	0x28350, 0x3f3f3fff, 0x00000082,
-	0x28355, 0x0000003f, 0x00000000,
+	0x28354, 0x0000003f, 0x00000000,
 	0x3e78, 0x00000001, 0x00000002,
 	0x913c, 0xffff03df, 0x00000004,
 	0xc768, 0x00000008, 0x00000008,
@@ -2028,6 +2029,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 5:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
 			break;
 		case 6:
@@ -2048,6 +2050,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 9:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
 			break;
 		case 10:
@@ -2070,6 +2073,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 13:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
 			break;
 		case 14:
@@ -2092,6 +2096,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 27:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
 			break;
 		case 28:
@@ -2246,6 +2251,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 5:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
 			break;
 		case 6:
@@ -2266,6 +2272,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 9:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
 			break;
 		case 10:
@@ -2288,6 +2295,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 13:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
 			break;
 		case 14:
@@ -2310,6 +2318,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 27:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
 			break;
 		case 28:
@@ -2466,6 +2475,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 5:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
 			break;
 		case 6:
@@ -2486,6 +2496,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 9:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
 			break;
 		case 10:
@@ -2508,6 +2519,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 13:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
 			break;
 		case 14:
@@ -2530,6 +2542,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 27:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
 			break;
 		case 28:
@@ -2592,6 +2605,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 5:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
 			break;
 		case 6:
@@ -2612,6 +2626,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 9:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
 			break;
 		case 10:
@@ -2634,6 +2649,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 13:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
 			break;
 		case 14:
@@ -2656,6 +2672,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 27:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
 			break;
 		case 28:
@@ -2812,6 +2829,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 5:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P2) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
 			break;
 		case 6:
@@ -2827,11 +2845,13 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 					 TILE_SPLIT(split_equal_to_row_size));
 			break;
 		case 8:
-			gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
+			gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+					PIPE_CONFIG(ADDR_SURF_P2);
 			break;
 		case 9:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+					 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+					 PIPE_CONFIG(ADDR_SURF_P2));
 			break;
 		case 10:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
@@ -2853,6 +2873,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 13:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+					 PIPE_CONFIG(ADDR_SURF_P2) |
 					 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
 			break;
 		case 14:
@@ -2875,7 +2896,8 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 			break;
 		case 27:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+					 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+					 PIPE_CONFIG(ADDR_SURF_P2));
 			break;
 		case 28:
 			gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
@@ -4030,8 +4052,6 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
 	WREG32(CP_RB0_BASE, rb_addr);
 	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
 
-	ring->rptr = RREG32(CP_RB0_RPTR);
-
 	/* start the ring */
 	cik_cp_gfx_start(rdev);
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
@@ -4589,8 +4609,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 	rdev->ring[idx].wptr = 0;
 	mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
 	WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
-	rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
-	mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
+	mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
 
 	/* set the vmid for the queue */
 	mqd->queue_state.cp_hqd_vmid = 0;
@@ -5120,11 +5139,9 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
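
Note: radeon_ring_lockup_update() now takes rdev as well, and the explicit "force CP activities" poke via radeon_ring_force_activity() is gone (the same change recurs for the SDMA and evergreen rings below), so lockup detection presumably relies on tracking read-pointer progress alone. A simplified, self-contained illustration of that rptr-progress idea (not the driver's actual bookkeeping):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified lockup tracker: remember the last read pointer and the
 * time it last moved; if the ring makes no progress for longer than
 * the timeout, report a hang. */
struct ring_track {
	uint32_t last_rptr;
	uint64_t last_change_ms;
};

static void lockup_update(struct ring_track *t, uint32_t rptr, uint64_t now_ms)
{
	if (rptr != t->last_rptr) {
		t->last_rptr = rptr;
		t->last_change_ms = now_ms;
	}
}

static bool test_lockup(const struct ring_track *t, uint32_t rptr,
			uint64_t now_ms, uint64_t timeout_ms)
{
	return rptr == t->last_rptr && (now_ms - t->last_change_ms) > timeout_ms;
}

int main(void)
{
	struct ring_track t = { 0, 0 };

	lockup_update(&t, 8, 100);	/* ring advanced at t = 100 ms */
	printf("hung: %d\n", test_lockup(&t, 8, 11100, 10000)); /* prints 1 */
	return 0;
}
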
@@ -6144,6 +6161,10 @@ void cik_update_cg(struct radeon_device *rdev,
 		cik_enable_hdp_mgcg(rdev, enable);
 		cik_enable_hdp_ls(rdev, enable);
 	}
+
+	if (block & RADEON_CG_BLOCK_VCE) {
+		vce_v2_0_enable_mgcg(rdev, enable);
+	}
 }
 
 static void cik_init_cg(struct radeon_device *rdev)
@@ -6521,8 +6542,8 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
 		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_HAWAII:
-		buffer[count++] = 0x3a00161a;
-		buffer[count++] = 0x0000002e;
+		buffer[count++] = cpu_to_le32(0x3a00161a);
+		buffer[count++] = cpu_to_le32(0x0000002e);
 		break;
 	default:
 		buffer[count++] = cpu_to_le32(0x00000000);
@@ -7493,6 +7514,20 @@ restart_ih:
 			/* reset addr and status */
 			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
 			break;
+		case 167: /* VCE */
+			DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
+			switch (src_data) {
+			case 0:
+				radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
 		case 176: /* GFX RB CP_INT */
 		case 177: /* GFX IB CP_INT */
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
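
Note: the new IH case 167 fans VCE interrupts out to the two VCE rings by src_data. A tiny table-driven equivalent of that nested switch (the ring index values here are made up; the real ones are TN_RING_TYPE_VCE1_INDEX and TN_RING_TYPE_VCE2_INDEX from radeon.h):

#include <stdio.h>

/* Illustrative ring indices; the real values live in radeon.h. */
enum { VCE1_RING = 6, VCE2_RING = 7, RING_INVALID = -1 };

/* Map VCE IH src_data (0 or 1) to a fence ring, as case 167 does
 * with a nested switch; anything else is unhandled. */
static int vce_ring_for_src(unsigned int src_data)
{
	static const int map[] = { VCE1_RING, VCE2_RING };
	return src_data < 2 ? map[src_data] : RING_INVALID;
}

int main(void)
{
	printf("src 0 -> ring %d\n", vce_ring_for_src(0));
	printf("src 5 -> ring %d\n", vce_ring_for_src(5)); /* unhandled */
	return 0;
}
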
@@ -7792,6 +7827,22 @@ static int cik_startup(struct radeon_device *rdev)
 	if (r)
 		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
+	r = radeon_vce_resume(rdev);
+	if (!r) {
+		r = vce_v2_0_resume(rdev);
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE1_INDEX);
+		if (!r)
+			r = radeon_fence_driver_start_ring(rdev,
+							   TN_RING_TYPE_VCE2_INDEX);
+	}
+	if (r) {
+		dev_err(rdev->dev, "VCE init error (%d).\n", r);
+		rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+		rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+	}
+
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
@@ -7867,6 +7918,23 @@ static int cik_startup(struct radeon_device *rdev)
 			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
 	}
 
+	r = -ENOENT;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	if (ring->ring_size)
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+				     VCE_CMD_NO_OP);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	if (ring->ring_size)
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+				     VCE_CMD_NO_OP);
+
+	if (!r)
+		r = vce_v1_0_init(rdev);
+	else if (r != -ENOENT)
+		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -7938,6 +8006,7 @@ int cik_suspend(struct radeon_device *rdev)
 	cik_sdma_enable(rdev, false);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
+	radeon_vce_suspend(rdev);
 	cik_fini_pg(rdev);
 	cik_fini_cg(rdev);
 	cik_irq_suspend(rdev);
@@ -8070,6 +8139,17 @@ int cik_init(struct radeon_device *rdev)
 		r600_ring_init(rdev, ring, 4096);
 	}
 
+	r = radeon_vce_init(rdev);
+	if (!r) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		ring->ring_obj = NULL;
+		r600_ring_init(rdev, ring, 4096);
+
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		ring->ring_obj = NULL;
+		r600_ring_init(rdev, ring, 4096);
+	}
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -8131,6 +8211,7 @@ void cik_fini(struct radeon_device *rdev)
 	radeon_irq_kms_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
+	radeon_vce_fini(rdev);
 	cik_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
@@ -8869,6 +8950,41 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	return r;
 }
 
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	u32 tmp;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	tmp = RREG32_SMC(CG_ECLK_CNTL);
+	tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(CG_ECLK_CNTL, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
 static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 {
 	struct pci_dev *root = rdev->pdev->bus->self;
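
Note: cik_set_vce_clocks() above follows the usual SMC clock-switch shape: wait for CG_ECLK_STATUS to report the clock stable, program the new post divider into CG_ECLK_CNTL with a read-modify-write under ECLK_DIVIDER_MASK, then wait for stable again, giving up with -ETIMEDOUT after 100 polls of 10 ms each. A user-space sketch of that bounded-poll helper (the readiness callback is a stand-in for the RREG32_SMC check):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

/* Poll a readiness predicate up to `tries` times, sleeping `ms`
 * milliseconds between attempts; mirrors the 100 x mdelay(10)
 * loops in the function above. */
static int wait_ready(bool (*ready)(void), int tries, int ms)
{
	for (int i = 0; i < tries; i++) {
		if (ready())
			return 0;
		usleep(ms * 1000);
	}
	return -ETIMEDOUT;
}

/* Stand-in for "RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS". */
static bool eclk_stable(void) { return true; }

int main(void)
{
	return wait_ready(eclk_stable, 100, 10) ? 1 : 0;
}
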
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 94626ea90fa5..89b4afa5041c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -369,8 +369,6 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
 		ring->wptr = 0;
 		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
 
-		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
 		/* enable DMA RB */
 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
 
@@ -713,11 +711,9 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		mask = RADEON_RESET_DMA1;
 
 	if (!(reset_mask & mask)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 98bae9d7b74d..213873270d5f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -203,6 +203,12 @@
 #define	CTF_TEMP_MASK					0x0003fe00
 #define	CTF_TEMP_SHIFT					9
 
+#define CG_ECLK_CNTL					0xC05000AC
+#	define ECLK_DIVIDER_MASK			0x7f
+#	define ECLK_DIR_CNTL_EN				(1 << 8)
+#define CG_ECLK_STATUS					0xC05000B0
+#	define ECLK_STATUS				(1 << 0)
+
 #define	CG_SPLL_FUNC_CNTL				0xC0500140
 #define	SPLL_RESET					(1 << 0)
 #define	SPLL_PWRON					(1 << 1)
@@ -2010,4 +2016,47 @@
 /* UVD CTX indirect */
 #define	UVD_CGC_MEM_CTRL				0xC0
 
+/* VCE */
+
+#define VCE_VCPU_CACHE_OFFSET0				0x20024
+#define VCE_VCPU_CACHE_SIZE0				0x20028
+#define VCE_VCPU_CACHE_OFFSET1				0x2002c
+#define VCE_VCPU_CACHE_SIZE1				0x20030
+#define VCE_VCPU_CACHE_OFFSET2				0x20034
+#define VCE_VCPU_CACHE_SIZE2				0x20038
+#define VCE_RB_RPTR2					0x20178
+#define VCE_RB_WPTR2					0x2017c
+#define VCE_RB_RPTR					0x2018c
+#define VCE_RB_WPTR					0x20190
+#define VCE_CLOCK_GATING_A				0x202f8
+#	define CGC_CLK_GATE_DLY_TIMER_MASK		(0xf << 0)
+#	define CGC_CLK_GATE_DLY_TIMER(x)		((x) << 0)
+#	define CGC_CLK_GATER_OFF_DLY_TIMER_MASK		(0xff << 4)
+#	define CGC_CLK_GATER_OFF_DLY_TIMER(x)		((x) << 4)
+#	define CGC_UENC_WAIT_AWAKE			(1 << 18)
+#define VCE_CLOCK_GATING_B				0x202fc
+#define VCE_CGTT_CLK_OVERRIDE				0x207a0
+#define VCE_UENC_CLOCK_GATING				0x207bc
+#	define CLOCK_ON_DELAY_MASK			(0xf << 0)
+#	define CLOCK_ON_DELAY(x)			((x) << 0)
+#	define CLOCK_OFF_DELAY_MASK			(0xff << 4)
+#	define CLOCK_OFF_DELAY(x)			((x) << 4)
+#define VCE_UENC_REG_CLOCK_GATING			0x207c0
+#define VCE_SYS_INT_EN					0x21300
+#	define VCE_SYS_INT_TRAP_INTERRUPT_EN		(1 << 3)
+#define VCE_LMI_CTRL2					0x21474
+#define VCE_LMI_CTRL					0x21498
+#define VCE_LMI_VM_CTRL					0x214a0
+#define VCE_LMI_SWAP_CNTL				0x214b4
+#define VCE_LMI_SWAP_CNTL1				0x214b8
+#define VCE_LMI_CACHE_CTRL				0x214f4
+
+#define VCE_CMD_NO_OP					0x00000000
+#define VCE_CMD_END					0x00000001
+#define VCE_CMD_IB					0x00000002
+#define VCE_CMD_FENCE					0x00000003
+#define VCE_CMD_TRAP					0x00000004
+#define VCE_CMD_IB_AUTO					0x00000005
+#define VCE_CMD_SEMAPHORE				0x00000006
+
 #endif
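
Note: the VCE block added to cikd.h pairs each multi-bit field with a _MASK and a setter macro (for example CLOCK_ON_DELAY_MASK with CLOCK_ON_DELAY(x)), the usual shape for read-modify-write register updates. A small self-contained demonstration of that pattern on a plain variable (the register value here is fake):

#include <assert.h>
#include <stdint.h>

/* Field macros in the style of the new cikd.h definitions. */
#define CLOCK_ON_DELAY_MASK	(0xf << 0)
#define CLOCK_ON_DELAY(x)	((x) << 0)
#define CLOCK_OFF_DELAY_MASK	(0xff << 4)
#define CLOCK_OFF_DELAY(x)	((x) << 4)

int main(void)
{
	uint32_t reg = 0xdeadbe70;	/* fake register contents */

	/* Read-modify-write: clear the field, then or in the new value. */
	reg &= ~CLOCK_ON_DELAY_MASK;
	reg |= CLOCK_ON_DELAY(0x4);

	assert((reg & CLOCK_ON_DELAY_MASK) == 0x4);
	return 0;
}
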
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index cf783fc0ef21..5a9a5f4d7888 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2036,6 +2036,10 @@ int cypress_dpm_init(struct radeon_device *rdev)
 	pi->min_vddc_in_table = 0;
 	pi->max_vddc_in_table = 0;
 
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = rv7xx_parse_power_table(rdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 27b0ff16082e..b406546440da 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2990,8 +2990,6 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-	ring->rptr = RREG32(CP_RB_RPTR);
-
 	evergreen_cp_start(rdev);
 	ring->ready = true;
 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -3952,11 +3950,9 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c7cac07f139b..5c8b358f9fba 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1165,7 +1165,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1165 "0x%04X\n", reg); 1165 "0x%04X\n", reg);
1166 return -EINVAL; 1166 return -EINVAL;
1167 } 1167 }
1168 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1168 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1169 break; 1169 break;
1170 case DB_DEPTH_CONTROL: 1170 case DB_DEPTH_CONTROL:
1171 track->db_depth_control = radeon_get_ib_value(p, idx); 1171 track->db_depth_control = radeon_get_ib_value(p, idx);
@@ -1196,12 +1196,12 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1196 } 1196 }
1197 ib[idx] &= ~Z_ARRAY_MODE(0xf); 1197 ib[idx] &= ~Z_ARRAY_MODE(0xf);
1198 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 1198 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
1199 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1199 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1200 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1200 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1201 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1201 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1202 unsigned bankw, bankh, mtaspect, tile_split; 1202 unsigned bankw, bankh, mtaspect, tile_split;
1203 1203
1204 evergreen_tiling_fields(reloc->lobj.tiling_flags, 1204 evergreen_tiling_fields(reloc->tiling_flags,
1205 &bankw, &bankh, &mtaspect, 1205 &bankw, &bankh, &mtaspect,
1206 &tile_split); 1206 &tile_split);
1207 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); 1207 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1237,7 +1237,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1237 return -EINVAL; 1237 return -EINVAL;
1238 } 1238 }
1239 track->db_z_read_offset = radeon_get_ib_value(p, idx); 1239 track->db_z_read_offset = radeon_get_ib_value(p, idx);
1240 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1240 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1241 track->db_z_read_bo = reloc->robj; 1241 track->db_z_read_bo = reloc->robj;
1242 track->db_dirty = true; 1242 track->db_dirty = true;
1243 break; 1243 break;
@@ -1249,7 +1249,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1249 return -EINVAL; 1249 return -EINVAL;
1250 } 1250 }
1251 track->db_z_write_offset = radeon_get_ib_value(p, idx); 1251 track->db_z_write_offset = radeon_get_ib_value(p, idx);
1252 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1252 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1253 track->db_z_write_bo = reloc->robj; 1253 track->db_z_write_bo = reloc->robj;
1254 track->db_dirty = true; 1254 track->db_dirty = true;
1255 break; 1255 break;
@@ -1261,7 +1261,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1261 return -EINVAL; 1261 return -EINVAL;
1262 } 1262 }
1263 track->db_s_read_offset = radeon_get_ib_value(p, idx); 1263 track->db_s_read_offset = radeon_get_ib_value(p, idx);
1264 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1264 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1265 track->db_s_read_bo = reloc->robj; 1265 track->db_s_read_bo = reloc->robj;
1266 track->db_dirty = true; 1266 track->db_dirty = true;
1267 break; 1267 break;
@@ -1273,7 +1273,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1273 return -EINVAL; 1273 return -EINVAL;
1274 } 1274 }
1275 track->db_s_write_offset = radeon_get_ib_value(p, idx); 1275 track->db_s_write_offset = radeon_get_ib_value(p, idx);
1276 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1276 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1277 track->db_s_write_bo = reloc->robj; 1277 track->db_s_write_bo = reloc->robj;
1278 track->db_dirty = true; 1278 track->db_dirty = true;
1279 break; 1279 break;
@@ -1297,7 +1297,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1297 } 1297 }
1298 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1298 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1299 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1299 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1300 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1300 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1301 track->vgt_strmout_bo[tmp] = reloc->robj; 1301 track->vgt_strmout_bo[tmp] = reloc->robj;
1302 track->streamout_dirty = true; 1302 track->streamout_dirty = true;
1303 break; 1303 break;
@@ -1317,7 +1317,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1317 "0x%04X\n", reg); 1317 "0x%04X\n", reg);
1318 return -EINVAL; 1318 return -EINVAL;
1319 } 1319 }
1320 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1320 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1321 case CB_TARGET_MASK: 1321 case CB_TARGET_MASK:
1322 track->cb_target_mask = radeon_get_ib_value(p, idx); 1322 track->cb_target_mask = radeon_get_ib_value(p, idx);
1323 track->cb_dirty = true; 1323 track->cb_dirty = true;
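
Note: throughout these checker hunks the relocation's GPU address is folded into the command word as (reloc->gpu_offset >> 8): the hardware registers hold 256-byte-aligned base addresses, so the low eight bits are dropped. A quick standalone check of that encoding (values are arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t gpu_offset = 0x1234500ULL;	/* 256-byte aligned */
	uint32_t ib_word = 0;

	/* Same folding as the checker: registers store base >> 8. */
	ib_word += (uint32_t)((gpu_offset >> 8) & 0xffffffff);
	assert(ib_word == 0x12345);

	/* Shifting back recovers the aligned address. */
	assert(((uint64_t)ib_word << 8) == gpu_offset);
	return 0;
}
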
@@ -1381,8 +1381,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1381 "0x%04X\n", reg); 1381 "0x%04X\n", reg);
1382 return -EINVAL; 1382 return -EINVAL;
1383 } 1383 }
1384 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1384 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1385 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1385 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1386 } 1386 }
1387 track->cb_dirty = true; 1387 track->cb_dirty = true;
1388 break; 1388 break;
@@ -1399,8 +1399,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1399 "0x%04X\n", reg); 1399 "0x%04X\n", reg);
1400 return -EINVAL; 1400 return -EINVAL;
1401 } 1401 }
1402 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1402 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1403 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); 1403 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1404 } 1404 }
1405 track->cb_dirty = true; 1405 track->cb_dirty = true;
1406 break; 1406 break;
@@ -1461,10 +1461,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1461 return -EINVAL; 1461 return -EINVAL;
1462 } 1462 }
1463 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1463 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1464 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1464 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1465 unsigned bankw, bankh, mtaspect, tile_split; 1465 unsigned bankw, bankh, mtaspect, tile_split;
1466 1466
1467 evergreen_tiling_fields(reloc->lobj.tiling_flags, 1467 evergreen_tiling_fields(reloc->tiling_flags,
1468 &bankw, &bankh, &mtaspect, 1468 &bankw, &bankh, &mtaspect,
1469 &tile_split); 1469 &tile_split);
1470 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); 1470 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1489,10 +1489,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1489 return -EINVAL; 1489 return -EINVAL;
1490 } 1490 }
1491 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1491 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1492 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1493 unsigned bankw, bankh, mtaspect, tile_split; 1493 unsigned bankw, bankh, mtaspect, tile_split;
1494 1494
1495 evergreen_tiling_fields(reloc->lobj.tiling_flags, 1495 evergreen_tiling_fields(reloc->tiling_flags,
1496 &bankw, &bankh, &mtaspect, 1496 &bankw, &bankh, &mtaspect,
1497 &tile_split); 1497 &tile_split);
1498 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); 1498 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1520,7 +1520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1520 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1520 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1521 return -EINVAL; 1521 return -EINVAL;
1522 } 1522 }
1523 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1523 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1524 track->cb_color_fmask_bo[tmp] = reloc->robj; 1524 track->cb_color_fmask_bo[tmp] = reloc->robj;
1525 break; 1525 break;
1526 case CB_COLOR0_CMASK: 1526 case CB_COLOR0_CMASK:
@@ -1537,7 +1537,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1537 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1537 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1538 return -EINVAL; 1538 return -EINVAL;
1539 } 1539 }
1540 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1540 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1541 track->cb_color_cmask_bo[tmp] = reloc->robj; 1541 track->cb_color_cmask_bo[tmp] = reloc->robj;
1542 break; 1542 break;
1543 case CB_COLOR0_FMASK_SLICE: 1543 case CB_COLOR0_FMASK_SLICE:
@@ -1578,7 +1578,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1578 } 1578 }
1579 tmp = (reg - CB_COLOR0_BASE) / 0x3c; 1579 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
1580 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); 1580 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1581 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1581 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1582 track->cb_color_bo[tmp] = reloc->robj; 1582 track->cb_color_bo[tmp] = reloc->robj;
1583 track->cb_dirty = true; 1583 track->cb_dirty = true;
1584 break; 1584 break;
@@ -1594,7 +1594,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1594 } 1594 }
1595 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; 1595 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
1596 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); 1596 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1597 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1597 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1598 track->cb_color_bo[tmp] = reloc->robj; 1598 track->cb_color_bo[tmp] = reloc->robj;
1599 track->cb_dirty = true; 1599 track->cb_dirty = true;
1600 break; 1600 break;
@@ -1606,7 +1606,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1606 return -EINVAL; 1606 return -EINVAL;
1607 } 1607 }
1608 track->htile_offset = radeon_get_ib_value(p, idx); 1608 track->htile_offset = radeon_get_ib_value(p, idx);
1609 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1609 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1610 track->htile_bo = reloc->robj; 1610 track->htile_bo = reloc->robj;
1611 track->db_dirty = true; 1611 track->db_dirty = true;
1612 break; 1612 break;
@@ -1723,7 +1723,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1723 "0x%04X\n", reg); 1723 "0x%04X\n", reg);
1724 return -EINVAL; 1724 return -EINVAL;
1725 } 1725 }
1726 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1726 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1727 break; 1727 break;
1728 case SX_MEMORY_EXPORT_BASE: 1728 case SX_MEMORY_EXPORT_BASE:
1729 if (p->rdev->family >= CHIP_CAYMAN) { 1729 if (p->rdev->family >= CHIP_CAYMAN) {
@@ -1737,7 +1737,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1737 "0x%04X\n", reg); 1737 "0x%04X\n", reg);
1738 return -EINVAL; 1738 return -EINVAL;
1739 } 1739 }
1740 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1740 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1741 break; 1741 break;
1742 case CAYMAN_SX_SCATTER_EXPORT_BASE: 1742 case CAYMAN_SX_SCATTER_EXPORT_BASE:
1743 if (p->rdev->family < CHIP_CAYMAN) { 1743 if (p->rdev->family < CHIP_CAYMAN) {
@@ -1751,7 +1751,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1751 "0x%04X\n", reg); 1751 "0x%04X\n", reg);
1752 return -EINVAL; 1752 return -EINVAL;
1753 } 1753 }
1754 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1754 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1755 break; 1755 break;
1756 case SX_MISC: 1756 case SX_MISC:
1757 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1757 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1836,7 +1836,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (idx_value & 0xfffffff0) +
 			 ((u64)(tmp & 0xff) << 32);
 
@@ -1882,7 +1882,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 idx_value +
 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
@@ -1909,7 +1909,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 idx_value +
 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
@@ -1937,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 radeon_get_ib_value(p, idx+1) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2027,7 +2027,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
 			return -EINVAL;
 		}
-		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+		ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
 		r = evergreen_cs_track_check(p);
 		if (r) {
 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
@@ -2049,7 +2049,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			tmp = radeon_get_ib_value(p, idx) +
 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
-			offset = reloc->lobj.gpu_offset + tmp;
+			offset = reloc->gpu_offset + tmp;
 
 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -2144,7 +2144,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			tmp = radeon_get_ib_value(p, idx+2) +
 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
 
-			offset = reloc->lobj.gpu_offset + tmp;
+			offset = reloc->gpu_offset + tmp;
 
 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -2174,7 +2174,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 				DRM_ERROR("bad SURFACE_SYNC\n");
 				return -EINVAL;
 			}
-			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 		}
 		break;
 	case PACKET3_EVENT_WRITE:
@@ -2190,7 +2190,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad EVENT_WRITE\n");
 			return -EINVAL;
 		}
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2212,7 +2212,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2234,7 +2234,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
@@ -2302,11 +2302,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			}
 			if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 				ib[idx+1+(i*8)+1] |=
-					TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
-				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+					TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+				if (reloc->tiling_flags & RADEON_TILING_MACRO) {
 					unsigned bankw, bankh, mtaspect, tile_split;
 
-					evergreen_tiling_fields(reloc->lobj.tiling_flags,
+					evergreen_tiling_fields(reloc->tiling_flags,
2310 &bankw, &bankh, &mtaspect, 2310 &bankw, &bankh, &mtaspect,
2311 &tile_split); 2311 &tile_split);
2312 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); 2312 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
@@ -2318,7 +2318,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2318 } 2318 }
2319 } 2319 }
2320 texture = reloc->robj; 2320 texture = reloc->robj;
2321 toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2321 toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2322 2322
2323 /* tex mip base */ 2323 /* tex mip base */
2324 tex_dim = ib[idx+1+(i*8)+0] & 0x7; 2324 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
@@ -2337,7 +2337,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2337 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 2337 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2338 return -EINVAL; 2338 return -EINVAL;
2339 } 2339 }
2340 moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2340 moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2341 mipmap = reloc->robj; 2341 mipmap = reloc->robj;
2342 } 2342 }
2343 2343
@@ -2364,7 +2364,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2364 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; 2364 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2365 } 2365 }
2366 2366
2367 offset64 = reloc->lobj.gpu_offset + offset; 2367 offset64 = reloc->gpu_offset + offset;
2368 ib[idx+1+(i*8)+0] = offset64; 2368 ib[idx+1+(i*8)+0] = offset64;
2369 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | 2369 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2370 (upper_32_bits(offset64) & 0xff); 2370 (upper_32_bits(offset64) & 0xff);
@@ -2445,7 +2445,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2445 offset + 4, radeon_bo_size(reloc->robj)); 2445 offset + 4, radeon_bo_size(reloc->robj));
2446 return -EINVAL; 2446 return -EINVAL;
2447 } 2447 }
2448 offset += reloc->lobj.gpu_offset; 2448 offset += reloc->gpu_offset;
2449 ib[idx+1] = offset; 2449 ib[idx+1] = offset;
2450 ib[idx+2] = upper_32_bits(offset) & 0xff; 2450 ib[idx+2] = upper_32_bits(offset) & 0xff;
2451 } 2451 }
@@ -2464,7 +2464,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2464 offset + 4, radeon_bo_size(reloc->robj)); 2464 offset + 4, radeon_bo_size(reloc->robj));
2465 return -EINVAL; 2465 return -EINVAL;
2466 } 2466 }
2467 offset += reloc->lobj.gpu_offset; 2467 offset += reloc->gpu_offset;
2468 ib[idx+3] = offset; 2468 ib[idx+3] = offset;
2469 ib[idx+4] = upper_32_bits(offset) & 0xff; 2469 ib[idx+4] = upper_32_bits(offset) & 0xff;
2470 } 2470 }
@@ -2493,7 +2493,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2493 offset + 8, radeon_bo_size(reloc->robj)); 2493 offset + 8, radeon_bo_size(reloc->robj));
2494 return -EINVAL; 2494 return -EINVAL;
2495 } 2495 }
2496 offset += reloc->lobj.gpu_offset; 2496 offset += reloc->gpu_offset;
2497 ib[idx+0] = offset; 2497 ib[idx+0] = offset;
2498 ib[idx+1] = upper_32_bits(offset) & 0xff; 2498 ib[idx+1] = upper_32_bits(offset) & 0xff;
2499 break; 2499 break;
@@ -2518,7 +2518,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2518 offset + 4, radeon_bo_size(reloc->robj)); 2518 offset + 4, radeon_bo_size(reloc->robj));
2519 return -EINVAL; 2519 return -EINVAL;
2520 } 2520 }
2521 offset += reloc->lobj.gpu_offset; 2521 offset += reloc->gpu_offset;
2522 ib[idx+1] = offset; 2522 ib[idx+1] = offset;
2523 ib[idx+2] = upper_32_bits(offset) & 0xff; 2523 ib[idx+2] = upper_32_bits(offset) & 0xff;
2524 } else { 2524 } else {
@@ -2542,7 +2542,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2542 offset + 4, radeon_bo_size(reloc->robj)); 2542 offset + 4, radeon_bo_size(reloc->robj));
2543 return -EINVAL; 2543 return -EINVAL;
2544 } 2544 }
2545 offset += reloc->lobj.gpu_offset; 2545 offset += reloc->gpu_offset;
2546 ib[idx+3] = offset; 2546 ib[idx+3] = offset;
2547 ib[idx+4] = upper_32_bits(offset) & 0xff; 2547 ib[idx+4] = upper_32_bits(offset) & 0xff;
2548 } else { 2548 } else {
@@ -2717,7 +2717,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2717 dst_offset = radeon_get_ib_value(p, idx+1); 2717 dst_offset = radeon_get_ib_value(p, idx+1);
2718 dst_offset <<= 8; 2718 dst_offset <<= 8;
2719 2719
2720 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2720 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2721 p->idx += count + 7; 2721 p->idx += count + 7;
2722 break; 2722 break;
2723 /* linear */ 2723 /* linear */
@@ -2725,8 +2725,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2725 dst_offset = radeon_get_ib_value(p, idx+1); 2725 dst_offset = radeon_get_ib_value(p, idx+1);
2726 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2726 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2727 2727
2728 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2728 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2729 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2729 ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2730 p->idx += count + 3; 2730 p->idx += count + 3;
2731 break; 2731 break;
2732 default: 2732 default:
@@ -2768,10 +2768,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2768 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2768 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2769 return -EINVAL; 2769 return -EINVAL;
2770 } 2770 }
2771 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2771 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2772 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2772 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2773 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2773 ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2774 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2774 ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2775 p->idx += 5; 2775 p->idx += 5;
2776 break; 2776 break;
2777 /* Copy L2T/T2L */ 2777 /* Copy L2T/T2L */
@@ -2781,22 +2781,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2781 /* tiled src, linear dst */ 2781 /* tiled src, linear dst */
2782 src_offset = radeon_get_ib_value(p, idx+1); 2782 src_offset = radeon_get_ib_value(p, idx+1);
2783 src_offset <<= 8; 2783 src_offset <<= 8;
2784 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2784 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
2785 2785
2786 dst_offset = radeon_get_ib_value(p, idx + 7); 2786 dst_offset = radeon_get_ib_value(p, idx + 7);
2787 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; 2787 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
2788 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2788 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2789 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2789 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2790 } else { 2790 } else {
2791 /* linear src, tiled dst */ 2791 /* linear src, tiled dst */
2792 src_offset = radeon_get_ib_value(p, idx+7); 2792 src_offset = radeon_get_ib_value(p, idx+7);
2793 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; 2793 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
2794 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2794 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2795 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2795 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2796 2796
2797 dst_offset = radeon_get_ib_value(p, idx+1); 2797 dst_offset = radeon_get_ib_value(p, idx+1);
2798 dst_offset <<= 8; 2798 dst_offset <<= 8;
2799 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2799 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2800 } 2800 }
2801 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2801 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2802 dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", 2802 dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
@@ -2827,10 +2827,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2827 dst_offset + count, radeon_bo_size(dst_reloc->robj)); 2827 dst_offset + count, radeon_bo_size(dst_reloc->robj));
2828 return -EINVAL; 2828 return -EINVAL;
2829 } 2829 }
2830 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); 2830 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
2831 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); 2831 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
2832 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2832 ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2833 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2833 ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2834 p->idx += 5; 2834 p->idx += 5;
2835 break; 2835 break;
2836 /* Copy L2L, partial */ 2836 /* Copy L2L, partial */
@@ -2840,10 +2840,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2840 DRM_ERROR("L2L Partial is cayman only !\n"); 2840 DRM_ERROR("L2L Partial is cayman only !\n");
2841 return -EINVAL; 2841 return -EINVAL;
2842 } 2842 }
2843 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); 2843 ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
2844 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2844 ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2845 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); 2845 ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
2846 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2846 ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2847 2847
2848 p->idx += 9; 2848 p->idx += 9;
2849 break; 2849 break;
@@ -2876,12 +2876,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2876 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 2876 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2877 return -EINVAL; 2877 return -EINVAL;
2878 } 2878 }
2879 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2879 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2880 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc); 2880 ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
2881 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2881 ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2882 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2882 ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2883 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff; 2883 ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
2884 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2884 ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2885 p->idx += 7; 2885 p->idx += 7;
2886 break; 2886 break;
2887 /* Copy L2T Frame to Field */ 2887 /* Copy L2T Frame to Field */
@@ -2916,10 +2916,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2916 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 2916 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2917 return -EINVAL; 2917 return -EINVAL;
2918 } 2918 }
2919 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2919 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2920 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); 2920 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
2921 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2921 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2922 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2922 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2923 p->idx += 10; 2923 p->idx += 10;
2924 break; 2924 break;
2925 /* Copy L2T/T2L, partial */ 2925 /* Copy L2T/T2L, partial */
@@ -2932,16 +2932,16 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2932 /* detile bit */ 2932 /* detile bit */
2933 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { 2933 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
2934 /* tiled src, linear dst */ 2934 /* tiled src, linear dst */
2935 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2935 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
2936 2936
2937 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2937 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2938 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2938 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2939 } else { 2939 } else {
2940 /* linear src, tiled dst */ 2940 /* linear src, tiled dst */
2941 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2941 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2942 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2942 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2943 2943
2944 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2944 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2945 } 2945 }
2946 p->idx += 12; 2946 p->idx += 12;
2947 break; 2947 break;
@@ -2978,10 +2978,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2978 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 2978 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2979 return -EINVAL; 2979 return -EINVAL;
2980 } 2980 }
2981 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2981 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2982 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); 2982 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
2983 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2983 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2984 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2984 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2985 p->idx += 10; 2985 p->idx += 10;
2986 break; 2986 break;
2987 /* Copy L2T/T2L (tile units) */ 2987 /* Copy L2T/T2L (tile units) */
@@ -2992,22 +2992,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2992 /* tiled src, linear dst */ 2992 /* tiled src, linear dst */
2993 src_offset = radeon_get_ib_value(p, idx+1); 2993 src_offset = radeon_get_ib_value(p, idx+1);
2994 src_offset <<= 8; 2994 src_offset <<= 8;
2995 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2995 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
2996 2996
2997 dst_offset = radeon_get_ib_value(p, idx+7); 2997 dst_offset = radeon_get_ib_value(p, idx+7);
2998 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; 2998 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
2999 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2999 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
3000 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 3000 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
3001 } else { 3001 } else {
3002 /* linear src, tiled dst */ 3002 /* linear src, tiled dst */
3003 src_offset = radeon_get_ib_value(p, idx+7); 3003 src_offset = radeon_get_ib_value(p, idx+7);
3004 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; 3004 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3005 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 3005 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3006 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 3006 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3007 3007
3008 dst_offset = radeon_get_ib_value(p, idx+1); 3008 dst_offset = radeon_get_ib_value(p, idx+1);
3009 dst_offset <<= 8; 3009 dst_offset <<= 8;
3010 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 3010 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3011 } 3011 }
3012 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3012 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3013 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", 3013 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
@@ -3028,8 +3028,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3028 DRM_ERROR("L2T, T2L Partial is cayman only !\n"); 3028 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
3029 return -EINVAL; 3029 return -EINVAL;
3030 } 3030 }
3031 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 3031 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
3032 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 3032 ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
3033 p->idx += 13; 3033 p->idx += 13;
3034 break; 3034 break;
3035 /* Copy L2T broadcast (tile units) */ 3035 /* Copy L2T broadcast (tile units) */
@@ -3065,10 +3065,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3065 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3065 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3066 return -EINVAL; 3066 return -EINVAL;
3067 } 3067 }
3068 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 3068 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3069 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); 3069 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
3070 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 3070 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3071 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 3071 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3072 p->idx += 10; 3072 p->idx += 10;
3073 break; 3073 break;
3074 default: 3074 default:
@@ -3089,8 +3089,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3089 dst_offset, radeon_bo_size(dst_reloc->robj)); 3089 dst_offset, radeon_bo_size(dst_reloc->robj));
3090 return -EINVAL; 3090 return -EINVAL;
3091 } 3091 }
3092 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 3092 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
3093 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; 3093 ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
3094 p->idx += 4; 3094 p->idx += 4;
3095 break; 3095 break;
3096 case DMA_PACKET_NOP: 3096 case DMA_PACKET_NOP:
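
Every evergreen_cs.c hunk above is the same mechanical rename: the CS relocation entry no longer keeps its buffer-object state in a nested lobj member, so reloc->lobj.gpu_offset and reloc->lobj.tiling_flags collapse to reloc->gpu_offset and reloc->tiling_flags. A minimal stand-alone sketch of that flattening (the struct layouts here are simplified stand-ins for illustration, not the driver's real relocation definitions):

/* sketch only: simplified stand-ins for the pre- and post-patch reloc shapes */
#include <stdint.h>
#include <stdio.h>

struct bo_list_entry { uint64_t gpu_offset; uint32_t tiling_flags; };
struct reloc_old { struct bo_list_entry lobj; };                   /* pre-patch: nested */
struct reloc_new { uint64_t gpu_offset; uint32_t tiling_flags; };  /* post-patch: flat */

int main(void)
{
	struct reloc_old o = { .lobj = { .gpu_offset = 0x12345600ULL, .tiling_flags = 1 } };
	struct reloc_new n = { .gpu_offset = 0x12345600ULL, .tiling_flags = 1 };

	/* same value either way, one less indirection after the patch; this is
	 * the expression the ib[idx] patching above keeps repeating */
	printf("%x %x\n",
	       (unsigned)((o.lobj.gpu_offset >> 8) & 0xffffffff),
	       (unsigned)((n.gpu_offset >> 8) & 0xffffffff));
	return 0;
}
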
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index a37b54436382..287fe966d7de 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -174,11 +174,9 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
 	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

 	if (!(reset_mask & RADEON_RESET_DMA)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

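
The lockup-detection hunk above repeats across ni.c, ni_dma.c, r100.c and r600.c below: radeon_ring_lockup_update() now takes rdev alongside the ring, and the explicit "force ring activities" step (radeon_ring_force_activity()) before radeon_ring_test_lockup() is gone. A sketch of the caller shape all of those functions converge on (engine_is_lockup is a hypothetical consolidation for illustration; only the two declared helpers are real):

#include <stdbool.h>

struct radeon_device;
struct radeon_ring;

/* post-patch signatures, as used by every *_is_lockup() caller in this series */
void radeon_ring_lockup_update(struct radeon_device *rdev, struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);

static bool engine_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring,
			     unsigned int reset_mask, unsigned int engine_bits)
{
	if (!(reset_mask & engine_bits)) {
		/* engine looks healthy: refresh the lockup tracker, report no hang */
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	/* no forced ring activity any more; just test for a stuck ring */
	return radeon_ring_test_lockup(rdev, ring);
}
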
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 351db361239d..16ec9d56a234 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1338,13 +1338,11 @@ static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
 			    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
 }

-#if 0
 static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
 {
 	return kv_notify_message_to_smu(rdev, enable ?
 			PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
 }
-#endif

 static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
 {
@@ -1389,7 +1387,6 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 	return kv_enable_uvd_dpm(rdev, !gate);
 }

-#if 0
 static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
 {
 	u8 i;
@@ -1414,6 +1411,9 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
 	int ret;

 	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+		kv_dpm_powergate_vce(rdev, false);
+		/* turn the clocks on when encoding */
+		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
 		if (pi->caps_stable_p_state)
 			pi->vce_boot_level = table->count - 1;
 		else
@@ -1436,11 +1436,13 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
 		kv_enable_vce_dpm(rdev, true);
 	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
 		kv_enable_vce_dpm(rdev, false);
+		/* turn the clocks off when not encoding */
+		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
+		kv_dpm_powergate_vce(rdev, true);
 	}

 	return 0;
 }
-#endif

 static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
 {
@@ -1575,11 +1577,16 @@ static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
 	pi->vce_power_gated = gate;

 	if (gate) {
-		if (pi->caps_vce_pg)
+		if (pi->caps_vce_pg) {
+			/* XXX do we need a vce_v1_0_stop() ? */
 			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+		}
 	} else {
-		if (pi->caps_vce_pg)
+		if (pi->caps_vce_pg) {
 			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+			vce_v2_0_resume(rdev);
+			vce_v1_0_start(rdev);
+		}
 	}
 }

@@ -1768,7 +1775,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
 	struct radeon_ps *new_ps = &pi->requested_rps;
-	/*struct radeon_ps *old_ps = &pi->current_rps;*/
+	struct radeon_ps *old_ps = &pi->current_rps;
 	int ret;

 	if (pi->bapm_enable) {
@@ -1798,13 +1805,12 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 			kv_set_enabled_levels(rdev);
 			kv_force_lowest_valid(rdev);
 			kv_unforce_levels(rdev);
-#if 0
+
 			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
 			if (ret) {
 				DRM_ERROR("kv_update_vce_dpm failed\n");
 				return ret;
 			}
-#endif
 			kv_update_sclk_t(rdev);
 		}
 	} else {
@@ -1823,13 +1829,11 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 			kv_program_nbps_index_settings(rdev, new_ps);
 			kv_freeze_sclk_dpm(rdev, false);
 			kv_set_enabled_levels(rdev);
-#if 0
 			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
 			if (ret) {
 				DRM_ERROR("kv_update_vce_dpm failed\n");
 				return ret;
 			}
-#endif
 			kv_update_acp_boot_level(rdev);
 			kv_update_sclk_t(rdev);
 			kv_enable_nb_dpm(rdev);
@@ -2037,6 +2041,14 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
 	struct radeon_clock_and_voltage_limits *max_limits =
 		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

+	if (new_rps->vce_active) {
+		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
+	} else {
+		new_rps->evclk = 0;
+		new_rps->ecclk = 0;
+	}
+
 	mclk = max_limits->mclk;
 	sclk = min_sclk;

@@ -2056,6 +2068,11 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
 		sclk = stable_p_state_sclk;
 	}

+	if (new_rps->vce_active) {
+		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
+			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
+	}
+
 	ps->need_dfs_bypass = true;

 	for (i = 0; i < ps->num_levels; i++) {
@@ -2092,7 +2109,8 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}

-	pi->video_start = new_rps->dclk || new_rps->vclk;
+	pi->video_start = new_rps->dclk || new_rps->vclk ||
+			  new_rps->evclk || new_rps->ecclk;

 	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
 	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -2538,9 +2556,6 @@ static int kv_parse_power_table(struct radeon_device *rdev)
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
 	power_state_offset = (u8 *)state_array->states;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		u8 *idx;
 		power_state = (union pplib_power_state *)power_state_offset;
@@ -2577,6 +2592,19 @@ static int kv_parse_power_table(struct radeon_device *rdev)
 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
 	}
 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
+		u32 sclk;
+		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+		rdev->pm.dpm.vce_states[i].sclk = sclk;
+		rdev->pm.dpm.vce_states[i].mclk = 0;
+	}
+
 	return 0;
 }

@@ -2590,6 +2618,10 @@ int kv_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;

+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = r600_parse_extended_power_table(rdev);
 	if (ret)
 		return ret;
@@ -2623,7 +2655,7 @@ int kv_dpm_init(struct radeon_device *rdev)
 	pi->caps_fps = false; /* true? */
 	pi->caps_uvd_pg = true;
 	pi->caps_uvd_dpm = true;
-	pi->caps_vce_pg = false;
+	pi->caps_vce_pg = false; /* XXX true */
 	pi->caps_samu_pg = false;
 	pi->caps_acp_pg = false;
 	pi->caps_stable_p_state = false;
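
Besides enabling the previously #if 0'd VCE DPM paths, kv_parse_power_table() now fills rdev->pm.dpm.vce_states[] by reassembling each engine clock from the split ATOM fields: a little-endian 16-bit low word plus an 8-bit high byte. A stand-alone sketch of that reassembly (host assumed little-endian, so the le16_to_cpu() step becomes a no-op here):

/* sketch: mirrors "sclk = le16_to_cpu(...usEngineClockLow); sclk |= ...ucEngineClockHigh << 16;" */
#include <stdint.h>
#include <stdio.h>

static uint32_t decode_sclk(uint16_t engine_clock_low, uint8_t engine_clock_high)
{
	uint32_t sclk = engine_clock_low;           /* low 16 bits */
	sclk |= (uint32_t)engine_clock_high << 16;  /* top byte shifted in */
	return sclk;
}

int main(void)
{
	/* 0xE200 + (0x04 << 16) = 320000; radeon dpm stores clocks in 10 kHz units */
	printf("%u\n", decode_sclk(0xE200, 0x04));
	return 0;
}
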
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index bf6300cfd62d..d246e043421a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1642,8 +1642,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
 		ring = &rdev->ring[ridx[i]];
 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

-		ring->rptr = ring->wptr = 0;
-		WREG32(cp_rb_rptr[i], ring->rptr);
+		ring->wptr = 0;
+		WREG32(cp_rb_rptr[i], 0);
 		WREG32(cp_rb_wptr[i], ring->wptr);

 		mdelay(1);
@@ -1917,11 +1917,9 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 7cf96b15377f..6378e0276691 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -248,8 +248,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
 		ring->wptr = 0;
 		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

-		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
-
 		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

 		ring->ready = true;
@@ -302,11 +300,9 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		mask = RADEON_RESET_DMA1;

 	if (!(reset_mask & mask)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index ca814276b075..004c931606c4 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -4025,9 +4025,6 @@ static int ni_parse_power_table(struct radeon_device *rdev)
 				  power_info->pplib.ucNumStates, GFP_KERNEL);
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

 	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
 		power_state = (union pplib_power_state *)
@@ -4089,6 +4086,10 @@ int ni_dpm_init(struct radeon_device *rdev)
 	pi->min_vddc_in_table = 0;
 	pi->max_vddc_in_table = 0;

+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = ni_parse_power_table(rdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3cc78bb66042..b6c32640df20 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1193,7 +1193,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)

 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
-	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1275,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,

 	value = radeon_get_ib_value(p, idx);
 	tmp = value & 0x003fffff;
-	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+	tmp += (((u32)reloc->gpu_offset) >> 10);

 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+		if (reloc->tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= RADEON_DST_TILE_MACRO;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
 			if (reg == RADEON_SRC_PITCH_OFFSET) {
 				DRM_ERROR("Cannot src blit from microtiled surface\n");
 				radeon_cs_dump_packet(p, pkt);
@@ -1326,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
-		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);

 		track->arrays[i + 0].esize = idx_value >> 8;
 		track->arrays[i + 0].robj = reloc->robj;
@@ -1338,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
 		track->arrays[i + 1].robj = reloc->robj;
 		track->arrays[i + 1].esize = idx_value >> 24;
 		track->arrays[i + 1].esize &= 0x7F;
@@ -1352,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
-		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
 		track->arrays[i + 0].robj = reloc->robj;
 		track->arrays[i + 0].esize = idx_value >> 8;
 		track->arrays[i + 0].esize &= 0x7F;
@@ -1595,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
 		track->zb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1608,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
 		track->cb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
 	case RADEON_PP_TXOFFSET_1:
@@ -1622,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_TXO_MACRO_TILE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

 			tmp = idx_value & ~(0x7 << 2);
 			tmp |= tile_flags;
-			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = tmp + ((u32)reloc->gpu_offset);
 		} else
-			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -1649,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->textures[0].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -1667,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->textures[1].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -1685,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->textures[2].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -1703,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_COLOR_TILE_ENABLE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

 			tmp = idx_value & ~(0x7 << 16);
@@ -1773,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case RADEON_PP_CNTL:
 		{
@@ -1933,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
 		if (r) {
 			return r;
@@ -1947,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
 		track->num_arrays = 1;
 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

@@ -2523,11 +2522,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)

 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -3223,12 +3220,12 @@ void r100_bandwidth_update(struct radeon_device *rdev)

 	if (rdev->mode_info.crtcs[0]->base.enabled) {
 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
-		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
 	}
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
 		if (rdev->mode_info.crtcs[1]->base.enabled) {
 			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
-			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
 		}
 	}

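
The two r100_bandwidth_update() lines just above track the primary-plane work elsewhere in this merge: a CRTC's framebuffer pointer now lives on its primary plane, so crtc->fb dereferences become crtc->primary->fb. A reduced sketch of the new lookup path (trimmed stand-in types for illustration; the real definitions are in drm_crtc.h):

/* sketch: post-patch framebuffer lookup goes through the primary plane */
struct drm_framebuffer;
struct drm_plane { struct drm_framebuffer *fb; };
struct drm_crtc { struct drm_plane *primary; };

static struct drm_framebuffer *crtc_fb(const struct drm_crtc *crtc)
{
	return crtc->primary->fb;	/* was: crtc->fb */
}
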
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index b3807edb1936..58f0473aa73f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
 		track->zb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
 		track->cb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case R200_PP_TXOFFSET_0:
 	case R200_PP_TXOFFSET_1:
@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= R200_TXO_MACRO_TILE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= R200_TXO_MICRO_TILE;

 			tmp = idx_value & ~(0x7 << 2);
 			tmp |= tile_flags;
-			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = tmp + ((u32)reloc->gpu_offset);
 		} else
-			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->textures[i].cube_info[face - 1].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[i].cube_info[face - 1].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}

 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_COLOR_TILE_ENABLE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

 			tmp = idx_value & ~(0x7 << 16);
@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case RADEON_PP_CNTL:
 		{
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0b658b34b33a..206caf9700b7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->cb[i].robj = reloc->robj;
 		track->cb[i].offset = idx_value;
 		track->cb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
 		track->zb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case R300_TX_OFFSET_0:
 	case R300_TX_OFFSET_0+4:
@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,

 		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
 			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
-				  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
 		} else {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= R300_TXO_MACRO_TILE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= R300_TXO_MICRO_TILE;
-			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
 				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

-			tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+			tmp = idx_value + ((u32)reloc->gpu_offset);
 			tmp |= tile_flags;
 			ib[idx] = tmp;
 		}
@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}

-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+		if (reloc->tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_COLOR_TILE_ENABLE;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+		if (reloc->tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
-		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+		else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
 			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

 		tmp = idx_value & ~(0x7 << 16);
@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}

-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+		if (reloc->tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+		if (reloc->tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_DEPTHMICROTILE_TILED;
-		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+		else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
 			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

 		tmp = idx_value & ~(0x7 << 16);
@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case 0x4e0c:
 		/* RB3D_COLOR_CHANNEL_MASK */
@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->aa.robj = reloc->robj;
 		track->aa.offset = idx_value;
 		track->aa_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 	case R300_RB3D_AARESOLVE_PITCH:
 		track->aa.pitch = idx_value & 0x3FFE;
@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 647ef4079217..6e887d004eba 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1748,11 +1748,9 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -2604,8 +2602,6 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

-	ring->rptr = RREG32(CP_RB_RPTR);
-
 	r600_cp_start(rdev);
 	ring->ready = true;
 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
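
The two r600.c hunks above capture the new lockup-detection contract introduced by this merge: radeon_ring_lockup_update() now takes rdev and is called whenever the engine is verifiably busy, the separate radeon_ring_force_activity() step is gone, and the resume path no longer caches ring->rptr by hand. A minimal sketch of the resulting is_lockup callback shape, mirroring the converted code above (illustrative only, not part of the patch; the function name is hypothetical):

	static bool example_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
	{
		u32 reset_mask = r600_gpu_check_soft_reset(rdev);

		if (!(reset_mask & RADEON_RESET_GFX)) {
			/* engine still making progress: record activity, no lockup */
			radeon_ring_lockup_update(rdev, ring);
			return false;
		}
		/* no forced-activity step any more; test the ring directly */
		return radeon_ring_test_lockup(rdev, ring);
	}
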
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 2812c7d1ae6f..12511bb5fd6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1022,7 +1022,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1022 "0x%04X\n", reg); 1022 "0x%04X\n", reg);
1023 return -EINVAL; 1023 return -EINVAL;
1024 } 1024 }
1025 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1025 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1026 break; 1026 break;
1027 case SQ_CONFIG: 1027 case SQ_CONFIG:
1028 track->sq_config = radeon_get_ib_value(p, idx); 1028 track->sq_config = radeon_get_ib_value(p, idx);
@@ -1043,7 +1043,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1043 track->db_depth_info = radeon_get_ib_value(p, idx); 1043 track->db_depth_info = radeon_get_ib_value(p, idx);
1044 ib[idx] &= C_028010_ARRAY_MODE; 1044 ib[idx] &= C_028010_ARRAY_MODE;
1045 track->db_depth_info &= C_028010_ARRAY_MODE; 1045 track->db_depth_info &= C_028010_ARRAY_MODE;
1046 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1046 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1047 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1047 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1048 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1048 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1049 } else { 1049 } else {
@@ -1084,9 +1084,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1084 } 1084 }
1085 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1085 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1086 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1086 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1087 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1087 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1088 track->vgt_strmout_bo[tmp] = reloc->robj; 1088 track->vgt_strmout_bo[tmp] = reloc->robj;
1089 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; 1089 track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
1090 track->streamout_dirty = true; 1090 track->streamout_dirty = true;
1091 break; 1091 break;
1092 case VGT_STRMOUT_BUFFER_SIZE_0: 1092 case VGT_STRMOUT_BUFFER_SIZE_0:
@@ -1105,7 +1105,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1105 "0x%04X\n", reg); 1105 "0x%04X\n", reg);
1106 return -EINVAL; 1106 return -EINVAL;
1107 } 1107 }
1108 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1108 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1109 break; 1109 break;
1110 case R_028238_CB_TARGET_MASK: 1110 case R_028238_CB_TARGET_MASK:
1111 track->cb_target_mask = radeon_get_ib_value(p, idx); 1111 track->cb_target_mask = radeon_get_ib_value(p, idx);
@@ -1142,10 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1142 } 1142 }
1143 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1143 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1144 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1144 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1145 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1145 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1146 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1146 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1147 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1147 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1148 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1148 } else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
1149 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1149 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1150 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1150 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1151 } 1151 }
@@ -1214,7 +1214,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1214 } 1214 }
1215 track->cb_color_frag_bo[tmp] = reloc->robj; 1215 track->cb_color_frag_bo[tmp] = reloc->robj;
1216 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; 1216 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1217 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1217 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1218 } 1218 }
1219 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1219 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1220 track->cb_dirty = true; 1220 track->cb_dirty = true;
@@ -1245,7 +1245,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1245 } 1245 }
1246 track->cb_color_tile_bo[tmp] = reloc->robj; 1246 track->cb_color_tile_bo[tmp] = reloc->robj;
1247 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; 1247 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1248 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1248 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1249 } 1249 }
1250 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1250 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1251 track->cb_dirty = true; 1251 track->cb_dirty = true;
@@ -1281,10 +1281,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1281 } 1281 }
1282 tmp = (reg - CB_COLOR0_BASE) / 4; 1282 tmp = (reg - CB_COLOR0_BASE) / 4;
1283 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1283 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1284 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1284 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1285 track->cb_color_base_last[tmp] = ib[idx]; 1285 track->cb_color_base_last[tmp] = ib[idx];
1286 track->cb_color_bo[tmp] = reloc->robj; 1286 track->cb_color_bo[tmp] = reloc->robj;
1287 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1287 track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
1288 track->cb_dirty = true; 1288 track->cb_dirty = true;
1289 break; 1289 break;
1290 case DB_DEPTH_BASE: 1290 case DB_DEPTH_BASE:
@@ -1295,9 +1295,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1295 return -EINVAL; 1295 return -EINVAL;
1296 } 1296 }
1297 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1297 track->db_offset = radeon_get_ib_value(p, idx) << 8;
1298 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1298 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1299 track->db_bo = reloc->robj; 1299 track->db_bo = reloc->robj;
1300 track->db_bo_mc = reloc->lobj.gpu_offset; 1300 track->db_bo_mc = reloc->gpu_offset;
1301 track->db_dirty = true; 1301 track->db_dirty = true;
1302 break; 1302 break;
1303 case DB_HTILE_DATA_BASE: 1303 case DB_HTILE_DATA_BASE:
@@ -1308,7 +1308,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1308 return -EINVAL; 1308 return -EINVAL;
1309 } 1309 }
1310 track->htile_offset = radeon_get_ib_value(p, idx) << 8; 1310 track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1311 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1311 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1312 track->htile_bo = reloc->robj; 1312 track->htile_bo = reloc->robj;
1313 track->db_dirty = true; 1313 track->db_dirty = true;
1314 break; 1314 break;
@@ -1377,7 +1377,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1377 "0x%04X\n", reg); 1377 "0x%04X\n", reg);
1378 return -EINVAL; 1378 return -EINVAL;
1379 } 1379 }
1380 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1380 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1381 break; 1381 break;
1382 case SX_MEMORY_EXPORT_BASE: 1382 case SX_MEMORY_EXPORT_BASE:
1383 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1383 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
@@ -1386,7 +1386,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1386 "0x%04X\n", reg); 1386 "0x%04X\n", reg);
1387 return -EINVAL; 1387 return -EINVAL;
1388 } 1388 }
1389 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1389 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1390 break; 1390 break;
1391 case SX_MISC: 1391 case SX_MISC:
1392 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1392 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1672,7 +1672,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}

-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (idx_value & 0xfffffff0) +
 			 ((u64)(tmp & 0xff) << 32);

@@ -1713,7 +1713,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}

-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 idx_value +
 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

@@ -1765,7 +1765,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}

-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

@@ -1805,7 +1805,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			tmp = radeon_get_ib_value(p, idx) +
 			      ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

-			offset = reloc->lobj.gpu_offset + tmp;
+			offset = reloc->gpu_offset + tmp;

 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -1835,7 +1835,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			tmp = radeon_get_ib_value(p, idx+2) +
 			      ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

-			offset = reloc->lobj.gpu_offset + tmp;
+			offset = reloc->gpu_offset + tmp;

 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -1861,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				DRM_ERROR("bad SURFACE_SYNC\n");
 				return -EINVAL;
 			}
-			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 		}
 		break;
 	case PACKET3_EVENT_WRITE:
@@ -1877,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				DRM_ERROR("bad EVENT_WRITE\n");
 				return -EINVAL;
 			}
-			offset = reloc->lobj.gpu_offset +
+			offset = reloc->gpu_offset +
 				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
 				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

@@ -1899,7 +1899,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}

-		offset = reloc->lobj.gpu_offset +
+		offset = reloc->gpu_offset +
 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

@@ -1964,11 +1964,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				DRM_ERROR("bad SET_RESOURCE\n");
 				return -EINVAL;
 			}
-			base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+			base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 			if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				if (reloc->tiling_flags & RADEON_TILING_MACRO)
 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				else if (reloc->tiling_flags & RADEON_TILING_MICRO)
 					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
 			}
 			texture = reloc->robj;
@@ -1978,13 +1978,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				DRM_ERROR("bad SET_RESOURCE\n");
 				return -EINVAL;
 			}
-			mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+			mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 			mipmap = reloc->robj;
 			r = r600_check_texture_resource(p, idx+(i*7)+1,
 							texture, mipmap,
 							base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
 							mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
-							reloc->lobj.tiling_flags);
+							reloc->tiling_flags);
 			if (r)
 				return r;
 			ib[idx+1+(i*7)+2] += base_offset;
@@ -2008,7 +2008,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
 			}

-			offset64 = reloc->lobj.gpu_offset + offset;
+			offset64 = reloc->gpu_offset + offset;
 			ib[idx+1+(i*8)+0] = offset64;
 			ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
 					    (upper_32_bits(offset64) & 0xff);
@@ -2118,7 +2118,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					  offset + 4, radeon_bo_size(reloc->robj));
 				return -EINVAL;
 			}
-			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+			ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 		}
 		break;
 	case PACKET3_SURFACE_BASE_UPDATE:
@@ -2151,7 +2151,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					  offset + 4, radeon_bo_size(reloc->robj));
 				return -EINVAL;
 			}
-			offset += reloc->lobj.gpu_offset;
+			offset += reloc->gpu_offset;
 			ib[idx+1] = offset;
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
 		}
@@ -2170,7 +2170,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					  offset + 4, radeon_bo_size(reloc->robj));
 				return -EINVAL;
 			}
-			offset += reloc->lobj.gpu_offset;
+			offset += reloc->gpu_offset;
 			ib[idx+3] = offset;
 			ib[idx+4] = upper_32_bits(offset) & 0xff;
 		}
@@ -2199,7 +2199,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				  offset + 8, radeon_bo_size(reloc->robj));
 			return -EINVAL;
 		}
-		offset += reloc->lobj.gpu_offset;
+		offset += reloc->gpu_offset;
 		ib[idx+0] = offset;
 		ib[idx+1] = upper_32_bits(offset) & 0xff;
 		break;
@@ -2224,7 +2224,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					  offset + 4, radeon_bo_size(reloc->robj));
 				return -EINVAL;
 			}
-			offset += reloc->lobj.gpu_offset;
+			offset += reloc->gpu_offset;
 			ib[idx+1] = offset;
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
 		} else {
@@ -2248,7 +2248,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					  offset + 4, radeon_bo_size(reloc->robj));
 				return -EINVAL;
 			}
-			offset += reloc->lobj.gpu_offset;
+			offset += reloc->gpu_offset;
 			ib[idx+3] = offset;
 			ib[idx+4] = upper_32_bits(offset) & 0xff;
 		} else {
@@ -2505,14 +2505,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;

-				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
 				p->idx += count + 5;
 			} else {
 				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

-				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
 				p->idx += count + 3;
 			}
 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
@@ -2539,22 +2539,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 					/* tiled src, linear dst */
 					src_offset = radeon_get_ib_value(p, idx+1);
 					src_offset <<= 8;
-					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);

 					dst_offset = radeon_get_ib_value(p, idx+5);
 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
 				} else {
 					/* linear src, tiled dst */
 					src_offset = radeon_get_ib_value(p, idx+5);
 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;

 					dst_offset = radeon_get_ib_value(p, idx+1);
 					dst_offset <<= 8;
-					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
 				}
 				p->idx += 7;
 			} else {
@@ -2564,10 +2564,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 					dst_offset = radeon_get_ib_value(p, idx+1);
 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

-					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
 					p->idx += 5;
 				} else {
 					src_offset = radeon_get_ib_value(p, idx+2);
@@ -2575,10 +2575,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 					dst_offset = radeon_get_ib_value(p, idx+1);
 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

-					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+					ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
 					p->idx += 4;
 				}
 			}
@@ -2610,8 +2610,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 			  dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
 			return -EINVAL;
 		}
-		ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-		ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+		ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
+		ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
 		p->idx += 4;
 		break;
 	case DMA_PACKET_NOP:
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index b2d4c91e6272..53fcb28f5578 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -176,8 +176,6 @@ int r600_dma_resume(struct radeon_device *rdev)
 	ring->wptr = 0;
 	WREG32(DMA_RB_WPTR, ring->wptr << 2);

-	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
 	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

 	ring->ready = true;
@@ -221,11 +219,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

 	if (!(reset_mask & RADEON_RESET_DMA)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e4cc9b314ce9..cbf7e3269f84 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -834,6 +834,26 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
 	return 0;
 }

+int r600_get_platform_caps(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				    &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+	return 0;
+}
+
 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
@@ -1043,7 +1063,15 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 				(mode_info->atom_context->bios + data_offset +
 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+			ATOM_PPLIB_VCE_State_Table *states =
+				(ATOM_PPLIB_VCE_State_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
+				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+			ATOM_PPLIB_VCE_State_Record *state_entry;
+			VCEClockInfo *vce_clk;
 			u32 size = limits->numEntries *
 				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
@@ -1055,8 +1083,9 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 				limits->numEntries;
 			entry = &limits->entries[0];
+			state_entry = &states->entries[0];
 			for (i = 0; i < limits->numEntries; i++) {
-				VCEClockInfo *vce_clk = (VCEClockInfo *)
+				vce_clk = (VCEClockInfo *)
 					((u8 *)&array->entries[0] +
 					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
@@ -1068,6 +1097,23 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 			}
+			for (i = 0; i < states->numEntries; i++) {
+				if (i >= RADEON_MAX_VCE_LEVELS)
+					break;
+				vce_clk = (VCEClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+				rdev->pm.dpm.vce_states[i].evclk =
+					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+				rdev->pm.dpm.vce_states[i].ecclk =
+					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+				rdev->pm.dpm.vce_states[i].clk_idx =
+					state_entry->ucClockInfoIndex & 0x3f;
+				rdev->pm.dpm.vce_states[i].pstate =
+					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
+				state_entry = (ATOM_PPLIB_VCE_State_Record *)
+					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+			}
 		}
 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
 		    ext_hdr->usUVDTableOffset) {
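
The new VCE state loop above decodes ATOM's packed ucClockInfoIndex byte: the low six bits index the VCEClockInfo array and the top two bits carry the power-state selector. A hypothetical helper making that packing explicit (names are illustrative, not from the patch):

	static void example_unpack_vce_clock_info_index(u8 packed, u8 *clk_idx, u8 *pstate)
	{
		*clk_idx = packed & 0x3f;		/* bits [5:0]: VCEClockInfo table index */
		*pstate  = (packed & 0xc0) >> 6;	/* bits [7:6]: power state */
	}
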
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 07eab2b04e81..46b9d2a03018 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -215,6 +215,8 @@ void r600_stop_dpm(struct radeon_device *rdev);

 bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);

+int r600_get_platform_caps(struct radeon_device *rdev);
+
 int r600_parse_extended_power_table(struct radeon_device *rdev);
 void r600_free_extended_power_table(struct radeon_device *rdev);

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e887d027b6d0..f21db7a0b34d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,19 +113,16 @@ extern int radeon_hard_reset;
 #define RADEONFB_CONN_LIMIT			4
 #define RADEON_BIOS_NUM_SCRATCH			8

-/* max number of rings */
-#define RADEON_NUM_RINGS			6
-
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ		0LL

 /* internal ring indices */
 /* r1xx+ has gfx CP ring */
 #define RADEON_RING_TYPE_GFX_INDEX		0

 /* cayman has 2 compute CP rings */
 #define CAYMAN_RING_TYPE_CP1_INDEX		1
 #define CAYMAN_RING_TYPE_CP2_INDEX		2

 /* R600+ has an async dma ring */
 #define R600_RING_TYPE_DMA_INDEX		3
@@ -133,7 +130,17 @@ extern int radeon_hard_reset;
 #define CAYMAN_RING_TYPE_DMA1_INDEX		4

 /* R600+ */
 #define R600_RING_TYPE_UVD_INDEX		5
+
+/* TN+ */
+#define TN_RING_TYPE_VCE1_INDEX			6
+#define TN_RING_TYPE_VCE2_INDEX			7
+
+/* max number of rings */
+#define RADEON_NUM_RINGS			8
+
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS			4

 /* number of hw syncs before falling back on blocking */
 #define RADEON_NUM_SYNCS			4
@@ -356,9 +363,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_locked(struct radeon_fence *fence);
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
 			  struct radeon_fence **fences,
 			  bool intr);
@@ -450,6 +456,7 @@ struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head		list;
 	/* Protected by tbo.reserved */
+	u32				initial_domain;
 	u32				placements[3];
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
@@ -472,16 +479,6 @@ struct radeon_bo {
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

-struct radeon_bo_list {
-	struct ttm_validate_buffer	tv;
-	struct radeon_bo		*bo;
-	uint64_t			gpu_offset;
-	bool				written;
-	unsigned			domain;
-	unsigned			alt_domain;
-	u32				tiling_flags;
-};
-
 int radeon_gem_debugfs_init(struct radeon_device *rdev);

 /* sub-allocation manager, it has to be protected by another lock.
@@ -789,7 +786,6 @@ struct radeon_ib {
 struct radeon_ring {
 	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
-	unsigned		rptr;
 	unsigned		rptr_offs;
 	unsigned		rptr_save_reg;
 	u64			next_rptr_gpu_addr;
@@ -799,8 +795,8 @@ struct radeon_ring {
 	unsigned		ring_size;
 	unsigned		ring_free_dw;
 	int			count_dw;
-	unsigned long		last_activity;
-	unsigned		last_rptr;
+	atomic_t		last_rptr;
+	atomic64_t		last_activity;
 	uint64_t		gpu_addr;
 	uint32_t		align_mask;
 	uint32_t		ptr_mask;
@@ -852,17 +848,22 @@ struct radeon_mec {
 #define R600_PTE_READABLE	(1 << 5)
 #define R600_PTE_WRITEABLE	(1 << 6)

+struct radeon_vm_pt {
+	struct radeon_bo		*bo;
+	uint64_t			addr;
+};
+
 struct radeon_vm {
-	struct list_head		list;
 	struct list_head		va;
 	unsigned			id;

 	/* contains the page directory */
-	struct radeon_sa_bo		*page_directory;
+	struct radeon_bo		*page_directory;
 	uint64_t			pd_gpu_addr;
+	unsigned			max_pde_used;

 	/* array of page tables, one for each page directory entry */
-	struct radeon_sa_bo		**page_tables;
+	struct radeon_vm_pt		*page_tables;

 	struct mutex			mutex;
 	/* last fence for cs using this vm */
@@ -874,10 +875,7 @@ struct radeon_vm {
 };

 struct radeon_vm_manager {
-	struct mutex			lock;
-	struct list_head		lru_vm;
 	struct radeon_fence		*active[RADEON_NUM_VM];
-	struct radeon_sa_manager	sa_manager;
 	uint32_t			max_pfn;
 	/* number of VMIDs */
 	unsigned			nvm;
@@ -953,8 +951,8 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *c
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
-void radeon_ring_lockup_update(struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_device *rdev,
+			       struct radeon_ring *ring);
 bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
 			    uint32_t **data);
@@ -980,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev);
 struct radeon_cs_reloc {
 	struct drm_gem_object		*gobj;
 	struct radeon_bo		*robj;
-	struct radeon_bo_list		lobj;
+	struct ttm_validate_buffer	tv;
+	uint64_t			gpu_offset;
+	unsigned			domain;
+	unsigned			alt_domain;
+	uint32_t			tiling_flags;
 	uint32_t			handle;
-	uint32_t			flags;
 };

 struct radeon_cs_chunk {
@@ -1006,6 +1007,7 @@ struct radeon_cs_parser {
 	unsigned		nrelocs;
 	struct radeon_cs_reloc	*relocs;
 	struct radeon_cs_reloc	**relocs_ptr;
+	struct radeon_cs_reloc	*vm_bos;
 	struct list_head	validated;
 	unsigned		dma_reloc_idx;
 	/* indices of various chunks */
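
This radeon_cs_reloc change explains the bulk of the parser hunks earlier in the series: the per-BO validation state (tv, gpu_offset, domain, alt_domain, tiling_flags) that used to live in a separate struct radeon_bo_list embedded as reloc->lobj is folded directly into the reloc, so every reloc->lobj.gpu_offset access becomes reloc->gpu_offset. Illustrative before/after of the access pattern only (not itself part of the patch):

	/* before: one extra indirection through the embedded radeon_bo_list */
	offset = reloc->lobj.gpu_offset;
	flags  = reloc->lobj.tiling_flags;
	/* after: the fields live on the reloc itself */
	offset = reloc->gpu_offset;
	flags  = reloc->tiling_flags;
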
@@ -1255,6 +1257,17 @@ enum radeon_dpm_event_src {
 	RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
 };

+#define RADEON_MAX_VCE_LEVELS 6
+
+enum radeon_vce_level {
+	RADEON_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	RADEON_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	RADEON_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	RADEON_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
 struct radeon_ps {
 	u32 caps; /* vbios flags */
 	u32 class; /* vbios flags */
@@ -1265,6 +1278,8 @@ struct radeon_ps {
 	/* VCE clocks */
 	u32 evclk;
 	u32 ecclk;
+	bool vce_active;
+	enum radeon_vce_level vce_level;
 	/* asic priv */
 	void *ps_priv;
 };
@@ -1439,6 +1454,17 @@ enum radeon_dpm_forced_level {
 	RADEON_DPM_FORCED_LEVEL_HIGH = 2,
 };

+struct radeon_vce_state {
+	/* vce clocks */
+	u32 evclk;
+	u32 ecclk;
+	/* gpu clocks */
+	u32 sclk;
+	u32 mclk;
+	u8 clk_idx;
+	u8 pstate;
+};
+
 struct radeon_dpm {
 	struct radeon_ps        *ps;
 	/* number of valid power states */
@@ -1451,6 +1477,9 @@ struct radeon_dpm {
 	struct radeon_ps        *boot_ps;
 	/* default uvd power state */
 	struct radeon_ps        *uvd_ps;
+	/* vce requirements */
+	struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
+	enum radeon_vce_level vce_level;
 	enum radeon_pm_state_type state;
 	enum radeon_pm_state_type user_state;
 	u32                     platform_caps;
@@ -1476,6 +1505,7 @@ struct radeon_dpm {
 	/* special states active */
 	bool                    thermal_active;
 	bool                    uvd_active;
+	bool                    vce_active;
 	/* thermal handling */
 	struct radeon_dpm_thermal thermal;
 	/* forced levels */
@@ -1486,6 +1516,7 @@ struct radeon_dpm {
 };

 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
+void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);

 struct radeon_pm {
 	struct mutex		mutex;
@@ -1591,6 +1622,45 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
 int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
 				unsigned cg_upll_func_cntl);

+/*
+ * VCE
+ */
+#define RADEON_MAX_VCE_HANDLES	16
+#define RADEON_VCE_STACK_SIZE	(1024*1024)
+#define RADEON_VCE_HEAP_SIZE	(4*1024*1024)
+
+struct radeon_vce {
+	struct radeon_bo	*vcpu_bo;
+	uint64_t		gpu_addr;
+	unsigned		fw_version;
+	unsigned		fb_version;
+	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
+	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
+	struct delayed_work	idle_work;
+};
+
+int radeon_vce_init(struct radeon_device *rdev);
+void radeon_vce_fini(struct radeon_device *rdev);
+int radeon_vce_suspend(struct radeon_device *rdev);
+int radeon_vce_resume(struct radeon_device *rdev);
+int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence);
+int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence);
+void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
+void radeon_vce_note_usage(struct radeon_device *rdev);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_parse(struct radeon_cs_parser *p);
+bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait);
+void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_vce_fence_emit(struct radeon_device *rdev,
+			   struct radeon_fence *fence);
+int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+
 struct r600_audio_pin {
 	int			channels;
 	int			rate;
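
The new radeon_vce block tracks up to RADEON_MAX_VCE_HANDLES concurrent encode sessions as an array of atomics plus the owning drm_file. A hedged sketch of how such a table can be claimed lock-free; the in-tree radeon_vce.c may differ in detail, and the function name here is hypothetical:

	static int example_vce_claim_handle(struct radeon_vce *vce,
					    struct drm_file *filp, uint32_t handle)
	{
		int i;

		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
			/* atomically take a free (zero) slot */
			if (atomic_cmpxchg(&vce->handles[i], 0, handle) == 0) {
				vce->filp[i] = filp;
				return i;
			}
		}
		return -EINVAL; /* no free session slot */
	}
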
@@ -1780,6 +1850,7 @@ struct radeon_asic {
 		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
 		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
 		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
+		int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
 		int (*get_temperature)(struct radeon_device *rdev);
 	} pm;
 	/* dynamic power management */
@@ -2041,6 +2112,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp);
 int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
@@ -2186,6 +2259,7 @@ struct radeon_device {
 	struct radeon_gem		gem;
 	struct radeon_pm		pm;
 	struct radeon_uvd		uvd;
+	struct radeon_vce		vce;
 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
 	struct radeon_wb		wb;
 	struct radeon_dummy_page	dummy_page;
@@ -2205,6 +2279,7 @@ struct radeon_device {
 	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
 	const struct firmware *smc_fw;	/* SMC firmware */
 	const struct firmware *uvd_fw;	/* UVD firmware */
+	const struct firmware *vce_fw;	/* VCE firmware */
 	struct r600_vram_scratch vram_scratch;
 	int msi_enabled;		/* msi enabled */
 	struct r600_ih ih;		/* r6/700 interrupt ring */
@@ -2229,6 +2304,10 @@ struct radeon_device {
 	/* virtual memory */
 	struct radeon_vm_manager	vm_manager;
 	struct mutex			gpu_clock_mutex;
+	/* memory stats */
+	atomic64_t			vram_usage;
+	atomic64_t			gtt_usage;
+	atomic64_t			num_bytes_moved;
 	/* ACPI interface */
 	struct radeon_atif		atif;
 	struct radeon_atcs		atcs;
@@ -2639,6 +2718,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
 #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
+#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
 #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
@@ -2715,16 +2795,22 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev,
 */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+					  struct radeon_vm *vm,
+					  struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence);
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+				    struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
 			struct radeon_vm *vm,
 			struct radeon_bo *bo,
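
Taken together, these prototype changes sketch the reworked per-CS VM flow: radeon_vm_init() can now fail (it allocates a real page-directory BO rather than a sub-allocation), radeon_vm_get_bos() hands the caller an array of relocs for the VM's BOs to validate, and page-directory writeback and TLB flushes become explicit steps. A hedged outline of the call order implied by the prototypes (simplified; the real radeon_cs.c flow may differ):

	/* during CS ioctl processing, under vm->mutex (assumed) */
	vm_bos = radeon_vm_get_bos(rdev, vm, &parser->validated);
	/* ... reserve and validate the collected list ... */
	r = radeon_vm_update_page_directory(rdev, vm);	/* write back PDEs */
	/* ... radeon_vm_bo_update() for dirty mappings ... */
	radeon_vm_flush(rdev, vm, ring);		/* emit the VM flush on the ring */
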
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index dda02bfc10a4..b8a24a75d4ff 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1987,6 +1987,19 @@ static struct radeon_asic_ring ci_dma_ring = {
 	.set_wptr = &cik_sdma_set_wptr,
 };

+static struct radeon_asic_ring ci_vce_ring = {
+	.ib_execute = &radeon_vce_ib_execute,
+	.emit_fence = &radeon_vce_fence_emit,
+	.emit_semaphore = &radeon_vce_semaphore_emit,
+	.cs_parse = &radeon_vce_cs_parse,
+	.ring_test = &radeon_vce_ring_test,
+	.ib_test = &radeon_vce_ib_test,
+	.is_lockup = &radeon_ring_test_lockup,
+	.get_rptr = &vce_v1_0_get_rptr,
+	.get_wptr = &vce_v1_0_get_wptr,
+	.set_wptr = &vce_v1_0_set_wptr,
+};
+
 static struct radeon_asic ci_asic = {
 	.init = &cik_init,
 	.fini = &cik_fini,
@@ -2015,6 +2028,8 @@ static struct radeon_asic ci_asic = {
 		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
 		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
 		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
 	},
 	.irq = {
 		.set = &cik_irq_set,
@@ -2061,6 +2076,7 @@ static struct radeon_asic ci_asic = {
 		.set_pcie_lanes = NULL,
 		.set_clock_gating = NULL,
 		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.set_vce_clocks = &cik_set_vce_clocks,
 		.get_temperature = &ci_get_temp,
 	},
 	.dpm = {
@@ -2117,6 +2133,8 @@ static struct radeon_asic kv_asic = {
 		[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
 		[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
 		[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+		[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
+		[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
 	},
 	.irq = {
 		.set = &cik_irq_set,
@@ -2163,6 +2181,7 @@ static struct radeon_asic kv_asic = {
 		.set_pcie_lanes = NULL,
 		.set_clock_gating = NULL,
 		.set_uvd_clocks = &cik_set_uvd_clocks,
+		.set_vce_clocks = &cik_set_vce_clocks,
 		.get_temperature = &kv_get_temp,
 	},
 	.dpm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ae637cfda783..3d55a3a39e82 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -717,6 +717,7 @@ u32 cik_get_xclk(struct radeon_device *rdev);
 uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
 void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
 bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -863,4 +864,17 @@ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 /* uvd v4.2 */
 int uvd_v4_2_resume(struct radeon_device *rdev);

+/* vce v1.0 */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring);
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+int vce_v1_0_init(struct radeon_device *rdev);
+int vce_v1_0_start(struct radeon_device *rdev);
+
+/* vce v2.0 */
+int vce_v2_0_resume(struct radeon_device *rdev);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 82d4f865546e..c566b486ca08 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -89,7 +89,7 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
 
 	if (crtc && crtc->enabled) {
 		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-					 crtc->x, crtc->y, crtc->fb);
+					 crtc->x, crtc->y, crtc->primary->fb);
 	}
 }
 
@@ -1595,6 +1595,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
 	bool is_dp_bridge = false;
+	bool has_aux = false;
 
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 		return;
@@ -1672,7 +1673,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
 		else
 			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
-		if (!radeon_dig_connector->dp_i2c_bus)
+		if (radeon_dig_connector->dp_i2c_bus)
+			has_aux = true;
+		else
 			DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
 		radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 		if (!radeon_connector->ddc_bus)
@@ -1895,7 +1898,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 			if (!radeon_dig_connector->dp_i2c_bus)
 				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
+			if (radeon_connector->ddc_bus)
+				has_aux = true;
+			else
 				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		subpixel_order = SubPixelHorizontalRGB;
@@ -1939,7 +1944,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
 			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
-			if (!radeon_dig_connector->dp_i2c_bus)
+			if (radeon_dig_connector->dp_i2c_bus)
+				has_aux = true;
+			else
 				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
@@ -2000,6 +2007,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 
 	connector->display_info.subpixel_order = subpixel_order;
 	drm_sysfs_connector_add(connector);
+
+	if (has_aux)
+		radeon_dp_aux_init(radeon_connector);
+
 	return;
 
 failed:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index dfb5a1db87d4..2b6e0ebcc13a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -24,16 +24,59 @@
  * Authors:
  *    Jerome Glisse <glisse@freedesktop.org>
  */
+#include <linux/list_sort.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_trace.h"
 
+#define RADEON_CS_MAX_PRIORITY		32u
+#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
+
+/* This is based on the bucket sort with O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+struct radeon_cs_buckets {
+	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
+};
+
+static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
+		INIT_LIST_HEAD(&b->bucket[i]);
+}
+
+static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
+				  struct list_head *item, unsigned priority)
+{
+	/* Since buffers which appear sooner in the relocation list are
+	 * likely to be used more often than buffers which appear later
+	 * in the list, the sort mustn't change the ordering of buffers
+	 * with the same priority, i.e. it must be stable.
+	 */
+	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
+}
+
+static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
+				       struct list_head *out_list)
+{
+	unsigned i;
+
+	/* Connect the sorted buckets in the output list. */
+	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
+		list_splice(&b->bucket[i], out_list);
+	}
+}
+
 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
+	struct radeon_cs_buckets buckets;
 	unsigned i, j;
 	bool duplicate;
 
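The bucket scheme above is small enough to demonstrate standalone. A minimal userspace sketch of the same idea, stable O(n) ordering by a bounded integer priority (all names in this sketch are illustrative, not from the driver):

    /* Stable bucket sort: items are appended to bucket[priority] and
     * the buckets are then walked from highest priority down, so items
     * of equal priority keep their input order.
     */
    #include <stdio.h>

    #define MAX_PRIORITY 32u
    #define NUM_BUCKETS  (MAX_PRIORITY + 1)

    struct item {
    	const char *name;
    	unsigned priority;
    	struct item *next;
    };

    static struct item *head[NUM_BUCKETS], *tail[NUM_BUCKETS];

    static void bucket_add(struct item *it)
    {
    	unsigned p = it->priority < MAX_PRIORITY ? it->priority : MAX_PRIORITY;

    	it->next = NULL;
    	if (tail[p])
    		tail[p]->next = it;
    	else
    		head[p] = it;
    	tail[p] = it;
    }

    int main(void)
    {
    	struct item items[] = {
    		{ "msg buffer", 32 }, { "vertex bo", 6 },
    		{ "texture bo", 6 }, { "scratch bo", 1 },
    	};
    	unsigned i;
    	int p;

    	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
    		bucket_add(&items[i]);

    	/* emit from highest to lowest priority */
    	for (p = MAX_PRIORITY; p >= 0; p--)
    		for (struct item *it = head[p]; it; it = it->next)
    			printf("%s (prio %u)\n", it->name, it->priority);
    	return 0;
    }

Because each bucket preserves insertion order and buckets are emitted from the highest priority down, two buffers with equal priority come out in submission order, which is exactly the stability property the comment in radeon_cs_buckets_add() insists on. In the driver the same descending order falls out of splicing bucket[i] at the head of the output list in ascending i.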
@@ -52,8 +95,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}
+
+	radeon_cs_buckets_init(&buckets);
+
 	for (i = 0; i < p->nrelocs; i++) {
 		struct drm_radeon_cs_reloc *r;
+		unsigned priority;
 
 		duplicate = false;
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
@@ -78,8 +125,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 		p->relocs_ptr[i] = &p->relocs[i];
 		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
-		p->relocs[i].lobj.bo = p->relocs[i].robj;
-		p->relocs[i].lobj.written = !!r->write_domain;
+
+		/* The userspace buffer priorities are from 0 to 15. A higher
+		 * number means the buffer is more important.
+		 * Also, the buffers used for write have a higher priority than
+		 * the buffers used for read only, which doubles the range
+		 * to 0 to 31. 32 is reserved for the kernel driver.
+		 */
+		priority = (r->flags & 0xf) * 2 + !!r->write_domain;
 
 		/* the first reloc of an UVD job is the msg and that must be in
 		   VRAM, also but everything into VRAM on AGP cards to avoid
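Worked through with numbers, the priority formula above gives: a reloc flagged with userspace priority 7 and a write domain lands at 7 * 2 + 1 = 15, the same reloc as read-only lands at 14, and the top of the userspace range is 15 * 2 + 1 = 31. The value 32 stays reserved for the kernel; the UVD message buffer in the hunk below is bumped to exactly that value so it is always placed ahead of everything else.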
@@ -87,29 +140,38 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
 		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
 			/* TODO: is this still needed for NI+ ? */
-			p->relocs[i].lobj.domain =
+			p->relocs[i].domain =
 				RADEON_GEM_DOMAIN_VRAM;
 
-			p->relocs[i].lobj.alt_domain =
+			p->relocs[i].alt_domain =
 				RADEON_GEM_DOMAIN_VRAM;
 
+			/* prioritize this over any other relocation */
+			priority = RADEON_CS_MAX_PRIORITY;
 		} else {
 			uint32_t domain = r->write_domain ?
 				r->write_domain : r->read_domains;
 
-			p->relocs[i].lobj.domain = domain;
+			p->relocs[i].domain = domain;
 			if (domain == RADEON_GEM_DOMAIN_VRAM)
 				domain |= RADEON_GEM_DOMAIN_GTT;
-			p->relocs[i].lobj.alt_domain = domain;
+			p->relocs[i].alt_domain = domain;
 		}
 
-		p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].handle = r->handle;
 
-		radeon_bo_list_add_object(&p->relocs[i].lobj,
-					  &p->validated);
+		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
+				      priority);
 	}
-	return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
+
+	radeon_cs_buckets_get_list(&buckets, &p->validated);
+
+	if (p->cs_flags & RADEON_CS_USE_VM)
+		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
+					      &p->validated);
+
+	return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -147,6 +209,10 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 	case RADEON_CS_RING_UVD:
 		p->ring = R600_RING_TYPE_UVD_INDEX;
 		break;
+	case RADEON_CS_RING_VCE:
+		/* TODO: only use the low priority ring for now */
+		p->ring = TN_RING_TYPE_VCE1_INDEX;
+		break;
 	}
 	return 0;
 }
@@ -286,6 +352,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
+static int cmp_size_smaller_first(void *priv, struct list_head *a,
+				  struct list_head *b)
+{
+	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
+	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+
+	/* Sort A before B if A is smaller. */
+	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+}
+
 /**
  * cs_parser_fini() - clean parser states
  * @parser: parser structure holding parsing context.
@@ -299,6 +375,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 	unsigned i;
 
 	if (!error) {
+		/* Sort the buffer list from the smallest to largest buffer,
+		 * which affects the order of buffers in the LRU list.
+		 * This assures that the smallest buffers are added first
+		 * to the LRU list, so they are likely to be later evicted
+		 * first, instead of large buffers whose eviction is more
+		 * expensive.
+		 *
+		 * This slightly lowers the number of bytes moved by TTM
+		 * per frame under memory pressure.
+		 */
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
 					    parser->ib.fence);
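list_sort() from <linux/list_sort.h> is a stable merge sort over a struct list_head, driven entirely by the comparator, which is why the size sort above cannot disturb the priority order of equally sized buffers. A minimal sketch of the calling convention as of this kernel generation (the struct and field names here are invented for illustration):

    #include <linux/list.h>
    #include <linux/list_sort.h>

    struct demo_buf {
    	struct list_head head;
    	unsigned long num_pages;
    };

    static int demo_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
    	struct demo_buf *ba = list_entry(a, struct demo_buf, head);
    	struct demo_buf *bb = list_entry(b, struct demo_buf, head);

    	/* negative: a first, positive: b first */
    	return (int)ba->num_pages - (int)bb->num_pages;
    }

    static void demo_sort(struct list_head *bufs)
    {
    	list_sort(NULL, bufs, demo_cmp);	/* smallest buffer first */
    }

A negative return sorts a before b, so the smallest-page-count comparator puts small buffers at the head of the list, and therefore first onto the LRU.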
@@ -316,6 +404,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 	kfree(parser->track);
 	kfree(parser->relocs);
 	kfree(parser->relocs_ptr);
+	kfree(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
@@ -343,6 +432,9 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
+	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
+		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
+		radeon_vce_note_usage(rdev);
 
 	radeon_cs_sync_rings(parser);
 	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
@@ -352,24 +444,32 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	return r;
 }
 
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 				   struct radeon_vm *vm)
 {
-	struct radeon_device *rdev = parser->rdev;
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	int r;
+	struct radeon_device *rdev = p->rdev;
+	int i, r;
 
-	r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
-	if (r) {
+	r = radeon_vm_update_page_directory(rdev, vm);
+	if (r)
 		return r;
-	}
-	list_for_each_entry(lobj, &parser->validated, tv.head) {
-		bo = lobj->bo;
-		r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
-		if (r) {
+
+	r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+				&rdev->ring_tmp_bo.bo->tbo.mem);
+	if (r)
+		return r;
+
+	for (i = 0; i < p->nrelocs; i++) {
+		struct radeon_bo *bo;
+
+		/* ignore duplicates */
+		if (p->relocs_ptr[i] != &p->relocs[i])
+			continue;
+
+		bo = p->relocs[i].robj;
+		r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+		if (r)
 			return r;
-		}
 	}
 	return 0;
 }
@@ -401,20 +501,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
 
-	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	r = radeon_vm_alloc_pt(rdev, vm);
-	if (r) {
-		goto out;
-	}
 	r = radeon_bo_vm_update_pte(parser, vm);
 	if (r) {
 		goto out;
 	}
 	radeon_cs_sync_rings(parser);
 	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
-	radeon_semaphore_sync_to(parser->ib.semaphore,
-				 radeon_vm_grab_id(rdev, vm, parser->ring));
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
@@ -423,14 +516,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	}
 
-	if (!r) {
-		radeon_vm_fence(rdev, vm, parser->ib.fence);
-	}
-
 out:
-	radeon_vm_add_to_lru(rdev, vm);
 	mutex_unlock(&vm->mutex);
-	mutex_unlock(&rdev->vm_manager.lock);
 	return r;
 }
 
@@ -698,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 	/* FIXME: we assume reloc size is 4 dwords */
 	if (nomm) {
 		*cs_reloc = p->relocs;
-		(*cs_reloc)->lobj.gpu_offset =
+		(*cs_reloc)->gpu_offset =
 			(u64)relocs_chunk->kdata[idx + 3] << 32;
-		(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
 	} else
 		*cs_reloc = p->relocs_ptr[(idx / 4)];
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 044bc98fb459..835516d2d257 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1191,14 +1191,12 @@ int radeon_device_init(struct radeon_device *rdev,
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;
-	/* initialize vm here */
-	mutex_init(&rdev->vm_manager.lock);
+
 	/* Adjust VM size here.
 	 * Currently set to 4GB ((1 << 20) 4k pages).
 	 * Max GPUVM size for cayman and SI is 40 bits.
 	 */
 	rdev->vm_manager.max_pfn = 1 << 20;
-	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
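The arithmetic in that comment checks out: (1 << 20) pages * 4 KiB per page = 2^32 bytes = 4 GB of per-VM address space, comfortably under the 40-bit (1 TB) GPUVM ceiling of cayman and SI that the comment mentions.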
@@ -1426,7 +1424,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
 		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
@@ -1445,10 +1443,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
 
-	mutex_lock(&rdev->ring_lock);
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
-		r = radeon_fence_wait_empty_locked(rdev, i);
+		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* delay GPU reset to resume */
 			force_completion = true;
@@ -1457,7 +1454,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	if (force_completion) {
 		radeon_fence_driver_force_completion(rdev);
 	}
-	mutex_unlock(&rdev->ring_lock);
 
 	radeon_save_bios_scratch_regs(rdev);
 
@@ -1555,10 +1551,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
-	/* blat the mode back in */
-	drm_helper_resume_force_mode(dev);
-	/* turn on display hw */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	if (fbcon) {
+		drm_helper_resume_force_mode(dev);
+		/* turn on display hw */
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		}
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fbd8b930f2be..386cfa4c194d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -34,6 +34,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+#include <linux/gcd.h>
+
 static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -369,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->rdev = rdev;
 	work->crtc_id = radeon_crtc->crtc_id;
-	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
 	obj = old_radeon_fb->obj;
@@ -460,7 +462,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* update crtc fb */
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 
 	r = drm_vblank_get(dev, radeon_crtc->crtc_id);
 	if (r) {
@@ -792,6 +794,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	if (radeon_connector->edid) {
 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
 		return ret;
 	}
 	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -799,66 +802,57 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 }
 
 /* avivo */
-static void avivo_get_fb_div(struct radeon_pll *pll,
-			     u32 target_clock,
-			     u32 post_div,
-			     u32 ref_div,
-			     u32 *fb_div,
-			     u32 *frac_fb_div)
-{
-	u32 tmp = post_div * ref_div;
 
-	tmp *= target_clock;
-	*fb_div = tmp / pll->reference_freq;
-	*frac_fb_div = tmp % pll->reference_freq;
-
-	if (*fb_div > pll->max_feedback_div)
-		*fb_div = pll->max_feedback_div;
-	else if (*fb_div < pll->min_feedback_div)
-		*fb_div = pll->min_feedback_div;
-}
-
-static u32 avivo_get_post_div(struct radeon_pll *pll,
-			      u32 target_clock)
+/**
+ * avivo_reduce_ratio - fractional number reduction
+ *
+ * @nom: nominator
+ * @den: denominator
+ * @nom_min: minimum value for nominator
+ * @den_min: minimum value for denominator
+ *
+ * Find the greatest common divisor and apply it on both nominator and
+ * denominator, but make nominator and denominator are at least as large
+ * as their minimum values.
+ */
+static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
+			       unsigned nom_min, unsigned den_min)
 {
-	u32 vco, post_div, tmp;
+	unsigned tmp;
 
-	if (pll->flags & RADEON_PLL_USE_POST_DIV)
-		return pll->post_div;
-
-	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
-		if (pll->flags & RADEON_PLL_IS_LCD)
-			vco = pll->lcd_pll_out_min;
-		else
-			vco = pll->pll_out_min;
-	} else {
-		if (pll->flags & RADEON_PLL_IS_LCD)
-			vco = pll->lcd_pll_out_max;
-		else
-			vco = pll->pll_out_max;
+	/* reduce the numbers to a simpler ratio */
+	tmp = gcd(*nom, *den);
+	*nom /= tmp;
+	*den /= tmp;
+
+	/* make sure nominator is large enough */
+	if (*nom < nom_min) {
+		tmp = (nom_min + *nom - 1) / *nom;
+		*nom *= tmp;
+		*den *= tmp;
 	}
 
-	post_div = vco / target_clock;
-	tmp = vco % target_clock;
-
-	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
-		if (tmp)
-			post_div++;
-	} else {
-		if (!tmp)
-			post_div--;
+	/* make sure the denominator is large enough */
+	if (*den < den_min) {
+		tmp = (den_min + *den - 1) / *den;
+		*nom *= tmp;
+		*den *= tmp;
 	}
-
-	if (post_div > pll->max_post_div)
-		post_div = pll->max_post_div;
-	else if (post_div < pll->min_post_div)
-		post_div = pll->min_post_div;
-
-	return post_div;
 }
 
-#define MAX_TOLERANCE 10
-
+/**
+ * radeon_compute_pll_avivo - compute PLL paramaters
+ *
+ * @pll: information about the PLL
+ * @dot_clock_p: resulting pixel clock
+ * fb_div_p: resulting feedback divider
+ * frac_fb_div_p: fractional part of the feedback divider
+ * ref_div_p: resulting reference divider
+ * post_div_p: resulting reference divider
+ *
+ * Try to calculate the PLL parameters to generate the given frequency:
+ * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
+ */
 void radeon_compute_pll_avivo(struct radeon_pll *pll,
 			      u32 freq,
 			      u32 *dot_clock_p,
@@ -867,53 +861,123 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 			      u32 *ref_div_p,
 			      u32 *post_div_p)
 {
-	u32 target_clock = freq / 10;
-	u32 post_div = avivo_get_post_div(pll, target_clock);
-	u32 ref_div = pll->min_ref_div;
-	u32 fb_div = 0, frac_fb_div = 0, tmp;
+	unsigned fb_div_min, fb_div_max, fb_div;
+	unsigned post_div_min, post_div_max, post_div;
+	unsigned ref_div_min, ref_div_max, ref_div;
+	unsigned post_div_best, diff_best;
+	unsigned nom, den, tmp;
 
-	if (pll->flags & RADEON_PLL_USE_REF_DIV)
-		ref_div = pll->reference_div;
+	/* determine allowed feedback divider range */
+	fb_div_min = pll->min_feedback_div;
+	fb_div_max = pll->max_feedback_div;
 
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
-		avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
-		frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
-		if (frac_fb_div >= 5) {
-			frac_fb_div -= 5;
-			frac_fb_div = frac_fb_div / 10;
-			frac_fb_div++;
+		fb_div_min *= 10;
+		fb_div_max *= 10;
+	}
+
+	/* determine allowed ref divider range */
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div_min = pll->reference_div;
+	else
+		ref_div_min = pll->min_ref_div;
+	ref_div_max = pll->max_ref_div;
+
+	/* determine allowed post divider range */
+	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+		post_div_min = pll->post_div;
+		post_div_max = pll->post_div;
+	} else {
+		unsigned target_clock = freq / 10;
+		unsigned vco_min, vco_max;
+
+		if (pll->flags & RADEON_PLL_IS_LCD) {
+			vco_min = pll->lcd_pll_out_min;
+			vco_max = pll->lcd_pll_out_max;
+		} else {
+			vco_min = pll->pll_out_min;
+			vco_max = pll->pll_out_max;
 		}
-		if (frac_fb_div >= 10) {
-			fb_div++;
-			frac_fb_div = 0;
+
+		post_div_min = vco_min / target_clock;
+		if ((target_clock * post_div_min) < vco_min)
+			++post_div_min;
+		if (post_div_min < pll->min_post_div)
+			post_div_min = pll->min_post_div;
+
+		post_div_max = vco_max / target_clock;
+		if ((target_clock * post_div_max) > vco_max)
+			--post_div_max;
+		if (post_div_max > pll->max_post_div)
+			post_div_max = pll->max_post_div;
+	}
+
+	/* represent the searched ratio as fractional number */
+	nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10;
+	den = pll->reference_freq;
+
+	/* reduce the numbers to a simpler ratio */
+	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
+
+	/* now search for a post divider */
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+		post_div_best = post_div_min;
+	else
+		post_div_best = post_div_max;
+	diff_best = ~0;
+
+	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
+		unsigned diff = abs(den - den / post_div * post_div);
+		if (diff < diff_best || (diff == diff_best &&
+		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
+
+			post_div_best = post_div;
+			diff_best = diff;
 		}
 	}
+	post_div = post_div_best;
+
+	/* get matching reference and feedback divider */
+	ref_div = max(den / post_div, 1u);
+	fb_div = nom;
+
+	/* we're almost done, but reference and feedback
+	   divider might be to large now */
+
+	tmp = ref_div;
+
+	if (fb_div > fb_div_max) {
+		ref_div = ref_div * fb_div_max / fb_div;
+		fb_div = fb_div_max;
+	}
+
+	if (ref_div > ref_div_max) {
+		ref_div = ref_div_max;
+		fb_div = nom * ref_div_max / tmp;
+	}
+
+	/* reduce the numbers to a simpler ratio once more */
+	/* this also makes sure that the reference divider is large enough */
+	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
+
+	/* and finally save the result */
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		*fb_div_p = fb_div / 10;
+		*frac_fb_div_p = fb_div % 10;
 	} else {
-		while (ref_div <= pll->max_ref_div) {
-			avivo_get_fb_div(pll, target_clock, post_div, ref_div,
-					 &fb_div, &frac_fb_div);
-			if (frac_fb_div >= (pll->reference_freq / 2))
-				fb_div++;
-			frac_fb_div = 0;
-			tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
-			tmp = (tmp * 10000) / target_clock;
-
-			if (tmp > (10000 + MAX_TOLERANCE))
-				ref_div++;
-			else if (tmp >= (10000 - MAX_TOLERANCE))
-				break;
-			else
-				ref_div++;
-		}
+		*fb_div_p = fb_div;
+		*frac_fb_div_p = 0;
 	}
 
-	*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
-		(ref_div * post_div * 10);
-	*fb_div_p = fb_div;
-	*frac_fb_div_p = frac_fb_div;
+	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
+			(pll->reference_freq * *frac_fb_div_p)) /
+		       (ref_div * post_div * 10);
 	*ref_div_p = ref_div;
 	*post_div_p = post_div;
-	DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
-		      *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+
+	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p,
+		      ref_div, post_div);
 }
 
 /* pre-avivo */
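The reworked algorithm rests on the identity dot_clock = (ref_freq * fb_div) / (ref_div * post_div), so fb_div/ref_div is simply the requested ratio freq/ref_freq reduced by their gcd and rescaled to the hardware minimums. A standalone sketch with concrete numbers; reduce_ratio mirrors avivo_reduce_ratio() above, everything else here is illustrative:

    #include <stdio.h>

    static unsigned gcd(unsigned a, unsigned b)
    {
    	while (b) {
    		unsigned t = a % b;
    		a = b;
    		b = t;
    	}
    	return a;
    }

    static void reduce_ratio(unsigned *nom, unsigned *den,
    			 unsigned nom_min, unsigned den_min)
    {
    	unsigned tmp = gcd(*nom, *den);

    	*nom /= tmp;
    	*den /= tmp;

    	if (*nom < nom_min) {	/* scale nominator up to its minimum */
    		tmp = (nom_min + *nom - 1) / *nom;
    		*nom *= tmp;
    		*den *= tmp;
    	}
    	if (*den < den_min) {	/* same for the denominator */
    		tmp = (den_min + *den - 1) / *den;
    		*nom *= tmp;
    		*den *= tmp;
    	}
    }

    int main(void)
    {
    	/* e.g. a 148.5 MHz target from a 27 MHz reference:
    	 * 148500/27000 reduces by gcd 13500 to 11/2; with both
    	 * minimums at 4 it is scaled back up to 22/4.
    	 */
    	unsigned nom = 148500, den = 27000;

    	reduce_ratio(&nom, &den, 4, 4);
    	printf("fb_div/ref_div candidate: %u/%u\n", nom, den); /* 22/4 */
    	return 0;
    }

From the reduced ratio the driver then searches a post divider that fits the VCO limits, clamps fb_div and ref_div to their maximums, and reduces once more before splitting off the fractional part.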
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f633c2782170..d0eba48dd74e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -79,9 +79,11 @@
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
  * 2.37.0 - allow GS ring setup on r6xx/r7xx
+ * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
+ *          CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	37
+#define KMS_DRIVER_MINOR	38
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index c37cb79a9489..a77b1c13ea43 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
- * @lock_ring: whether the ring should be locked or not
  *
  * Wait for the requested sequence number(s) to be written by any ring
  * (all asics). Sequnce number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * -EDEADLK is returned when a GPU lockup has been detected.
 */
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-				 bool intr, bool lock_ring)
+				 bool intr)
 {
 	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 		if (i != RADEON_NUM_RINGS)
 			continue;
 
-		if (lock_ring)
-			mutex_lock(&rdev->ring_lock);
-
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			if (!target_seq[i])
 				continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 
 			/* remember that we need an reset */
 			rdev->needs_reset = true;
-			if (lock_ring)
-				mutex_unlock(&rdev->ring_lock);
 			wake_up_all(&rdev->fence_queue);
 			return -EDEADLK;
 		}
-
-		if (lock_ring)
-			mutex_unlock(&rdev->ring_lock);
 		}
 	}
 	return 0;
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
 		return 0;
 
-	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
 	if (r)
 		return r;
 
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	if (num_rings == 0)
 		return -ENOENT;
 
-	r = radeon_fence_wait_seq(rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(rdev, seq, intr);
 	if (r) {
 		return r;
 	}
@@ -472,37 +463,7 @@
 }
 
 /**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
-	uint64_t seq[RADEON_NUM_RINGS] = {};
-	int r;
-
-	if (fence == NULL) {
-		WARN(1, "Querying an invalid fence : %p !\n", fence);
-		return -EINVAL;
-	}
-
-	seq[fence->ring] = fence->seq;
-	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
-		return 0;
-
-	r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
-	if (r)
-		return r;
-
-	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-	return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 	if (!seq[ring])
 		return 0;
 
-	r = radeon_fence_wait_seq(rdev, seq, false, false);
+	r = radeon_fence_wait_seq(rdev, seq, false);
 	if (r) {
 		if (r == -EDEADLK)
 			return -EDEADLK;
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		r = radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
 			radeon_fence_driver_force_completion(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a8f9b463bf2a..2e723651069b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -28,8 +28,6 @@
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
-#include "radeon_reg.h"
-#include "radeon_trace.h"
 
 /*
  * GART
@@ -394,959 +392,3 @@ void radeon_gart_fini(struct radeon_device *rdev)
 
 	radeon_dummy_page_fini(rdev);
 }
-
-/*
- * GPUVM
- * GPUVM is similar to the legacy gart on older asics, however
- * rather than there being a single global gart table
- * for the entire GPU, there are multiple VM page tables active
- * at any given time. The VM page tables can contain a mix
- * vram pages and system memory pages and system memory pages
- * can be mapped as snooped (cached system pages) or unsnooped
- * (uncached system pages).
- * Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
- * the kernel tells the the ring what VMID to use for that command
- * buffer. VMIDs are allocated dynamically as commands are submitted.
- * The userspace drivers maintain their own address space and the kernel
- * sets up their pages tables accordingly when they submit their
- * command buffers and a VMID is assigned.
- * Cayman/Trinity support up to 8 active VMs at any given time;
- * SI supports 16.
- */
-
-/*
- * vm helpers
- *
- * TODO bind a default page at vm initialization for default address
- */
-
-/**
- * radeon_vm_num_pde - return the number of page directory entries
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the number of page directory entries (cayman+).
- */
-static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
-{
-	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
-}
-
-/**
- * radeon_vm_directory_size - returns the size of the page directory in bytes
- *
- * @rdev: radeon_device pointer
- *
- * Calculate the size of the page directory in bytes (cayman+).
- */
-static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
-{
-	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
-}
-
-/**
- * radeon_vm_manager_init - init the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Init the vm manager (cayman+).
- * Returns 0 for success, error for failure.
- */
-int radeon_vm_manager_init(struct radeon_device *rdev)
-{
-	struct radeon_vm *vm;
-	struct radeon_bo_va *bo_va;
-	int r;
-	unsigned size;
-
-	if (!rdev->vm_manager.enabled) {
-		/* allocate enough for 2 full VM pts */
-		size = radeon_vm_directory_size(rdev);
-		size += rdev->vm_manager.max_pfn * 8;
-		size *= 2;
-		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      RADEON_GPU_PAGE_ALIGN(size),
-					      RADEON_VM_PTB_ALIGN_SIZE,
-					      RADEON_GEM_DOMAIN_VRAM);
-		if (r) {
-			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
-				(rdev->vm_manager.max_pfn * 8) >> 10);
-			return r;
-		}
-
-		r = radeon_asic_vm_init(rdev);
-		if (r)
-			return r;
-
-		rdev->vm_manager.enabled = true;
-
-		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-		if (r)
-			return r;
-	}
-
-	/* restore page table */
-	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->page_directory == NULL)
-			continue;
-
-		list_for_each_entry(bo_va, &vm->va, vm_list) {
-			bo_va->valid = false;
-		}
-	}
-	return 0;
-}
-
-/**
- * radeon_vm_free_pt - free the page table for a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Free the page table of a specific vm (cayman+).
- *
- * Global and local mutex must be lock!
- */
-static void radeon_vm_free_pt(struct radeon_device *rdev,
-			      struct radeon_vm *vm)
-{
-	struct radeon_bo_va *bo_va;
-	int i;
-
-	if (!vm->page_directory)
-		return;
-
-	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-
-	list_for_each_entry(bo_va, &vm->va, vm_list) {
-		bo_va->valid = false;
-	}
-
-	if (vm->page_tables == NULL)
-		return;
-
-	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
-		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
-
-	kfree(vm->page_tables);
-}
-
-/**
- * radeon_vm_manager_fini - tear down the vm manager
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the VM manager (cayman+).
- */
-void radeon_vm_manager_fini(struct radeon_device *rdev)
-{
-	struct radeon_vm *vm, *tmp;
-	int i;
-
-	if (!rdev->vm_manager.enabled)
-		return;
-
-	mutex_lock(&rdev->vm_manager.lock);
-	/* free all allocated page tables */
-	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-		mutex_lock(&vm->mutex);
-		radeon_vm_free_pt(rdev, vm);
-		mutex_unlock(&vm->mutex);
-	}
-	for (i = 0; i < RADEON_NUM_VM; ++i) {
-		radeon_fence_unref(&rdev->vm_manager.active[i]);
-	}
-	radeon_asic_vm_fini(rdev);
-	mutex_unlock(&rdev->vm_manager.lock);
-
-	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
-	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
-	rdev->vm_manager.enabled = false;
-}
-
-/**
- * radeon_vm_evict - evict page table to make room for new one
- *
- * @rdev: radeon_device pointer
- * @vm: VM we want to allocate something for
- *
- * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
- * Returns 0 for success, -ENOMEM for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	struct radeon_vm *vm_evict;
-
-	if (list_empty(&rdev->vm_manager.lru_vm))
-		return -ENOMEM;
-
-	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
-				    struct radeon_vm, list);
-	if (vm_evict == vm)
-		return -ENOMEM;
-
-	mutex_lock(&vm_evict->mutex);
-	radeon_vm_free_pt(rdev, vm_evict);
-	mutex_unlock(&vm_evict->mutex);
-	return 0;
-}
-
-/**
- * radeon_vm_alloc_pt - allocates a page table for a VM
- *
- * @rdev: radeon_device pointer
- * @vm: vm to bind
- *
- * Allocate a page table for the requested vm (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	unsigned pd_size, pd_entries, pts_size;
-	struct radeon_ib ib;
-	int r;
-
-	if (vm == NULL) {
-		return -EINVAL;
-	}
-
-	if (vm->page_directory != NULL) {
-		return 0;
-	}
-
-	pd_size = radeon_vm_directory_size(rdev);
-	pd_entries = radeon_vm_num_pdes(rdev);
-
-retry:
-	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-			     &vm->page_directory, pd_size,
-			     RADEON_VM_PTB_ALIGN_SIZE, false);
-	if (r == -ENOMEM) {
-		r = radeon_vm_evict(rdev, vm);
-		if (r)
-			return r;
-		goto retry;
-
-	} else if (r) {
-		return r;
-	}
-
-	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
-
-	/* Initially clear the page directory */
-	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
-			  NULL, pd_entries * 2 + 64);
-	if (r) {
-		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-		return r;
-	}
-
-	ib.length_dw = 0;
-
-	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
-				0, pd_entries, 0, 0);
-
-	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-	r = radeon_ib_schedule(rdev, &ib, NULL);
-	if (r) {
-		radeon_ib_free(rdev, &ib);
-		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-		return r;
-	}
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(ib.fence);
-	radeon_ib_free(rdev, &ib);
-	radeon_fence_unref(&vm->last_flush);
-
-	/* allocate page table array */
-	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
-	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-
-	if (vm->page_tables == NULL) {
-		DRM_ERROR("Cannot allocate memory for page table array\n");
-		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/**
- * radeon_vm_add_to_lru - add VMs page table to LRU list
- *
- * @rdev: radeon_device pointer
- * @vm: vm to add to LRU
- *
- * Add the allocated page table to the LRU list (cayman+).
- *
- * Global mutex must be locked!
- */
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	list_del_init(&vm->list);
-	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-}
-
-/**
- * radeon_vm_grab_id - allocate the next free VMID
- *
- * @rdev: radeon_device pointer
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
- *
- * Global and local mutex must be locked!
- */
-struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
-				       struct radeon_vm *vm, int ring)
-{
-	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
-	unsigned choices[2] = {};
-	unsigned i;
-
-	/* check if the id is still valid */
-	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
-		return NULL;
-
-	/* we definately need to flush */
-	radeon_fence_unref(&vm->last_flush);
-
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
-		struct radeon_fence *fence = rdev->vm_manager.active[i];
-
-		if (fence == NULL) {
-			/* found a free one */
-			vm->id = i;
-			trace_radeon_vm_grab_id(vm->id, ring);
-			return NULL;
-		}
-
-		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
-			best[fence->ring] = fence;
-			choices[fence->ring == ring ? 0 : 1] = i;
-		}
-	}
-
-	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			vm->id = choices[i];
-			trace_radeon_vm_grab_id(vm->id, ring);
-			return rdev->vm_manager.active[choices[i]];
-		}
-	}
-
-	/* should never happen */
-	BUG();
-	return NULL;
-}
-
-/**
- * radeon_vm_fence - remember fence for vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void radeon_vm_fence(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_fence *fence)
-{
-	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(fence);
-
-	radeon_fence_unref(&vm->last_id_use);
-	vm->last_id_use = radeon_fence_ref(fence);
-}
-
-/**
- * radeon_vm_bo_find - find the bo_va for a specific vm & bo
- *
- * @vm: requested vm
- * @bo: requested buffer object
- *
- * Find @bo inside the requested vm (cayman+).
- * Search inside the @bos vm list for the requested vm
- * Returns the found bo_va or NULL if none is found
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
-				       struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va;
-
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			return bo_va;
-		}
-	}
-	return NULL;
-}
-
-/**
- * radeon_vm_bo_add - add a bo to a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
- *
- * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
- *
- * Object has to be reserved!
- */
-struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
-				      struct radeon_vm *vm,
-				      struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va;
-
-	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-	if (bo_va == NULL) {
-		return NULL;
-	}
-	bo_va->vm = vm;
-	bo_va->bo = bo;
-	bo_va->soffset = 0;
-	bo_va->eoffset = 0;
-	bo_va->flags = 0;
-	bo_va->valid = false;
-	bo_va->ref_count = 1;
-	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->vm_list);
-
-	mutex_lock(&vm->mutex);
-	list_add(&bo_va->vm_list, &vm->va);
-	list_add_tail(&bo_va->bo_list, &bo->va);
-	mutex_unlock(&vm->mutex);
-
-	return bo_va;
-}
-
-/**
- * radeon_vm_bo_set_addr - set bos virtual address inside a vm
- *
- * @rdev: radeon_device pointer
- * @bo_va: bo_va to store the address
- * @soffset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
- *
- * Set offset of @bo_va (cayman+).
- * Validate and set the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
- *
- * Object has to be reserved!
- */
-int radeon_vm_bo_set_addr(struct radeon_device *rdev,
-			  struct radeon_bo_va *bo_va,
-			  uint64_t soffset,
-			  uint32_t flags)
-{
-	uint64_t size = radeon_bo_size(bo_va->bo);
-	uint64_t eoffset, last_offset = 0;
-	struct radeon_vm *vm = bo_va->vm;
-	struct radeon_bo_va *tmp;
-	struct list_head *head;
-	unsigned last_pfn;
-
-	if (soffset) {
-		/* make sure object fit at this offset */
-		eoffset = soffset + size;
-		if (soffset >= eoffset) {
-			return -EINVAL;
-		}
-
-		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
-		if (last_pfn > rdev->vm_manager.max_pfn) {
-			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
-				last_pfn, rdev->vm_manager.max_pfn);
-			return -EINVAL;
-		}
-
-	} else {
-		eoffset = last_pfn = 0;
-	}
-
-	mutex_lock(&vm->mutex);
-	head = &vm->va;
-	last_offset = 0;
-	list_for_each_entry(tmp, &vm->va, vm_list) {
-		if (bo_va == tmp) {
-			/* skip over currently modified bo */
-			continue;
-		}
-
-		if (soffset >= last_offset && eoffset <= tmp->soffset) {
-			/* bo can be added before this one */
-			break;
-		}
-		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
-			/* bo and tmp overlap, invalid offset */
-			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
-				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
-				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
-			mutex_unlock(&vm->mutex);
-			return -EINVAL;
-		}
-		last_offset = tmp->eoffset;
-		head = &tmp->vm_list;
-	}
-
-	bo_va->soffset = soffset;
-	bo_va->eoffset = eoffset;
-	bo_va->flags = flags;
-	bo_va->valid = false;
-	list_move(&bo_va->vm_list, head);
-
-	mutex_unlock(&vm->mutex);
-	return 0;
-}
-
-/**
- * radeon_vm_map_gart - get the physical address of a gart page
- *
- * @rdev: radeon_device pointer
- * @addr: the unmapped addr
- *
- * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
- */
-uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
-{
-	uint64_t result;
-
-	/* page table offset */
-	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-	/* in case cpu page size != gpu page size*/
-	result |= addr & (~PAGE_MASK);
-
-	return result;
-}
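Concretely, with 4 KiB system pages (PAGE_SHIFT == 12): for addr == 0x12345 the lookup reads pages_addr[0x12] and keeps the low offset 0x345, so if that slot holds 0xabc000 the function returns 0xabc345. The OR with (addr & ~PAGE_MASK) is what keeps sub-page offsets intact when the GPU page size differs from the CPU's.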
-
-/**
- * radeon_vm_page_flags - translate page flags to what the hw uses
- *
- * @flags: flags comming from userspace
- *
- * Translate the flags the userspace ABI uses to hw flags.
- */
-static uint32_t radeon_vm_page_flags(uint32_t flags)
-{
-	uint32_t hw_flags = 0;
-	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
-	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
-		hw_flags |= R600_PTE_SYSTEM;
-		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-	}
-	return hw_flags;
-}
-
-/**
- * radeon_vm_update_pdes - make sure that page directory is valid
- *
- * @rdev: radeon_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_update_pdes(struct radeon_device *rdev,
-				 struct radeon_vm *vm,
-				 struct radeon_ib *ib,
-				 uint64_t start, uint64_t end)
-{
-	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
-
-	uint64_t last_pde = ~0, last_pt = ~0;
-	unsigned count = 0;
-	uint64_t pt_idx;
-	int r;
-
-	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
-
-	/* walk over the address space and update the page directory */
-	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
-		uint64_t pde, pt;
-
-		if (vm->page_tables[pt_idx])
-			continue;
-
-retry:
-		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-				     &vm->page_tables[pt_idx],
-				     RADEON_VM_PTE_COUNT * 8,
-				     RADEON_GPU_PAGE_SIZE, false);
-
-		if (r == -ENOMEM) {
-			r = radeon_vm_evict(rdev, vm);
-			if (r)
-				return r;
-			goto retry;
-		} else if (r) {
-			return r;
-		}
-
-		pde = vm->pd_gpu_addr + pt_idx * 8;
-
-		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-
-		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt)) {
-
-			if (count) {
-				radeon_asic_vm_set_page(rdev, ib, last_pde,
-							last_pt, count, incr,
-							R600_PTE_VALID);
-
-				count *= RADEON_VM_PTE_COUNT;
-				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-							count, 0, 0);
-			}
-
-			count = 1;
-			last_pde = pde;
-			last_pt = pt;
-		} else {
-			++count;
-		}
-	}
-
-	if (count) {
-		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-					incr, R600_PTE_VALID);
-
-		count *= RADEON_VM_PTE_COUNT;
-		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-					count, 0, 0);
-	}
-
-	return 0;
-}
1053
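The loop above coalesces runs of directory entries whose page tables happen to be laid out contiguously (stride incr) into single set_page calls. Below is a standalone model of just that coalescing, assuming RADEON_VM_PTE_COUNT == 512 (i.e. a block size of 9); it is illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

static void flush(uint64_t pde, uint64_t pt, unsigned count)
{
	printf("set_page(pde=0x%llx, pt=0x%llx, count=%u)\n",
	       (unsigned long long)pde, (unsigned long long)pt, count);
}

int main(void)
{
	const uint64_t incr = 512 * 8;	/* assumed RADEON_VM_PTE_COUNT * 8 */
	uint64_t pts[] = { 0x9000, 0x2000, 0x2000 + incr, 0x2000 + 2 * incr };
	uint64_t last_pde = ~0ull, last_pt = ~0ull;
	unsigned count = 0, i;

	for (i = 0; i < 4; ++i) {
		uint64_t pde = 0x100000 + i * 8;	/* PDEs are 8 bytes */
		uint64_t pt = pts[i];

		if (last_pde + 8 * count != pde ||
		    last_pt + incr * count != pt) {
			if (count)
				flush(last_pde, last_pt, count);
			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}
	if (count)
		flush(last_pde, last_pt, count);	/* emits runs of 1 and 3 */
	return 0;
}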
1054/**
1055 * radeon_vm_update_ptes - make sure that page tables are valid
1056 *
1057 * @rdev: radeon_device pointer
1058 * @vm: requested vm
1059 * @start: start of GPU address range
1060 * @end: end of GPU address range
1061 * @dst: destination address to map to
1062 * @flags: mapping flags
1063 *
1064 * Update the page tables in the range @start - @end (cayman+).
1065 *
1066 * Global and local mutex must be locked!
1067 */
1068static void radeon_vm_update_ptes(struct radeon_device *rdev,
1069 struct radeon_vm *vm,
1070 struct radeon_ib *ib,
1071 uint64_t start, uint64_t end,
1072 uint64_t dst, uint32_t flags)
1073{
1074 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
1075
1076 uint64_t last_pte = ~0, last_dst = ~0;
1077 unsigned count = 0;
1078 uint64_t addr;
1079
1080 start = start / RADEON_GPU_PAGE_SIZE;
1081 end = end / RADEON_GPU_PAGE_SIZE;
1082
1083 /* walk over the address space and update the page tables */
1084 for (addr = start; addr < end; ) {
1085 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
1086 unsigned nptes;
1087 uint64_t pte;
1088
1089 if ((addr & ~mask) == (end & ~mask))
1090 nptes = end - addr;
1091 else
1092 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
1093
1094 pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
1095 pte += (addr & mask) * 8;
1096
1097 if ((last_pte + 8 * count) != pte) {
1098
1099 if (count) {
1100 radeon_asic_vm_set_page(rdev, ib, last_pte,
1101 last_dst, count,
1102 RADEON_GPU_PAGE_SIZE,
1103 flags);
1104 }
1105
1106 count = nptes;
1107 last_pte = pte;
1108 last_dst = dst;
1109 } else {
1110 count += nptes;
1111 }
1112
1113 addr += nptes;
1114 dst += nptes * RADEON_GPU_PAGE_SIZE;
1115 }
1116
1117 if (count) {
1118 radeon_asic_vm_set_page(rdev, ib, last_pte,
1119 last_dst, count,
1120 RADEON_GPU_PAGE_SIZE, flags);
1121 }
1122}
1123
1124/**
1125 * radeon_vm_bo_update - map a bo into the vm page table
1126 *
1127 * @rdev: radeon_device pointer
1128 * @vm: requested vm
1129 * @bo: radeon buffer object
1130 * @mem: ttm mem
1131 *
1132 * Fill in the page table entries for @bo (cayman+).
1133 * Returns 0 for success, -EINVAL for failure.
1134 *
 1135 * Object has to be reserved & global and local mutex must be locked!
1136 */
1137int radeon_vm_bo_update(struct radeon_device *rdev,
1138 struct radeon_vm *vm,
1139 struct radeon_bo *bo,
1140 struct ttm_mem_reg *mem)
1141{
1142 struct radeon_ib ib;
1143 struct radeon_bo_va *bo_va;
1144 unsigned nptes, npdes, ndw;
1145 uint64_t addr;
1146 int r;
1147
1148 /* nothing to do if vm isn't bound */
1149 if (vm->page_directory == NULL)
1150 return 0;
1151
1152 bo_va = radeon_vm_bo_find(vm, bo);
1153 if (bo_va == NULL) {
1154 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
1155 return -EINVAL;
1156 }
1157
1158 if (!bo_va->soffset) {
 1159 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
1160 bo, vm);
1161 return -EINVAL;
1162 }
1163
1164 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
1165 return 0;
1166
1167 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
1168 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
1169 if (mem) {
1170 addr = mem->start << PAGE_SHIFT;
1171 if (mem->mem_type != TTM_PL_SYSTEM) {
1172 bo_va->flags |= RADEON_VM_PAGE_VALID;
1173 bo_va->valid = true;
1174 }
1175 if (mem->mem_type == TTM_PL_TT) {
1176 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
1177 } else {
1178 addr += rdev->vm_manager.vram_base_offset;
1179 }
1180 } else {
1181 addr = 0;
1182 bo_va->valid = false;
1183 }
1184
1185 trace_radeon_vm_bo_update(bo_va);
1186
1187 nptes = radeon_bo_ngpu_pages(bo);
1188
1189 /* assume two extra pdes in case the mapping overlaps the borders */
1190 npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
1191
1192 /* padding, etc. */
1193 ndw = 64;
1194
1195 if (RADEON_VM_BLOCK_SIZE > 11)
1196 /* reserve space for one header for every 2k dwords */
1197 ndw += (nptes >> 11) * 4;
1198 else
1199 /* reserve space for one header for
1200 every (1 << BLOCK_SIZE) entries */
1201 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
1202
1203 /* reserve space for pte addresses */
1204 ndw += nptes * 2;
1205
1206 /* reserve space for one header for every 2k dwords */
1207 ndw += (npdes >> 11) * 4;
1208
1209 /* reserve space for pde addresses */
1210 ndw += npdes * 2;
1211
1212 /* reserve space for clearing new page tables */
1213 ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
1214
1215 /* update too big for an IB */
1216 if (ndw > 0xfffff)
1217 return -ENOMEM;
1218
1219 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
1220 if (r)
1221 return r;
1222 ib.length_dw = 0;
1223
1224 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
1225 if (r) {
1226 radeon_ib_free(rdev, &ib);
1227 return r;
1228 }
1229
1230 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
1231 addr, radeon_vm_page_flags(bo_va->flags));
1232
1233 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
1234 r = radeon_ib_schedule(rdev, &ib, NULL);
1235 if (r) {
1236 radeon_ib_free(rdev, &ib);
1237 return r;
1238 }
1239 radeon_fence_unref(&vm->fence);
1240 vm->fence = radeon_fence_ref(ib.fence);
1241 radeon_ib_free(rdev, &ib);
1242 radeon_fence_unref(&vm->last_flush);
1243
1244 return 0;
1245}
1246
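A sanity check on the ndw estimate above, assuming RADEON_VM_BLOCK_SIZE == 9 (so RADEON_VM_PTE_COUNT == 512) and 4 KiB GPU pages; treat those constants as assumptions, since their definitions are not part of this listing:

/* For a 1 MiB mapping:
 *   nptes = 256                        (1 MiB / 4 KiB)
 *   npdes = (256 >> 9) + 2     = 2
 *   ndw   = 64                         padding
 *         + (256 >> 9) * 4     = 0     PTE command headers
 *         + 256 * 2            = 512   PTE addresses
 *         + (2 >> 11) * 4      = 0     PDE command headers
 *         + 2 * 2              = 4     PDE addresses
 *         + 2 * 2 * 512        = 2048  clearing new page tables
 *   total = 2628 dwords, comfortably under the 0xfffff limit.
 */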
1247/**
 1248 * radeon_vm_bo_rmv - remove a bo from a specific vm
1249 *
1250 * @rdev: radeon_device pointer
1251 * @bo_va: requested bo_va
1252 *
1253 * Remove @bo_va->bo from the requested vm (cayman+).
1254 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
1255 * remove the ptes for @bo_va in the page table.
1256 * Returns 0 for success.
1257 *
 1258 * Object has to be reserved!
1259 */
1260int radeon_vm_bo_rmv(struct radeon_device *rdev,
1261 struct radeon_bo_va *bo_va)
1262{
1263 int r = 0;
1264
1265 mutex_lock(&rdev->vm_manager.lock);
1266 mutex_lock(&bo_va->vm->mutex);
1267 if (bo_va->soffset) {
1268 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
1269 }
1270 mutex_unlock(&rdev->vm_manager.lock);
1271 list_del(&bo_va->vm_list);
1272 mutex_unlock(&bo_va->vm->mutex);
1273 list_del(&bo_va->bo_list);
1274
1275 kfree(bo_va);
1276 return r;
1277}
1278
1279/**
1280 * radeon_vm_bo_invalidate - mark the bo as invalid
1281 *
1282 * @rdev: radeon_device pointer
1284 * @bo: radeon buffer object
1285 *
1286 * Mark @bo as invalid (cayman+).
1287 */
1288void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1289 struct radeon_bo *bo)
1290{
1291 struct radeon_bo_va *bo_va;
1292
1293 list_for_each_entry(bo_va, &bo->va, bo_list) {
1294 bo_va->valid = false;
1295 }
1296}
1297
1298/**
1299 * radeon_vm_init - initialize a vm instance
1300 *
1301 * @rdev: radeon_device pointer
1302 * @vm: requested vm
1303 *
1304 * Init @vm fields (cayman+).
1305 */
1306void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1307{
1308 vm->id = 0;
1309 vm->fence = NULL;
1310 vm->last_flush = NULL;
1311 vm->last_id_use = NULL;
1312 mutex_init(&vm->mutex);
1313 INIT_LIST_HEAD(&vm->list);
1314 INIT_LIST_HEAD(&vm->va);
1315}
1316
1317/**
1318 * radeon_vm_fini - tear down a vm instance
1319 *
1320 * @rdev: radeon_device pointer
1321 * @vm: requested vm
1322 *
1323 * Tear down @vm (cayman+).
1324 * Unbind the VM and remove all bos from the vm bo list
1325 */
1326void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1327{
1328 struct radeon_bo_va *bo_va, *tmp;
1329 int r;
1330
1331 mutex_lock(&rdev->vm_manager.lock);
1332 mutex_lock(&vm->mutex);
1333 radeon_vm_free_pt(rdev, vm);
1334 mutex_unlock(&rdev->vm_manager.lock);
1335
1336 if (!list_empty(&vm->va)) {
1337 dev_err(rdev->dev, "still active bo inside vm\n");
1338 }
1339 list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
1340 list_del_init(&bo_va->vm_list);
1341 r = radeon_bo_reserve(bo_va->bo, false);
1342 if (!r) {
1343 list_del_init(&bo_va->bo_list);
1344 radeon_bo_unreserve(bo_va->bo);
1345 kfree(bo_va);
1346 }
1347 }
1348 radeon_fence_unref(&vm->fence);
1349 radeon_fence_unref(&vm->last_flush);
1350 radeon_fence_unref(&vm->last_id_use);
1351 mutex_unlock(&vm->mutex);
1352}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b96c819024b3..d09650c1d720 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -344,18 +344,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
344 } 344 }
345 robj = gem_to_radeon_bo(gobj); 345 robj = gem_to_radeon_bo(gobj);
346 r = radeon_bo_wait(robj, &cur_placement, true); 346 r = radeon_bo_wait(robj, &cur_placement, true);
347 switch (cur_placement) { 347 args->domain = radeon_mem_type_to_domain(cur_placement);
348 case TTM_PL_VRAM:
349 args->domain = RADEON_GEM_DOMAIN_VRAM;
350 break;
351 case TTM_PL_TT:
352 args->domain = RADEON_GEM_DOMAIN_GTT;
353 break;
354 case TTM_PL_SYSTEM:
355 args->domain = RADEON_GEM_DOMAIN_CPU;
356 default:
357 break;
358 }
359 drm_gem_object_unreference_unlocked(gobj); 348 drm_gem_object_unreference_unlocked(gobj);
360 r = radeon_gem_handle_lockup(rdev, r); 349 r = radeon_gem_handle_lockup(rdev, r);
361 return r; 350 return r;
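The open-coded switch is replaced by radeon_mem_type_to_domain(); its definition lives outside this hunk, but judging from the removed code it presumably reduces to something like the following reconstruction:

/* Hypothetical reconstruction, inferred from the deleted switch. */
static inline u32 radeon_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return RADEON_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return RADEON_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return RADEON_GEM_DOMAIN_CPU;
	default:
		return 0;
	}
}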
@@ -533,6 +522,42 @@ out:
533 return r; 522 return r;
534} 523}
535 524
525int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
526 struct drm_file *filp)
527{
528 struct drm_radeon_gem_op *args = data;
529 struct drm_gem_object *gobj;
530 struct radeon_bo *robj;
531 int r;
532
533 gobj = drm_gem_object_lookup(dev, filp, args->handle);
534 if (gobj == NULL) {
535 return -ENOENT;
536 }
537 robj = gem_to_radeon_bo(gobj);
538 r = radeon_bo_reserve(robj, false);
539 if (unlikely(r))
540 goto out;
541
542 switch (args->op) {
543 case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
544 args->value = robj->initial_domain;
545 break;
546 case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
547 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
548 RADEON_GEM_DOMAIN_GTT |
549 RADEON_GEM_DOMAIN_CPU);
550 break;
551 default:
552 r = -EINVAL;
553 }
554
555 radeon_bo_unreserve(robj);
556out:
557 drm_gem_object_unreference_unlocked(gobj);
558 return r;
559}
560
536int radeon_mode_dumb_create(struct drm_file *file_priv, 561int radeon_mode_dumb_create(struct drm_file *file_priv,
537 struct drm_device *dev, 562 struct drm_device *dev,
538 struct drm_mode_create_dumb *args) 563 struct drm_mode_create_dumb *args)
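From userspace, the new GEM_OP ioctl would be driven roughly as below. This is a hypothetical sketch assuming libdrm and the matching radeon_drm.h UAPI additions (struct drm_radeon_gem_op with handle/op/value fields), which are not shown in this diff.

#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>	/* include path depends on the libdrm install */

/* Query the initial placement domain of a GEM buffer. */
static int get_initial_domain(int fd, uint32_t handle, uint64_t *domain)
{
	struct drm_radeon_gem_op op = {
		.handle = handle,
		.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN,
	};
	int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &op, sizeof(op));

	if (r == 0)
		*domain = op.value;
	return r;
}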
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 66ed3ea71440..3e49342a20e6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -441,6 +441,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
441 case RADEON_CS_RING_UVD: 441 case RADEON_CS_RING_UVD:
442 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; 442 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
443 break; 443 break;
444 case RADEON_CS_RING_VCE:
445 *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
446 break;
444 default: 447 default:
445 return -EINVAL; 448 return -EINVAL;
446 } 449 }
@@ -485,6 +488,27 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
485 else 488 else
486 *value = rdev->pm.default_sclk * 10; 489 *value = rdev->pm.default_sclk * 10;
487 break; 490 break;
491 case RADEON_INFO_VCE_FW_VERSION:
492 *value = rdev->vce.fw_version;
493 break;
494 case RADEON_INFO_VCE_FB_VERSION:
495 *value = rdev->vce.fb_version;
496 break;
497 case RADEON_INFO_NUM_BYTES_MOVED:
498 value = (uint32_t*)&value64;
499 value_size = sizeof(uint64_t);
500 value64 = atomic64_read(&rdev->num_bytes_moved);
501 break;
502 case RADEON_INFO_VRAM_USAGE:
503 value = (uint32_t*)&value64;
504 value_size = sizeof(uint64_t);
505 value64 = atomic64_read(&rdev->vram_usage);
506 break;
507 case RADEON_INFO_GTT_USAGE:
508 value = (uint32_t*)&value64;
509 value_size = sizeof(uint64_t);
510 value64 = atomic64_read(&rdev->gtt_usage);
511 break;
488 default: 512 default:
489 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 513 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
490 return -EINVAL; 514 return -EINVAL;
@@ -543,7 +567,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
543 return -ENOMEM; 567 return -ENOMEM;
544 } 568 }
545 569
546 radeon_vm_init(rdev, &fpriv->vm); 570 r = radeon_vm_init(rdev, &fpriv->vm);
571 if (r)
572 return r;
547 573
548 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 574 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
549 if (r) 575 if (r)
@@ -624,6 +650,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
624 if (rdev->cmask_filp == file_priv) 650 if (rdev->cmask_filp == file_priv)
625 rdev->cmask_filp = NULL; 651 rdev->cmask_filp = NULL;
626 radeon_uvd_free_handles(rdev, file_priv); 652 radeon_uvd_free_handles(rdev, file_priv);
653 radeon_vce_free_handles(rdev, file_priv);
627} 654}
628 655
629/* 656/*
@@ -818,5 +845,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
818 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 845 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
819 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 846 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
820 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 847 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
848 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
821}; 849};
822int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 850int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0b158f98d287..cafb1ccf2ec3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -385,7 +385,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
385 385
386 DRM_DEBUG_KMS("\n"); 386 DRM_DEBUG_KMS("\n");
387 /* no fb bound */ 387 /* no fb bound */
388 if (!atomic && !crtc->fb) { 388 if (!atomic && !crtc->primary->fb) {
389 DRM_DEBUG_KMS("No FB bound\n"); 389 DRM_DEBUG_KMS("No FB bound\n");
390 return 0; 390 return 0;
391 } 391 }
@@ -395,8 +395,8 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
395 target_fb = fb; 395 target_fb = fb;
396 } 396 }
397 else { 397 else {
398 radeon_fb = to_radeon_framebuffer(crtc->fb); 398 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
399 target_fb = crtc->fb; 399 target_fb = crtc->primary->fb;
400 } 400 }
401 401
402 switch (target_fb->bits_per_pixel) { 402 switch (target_fb->bits_per_pixel) {
@@ -444,7 +444,7 @@ retry:
 444 * We don't shut down the display controller because the new buffer 444 * We don't shut down the display controller because the new buffer
 445 * will end up in the same spot. 445 * will end up in the same spot.
446 */ 446 */
447 if (!atomic && fb && fb != crtc->fb) { 447 if (!atomic && fb && fb != crtc->primary->fb) {
448 struct radeon_bo *old_rbo; 448 struct radeon_bo *old_rbo;
449 unsigned long nsize, osize; 449 unsigned long nsize, osize;
450 450
@@ -555,7 +555,7 @@ retry:
555 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); 555 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
556 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); 556 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
557 557
558 if (!atomic && fb && fb != crtc->fb) { 558 if (!atomic && fb && fb != crtc->primary->fb) {
559 radeon_fb = to_radeon_framebuffer(fb); 559 radeon_fb = to_radeon_framebuffer(fb);
560 rbo = gem_to_radeon_bo(radeon_fb->obj); 560 rbo = gem_to_radeon_bo(radeon_fb->obj);
561 r = radeon_bo_reserve(rbo, false); 561 r = radeon_bo_reserve(rbo, false);
@@ -599,7 +599,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
599 } 599 }
600 } 600 }
601 601
602 switch (crtc->fb->bits_per_pixel) { 602 switch (crtc->primary->fb->bits_per_pixel) {
603 case 8: 603 case 8:
604 format = 2; 604 format = 2;
605 break; 605 break;
@@ -1087,12 +1087,12 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
1087static void radeon_crtc_disable(struct drm_crtc *crtc) 1087static void radeon_crtc_disable(struct drm_crtc *crtc)
1088{ 1088{
1089 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1089 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1090 if (crtc->fb) { 1090 if (crtc->primary->fb) {
1091 int r; 1091 int r;
1092 struct radeon_framebuffer *radeon_fb; 1092 struct radeon_framebuffer *radeon_fb;
1093 struct radeon_bo *rbo; 1093 struct radeon_bo *rbo;
1094 1094
1095 radeon_fb = to_radeon_framebuffer(crtc->fb); 1095 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1096 rbo = gem_to_radeon_bo(radeon_fb->obj); 1096 rbo = gem_to_radeon_bo(radeon_fb->obj);
1097 r = radeon_bo_reserve(rbo, false); 1097 r = radeon_bo_reserve(rbo, false);
1098 if (unlikely(r)) 1098 if (unlikely(r))
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 402dbe32c234..832d9fa1a4c4 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -192,6 +192,7 @@ struct radeon_i2c_chan {
192 struct i2c_algo_dp_aux_data dp; 192 struct i2c_algo_dp_aux_data dp;
193 } algo; 193 } algo;
194 struct radeon_i2c_bus_rec rec; 194 struct radeon_i2c_bus_rec rec;
195 struct drm_dp_aux aux;
195}; 196};
196 197
197/* mostly for macs, but really any system without connector tables */ 198/* mostly for macs, but really any system without connector tables */
@@ -690,6 +691,9 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
690extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 691extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
691extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, 692extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
692 struct drm_connector *connector); 693 struct drm_connector *connector);
694extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
695 u8 power_state);
696extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
693extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); 697extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
694extern void radeon_atom_encoder_init(struct radeon_device *rdev); 698extern void radeon_atom_encoder_init(struct radeon_device *rdev);
695extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev); 699extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 08595cf90b01..19bec0dbfa38 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,11 +56,36 @@ static void radeon_bo_clear_va(struct radeon_bo *bo)
56 } 56 }
57} 57}
58 58
59static void radeon_update_memory_usage(struct radeon_bo *bo,
60 unsigned mem_type, int sign)
61{
62 struct radeon_device *rdev = bo->rdev;
63 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
64
65 switch (mem_type) {
66 case TTM_PL_TT:
67 if (sign > 0)
68 atomic64_add(size, &rdev->gtt_usage);
69 else
70 atomic64_sub(size, &rdev->gtt_usage);
71 break;
72 case TTM_PL_VRAM:
73 if (sign > 0)
74 atomic64_add(size, &rdev->vram_usage);
75 else
76 atomic64_sub(size, &rdev->vram_usage);
77 break;
78 }
79}
80
59static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) 81static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
60{ 82{
61 struct radeon_bo *bo; 83 struct radeon_bo *bo;
62 84
63 bo = container_of(tbo, struct radeon_bo, tbo); 85 bo = container_of(tbo, struct radeon_bo, tbo);
86
87 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
88
64 mutex_lock(&bo->rdev->gem.mutex); 89 mutex_lock(&bo->rdev->gem.mutex);
65 list_del_init(&bo->list); 90 list_del_init(&bo->list);
66 mutex_unlock(&bo->rdev->gem.mutex); 91 mutex_unlock(&bo->rdev->gem.mutex);
@@ -79,7 +104,7 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
79 104
80void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) 105void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
81{ 106{
82 u32 c = 0; 107 u32 c = 0, i;
83 108
84 rbo->placement.fpfn = 0; 109 rbo->placement.fpfn = 0;
85 rbo->placement.lpfn = 0; 110 rbo->placement.lpfn = 0;
@@ -106,6 +131,17 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
106 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 131 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
107 rbo->placement.num_placement = c; 132 rbo->placement.num_placement = c;
108 rbo->placement.num_busy_placement = c; 133 rbo->placement.num_busy_placement = c;
134
135 /*
136 * Use two-ended allocation depending on the buffer size to
137 * improve fragmentation quality.
 138 * 512 KB was measured as the optimal threshold.
139 */
140 if (rbo->tbo.mem.size > 512 * 1024) {
141 for (i = 0; i < c; i++) {
142 rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
143 }
144 }
109} 145}
110 146
111int radeon_bo_create(struct radeon_device *rdev, 147int radeon_bo_create(struct radeon_device *rdev,
@@ -120,7 +156,6 @@ int radeon_bo_create(struct radeon_device *rdev,
120 156
121 size = ALIGN(size, PAGE_SIZE); 157 size = ALIGN(size, PAGE_SIZE);
122 158
123 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
124 if (kernel) { 159 if (kernel) {
125 type = ttm_bo_type_kernel; 160 type = ttm_bo_type_kernel;
126 } else if (sg) { 161 } else if (sg) {
@@ -145,6 +180,9 @@ int radeon_bo_create(struct radeon_device *rdev,
145 bo->surface_reg = -1; 180 bo->surface_reg = -1;
146 INIT_LIST_HEAD(&bo->list); 181 INIT_LIST_HEAD(&bo->list);
147 INIT_LIST_HEAD(&bo->va); 182 INIT_LIST_HEAD(&bo->va);
183 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
184 RADEON_GEM_DOMAIN_GTT |
185 RADEON_GEM_DOMAIN_CPU);
148 radeon_ttm_placement_from_domain(bo, domain); 186 radeon_ttm_placement_from_domain(bo, domain);
 149 /* Kernel allocations are uninterruptible */ 187 /* Kernel allocations are uninterruptible */
150 down_read(&rdev->pm.mclk_lock); 188 down_read(&rdev->pm.mclk_lock);
@@ -338,39 +376,105 @@ void radeon_bo_fini(struct radeon_device *rdev)
338 arch_phys_wc_del(rdev->mc.vram_mtrr); 376 arch_phys_wc_del(rdev->mc.vram_mtrr);
339} 377}
340 378
341void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 379/* Returns how many bytes TTM can move per IB.
342 struct list_head *head) 380 */
381static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
343{ 382{
344 if (lobj->written) { 383 u64 real_vram_size = rdev->mc.real_vram_size;
345 list_add(&lobj->tv.head, head); 384 u64 vram_usage = atomic64_read(&rdev->vram_usage);
346 } else { 385
347 list_add_tail(&lobj->tv.head, head); 386 /* This function is based on the current VRAM usage.
348 } 387 *
 388 * - If all of VRAM is free, allow relocating the number of bytes that
 389 * is equal to 1/4 of the size of VRAM for this IB.
 390 *
 391 * - If more than one half of VRAM is occupied, only allow relocating
 392 * 1 MB of data for this IB.
 393 *
 394 * - From 0 to one half of used VRAM, the threshold decreases
 395 * linearly.
 396 *    __________________
 397 * 1/4 of  -|\               |
 398 * VRAM     | \              |
 399 *          |  \             |
 400 *          |   \            |
 401 *          |    \           |
 402 *          |     \          |
 403 *          |      \         |
 404 *          |       \________|1 MB
 405 *          |----------------|
 406 *    VRAM 0 %             100 %
 407 *         used             used
408 *
409 * Note: It's a threshold, not a limit. The threshold must be crossed
410 * for buffer relocations to stop, so any buffer of an arbitrary size
411 * can be moved as long as the threshold isn't crossed before
412 * the relocation takes place. We don't want to disable buffer
413 * relocations completely.
414 *
415 * The idea is that buffers should be placed in VRAM at creation time
416 * and TTM should only do a minimum number of relocations during
417 * command submission. In practice, you need to submit at least
418 * a dozen IBs to move all buffers to VRAM if they are in GTT.
419 *
420 * Also, things can get pretty crazy under memory pressure and actual
421 * VRAM usage can change a lot, so playing safe even at 50% does
422 * consistently increase performance.
423 */
424
425 u64 half_vram = real_vram_size >> 1;
426 u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
427 u64 bytes_moved_threshold = half_free_vram >> 1;
428 return max(bytes_moved_threshold, 1024*1024ull);
349} 429}
350 430
351int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, 431int radeon_bo_list_validate(struct radeon_device *rdev,
432 struct ww_acquire_ctx *ticket,
352 struct list_head *head, int ring) 433 struct list_head *head, int ring)
353{ 434{
354 struct radeon_bo_list *lobj; 435 struct radeon_cs_reloc *lobj;
355 struct radeon_bo *bo; 436 struct radeon_bo *bo;
356 u32 domain;
357 int r; 437 int r;
438 u64 bytes_moved = 0, initial_bytes_moved;
439 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
358 440
359 r = ttm_eu_reserve_buffers(ticket, head); 441 r = ttm_eu_reserve_buffers(ticket, head);
360 if (unlikely(r != 0)) { 442 if (unlikely(r != 0)) {
361 return r; 443 return r;
362 } 444 }
445
363 list_for_each_entry(lobj, head, tv.head) { 446 list_for_each_entry(lobj, head, tv.head) {
364 bo = lobj->bo; 447 bo = lobj->robj;
365 if (!bo->pin_count) { 448 if (!bo->pin_count) {
366 domain = lobj->domain; 449 u32 domain = lobj->domain;
367 450 u32 current_domain =
451 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
452
453 /* Check if this buffer will be moved and don't move it
454 * if we have moved too many buffers for this IB already.
455 *
456 * Note that this allows moving at least one buffer of
457 * any size, because it doesn't take the current "bo"
458 * into account. We don't want to disallow buffer moves
459 * completely.
460 */
461 if (current_domain != RADEON_GEM_DOMAIN_CPU &&
462 (domain & current_domain) == 0 && /* will be moved */
463 bytes_moved > bytes_moved_threshold) {
464 /* don't move it */
465 domain = current_domain;
466 }
467
368 retry: 468 retry:
369 radeon_ttm_placement_from_domain(bo, domain); 469 radeon_ttm_placement_from_domain(bo, domain);
370 if (ring == R600_RING_TYPE_UVD_INDEX) 470 if (ring == R600_RING_TYPE_UVD_INDEX)
371 radeon_uvd_force_into_uvd_segment(bo); 471 radeon_uvd_force_into_uvd_segment(bo);
372 r = ttm_bo_validate(&bo->tbo, &bo->placement, 472
373 true, false); 473 initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
474 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
475 bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
476 initial_bytes_moved;
477
374 if (unlikely(r)) { 478 if (unlikely(r)) {
375 if (r != -ERESTARTSYS && domain != lobj->alt_domain) { 479 if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
376 domain = lobj->alt_domain; 480 domain = lobj->alt_domain;
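The throttling above is easy to check numerically. Below is a standalone model of the threshold curve for a hypothetical 2 GiB board, matching the documented graph (1/4 of VRAM when empty, 1 MB floor past 50% usage); illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint64_t threshold_for_moves(uint64_t real_vram_size, uint64_t vram_usage)
{
	uint64_t half_vram = real_vram_size >> 1;
	uint64_t half_free_vram = vram_usage >= half_vram ?
				  0 : half_vram - vram_usage;
	uint64_t bytes_moved_threshold = half_free_vram >> 1;

	return bytes_moved_threshold > 1024 * 1024 ?
	       bytes_moved_threshold : 1024 * 1024;
}

int main(void)
{
	const uint64_t vram = 2048ull << 20;	/* 2 GiB of VRAM */
	uint64_t used;

	/* 0 MiB used -> 512 MiB, 512 MiB used -> 256 MiB, >= 1 GiB -> 1 MiB */
	for (used = 0; used <= vram; used += vram / 4)
		printf("used %4llu MiB -> threshold %4llu MiB\n",
		       (unsigned long long)(used >> 20),
		       (unsigned long long)(threshold_for_moves(vram, used) >> 20));
	return 0;
}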
@@ -564,14 +668,23 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
564} 668}
565 669
566void radeon_bo_move_notify(struct ttm_buffer_object *bo, 670void radeon_bo_move_notify(struct ttm_buffer_object *bo,
567 struct ttm_mem_reg *mem) 671 struct ttm_mem_reg *new_mem)
568{ 672{
569 struct radeon_bo *rbo; 673 struct radeon_bo *rbo;
674
570 if (!radeon_ttm_bo_is_radeon_bo(bo)) 675 if (!radeon_ttm_bo_is_radeon_bo(bo))
571 return; 676 return;
677
572 rbo = container_of(bo, struct radeon_bo, tbo); 678 rbo = container_of(bo, struct radeon_bo, tbo);
573 radeon_bo_check_tiling(rbo, 0, 1); 679 radeon_bo_check_tiling(rbo, 0, 1);
574 radeon_vm_bo_invalidate(rbo->rdev, rbo); 680 radeon_vm_bo_invalidate(rbo->rdev, rbo);
681
682 /* update statistics */
683 if (!new_mem)
684 return;
685
686 radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
687 radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
575} 688}
576 689
577int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 690int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 209b11150263..9e7b25a0629d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -138,9 +138,8 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev);
138extern void radeon_bo_force_delete(struct radeon_device *rdev); 138extern void radeon_bo_force_delete(struct radeon_device *rdev);
139extern int radeon_bo_init(struct radeon_device *rdev); 139extern int radeon_bo_init(struct radeon_device *rdev);
140extern void radeon_bo_fini(struct radeon_device *rdev); 140extern void radeon_bo_fini(struct radeon_device *rdev);
141extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 141extern int radeon_bo_list_validate(struct radeon_device *rdev,
142 struct list_head *head); 142 struct ww_acquire_ctx *ticket,
143extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
144 struct list_head *head, int ring); 143 struct list_head *head, int ring);
145extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 144extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
146 struct vm_area_struct *vma); 145 struct vm_area_struct *vma);
@@ -151,7 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
151extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, 150extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
152 bool force_drop); 151 bool force_drop);
153extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, 152extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
154 struct ttm_mem_reg *mem); 153 struct ttm_mem_reg *new_mem);
155extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 154extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
156extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 155extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
157 156
@@ -181,7 +180,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
181extern int radeon_sa_bo_new(struct radeon_device *rdev, 180extern int radeon_sa_bo_new(struct radeon_device *rdev,
182 struct radeon_sa_manager *sa_manager, 181 struct radeon_sa_manager *sa_manager,
183 struct radeon_sa_bo **sa_bo, 182 struct radeon_sa_bo **sa_bo,
184 unsigned size, unsigned align, bool block); 183 unsigned size, unsigned align);
185extern void radeon_sa_bo_free(struct radeon_device *rdev, 184extern void radeon_sa_bo_free(struct radeon_device *rdev,
186 struct radeon_sa_bo **sa_bo, 185 struct radeon_sa_bo **sa_bo,
187 struct radeon_fence *fence); 186 struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8e8153e471c2..ee738a524639 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -260,7 +260,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
260 if (!ring->ready) { 260 if (!ring->ready) {
261 continue; 261 continue;
262 } 262 }
263 r = radeon_fence_wait_empty_locked(rdev, i); 263 r = radeon_fence_wait_empty(rdev, i);
264 if (r) { 264 if (r) {
 265 /* needs a GPU reset, don't reset here */ 265 /* needs a GPU reset, don't reset here */
266 mutex_unlock(&rdev->ring_lock); 266 mutex_unlock(&rdev->ring_lock);
@@ -826,6 +826,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
826 826
827 /* no need to reprogram if nothing changed unless we are on BTC+ */ 827 /* no need to reprogram if nothing changed unless we are on BTC+ */
828 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { 828 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
829 /* vce just modifies an existing state so force a change */
830 if (ps->vce_active != rdev->pm.dpm.vce_active)
831 goto force;
829 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 832 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
830 /* for pre-BTC and APUs if the num crtcs changed but state is the same, 833 /* for pre-BTC and APUs if the num crtcs changed but state is the same,
831 * all we need to do is update the display configuration. 834 * all we need to do is update the display configuration.
@@ -862,16 +865,21 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
862 } 865 }
863 } 866 }
864 867
868force:
865 if (radeon_dpm == 1) { 869 if (radeon_dpm == 1) {
866 printk("switching from power state:\n"); 870 printk("switching from power state:\n");
867 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 871 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
868 printk("switching to power state:\n"); 872 printk("switching to power state:\n");
869 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 873 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
870 } 874 }
875
871 mutex_lock(&rdev->ddev->struct_mutex); 876 mutex_lock(&rdev->ddev->struct_mutex);
872 down_write(&rdev->pm.mclk_lock); 877 down_write(&rdev->pm.mclk_lock);
873 mutex_lock(&rdev->ring_lock); 878 mutex_lock(&rdev->ring_lock);
874 879
880 /* update whether vce is active */
881 ps->vce_active = rdev->pm.dpm.vce_active;
882
875 ret = radeon_dpm_pre_set_power_state(rdev); 883 ret = radeon_dpm_pre_set_power_state(rdev);
876 if (ret) 884 if (ret)
877 goto done; 885 goto done;
@@ -888,7 +896,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
888 for (i = 0; i < RADEON_NUM_RINGS; i++) { 896 for (i = 0; i < RADEON_NUM_RINGS; i++) {
889 struct radeon_ring *ring = &rdev->ring[i]; 897 struct radeon_ring *ring = &rdev->ring[i];
890 if (ring->ready) 898 if (ring->ready)
891 radeon_fence_wait_empty_locked(rdev, i); 899 radeon_fence_wait_empty(rdev, i);
892 } 900 }
893 901
894 /* program the new power state */ 902 /* program the new power state */
@@ -935,8 +943,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
935 if (enable) { 943 if (enable) {
936 mutex_lock(&rdev->pm.mutex); 944 mutex_lock(&rdev->pm.mutex);
937 rdev->pm.dpm.uvd_active = true; 945 rdev->pm.dpm.uvd_active = true;
938 /* disable this for now */
939#if 0
940 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 946 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
941 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 947 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
942 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 948 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -946,7 +952,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
946 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 952 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
947 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 953 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
948 else 954 else
949#endif
950 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 955 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
951 rdev->pm.dpm.state = dpm_state; 956 rdev->pm.dpm.state = dpm_state;
952 mutex_unlock(&rdev->pm.mutex); 957 mutex_unlock(&rdev->pm.mutex);
@@ -960,6 +965,23 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
960 } 965 }
961} 966}
962 967
968void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
969{
970 if (enable) {
971 mutex_lock(&rdev->pm.mutex);
972 rdev->pm.dpm.vce_active = true;
973 /* XXX select vce level based on ring/task */
974 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
975 mutex_unlock(&rdev->pm.mutex);
976 } else {
977 mutex_lock(&rdev->pm.mutex);
978 rdev->pm.dpm.vce_active = false;
979 mutex_unlock(&rdev->pm.mutex);
980 }
981
982 radeon_pm_compute_clocks(rdev);
983}
984
963static void radeon_pm_suspend_old(struct radeon_device *rdev) 985static void radeon_pm_suspend_old(struct radeon_device *rdev)
964{ 986{
965 mutex_lock(&rdev->pm.mutex); 987 mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 15e44a7281ab..f8050f5429e2 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -63,7 +63,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
63{ 63{
64 int r; 64 int r;
65 65
66 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); 66 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
67 if (r) { 67 if (r) {
68 dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); 68 dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
69 return r; 69 return r;
@@ -145,6 +145,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
145 return r; 145 return r;
146 } 146 }
147 147
148 /* grab a vm id if necessary */
149 if (ib->vm) {
150 struct radeon_fence *vm_id_fence;
151 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
152 radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
153 }
154
148 /* sync with other rings */ 155 /* sync with other rings */
149 r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); 156 r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
150 if (r) { 157 if (r) {
@@ -153,11 +160,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
153 return r; 160 return r;
154 } 161 }
155 162
156 /* if we can't remember our last VM flush then flush now! */ 163 if (ib->vm)
157 /* XXX figure out why we have to flush for every IB */ 164 radeon_vm_flush(rdev, ib->vm, ib->ring);
158 if (ib->vm /*&& !ib->vm->last_flush*/) { 165
159 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
160 }
161 if (const_ib) { 166 if (const_ib) {
162 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); 167 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
163 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); 168 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +177,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
172 if (const_ib) { 177 if (const_ib) {
173 const_ib->fence = radeon_fence_ref(ib->fence); 178 const_ib->fence = radeon_fence_ref(ib->fence);
174 } 179 }
175 /* we just flushed the VM, remember that */ 180
176 if (ib->vm && !ib->vm->last_flush) { 181 if (ib->vm)
177 ib->vm->last_flush = radeon_fence_ref(ib->fence); 182 radeon_vm_fence(rdev, ib->vm, ib->fence);
178 } 183
179 radeon_ring_unlock_commit(rdev, ring); 184 radeon_ring_unlock_commit(rdev, ring);
180 return 0; 185 return 0;
181} 186}
@@ -257,6 +262,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
257 r = radeon_ib_test(rdev, i, ring); 262 r = radeon_ib_test(rdev, i, ring);
258 if (r) { 263 if (r) {
259 ring->ready = false; 264 ring->ready = false;
265 rdev->needs_reset = false;
260 266
261 if (i == RADEON_RING_TYPE_GFX_INDEX) { 267 if (i == RADEON_RING_TYPE_GFX_INDEX) {
262 /* oh, oh, that's really bad */ 268 /* oh, oh, that's really bad */
@@ -342,13 +348,17 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
342 */ 348 */
343void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) 349void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
344{ 350{
345 ring->rptr = radeon_ring_get_rptr(rdev, ring); 351 uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
352
346 /* This works because ring_size is a power of 2 */ 353 /* This works because ring_size is a power of 2 */
347 ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); 354 ring->ring_free_dw = rptr + (ring->ring_size / 4);
348 ring->ring_free_dw -= ring->wptr; 355 ring->ring_free_dw -= ring->wptr;
349 ring->ring_free_dw &= ring->ptr_mask; 356 ring->ring_free_dw &= ring->ptr_mask;
350 if (!ring->ring_free_dw) { 357 if (!ring->ring_free_dw) {
358 /* this is an empty ring */
351 ring->ring_free_dw = ring->ring_size / 4; 359 ring->ring_free_dw = ring->ring_size / 4;
360 /* update lockup info to avoid false positive */
361 radeon_ring_lockup_update(rdev, ring);
352 } 362 }
353} 363}
354 364
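A worked example of the masked free-space arithmetic above, for a hypothetical ring of 1024 dwords (ring_size / 4 == 1024, ptr_mask == 1023):

/* rptr = 100, wptr = 900:  (100 + 1024 - 900) & 1023 = 224 free dwords
 * rptr = 900, wptr = 100:  (900 + 1024 - 100) & 1023 = 800 free dwords
 * rptr == wptr (empty):    the masked result is 0, so the code resets
 *                          ring_free_dw to the full 1024 and refreshes
 *                          the lockup tracking to avoid a false positive.
 */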
@@ -372,19 +382,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
372 /* Align requested size with padding so unlock_commit can 382 /* Align requested size with padding so unlock_commit can
373 * pad safely */ 383 * pad safely */
374 radeon_ring_free_size(rdev, ring); 384 radeon_ring_free_size(rdev, ring);
375 if (ring->ring_free_dw == (ring->ring_size / 4)) {
376 /* This is an empty ring update lockup info to avoid
377 * false positive.
378 */
379 radeon_ring_lockup_update(ring);
380 }
381 ndw = (ndw + ring->align_mask) & ~ring->align_mask; 385 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
382 while (ndw > (ring->ring_free_dw - 1)) { 386 while (ndw > (ring->ring_free_dw - 1)) {
383 radeon_ring_free_size(rdev, ring); 387 radeon_ring_free_size(rdev, ring);
384 if (ndw < ring->ring_free_dw) { 388 if (ndw < ring->ring_free_dw) {
385 break; 389 break;
386 } 390 }
387 r = radeon_fence_wait_next_locked(rdev, ring->idx); 391 r = radeon_fence_wait_next(rdev, ring->idx);
388 if (r) 392 if (r)
389 return r; 393 return r;
390 } 394 }
@@ -478,39 +482,17 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
478} 482}
479 483
480/** 484/**
481 * radeon_ring_force_activity - add some nop packets to the ring
482 *
483 * @rdev: radeon_device pointer
484 * @ring: radeon_ring structure holding ring information
485 *
486 * Add some nop packets to the ring to force activity (all asics).
487 * Used for lockup detection to see if the rptr is advancing.
488 */
489void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
490{
491 int r;
492
493 radeon_ring_free_size(rdev, ring);
494 if (ring->rptr == ring->wptr) {
495 r = radeon_ring_alloc(rdev, ring, 1);
496 if (!r) {
497 radeon_ring_write(ring, ring->nop);
498 radeon_ring_commit(rdev, ring);
499 }
500 }
501}
502
503/**
504 * radeon_ring_lockup_update - update lockup variables 485 * radeon_ring_lockup_update - update lockup variables
505 * 486 *
506 * @ring: radeon_ring structure holding ring information 487 * @ring: radeon_ring structure holding ring information
507 * 488 *
508 * Update the last rptr value and timestamp (all asics). 489 * Update the last rptr value and timestamp (all asics).
509 */ 490 */
510void radeon_ring_lockup_update(struct radeon_ring *ring) 491void radeon_ring_lockup_update(struct radeon_device *rdev,
492 struct radeon_ring *ring)
511{ 493{
512 ring->last_rptr = ring->rptr; 494 atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
513 ring->last_activity = jiffies; 495 atomic64_set(&ring->last_activity, jiffies_64);
514} 496}
515 497
516/** 498/**
@@ -518,40 +500,23 @@ void radeon_ring_lockup_update(struct radeon_ring *ring)
518 * @rdev: radeon device structure 500 * @rdev: radeon device structure
519 * @ring: radeon_ring structure holding ring information 501 * @ring: radeon_ring structure holding ring information
520 * 502 *
521 * We don't need to initialize the lockup tracking information as we will either 503 */
 522 * have the CP rptr at a different value or a jiffies wrap-around, either of
 523 * which will force initialization of the lockup tracking information.
524 *
 525 * A possible false positive is if we get called after a while and last_cp_rptr ==
 526 * the current CP rptr, even if it's unlikely it might happen. To avoid this,
 527 * if the elapsed time since the last call is bigger than 2 seconds then we return
 528 * false and update the tracking information. Due to this the caller must call
 529 * radeon_ring_test_lockup several times in less than 2 sec for a lockup to be reported;
 530 * the fencing code should be cautious about that.
 531 *
 532 * Caller should write to the ring to force the CP to do something so we don't get
 533 * false positives when the CP is just given nothing to do.
534 *
535 **/
536bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 504bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
537{ 505{
538 unsigned long cjiffies, elapsed; 506 uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
507 uint64_t last = atomic64_read(&ring->last_activity);
508 uint64_t elapsed;
539 509
540 cjiffies = jiffies; 510 if (rptr != atomic_read(&ring->last_rptr)) {
541 if (!time_after(cjiffies, ring->last_activity)) { 511 /* ring is still working, no lockup */
542 /* likely a wrap around */ 512 radeon_ring_lockup_update(rdev, ring);
543 radeon_ring_lockup_update(ring);
544 return false; 513 return false;
545 } 514 }
546 ring->rptr = radeon_ring_get_rptr(rdev, ring); 515
547 if (ring->rptr != ring->last_rptr) { 516 elapsed = jiffies_to_msecs(jiffies_64 - last);
548 /* CP is still working no lockup */
549 radeon_ring_lockup_update(ring);
550 return false;
551 }
552 elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
553 if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) { 517 if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
554 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); 518 dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
519 ring->idx, elapsed);
555 return true; 520 return true;
556 } 521 }
557 /* give a chance to the GPU ... */ 522 /* give a chance to the GPU ... */
@@ -709,7 +674,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
709 if (radeon_debugfs_ring_init(rdev, ring)) { 674 if (radeon_debugfs_ring_init(rdev, ring)) {
710 DRM_ERROR("Failed to register debugfs file for rings !\n"); 675 DRM_ERROR("Failed to register debugfs file for rings !\n");
711 } 676 }
712 radeon_ring_lockup_update(ring); 677 radeon_ring_lockup_update(rdev, ring);
713 return 0; 678 return 0;
714} 679}
715 680
@@ -780,8 +745,6 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
780 745
781 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", 746 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
782 ring->wptr, ring->wptr); 747 ring->wptr, ring->wptr);
783 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
784 ring->rptr, ring->rptr);
785 seq_printf(m, "last semaphore signal addr : 0x%016llx\n", 748 seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
786 ring->last_semaphore_signal_addr); 749 ring->last_semaphore_signal_addr);
787 seq_printf(m, "last semaphore wait addr : 0x%016llx\n", 750 seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
@@ -814,6 +777,8 @@ static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
814static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX; 777static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
815static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; 778static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
816static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX; 779static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
780static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
781static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;
817 782
818static struct drm_info_list radeon_debugfs_ring_info_list[] = { 783static struct drm_info_list radeon_debugfs_ring_info_list[] = {
819 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index}, 784 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
@@ -822,6 +787,8 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
822 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index}, 787 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
823 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index}, 788 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
824 {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index}, 789 {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
790 {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
791 {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
825}; 792};
826 793
827static int radeon_debugfs_sa_info(struct seq_file *m, void *data) 794static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c0625805cdd7..adcf3e2f07da 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -312,7 +312,7 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
312int radeon_sa_bo_new(struct radeon_device *rdev, 312int radeon_sa_bo_new(struct radeon_device *rdev,
313 struct radeon_sa_manager *sa_manager, 313 struct radeon_sa_manager *sa_manager,
314 struct radeon_sa_bo **sa_bo, 314 struct radeon_sa_bo **sa_bo,
315 unsigned size, unsigned align, bool block) 315 unsigned size, unsigned align)
316{ 316{
317 struct radeon_fence *fences[RADEON_NUM_RINGS]; 317 struct radeon_fence *fences[RADEON_NUM_RINGS];
318 unsigned tries[RADEON_NUM_RINGS]; 318 unsigned tries[RADEON_NUM_RINGS];
@@ -353,14 +353,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
353 r = radeon_fence_wait_any(rdev, fences, false); 353 r = radeon_fence_wait_any(rdev, fences, false);
354 spin_lock(&sa_manager->wq.lock); 354 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 355 /* if we have nothing to wait for block */
356 if (r == -ENOENT && block) { 356 if (r == -ENOENT) {
357 r = wait_event_interruptible_locked( 357 r = wait_event_interruptible_locked(
358 sa_manager->wq, 358 sa_manager->wq,
359 radeon_sa_event(sa_manager, size, align) 359 radeon_sa_event(sa_manager, size, align)
360 ); 360 );
361
362 } else if (r == -ENOENT) {
363 r = -ENOMEM;
364 } 361 }
365 362
366 } while (!r); 363 } while (!r);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 9006b32d5eed..dbd6bcde92de 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -42,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
42 return -ENOMEM; 42 return -ENOMEM;
43 } 43 }
44 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, 44 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
45 8 * RADEON_NUM_SYNCS, 8, true); 45 8 * RADEON_NUM_SYNCS, 8);
46 if (r) { 46 if (r) {
47 kfree(*semaphore); 47 kfree(*semaphore);
48 *semaphore = NULL; 48 *semaphore = NULL;
@@ -147,7 +147,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
147 147
148 if (++count > RADEON_NUM_SYNCS) { 148 if (++count > RADEON_NUM_SYNCS) {
149 /* not enough room, wait manually */ 149 /* not enough room, wait manually */
150 radeon_fence_wait_locked(fence); 150 r = radeon_fence_wait(fence, false);
151 if (r)
152 return r;
151 continue; 153 continue;
152 } 154 }
153 155
@@ -161,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
161 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { 163 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
162 /* signaling wasn't successful wait manually */ 164 /* signaling wasn't successful wait manually */
163 radeon_ring_undo(&rdev->ring[i]); 165 radeon_ring_undo(&rdev->ring[i]);
164 radeon_fence_wait_locked(fence); 166 r = radeon_fence_wait(fence, false);
167 if (r)
168 return r;
165 continue; 169 continue;
166 } 170 }
167 171
@@ -169,7 +173,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
169 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { 173 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
170 /* waiting wasn't successful wait manually */ 174 /* waiting wasn't successful wait manually */
171 radeon_ring_undo(&rdev->ring[i]); 175 radeon_ring_undo(&rdev->ring[i]);
172 radeon_fence_wait_locked(fence); 176 r = radeon_fence_wait(fence, false);
177 if (r)
178 return r;
173 continue; 179 continue;
174 } 180 }
175 181
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 12e8099a0823..3a13e0d1055c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -257,20 +257,36 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
257 struct radeon_ring *ring, 257 struct radeon_ring *ring,
258 struct radeon_fence **fence) 258 struct radeon_fence **fence)
259{ 259{
260 uint32_t handle = ring->idx ^ 0xdeafbeef;
260 int r; 261 int r;
261 262
262 if (ring->idx == R600_RING_TYPE_UVD_INDEX) { 263 if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
263 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); 264 r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
264 if (r) { 265 if (r) {
265 DRM_ERROR("Failed to get dummy create msg\n"); 266 DRM_ERROR("Failed to get dummy create msg\n");
266 return r; 267 return r;
267 } 268 }
268 269
269 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence); 270 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
270 if (r) { 271 if (r) {
271 DRM_ERROR("Failed to get dummy destroy msg\n"); 272 DRM_ERROR("Failed to get dummy destroy msg\n");
272 return r; 273 return r;
273 } 274 }
275
276 } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
277 ring->idx == TN_RING_TYPE_VCE2_INDEX) {
278 r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
279 if (r) {
280 DRM_ERROR("Failed to get dummy create msg\n");
281 return r;
282 }
283
284 r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
285 if (r) {
286 DRM_ERROR("Failed to get dummy destroy msg\n");
287 return r;
288 }
289
274 } else { 290 } else {
275 r = radeon_ring_lock(rdev, ring, 64); 291 r = radeon_ring_lock(rdev, ring, 64);
276 if (r) { 292 if (r) {
@@ -486,6 +502,16 @@ out_cleanup:
486 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 502 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
487} 503}
488 504
505static bool radeon_test_sync_possible(struct radeon_ring *ringA,
506 struct radeon_ring *ringB)
507{
508 if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
509 ringB->idx == TN_RING_TYPE_VCE1_INDEX)
510 return false;
511
512 return true;
513}
514
489void radeon_test_syncing(struct radeon_device *rdev) 515void radeon_test_syncing(struct radeon_device *rdev)
490{ 516{
491 int i, j, k; 517 int i, j, k;
@@ -500,6 +526,9 @@ void radeon_test_syncing(struct radeon_device *rdev)
500 if (!ringB->ready) 526 if (!ringB->ready)
501 continue; 527 continue;
502 528
529 if (!radeon_test_sync_possible(ringA, ringB))
530 continue;
531
503 DRM_INFO("Testing syncing between rings %d and %d...\n", i, j); 532 DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
504 radeon_test_ring_sync(rdev, ringA, ringB); 533 radeon_test_ring_sync(rdev, ringA, ringB);
505 534
@@ -511,6 +540,12 @@ void radeon_test_syncing(struct radeon_device *rdev)
511 if (!ringC->ready) 540 if (!ringC->ready)
512 continue; 541 continue;
513 542
543 if (!radeon_test_sync_possible(ringA, ringC))
544 continue;
545
546 if (!radeon_test_sync_possible(ringB, ringC))
547 continue;
548
514 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k); 549 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
515 radeon_test_ring_sync2(rdev, ringA, ringB, ringC); 550 radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
516 551
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 040a2a10ea17..c8a8a5144ec1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -406,8 +406,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
406 if (r) { 406 if (r) {
407memcpy: 407memcpy:
408 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 408 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
409 if (r) {
410 return r;
411 }
409 } 412 }
410 return r; 413
414 /* update statistics */
415 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
416 return 0;
411} 417}
412 418
413static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 419static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -701,7 +707,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
701 /* No other users of address space, so set it to 0 */ 707 /* No other users of address space, so set it to 0 */
702 r = ttm_bo_device_init(&rdev->mman.bdev, 708 r = ttm_bo_device_init(&rdev->mman.bdev,
703 rdev->mman.bo_global_ref.ref.object, 709 rdev->mman.bo_global_ref.ref.object,
704 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, 710 &radeon_bo_driver,
711 rdev->ddev->anon_inode->i_mapping,
712 DRM_FILE_PAGE_OFFSET,
705 rdev->need_dma32); 713 rdev->need_dma32);
706 if (r) { 714 if (r) {
707 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 715 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -742,7 +750,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
742 } 750 }
743 DRM_INFO("radeon: %uM of GTT memory ready.\n", 751 DRM_INFO("radeon: %uM of GTT memory ready.\n",
744 (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); 752 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
745 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
746 753
747 r = radeon_ttm_debugfs_init(rdev); 754 r = radeon_ttm_debugfs_init(rdev);
748 if (r) { 755 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 3e6804b2b2ef..5748bdaeacce 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -455,7 +455,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
455 } 455 }
456 456
457 reloc = p->relocs_ptr[(idx / 4)]; 457 reloc = p->relocs_ptr[(idx / 4)];
458 start = reloc->lobj.gpu_offset; 458 start = reloc->gpu_offset;
459 end = start + radeon_bo_size(reloc->robj); 459 end = start + radeon_bo_size(reloc->robj);
460 start += offset; 460 start += offset;
461 461
@@ -807,8 +807,7 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
807 (rdev->pm.dpm.hd != hd)) { 807 (rdev->pm.dpm.hd != hd)) {
808 rdev->pm.dpm.sd = sd; 808 rdev->pm.dpm.sd = sd;
809 rdev->pm.dpm.hd = hd; 809 rdev->pm.dpm.hd = hd;
810 /* disable this for now */ 810 streams_changed = true;
811 /*streams_changed = true;*/
812 } 811 }
813 } 812 }
814 813
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
new file mode 100644
index 000000000000..76e9904bc537
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -0,0 +1,699 @@
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 * Authors: Christian König <christian.koenig@amd.com>
26 */
27
28#include <linux/firmware.h>
29#include <linux/module.h>
30#include <drm/drmP.h>
31#include <drm/drm.h>
32
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "sid.h"
36
37/* 1 second timeout */
38#define VCE_IDLE_TIMEOUT_MS 1000
39
40/* Firmware Names */
41#define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin"
42
43MODULE_FIRMWARE(FIRMWARE_BONAIRE);
44
45static void radeon_vce_idle_work_handler(struct work_struct *work);
46
47/**
48 * radeon_vce_init - allocate memory, load vce firmware
49 *
50 * @rdev: radeon_device pointer
51 *
52 * First step to get VCE online, allocate memory and load the firmware
53 */
54int radeon_vce_init(struct radeon_device *rdev)
55{
56 static const char *fw_version = "[ATI LIB=VCEFW,";
57 static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
58 unsigned long size;
59 const char *fw_name, *c;
60 uint8_t start, mid, end;
61 int i, r;
62
63 INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
64
65 switch (rdev->family) {
66 case CHIP_BONAIRE:
67 case CHIP_KAVERI:
68 case CHIP_KABINI:
69 fw_name = FIRMWARE_BONAIRE;
70 break;
71
72 default:
73 return -EINVAL;
74 }
75
76 r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
77 if (r) {
78 dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
79 fw_name);
80 return r;
81 }
82
83 /* search for firmware version */
84
85 size = rdev->vce_fw->size - strlen(fw_version) - 9;
86 c = rdev->vce_fw->data;
87 for (; size > 0; --size, ++c)
88 if (strncmp(c, fw_version, strlen(fw_version)) == 0)
89 break;
90
91 if (size == 0)
92 return -EINVAL;
93
94 c += strlen(fw_version);
95 if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
96 return -EINVAL;
97
98 /* search for feedback version */
99
100 size = rdev->vce_fw->size - strlen(fb_version) - 3;
101 c = rdev->vce_fw->data;
102 for (; size > 0; --size, ++c)
103 if (strncmp(c, fb_version, strlen(fb_version)) == 0)
104 break;
105
106 if (size == 0)
107 return -EINVAL;
108
109 c += strlen(fb_version);
110 if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
111 return -EINVAL;
112
113 DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
114 start, mid, end, rdev->vce.fb_version);
115
116 rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
117
118 /* we can only work with this fw version for now */
119 if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
120 return -EINVAL;
121
122 /* allocate firmware, stack and heap BO */
123
124 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
125 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
126 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
127 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
128 if (r) {
129 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
130 return r;
131 }
132
133 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
134 if (r) {
135 radeon_bo_unref(&rdev->vce.vcpu_bo);
136 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
137 return r;
138 }
139
140 r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
141 &rdev->vce.gpu_addr);
142 radeon_bo_unreserve(rdev->vce.vcpu_bo);
143 if (r) {
144 radeon_bo_unref(&rdev->vce.vcpu_bo);
145 dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
146 return r;
147 }
148
149 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
150 atomic_set(&rdev->vce.handles[i], 0);
151 rdev->vce.filp[i] = NULL;
152 }
153
154 return 0;
155}
156
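
radeon_vce_init() above locates the firmware version by linearly scanning the blob for a signature string and then parsing the dotted triple behind it with sscanf(). A minimal userspace sketch of the same scan, assuming a hypothetical blob; it uses %2hhu for the unsigned fields where the driver uses %2hhd:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* hypothetical blob; real firmware embeds the same signature */
            static const char blob[] = "....[ATI LIB=VCEFW,40.2.2]....";
            static const char sig[]  = "[ATI LIB=VCEFW,";
            const char *c = blob;
            /* leave room for the "xx.yy.zz]" tail, as the driver does */
            size_t size = sizeof(blob) - strlen(sig) - 9;
            uint8_t start, mid, end;

            for (; size > 0; --size, ++c)
                    if (strncmp(c, sig, strlen(sig)) == 0)
                            break;
            if (size == 0)
                    return 1;

            c += strlen(sig);
            if (sscanf(c, "%2hhu.%2hhu.%2hhu]", &start, &mid, &end) != 3)
                    return 1;

            printf("VCE firmware %u.%u.%u -> version key 0x%08x\n",
                   start, mid, end, (start << 24) | (mid << 16) | (end << 8));
            return 0;
    }

On the sample blob this prints version key 0x28020200, the same (major << 24) | (minor << 16) | (patch << 8) encoding the driver compares against its single supported firmware version.
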
157/**
158 * radeon_vce_fini - free memory
159 *
160 * @rdev: radeon_device pointer
161 *
162 * Last step on VCE teardown, free firmware memory
163 */
164void radeon_vce_fini(struct radeon_device *rdev)
165{
166 if (rdev->vce.vcpu_bo == NULL)
167 return;
168
169 radeon_bo_unref(&rdev->vce.vcpu_bo);
170
171 release_firmware(rdev->vce_fw);
172}
173
174/**
175 * radeon_vce_suspend - unpin VCE fw memory
176 *
177 * @rdev: radeon_device pointer
178 *
179 */
180int radeon_vce_suspend(struct radeon_device *rdev)
181{
182 int i;
183
184 if (rdev->vce.vcpu_bo == NULL)
185 return 0;
186
187 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
188 if (atomic_read(&rdev->vce.handles[i]))
189 break;
190
191 if (i == RADEON_MAX_VCE_HANDLES)
192 return 0;
193
194 /* TODO: suspending running encoding sessions isn't supported */
195 return -EINVAL;
196}
197
198/**
199 * radeon_vce_resume - pin VCE fw memory
200 *
201 * @rdev: radeon_device pointer
202 *
203 */
204int radeon_vce_resume(struct radeon_device *rdev)
205{
206 void *cpu_addr;
207 int r;
208
209 if (rdev->vce.vcpu_bo == NULL)
210 return -EINVAL;
211
212 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
213 if (r) {
214 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
215 return r;
216 }
217
218 r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
219 if (r) {
220 radeon_bo_unreserve(rdev->vce.vcpu_bo);
221 dev_err(rdev->dev, "(%d) VCE map failed\n", r);
222 return r;
223 }
224
225 memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
226
227 radeon_bo_kunmap(rdev->vce.vcpu_bo);
228
229 radeon_bo_unreserve(rdev->vce.vcpu_bo);
230
231 return 0;
232}
233
234/**
235 * radeon_vce_idle_work_handler - power off VCE
236 *
237 * @work: pointer to work structure
238 *
239 * power off VCE when it's not used any more
240 */
241static void radeon_vce_idle_work_handler(struct work_struct *work)
242{
243 struct radeon_device *rdev =
244 container_of(work, struct radeon_device, vce.idle_work.work);
245
246 if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
247 (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
248 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
249 radeon_dpm_enable_vce(rdev, false);
250 } else {
251 radeon_set_vce_clocks(rdev, 0, 0);
252 }
253 } else {
254 schedule_delayed_work(&rdev->vce.idle_work,
255 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
256 }
257}
258
259/**
260 * radeon_vce_note_usage - power up VCE
261 *
262 * @rdev: radeon_device pointer
263 *
264 * Make sure VCE is powered up when we want to use it
265 */
266void radeon_vce_note_usage(struct radeon_device *rdev)
267{
268 bool streams_changed = false;
269 bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
270 set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
271 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
272
273 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
274 /* XXX figure out if the streams changed */
275 streams_changed = false;
276 }
277
278 if (set_clocks || streams_changed) {
279 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
280 radeon_dpm_enable_vce(rdev, true);
281 } else {
282 radeon_set_vce_clocks(rdev, 53300, 40000);
283 }
284 }
285}
286
287/**
288 * radeon_vce_free_handles - free still open VCE handles
289 *
290 * @rdev: radeon_device pointer
291 * @filp: drm file pointer
292 *
293 * Close all VCE handles still open by this file pointer
294 */
295void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
296{
297 int i, r;
298 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
299 uint32_t handle = atomic_read(&rdev->vce.handles[i]);
300 if (!handle || rdev->vce.filp[i] != filp)
301 continue;
302
303 radeon_vce_note_usage(rdev);
304
305 r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
306 handle, NULL);
307 if (r)
308 DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
309
310 rdev->vce.filp[i] = NULL;
311 atomic_set(&rdev->vce.handles[i], 0);
312 }
313}
314
315/**
316 * radeon_vce_get_create_msg - generate a VCE create msg
317 *
318 * @rdev: radeon_device pointer
319 * @ring: ring we should submit the msg to
320 * @handle: VCE session handle to use
321 * @fence: optional fence to return
322 *
323 * Open up a stream for HW test
324 */
325int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
326 uint32_t handle, struct radeon_fence **fence)
327{
328 const unsigned ib_size_dw = 1024;
329 struct radeon_ib ib;
330 uint64_t dummy;
331 int i, r;
332
333 r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
334 if (r) {
335 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
336 return r;
337 }
338
339 dummy = ib.gpu_addr + 1024;
340
341 /* stitch together a VCE create msg */
342 ib.length_dw = 0;
343 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
344 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
345 ib.ptr[ib.length_dw++] = handle;
346
347 ib.ptr[ib.length_dw++] = 0x00000030; /* len */
348 ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
349 ib.ptr[ib.length_dw++] = 0x00000000;
350 ib.ptr[ib.length_dw++] = 0x00000042;
351 ib.ptr[ib.length_dw++] = 0x0000000a;
352 ib.ptr[ib.length_dw++] = 0x00000001;
353 ib.ptr[ib.length_dw++] = 0x00000080;
354 ib.ptr[ib.length_dw++] = 0x00000060;
355 ib.ptr[ib.length_dw++] = 0x00000100;
356 ib.ptr[ib.length_dw++] = 0x00000100;
357 ib.ptr[ib.length_dw++] = 0x0000000c;
358 ib.ptr[ib.length_dw++] = 0x00000000;
359
360 ib.ptr[ib.length_dw++] = 0x00000014; /* len */
361 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
362 ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
363 ib.ptr[ib.length_dw++] = dummy;
364 ib.ptr[ib.length_dw++] = 0x00000001;
365
366 for (i = ib.length_dw; i < ib_size_dw; ++i)
367 ib.ptr[i] = 0x0;
368
369 r = radeon_ib_schedule(rdev, &ib, NULL);
370 if (r) {
371 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
372 }
373
374 if (fence)
375 *fence = radeon_fence_ref(ib.fence);
376
377 radeon_ib_free(rdev, &ib);
378
379 return r;
380}
381
382/**
383 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
384 *
385 * @rdev: radeon_device pointer
386 * @ring: ring we should submit the msg to
387 * @handle: VCE session handle to use
388 * @fence: optional fence to return
389 *
390 * Close up a stream for HW test or if userspace failed to do so
391 */
392int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
393 uint32_t handle, struct radeon_fence **fence)
394{
395 const unsigned ib_size_dw = 1024;
396 struct radeon_ib ib;
397 uint64_t dummy;
398 int i, r;
399
400 r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
401 if (r) {
402 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
403 return r;
404 }
405
406 dummy = ib.gpu_addr + 1024;
407
408 /* stitch together a VCE destroy msg */
409 ib.length_dw = 0;
410 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
411 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
412 ib.ptr[ib.length_dw++] = handle;
413
414 ib.ptr[ib.length_dw++] = 0x00000014; /* len */
415 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
416 ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
417 ib.ptr[ib.length_dw++] = dummy;
418 ib.ptr[ib.length_dw++] = 0x00000001;
419
420 ib.ptr[ib.length_dw++] = 0x00000008; /* len */
421 ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
422
423 for (i = ib.length_dw; i < ib_size_dw; ++i)
424 ib.ptr[i] = 0x0;
425
426 r = radeon_ib_schedule(rdev, &ib, NULL);
427 if (r) {
428 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
429 }
430
431 if (fence)
432 *fence = radeon_fence_ref(ib.fence);
433
434 radeon_ib_free(rdev, &ib);
435
436 return r;
437}
438
439/**
440 * radeon_vce_cs_reloc - command submission relocation
441 *
442 * @p: parser context
443 * @lo: address of lower dword
444 * @hi: address of higher dword
445 *
446 * Patch relocation inside command stream with real buffer address
447 */
448int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
449{
450 struct radeon_cs_chunk *relocs_chunk;
451 uint64_t offset;
452 unsigned idx;
453
454 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
455 offset = radeon_get_ib_value(p, lo);
456 idx = radeon_get_ib_value(p, hi);
457
458 if (idx >= relocs_chunk->length_dw) {
459 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
460 idx, relocs_chunk->length_dw);
461 return -EINVAL;
462 }
463
464 offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
465
466 p->ib.ptr[lo] = offset & 0xFFFFFFFF;
467 p->ib.ptr[hi] = offset >> 32;
468
469 return 0;
470}
471
472/**
473 * radeon_vce_cs_parse - parse and validate the command stream
474 *
475 * @p: parser context
476 *
477 */
478int radeon_vce_cs_parse(struct radeon_cs_parser *p)
479{
480 uint32_t handle = 0;
481 bool destroy = false;
482 int i, r;
483
484 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
485 uint32_t len = radeon_get_ib_value(p, p->idx);
486 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
487
488 if ((len < 8) || (len & 3)) {
489 DRM_ERROR("invalid VCE command length (%d)!\n", len);
490 return -EINVAL;
491 }
492
493 switch (cmd) {
494 case 0x00000001: // session
495 handle = radeon_get_ib_value(p, p->idx + 2);
496 break;
497
498 case 0x00000002: // task info
499 case 0x01000001: // create
500 case 0x04000001: // config extension
501 case 0x04000002: // pic control
502 case 0x04000005: // rate control
503 case 0x04000007: // motion estimation
504 case 0x04000008: // rdo
505 break;
506
507 case 0x03000001: // encode
508 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
509 if (r)
510 return r;
511
512 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
513 if (r)
514 return r;
515 break;
516
517 case 0x02000001: // destroy
518 destroy = true;
519 break;
520
521 case 0x05000001: // context buffer
522 case 0x05000004: // video bitstream buffer
523 case 0x05000005: // feedback buffer
524 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
525 if (r)
526 return r;
527 break;
528
529 default:
530 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
531 return -EINVAL;
532 }
533
534 p->idx += len / 4;
535 }
536
537 if (destroy) {
538 /* IB contains a destroy msg, free the handle */
539 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
540 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
541
542 return 0;
543 }
544
545 /* create or encode, validate the handle */
546 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
547 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
548 return 0;
549 }
550
551 /* handle not found, try to alloc a new one */
552 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
553 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
554 p->rdev->vce.filp[i] = p->filp;
555 return 0;
556 }
557 }
558
559 DRM_ERROR("No more free VCE handles!\n");
560 return -EINVAL;
561}
562
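
The handle bookkeeping in radeon_vce_cs_parse() above claims a free slot with atomic_cmpxchg(&handles[i], 0, handle), so two parsers racing for the same slot cannot both succeed. A standalone sketch of that lock-free claim using C11 atomics in place of the kernel's atomic_t API; MAX_HANDLES and claim_handle() are illustrative names, not driver symbols:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HANDLES 16          /* stands in for RADEON_MAX_VCE_HANDLES */

    static atomic_uint handles[MAX_HANDLES];    /* zero == slot is free */

    /* claim a free slot for 'handle'; returns the slot index or -1 */
    static int claim_handle(uint32_t handle)
    {
            for (int i = 0; i < MAX_HANDLES; ++i) {
                    unsigned int expected = 0;

                    /* succeeds only if the slot still holds 0, like
                     * atomic_cmpxchg(&vce.handles[i], 0, handle) above */
                    if (atomic_compare_exchange_strong(&handles[i], &expected,
                                                       handle))
                            return i;
            }
            return -1;              /* no free VCE handle slots */
    }

    int main(void)
    {
            printf("slot %d\n", claim_handle(0x1001));  /* 0 */
            printf("slot %d\n", claim_handle(0x1002));  /* 1 */
            return 0;
    }
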
563/**
564 * radeon_vce_semaphore_emit - emit a semaphore command
565 *
566 * @rdev: radeon_device pointer
567 * @ring: engine to use
568 * @semaphore: address of semaphore
569 * @emit_wait: true=emit wait, false=emit signal
570 *
571 */
572bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
573 struct radeon_ring *ring,
574 struct radeon_semaphore *semaphore,
575 bool emit_wait)
576{
577 uint64_t addr = semaphore->gpu_addr;
578
579 radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
580 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
581 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
582 radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
583 if (!emit_wait)
584 radeon_ring_write(ring, VCE_CMD_END);
585
586 return true;
587}
588
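
radeon_vce_semaphore_emit() above drops the three alignment bits of the semaphore address and splits the rest into two 20-bit ring words, so any 8-byte-aligned address below 2^43 survives the round trip. A small sketch that verifies the packing; the sample address is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x123456789ULL & ~7ULL;  /* 8-byte aligned address */
            uint32_t lo = (addr >> 3) & 0x000FFFFF;  /* bits 3..22 */
            uint32_t hi = (addr >> 23) & 0x000FFFFF; /* bits 23..42 */

            /* reassemble to show the packing is lossless for <= 43-bit addrs */
            uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);
            printf("addr 0x%llx -> lo 0x%05x hi 0x%05x -> 0x%llx\n",
                   (unsigned long long)addr, lo, hi, (unsigned long long)back);
            return 0;
    }
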
589/**
590 * radeon_vce_ib_execute - execute indirect buffer
591 *
592 * @rdev: radeon_device pointer
593 * @ib: the IB to execute
594 *
595 */
596void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
597{
598 struct radeon_ring *ring = &rdev->ring[ib->ring];
599 radeon_ring_write(ring, VCE_CMD_IB);
600 radeon_ring_write(ring, ib->gpu_addr);
601 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
602 radeon_ring_write(ring, ib->length_dw);
603}
604
605/**
606 * radeon_vce_fence_emit - add a fence command to the ring
607 *
608 * @rdev: radeon_device pointer
609 * @fence: the fence
610 *
611 */
612void radeon_vce_fence_emit(struct radeon_device *rdev,
613 struct radeon_fence *fence)
614{
615 struct radeon_ring *ring = &rdev->ring[fence->ring];
616 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
617
618 radeon_ring_write(ring, VCE_CMD_FENCE);
619 radeon_ring_write(ring, addr);
620 radeon_ring_write(ring, upper_32_bits(addr));
621 radeon_ring_write(ring, fence->seq);
622 radeon_ring_write(ring, VCE_CMD_TRAP);
623 radeon_ring_write(ring, VCE_CMD_END);
624}
625
626/**
627 * radeon_vce_ring_test - test if VCE ring is working
628 *
629 * @rdev: radeon_device pointer
630 * @ring: the engine to test on
631 *
632 */
633int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
634{
635 uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
636 unsigned i;
637 int r;
638
639 r = radeon_ring_lock(rdev, ring, 16);
640 if (r) {
641 DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
642 ring->idx, r);
643 return r;
644 }
645 radeon_ring_write(ring, VCE_CMD_END);
646 radeon_ring_unlock_commit(rdev, ring);
647
648 for (i = 0; i < rdev->usec_timeout; i++) {
649 if (vce_v1_0_get_rptr(rdev, ring) != rptr)
650 break;
651 DRM_UDELAY(1);
652 }
653
654 if (i < rdev->usec_timeout) {
655 DRM_INFO("ring test on %d succeeded in %d usecs\n",
656 ring->idx, i);
657 } else {
658 DRM_ERROR("radeon: ring %d test failed\n",
659 ring->idx);
660 r = -ETIMEDOUT;
661 }
662
663 return r;
664}
665
666/**
667 * radeon_vce_ib_test - test if VCE IBs are working
668 *
669 * @rdev: radeon_device pointer
670 * @ring: the engine to test on
671 *
672 */
673int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
674{
675 struct radeon_fence *fence = NULL;
676 int r;
677
678 r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
679 if (r) {
680 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
681 goto error;
682 }
683
684 r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
685 if (r) {
686 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
687 goto error;
688 }
689
690 r = radeon_fence_wait(fence, false);
691 if (r) {
692 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
693 } else {
694 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
695 }
696error:
697 radeon_fence_unref(&fence);
698 return r;
699}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
new file mode 100644
index 000000000000..2aae6ce49d32
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -0,0 +1,966 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <drm/drmP.h>
29#include <drm/radeon_drm.h>
30#include "radeon.h"
31#include "radeon_trace.h"
32
33/*
34 * GPUVM
35 * GPUVM is similar to the legacy gart on older asics, however
36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active
 38 * at any given time. The VM page tables can contain a mix of
 39 * vram pages and system memory pages, and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table
 43 * associated with each VMID. When executing a command buffer,
 44 * the kernel tells the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel
 47 * sets up their page tables accordingly when they submit their
48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time;
50 * SI supports 16.
51 */
52
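
The page-table walk later in this file (see radeon_vm_update_ptes()) turns a GPU virtual address into a page-directory index (addr >> RADEON_VM_BLOCK_SIZE) plus an entry index within that table (addr & (RADEON_VM_PTE_COUNT - 1)). A minimal standalone sketch of that decomposition; the 4 KiB GPU page and 9-bit block size are assumptions for illustration, not values taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ULL                 /* assumed RADEON_GPU_PAGE_SIZE */
    #define VM_BLOCK_SIZE 9                       /* assumed RADEON_VM_BLOCK_SIZE */
    #define VM_PTE_COUNT  (1ULL << VM_BLOCK_SIZE) /* 512 PTEs per page table */

    int main(void)
    {
            uint64_t va  = 0x12345678ULL;            /* arbitrary GPU virtual address */
            uint64_t pfn = va / GPU_PAGE_SIZE;       /* GPU page frame number */
            uint64_t pde = pfn >> VM_BLOCK_SIZE;     /* index into the page directory */
            uint64_t pte = pfn & (VM_PTE_COUNT - 1); /* index into that page table */

            printf("va 0x%llx -> pde %llu, pte %llu\n", (unsigned long long)va,
                   (unsigned long long)pde, (unsigned long long)pte);
            return 0;
    }

With one 8-byte PDE per page table, max_pfn >> RADEON_VM_BLOCK_SIZE entries give exactly the directory size computed by radeon_vm_directory_size() below.
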
53/**
 54 * radeon_vm_num_pdes - return the number of page directory entries
55 *
56 * @rdev: radeon_device pointer
57 *
58 * Calculate the number of page directory entries (cayman+).
59 */
60static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61{
62 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
63}
64
65/**
66 * radeon_vm_directory_size - returns the size of the page directory in bytes
67 *
68 * @rdev: radeon_device pointer
69 *
70 * Calculate the size of the page directory in bytes (cayman+).
71 */
72static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
73{
74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
75}
76
77/**
78 * radeon_vm_manager_init - init the vm manager
79 *
80 * @rdev: radeon_device pointer
81 *
82 * Init the vm manager (cayman+).
83 * Returns 0 for success, error for failure.
84 */
85int radeon_vm_manager_init(struct radeon_device *rdev)
86{
87 int r;
88
89 if (!rdev->vm_manager.enabled) {
90 r = radeon_asic_vm_init(rdev);
91 if (r)
92 return r;
93
94 rdev->vm_manager.enabled = true;
95 }
96 return 0;
97}
98
99/**
100 * radeon_vm_manager_fini - tear down the vm manager
101 *
102 * @rdev: radeon_device pointer
103 *
104 * Tear down the VM manager (cayman+).
105 */
106void radeon_vm_manager_fini(struct radeon_device *rdev)
107{
108 int i;
109
110 if (!rdev->vm_manager.enabled)
111 return;
112
113 for (i = 0; i < RADEON_NUM_VM; ++i)
114 radeon_fence_unref(&rdev->vm_manager.active[i]);
115 radeon_asic_vm_fini(rdev);
116 rdev->vm_manager.enabled = false;
117}
118
119/**
120 * radeon_vm_get_bos - add the vm BOs to a validation list
121 *
122 * @vm: vm providing the BOs
123 * @head: head of validation list
124 *
125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+).
127 */
128struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm,
130 struct list_head *head)
131{
132 struct radeon_cs_reloc *list;
133 unsigned i, idx, size;
134
135 size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
136 list = kmalloc(size, GFP_KERNEL);
137 if (!list)
138 return NULL;
139
140 /* add the vm page table to the list */
141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory;
143 list[0].domain = RADEON_GEM_DOMAIN_VRAM;
144 list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0;
147 list[0].handle = 0;
148 list_add(&list[0].tv.head, head);
149
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
151 if (!vm->page_tables[i].bo)
152 continue;
153
154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0;
161 list_add(&list[idx++].tv.head, head);
162 }
163
164 return list;
165}
166
167/**
168 * radeon_vm_grab_id - allocate the next free VMID
169 *
170 * @rdev: radeon_device pointer
171 * @vm: vm to allocate id for
172 * @ring: ring we want to submit job to
173 *
174 * Allocate an id for the vm (cayman+).
175 * Returns the fence we need to sync to (if any).
176 *
177 * Global and local mutex must be locked!
178 */
179struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
180 struct radeon_vm *vm, int ring)
181{
182 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
183 unsigned choices[2] = {};
184 unsigned i;
185
186 /* check if the id is still valid */
187 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
188 return NULL;
189
190 /* we definitely need to flush */
191 radeon_fence_unref(&vm->last_flush);
192
193 /* skip over VMID 0, since it is the system VM */
194 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
195 struct radeon_fence *fence = rdev->vm_manager.active[i];
196
197 if (fence == NULL) {
198 /* found a free one */
199 vm->id = i;
200 trace_radeon_vm_grab_id(vm->id, ring);
201 return NULL;
202 }
203
204 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
205 best[fence->ring] = fence;
206 choices[fence->ring == ring ? 0 : 1] = i;
207 }
208 }
209
210 for (i = 0; i < 2; ++i) {
211 if (choices[i]) {
212 vm->id = choices[i];
213 trace_radeon_vm_grab_id(vm->id, ring);
214 return rdev->vm_manager.active[choices[i]];
215 }
216 }
217
218 /* should never happen */
219 BUG();
220 return NULL;
221}
222
223/**
224 * radeon_vm_flush - hardware flush the vm
225 *
226 * @rdev: radeon_device pointer
227 * @vm: vm we want to flush
228 * @ring: ring to use for flush
229 *
230 * Flush the vm (cayman+).
231 *
232 * Global and local mutex must be locked!
233 */
234void radeon_vm_flush(struct radeon_device *rdev,
235 struct radeon_vm *vm,
236 int ring)
237{
238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
239
240 /* if we can't remember our last VM flush then flush now! */
241 /* XXX figure out why we have to flush all the time */
242 if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
243 vm->pd_gpu_addr = pd_addr;
244 radeon_ring_vm_flush(rdev, ring, vm);
245 }
246}
247
248/**
249 * radeon_vm_fence - remember fence for vm
250 *
251 * @rdev: radeon_device pointer
252 * @vm: vm we want to fence
253 * @fence: fence to remember
254 *
255 * Fence the vm (cayman+).
256 * Set the fence used to protect page table and id.
257 *
258 * Global and local mutex must be locked!
259 */
260void radeon_vm_fence(struct radeon_device *rdev,
261 struct radeon_vm *vm,
262 struct radeon_fence *fence)
263{
264 radeon_fence_unref(&vm->fence);
265 vm->fence = radeon_fence_ref(fence);
266
267 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
268 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
269
270 radeon_fence_unref(&vm->last_id_use);
271 vm->last_id_use = radeon_fence_ref(fence);
272
273 /* we just flushed the VM, remember that */
274 if (!vm->last_flush)
275 vm->last_flush = radeon_fence_ref(fence);
276}
277
278/**
279 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
280 *
281 * @vm: requested vm
282 * @bo: requested buffer object
283 *
284 * Find @bo inside the requested vm (cayman+).
285 * Search inside the @bo's vm list for the requested vm
286 * Returns the found bo_va or NULL if none is found
287 *
288 * Object has to be reserved!
289 */
290struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
291 struct radeon_bo *bo)
292{
293 struct radeon_bo_va *bo_va;
294
295 list_for_each_entry(bo_va, &bo->va, bo_list) {
296 if (bo_va->vm == vm) {
297 return bo_va;
298 }
299 }
300 return NULL;
301}
302
303/**
304 * radeon_vm_bo_add - add a bo to a specific vm
305 *
306 * @rdev: radeon_device pointer
307 * @vm: requested vm
308 * @bo: radeon buffer object
309 *
310 * Add @bo into the requested vm (cayman+).
311 * Add @bo to the list of bos associated with the vm
312 * Returns newly added bo_va or NULL for failure
313 *
314 * Object has to be reserved!
315 */
316struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
317 struct radeon_vm *vm,
318 struct radeon_bo *bo)
319{
320 struct radeon_bo_va *bo_va;
321
322 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
323 if (bo_va == NULL) {
324 return NULL;
325 }
326 bo_va->vm = vm;
327 bo_va->bo = bo;
328 bo_va->soffset = 0;
329 bo_va->eoffset = 0;
330 bo_va->flags = 0;
331 bo_va->valid = false;
332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list);
335
336 mutex_lock(&vm->mutex);
337 list_add(&bo_va->vm_list, &vm->va);
338 list_add_tail(&bo_va->bo_list, &bo->va);
339 mutex_unlock(&vm->mutex);
340
341 return bo_va;
342}
343
344/**
345 * radeon_vm_clear_bo - initially clear the page dir/table
346 *
347 * @rdev: radeon_device pointer
348 * @bo: bo to clear
349 */
350static int radeon_vm_clear_bo(struct radeon_device *rdev,
351 struct radeon_bo *bo)
352{
353 struct ttm_validate_buffer tv;
354 struct ww_acquire_ctx ticket;
355 struct list_head head;
356 struct radeon_ib ib;
357 unsigned entries;
358 uint64_t addr;
359 int r;
360
361 memset(&tv, 0, sizeof(tv));
362 tv.bo = &bo->tbo;
363
364 INIT_LIST_HEAD(&head);
365 list_add(&tv.head, &head);
366
367 r = ttm_eu_reserve_buffers(&ticket, &head);
368 if (r)
369 return r;
370
371 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
372 if (r)
373 goto error;
374
375 addr = radeon_bo_gpu_offset(bo);
376 entries = radeon_bo_size(bo) / 8;
377
378 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
379 NULL, entries * 2 + 64);
380 if (r)
381 goto error;
382
383 ib.length_dw = 0;
384
385 radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
386
387 r = radeon_ib_schedule(rdev, &ib, NULL);
388 if (r)
389 goto error;
390
391 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
392 radeon_ib_free(rdev, &ib);
393
394 return 0;
395
396error:
397 ttm_eu_backoff_reservation(&ticket, &head);
398 return r;
399}
400
401/**
402 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
403 *
404 * @rdev: radeon_device pointer
405 * @bo_va: bo_va to store the address
406 * @soffset: requested offset of the buffer in the VM address space
407 * @flags: attributes of pages (read/write/valid/etc.)
408 *
409 * Set offset of @bo_va (cayman+).
410 * Validate and set the offset requested within the vm address space.
411 * Returns 0 for success, error for failure.
412 *
413 * Object has to be reserved!
414 */
415int radeon_vm_bo_set_addr(struct radeon_device *rdev,
416 struct radeon_bo_va *bo_va,
417 uint64_t soffset,
418 uint32_t flags)
419{
420 uint64_t size = radeon_bo_size(bo_va->bo);
421 uint64_t eoffset, last_offset = 0;
422 struct radeon_vm *vm = bo_va->vm;
423 struct radeon_bo_va *tmp;
424 struct list_head *head;
425 unsigned last_pfn, pt_idx;
426 int r;
427
428 if (soffset) {
429 /* make sure object fits at this offset */
430 eoffset = soffset + size;
431 if (soffset >= eoffset) {
432 return -EINVAL;
433 }
434
435 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
436 if (last_pfn > rdev->vm_manager.max_pfn) {
437 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
438 last_pfn, rdev->vm_manager.max_pfn);
439 return -EINVAL;
440 }
441
442 } else {
443 eoffset = last_pfn = 0;
444 }
445
446 mutex_lock(&vm->mutex);
447 head = &vm->va;
448 last_offset = 0;
449 list_for_each_entry(tmp, &vm->va, vm_list) {
450 if (bo_va == tmp) {
451 /* skip over currently modified bo */
452 continue;
453 }
454
455 if (soffset >= last_offset && eoffset <= tmp->soffset) {
456 /* bo can be added before this one */
457 break;
458 }
459 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
460 /* bo and tmp overlap, invalid offset */
461 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
462 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
463 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
464 mutex_unlock(&vm->mutex);
465 return -EINVAL;
466 }
467 last_offset = tmp->eoffset;
468 head = &tmp->vm_list;
469 }
470
471 bo_va->soffset = soffset;
472 bo_va->eoffset = eoffset;
473 bo_va->flags = flags;
474 bo_va->valid = false;
475 list_move(&bo_va->vm_list, head);
476
477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
479
480 if (eoffset > vm->max_pde_used)
481 vm->max_pde_used = eoffset;
482
483 radeon_bo_unreserve(bo_va->bo);
484
485 /* walk over the address space and allocate the page tables */
486 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
487 struct radeon_bo *pt;
488
489 if (vm->page_tables[pt_idx].bo)
490 continue;
491
492 /* drop mutex to allocate and clear page table */
493 mutex_unlock(&vm->mutex);
494
495 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
496 RADEON_GPU_PAGE_SIZE, false,
497 RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
498 if (r)
499 return r;
500
501 r = radeon_vm_clear_bo(rdev, pt);
502 if (r) {
503 radeon_bo_unref(&pt);
504 radeon_bo_reserve(bo_va->bo, false);
505 return r;
506 }
507
508 /* acquire mutex again */
509 mutex_lock(&vm->mutex);
510 if (vm->page_tables[pt_idx].bo) {
511 /* someone else allocated the pt in the meantime */
512 mutex_unlock(&vm->mutex);
513 radeon_bo_unref(&pt);
514 mutex_lock(&vm->mutex);
515 continue;
516 }
517
518 vm->page_tables[pt_idx].addr = 0;
519 vm->page_tables[pt_idx].bo = pt;
520 }
521
522 mutex_unlock(&vm->mutex);
523 return radeon_bo_reserve(bo_va->bo, false);
524}
525
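
The list walk in radeon_vm_bo_set_addr() above keeps the va list sorted and rejects a new mapping when it intersects an existing one, using the usual half-open interval test (eoffset > tmp->soffset && soffset < tmp->eoffset). The test in isolation, with hypothetical ranges:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* half-open [s, e) ranges overlap iff each starts before the other ends */
    static bool ranges_overlap(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
    {
            return e1 > s2 && s1 < e2;
    }

    int main(void)
    {
            printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x1800, 0x3000)); /* 1 */
            printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* 0 */
            return 0;
    }

Because ranges are half-open, a mapping ending exactly where the next one starts is not a conflict, which is why back-to-back mappings are allowed.
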
526/**
527 * radeon_vm_map_gart - get the physical address of a gart page
528 *
529 * @rdev: radeon_device pointer
530 * @addr: the unmapped addr
531 *
532 * Look up the physical address of the page that the pte resolves
533 * to (cayman+).
534 * Returns the physical address of the page.
535 */
536uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
537{
538 uint64_t result;
539
540 /* page table offset */
541 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
542
543 /* in case cpu page size != gpu page size */
544 result |= addr & (~PAGE_MASK);
545
546 return result;
547}
548
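
radeon_vm_map_gart() above looks up the CPU page backing a gart entry and ORs the sub-page offset back in, which matters when the CPU page size exceeds the 4 KiB GPU page size. A sketch of the arithmetic, assuming 64 KiB CPU pages; the one-entry table stands in for rdev->gart.pages_addr[]:

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_PAGE_SHIFT 16                      /* assume 64 KiB CPU pages */
    #define CPU_PAGE_MASK  (~((1ULL << CPU_PAGE_SHIFT) - 1))

    int main(void)
    {
            /* one-entry stand-in for rdev->gart.pages_addr[] */
            uint64_t pages_addr[1] = { 0x0abc0000ULL };
            uint64_t addr = 0x3128;                /* offset of a 4 KiB GPU page */

            uint64_t phys = pages_addr[addr >> CPU_PAGE_SHIFT];
            phys |= addr & ~CPU_PAGE_MASK;         /* keep the sub-page offset */

            printf("gart 0x%llx -> phys 0x%llx\n",
                   (unsigned long long)addr, (unsigned long long)phys);
            return 0;
    }
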
549/**
550 * radeon_vm_page_flags - translate page flags to what the hw uses
551 *
552 * @flags: flags coming from userspace
553 *
554 * Translate the flags the userspace ABI uses to hw flags.
555 */
556static uint32_t radeon_vm_page_flags(uint32_t flags)
557{
558 uint32_t hw_flags = 0;
559 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
560 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
561 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
562 if (flags & RADEON_VM_PAGE_SYSTEM) {
563 hw_flags |= R600_PTE_SYSTEM;
564 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
565 }
566 return hw_flags;
567}
568
569/**
570 * radeon_vm_update_page_directory - make sure that page directory is valid
571 *
572 * @rdev: radeon_device pointer
573 * @vm: requested vm
574 * @start: start of GPU address range
575 * @end: end of GPU address range
576 *
577 * Allocates new page tables if necessary
578 * and updates the page directory (cayman+).
579 * Returns 0 for success, error for failure.
580 *
581 * Global and local mutex must be locked!
582 */
583int radeon_vm_update_page_directory(struct radeon_device *rdev,
584 struct radeon_vm *vm)
585{
586 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
587
588 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
589 uint64_t last_pde = ~0, last_pt = ~0;
590 unsigned count = 0, pt_idx, ndw;
591 struct radeon_ib ib;
592 int r;
593
594 /* padding, etc. */
595 ndw = 64;
596
597 /* assume the worst case */
598 ndw += vm->max_pde_used * 12;
599
600 /* update too big for an IB */
601 if (ndw > 0xfffff)
602 return -ENOMEM;
603
604 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
605 if (r)
606 return r;
607 ib.length_dw = 0;
608
609 /* walk over the address space and update the page directory */
610 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
611 struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
612 uint64_t pde, pt;
613
614 if (bo == NULL)
615 continue;
616
617 pt = radeon_bo_gpu_offset(bo);
618 if (vm->page_tables[pt_idx].addr == pt)
619 continue;
620 vm->page_tables[pt_idx].addr = pt;
621
622 pde = pd_addr + pt_idx * 8;
623 if (((last_pde + 8 * count) != pde) ||
624 ((last_pt + incr * count) != pt)) {
625
626 if (count) {
627 radeon_asic_vm_set_page(rdev, &ib, last_pde,
628 last_pt, count, incr,
629 R600_PTE_VALID);
630 }
631
632 count = 1;
633 last_pde = pde;
634 last_pt = pt;
635 } else {
636 ++count;
637 }
638 }
639
640 if (count)
641 radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
642 incr, R600_PTE_VALID);
643
644 if (ib.length_dw != 0) {
645 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
646 r = radeon_ib_schedule(rdev, &ib, NULL);
647 if (r) {
648 radeon_ib_free(rdev, &ib);
649 return r;
650 }
651 radeon_fence_unref(&vm->fence);
652 vm->fence = radeon_fence_ref(ib.fence);
653 radeon_fence_unref(&vm->last_flush);
654 }
655 radeon_ib_free(rdev, &ib);
656
657 return 0;
658}
659
660/**
661 * radeon_vm_update_ptes - make sure that page tables are valid
662 *
663 * @rdev: radeon_device pointer
664 * @vm: requested vm
665 * @start: start of GPU address range
666 * @end: end of GPU address range
667 * @dst: destination address to map to
668 * @flags: mapping flags
669 *
670 * Update the page tables in the range @start - @end (cayman+).
671 *
672 * Global and local mutex must be locked!
673 */
674static void radeon_vm_update_ptes(struct radeon_device *rdev,
675 struct radeon_vm *vm,
676 struct radeon_ib *ib,
677 uint64_t start, uint64_t end,
678 uint64_t dst, uint32_t flags)
679{
680 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
681
682 uint64_t last_pte = ~0, last_dst = ~0;
683 unsigned count = 0;
684 uint64_t addr;
685
686 start = start / RADEON_GPU_PAGE_SIZE;
687 end = end / RADEON_GPU_PAGE_SIZE;
688
689 /* walk over the address space and update the page tables */
690 for (addr = start; addr < end; ) {
691 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
692 unsigned nptes;
693 uint64_t pte;
694
695 if ((addr & ~mask) == (end & ~mask))
696 nptes = end - addr;
697 else
698 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
699
700 pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
701 pte += (addr & mask) * 8;
702
703 if ((last_pte + 8 * count) != pte) {
704
705 if (count) {
706 radeon_asic_vm_set_page(rdev, ib, last_pte,
707 last_dst, count,
708 RADEON_GPU_PAGE_SIZE,
709 flags);
710 }
711
712 count = nptes;
713 last_pte = pte;
714 last_dst = dst;
715 } else {
716 count += nptes;
717 }
718
719 addr += nptes;
720 dst += nptes * RADEON_GPU_PAGE_SIZE;
721 }
722
723 if (count) {
724 radeon_asic_vm_set_page(rdev, ib, last_pte,
725 last_dst, count,
726 RADEON_GPU_PAGE_SIZE, flags);
727 }
728}
729
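
Both radeon_vm_update_page_directory() and radeon_vm_update_ptes() above batch their writes: as long as successive entry addresses and destinations advance by a fixed stride, the run is extended, and only a break in either stream flushes a set_page call. A standalone sketch of that coalescing; emit_set_page() is a stand-in for radeon_asic_vm_set_page() and the addresses are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for radeon_asic_vm_set_page(): one call per contiguous run */
    static void emit_set_page(uint64_t pe, uint64_t dst, unsigned count)
    {
            printf("set_page: pe 0x%llx dst 0x%llx count %u\n",
                   (unsigned long long)pe, (unsigned long long)dst, count);
    }

    int main(void)
    {
            /* destinations for six 4 KiB pages; note the jump at index 3 */
            uint64_t dst[6] = { 0x0000, 0x1000, 0x2000, 0x9000, 0xa000, 0xb000 };
            uint64_t pte_base = 0x100000;
            uint64_t last_pte = ~0ULL, last_dst = ~0ULL;
            unsigned count = 0;

            for (unsigned i = 0; i < 6; ++i) {
                    uint64_t pte = pte_base + i * 8; /* 8 bytes per entry */

                    if (last_pte + 8 * count != pte ||
                        last_dst + 0x1000 * count != dst[i]) {
                            if (count)
                                    emit_set_page(last_pte, last_dst, count);
                            count = 1;
                            last_pte = pte;
                            last_dst = dst[i];
                    } else {
                            ++count;
                    }
            }
            if (count)
                    emit_set_page(last_pte, last_dst, count);
            return 0;
    }

On the sample input this emits two set_page calls, one covering entries 0-2 and one covering 3-5, instead of six individual writes.
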
730/**
731 * radeon_vm_bo_update - map a bo into the vm page table
732 *
733 * @rdev: radeon_device pointer
734 * @vm: requested vm
735 * @bo: radeon buffer object
736 * @mem: ttm mem
737 *
738 * Fill in the page table entries for @bo (cayman+).
739 * Returns 0 for success, -EINVAL for failure.
740 *
741 * Object has to be reserved and mutex must be locked!
742 */
743int radeon_vm_bo_update(struct radeon_device *rdev,
744 struct radeon_vm *vm,
745 struct radeon_bo *bo,
746 struct ttm_mem_reg *mem)
747{
748 struct radeon_ib ib;
749 struct radeon_bo_va *bo_va;
750 unsigned nptes, ndw;
751 uint64_t addr;
752 int r;
753
754 bo_va = radeon_vm_bo_find(vm, bo);
755 if (bo_va == NULL) {
756 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
757 return -EINVAL;
758 }
759
760 if (!bo_va->soffset) {
761 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
762 bo, vm);
763 return -EINVAL;
764 }
765
766 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
767 return 0;
768
769 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
770 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
771 if (mem) {
772 addr = mem->start << PAGE_SHIFT;
773 if (mem->mem_type != TTM_PL_SYSTEM) {
774 bo_va->flags |= RADEON_VM_PAGE_VALID;
775 bo_va->valid = true;
776 }
777 if (mem->mem_type == TTM_PL_TT) {
778 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
779 } else {
780 addr += rdev->vm_manager.vram_base_offset;
781 }
782 } else {
783 addr = 0;
784 bo_va->valid = false;
785 }
786
787 trace_radeon_vm_bo_update(bo_va);
788
789 nptes = radeon_bo_ngpu_pages(bo);
790
791 /* padding, etc. */
792 ndw = 64;
793
794 if (RADEON_VM_BLOCK_SIZE > 11)
795 /* reserve space for one header for every 2k dwords */
796 ndw += (nptes >> 11) * 4;
797 else
798 /* reserve space for one header for
799 every (1 << BLOCK_SIZE) entries */
800 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
801
802 /* reserve space for pte addresses */
803 ndw += nptes * 2;
804
805 /* update too big for an IB */
806 if (ndw > 0xfffff)
807 return -ENOMEM;
808
809 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
810 if (r)
811 return r;
812 ib.length_dw = 0;
813
814 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
815 addr, radeon_vm_page_flags(bo_va->flags));
816
817 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
818 r = radeon_ib_schedule(rdev, &ib, NULL);
819 if (r) {
820 radeon_ib_free(rdev, &ib);
821 return r;
822 }
823 radeon_fence_unref(&vm->fence);
824 vm->fence = radeon_fence_ref(ib.fence);
825 radeon_ib_free(rdev, &ib);
826 radeon_fence_unref(&vm->last_flush);
827
828 return 0;
829}
830
831/**
832 * radeon_vm_bo_rmv - remove a bo from a specific vm
833 *
834 * @rdev: radeon_device pointer
835 * @bo_va: requested bo_va
836 *
837 * Remove @bo_va->bo from the requested vm (cayman+).
838 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
839 * remove the ptes for @bo_va in the page table.
840 * Returns 0 for success.
841 *
842 * Object has to be reserved!
843 */
844int radeon_vm_bo_rmv(struct radeon_device *rdev,
845 struct radeon_bo_va *bo_va)
846{
847 int r = 0;
848
849 mutex_lock(&bo_va->vm->mutex);
850 if (bo_va->soffset)
851 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
852
853 list_del(&bo_va->vm_list);
854 mutex_unlock(&bo_va->vm->mutex);
855 list_del(&bo_va->bo_list);
856
857 kfree(bo_va);
858 return r;
859}
860
861/**
862 * radeon_vm_bo_invalidate - mark the bo as invalid
863 *
864 * @rdev: radeon_device pointer
865 * @vm: requested vm
866 * @bo: radeon buffer object
867 *
868 * Mark @bo as invalid (cayman+).
869 */
870void radeon_vm_bo_invalidate(struct radeon_device *rdev,
871 struct radeon_bo *bo)
872{
873 struct radeon_bo_va *bo_va;
874
875 list_for_each_entry(bo_va, &bo->va, bo_list) {
876 bo_va->valid = false;
877 }
878}
879
880/**
881 * radeon_vm_init - initialize a vm instance
882 *
883 * @rdev: radeon_device pointer
884 * @vm: requested vm
885 *
886 * Init @vm fields (cayman+).
887 */
888int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
889{
890 unsigned pd_size, pd_entries, pts_size;
891 int r;
892
893 vm->id = 0;
894 vm->fence = NULL;
895 vm->last_flush = NULL;
896 vm->last_id_use = NULL;
897 mutex_init(&vm->mutex);
898 INIT_LIST_HEAD(&vm->va);
899
900 pd_size = radeon_vm_directory_size(rdev);
901 pd_entries = radeon_vm_num_pdes(rdev);
902
903 /* allocate page table array */
904 pts_size = pd_entries * sizeof(struct radeon_vm_pt);
905 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
906 if (vm->page_tables == NULL) {
907 DRM_ERROR("Cannot allocate memory for page table array\n");
908 return -ENOMEM;
909 }
910
911 r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
912 RADEON_GEM_DOMAIN_VRAM, NULL,
913 &vm->page_directory);
914 if (r)
915 return r;
916
917 r = radeon_vm_clear_bo(rdev, vm->page_directory);
918 if (r) {
919 radeon_bo_unref(&vm->page_directory);
920 vm->page_directory = NULL;
921 return r;
922 }
923
924 return 0;
925}
926
927/**
928 * radeon_vm_fini - tear down a vm instance
929 *
930 * @rdev: radeon_device pointer
931 * @vm: requested vm
932 *
933 * Tear down @vm (cayman+).
934 * Unbind the VM and remove all bos from the vm bo list
935 */
936void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
937{
938 struct radeon_bo_va *bo_va, *tmp;
939 int i, r;
940
941 if (!list_empty(&vm->va)) {
942 dev_err(rdev->dev, "still active bo inside vm\n");
943 }
944 list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
945 list_del_init(&bo_va->vm_list);
946 r = radeon_bo_reserve(bo_va->bo, false);
947 if (!r) {
948 list_del_init(&bo_va->bo_list);
949 radeon_bo_unreserve(bo_va->bo);
950 kfree(bo_va);
951 }
952 }
953
954
955 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
956 radeon_bo_unref(&vm->page_tables[i].bo);
957 kfree(vm->page_tables);
958
959 radeon_bo_unref(&vm->page_directory);
960
961 radeon_fence_unref(&vm->fence);
962 radeon_fence_unref(&vm->last_flush);
963 radeon_fence_unref(&vm->last_id_use);
964
965 mutex_destroy(&vm->mutex);
966}
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 8512085b0aef..02f7710de470 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -807,9 +807,6 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
807 power_info->pplib.ucNumStates, GFP_KERNEL); 807 power_info->pplib.ucNumStates, GFP_KERNEL);
808 if (!rdev->pm.dpm.ps) 808 if (!rdev->pm.dpm.ps)
809 return -ENOMEM; 809 return -ENOMEM;
810 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
811 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
812 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
813 810
814 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 811 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
815 power_state = (union pplib_power_state *) 812 power_state = (union pplib_power_state *)
@@ -859,6 +856,10 @@ int rs780_dpm_init(struct radeon_device *rdev)
859 return -ENOMEM; 856 return -ENOMEM;
860 rdev->pm.dpm.priv = pi; 857 rdev->pm.dpm.priv = pi;
861 858
859 ret = r600_get_platform_caps(rdev);
860 if (ret)
861 return ret;
862
862 ret = rs780_parse_power_table(rdev); 863 ret = rs780_parse_power_table(rdev);
863 if (ret) 864 if (ret)
864 return ret; 865 return ret;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index bebf31c4d841..e7045b085715 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1891,9 +1891,6 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
1891 power_info->pplib.ucNumStates, GFP_KERNEL); 1891 power_info->pplib.ucNumStates, GFP_KERNEL);
1892 if (!rdev->pm.dpm.ps) 1892 if (!rdev->pm.dpm.ps)
1893 return -ENOMEM; 1893 return -ENOMEM;
1894 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
1895 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
1896 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
1897 1894
1898 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 1895 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
1899 power_state = (union pplib_power_state *) 1896 power_state = (union pplib_power_state *)
@@ -1943,6 +1940,10 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
1943 return -ENOMEM; 1940 return -ENOMEM;
1944 rdev->pm.dpm.priv = pi; 1941 rdev->pm.dpm.priv = pi;
1945 1942
1943 ret = r600_get_platform_caps(rdev);
1944 if (ret)
1945 return ret;
1946
1946 ret = rv6xx_parse_power_table(rdev); 1947 ret = rv6xx_parse_power_table(rdev);
1947 if (ret) 1948 if (ret)
1948 return ret; 1949 return ret;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index b5f63f5e22a3..da041a43d82e 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2281,9 +2281,6 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
2281 power_info->pplib.ucNumStates, GFP_KERNEL); 2281 power_info->pplib.ucNumStates, GFP_KERNEL);
2282 if (!rdev->pm.dpm.ps) 2282 if (!rdev->pm.dpm.ps)
2283 return -ENOMEM; 2283 return -ENOMEM;
2284 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2285 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2286 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2287 2284
2288 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 2285 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2289 power_state = (union pplib_power_state *) 2286 power_state = (union pplib_power_state *)
@@ -2361,6 +2358,10 @@ int rv770_dpm_init(struct radeon_device *rdev)
2361 pi->min_vddc_in_table = 0; 2358 pi->min_vddc_in_table = 0;
2362 pi->max_vddc_in_table = 0; 2359 pi->max_vddc_in_table = 0;
2363 2360
2361 ret = r600_get_platform_caps(rdev);
2362 if (ret)
2363 return ret;
2364
2364 ret = rv7xx_parse_power_table(rdev); 2365 ret = rv7xx_parse_power_table(rdev);
2365 if (ret) 2366 if (ret)
2366 return ret; 2367 return ret;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9a124d0608b3..d589475fe9e6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3434,8 +3434,6 @@ static int si_cp_resume(struct radeon_device *rdev)
3434 3434
3435 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); 3435 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
3436 3436
3437 ring->rptr = RREG32(CP_RB0_RPTR);
3438
3439 /* ring1 - compute only */ 3437 /* ring1 - compute only */
3440 /* Set ring buffer size */ 3438 /* Set ring buffer size */
3441 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 3439 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
@@ -3460,8 +3458,6 @@ static int si_cp_resume(struct radeon_device *rdev)
3460 3458
3461 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); 3459 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
3462 3460
3463 ring->rptr = RREG32(CP_RB1_RPTR);
3464
3465 /* ring2 - compute only */ 3461 /* ring2 - compute only */
3466 /* Set ring buffer size */ 3462 /* Set ring buffer size */
3467 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 3463 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
@@ -3486,8 +3482,6 @@ static int si_cp_resume(struct radeon_device *rdev)
3486 3482
3487 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); 3483 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
3488 3484
3489 ring->rptr = RREG32(CP_RB2_RPTR);
3490
3491 /* start the rings */ 3485 /* start the rings */
3492 si_cp_start(rdev); 3486 si_cp_start(rdev);
3493 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; 3487 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
@@ -3872,11 +3866,9 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3872 if (!(reset_mask & (RADEON_RESET_GFX | 3866 if (!(reset_mask & (RADEON_RESET_GFX |
3873 RADEON_RESET_COMPUTE | 3867 RADEON_RESET_COMPUTE |
3874 RADEON_RESET_CP))) { 3868 RADEON_RESET_CP))) {
3875 radeon_ring_lockup_update(ring); 3869 radeon_ring_lockup_update(rdev, ring);
3876 return false; 3870 return false;
3877 } 3871 }
3878 /* force CP activities */
3879 radeon_ring_force_activity(rdev, ring);
3880 return radeon_ring_test_lockup(rdev, ring); 3872 return radeon_ring_test_lockup(rdev, ring);
3881} 3873}
3882 3874
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 59be2cfcbb47..cf0fdad8c278 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -49,11 +49,9 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
49 mask = RADEON_RESET_DMA1; 49 mask = RADEON_RESET_DMA1;
50 50
51 if (!(reset_mask & mask)) { 51 if (!(reset_mask & mask)) {
52 radeon_ring_lockup_update(ring); 52 radeon_ring_lockup_update(rdev, ring);
53 return false; 53 return false;
54 } 54 }
55 /* force ring activities */
56 radeon_ring_force_activity(rdev, ring);
57 return radeon_ring_test_lockup(rdev, ring); 55 return radeon_ring_test_lockup(rdev, ring);
58} 56}
59 57
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0a2f5b4bca43..9a3567bedaae 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6271,9 +6271,6 @@ static int si_parse_power_table(struct radeon_device *rdev)
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
 	power_state_offset = (u8 *)state_array->states;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		u8 *idx;
 		power_state = (union pplib_power_state *)power_state_offset;
@@ -6350,6 +6347,10 @@ int si_dpm_init(struct radeon_device *rdev)
 	pi->min_vddc_in_table = 0;
 	pi->max_vddc_in_table = 0;
 
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = si_parse_power_table(rdev);
 	if (ret)
 		return ret;
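
The same three lines are dropped from the sumo and trinity power-table parsers below; all three *_dpm_init() functions instead call r600_get_platform_caps() before parsing the power table. A hedged reconstruction of that shared helper (it lives in r600_dpm.c, outside this diff; the union and table names follow the usual radeon ATOM parsing style and are assumptions here):

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* the three fields each parser used to read for itself */
	rdev->pm.dpm.platform_caps =
		le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time =
		le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time =
		le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}
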
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 9239a6d29128..683532f84931 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1798,4 +1798,51 @@
 #define	DMA_PACKET_CONSTANT_FILL	0xd
 #define	DMA_PACKET_NOP			0xf
 
+#define VCE_STATUS				0x20004
+#define VCE_VCPU_CNTL				0x20014
+#define		VCE_CLK_EN			(1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0			0x20024
+#define VCE_VCPU_CACHE_SIZE0			0x20028
+#define VCE_VCPU_CACHE_OFFSET1			0x2002c
+#define VCE_VCPU_CACHE_SIZE1			0x20030
+#define VCE_VCPU_CACHE_OFFSET2			0x20034
+#define VCE_VCPU_CACHE_SIZE2			0x20038
+#define VCE_SOFT_RESET				0x20120
+#define 	VCE_ECPU_SOFT_RESET		(1 << 0)
+#define 	VCE_FME_SOFT_RESET		(1 << 2)
+#define VCE_RB_BASE_LO2				0x2016c
+#define VCE_RB_BASE_HI2				0x20170
+#define VCE_RB_SIZE2				0x20174
+#define VCE_RB_RPTR2				0x20178
+#define VCE_RB_WPTR2				0x2017c
+#define VCE_RB_BASE_LO				0x20180
+#define VCE_RB_BASE_HI				0x20184
+#define VCE_RB_SIZE				0x20188
+#define VCE_RB_RPTR				0x2018c
+#define VCE_RB_WPTR				0x20190
+#define VCE_CLOCK_GATING_A			0x202f8
+#define VCE_CLOCK_GATING_B			0x202fc
+#define VCE_UENC_CLOCK_GATING			0x205bc
+#define VCE_UENC_REG_CLOCK_GATING		0x205c0
+#define VCE_FW_REG_STATUS			0x20e10
+#	define VCE_FW_REG_STATUS_BUSY		(1 << 0)
+#	define VCE_FW_REG_STATUS_PASS		(1 << 3)
+#	define VCE_FW_REG_STATUS_DONE		(1 << 11)
+#define VCE_LMI_FW_START_KEYSEL			0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL		0x20e20
+#define VCE_LMI_CTRL2				0x20e74
+#define VCE_LMI_CTRL				0x20e98
+#define VCE_LMI_VM_CTRL				0x20ea0
+#define VCE_LMI_SWAP_CNTL			0x20eb4
+#define VCE_LMI_SWAP_CNTL1			0x20eb8
+#define VCE_LMI_CACHE_CTRL			0x20ef4
+
+#define VCE_CMD_NO_OP				0x00000000
+#define VCE_CMD_END				0x00000001
+#define VCE_CMD_IB				0x00000002
+#define VCE_CMD_FENCE				0x00000003
+#define VCE_CMD_TRAP				0x00000004
+#define VCE_CMD_IB_AUTO				0x00000005
+#define VCE_CMD_SEMAPHORE			0x00000006
+
 #endif
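
The VCE code added below drives these registers almost exclusively through radeon's read-modify-write macro, whose semantics are easy to misread. A sketch of the definition, matching the form used in radeon.h (bits set in the mask are preserved, the rest are replaced by val):

#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);		/* keep masked bits */	\
		tmp_ |= ((val) & ~(mask)); /* write the rest */	\
		WREG32(reg, tmp_);				\
	} while (0)

So WREG32_P(VCE_STATUS, 1, ~1) touches only bit 0 (the BUSY flag), and WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN) enables the clock without disturbing the other control bits.
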
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 8b47b3cd0357..3f0e8d7b8dbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1484,9 +1484,6 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
 	power_state_offset = (u8 *)state_array->states;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		u8 *idx;
 		power_state = (union pplib_power_state *)power_state_offset;
@@ -1772,6 +1769,10 @@ int sumo_dpm_init(struct radeon_device *rdev)
 
 	sumo_construct_boot_and_acpi_state(rdev);
 
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = sumo_parse_power_table(rdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2da0e17eb960..2a2822c03329 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1694,9 +1694,6 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
 	if (!rdev->pm.dpm.ps)
 		return -ENOMEM;
 	power_state_offset = (u8 *)state_array->states;
-	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		u8 *idx;
 		power_state = (union pplib_power_state *)power_state_offset;
@@ -1895,6 +1892,10 @@ int trinity_dpm_init(struct radeon_device *rdev)
 
 	trinity_construct_boot_state(rdev);
 
+	ret = r600_get_platform_caps(rdev);
+	if (ret)
+		return ret;
+
 	ret = trinity_parse_power_table(rdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index d4a68af1a279..0a243f0e5d68 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -262,7 +262,7 @@ int uvd_v1_0_start(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(UVD_RBC_RB_RPTR, 0x0);
 
-	ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+	ring->wptr = RREG32(UVD_RBC_RB_RPTR);
 	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
 
 	/* set the ring address */
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
new file mode 100644
index 000000000000..b44d9c842f7b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+/**
+ * vce_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_RPTR);
+	else
+		return RREG32(VCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_WPTR);
+	else
+		return RREG32(VCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		WREG32(VCE_RB_WPTR, ring->wptr);
+	else
+		WREG32(VCE_RB_WPTR2, ring->wptr);
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the VCE block
+ */
+int vce_v1_0_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int i, j, r;
+
+	/* set BUSY flag */
+	WREG32_P(VCE_STATUS, 1, ~1);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	WREG32(VCE_RB_RPTR, ring->wptr);
+	WREG32(VCE_RB_WPTR, ring->wptr);
+	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	WREG32(VCE_RB_RPTR2, ring->wptr);
+	WREG32(VCE_RB_WPTR2, ring->wptr);
+	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);
+
+	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);
+
+	WREG32_P(VCE_SOFT_RESET,
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	mdelay(100);
+
+	WREG32_P(VCE_SOFT_RESET, 0, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(VCE_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		r = -1;
+	}
+
+	/* clear BUSY flag */
+	WREG32_P(VCE_STATUS, 0, ~1);
+
+	if (r) {
+		DRM_ERROR("VCE not responding, giving up!!!\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int vce_v1_0_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	r = vce_v1_0_start(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	DRM_INFO("VCE initialized successfully.\n");
+
+	return 0;
+}
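
These entry points only matter once they are wired into the per-ASIC function tables. A hedged sketch of the hookup (the radeon_vce_* helpers come from radeon_vce.c elsewhere in this series; the exact table name and layout in radeon_asic.c are assumptions):

static struct radeon_asic_ring vce_ring = {
	.ib_execute = &radeon_vce_ib_execute,
	.emit_fence = &radeon_vce_fence_emit,
	.emit_semaphore = &radeon_vce_semaphore_emit,
	.cs_parse = &radeon_vce_cs_parse,
	.ring_test = &radeon_vce_ring_test,
	.ib_test = &radeon_vce_ib_test,
	.is_lockup = &radeon_ring_test_lockup,
	.get_rptr = &vce_v1_0_get_rptr,
	.get_wptr = &vce_v1_0_get_wptr,
	.set_wptr = &vce_v1_0_set_wptr,
};
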
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
new file mode 100644
index 000000000000..1ac7bb825a1b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 tmp;
+
+	if (gated) {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+	} else {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe7;
+		tmp &= ~0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0x1fe000;
+		tmp &= ~0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v2_0_set_dyn_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 orig, tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp &= ~0x00060006;
+	if (gated) {
+		tmp |= 0xe10000;
+	} else {
+		tmp |= 0xe1;
+		tmp &= ~0xe10000;
+	}
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~0x1fe000;
+	tmp &= ~0xff000000;
+	if (tmp != orig)
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3fc;
+	if (tmp != orig)
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+	if (gated)
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_disable_cg(struct radeon_device *rdev)
+{
+	WREG32(VCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+	bool sw_cg = false;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, true);
+		else
+			vce_v2_0_set_dyn_cg(rdev, true);
+	} else {
+		vce_v2_0_disable_cg(rdev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, false);
+		else
+			vce_v2_0_set_dyn_cg(rdev, false);
+	}
+}
+
+static void vce_v2_0_init_cg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_A);
+	tmp &= ~(CGC_CLK_GATE_DLY_TIMER_MASK | CGC_CLK_GATER_OFF_DLY_TIMER_MASK);
+	tmp |= (CGC_CLK_GATE_DLY_TIMER(0) | CGC_CLK_GATER_OFF_DLY_TIMER(4));
+	tmp |= CGC_UENC_WAIT_AWAKE;
+	WREG32(VCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK);
+	tmp |= (CLOCK_ON_DELAY(0) | CLOCK_OFF_DELAY(4));
+	WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp |= 0x10;
+	tmp &= ~0x100000;
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+}
+
+int vce_v2_0_resume(struct radeon_device *rdev)
+{
+	uint64_t addr = rdev->vce.gpu_addr;
+	uint32_t size;
+
+	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(VCE_CLOCK_GATING_B, 0xf7);
+
+	WREG32(VCE_LMI_CTRL, 0x00398000);
+	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(VCE_LMI_SWAP_CNTL, 0);
+	WREG32(VCE_LMI_SWAP_CNTL1, 0);
+	WREG32(VCE_LMI_VM_CTRL, 0);
+
+	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
+	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_VCE_STACK_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = RADEON_VCE_HEAP_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
+
+	WREG32_P(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN,
+		 ~VCE_SYS_INT_TRAP_INTERRUPT_EN);
+
+	vce_v2_0_init_cg(rdev);
+
+	return 0;
+}
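
vce_v2_0_resume() above carves one contiguous GPU buffer into three consecutive VCPU cache regions: firmware image, stack, heap. A sketch of the matching allocation size, assuming the constants mirror how radeon_vce.c sizes the BO behind rdev->vce.gpu_addr (the helper name is illustrative):

static unsigned long example_vce_bo_size(struct radeon_device *rdev)
{
	/* firmware + stack + heap, matching the offsets programmed above */
	return RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
	       RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
}
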
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index fbf4be316d0b..299267db2898 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -299,7 +299,7 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
 {
 	struct drm_crtc *crtc = &rcrtc->crtc;
 
-	rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+	rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
 	rcar_du_plane_update_base(rcrtc->plane);
 }
 
@@ -358,10 +358,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
 	const struct rcar_du_format_info *format;
 	int ret;
 
-	format = rcar_du_format_info(crtc->fb->pixel_format);
+	format = rcar_du_format_info(crtc->primary->fb->pixel_format);
 	if (format == NULL) {
 		dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
-			crtc->fb->pixel_format);
+			crtc->primary->fb->pixel_format);
 		ret = -EINVAL;
 		goto error;
 	}
@@ -377,7 +377,7 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
 	rcrtc->plane->width = mode->hdisplay;
 	rcrtc->plane->height = mode->vdisplay;
 
-	rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+	rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
 
 	rcrtc->outputs = 0;
 
@@ -510,7 +510,7 @@ static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 	rcar_du_crtc_update_base(rcrtc);
 
 	if (event) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fbeabd9a281f..a87edfac111f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -248,7 +248,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 			continue;
 		}
 
-		rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
+		ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
+					   pdata);
+		if (ret < 0)
+			return ret;
 	}
 
 	/* Set the possible CRTCs and possible clones. There's always at least
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0428076f1ce8..e9e5e6d368cc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -173,7 +173,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
 	if (scrtc->started)
 		return;
 
-	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
 	if (WARN_ON(format == NULL))
 		return;
 
@@ -247,7 +247,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
 	lcdc_write(sdev, LDDDSR, value);
 
 	/* Setup planes. */
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
 		if (plane->crtc == crtc)
 			shmob_drm_plane_setup(plane);
 	}
@@ -303,7 +303,7 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
 					int x, int y)
 {
 	struct drm_crtc *crtc = &scrtc->crtc;
-	struct drm_framebuffer *fb = crtc->fb;
+	struct drm_framebuffer *fb = crtc->primary->fb;
 	struct shmob_drm_device *sdev = crtc->dev->dev_private;
 	struct drm_gem_cma_object *gem;
 	unsigned int bpp;
@@ -382,15 +382,15 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
 	const struct shmob_drm_format_info *format;
 	void *cache;
 
-	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
 	if (format == NULL) {
 		dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
-			crtc->fb->pixel_format);
+			crtc->primary->fb->pixel_format);
 		return -EINVAL;
 	}
 
 	scrtc->format = format;
-	scrtc->line_size = crtc->fb->pitches[0];
+	scrtc->line_size = crtc->primary->fb->pitches[0];
 
 	if (sdev->meram) {
 		/* Enable MERAM cache if configured. We need to de-init
@@ -402,7 +402,7 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
-					    crtc->fb->pitches[0],
+					    crtc->primary->fb->pitches[0],
 					    adjusted_mode->vdisplay,
 					    format->meram,
 					    &scrtc->line_size);
@@ -489,7 +489,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 	shmob_drm_crtc_update_base(scrtc);
 
 	if (event) {
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 8d220afbd85f..d43f21bb4596 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -11,6 +11,8 @@ tegra-drm-y := \
 	hdmi.o \
 	mipi-phy.o \
 	dsi.o \
+	sor.o \
+	dpaux.o \
 	gr2d.o \
 	gr3d.o
 
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
index e38e5967d77b..71cef5c13dc8 100644
--- a/drivers/gpu/drm/tegra/bus.c
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -63,7 +63,7 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
 	return 0;
 
 err_free:
-	drm_dev_free(drm);
+	drm_dev_unref(drm);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 9336006b475d..36c717af6cf9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -235,14 +235,14 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 	if (!dc->event)
 		return;
 
-	bo = tegra_fb_get_plane(crtc->fb, 0);
+	bo = tegra_fb_get_plane(crtc->primary->fb, 0);
 
 	/* check if new start address has been latched */
 	tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
 	base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
 
-	if (base == bo->paddr + crtc->fb->offsets[0]) {
+	if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
 		spin_lock_irqsave(&drm->event_lock, flags);
 		drm_send_vblank_event(drm, dc->pipe, dc->event);
 		drm_vblank_put(drm, dc->pipe);
@@ -284,7 +284,7 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	tegra_dc_set_base(dc, 0, 0, fb);
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 
 	return 0;
 }
@@ -645,7 +645,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *adjusted,
 			       int x, int y, struct drm_framebuffer *old_fb)
 {
-	struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
+	struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct tegra_dc_window window;
 	unsigned long div, value;
@@ -682,9 +682,9 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 	window.dst.y = 0;
 	window.dst.w = mode->hdisplay;
 	window.dst.h = mode->vdisplay;
-	window.format = tegra_dc_format(crtc->fb->pixel_format);
-	window.bits_per_pixel = crtc->fb->bits_per_pixel;
-	window.stride[0] = crtc->fb->pitches[0];
+	window.format = tegra_dc_format(crtc->primary->fb->pixel_format);
+	window.bits_per_pixel = crtc->primary->fb->bits_per_pixel;
+	window.stride[0] = crtc->primary->fb->pitches[0];
 	window.base[0] = bo->paddr;
 
 	err = tegra_dc_setup_window(dc, 0, &window);
@@ -699,7 +699,7 @@ static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 
-	return tegra_dc_set_base(dc, x, y, crtc->fb);
+	return tegra_dc_set_base(dc, x, y, crtc->primary->fb);
 }
 
 static void tegra_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 3c2c0ea1cd87..c94101494826 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -118,6 +118,7 @@
 #define DC_DISP_DISP_WIN_OPTIONS		0x402
 #define HDMI_ENABLE	(1 << 30)
 #define DSI_ENABLE	(1 << 29)
+#define SOR_ENABLE	(1 << 25)
 
 #define DC_DISP_DISP_MEM_HIGH_PRIORITY		0x403
 #define CURSOR_THRESHOLD(x)	(((x) & 0x03) << 24)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
new file mode 100644
index 000000000000..d536ed381fbd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+
+#include "dpaux.h"
+#include "drm.h"
+
+static DEFINE_MUTEX(dpaux_lock);
+static LIST_HEAD(dpaux_list);
+
+struct tegra_dpaux {
+	struct drm_dp_aux aux;
+	struct device *dev;
+
+	void __iomem *regs;
+	int irq;
+
+	struct tegra_output *output;
+
+	struct reset_control *rst;
+	struct clk *clk_parent;
+	struct clk *clk;
+
+	struct regulator *vdd;
+
+	struct completion complete;
+	struct list_head list;
+};
+
+static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
+{
+	return container_of(aux, struct tegra_dpaux, aux);
+}
+
+static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux,
+					      unsigned long offset)
+{
+	return readl(dpaux->regs + (offset << 2));
+}
+
+static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
+				      unsigned long value,
+				      unsigned long offset)
+{
+	writel(value, dpaux->regs + (offset << 2));
+}
+
+static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
+				   size_t size)
+{
+	unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
+	size_t i, j;
+
+	for (i = 0; i < size; i += 4) {
+		size_t num = min_t(size_t, size - i, 4);
+		unsigned long value = 0;
+
+		for (j = 0; j < num; j++)
+			value |= buffer[i + j] << (j * 8);
+
+		tegra_dpaux_writel(dpaux, value, offset++);
+	}
+}
+
+static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
+				  size_t size)
+{
+	unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
+	size_t i, j;
+
+	for (i = 0; i < size; i += 4) {
+		size_t num = min_t(size_t, size - i, 4);
+		unsigned long value;
+
+		value = tegra_dpaux_readl(dpaux, offset++);
+
+		for (j = 0; j < num; j++)
+			buffer[i + j] = value >> (j * 8);
+	}
+}
+
+static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	unsigned long value = DPAUX_DP_AUXCTL_TRANSACTREQ;
+	unsigned long timeout = msecs_to_jiffies(250);
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
+	unsigned long status;
+	ssize_t ret = 0;
+
+	if (msg->size < 1 || msg->size > 16)
+		return -EINVAL;
+
+	tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_I2C_WRITE:
+		if (msg->request & DP_AUX_I2C_MOT)
+			value = DPAUX_DP_AUXCTL_CMD_MOT_WR;
+		else
+			value = DPAUX_DP_AUXCTL_CMD_I2C_WR;
+
+		break;
+
+	case DP_AUX_I2C_READ:
+		if (msg->request & DP_AUX_I2C_MOT)
+			value = DPAUX_DP_AUXCTL_CMD_MOT_RD;
+		else
+			value = DPAUX_DP_AUXCTL_CMD_I2C_RD;
+
+		break;
+
+	case DP_AUX_I2C_STATUS:
+		if (msg->request & DP_AUX_I2C_MOT)
+			value = DPAUX_DP_AUXCTL_CMD_MOT_RQ;
+		else
+			value = DPAUX_DP_AUXCTL_CMD_I2C_RQ;
+
+		break;
+
+	case DP_AUX_NATIVE_WRITE:
+		value = DPAUX_DP_AUXCTL_CMD_AUX_WR;
+		break;
+
+	case DP_AUX_NATIVE_READ:
+		value = DPAUX_DP_AUXCTL_CMD_AUX_RD;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	value |= DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
+	tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+	if ((msg->request & DP_AUX_I2C_READ) == 0) {
+		tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size);
+		ret = msg->size;
+	}
+
+	/* start transaction */
+	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL);
+	value |= DPAUX_DP_AUXCTL_TRANSACTREQ;
+	tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+	status = wait_for_completion_timeout(&dpaux->complete, timeout);
+	if (!status)
+		return -ETIMEDOUT;
+
+	/* read status and clear errors */
+	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+	tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT);
+
+	if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR)
+		return -ETIMEDOUT;
+
+	if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) ||
+	    (value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) ||
+	    (value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR))
+		return -EIO;
+
+	switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
+	case 0x00:
+		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+		break;
+
+	case 0x01:
+		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+		break;
+
+	case 0x02:
+		msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+		break;
+
+	case 0x04:
+		msg->reply = DP_AUX_I2C_REPLY_NACK;
+		break;
+
+	case 0x08:
+		msg->reply = DP_AUX_I2C_REPLY_DEFER;
+		break;
+	}
+
+	if (msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+		if (msg->request & DP_AUX_I2C_READ) {
+			size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
+
+			if (WARN_ON(count != msg->size))
+				count = min_t(size_t, count, msg->size);
+
+			tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
+			ret = count;
+		}
+	}
+
+	return ret;
+}
+
+static irqreturn_t tegra_dpaux_irq(int irq, void *data)
+{
+	struct tegra_dpaux *dpaux = data;
+	irqreturn_t ret = IRQ_HANDLED;
+	unsigned long value;
+
+	/* clear interrupts */
+	value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
+	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+	if (value & DPAUX_INTR_PLUG_EVENT) {
+		if (dpaux->output) {
+			drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+		}
+	}
+
+	if (value & DPAUX_INTR_UNPLUG_EVENT) {
+		if (dpaux->output)
+			drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+	}
+
+	if (value & DPAUX_INTR_IRQ_EVENT) {
+		/* TODO: handle this */
+	}
+
+	if (value & DPAUX_INTR_AUX_DONE)
+		complete(&dpaux->complete);
+
+	return ret;
+}
+
+static int tegra_dpaux_probe(struct platform_device *pdev)
+{
+	struct tegra_dpaux *dpaux;
+	struct resource *regs;
+	unsigned long value;
+	int err;
+
+	dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
+	if (!dpaux)
+		return -ENOMEM;
+
+	init_completion(&dpaux->complete);
+	INIT_LIST_HEAD(&dpaux->list);
+	dpaux->dev = &pdev->dev;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(dpaux->regs))
+		return PTR_ERR(dpaux->regs);
+
+	dpaux->irq = platform_get_irq(pdev, 0);
+	if (dpaux->irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ\n");
+		return -ENXIO;
+	}
+
+	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+	if (IS_ERR(dpaux->rst))
+		return PTR_ERR(dpaux->rst);
+
+	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dpaux->clk))
+		return PTR_ERR(dpaux->clk);
+
+	err = clk_prepare_enable(dpaux->clk);
+	if (err < 0)
+		return err;
+
+	reset_control_deassert(dpaux->rst);
+
+	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
+	if (IS_ERR(dpaux->clk_parent))
+		return PTR_ERR(dpaux->clk_parent);
+
+	err = clk_prepare_enable(dpaux->clk_parent);
+	if (err < 0)
+		return err;
+
+	err = clk_set_rate(dpaux->clk_parent, 270000000);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
+			err);
+		return err;
+	}
+
+	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(dpaux->vdd))
+		return PTR_ERR(dpaux->vdd);
+
+	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
+			       dev_name(dpaux->dev), dpaux);
+	if (err < 0) {
+		dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
+			dpaux->irq, err);
+		return err;
+	}
+
+	dpaux->aux.transfer = tegra_dpaux_transfer;
+	dpaux->aux.dev = &pdev->dev;
+
+	err = drm_dp_aux_register_i2c_bus(&dpaux->aux);
+	if (err < 0)
+		return err;
+
+	/* enable and clear all interrupts */
+	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
+		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
+	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX);
+	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+	mutex_lock(&dpaux_lock);
+	list_add_tail(&dpaux->list, &dpaux_list);
+	mutex_unlock(&dpaux_lock);
+
+	platform_set_drvdata(pdev, dpaux);
+
+	return 0;
+}
+
+static int tegra_dpaux_remove(struct platform_device *pdev)
+{
+	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+
+	drm_dp_aux_unregister_i2c_bus(&dpaux->aux);
+
+	mutex_lock(&dpaux_lock);
+	list_del(&dpaux->list);
+	mutex_unlock(&dpaux_lock);
+
+	clk_disable_unprepare(dpaux->clk_parent);
+	reset_control_assert(dpaux->rst);
+	clk_disable_unprepare(dpaux->clk);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_dpaux_of_match[] = {
+	{ .compatible = "nvidia,tegra124-dpaux", },
+	{ },
+};
+
+struct platform_driver tegra_dpaux_driver = {
+	.driver = {
+		.name = "tegra-dpaux",
+		.of_match_table = tegra_dpaux_of_match,
+	},
+	.probe = tegra_dpaux_probe,
+	.remove = tegra_dpaux_remove,
+};
+
+struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
+{
+	struct tegra_dpaux *dpaux;
+
+	mutex_lock(&dpaux_lock);
+
+	list_for_each_entry(dpaux, &dpaux_list, list)
+		if (np == dpaux->dev->of_node) {
+			mutex_unlock(&dpaux_lock);
+			return dpaux;
+		}
+
+	mutex_unlock(&dpaux_lock);
+
+	return NULL;
+}
+
+int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
+{
+	unsigned long timeout;
+	int err;
+
+	dpaux->output = output;
+
+	err = regulator_enable(dpaux->vdd);
+	if (err < 0)
+		return err;
+
+	timeout = jiffies + msecs_to_jiffies(250);
+
+	while (time_before(jiffies, timeout)) {
+		enum drm_connector_status status;
+
+		status = tegra_dpaux_detect(dpaux);
+		if (status == connector_status_connected)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
+
+int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
+{
+	unsigned long timeout;
+	int err;
+
+	err = regulator_disable(dpaux->vdd);
+	if (err < 0)
+		return err;
+
+	timeout = jiffies + msecs_to_jiffies(250);
+
+	while (time_before(jiffies, timeout)) {
+		enum drm_connector_status status;
+
+		status = tegra_dpaux_detect(dpaux);
+		if (status == connector_status_disconnected) {
+			dpaux->output = NULL;
+			return 0;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
+
+enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
+{
+	unsigned long value;
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+
+	if (value & DPAUX_DP_AUXSTAT_HPD_STATUS)
+		return connector_status_connected;
+
+	return connector_status_disconnected;
+}
+
+int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
+{
+	unsigned long value;
+
+	value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+		DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+		DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+		DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_MODE_AUX;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	return 0;
+}
+
+int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
+{
+	unsigned long value;
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	return 0;
+}
+
+int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
+{
+	int err;
+
+	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+				 encoding);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
+		      u8 pattern)
+{
+	u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
+	u8 status[DP_LINK_STATUS_SIZE], values[4];
+	unsigned int i;
+	int err;
+
+	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern);
+	if (err < 0)
+		return err;
+
+	if (tp == DP_TRAINING_PATTERN_DISABLE)
+		return 0;
+
+	for (i = 0; i < link->num_lanes; i++)
+		values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
+			    DP_TRAIN_PRE_EMPHASIS_0 |
+			    DP_TRAIN_MAX_SWING_REACHED |
+			    DP_TRAIN_VOLTAGE_SWING_400;
+
+	err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
+				link->num_lanes);
+	if (err < 0)
+		return err;
+
+	usleep_range(500, 1000);
+
+	err = drm_dp_dpcd_read_link_status(&dpaux->aux, status);
+	if (err < 0)
+		return err;
+
+	switch (tp) {
+	case DP_TRAINING_PATTERN_1:
+		if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
+			return -EAGAIN;
+
+		break;
+
+	case DP_TRAINING_PATTERN_2:
+		if (!drm_dp_channel_eq_ok(status, link->num_lanes))
+			return -EAGAIN;
+
+		break;
+
+	default:
+		dev_err(dpaux->dev, "unsupported training pattern %u\n", tp);
+		return -EINVAL;
+	}
+
+	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
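
With tegra_dpaux_transfer() registered via drm_dp_aux_register_i2c_bus(), DPCD access goes through the generic DP AUX helpers introduced in this merge. A minimal consumer sketch, assuming a successfully probed dpaux instance (the helper and register names come from drm_dp_helper; the function itself is illustrative):

static int example_read_dpcd_revision(struct tegra_dpaux *dpaux)
{
	u8 rev;
	int err;

	/* one-byte native AUX read, routed through tegra_dpaux_transfer() */
	err = drm_dp_dpcd_readb(&dpaux->aux, DP_DPCD_REV, &rev);
	if (err < 0)
		return err;

	DRM_DEBUG_KMS("DPCD revision %u.%u\n", rev >> 4, rev & 0xf);
	return 0;
}
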
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
new file mode 100644
index 000000000000..4f5bf10fdff9
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DRM_TEGRA_DPAUX_H
+#define DRM_TEGRA_DPAUX_H
+
+#define DPAUX_CTXSW				0x00
+
+#define DPAUX_INTR_EN_AUX			0x01
+#define DPAUX_INTR_AUX				0x05
+#define DPAUX_INTR_AUX_DONE			(1 << 3)
+#define DPAUX_INTR_IRQ_EVENT			(1 << 2)
+#define DPAUX_INTR_UNPLUG_EVENT			(1 << 1)
+#define DPAUX_INTR_PLUG_EVENT			(1 << 0)
+
+#define DPAUX_DP_AUXDATA_WRITE(x)		(0x09 + ((x) << 2))
+#define DPAUX_DP_AUXDATA_READ(x)		(0x19 + ((x) << 2))
+#define DPAUX_DP_AUXADDR			0x29
+
+#define DPAUX_DP_AUXCTL				0x2d
+#define DPAUX_DP_AUXCTL_TRANSACTREQ		(1 << 16)
+#define DPAUX_DP_AUXCTL_CMD_AUX_RD		(9 << 12)
+#define DPAUX_DP_AUXCTL_CMD_AUX_WR		(8 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RQ		(6 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RD		(5 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_WR		(4 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RQ		(2 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RD		(1 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_WR		(0 << 12)
+#define DPAUX_DP_AUXCTL_CMDLEN(x)		((x) & 0xff)
+
+#define DPAUX_DP_AUXSTAT			0x31
+#define DPAUX_DP_AUXSTAT_HPD_STATUS		(1 << 28)
+#define DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK	(0xf0000)
+#define DPAUX_DP_AUXSTAT_NO_STOP_ERROR		(1 << 11)
+#define DPAUX_DP_AUXSTAT_SINKSTAT_ERROR		(1 << 10)
+#define DPAUX_DP_AUXSTAT_RX_ERROR		(1 << 9)
+#define DPAUX_DP_AUXSTAT_TIMEOUT_ERROR		(1 << 8)
+#define DPAUX_DP_AUXSTAT_REPLY_MASK		(0xff)
+
+#define DPAUX_DP_AUX_SINKSTAT_LO		0x35
+#define DPAUX_DP_AUX_SINKSTAT_HI		0x39
+
+#define DPAUX_HPD_CONFIG			0x3d
+#define DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME(x)	(((x) & 0xffff) << 16)
+#define DPAUX_HPD_CONFIG_PLUG_MIN_TIME(x)	((x) & 0xffff)
+
+#define DPAUX_HPD_IRQ_CONFIG			0x41
+#define DPAUX_HPD_IRQ_CONFIG_MIN_LOW_TIME(x)	((x) & 0xffff)
+
+#define DPAUX_DP_AUX_CONFIG			0x45
+
+#define DPAUX_HYBRID_PADCTL			0x49
+#define DPAUX_HYBRID_PADCTL_AUX_CMH(x)		(((x) & 0x3) << 12)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x)		(((x) & 0x7) << 8)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x)		(((x) & 0x3f) << 2)
+#define DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV	(1 << 1)
+#define DPAUX_HYBRID_PADCTL_MODE_I2C		(1 << 0)
+#define DPAUX_HYBRID_PADCTL_MODE_AUX		(0 << 0)
+
+#define DPAUX_HYBRID_SPARE			0x4d
+#define DPAUX_HYBRID_SPARE_PAD_POWER_DOWN	(1 << 0)
+
+#define DPAUX_SCRATCH_REG0			0x51
+#define DPAUX_SCRATCH_REG1			0x55
+#define DPAUX_SCRATCH_REG2			0x59
+
+#endif
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c71594754f46..6f5b6e2f552e 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -665,6 +665,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra114-hdmi", },
 	{ .compatible = "nvidia,tegra114-gr3d", },
 	{ .compatible = "nvidia,tegra124-dc", },
+	{ .compatible = "nvidia,tegra124-sor", },
 	{ /* sentinel */ }
 };
 
@@ -691,14 +692,22 @@ static int __init host1x_drm_init(void)
 	if (err < 0)
 		goto unregister_dc;
 
-	err = platform_driver_register(&tegra_hdmi_driver);
+	err = platform_driver_register(&tegra_sor_driver);
 	if (err < 0)
 		goto unregister_dsi;
 
-	err = platform_driver_register(&tegra_gr2d_driver);
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_sor;
+
+	err = platform_driver_register(&tegra_dpaux_driver);
 	if (err < 0)
 		goto unregister_hdmi;
 
+	err = platform_driver_register(&tegra_gr2d_driver);
+	if (err < 0)
+		goto unregister_dpaux;
+
 	err = platform_driver_register(&tegra_gr3d_driver);
 	if (err < 0)
 		goto unregister_gr2d;
@@ -707,8 +716,12 @@ static int __init host1x_drm_init(void)
 
 unregister_gr2d:
 	platform_driver_unregister(&tegra_gr2d_driver);
+unregister_dpaux:
+	platform_driver_unregister(&tegra_dpaux_driver);
 unregister_hdmi:
 	platform_driver_unregister(&tegra_hdmi_driver);
+unregister_sor:
+	platform_driver_unregister(&tegra_sor_driver);
 unregister_dsi:
 	platform_driver_unregister(&tegra_dsi_driver);
 unregister_dc:
@@ -723,7 +736,9 @@ static void __exit host1x_drm_exit(void)
 {
 	platform_driver_unregister(&tegra_gr3d_driver);
 	platform_driver_unregister(&tegra_gr2d_driver);
+	platform_driver_unregister(&tegra_dpaux_driver);
 	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_sor_driver);
 	platform_driver_unregister(&tegra_dsi_driver);
 	platform_driver_unregister(&tegra_dc_driver);
 	host1x_driver_unregister(&host1x_drm_driver);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index bf1cac7658f8..126332c3ecbb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -179,12 +179,14 @@ struct tegra_output_ops {
 	int (*check_mode)(struct tegra_output *output,
 			  struct drm_display_mode *mode,
 			  enum drm_mode_status *status);
+	enum drm_connector_status (*detect)(struct tegra_output *output);
 };
 
 enum tegra_output_type {
 	TEGRA_OUTPUT_RGB,
 	TEGRA_OUTPUT_HDMI,
 	TEGRA_OUTPUT_DSI,
+	TEGRA_OUTPUT_EDP,
 };
 
 struct tegra_output {
@@ -265,6 +267,22 @@ extern int tegra_output_remove(struct tegra_output *output);
 extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 extern int tegra_output_exit(struct tegra_output *output);
 
+/* from dpaux.c */
+
+struct tegra_dpaux;
+struct drm_dp_link;
+struct drm_dp_aux;
+
+struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
+enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
+int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output);
+int tegra_dpaux_detach(struct tegra_dpaux *dpaux);
+int tegra_dpaux_enable(struct tegra_dpaux *dpaux);
+int tegra_dpaux_disable(struct tegra_dpaux *dpaux);
+int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding);
+int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
+		      u8 pattern);
+
 /* from fb.c */
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index);
@@ -278,7 +296,9 @@ extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
 extern struct platform_driver tegra_dc_driver;
 extern struct platform_driver tegra_dsi_driver;
+extern struct platform_driver tegra_sor_driver;
 extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dpaux_driver;
 extern struct platform_driver tegra_gr2d_driver;
 extern struct platform_driver tegra_gr3d_driver;
 
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d452faab0235..0e599f0417c0 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1,23 +1,9 @@
 /*
  * Copyright (C) 2013 NVIDIA Corporation
  *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/clk.h>
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 00e79c1f448c..1db5cc24ea91 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -1,23 +1,9 @@
 /*
  * Copyright (C) 2013 NVIDIA Corporation
  *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #ifndef DRM_TEGRA_DSI_H
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ef853e558036..bcf9895cef9f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -8,14 +8,9 @@
  *
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/dma-buf.h>
@@ -394,6 +389,18 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
 	return -EINVAL;
 }
 
+static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+{
+	struct drm_gem_object *gem = buf->priv;
+	struct tegra_bo *bo = to_tegra_bo(gem);
+
+	return bo->vaddr;
+}
+
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+{
+}
+
 static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.map_dma_buf = tegra_gem_prime_map_dma_buf,
 	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
@@ -403,6 +410,8 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.kmap = tegra_gem_prime_kmap,
 	.kunmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
+	.vmap = tegra_gem_prime_vmap,
+	.vunmap = tegra_gem_prime_vunmap,
 };
 
 struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
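
The new .vmap/.vunmap callbacks let dma-buf importers obtain a CPU mapping of the whole buffer; since these buffers are CMA-backed, the existing kernel virtual address can be handed out directly and vunmap is a no-op. A minimal importer sketch against the 3.15-era dma-buf API (the function itself is illustrative):

static int example_cpu_access(struct dma_buf *buf)
{
	void *vaddr = dma_buf_vmap(buf); /* ends up in tegra_gem_prime_vmap() */

	if (!vaddr)
		return -ENOMEM;

	/* ... CPU reads/writes on the buffer contents ... */

	dma_buf_vunmap(buf, vaddr);
	return 0;
}
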
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index ffd4f792b410..2f3fe96c5154 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -3,17 +3,9 @@
  *
  * Copyright (c) 2012-2013, NVIDIA Corporation.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #ifndef __HOST1X_GEM_H
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7ec4259ffded..2c7ca748edf5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -1,17 +1,9 @@
1/* 1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation. 2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify
5 * under the terms and conditions of the GNU General Public License, 5 * it under the terms of the GNU General Public License version 2 as
6 * version 2, as published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 7 */
16 8
17#include <linux/clk.h> 9#include <linux/clk.h>
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
index e2c4aedaee78..486d19d589c8 100644
--- a/drivers/gpu/drm/tegra/mipi-phy.c
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -1,23 +1,9 @@
1/* 1/*
2 * Copyright (C) 2013 NVIDIA Corporation 2 * Copyright (C) 2013 NVIDIA Corporation
3 * 3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its 4 * This program is free software; you can redistribute it and/or modify
5 * documentation for any purpose is hereby granted without fee, provided that 5 * it under the terms of the GNU General Public License version 2 as
6 * the above copyright notice appear in all copies and that both that copyright 6 * published by the Free Software Foundation.
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */ 7 */
22 8
23#include <linux/errno.h> 9#include <linux/errno.h>
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
index d3591694432d..012ea8ac36d7 100644
--- a/drivers/gpu/drm/tegra/mipi-phy.h
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -1,23 +1,9 @@
1/* 1/*
2 * Copyright (C) 2013 NVIDIA Corporation 2 * Copyright (C) 2013 NVIDIA Corporation
3 * 3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its 4 * This program is free software; you can redistribute it and/or modify
5 * documentation for any purpose is hereby granted without fee, provided that 5 * it under the terms of the GNU General Public License version 2 as
6 * the above copyright notice appear in all copies and that both that copyright 6 * published by the Free Software Foundation.
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */ 7 */
22 8
23#ifndef DRM_TEGRA_MIPI_PHY_H 9#ifndef DRM_TEGRA_MIPI_PHY_H
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 57cecbd18ca8..a3e4f1eca6f7 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -77,6 +77,9 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
77 struct tegra_output *output = connector_to_output(connector); 77 struct tegra_output *output = connector_to_output(connector);
78 enum drm_connector_status status = connector_status_unknown; 78 enum drm_connector_status status = connector_status_unknown;
79 79
80 if (output->ops->detect)
81 return output->ops->detect(output);
82
80 if (gpio_is_valid(output->hpd_gpio)) { 83 if (gpio_is_valid(output->hpd_gpio)) {
81 if (gpio_get_value(output->hpd_gpio) == 0) 84 if (gpio_get_value(output->hpd_gpio) == 0)
82 status = connector_status_disconnected; 85 status = connector_status_disconnected;
@@ -292,6 +295,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
292 encoder = DRM_MODE_ENCODER_DSI; 295 encoder = DRM_MODE_ENCODER_DSI;
293 break; 296 break;
294 297
298 case TEGRA_OUTPUT_EDP:
299 connector = DRM_MODE_CONNECTOR_eDP;
300 encoder = DRM_MODE_ENCODER_TMDS;
301 break;
302
295 default: 303 default:
296 connector = DRM_MODE_CONNECTOR_Unknown; 304 connector = DRM_MODE_CONNECTOR_Unknown;
297 encoder = DRM_MODE_ENCODER_NONE; 305 encoder = DRM_MODE_ENCODER_NONE;
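The detect hook added to tegra_connector_detect above follows a common dispatch shape: use the output's own probe when it provides one, otherwise fall back to the generic hotplug GPIO. A compact sketch of that shape, with hypothetical names rather than the tegra structs:

#include <stdio.h>

enum status { STATUS_UNKNOWN, STATUS_CONNECTED, STATUS_DISCONNECTED };

struct output_ops {
	enum status (*detect)(void);	/* optional, driver-specific probe */
};

struct output {
	const struct output_ops *ops;
	int hpd_level;			/* stand-in for a hotplug GPIO read */
};

static enum status output_detect(const struct output *out)
{
	/* prefer the driver-specific probe when one exists... */
	if (out->ops && out->ops->detect)
		return out->ops->detect();

	/* ...otherwise fall back to the hotplug-detect line */
	return out->hpd_level ? STATUS_CONNECTED : STATUS_DISCONNECTED;
}

static enum status edp_detect(void)
{
	return STATUS_CONNECTED;	/* built-in panels are always connected */
}

int main(void)
{
	const struct output_ops edp_ops = { .detect = edp_detect };
	struct output edp = { .ops = &edp_ops, .hpd_level = 0 };
	struct output hdmi = { .ops = NULL, .hpd_level = 1 };

	printf("edp: %d, hdmi: %d\n", output_detect(&edp), output_detect(&hdmi));
	return 0;
}

This mirrors the new eDP path added in this series: the SOR output supplies a detect op, while GPIO-based outputs keep the old behaviour.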
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
new file mode 100644
index 000000000000..49ef5729f435
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -0,0 +1,1092 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/clk.h>
10#include <linux/io.h>
11#include <linux/platform_device.h>
12#include <linux/reset.h>
13#include <linux/tegra-powergate.h>
14
15#include <drm/drm_dp_helper.h>
16
17#include "dc.h"
18#include "drm.h"
19#include "sor.h"
20
21struct tegra_sor {
22 struct host1x_client client;
23 struct tegra_output output;
24 struct device *dev;
25
26 void __iomem *regs;
27
28 struct reset_control *rst;
29 struct clk *clk_parent;
30 struct clk *clk_safe;
31 struct clk *clk_dp;
32 struct clk *clk;
33
34 struct tegra_dpaux *dpaux;
35
36 bool enabled;
37};
38
39static inline struct tegra_sor *
40host1x_client_to_sor(struct host1x_client *client)
41{
42 return container_of(client, struct tegra_sor, client);
43}
44
45static inline struct tegra_sor *to_sor(struct tegra_output *output)
46{
47 return container_of(output, struct tegra_sor, output);
48}
49
50static inline unsigned long tegra_sor_readl(struct tegra_sor *sor,
51 unsigned long offset)
52{
53 return readl(sor->regs + (offset << 2));
54}
55
56static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
57 unsigned long offset)
58{
59 writel(value, sor->regs + (offset << 2));
60}
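/*
 * Illustrative sketch, not driver code: SOR register offsets in this
 * file are word indices, so the accessors above scale them by four
 * (offset << 2) before adding them to the byte-addressed MMIO mapping.
 * The hypothetical helper below spells out the address arithmetic.
 */
static inline unsigned long sor_byte_offset(unsigned long word_offset)
{
	/* word index -> byte offset, e.g. SOR_PWR (0x15) -> 0x54 */
	return word_offset << 2;
}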
61
62static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
63 struct drm_dp_link *link)
64{
65 unsigned long value;
66 unsigned int i;
67 u8 pattern;
68 int err;
69
70 /* setup lane parameters */
71 value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
72 SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
73 SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
74 SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
75 tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
76
77 value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
78 SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
79 SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
80 SOR_LANE_PREEMPHASIS_LANE0(0x0f);
81 tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
82
83 value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
84 SOR_LANE_POST_CURSOR_LANE2(0x00) |
85 SOR_LANE_POST_CURSOR_LANE1(0x00) |
86 SOR_LANE_POST_CURSOR_LANE0(0x00);
87 tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
88
89 /* disable LVDS mode */
90 tegra_sor_writel(sor, 0, SOR_LVDS);
91
92 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
93 value |= SOR_DP_PADCTL_TX_PU_ENABLE;
94 value &= ~SOR_DP_PADCTL_TX_PU_MASK;
95 value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
96 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
97
98 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
99 value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
100 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
101 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
102
103 usleep_range(10, 100);
104
105 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
106 value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
107 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
108 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
109
110 err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
111 if (err < 0)
112 return err;
113
114 for (i = 0, value = 0; i < link->num_lanes; i++) {
115 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
116 SOR_DP_TPG_SCRAMBLER_NONE |
117 SOR_DP_TPG_PATTERN_TRAIN1;
118 value = (value << 8) | lane;
119 }
120
121 tegra_sor_writel(sor, value, SOR_DP_TPG);
122
123 pattern = DP_TRAINING_PATTERN_1;
124
125 err = tegra_dpaux_train(sor->dpaux, link, pattern);
126 if (err < 0)
127 return err;
128
129 value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
130 value |= SOR_DP_SPARE_SEQ_ENABLE;
131 value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
132 value |= SOR_DP_SPARE_MACRO_SOR_CLK;
133 tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
134
135 for (i = 0, value = 0; i < link->num_lanes; i++) {
136 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
137 SOR_DP_TPG_SCRAMBLER_NONE |
138 SOR_DP_TPG_PATTERN_TRAIN2;
139 value = (value << 8) | lane;
140 }
141
142 tegra_sor_writel(sor, value, SOR_DP_TPG);
143
144 pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
145
146 err = tegra_dpaux_train(sor->dpaux, link, pattern);
147 if (err < 0)
148 return err;
149
150 for (i = 0, value = 0; i < link->num_lanes; i++) {
151 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
152 SOR_DP_TPG_SCRAMBLER_GALIOS |
153 SOR_DP_TPG_PATTERN_NONE;
154 value = (value << 8) | lane;
155 }
156
157 tegra_sor_writel(sor, value, SOR_DP_TPG);
158
159 pattern = DP_TRAINING_PATTERN_DISABLE;
160
161 err = tegra_dpaux_train(sor->dpaux, link, pattern);
162 if (err < 0)
163 return err;
164
165 return 0;
166}
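/*
 * Illustrative sketch, not driver code: each training step above packs
 * one 8-bit pattern descriptor per lane into the SOR_DP_TPG register,
 * with lane 0 ending up in the lowest byte. For a four-lane link and a
 * descriptor of 0x41 (SOR_DP_TPG_CHANNEL_CODING | PATTERN_TRAIN1) this
 * hypothetical helper returns 0x41414141.
 */
static unsigned long sor_dp_tpg_pack(unsigned long lane_desc,
				     unsigned int num_lanes)
{
	unsigned long value = 0;
	unsigned int i;

	for (i = 0; i < num_lanes; i++)
		value = (value << 8) | lane_desc;

	return value;
}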
167
168static void tegra_sor_super_update(struct tegra_sor *sor)
169{
170 tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
171 tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
172 tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
173}
174
175static void tegra_sor_update(struct tegra_sor *sor)
176{
177 tegra_sor_writel(sor, 0, SOR_STATE_0);
178 tegra_sor_writel(sor, 1, SOR_STATE_0);
179 tegra_sor_writel(sor, 0, SOR_STATE_0);
180}
181
182static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
183{
184 unsigned long value;
185
186 value = tegra_sor_readl(sor, SOR_PWM_DIV);
187 value &= ~SOR_PWM_DIV_MASK;
188 value |= 0x400; /* period */
189 tegra_sor_writel(sor, value, SOR_PWM_DIV);
190
191 value = tegra_sor_readl(sor, SOR_PWM_CTL);
192 value &= ~SOR_PWM_CTL_DUTY_CYCLE_MASK;
193 value |= 0x400; /* duty cycle */
194 value &= ~SOR_PWM_CTL_CLK_SEL; /* clock source: PCLK */
195 value |= SOR_PWM_CTL_TRIGGER;
196 tegra_sor_writel(sor, value, SOR_PWM_CTL);
197
198 timeout = jiffies + msecs_to_jiffies(timeout);
199
200 while (time_before(jiffies, timeout)) {
201 value = tegra_sor_readl(sor, SOR_PWM_CTL);
202 if ((value & SOR_PWM_CTL_TRIGGER) == 0)
203 return 0;
204
205 usleep_range(25, 100);
206 }
207
208 return -ETIMEDOUT;
209}
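/*
 * Illustrative sketch, not driver code: several helpers in this file
 * share the same shape as tegra_sor_setup_pwm above: set a
 * self-clearing TRIGGER bit, then poll until the hardware clears it or
 * a jiffies deadline expires. Factored out, with a hypothetical
 * read_reg callback, the idiom looks like this:
 */
static int poll_trigger_cleared(unsigned long (*read_reg)(void *ctx),
				void *ctx, unsigned long trigger,
				unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, timeout)) {
		/* the hardware clears the trigger bit when it is done */
		if ((read_reg(ctx) & trigger) == 0)
			return 0;

		usleep_range(25, 100);
	}

	return -ETIMEDOUT;
}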
210
211static int tegra_sor_attach(struct tegra_sor *sor)
212{
213 unsigned long value, timeout;
214
215 /* wake up in normal mode */
216 value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
217 value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
218 value |= SOR_SUPER_STATE_MODE_NORMAL;
219 tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
220 tegra_sor_super_update(sor);
221
222 /* attach */
223 value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
224 value |= SOR_SUPER_STATE_ATTACHED;
225 tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
226 tegra_sor_super_update(sor);
227
228 timeout = jiffies + msecs_to_jiffies(250);
229
230 while (time_before(jiffies, timeout)) {
231 value = tegra_sor_readl(sor, SOR_TEST);
232 if ((value & SOR_TEST_ATTACHED) != 0)
233 return 0;
234
235 usleep_range(25, 100);
236 }
237
238 return -ETIMEDOUT;
239}
240
241static int tegra_sor_wakeup(struct tegra_sor *sor)
242{
243 struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
244 unsigned long value, timeout;
245
246 /* enable display controller outputs */
247 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
248 value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
249 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
250 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
251
252 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
253 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
254
255 timeout = jiffies + msecs_to_jiffies(250);
256
257 /* wait for head to wake up */
258 while (time_before(jiffies, timeout)) {
259 value = tegra_sor_readl(sor, SOR_TEST);
260 value &= SOR_TEST_HEAD_MODE_MASK;
261
262 if (value == SOR_TEST_HEAD_MODE_AWAKE)
263 return 0;
264
265 usleep_range(25, 100);
266 }
267
268 return -ETIMEDOUT;
269}
270
271static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
272{
273 unsigned long value;
274
275 value = tegra_sor_readl(sor, SOR_PWR);
276 value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
277 tegra_sor_writel(sor, value, SOR_PWR);
278
279 timeout = jiffies + msecs_to_jiffies(timeout);
280
281 while (time_before(jiffies, timeout)) {
282 value = tegra_sor_readl(sor, SOR_PWR);
283 if ((value & SOR_PWR_TRIGGER) == 0)
284 return 0;
285
286 usleep_range(25, 100);
287 }
288
289 return -ETIMEDOUT;
290}
291
292static int tegra_output_sor_enable(struct tegra_output *output)
293{
294 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
295 struct drm_display_mode *mode = &dc->base.mode;
296 unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
297 struct tegra_sor *sor = to_sor(output);
298 unsigned long value;
299 int err;
300
301 if (sor->enabled)
302 return 0;
303
304 err = clk_prepare_enable(sor->clk);
305 if (err < 0)
306 return err;
307
308 reset_control_deassert(sor->rst);
309
310 if (sor->dpaux) {
311 err = tegra_dpaux_enable(sor->dpaux);
312 if (err < 0)
313 dev_err(sor->dev, "failed to enable DP: %d\n", err);
314 }
315
316 err = clk_set_parent(sor->clk, sor->clk_safe);
317 if (err < 0)
318 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
319
320 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
321 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
322 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
323 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
324
325 value = tegra_sor_readl(sor, SOR_PLL_2);
326 value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
327 tegra_sor_writel(sor, value, SOR_PLL_2);
328 usleep_range(20, 100);
329
330 value = tegra_sor_readl(sor, SOR_PLL_3);
331 value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
332 tegra_sor_writel(sor, value, SOR_PLL_3);
333
334 value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
335 SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
336 tegra_sor_writel(sor, value, SOR_PLL_0);
337
338 value = tegra_sor_readl(sor, SOR_PLL_2);
339 value |= SOR_PLL_2_SEQ_PLLCAPPD;
340 value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
341 value |= SOR_PLL_2_LVDS_ENABLE;
342 tegra_sor_writel(sor, value, SOR_PLL_2);
343
344 value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
345 tegra_sor_writel(sor, value, SOR_PLL_1);
346
347 while (true) {
348 value = tegra_sor_readl(sor, SOR_PLL_2);
349 if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
350 break;
351
352 usleep_range(250, 1000);
353 }
354
355 value = tegra_sor_readl(sor, SOR_PLL_2);
356 value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
357 value &= ~SOR_PLL_2_PORT_POWERDOWN;
358 tegra_sor_writel(sor, value, SOR_PLL_2);
359
360 /*
361 * power up
362 */
363
364 /* set safe link bandwidth (1.62 Gbps) */
365 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
366 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
367 value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
368 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
369
370 /* step 1 */
371 value = tegra_sor_readl(sor, SOR_PLL_2);
372 value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
373 SOR_PLL_2_BANDGAP_POWERDOWN;
374 tegra_sor_writel(sor, value, SOR_PLL_2);
375
376 value = tegra_sor_readl(sor, SOR_PLL_0);
377 value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
378 tegra_sor_writel(sor, value, SOR_PLL_0);
379
380 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
381 value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
382 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
383
384 /* step 2 */
385 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
386 if (err < 0) {
387 dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
388 return err;
389 }
390
391 usleep_range(5, 100);
392
393 /* step 3 */
394 value = tegra_sor_readl(sor, SOR_PLL_2);
395 value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
396 tegra_sor_writel(sor, value, SOR_PLL_2);
397
398 usleep_range(20, 100);
399
400 /* step 4 */
401 value = tegra_sor_readl(sor, SOR_PLL_0);
402 value &= ~SOR_PLL_0_POWER_OFF;
403 value &= ~SOR_PLL_0_VCOPD;
404 tegra_sor_writel(sor, value, SOR_PLL_0);
405
406 value = tegra_sor_readl(sor, SOR_PLL_2);
407 value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
408 tegra_sor_writel(sor, value, SOR_PLL_2);
409
410 usleep_range(200, 1000);
411
412 /* step 5 */
413 value = tegra_sor_readl(sor, SOR_PLL_2);
414 value &= ~SOR_PLL_2_PORT_POWERDOWN;
415 tegra_sor_writel(sor, value, SOR_PLL_2);
416
417 /* switch to DP clock */
418 err = clk_set_parent(sor->clk, sor->clk_dp);
419 if (err < 0)
420 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
421
422 /* power DP lanes (XXX: parameterize based on link?) */
423 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
424 value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
425 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
426 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
427
428 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
429 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
430 value |= SOR_DP_LINKCTL_LANE_COUNT(4);
431 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
432
433 /* start lane sequencer */
434 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
435 SOR_LANE_SEQ_CTL_POWER_STATE_UP;
436 tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
437
438 while (true) {
439 value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
440 if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
441 break;
442
443 usleep_range(250, 1000);
444 }
445
446 /* set link bandwidth (2.7 Gbps, XXX: parameterize based on link?) */
447 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
448 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
449 value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
450 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
451
452 /* set linkctl */
453 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
454 value |= SOR_DP_LINKCTL_ENABLE;
455
456 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
457 value |= SOR_DP_LINKCTL_TU_SIZE(59); /* XXX: don't hardcode? */
458
459 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
460 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
461
462 for (i = 0, value = 0; i < 4; i++) {
463 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
464 SOR_DP_TPG_SCRAMBLER_GALIOS |
465 SOR_DP_TPG_PATTERN_NONE;
466 value = (value << 8) | lane;
467 }
468
469 tegra_sor_writel(sor, value, SOR_DP_TPG);
470
471 value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
472 value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
473 value |= SOR_DP_CONFIG_WATERMARK(14); /* XXX: don't hardcode? */
474
475 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
476 value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(47); /* XXX: don't hardcode? */
477
478 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
479 value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(9); /* XXX: don't hardcode? */
480
481 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; /* XXX: don't hardcode? */
482
483 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
484 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; /* XXX: don't hardcode? */
485 tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
486
487 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
488 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
489 value |= 137; /* XXX: don't hardcode? */
490 tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
491
492 value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
493 value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
494 value |= 2368; /* XXX: don't hardcode? */
495 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
496
497 /* enable pad calibration logic */
498 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
499 value |= SOR_DP_PADCTL_PAD_CAL_PD;
500 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
501
502 if (sor->dpaux) {
503 /* FIXME: properly convert to struct drm_dp_aux */
504 struct drm_dp_aux *aux = (struct drm_dp_aux *)sor->dpaux;
505 struct drm_dp_link link;
506 u8 rate, lanes;
507
508 err = drm_dp_link_probe(aux, &link);
509 if (err < 0) {
510 dev_err(sor->dev, "failed to probe eDP link: %d\n",
511 err);
512 return err;
513 }
514
515 err = drm_dp_link_power_up(aux, &link);
516 if (err < 0) {
517 dev_err(sor->dev, "failed to power up eDP link: %d\n",
518 err);
519 return err;
520 }
521
522 err = drm_dp_link_configure(aux, &link);
523 if (err < 0) {
524 dev_err(sor->dev, "failed to configure eDP link: %d\n",
525 err);
526 return err;
527 }
528
529 rate = drm_dp_link_rate_to_bw_code(link.rate);
530 lanes = link.num_lanes;
531
532 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
533 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
534 value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
535 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
536
537 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
538 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
539 value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
540
541 if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
542 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
543
544 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
545
546 /* disable training pattern generator */
547
548 for (i = 0, value = 0; i < link.num_lanes; i++) {
549 unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
550 SOR_DP_TPG_SCRAMBLER_GALIOS |
551 SOR_DP_TPG_PATTERN_NONE;
552 value = (value << 8) | lane;
553 }
554
555 tegra_sor_writel(sor, value, SOR_DP_TPG);
556
557 err = tegra_sor_dp_train_fast(sor, &link);
558 if (err < 0) {
559 dev_err(sor->dev, "DP fast link training failed: %d\n",
560 err);
561 return err;
562 }
563
564 dev_dbg(sor->dev, "fast link training succeeded\n");
565 }
566
567 err = tegra_sor_power_up(sor, 250);
568 if (err < 0) {
569 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
570 return err;
571 }
572
573 /* start display controller in continuous mode */
574 value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
575 value |= WRITE_MUX;
576 tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
577
578 tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
579 tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
580
581 value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS);
582 value &= ~WRITE_MUX;
583 tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS);
584
585 /*
586 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
587 * raster, associate with display controller)
588 */
589 value = SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 |
590 SOR_STATE_ASY_VSYNCPOL |
591 SOR_STATE_ASY_HSYNCPOL |
592 SOR_STATE_ASY_PROTOCOL_DP_A |
593 SOR_STATE_ASY_CRC_MODE_COMPLETE |
594 SOR_STATE_ASY_OWNER(dc->pipe + 1);
595 tegra_sor_writel(sor, value, SOR_STATE_1);
596
597 /*
598 * TODO: The video timing programming below doesn't seem to match the
599 * register definitions.
600 */
601
602 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
603 tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
604
605 vse = mode->vsync_end - mode->vsync_start - 1;
606 hse = mode->hsync_end - mode->hsync_start - 1;
607
608 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
609 tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
610
611 vbe = vse + (mode->vsync_start - mode->vdisplay);
612 hbe = hse + (mode->hsync_start - mode->hdisplay);
613
614 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
615 tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
616
617 vbs = vbe + mode->vdisplay;
618 hbs = hbe + mode->hdisplay;
619
620 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
621 tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
622
623 /* XXX interlaced mode */
624 tegra_sor_writel(sor, 0x00000001, SOR_HEAD_STATE_5(0));
625
626 /* CSTM (LVDS, link A/B, upper) */
627 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
628 SOR_CSTM_UPPER;
629 tegra_sor_writel(sor, value, SOR_CSTM);
630
631 /* PWM setup */
632 err = tegra_sor_setup_pwm(sor, 250);
633 if (err < 0) {
634 dev_err(sor->dev, "failed to setup PWM: %d\n", err);
635 return err;
636 }
637
638 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
639 value |= SOR_ENABLE;
640 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
641
642 tegra_sor_update(sor);
643
644 err = tegra_sor_attach(sor);
645 if (err < 0) {
646 dev_err(sor->dev, "failed to attach SOR: %d\n", err);
647 return err;
648 }
649
650 err = tegra_sor_wakeup(sor);
651 if (err < 0) {
652 dev_err(sor->dev, "failed to enable DC: %d\n", err);
653 return err;
654 }
655
656 sor->enabled = true;
657
658 return 0;
659}
660
661static int tegra_sor_detach(struct tegra_sor *sor)
662{
663 unsigned long value, timeout;
664
665 /* switch to safe mode */
666 value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
667 value &= ~SOR_SUPER_STATE_MODE_NORMAL;
668 tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
669 tegra_sor_super_update(sor);
670
671 timeout = jiffies + msecs_to_jiffies(250);
672
673 while (time_before(jiffies, timeout)) {
674 value = tegra_sor_readl(sor, SOR_PWR);
675 if (value & SOR_PWR_MODE_SAFE)
676 break;
677 }
678
679 if ((value & SOR_PWR_MODE_SAFE) == 0)
680 return -ETIMEDOUT;
681
682 /* go to sleep */
683 value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
684 value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
685 tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
686 tegra_sor_super_update(sor);
687
688 /* detach */
689 value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
690 value &= ~SOR_SUPER_STATE_ATTACHED;
691 tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
692 tegra_sor_super_update(sor);
693
694 timeout = jiffies + msecs_to_jiffies(250);
695
696 while (time_before(jiffies, timeout)) {
697 value = tegra_sor_readl(sor, SOR_TEST);
698 if ((value & SOR_TEST_ATTACHED) == 0)
699 break;
700
701 usleep_range(25, 100);
702 }
703
704 if ((value & SOR_TEST_ATTACHED) != 0)
705 return -ETIMEDOUT;
706
707 return 0;
708}
709
710static int tegra_sor_power_down(struct tegra_sor *sor)
711{
712 unsigned long value, timeout;
713 int err;
714
715 value = tegra_sor_readl(sor, SOR_PWR);
716 value &= ~SOR_PWR_NORMAL_STATE_PU;
717 value |= SOR_PWR_TRIGGER;
718 tegra_sor_writel(sor, value, SOR_PWR);
719
720 timeout = jiffies + msecs_to_jiffies(250);
721
722 while (time_before(jiffies, timeout)) {
723 value = tegra_sor_readl(sor, SOR_PWR);
724 if ((value & SOR_PWR_TRIGGER) == 0)
725 return 0;
726
727 usleep_range(25, 100);
728 }
729
730 if ((value & SOR_PWR_TRIGGER) != 0)
731 return -ETIMEDOUT;
732
733 err = clk_set_parent(sor->clk, sor->clk_safe);
734 if (err < 0)
735 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
736
737 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
738 value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
739 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
740 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
741
742 /* stop lane sequencer */
743 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
744 SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
745 tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
746
747 timeout = jiffies + msecs_to_jiffies(250);
748
749 while (time_before(jiffies, timeout)) {
750 value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
751 if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
752 break;
753
754 usleep_range(25, 100);
755 }
756
757 if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
758 return -ETIMEDOUT;
759
760 value = tegra_sor_readl(sor, SOR_PLL_2);
761 value |= SOR_PLL_2_PORT_POWERDOWN;
762 tegra_sor_writel(sor, value, SOR_PLL_2);
763
764 usleep_range(20, 100);
765
766 value = tegra_sor_readl(sor, SOR_PLL_0);
767 value |= SOR_PLL_0_POWER_OFF;
768 value |= SOR_PLL_0_VCOPD;
769 tegra_sor_writel(sor, value, SOR_PLL_0);
770
771 value = tegra_sor_readl(sor, SOR_PLL_2);
772 value |= SOR_PLL_2_SEQ_PLLCAPPD;
773 value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
774 tegra_sor_writel(sor, value, SOR_PLL_2);
775
776 usleep_range(20, 100);
777
778 return 0;
779}
780
781static int tegra_output_sor_disable(struct tegra_output *output)
782{
783 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
784 struct tegra_sor *sor = to_sor(output);
785 unsigned long value;
786 int err;
787
788 if (!sor->enabled)
789 return 0;
790
791 err = tegra_sor_detach(sor);
792 if (err < 0) {
793 dev_err(sor->dev, "failed to detach SOR: %d\n", err);
794 return err;
795 }
796
797 tegra_sor_writel(sor, 0, SOR_STATE_1);
798 tegra_sor_update(sor);
799
800 /*
801 * The following accesses registers of the display controller, so make
802 * sure it's only executed when the output is attached to one.
803 */
804 if (dc) {
805 /*
806 * XXX: We can't do this here because it causes the SOR to go
807 * into an erroneous state and the output will look scrambled
808 * the next time it is enabled. Presumably this is because we
809 * should be doing this only on the next VBLANK. A possible
810 * solution would be to queue a "power-off" event to trigger
811 * this code to be run during the next VBLANK.
812 */
813 /*
814 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
815 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
816 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
817 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
818 */
819
820 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
821 value &= ~DISP_CTRL_MODE_MASK;
822 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
823
824 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
825 value &= ~SOR_ENABLE;
826 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
827
828 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
829 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
830 }
831
832 err = tegra_sor_power_down(sor);
833 if (err < 0) {
834 dev_err(sor->dev, "failed to power down SOR: %d\n", err);
835 return err;
836 }
837
838 if (sor->dpaux) {
839 err = tegra_dpaux_disable(sor->dpaux);
840 if (err < 0) {
841 dev_err(sor->dev, "failed to disable DP: %d\n", err);
842 return err;
843 }
844 }
845
846 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
847 if (err < 0) {
848 dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
849 return err;
850 }
851
852 reset_control_assert(sor->rst);
853 clk_disable_unprepare(sor->clk);
854
855 sor->enabled = false;
856
857 return 0;
858}
859
860static int tegra_output_sor_setup_clock(struct tegra_output *output,
861 struct clk *clk, unsigned long pclk)
862{
863 struct tegra_sor *sor = to_sor(output);
864 int err;
865
866 /* halve the pixel clock and round up to the next MHz */
867 pclk = DIV_ROUND_UP(pclk / 2, 1000000) * 1000000;
868
869 err = clk_set_parent(clk, sor->clk_parent);
870 if (err < 0) {
871 dev_err(sor->dev, "failed to set parent clock: %d\n", err);
872 return err;
873 }
874
875 err = clk_set_rate(sor->clk_parent, pclk);
876 if (err < 0) {
877 dev_err(sor->dev, "failed to set base clock rate to %lu Hz\n",
878 pclk * 2);
879 return err;
880 }
881
882 return 0;
883}
884
885static int tegra_output_sor_check_mode(struct tegra_output *output,
886 struct drm_display_mode *mode,
887 enum drm_mode_status *status)
888{
889 /*
890 * FIXME: For now, always assume that the mode is okay.
891 */
892
893 *status = MODE_OK;
894
895 return 0;
896}
897
898static enum drm_connector_status
899tegra_output_sor_detect(struct tegra_output *output)
900{
901 struct tegra_sor *sor = to_sor(output);
902
903 if (sor->dpaux)
904 return tegra_dpaux_detect(sor->dpaux);
905
906 return connector_status_unknown;
907}
908
909static const struct tegra_output_ops sor_ops = {
910 .enable = tegra_output_sor_enable,
911 .disable = tegra_output_sor_disable,
912 .setup_clock = tegra_output_sor_setup_clock,
913 .check_mode = tegra_output_sor_check_mode,
914 .detect = tegra_output_sor_detect,
915};
916
917static int tegra_sor_init(struct host1x_client *client)
918{
919 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
920 struct tegra_sor *sor = host1x_client_to_sor(client);
921 int err;
922
923 if (!sor->dpaux)
924 return -ENODEV;
925
926 sor->output.type = TEGRA_OUTPUT_EDP;
927
928 sor->output.dev = sor->dev;
929 sor->output.ops = &sor_ops;
930
931 err = tegra_output_init(tegra->drm, &sor->output);
932 if (err < 0) {
933 dev_err(sor->dev, "output setup failed: %d\n", err);
934 return err;
935 }
936
937 if (sor->dpaux) {
938 err = tegra_dpaux_attach(sor->dpaux, &sor->output);
939 if (err < 0) {
940 dev_err(sor->dev, "failed to attach DP: %d\n", err);
941 return err;
942 }
943 }
944
945 return 0;
946}
947
948static int tegra_sor_exit(struct host1x_client *client)
949{
950 struct tegra_sor *sor = host1x_client_to_sor(client);
951 int err;
952
953 err = tegra_output_disable(&sor->output);
954 if (err < 0) {
955 dev_err(sor->dev, "output failed to disable: %d\n", err);
956 return err;
957 }
958
959 if (sor->dpaux) {
960 err = tegra_dpaux_detach(sor->dpaux);
961 if (err < 0) {
962 dev_err(sor->dev, "failed to detach DP: %d\n", err);
963 return err;
964 }
965 }
966
967 err = tegra_output_exit(&sor->output);
968 if (err < 0) {
969 dev_err(sor->dev, "output cleanup failed: %d\n", err);
970 return err;
971 }
972
973 return 0;
974}
975
976static const struct host1x_client_ops sor_client_ops = {
977 .init = tegra_sor_init,
978 .exit = tegra_sor_exit,
979};
980
981static int tegra_sor_probe(struct platform_device *pdev)
982{
983 struct device_node *np;
984 struct tegra_sor *sor;
985 struct resource *regs;
986 int err;
987
988 sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
989 if (!sor)
990 return -ENOMEM;
991
992 sor->output.dev = sor->dev = &pdev->dev;
993
994 np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
995 if (np) {
996 sor->dpaux = tegra_dpaux_find_by_of_node(np);
997 of_node_put(np);
998
999 if (!sor->dpaux)
1000 return -EPROBE_DEFER;
1001 }
1002
1003 err = tegra_output_probe(&sor->output);
1004 if (err < 0)
1005 return err;
1006
1007 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1008 sor->regs = devm_ioremap_resource(&pdev->dev, regs);
1009 if (IS_ERR(sor->regs))
1010 return PTR_ERR(sor->regs);
1011
1012 sor->rst = devm_reset_control_get(&pdev->dev, "sor");
1013 if (IS_ERR(sor->rst))
1014 return PTR_ERR(sor->rst);
1015
1016 sor->clk = devm_clk_get(&pdev->dev, NULL);
1017 if (IS_ERR(sor->clk))
1018 return PTR_ERR(sor->clk);
1019
1020 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
1021 if (IS_ERR(sor->clk_parent))
1022 return PTR_ERR(sor->clk_parent);
1023
1024 err = clk_prepare_enable(sor->clk_parent);
1025 if (err < 0)
1026 return err;
1027
1028 sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
1029 if (IS_ERR(sor->clk_safe))
1030 return PTR_ERR(sor->clk_safe);
1031
1032 err = clk_prepare_enable(sor->clk_safe);
1033 if (err < 0)
1034 return err;
1035
1036 sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
1037 if (IS_ERR(sor->clk_dp))
1038 return PTR_ERR(sor->clk_dp);
1039
1040 err = clk_prepare_enable(sor->clk_dp);
1041 if (err < 0)
1042 return err;
1043
1044 INIT_LIST_HEAD(&sor->client.list);
1045 sor->client.ops = &sor_client_ops;
1046 sor->client.dev = &pdev->dev;
1047
1048 err = host1x_client_register(&sor->client);
1049 if (err < 0) {
1050 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1051 err);
1052 return err;
1053 }
1054
1055 platform_set_drvdata(pdev, sor);
1056
1057 return 0;
1058}
1059
1060static int tegra_sor_remove(struct platform_device *pdev)
1061{
1062 struct tegra_sor *sor = platform_get_drvdata(pdev);
1063 int err;
1064
1065 err = host1x_client_unregister(&sor->client);
1066 if (err < 0) {
1067 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1068 err);
1069 return err;
1070 }
1071
1072 clk_disable_unprepare(sor->clk_parent);
1073 clk_disable_unprepare(sor->clk_safe);
1074 clk_disable_unprepare(sor->clk_dp);
1075 clk_disable_unprepare(sor->clk);
1076
1077 return 0;
1078}
1079
1080static const struct of_device_id tegra_sor_of_match[] = {
1081 { .compatible = "nvidia,tegra124-sor", },
1082 { },
1083};
1084
1085struct platform_driver tegra_sor_driver = {
1086 .driver = {
1087 .name = "tegra-sor",
1088 .of_match_table = tegra_sor_of_match,
1089 },
1090 .probe = tegra_sor_probe,
1091 .remove = tegra_sor_remove,
1092};
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
new file mode 100644
index 000000000000..f4156d54cd05
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -0,0 +1,278 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef DRM_TEGRA_SOR_H
10#define DRM_TEGRA_SOR_H
11
12#define SOR_CTXSW 0x00
13
14#define SOR_SUPER_STATE_0 0x01
15
16#define SOR_SUPER_STATE_1 0x02
17#define SOR_SUPER_STATE_ATTACHED (1 << 3)
18#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
19#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
20#define SOR_SUPER_STATE_HEAD_MODE_AWAKE (2 << 0)
21#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
22#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
23
24#define SOR_STATE_0 0x03
25
26#define SOR_STATE_1 0x04
27#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
28#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
29#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
30#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
31#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
32#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
33#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
34#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
35#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
36#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
37#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
38#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
39#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
40#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
41#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
42
43#define SOR_HEAD_STATE_0(x) (0x05 + (x))
44#define SOR_HEAD_STATE_1(x) (0x07 + (x))
45#define SOR_HEAD_STATE_2(x) (0x09 + (x))
46#define SOR_HEAD_STATE_3(x) (0x0b + (x))
47#define SOR_HEAD_STATE_4(x) (0x0d + (x))
48#define SOR_HEAD_STATE_5(x) (0x0f + (x))
49#define SOR_CRC_CNTRL 0x11
50#define SOR_DP_DEBUG_MVID 0x12
51
52#define SOR_CLK_CNTRL 0x13
53#define SOR_CLK_CNTRL_DP_LINK_SPEED_MASK (0x1f << 2)
54#define SOR_CLK_CNTRL_DP_LINK_SPEED(x) (((x) & 0x1f) << 2)
55#define SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62 (0x06 << 2)
56#define SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70 (0x0a << 2)
57#define SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40 (0x14 << 2)
58#define SOR_CLK_CNTRL_DP_CLK_SEL_MASK (3 << 0)
59#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK (0 << 0)
60#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK (1 << 0)
61#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK (2 << 0)
62#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK (3 << 0)
63
64#define SOR_CAP 0x14
65
66#define SOR_PWR 0x15
67#define SOR_PWR_TRIGGER (1 << 31)
68#define SOR_PWR_MODE_SAFE (1 << 28)
69#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
70
71#define SOR_TEST 0x16
72#define SOR_TEST_ATTACHED (1 << 10)
73#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
74#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
75
76#define SOR_PLL_0 0x17
77#define SOR_PLL_0_ICHPMP_MASK (0xf << 24)
78#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24)
79#define SOR_PLL_0_VCOCAP_MASK (0xf << 8)
80#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8)
81#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3)
82#define SOR_PLL_0_PLLREG_MASK (0x3 << 6)
83#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
84#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0)
85#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1)
86#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2)
87#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3)
88#define SOR_PLL_0_PULLDOWN (1 << 5)
89#define SOR_PLL_0_RESISTOR_EXT (1 << 4)
90#define SOR_PLL_0_VCOPD (1 << 2)
91#define SOR_PLL_0_POWER_OFF (1 << 0)
92
93#define SOR_PLL_1 0x18
94/* XXX: read-only bit? */
95#define SOR_PLL_1_TERM_COMPOUT (1 << 15)
96#define SOR_PLL_1_TMDS_TERM (1 << 8)
97
98#define SOR_PLL_2 0x19
99#define SOR_PLL_2_LVDS_ENABLE (1 << 25)
100#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
101#define SOR_PLL_2_PORT_POWERDOWN (1 << 23)
102#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22)
103#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18)
104#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17)
105
106#define SOR_PLL_3 0x1a
107#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
108#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
109
110#define SOR_CSTM 0x1b
111#define SOR_CSTM_LVDS (1 << 16)
112#define SOR_CSTM_LINK_ACT_B (1 << 15)
113#define SOR_CSTM_LINK_ACT_A (1 << 14)
114#define SOR_CSTM_UPPER (1 << 11)
115
116#define SOR_LVDS 0x1c
117#define SOR_CRC_A 0x1d
118#define SOR_CRC_B 0x1e
119#define SOR_BLANK 0x1f
120#define SOR_SEQ_CTL 0x20
121
122#define SOR_LANE_SEQ_CTL 0x21
123#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
124#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
125#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
126#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
127#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
128
129#define SOR_SEQ_INST(x) (0x22 + (x))
130
131#define SOR_PWM_DIV 0x32
132#define SOR_PWM_DIV_MASK 0xffffff
133
134#define SOR_PWM_CTL 0x33
135#define SOR_PWM_CTL_TRIGGER (1 << 31)
136#define SOR_PWM_CTL_CLK_SEL (1 << 30)
137#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
138
139#define SOR_VCRC_A_0 0x34
140#define SOR_VCRC_A_1 0x35
141#define SOR_VCRC_B_0 0x36
142#define SOR_VCRC_B_1 0x37
143#define SOR_CCRC_A_0 0x38
144#define SOR_CCRC_A_1 0x39
145#define SOR_CCRC_B_0 0x3a
146#define SOR_CCRC_B_1 0x3b
147#define SOR_EDATA_A_0 0x3c
148#define SOR_EDATA_A_1 0x3d
149#define SOR_EDATA_B_0 0x3e
150#define SOR_EDATA_B_1 0x3f
151#define SOR_COUNT_A_0 0x40
152#define SOR_COUNT_A_1 0x41
153#define SOR_COUNT_B_0 0x42
154#define SOR_COUNT_B_1 0x43
155#define SOR_DEBUG_A_0 0x44
156#define SOR_DEBUG_A_1 0x45
157#define SOR_DEBUG_B_0 0x46
158#define SOR_DEBUG_B_1 0x47
159#define SOR_TRIG 0x48
160#define SOR_MSCHECK 0x49
161#define SOR_XBAR_CTRL 0x4a
162#define SOR_XBAR_POL 0x4b
163
164#define SOR_DP_LINKCTL_0 0x4c
165#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
166#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
167#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
168#define SOR_DP_LINKCTL_TU_SIZE_MASK (0x7f << 2)
169#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
170#define SOR_DP_LINKCTL_ENABLE (1 << 0)
171
172#define SOR_DP_LINKCTL_1 0x4d
173
174#define SOR_LANE_DRIVE_CURRENT_0 0x4e
175#define SOR_LANE_DRIVE_CURRENT_1 0x4f
176#define SOR_LANE4_DRIVE_CURRENT_0 0x50
177#define SOR_LANE4_DRIVE_CURRENT_1 0x51
178#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
179#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
180#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
181#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
182
183#define SOR_LANE_PREEMPHASIS_0 0x52
184#define SOR_LANE_PREEMPHASIS_1 0x53
185#define SOR_LANE4_PREEMPHASIS_0 0x54
186#define SOR_LANE4_PREEMPHASIS_1 0x55
187#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
188#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
189#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
190#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
191
192#define SOR_LANE_POST_CURSOR_0 0x56
193#define SOR_LANE_POST_CURSOR_1 0x57
194#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
195#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
196#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
197#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
198
199#define SOR_DP_CONFIG_0 0x58
200#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
201#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
202#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
203#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK (0xf << 16)
204#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC(x) (((x) & 0xf) << 16)
205#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK (0x7f << 8)
206#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT(x) (((x) & 0x7f) << 8)
207#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
208#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
209
210#define SOR_DP_CONFIG_1 0x59
211#define SOR_DP_MN_0 0x5a
212#define SOR_DP_MN_1 0x5b
213
214#define SOR_DP_PADCTL_0 0x5c
215#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
216#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
217#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
218#define SOR_DP_PADCTL_TX_PU(x) (((x) & 0xff) << 8)
219#define SOR_DP_PADCTL_CM_TXD_3 (1 << 7)
220#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
221#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
222#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
223#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
224#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
225#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
226#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
227
228#define SOR_DP_PADCTL_1 0x5d
229
230#define SOR_DP_DEBUG_0 0x5e
231#define SOR_DP_DEBUG_1 0x5f
232
233#define SOR_DP_SPARE_0 0x60
234#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
235#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
236#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
237
238#define SOR_DP_SPARE_1 0x61
239#define SOR_DP_AUDIO_CTRL 0x62
240
241#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
242#define SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK (0x01ffff << 0)
243
244#define SOR_DP_AUDIO_VBLANK_SYMBOLS 0x64
245#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
246
247#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
248#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
249#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
250#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
251#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
252#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
253#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
254#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
255
256#define SOR_DP_TPG 0x6d
257#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
258#define SOR_DP_TPG_SCRAMBLER_MASK (3 << 4)
259#define SOR_DP_TPG_SCRAMBLER_FIBONACCI (2 << 4)
260#define SOR_DP_TPG_SCRAMBLER_GALIOS (1 << 4)
261#define SOR_DP_TPG_SCRAMBLER_NONE (0 << 4)
262#define SOR_DP_TPG_PATTERN_MASK (0xf << 0)
263#define SOR_DP_TPG_PATTERN_HBR2 (0x8 << 0)
264#define SOR_DP_TPG_PATTERN_CSTM (0x7 << 0)
265#define SOR_DP_TPG_PATTERN_PRBS7 (0x6 << 0)
266#define SOR_DP_TPG_PATTERN_SBLERRRATE (0x5 << 0)
267#define SOR_DP_TPG_PATTERN_D102 (0x4 << 0)
268#define SOR_DP_TPG_PATTERN_TRAIN3 (0x3 << 0)
269#define SOR_DP_TPG_PATTERN_TRAIN2 (0x2 << 0)
270#define SOR_DP_TPG_PATTERN_TRAIN1 (0x1 << 0)
271#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
272
273#define SOR_DP_TPG_CONFIG 0x6e
274#define SOR_DP_LQ_CSTM_0 0x6f
275#define SOR_DP_LQ_CSTM_1 0x70
276#define SOR_DP_LQ_CSTM_2 0x71
277
278#endif
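One detail worth calling out in sor.h: SOR_DP_LINKCTL_LANE_COUNT(x) does not store the lane count as a plain integer but as a contiguous low-bit mask, ((1 << x) - 1) << 16, so 1, 2 and 4 lanes become field values 0x1, 0x3 and 0xf. A standalone check of that encoding (hypothetical demo, not driver code):

#include <stdio.h>

/* same encoding as SOR_DP_LINKCTL_LANE_COUNT(x) above */
static unsigned int lane_count_field(unsigned int lanes)
{
	return ((1u << lanes) - 1) << 16;
}

int main(void)
{
	unsigned int counts[] = { 1, 2, 4 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u lanes -> 0x%08x\n", counts[i],
		       lane_count_field(counts[i]));

	return 0;
}

Output: 0x00010000, 0x00030000 and 0x000f0000 — one bit per lane, which is why the field's mask (0x1f << 16) is wider than a binary lane count would need.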
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d36efc13b16f..d642d4a02134 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -74,7 +74,7 @@ static void set_scanout(struct drm_crtc *crtc, int n)
74 drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]); 74 drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
75 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 75 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
76 } 76 }
77 tilcdc_crtc->scanout[n] = crtc->fb; 77 tilcdc_crtc->scanout[n] = crtc->primary->fb;
78 drm_framebuffer_reference(tilcdc_crtc->scanout[n]); 78 drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
79 tilcdc_crtc->dirty &= ~stat[n]; 79 tilcdc_crtc->dirty &= ~stat[n];
80 pm_runtime_put_sync(dev->dev); 80 pm_runtime_put_sync(dev->dev);
@@ -84,7 +84,7 @@ static void update_scanout(struct drm_crtc *crtc)
84{ 84{
85 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 85 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
86 struct drm_device *dev = crtc->dev; 86 struct drm_device *dev = crtc->dev;
87 struct drm_framebuffer *fb = crtc->fb; 87 struct drm_framebuffer *fb = crtc->primary->fb;
88 struct drm_gem_cma_object *gem; 88 struct drm_gem_cma_object *gem;
89 unsigned int depth, bpp; 89 unsigned int depth, bpp;
90 90
@@ -159,7 +159,7 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
159 return -EBUSY; 159 return -EBUSY;
160 } 160 }
161 161
162 crtc->fb = fb; 162 crtc->primary->fb = fb;
163 tilcdc_crtc->event = event; 163 tilcdc_crtc->event = event;
164 update_scanout(crtc); 164 update_scanout(crtc);
165 165
@@ -339,7 +339,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
339 if (priv->rev == 2) { 339 if (priv->rev == 2) {
340 unsigned int depth, bpp; 340 unsigned int depth, bpp;
341 341
342 drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp); 342 drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp);
343 switch (bpp) { 343 switch (bpp) {
344 case 16: 344 case 16:
345 break; 345 break;
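The tilcdc hunks are part of this cycle's tree-wide conversion from the legacy crtc->fb pointer to the new primary-plane abstraction: the framebuffer now hangs off the CRTC's primary plane instead of the CRTC itself. A toy model of the extra indirection, with hypothetical structs in place of the DRM ones:

#include <stdio.h>

struct fb    { int id; };
struct plane { struct fb *fb; };		/* the primary plane owns the fb */
struct crtc  { struct plane *primary; };	/* the crtc points at its plane */

int main(void)
{
	struct fb fb = { .id = 42 };
	struct plane primary = { .fb = &fb };
	struct crtc crtc = { .primary = &primary };

	/* before: crtc->fb->id; after: crtc->primary->fb->id */
	printf("scanout fb: %d\n", crtc.primary->fb->id);
	return 0;
}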
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 214b7992a3aa..4ab9f7171c4f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
412 int ret; 412 int ret;
413 413
414 spin_lock(&glob->lru_lock); 414 spin_lock(&glob->lru_lock);
415 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 415 ret = __ttm_bo_reserve(bo, false, true, false, 0);
416 416
417 spin_lock(&bdev->fence_lock); 417 spin_lock(&bdev->fence_lock);
418 (void) ttm_bo_wait(bo, false, false, true); 418 (void) ttm_bo_wait(bo, false, false, true);
@@ -443,7 +443,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
443 ttm_bo_add_to_lru(bo); 443 ttm_bo_add_to_lru(bo);
444 } 444 }
445 445
446 ww_mutex_unlock(&bo->resv->lock); 446 __ttm_bo_unreserve(bo);
447 } 447 }
448 448
449 kref_get(&bo->list_kref); 449 kref_get(&bo->list_kref);
@@ -494,7 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
494 sync_obj = driver->sync_obj_ref(bo->sync_obj); 494 sync_obj = driver->sync_obj_ref(bo->sync_obj);
495 spin_unlock(&bdev->fence_lock); 495 spin_unlock(&bdev->fence_lock);
496 496
497 ww_mutex_unlock(&bo->resv->lock); 497 __ttm_bo_unreserve(bo);
498 spin_unlock(&glob->lru_lock); 498 spin_unlock(&glob->lru_lock);
499 499
500 ret = driver->sync_obj_wait(sync_obj, false, interruptible); 500 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
514 return ret; 514 return ret;
515 515
516 spin_lock(&glob->lru_lock); 516 spin_lock(&glob->lru_lock);
517 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 517 ret = __ttm_bo_reserve(bo, false, true, false, 0);
518 518
519 /* 519 /*
520 * We raced, and lost, someone else holds the reservation now, 520 * We raced, and lost, someone else holds the reservation now,
@@ -532,7 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
532 spin_unlock(&bdev->fence_lock); 532 spin_unlock(&bdev->fence_lock);
533 533
534 if (ret || unlikely(list_empty(&bo->ddestroy))) { 534 if (ret || unlikely(list_empty(&bo->ddestroy))) {
535 ww_mutex_unlock(&bo->resv->lock); 535 __ttm_bo_unreserve(bo);
536 spin_unlock(&glob->lru_lock); 536 spin_unlock(&glob->lru_lock);
537 return ret; 537 return ret;
538 } 538 }
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
577 kref_get(&nentry->list_kref); 577 kref_get(&nentry->list_kref);
578 } 578 }
579 579
580 ret = ttm_bo_reserve_nolru(entry, false, true, false, 0); 580 ret = __ttm_bo_reserve(entry, false, true, false, 0);
581 if (remove_all && ret) { 581 if (remove_all && ret) {
582 spin_unlock(&glob->lru_lock); 582 spin_unlock(&glob->lru_lock);
583 ret = ttm_bo_reserve_nolru(entry, false, false, 583 ret = __ttm_bo_reserve(entry, false, false,
584 false, 0); 584 false, 0);
585 spin_lock(&glob->lru_lock); 585 spin_lock(&glob->lru_lock);
586 } 586 }
587 587
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
726 726
727 spin_lock(&glob->lru_lock); 727 spin_lock(&glob->lru_lock);
728 list_for_each_entry(bo, &man->lru, lru) { 728 list_for_each_entry(bo, &man->lru, lru) {
729 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 729 ret = __ttm_bo_reserve(bo, false, true, false, 0);
730 if (!ret) 730 if (!ret)
731 break; 731 break;
732 } 732 }
@@ -1451,6 +1451,7 @@ EXPORT_SYMBOL(ttm_bo_device_release);
1451int ttm_bo_device_init(struct ttm_bo_device *bdev, 1451int ttm_bo_device_init(struct ttm_bo_device *bdev,
1452 struct ttm_bo_global *glob, 1452 struct ttm_bo_global *glob,
1453 struct ttm_bo_driver *driver, 1453 struct ttm_bo_driver *driver,
1454 struct address_space *mapping,
1454 uint64_t file_page_offset, 1455 uint64_t file_page_offset,
1455 bool need_dma32) 1456 bool need_dma32)
1456{ 1457{
@@ -1472,7 +1473,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1472 0x10000000); 1473 0x10000000);
1473 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1474 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1474 INIT_LIST_HEAD(&bdev->ddestroy); 1475 INIT_LIST_HEAD(&bdev->ddestroy);
1475 bdev->dev_mapping = NULL; 1476 bdev->dev_mapping = mapping;
1476 bdev->glob = glob; 1477 bdev->glob = glob;
1477 bdev->need_dma32 = need_dma32; 1478 bdev->need_dma32 = need_dma32;
1478 bdev->val_seq = 0; 1479 bdev->val_seq = 0;
@@ -1629,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1629 1630
1630 spin_lock(&glob->lru_lock); 1631 spin_lock(&glob->lru_lock);
1631 list_for_each_entry(bo, &glob->swap_lru, swap) { 1632 list_for_each_entry(bo, &glob->swap_lru, swap) {
1632 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 1633 ret = __ttm_bo_reserve(bo, false, true, false, 0);
1633 if (!ret) 1634 if (!ret)
1634 break; 1635 break;
1635 } 1636 }
@@ -1696,7 +1697,7 @@ out:
1696 * already swapped buffer. 1697 * already swapped buffer.
1697 */ 1698 */
1698 1699
1699 ww_mutex_unlock(&bo->resv->lock); 1700 __ttm_bo_unreserve(bo);
1700 kref_put(&bo->list_kref, ttm_bo_release_list); 1701 kref_put(&bo->list_kref, ttm_bo_release_list);
1701 return ret; 1702 return ret;
1702} 1703}
@@ -1730,10 +1731,10 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1730 return -ERESTARTSYS; 1731 return -ERESTARTSYS;
1731 if (!ww_mutex_is_locked(&bo->resv->lock)) 1732 if (!ww_mutex_is_locked(&bo->resv->lock))
1732 goto out_unlock; 1733 goto out_unlock;
1733 ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL); 1734 ret = __ttm_bo_reserve(bo, true, false, false, NULL);
1734 if (unlikely(ret != 0)) 1735 if (unlikely(ret != 0))
1735 goto out_unlock; 1736 goto out_unlock;
1736 ww_mutex_unlock(&bo->resv->lock); 1737 __ttm_bo_unreserve(bo);
1737 1738
1738out_unlock: 1739out_unlock:
1739 mutex_unlock(&bo->wu_mutex); 1740 mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index c58eba33bd5f..bd850c9f4bca 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
 	unsigned long lpfn;
 	int ret;
 
@@ -66,11 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
+	if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+		aflags = DRM_MM_CREATE_TOP;
+
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  mem->page_alignment,
-					  placement->fpfn, lpfn,
-					  DRM_MM_SEARCH_BEST);
+	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+						  mem->page_alignment, 0,
+						  placement->fpfn, lpfn,
+						  DRM_MM_SEARCH_BEST,
+						  aflags);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 479e9418e3d7..e8dac8758528 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -46,7 +46,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 			ttm_bo_add_to_lru(bo);
 			entry->removed = false;
 		}
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 	}
 }
 
@@ -140,8 +140,8 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
-					   ticket);
+		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+				       ticket);
 
 		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
@@ -224,7 +224,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		ttm_bo_add_to_lru(bo);
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 53b51c4e671a..d2a053352789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -270,6 +270,52 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 }
 EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
+/**
+ * ttm_ref_object_exists - Check whether a caller has a valid ref object
+ * (has opened) a base object.
+ *
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller.
+ * @base: Pointer to a struct base object.
+ *
+ * Checks whether the caller identified by @tfile has put a valid USAGE
+ * reference object on the base object identified by @base.
+ */
+bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+			   struct ttm_base_object *base)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	struct drm_hash_item *hash;
+	struct ttm_ref_object *ref;
+
+	rcu_read_lock();
+	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+		goto out_false;
+
+	/*
+	 * Verify that the ref object is really pointing to our base object.
+	 * Our base object could actually be dead, and the ref object pointing
+	 * to another base object with the same handle.
+	 */
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	if (unlikely(base != ref->obj))
+		goto out_false;
+
+	/*
+	 * Verify that the ref->obj pointer was actually valid!
+	 */
+	rmb();
+	if (unlikely(atomic_read(&ref->kref.refcount) == 0))
+		goto out_false;
+
+	rcu_read_unlock();
+	return true;
+
+ out_false:
+	rcu_read_unlock();
+	return false;
+}
+EXPORT_SYMBOL(ttm_ref_object_exists);
+
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
 		       enum ttm_ref_type ref_type, bool *existed)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbadd49e4c4a..377176372da8 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -421,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 				      clips[i].x2 - clips[i].x1,
 				      clips[i].y2 - clips[i].y1);
 		if (ret)
-			goto unlock;
+			break;
 	}
 
 	if (ufb->obj->base.import_attach) {
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 0394811251bd..c041cd73f399 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -60,7 +60,7 @@ int udl_dumb_create(struct drm_file *file,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
 {
-	args->pitch = args->width * ((args->bpp + 1) / 8);
+	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 	args->size = args->pitch * args->height;
 	return udl_gem_create(file, dev,
 			      args->size, &args->handle);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 2ae1eb7d1635..cddc4fcf35cf 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -310,7 +310,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 
 {
 	struct drm_device *dev = crtc->dev;
-	struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
+	struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
 	struct udl_device *udl = dev->dev_private;
 	char *buf;
 	char *wrptr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 1e80152674b5..8bb26dcd9eae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -117,10 +117,10 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 		(void) vmw_context_binding_state_kill
 			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
+		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
-		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	struct vmw_resource *tmp;
 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 
@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(vmw_user_context_size == 0))
 		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index a75840211b3c..70ddce8358b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 			    struct ttm_placement *placement,
 			    bool interruptible)
 {
-	struct vmw_master *vmaster = dev_priv->active_master;
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 
-	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 	ttm_bo_unreserve(bo);
 
 err:
-	ttm_write_unlock(&vmaster->lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 			      struct vmw_dma_buffer *buf,
 			      bool pin, bool interruptible)
 {
-	struct vmw_master *vmaster = dev_priv->active_master;
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement *placement;
 	int ret;
 
-	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 err_unreserve:
 	ttm_bo_unreserve(bo);
 err:
-	ttm_write_unlock(&vmaster->lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 				struct vmw_dma_buffer *buf,
 				bool pin, bool interruptible)
 {
-	struct vmw_master *vmaster = dev_priv->active_master;
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	int ret = 0;
@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	placement = vmw_vram_placement;
 	placement.lpfn = bo->num_pages;
 
-	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 
 	ttm_bo_unreserve(bo);
 err_unlock:
-	ttm_write_unlock(&vmaster->lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0083cbf99edf..4a223bbea3b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -142,11 +142,11 @@
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
@@ -159,29 +159,28 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
 	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
 		      vmw_fence_obj_signaled_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
-		      vmw_fence_event_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 
 	/* these allow direct access to the framebuffers mark as master only */
 	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -194,19 +193,19 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_MASTER | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 		      vmw_shader_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 		      vmw_shader_destroy_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 		      vmw_gb_surface_define_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 		      vmw_gb_surface_reference_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED),
+		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -606,6 +605,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
+	ttm_lock_init(&dev_priv->reservation_sem);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -722,7 +722,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	ret = ttm_bo_device_init(&dev_priv->bdev,
 				 dev_priv->bo_global_ref.ref.object,
-				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+				 &vmw_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 VMWGFX_FILE_PAGE_OFFSET,
 				 false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -969,7 +971,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_no_shman;
 
 	file_priv->driver_priv = vmw_fp;
-	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
@@ -980,12 +981,70 @@ out_no_tfile:
 	return ret;
 }
 
-static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
-			       unsigned long arg)
+static struct vmw_master *vmw_master_check(struct drm_device *dev,
+					   struct drm_file *file_priv,
+					   unsigned int flags)
+{
+	int ret;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct vmw_master *vmaster;
+
+	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
+	    !(flags & DRM_AUTH))
+		return NULL;
+
+	ret = mutex_lock_interruptible(&dev->master_mutex);
+	if (unlikely(ret != 0))
+		return ERR_PTR(-ERESTARTSYS);
+
+	if (file_priv->is_master) {
+		mutex_unlock(&dev->master_mutex);
+		return NULL;
+	}
+
+	/*
+	 * Check if we were previously master, but now dropped.
+	 */
+	if (vmw_fp->locked_master) {
+		mutex_unlock(&dev->master_mutex);
+		DRM_ERROR("Dropped master trying to access ioctl that "
+			  "requires authentication.\n");
+		return ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&dev->master_mutex);
+
+	/*
+	 * Taking the drm_global_mutex after the TTM lock might deadlock
+	 */
+	if (!(flags & DRM_UNLOCKED)) {
+		DRM_ERROR("Refusing locked ioctl access.\n");
+		return ERR_PTR(-EDEADLK);
+	}
+
+	/*
+	 * Take the TTM lock. Possibly sleep waiting for the authenticating
+	 * master to become master again, or for a SIGTERM if the
+	 * authenticating master exits.
+	 */
+	vmaster = vmw_master(file_priv->master);
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		vmaster = ERR_PTR(ret);
+
+	return vmaster;
+}
+
+static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg,
+			      long (*ioctl_func)(struct file *, unsigned int,
+						 unsigned long))
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev = file_priv->minor->dev;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
+	struct vmw_master *vmaster;
+	unsigned int flags;
+	long ret;
 
 	/*
 	 * Do extra checking on driver private ioctls.
@@ -994,18 +1053,44 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
 	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
 		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
 		if (unlikely(ioctl->cmd_drv != cmd)) {
 			DRM_ERROR("Invalid command format, ioctl %d\n",
 				  nr - DRM_COMMAND_BASE);
 			return -EINVAL;
 		}
+		flags = ioctl->flags;
+	} else if (!drm_ioctl_flags(nr, &flags))
+		return -EINVAL;
+
+	vmaster = vmw_master_check(dev, file_priv, flags);
+	if (unlikely(IS_ERR(vmaster))) {
+		DRM_INFO("IOCTL ERROR %d\n", nr);
+		return PTR_ERR(vmaster);
 	}
 
-	return drm_ioctl(filp, cmd, arg);
+	ret = ioctl_func(filp, cmd, arg);
+	if (vmaster)
+		ttm_read_unlock(&vmaster->lock);
+
+	return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
 }
 
+#ifdef CONFIG_COMPAT
+static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg)
+{
+	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
+}
+#endif
+
 static void vmw_lastclose(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
@@ -1174,12 +1259,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct vmw_private *dev_priv =
 		container_of(nb, struct vmw_private, pm_nb);
-	struct vmw_master *vmaster = dev_priv->active_master;
 
 	switch (val) {
 	case PM_HIBERNATION_PREPARE:
 	case PM_SUSPEND_PREPARE:
-		ttm_suspend_lock(&vmaster->lock);
+		ttm_suspend_lock(&dev_priv->reservation_sem);
 
 		/**
 		 * This empties VRAM and unbinds all GMR bindings.
@@ -1193,7 +1277,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 	case PM_POST_HIBERNATION:
 	case PM_POST_SUSPEND:
 	case PM_POST_RESTORE:
-		ttm_suspend_unlock(&vmaster->lock);
+		ttm_suspend_unlock(&dev_priv->reservation_sem);
 
 		break;
 	case PM_RESTORE_PREPARE:
@@ -1314,14 +1398,14 @@ static const struct file_operations vmwgfx_driver_fops = {
 	.poll = vmw_fops_poll,
 	.read = vmw_fops_read,
 #if defined(CONFIG_COMPAT)
-	.compat_ioctl = drm_compat_ioctl,
+	.compat_ioctl = vmw_compat_ioctl,
 #endif
 	.llseek = noop_llseek,
 };
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	DRIVER_MODESET | DRIVER_PRIME,
+	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
 	.load = vmw_driver_load,
 	.unload = vmw_driver_unload,
 	.lastclose = vmw_lastclose,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 07831554dad7..6b252a887ae2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140228"
+#define VMWGFX_DRIVER_DATE "20140325"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 5
+#define VMWGFX_DRIVER_MINOR 6
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -487,6 +487,11 @@ struct vmw_private {
 	uint32_t num_3d_resources;
 
 	/*
+	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
+	 */
+	struct ttm_lock reservation_sem;
+
+	/*
 	 * Query processing. These members
 	 * are protected by the cmdbuf mutex.
 	 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index efb575a7996c..931490b9cfed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	vmw_kms_cursor_post_execbuf(dev_priv);
 
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ed5ce2a41bbf..a89ad938eacf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 	}
 
 	if (!vmw_kms_validate_mode_vram(vmw_priv,
-					info->fix.line_length,
+					var->xres * var->bits_per_pixel/8,
 					var->yoffset + var->yres)) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
 		return -EINVAL;
@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
 	struct vmw_private *vmw_priv = par->vmw_priv;
 	int ret;
 
+	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
+
 	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
 				 info->fix.line_length,
 				 par->bpp, par->depth);
@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+		vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 	}
 
@@ -377,14 +380,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 
 	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	/* interuptable? */
-	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
-	if (unlikely(ret != 0))
-		return ret;
+	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
 
 	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
-	if (!vmw_bo)
+	if (!vmw_bo) {
+		ret = -ENOMEM;
 		goto err_unlock;
+	}
 
 	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
 			      &ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 47b70949bf3a..37881ecf5d7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_present_arg *arg =
 		(struct drm_vmw_present_arg *)data;
 	struct vmw_surface *surface;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct drm_vmw_rect __user *clips_ptr;
 	struct drm_vmw_rect *clips = NULL;
 	struct drm_framebuffer *fb;
@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	}
 	vfb = vmw_framebuffer_to_vfb(fb);
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		goto out_no_ttm_lock;
 
@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	vmw_surface_unreference(&surface);
 
 out_no_surface:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 out_no_ttm_lock:
 	drm_framebuffer_unreference(fb);
 out_no_fb:
@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_fence_rep __user *user_fence_rep =
 		(struct drm_vmw_fence_rep __user *)
 		(unsigned long)arg->fence_rep;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct drm_vmw_rect __user *clips_ptr;
 	struct drm_vmw_rect *clips = NULL;
 	struct drm_framebuffer *fb;
@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 		goto out_no_ttm_lock;
 	}
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		goto out_no_ttm_lock;
 
@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 				      vfb, user_fence_rep,
 				      clips, num_clips);
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 out_no_ttm_lock:
 	drm_framebuffer_unreference(fb);
 out_no_fb:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8a650413dea5..a2dde5ad8138 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -468,7 +468,7 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
 			    head) {
-		if (crtc->fb != &framebuffer->base)
+		if (crtc->primary->fb != &framebuffer->base)
 			continue;
 		units[num_units++] = vmw_crtc_to_du(crtc);
 	}
@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
 	struct drm_clip_rect norect;
@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 
 	drm_modeset_lock_all(dev_priv->dev);
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0)) {
 		drm_modeset_unlock_all(dev_priv->dev);
 		return ret;
@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				   flags, color,
 				   clips, num_clips, inc, NULL);
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
 
@@ -883,7 +882,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
 
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->fb != &framebuffer->base)
+		if (crtc->primary->fb != &framebuffer->base)
 			continue;
 		units[num_units++] = vmw_crtc_to_du(crtc);
 	}
@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_framebuffer_dmabuf *vfbd =
 		vmw_framebuffer_to_vfbd(framebuffer);
 	struct drm_clip_rect norect;
@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 
 	drm_modeset_lock_all(dev_priv->dev);
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0)) {
 		drm_modeset_unlock_all(dev_priv->dev);
 		return ret;
@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 			clips, num_clips, increment, NULL);
 	}
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
 
@@ -1245,7 +1243,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->fb != &vfb->base)
+		if (crtc->primary->fb != &vfb->base)
 			continue;
 		units[num_units++] = vmw_crtc_to_du(crtc);
 	}
@@ -1382,7 +1380,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->fb != &vfb->base)
+		if (crtc->primary->fb != &vfb->base)
 			continue;
 		units[num_units++] = vmw_crtc_to_du(crtc);
 	}
@@ -1725,7 +1723,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
 		     uint32_t page_flip_flags)
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct drm_framebuffer *old_fb = crtc->fb;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
 	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
 	struct drm_file *file_priv ;
 	struct vmw_fence_obj *fence = NULL;
@@ -1743,7 +1741,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
 	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
 		return -EINVAL;
 
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 
 	/* do a full screen dirty update */
 	clips.x1 = clips.y1 = 0;
@@ -1783,7 +1781,7 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
 		return ret;
 
 out_no_fence:
-	crtc->fb = old_fb;
+	crtc->primary->fb = old_fb;
 	return ret;
 }
 
@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_update_layout_arg *arg =
 		(struct drm_vmw_update_layout_arg *)data;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	void __user *user_rects;
 	struct drm_vmw_rect *rects;
 	unsigned rects_size;
@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	int i;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 out_free:
 	kfree(rects);
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a055a26819c2..b2b9bd23aeee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -93,7 +93,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
 		if (crtc == NULL)
 			return 0;
-		fb = entry->base.crtc.fb;
+		fb = entry->base.crtc.primary->fb;
 
 		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
 					  fb->bits_per_pixel, fb->depth);
@@ -101,7 +101,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
 	if (!list_empty(&lds->active)) {
 		entry = list_entry(lds->active.next, typeof(*entry), active);
-		fb = entry->base.crtc.fb;
+		fb = entry->base.crtc.primary->fb;
 
 		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
 				   fb->bits_per_pixel, fb->depth);
@@ -259,7 +259,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 
 		connector->encoder = NULL;
 		encoder->crtc = NULL;
-		crtc->fb = NULL;
+		crtc->primary->fb = NULL;
 		crtc->enabled = false;
 
 		vmw_ldu_del_active(dev_priv, ldu);
@@ -280,7 +280,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 
 	vmw_fb_off(dev_priv);
 
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 	encoder->crtc = crtc;
 	connector->encoder = encoder;
 	crtc->x = set->x;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9757b57f8388..01d68f0a69dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -538,8 +538,13 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->prime.base.tfile == tfile ||
-		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
+
+	/* Check that the caller has opened the object. */
+	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+		return 0;
+
+	DRM_ERROR("Could not grant buffer access.\n");
+	return -EPERM;
 }
 
 /**
@@ -676,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 	struct vmw_dma_buffer *dma_buf;
 	uint32_t handle;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -696,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	vmw_dmabuf_unreference(&dma_buf);
 
 out_no_dmabuf:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	return ret;
 }
@@ -873,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	struct vmw_resource *tmp;
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
@@ -884,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(vmw_user_stream_size == 0))
 		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -932,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
@@ -985,14 +988,13 @@ int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_dma_buffer *dma_buf;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1004,7 +1006,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 
 	vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 22406c8651ea..a95d3a0cabe4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -307,7 +307,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 
 		connector->encoder = NULL;
 		encoder->crtc = NULL;
-		crtc->fb = NULL;
+		crtc->primary->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
 		crtc->enabled = false;
@@ -368,7 +368,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 
 		connector->encoder = NULL;
 		encoder->crtc = NULL;
-		crtc->fb = NULL;
+		crtc->primary->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
 		crtc->enabled = false;
@@ -381,7 +381,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 	connector->encoder = encoder;
 	encoder->crtc = crtc;
 	crtc->mode = *mode;
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 	crtc->x = set->x;
 	crtc->y = set->y;
 	crtc->enabled = true;
@@ -572,5 +572,5 @@ void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
 	BUG_ON(!sou->base.is_implicit);
 
 	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index ee3856578a12..c1559eeaffe9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_shader_create_arg *arg =
 		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_dma_buffer *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		goto out_bad_arg;
 	}
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
 	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
 			       shader_type, tfile, &arg->shader_handle);
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
 	vmw_dmabuf_unreference(&buffer);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e7af580ab977..4ecdbf3e59da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -36,11 +36,13 @@
  * @base: The TTM base object handling user-space visibility.
  * @srf: The surface metadata.
  * @size: TTM accounting size for the surface.
+ * @master: master of the creating client. Used for security check.
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
+	struct drm_master *master;
 };
 
 /**
@@ -624,6 +626,8 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	struct vmw_private *dev_priv = srf->res.dev_priv;
 	uint32_t size = user_srf->size;
 
+	if (user_srf->master)
+		drm_master_put(&user_srf->master);
 	kfree(srf->offsets);
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
@@ -697,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct vmw_surface_offset *cur_offset;
 	uint32_t num_sizes;
 	uint32_t size;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	const struct svga3d_surface_desc *desc;
 
 	if (unlikely(vmw_user_surface_size == 0))
@@ -723,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -820,6 +823,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	user_srf->prime.base.shareable = false;
 	user_srf->prime.base.tfile = NULL;
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
 
 	/**
 	 * From this point, the generic resource management functions
@@ -862,7 +867,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	rep->sid = user_srf->prime.base.hash.key;
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return 0;
out_no_copy:
 	kfree(srf->offsets);
@@ -873,7 +878,81 @@ out_no_sizes:
out_no_user_srf:
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+static int
+vmw_surface_handle_reference(struct vmw_private *dev_priv,
+			     struct drm_file *file_priv,
+			     uint32_t u_handle,
+			     enum drm_vmw_handle_type handle_type,
+			     struct ttm_base_object **base_p)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_surface *user_srf;
+	uint32_t handle;
+	struct ttm_base_object *base;
+	int ret;
+
+	if (handle_type == DRM_VMW_HANDLE_PRIME) {
+		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+		if (unlikely(ret != 0))
+			return ret;
+	} else {
+		if (unlikely(drm_is_render_client(file_priv))) {
+			DRM_ERROR("Render client refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+		handle = u_handle;
+	}
+
+	ret = -EINVAL;
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		goto out_no_lookup;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
+		DRM_ERROR("Referenced object is not a surface.\n");
+		goto out_bad_resource;
+	}
+
+	if (handle_type != DRM_VMW_HANDLE_PRIME) {
+		user_srf = container_of(base, struct vmw_user_surface,
+					prime.base);
+
+		/*
+		 * Make sure the surface creator has the same
+		 * authenticating master.
+		 */
+		if (drm_is_primary_client(file_priv) &&
+		    user_srf->master != file_priv->master) {
+			DRM_ERROR("Trying to reference surface outside of"
+				  " master domain.\n");
+			ret = -EACCES;
+			goto out_bad_resource;
+		}
+
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Could not add a reference to a surface.\n");
+			goto out_bad_resource;
+		}
+	}
+
+	*base_p = base;
+	return 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+out_no_lookup:
+	if (handle_type == DRM_VMW_HANDLE_PRIME)
+		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+
 	return ret;
 }
 
@@ -898,27 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	struct vmw_user_surface *user_srf;
 	struct drm_vmw_size __user *user_sizes;
 	struct ttm_base_object *base;
-	int ret = -EINVAL;
-
-	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
-	if (unlikely(base == NULL)) {
-		DRM_ERROR("Could not find surface to reference.\n");
-		return -EINVAL;
-	}
+	int ret;
 
-	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
-		goto out_bad_resource;
+	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+					   req->handle_type, &base);
+	if (unlikely(ret != 0))
+		return ret;
 
 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
 
-	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
-				 TTM_REF_USAGE, NULL);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not add a reference to a surface.\n");
-		goto out_no_reference;
-	}
-
 	rep->flags = srf->flags;
 	rep->format = srf->format;
 	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
@@ -931,10 +999,10 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
+		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
 		ret = -EFAULT;
 	}
-out_bad_resource:
-out_no_reference:
+
 	ttm_base_object_unref(&base);
 
 	return ret;
@@ -1173,7 +1241,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
@@ -1189,7 +1256,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1228,6 +1295,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	user_srf->prime.base.shareable = false;
 	user_srf->prime.base.tfile = NULL;
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
 
 	/**
 	 * From this point, the generic resource management functions
@@ -1283,12 +1352,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return 0;
 out_no_user_srf:
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
@@ -1315,14 +1384,10 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 	uint32_t backup_handle;
 	int ret = -EINVAL;
 
-	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
-	if (unlikely(base == NULL)) {
-		DRM_ERROR("Could not find surface to reference.\n");
-		return -EINVAL;
-	}
-
-	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
-		goto out_bad_resource;
+	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+					   req->handle_type, &base);
+	if (unlikely(ret != 0))
+		return ret;
 
 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
@@ -1331,13 +1396,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 		goto out_bad_resource;
 	}
 
-	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
-				 TTM_REF_USAGE, NULL);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not add a reference to a GB surface.\n");
-		goto out_bad_resource;
-	}
-
 	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
 	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
 					&backup_handle);
@@ -1346,8 +1404,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a GB surface "
 			  "backup buffer.\n");
-		(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-						 req->sid,
+		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
 						 TTM_REF_USAGE);
 		goto out_bad_resource;
 	}
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index bfb09d802abd..b10550ee1d89 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -102,6 +102,7 @@ u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
 {
 	return (u32)atomic_add_return(incrs, &sp->max_val);
 }
+EXPORT_SYMBOL(host1x_syncpt_incr_max);
 
 /*
  * Write cached syncpoint and waitbase values to hardware.
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index a8d017854615..c48f640db006 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -121,7 +121,7 @@ static int ipu_page_flip(struct drm_crtc *crtc,
 
 	ipu_crtc->newfb = fb;
 	ipu_crtc->page_flip_event = event;
-	crtc->fb = fb;
+	crtc->primary->fb = fb;
 
 	return 0;
 }
@@ -193,7 +193,7 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->fb,
+	return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->primary->fb,
 				  0, 0, mode->hdisplay, mode->vdisplay,
 				  x, y, mode->hdisplay, mode->vdisplay);
 }
@@ -219,7 +219,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 
 	if (ipu_crtc->newfb) {
 		ipu_crtc->newfb = NULL;
-		ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb,
+		ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.primary->fb,
 				   ipu_crtc->plane[0]->x, ipu_crtc->plane[0]->y);
 		ipu_crtc_handle_pageflip(ipu_crtc);
 	}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index b0c9b6ce4854..27a8d735dae0 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -68,7 +68,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
 
 	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
 	if (!cma_obj) {
-		DRM_LOG_KMS("entry is null.\n");
+		DRM_DEBUG_KMS("entry is null.\n");
 		return -EFAULT;
 	}
 
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index eb6f2b059821..fcf2d48ac6d1 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -29,11 +29,4 @@ config EXYNOS_LCD_S6E8AX0
 	  If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
 	  LCD control driver.
 
-config EXYNOS_DP
-	bool "EXYNOS DP driver support"
-	depends on ARCH_EXYNOS
-	default n
-	help
-	  This enables support for DP device.
-
 endif # EXYNOS_VIDEO
diff --git a/drivers/video/exynos/Makefile b/drivers/video/exynos/Makefile
index ec7772e452a9..b5b1bd228abb 100644
--- a/drivers/video/exynos/Makefile
+++ b/drivers/video/exynos/Makefile
@@ -5,4 +5,3 @@
 obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
 				  exynos_mipi_dsi_lowlevel.o
 obj-$(CONFIG_EXYNOS_LCD_S6E8AX0) += s6e8ax0.o
-obj-$(CONFIG_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o