aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 14:32:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 14:32:30 -0400
commit682b7c1c8ea8885aa681ddf530d6cf2ad4f2dc15 (patch)
tree882003bb4fc56af816246168f8c85d6dde8c6ed9
parent16b9057804c02e2d351e9c8f606e909b43cbd9e7 (diff)
parentbc1dfff04a5d4064ba0db1fab13f84ab4f333d2b (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie: "This is the main drm merge window pull request, changes all over the place, mostly normal levels of churn. Highlights: Core drm: More cleanups, fix race on connector/encoder naming, docs updates, object locking rework in prep for atomic modeset i915: mipi DSI support, valleyview power fixes, cursor size fixes, execlist refactoring, vblank improvements, userptr support, OOM handling improvements radeon: GPUVM tuning and large page size support, gart fixes, deep color HDMI support, HDMI audio cleanups nouveau: - displayport rework should fix lots of issues - initial gk20a support - gk110b support - gk208 fixes exynos: probe order fixes, HDMI changes, IPP consolidation msm: debugfs updates, misc fixes ast: ast2400 support, sync with UMS driver tegra: cleanups, hdmi + hw cursor for Tegra 124. panel: fixes existing panels add some new ones. ipuv3: moved from staging to drivers/gpu" * 'drm-next' of git://people.freedesktop.org/~airlied/linux: (761 commits) drm/nouveau/disp/dp: fix tmds passthrough on dp connector drm/nouveau/dp: probe dpcd to determine connectedness drm/nv50-: trigger update after all connectors disabled drm/nv50-: prepare for attaching a SOR to multiple heads drm/gf119-/disp: fix debug output on update failure drm/nouveau/disp/dp: make use of postcursor when its available drm/g94-/disp/dp: take max pullup value across all lanes drm/nouveau/bios/dp: parse lane postcursor data drm/nouveau/dp: fix support for dpms drm/nouveau: register a drm_dp_aux channel for each dp connector drm/g94-/disp: add method to power-off dp lanes drm/nouveau/disp/dp: maintain link in response to hpd signal drm/g94-/disp: bash and wait for something after changing lane power regs drm/nouveau/disp/dp: split link config/power into two steps drm/nv50/disp: train PIOR-attached DP from second supervisor drm/nouveau/disp/dp: make use of existing output data for link training drm/gf119/disp: start removing direct vbios parsing from supervisor drm/nv50/disp: start removing direct vbios parsing from supervisor drm/nouveau/disp/dp: maintain receiver caps in response to hpd signal drm/nouveau/disp/dp: create subclass for dp outputs ...
-rw-r--r--Documentation/DocBook/drm.tmpl1027
-rw-r--r--Documentation/EDID/1024x768.S2
-rw-r--r--Documentation/EDID/1280x1024.S2
-rw-r--r--Documentation/EDID/1600x1200.S2
-rw-r--r--Documentation/EDID/1680x1050.S2
-rw-r--r--Documentation/EDID/1920x1080.S2
-rw-r--r--Documentation/EDID/800x600.S41
-rw-r--r--Documentation/EDID/HOWTO.txt2
-rw-r--r--Documentation/EDID/edid.S17
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt2
-rw-r--r--Documentation/devicetree/bindings/panel/auo,b133xtn01.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/edt,et057090dhu.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/edt,et070080dh6.txt10
-rw-r--r--Documentation/devicetree/bindings/panel/edt,etm0700g0dh6.txt10
-rw-r--r--Documentation/devicetree/bindings/video/exynos_dp.txt4
-rw-r--r--Documentation/devicetree/bindings/video/exynos_hdmi.txt3
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/x86/kernel/early-quirks.c46
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/ast/Makefile4
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c410
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h24
-rw-r--r--drivers/gpu/drm/ast/ast_main.c97
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c100
-rw-r--r--drivers/gpu/drm/ast/ast_post.c902
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h67
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c6
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c7
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c8
-rw-r--r--drivers/gpu/drm/drm_bufs.c34
-rw-r--r--drivers/gpu/drm/drm_cache.c6
-rw-r--r--drivers/gpu/drm/drm_crtc.c421
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c25
-rw-r--r--drivers/gpu/drm/drm_edid.c293
-rw-r--r--drivers/gpu/drm/drm_edid_load.c23
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c77
-rw-r--r--drivers/gpu/drm/drm_fops.c9
-rw-r--r--drivers/gpu/drm/drm_gem.c19
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c461
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c9
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c247
-rw-r--r--drivers/gpu/drm/drm_pci.c159
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c154
-rw-r--r--drivers/gpu/drm/drm_platform.c40
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c76
-rw-r--r--drivers/gpu/drm/drm_stub.c61
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/drm_usb.c34
-rw-r--r--drivers/gpu/drm/exynos/Kconfig8
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c63
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c211
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h60
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_reg.c46
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c216
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c446
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h87
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c114
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c427
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c211
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c258
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c108
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c652
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c67
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c6
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig3
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c24
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c758
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c269
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c74
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c626
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h479
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c559
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c95
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c136
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c212
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h284
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c198
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c711
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c55
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1146
-rw-r--r--drivers/gpu/drm/i915/i915_params.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h780
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h101
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c331
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h64
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c83
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c97
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1854
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c581
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h96
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c190
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h19
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h5
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c589
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c40
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c260
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c23
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c4
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c16
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c168
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c770
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate.h48
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen6.c289
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen7.c253
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c479
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c820
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h168
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c45
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c59
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c239
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c221
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c108
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c2
-rw-r--r--drivers/gpu/drm/mga/mga_state.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c52
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c22
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c56
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h17
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c107
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h31
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c275
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c337
-rw-r--r--drivers/gpu/drm/nouveau/Makefile14
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c292
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c286
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c206
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c276
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h65
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h83
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/conn.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c152
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c152
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c212
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h58
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/port.h15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h85
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c107
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c1
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c3
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c99
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c4
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c92
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c40
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c17
-rw-r--r--drivers/gpu/drm/radeon/cik.c33
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c43
-rw-r--r--drivers/gpu/drm/radeon/cikd.h1
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h8
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h4
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h4
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c244
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c72
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c48
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h3
-rw-r--r--drivers/gpu/drm/radeon/ni.c17
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r100.c63
-rw-r--r--drivers/gpu/drm/radeon/r300.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c15
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c341
-rw-r--r--drivers/gpu/drm/radeon/r600d.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon.h55
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h40
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c282
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c123
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770.c13
-rw-r--r--drivers/gpu/drm/radeon/si.c33
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c24
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c13
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c7
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/bus.c75
-rw-r--r--drivers/gpu/drm/tegra/dc.c657
-rw-r--r--drivers/gpu/drm/tegra/dc.h33
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c35
-rw-r--r--drivers/gpu/drm/tegra/drm.c36
-rw-r--r--drivers/gpu/drm/tegra/drm.h58
-rw-r--r--drivers/gpu/drm/tegra/dsi.c250
-rw-r--r--drivers/gpu/drm/tegra/dsi.h10
-rw-r--r--drivers/gpu/drm/tegra/fb.c7
-rw-r--r--drivers/gpu/drm/tegra/gem.c3
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c8
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c202
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h5
-rw-r--r--drivers/gpu/drm/tegra/rgb.c31
-rw-r--r--drivers/gpu/drm/tegra/sor.c478
-rw-r--r--drivers/gpu/drm/tegra/sor.h4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c1
-rw-r--r--drivers/gpu/drm/via/via_dma.c2
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c10
-rw-r--r--drivers/gpu/host1x/bus.c12
-rw-r--r--drivers/gpu/ipu-v3/Kconfig7
-rw-r--r--drivers/gpu/ipu-v3/Makefile (renamed from drivers/staging/imx-drm/ipu-v3/Makefile)4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-common.c)82
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dc.c)3
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-di.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-dp.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dp.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h (renamed from drivers/staging/imx-drm/ipu-v3/ipu-prv.h)8
-rw-r--r--drivers/gpu/ipu-v3/ipu-smfc.c97
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/staging/imx-drm/Kconfig11
-rw-r--r--drivers/staging/imx-drm/Makefile1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c9
-rw-r--r--drivers/staging/imx-drm/imx-drm.h2
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c3
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c1
-rw-r--r--drivers/staging/imx-drm/imx-tve.c7
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c2
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c2
-rw-r--r--drivers/staging/imx-drm/parallel-display.c1
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--include/drm/drmP.h54
-rw-r--r--include/drm/drm_crtc.h92
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_dp_helper.h88
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_fb_helper.h2
-rw-r--r--include/drm/drm_flip_work.h1
-rw-r--r--include/drm/drm_mipi_dsi.h2
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_lock.h126
-rw-r--r--include/drm/drm_plane_helper.h24
-rw-r--r--include/drm/i915_pciids.h28
-rw-r--r--include/drm/ttm/ttm_bo_api.h5
-rw-r--r--include/uapi/drm/drm_mode.h16
-rw-r--r--include/uapi/drm/i915_drm.h17
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/video/imx-ipu-v3.h (renamed from drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h)16
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/Makefile3
-rw-r--r--lib/interval_tree.c6
-rw-r--r--lib/interval_tree_test.c (renamed from lib/interval_tree_test_main.c)0
447 files changed, 24787 insertions, 9542 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index ba60d93c1855..7df3134ebc0e 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -142,6 +142,12 @@
142 to register it with the DRM subsystem. 142 to register it with the DRM subsystem.
143 </para> 143 </para>
144 <para> 144 <para>
145 Newer drivers that no longer require a <structname>drm_bus</structname>
146 structure can alternatively use the low-level device initialization and
147 registration functions such as <function>drm_dev_alloc()</function> and
148 <function>drm_dev_register()</function> directly.
149 </para>
150 <para>
145 The <structname>drm_driver</structname> structure contains static 151 The <structname>drm_driver</structname> structure contains static
146 information that describes the driver and features it supports, and 152 information that describes the driver and features it supports, and
147 pointers to methods that the DRM core will call to implement the DRM API. 153 pointers to methods that the DRM core will call to implement the DRM API.
@@ -282,6 +288,36 @@ char *date;</synopsis>
282 </sect3> 288 </sect3>
283 </sect2> 289 </sect2>
284 <sect2> 290 <sect2>
291 <title>Device Registration</title>
292 <para>
293 A number of functions are provided to help with device registration.
294 The functions deal with PCI, USB and platform devices, respectively.
295 </para>
296!Edrivers/gpu/drm/drm_pci.c
297!Edrivers/gpu/drm/drm_usb.c
298!Edrivers/gpu/drm/drm_platform.c
299 <para>
300 New drivers that no longer rely on the services provided by the
301 <structname>drm_bus</structname> structure can call the low-level
302 device registration functions directly. The
303 <function>drm_dev_alloc()</function> function can be used to allocate
304 and initialize a new <structname>drm_device</structname> structure.
305 Drivers will typically want to perform some additional setup on this
306 structure, such as allocating driver-specific data and storing a
307 pointer to it in the DRM device's <structfield>dev_private</structfield>
308 field. Drivers should also set the device's unique name using the
309 <function>drm_dev_set_unique()</function> function. After it has been
310 set up a device can be registered with the DRM subsystem by calling
311 <function>drm_dev_register()</function>. This will cause the device to
312 be exposed to userspace and will call the driver's
313 <structfield>.load()</structfield> implementation. When a device is
314 removed, the DRM device can safely be unregistered and freed by calling
315 <function>drm_dev_unregister()</function> followed by a call to
316 <function>drm_dev_unref()</function>.
317 </para>
318!Edrivers/gpu/drm/drm_stub.c
319 </sect2>
320 <sect2>
285 <title>Driver Load</title> 321 <title>Driver Load</title>
286 <para> 322 <para>
287 The <methodname>load</methodname> method is the driver and device 323 The <methodname>load</methodname> method is the driver and device
@@ -342,21 +378,13 @@ char *date;</synopsis>
342 <sect4> 378 <sect4>
343 <title>Managed IRQ Registration</title> 379 <title>Managed IRQ Registration</title>
344 <para> 380 <para>
345 Both the <function>drm_irq_install</function> and
346 <function>drm_irq_uninstall</function> functions get the device IRQ by
347 calling <function>drm_dev_to_irq</function>. This inline function will
348 call a bus-specific operation to retrieve the IRQ number. For platform
349 devices, <function>platform_get_irq</function>(..., 0) is used to
350 retrieve the IRQ number.
351 </para>
352 <para>
353 <function>drm_irq_install</function> starts by calling the 381 <function>drm_irq_install</function> starts by calling the
354 <methodname>irq_preinstall</methodname> driver operation. The operation 382 <methodname>irq_preinstall</methodname> driver operation. The operation
355 is optional and must make sure that the interrupt will not get fired by 383 is optional and must make sure that the interrupt will not get fired by
356 clearing all pending interrupt flags or disabling the interrupt. 384 clearing all pending interrupt flags or disabling the interrupt.
357 </para> 385 </para>
358 <para> 386 <para>
359 The IRQ will then be requested by a call to 387 The passed-in IRQ will then be requested by a call to
360 <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver 388 <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
361 feature flag is set, a shared (IRQF_SHARED) IRQ handler will be 389 feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
362 requested. 390 requested.
@@ -1799,6 +1827,12 @@ void intel_crt_init(struct drm_device *dev)
1799 <title>KMS API Functions</title> 1827 <title>KMS API Functions</title>
1800!Edrivers/gpu/drm/drm_crtc.c 1828!Edrivers/gpu/drm/drm_crtc.c
1801 </sect2> 1829 </sect2>
1830 <sect2>
1831 <title>KMS Locking</title>
1832!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
1833!Iinclude/drm/drm_modeset_lock.h
1834!Edrivers/gpu/drm/drm_modeset_lock.c
1835 </sect2>
1802 </sect1> 1836 </sect1>
1803 1837
1804 <!-- Internals: kms helper functions --> 1838 <!-- Internals: kms helper functions -->
@@ -1903,8 +1937,8 @@ void intel_crt_init(struct drm_device *dev)
1903 <para> 1937 <para>
1904 The function filters out modes larger than 1938 The function filters out modes larger than
1905 <parameter>max_width</parameter> and <parameter>max_height</parameter> 1939 <parameter>max_width</parameter> and <parameter>max_height</parameter>
1906 if specified. It then calls the connector 1940 if specified. It then calls the optional connector
1907 <methodname>mode_valid</methodname> helper operation for each mode in 1941 <methodname>mode_valid</methodname> helper operation for each mode in
1908 the probed list to check whether the mode is valid for the connector. 1942 the probed list to check whether the mode is valid for the connector.
1909 </para> 1943 </para>
1910 </listitem> 1944 </listitem>
@@ -2265,7 +2299,7 @@ void intel_crt_init(struct drm_device *dev)
2265 <para> 2299 <para>
2266 Verify whether a mode is valid for the connector. Return MODE_OK for 2300 Verify whether a mode is valid for the connector. Return MODE_OK for
2267 supported modes and one of the enum drm_mode_status values (MODE_*) 2301 supported modes and one of the enum drm_mode_status values (MODE_*)
2268 for unsupported modes. This operation is mandatory. 2302 for unsupported modes. This operation is optional.
2269 </para> 2303 </para>
2270 <para> 2304 <para>
2271 As the mode rejection reason is currently not used beside for 2305 As the mode rejection reason is currently not used beside for
@@ -2450,6 +2484,863 @@ void intel_crt_init(struct drm_device *dev)
2450 pointer to the target object, a pointer to the previously created property 2484 pointer to the target object, a pointer to the previously created property
2451 and an initial instance value. 2485 and an initial instance value.
2452 </para> 2486 </para>
2487 <sect2>
2488 <title>Existing KMS Properties</title>
2489 <para>
2490 The following table gives description of drm properties exposed by various
2491 modules/drivers.
2492 </para>
2493 <table border="1" cellpadding="0" cellspacing="0">
2494 <tbody>
2495 <tr style="font-weight: bold;">
2496 <td valign="top" >Owner Module/Drivers</td>
2497 <td valign="top" >Group</td>
2498 <td valign="top" >Property Name</td>
2499 <td valign="top" >Type</td>
2500 <td valign="top" >Property Values</td>
2501 <td valign="top" >Object attached</td>
2502 <td valign="top" >Description/Restrictions</td>
2503 </tr>
2504 <tr>
2505 <td rowspan="20" valign="top" >DRM</td>
2506 <td rowspan="2" valign="top" >Generic</td>
2507 <td valign="top" >“EDID”</td>
2508 <td valign="top" >BLOB | IMMUTABLE</td>
2509 <td valign="top" >0</td>
2510 <td valign="top" >Connector</td>
2511 <td valign="top" >Contains id of edid blob ptr object.</td>
2512 </tr>
2513 <tr>
2514 <td valign="top" >“DPMS”</td>
2515 <td valign="top" >ENUM</td>
2516 <td valign="top" >{ “On”, “Standby”, “Suspend”, “Off” }</td>
2517 <td valign="top" >Connector</td>
2518 <td valign="top" >Contains DPMS operation mode value.</td>
2519 </tr>
2520 <tr>
2521 <td rowspan="1" valign="top" >Plane</td>
2522 <td valign="top" >“type”</td>
2523 <td valign="top" >ENUM | IMMUTABLE</td>
2524 <td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
2525 <td valign="top" >Plane</td>
2526 <td valign="top" >Plane type</td>
2527 </tr>
2528 <tr>
2529 <td rowspan="2" valign="top" >DVI-I</td>
2530 <td valign="top" >“subconnector”</td>
2531 <td valign="top" >ENUM</td>
2532 <td valign="top" >{ “Unknown”, “DVI-D”, “DVI-A” }</td>
2533 <td valign="top" >Connector</td>
2534 <td valign="top" >TBD</td>
2535 </tr>
2536 <tr>
2537 <td valign="top" >“select subconnector”</td>
2538 <td valign="top" >ENUM</td>
2539 <td valign="top" >{ “Automatic”, “DVI-D”, “DVI-A” }</td>
2540 <td valign="top" >Connector</td>
2541 <td valign="top" >TBD</td>
2542 </tr>
2543 <tr>
2544 <td rowspan="13" valign="top" >TV</td>
2545 <td valign="top" >“subconnector”</td>
2546 <td valign="top" >ENUM</td>
2547 <td valign="top" >{ "Unknown", "Composite", "SVIDEO", "Component", "SCART" }</td>
2548 <td valign="top" >Connector</td>
2549 <td valign="top" >TBD</td>
2550 </tr>
2551 <tr>
2552 <td valign="top" >“select subconnector”</td>
2553 <td valign="top" >ENUM</td>
2554 <td valign="top" >{ "Automatic", "Composite", "SVIDEO", "Component", "SCART" }</td>
2555 <td valign="top" >Connector</td>
2556 <td valign="top" >TBD</td>
2557 </tr>
2558 <tr>
2559 <td valign="top" >“mode”</td>
2560 <td valign="top" >ENUM</td>
2561 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2562 <td valign="top" >Connector</td>
2563 <td valign="top" >TBD</td>
2564 </tr>
2565 <tr>
2566 <td valign="top" >“left margin”</td>
2567 <td valign="top" >RANGE</td>
2568 <td valign="top" >Min=0, Max=100</td>
2569 <td valign="top" >Connector</td>
2570 <td valign="top" >TBD</td>
2571 </tr>
2572 <tr>
2573 <td valign="top" >“right margin”</td>
2574 <td valign="top" >RANGE</td>
2575 <td valign="top" >Min=0, Max=100</td>
2576 <td valign="top" >Connector</td>
2577 <td valign="top" >TBD</td>
2578 </tr>
2579 <tr>
2580 <td valign="top" >“top margin”</td>
2581 <td valign="top" >RANGE</td>
2582 <td valign="top" >Min=0, Max=100</td>
2583 <td valign="top" >Connector</td>
2584 <td valign="top" >TBD</td>
2585 </tr>
2586 <tr>
2587 <td valign="top" >“bottom margin”</td>
2588 <td valign="top" >RANGE</td>
2589 <td valign="top" >Min=0, Max=100</td>
2590 <td valign="top" >Connector</td>
2591 <td valign="top" >TBD</td>
2592 </tr>
2593 <tr>
2594 <td valign="top" >“brightness”</td>
2595 <td valign="top" >RANGE</td>
2596 <td valign="top" >Min=0, Max=100</td>
2597 <td valign="top" >Connector</td>
2598 <td valign="top" >TBD</td>
2599 </tr>
2600 <tr>
2601 <td valign="top" >“contrast”</td>
2602 <td valign="top" >RANGE</td>
2603 <td valign="top" >Min=0, Max=100</td>
2604 <td valign="top" >Connector</td>
2605 <td valign="top" >TBD</td>
2606 </tr>
2607 <tr>
2608 <td valign="top" >“flicker reduction”</td>
2609 <td valign="top" >RANGE</td>
2610 <td valign="top" >Min=0, Max=100</td>
2611 <td valign="top" >Connector</td>
2612 <td valign="top" >TBD</td>
2613 </tr>
2614 <tr>
2615 <td valign="top" >“overscan”</td>
2616 <td valign="top" >RANGE</td>
2617 <td valign="top" >Min=0, Max=100</td>
2618 <td valign="top" >Connector</td>
2619 <td valign="top" >TBD</td>
2620 </tr>
2621 <tr>
2622 <td valign="top" >“saturation”</td>
2623 <td valign="top" >RANGE</td>
2624 <td valign="top" >Min=0, Max=100</td>
2625 <td valign="top" >Connector</td>
2626 <td valign="top" >TBD</td>
2627 </tr>
2628 <tr>
2629 <td valign="top" >“hue”</td>
2630 <td valign="top" >RANGE</td>
2631 <td valign="top" >Min=0, Max=100</td>
2632 <td valign="top" >Connector</td>
2633 <td valign="top" >TBD</td>
2634 </tr>
2635 <tr>
2636 <td rowspan="2" valign="top" >Optional</td>
2637 <td valign="top" >“scaling mode”</td>
2638 <td valign="top" >ENUM</td>
2639 <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
2640 <td valign="top" >Connector</td>
2641 <td valign="top" >TBD</td>
2642 </tr>
2643 <tr>
2644 <td valign="top" >“dirty”</td>
2645 <td valign="top" >ENUM | IMMUTABLE</td>
2646 <td valign="top" >{ "Off", "On", "Annotate" }</td>
2647 <td valign="top" >Connector</td>
2648 <td valign="top" >TBD</td>
2649 </tr>
2650 <tr>
2651 <td rowspan="21" valign="top" >i915</td>
2652 <td rowspan="3" valign="top" >Generic</td>
2653 <td valign="top" >"Broadcast RGB"</td>
2654 <td valign="top" >ENUM</td>
2655 <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
2656 <td valign="top" >Connector</td>
2657 <td valign="top" >TBD</td>
2658 </tr>
2659 <tr>
2660 <td valign="top" >“audio”</td>
2661 <td valign="top" >ENUM</td>
2662 <td valign="top" >{ "force-dvi", "off", "auto", "on" }</td>
2663 <td valign="top" >Connector</td>
2664 <td valign="top" >TBD</td>
2665 </tr>
2666 <tr>
2667 <td valign="top" >Standard name as in DRM</td>
2668 <td valign="top" >Standard type as in DRM</td>
2669 <td valign="top" >Standard value as in DRM</td>
2670 <td valign="top" >Standard Object as in DRM</td>
2671 <td valign="top" >TBD</td>
2672 </tr>
2673 <tr>
2674 <td rowspan="17" valign="top" >SDVO-TV</td>
2675 <td valign="top" >“mode”</td>
2676 <td valign="top" >ENUM</td>
2677 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2678 <td valign="top" >Connector</td>
2679 <td valign="top" >TBD</td>
2680 </tr>
2681 <tr>
2682 <td valign="top" >"left_margin"</td>
2683 <td valign="top" >RANGE</td>
2684 <td valign="top" >Min=0, Max= SDVO dependent</td>
2685 <td valign="top" >Connector</td>
2686 <td valign="top" >TBD</td>
2687 </tr>
2688 <tr>
2689 <td valign="top" >"right_margin"</td>
2690 <td valign="top" >RANGE</td>
2691 <td valign="top" >Min=0, Max= SDVO dependent</td>
2692 <td valign="top" >Connector</td>
2693 <td valign="top" >TBD</td>
2694 </tr>
2695 <tr>
2696 <td valign="top" >"top_margin"</td>
2697 <td valign="top" >RANGE</td>
2698 <td valign="top" >Min=0, Max= SDVO dependent</td>
2699 <td valign="top" >Connector</td>
2700 <td valign="top" >TBD</td>
2701 </tr>
2702 <tr>
2703 <td valign="top" >"bottom_margin"</td>
2704 <td valign="top" >RANGE</td>
2705 <td valign="top" >Min=0, Max= SDVO dependent</td>
2706 <td valign="top" >Connector</td>
2707 <td valign="top" >TBD</td>
2708 </tr>
2709 <tr>
2710 <td valign="top" >“hpos”</td>
2711 <td valign="top" >RANGE</td>
2712 <td valign="top" >Min=0, Max= SDVO dependent</td>
2713 <td valign="top" >Connector</td>
2714 <td valign="top" >TBD</td>
2715 </tr>
2716 <tr>
2717 <td valign="top" >“vpos”</td>
2718 <td valign="top" >RANGE</td>
2719 <td valign="top" >Min=0, Max= SDVO dependent</td>
2720 <td valign="top" >Connector</td>
2721 <td valign="top" >TBD</td>
2722 </tr>
2723 <tr>
2724 <td valign="top" >“contrast”</td>
2725 <td valign="top" >RANGE</td>
2726 <td valign="top" >Min=0, Max= SDVO dependent</td>
2727 <td valign="top" >Connector</td>
2728 <td valign="top" >TBD</td>
2729 </tr>
2730 <tr>
2731 <td valign="top" >“saturation”</td>
2732 <td valign="top" >RANGE</td>
2733 <td valign="top" >Min=0, Max= SDVO dependent</td>
2734 <td valign="top" >Connector</td>
2735 <td valign="top" >TBD</td>
2736 </tr>
2737 <tr>
2738 <td valign="top" >“hue”</td>
2739 <td valign="top" >RANGE</td>
2740 <td valign="top" >Min=0, Max= SDVO dependent</td>
2741 <td valign="top" >Connector</td>
2742 <td valign="top" >TBD</td>
2743 </tr>
2744 <tr>
2745 <td valign="top" >“sharpness”</td>
2746 <td valign="top" >RANGE</td>
2747 <td valign="top" >Min=0, Max= SDVO dependent</td>
2748 <td valign="top" >Connector</td>
2749 <td valign="top" >TBD</td>
2750 </tr>
2751 <tr>
2752 <td valign="top" >“flicker_filter”</td>
2753 <td valign="top" >RANGE</td>
2754 <td valign="top" >Min=0, Max= SDVO dependent</td>
2755 <td valign="top" >Connector</td>
2756 <td valign="top" >TBD</td>
2757 </tr>
2758 <tr>
2759 <td valign="top" >“flicker_filter_adaptive”</td>
2760 <td valign="top" >RANGE</td>
2761 <td valign="top" >Min=0, Max= SDVO dependent</td>
2762 <td valign="top" >Connector</td>
2763 <td valign="top" >TBD</td>
2764 </tr>
2765 <tr>
2766 <td valign="top" >“flicker_filter_2d”</td>
2767 <td valign="top" >RANGE</td>
2768 <td valign="top" >Min=0, Max= SDVO dependent</td>
2769 <td valign="top" >Connector</td>
2770 <td valign="top" >TBD</td>
2771 </tr>
2772 <tr>
2773 <td valign="top" >“tv_chroma_filter”</td>
2774 <td valign="top" >RANGE</td>
2775 <td valign="top" >Min=0, Max= SDVO dependent</td>
2776 <td valign="top" >Connector</td>
2777 <td valign="top" >TBD</td>
2778 </tr>
2779 <tr>
2780 <td valign="top" >“tv_luma_filter”</td>
2781 <td valign="top" >RANGE</td>
2782 <td valign="top" >Min=0, Max= SDVO dependent</td>
2783 <td valign="top" >Connector</td>
2784 <td valign="top" >TBD</td>
2785 </tr>
2786 <tr>
2787 <td valign="top" >“dot_crawl”</td>
2788 <td valign="top" >RANGE</td>
2789 <td valign="top" >Min=0, Max=1</td>
2790 <td valign="top" >Connector</td>
2791 <td valign="top" >TBD</td>
2792 </tr>
2793 <tr>
2794 <td valign="top" >SDVO-TV/LVDS</td>
2795 <td valign="top" >“brightness”</td>
2796 <td valign="top" >RANGE</td>
2797 <td valign="top" >Min=0, Max= SDVO dependent</td>
2798 <td valign="top" >Connector</td>
2799 <td valign="top" >TBD</td>
2800 </tr>
2801 <tr>
2802 <td rowspan="3" valign="top" >CDV gma-500</td>
2803 <td rowspan="3" valign="top" >Generic</td>
2804 <td valign="top" >"Broadcast RGB"</td>
2805 <td valign="top" >ENUM</td>
2806 <td valign="top" >{ “Full”, “Limited 16:235” }</td>
2807 <td valign="top" >Connector</td>
2808 <td valign="top" >TBD</td>
2809 </tr>
2810 <tr>
2811 <td valign="top" >"Broadcast RGB"</td>
2812 <td valign="top" >ENUM</td>
2813 <td valign="top" >{ “off”, “auto”, “on” }</td>
2814 <td valign="top" >Connector</td>
2815 <td valign="top" >TBD</td>
2816 </tr>
2817 <tr>
2818 <td valign="top" >Standard name as in DRM</td>
2819 <td valign="top" >Standard type as in DRM</td>
2820 <td valign="top" >Standard value as in DRM</td>
2821 <td valign="top" >Standard Object as in DRM</td>
2822 <td valign="top" >TBD</td>
2823 </tr>
2824 <tr>
2825 <td rowspan="20" valign="top" >Poulsbo</td>
2826 <td rowspan="2" valign="top" >Generic</td>
2827 <td valign="top" >“backlight”</td>
2828 <td valign="top" >RANGE</td>
2829 <td valign="top" >Min=0, Max=100</td>
2830 <td valign="top" >Connector</td>
2831 <td valign="top" >TBD</td>
2832 </tr>
2833 <tr>
2834 <td valign="top" >Standard name as in DRM</td>
2835 <td valign="top" >Standard type as in DRM</td>
2836 <td valign="top" >Standard value as in DRM</td>
2837 <td valign="top" >Standard Object as in DRM</td>
2838 <td valign="top" >TBD</td>
2839 </tr>
2840 <tr>
2841 <td rowspan="17" valign="top" >SDVO-TV</td>
2842 <td valign="top" >“mode”</td>
2843 <td valign="top" >ENUM</td>
2844 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2845 <td valign="top" >Connector</td>
2846 <td valign="top" >TBD</td>
2847 </tr>
2848 <tr>
2849 <td valign="top" >"left_margin"</td>
2850 <td valign="top" >RANGE</td>
2851 <td valign="top" >Min=0, Max= SDVO dependent</td>
2852 <td valign="top" >Connector</td>
2853 <td valign="top" >TBD</td>
2854 </tr>
2855 <tr>
2856 <td valign="top" >"right_margin"</td>
2857 <td valign="top" >RANGE</td>
2858 <td valign="top" >Min=0, Max= SDVO dependent</td>
2859 <td valign="top" >Connector</td>
2860 <td valign="top" >TBD</td>
2861 </tr>
2862 <tr>
2863 <td valign="top" >"top_margin"</td>
2864 <td valign="top" >RANGE</td>
2865 <td valign="top" >Min=0, Max= SDVO dependent</td>
2866 <td valign="top" >Connector</td>
2867 <td valign="top" >TBD</td>
2868 </tr>
2869 <tr>
2870 <td valign="top" >"bottom_margin"</td>
2871 <td valign="top" >RANGE</td>
2872 <td valign="top" >Min=0, Max= SDVO dependent</td>
2873 <td valign="top" >Connector</td>
2874 <td valign="top" >TBD</td>
2875 </tr>
2876 <tr>
2877 <td valign="top" >“hpos”</td>
2878 <td valign="top" >RANGE</td>
2879 <td valign="top" >Min=0, Max= SDVO dependent</td>
2880 <td valign="top" >Connector</td>
2881 <td valign="top" >TBD</td>
2882 </tr>
2883 <tr>
2884 <td valign="top" >“vpos”</td>
2885 <td valign="top" >RANGE</td>
2886 <td valign="top" >Min=0, Max= SDVO dependent</td>
2887 <td valign="top" >Connector</td>
2888 <td valign="top" >TBD</td>
2889 </tr>
2890 <tr>
2891 <td valign="top" >“contrast”</td>
2892 <td valign="top" >RANGE</td>
2893 <td valign="top" >Min=0, Max= SDVO dependent</td>
2894 <td valign="top" >Connector</td>
2895 <td valign="top" >TBD</td>
2896 </tr>
2897 <tr>
2898 <td valign="top" >“saturation”</td>
2899 <td valign="top" >RANGE</td>
2900 <td valign="top" >Min=0, Max= SDVO dependent</td>
2901 <td valign="top" >Connector</td>
2902 <td valign="top" >TBD</td>
2903 </tr>
2904 <tr>
2905 <td valign="top" >“hue”</td>
2906 <td valign="top" >RANGE</td>
2907 <td valign="top" >Min=0, Max= SDVO dependent</td>
2908 <td valign="top" >Connector</td>
2909 <td valign="top" >TBD</td>
2910 </tr>
2911 <tr>
2912 <td valign="top" >“sharpness”</td>
2913 <td valign="top" >RANGE</td>
2914 <td valign="top" >Min=0, Max= SDVO dependent</td>
2915 <td valign="top" >Connector</td>
2916 <td valign="top" >TBD</td>
2917 </tr>
2918 <tr>
2919 <td valign="top" >“flicker_filter”</td>
2920 <td valign="top" >RANGE</td>
2921 <td valign="top" >Min=0, Max= SDVO dependent</td>
2922 <td valign="top" >Connector</td>
2923 <td valign="top" >TBD</td>
2924 </tr>
2925 <tr>
2926 <td valign="top" >“flicker_filter_adaptive”</td>
2927 <td valign="top" >RANGE</td>
2928 <td valign="top" >Min=0, Max= SDVO dependent</td>
2929 <td valign="top" >Connector</td>
2930 <td valign="top" >TBD</td>
2931 </tr>
2932 <tr>
2933 <td valign="top" >“flicker_filter_2d”</td>
2934 <td valign="top" >RANGE</td>
2935 <td valign="top" >Min=0, Max= SDVO dependent</td>
2936 <td valign="top" >Connector</td>
2937 <td valign="top" >TBD</td>
2938 </tr>
2939 <tr>
2940 <td valign="top" >“tv_chroma_filter”</td>
2941 <td valign="top" >RANGE</td>
2942 <td valign="top" >Min=0, Max= SDVO dependent</td>
2943 <td valign="top" >Connector</td>
2944 <td valign="top" >TBD</td>
2945 </tr>
2946 <tr>
2947 <td valign="top" >“tv_luma_filter”</td>
2948 <td valign="top" >RANGE</td>
2949 <td valign="top" >Min=0, Max= SDVO dependent</td>
2950 <td valign="top" >Connector</td>
2951 <td valign="top" >TBD</td>
2952 </tr>
2953 <tr>
2954 <td valign="top" >“dot_crawl”</td>
2955 <td valign="top" >RANGE</td>
2956 <td valign="top" >Min=0, Max=1</td>
2957 <td valign="top" >Connector</td>
2958 <td valign="top" >TBD</td>
2959 </tr>
2960 <tr>
2961 <td valign="top" >SDVO-TV/LVDS</td>
2962 <td valign="top" >“brightness”</td>
2963 <td valign="top" >RANGE</td>
2964 <td valign="top" >Min=0, Max= SDVO dependent</td>
2965 <td valign="top" >Connector</td>
2966 <td valign="top" >TBD</td>
2967 </tr>
2968 <tr>
2969 <td rowspan="11" valign="top" >armada</td>
2970 <td rowspan="2" valign="top" >CRTC</td>
2971 <td valign="top" >"CSC_YUV"</td>
2972 <td valign="top" >ENUM</td>
2973 <td valign="top" >{ "Auto" , "CCIR601", "CCIR709" }</td>
2974 <td valign="top" >CRTC</td>
2975 <td valign="top" >TBD</td>
2976 </tr>
2977 <tr>
2978 <td valign="top" >"CSC_RGB"</td>
2979 <td valign="top" >ENUM</td>
2980 <td valign="top" >{ "Auto", "Computer system", "Studio" }</td>
2981 <td valign="top" >CRTC</td>
2982 <td valign="top" >TBD</td>
2983 </tr>
2984 <tr>
2985 <td rowspan="9" valign="top" >Overlay</td>
2986 <td valign="top" >"colorkey"</td>
2987 <td valign="top" >RANGE</td>
2988 <td valign="top" >Min=0, Max=0xffffff</td>
2989 <td valign="top" >Plane</td>
2990 <td valign="top" >TBD</td>
2991 </tr>
2992 <tr>
2993 <td valign="top" >"colorkey_min"</td>
2994 <td valign="top" >RANGE</td>
2995 <td valign="top" >Min=0, Max=0xffffff</td>
2996 <td valign="top" >Plane</td>
2997 <td valign="top" >TBD</td>
2998 </tr>
2999 <tr>
3000 <td valign="top" >"colorkey_max"</td>
3001 <td valign="top" >RANGE</td>
3002 <td valign="top" >Min=0, Max=0xffffff</td>
3003 <td valign="top" >Plane</td>
3004 <td valign="top" >TBD</td>
3005 </tr>
3006 <tr>
3007 <td valign="top" >"colorkey_val"</td>
3008 <td valign="top" >RANGE</td>
3009 <td valign="top" >Min=0, Max=0xffffff</td>
3010 <td valign="top" >Plane</td>
3011 <td valign="top" >TBD</td>
3012 </tr>
3013 <tr>
3014 <td valign="top" >"colorkey_alpha"</td>
3015 <td valign="top" >RANGE</td>
3016 <td valign="top" >Min=0, Max=0xffffff</td>
3017 <td valign="top" >Plane</td>
3018 <td valign="top" >TBD</td>
3019 </tr>
3020 <tr>
3021 <td valign="top" >"colorkey_mode"</td>
3022 <td valign="top" >ENUM</td>
3023 <td valign="top" >{ "disabled", "Y component", "U component"
3024 , "V component", "RGB", “R component", "G component", "B component" }</td>
3025 <td valign="top" >Plane</td>
3026 <td valign="top" >TBD</td>
3027 </tr>
3028 <tr>
3029 <td valign="top" >"brightness"</td>
3030 <td valign="top" >RANGE</td>
3031 <td valign="top" >Min=0, Max=256 + 255</td>
3032 <td valign="top" >Plane</td>
3033 <td valign="top" >TBD</td>
3034 </tr>
3035 <tr>
3036 <td valign="top" >"contrast"</td>
3037 <td valign="top" >RANGE</td>
3038 <td valign="top" >Min=0, Max=0x7fff</td>
3039 <td valign="top" >Plane</td>
3040 <td valign="top" >TBD</td>
3041 </tr>
3042 <tr>
3043 <td valign="top" >"saturation"</td>
3044 <td valign="top" >RANGE</td>
3045 <td valign="top" >Min=0, Max=0x7fff</td>
3046 <td valign="top" >Plane</td>
3047 <td valign="top" >TBD</td>
3048 </tr>
3049 <tr>
3050 <td rowspan="2" valign="top" >exynos</td>
3051 <td valign="top" >CRTC</td>
3052 <td valign="top" >“mode”</td>
3053 <td valign="top" >ENUM</td>
3054 <td valign="top" >{ "normal", "blank" }</td>
3055 <td valign="top" >CRTC</td>
3056 <td valign="top" >TBD</td>
3057 </tr>
3058 <tr>
3059 <td valign="top" >Overlay</td>
3060 <td valign="top" >“zpos”</td>
3061 <td valign="top" >RANGE</td>
3062 <td valign="top" >Min=0, Max=MAX_PLANE-1</td>
3063 <td valign="top" >Plane</td>
3064 <td valign="top" >TBD</td>
3065 </tr>
3066 <tr>
3067 <td rowspan="3" valign="top" >i2c/ch7006_drv</td>
3068 <td valign="top" >Generic</td>
3069 <td valign="top" >“scale”</td>
3070 <td valign="top" >RANGE</td>
3071 <td valign="top" >Min=0, Max=2</td>
3072 <td valign="top" >Connector</td>
3073 <td valign="top" >TBD</td>
3074 </tr>
3075 <tr>
3076 <td rowspan="2" valign="top" >TV</td>
3077 <td valign="top" >Standard names as in DRM</td>
3078 <td valign="top" >Standard types as in DRM</td>
3079 <td valign="top" >Standard Values as in DRM</td>
3080 <td valign="top" >Standard object as in DRM</td>
3081 <td valign="top" >TBD</td>
3082 </tr>
3083 <tr>
3084 <td valign="top" >“mode”</td>
3085 <td valign="top" >ENUM</td>
3086 <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
3087 , "PAL-60", "NTSC-M", "NTSC-J" }</td>
3088 <td valign="top" >Connector</td>
3089 <td valign="top" >TBD</td>
3090 </tr>
3091 <tr>
3092 <td rowspan="16" valign="top" >nouveau</td>
3093 <td rowspan="6" valign="top" >NV10 Overlay</td>
3094 <td valign="top" >"colorkey"</td>
3095 <td valign="top" >RANGE</td>
3096 <td valign="top" >Min=0, Max=0x01ffffff</td>
3097 <td valign="top" >Plane</td>
3098 <td valign="top" >TBD</td>
3099 </tr>
3100 <tr>
3101 <td valign="top" >“contrast”</td>
3102 <td valign="top" >RANGE</td>
3103 <td valign="top" >Min=0, Max=8192-1</td>
3104 <td valign="top" >Plane</td>
3105 <td valign="top" >TBD</td>
3106 </tr>
3107 <tr>
3108 <td valign="top" >“brightness”</td>
3109 <td valign="top" >RANGE</td>
3110 <td valign="top" >Min=0, Max=1024</td>
3111 <td valign="top" >Plane</td>
3112 <td valign="top" >TBD</td>
3113 </tr>
3114 <tr>
3115 <td valign="top" >“hue”</td>
3116 <td valign="top" >RANGE</td>
3117 <td valign="top" >Min=0, Max=359</td>
3118 <td valign="top" >Plane</td>
3119 <td valign="top" >TBD</td>
3120 </tr>
3121 <tr>
3122 <td valign="top" >“saturation”</td>
3123 <td valign="top" >RANGE</td>
3124 <td valign="top" >Min=0, Max=8192-1</td>
3125 <td valign="top" >Plane</td>
3126 <td valign="top" >TBD</td>
3127 </tr>
3128 <tr>
3129 <td valign="top" >“iturbt_709”</td>
3130 <td valign="top" >RANGE</td>
3131 <td valign="top" >Min=0, Max=1</td>
3132 <td valign="top" >Plane</td>
3133 <td valign="top" >TBD</td>
3134 </tr>
3135 <tr>
3136 <td rowspan="2" valign="top" >Nv04 Overlay</td>
3137 <td valign="top" >“colorkey”</td>
3138 <td valign="top" >RANGE</td>
3139 <td valign="top" >Min=0, Max=0x01ffffff</td>
3140 <td valign="top" >Plane</td>
3141 <td valign="top" >TBD</td>
3142 </tr>
3143 <tr>
3144 <td valign="top" >“brightness”</td>
3145 <td valign="top" >RANGE</td>
3146 <td valign="top" >Min=0, Max=1024</td>
3147 <td valign="top" >Plane</td>
3148 <td valign="top" >TBD</td>
3149 </tr>
3150 <tr>
3151 <td rowspan="7" valign="top" >Display</td>
3152 <td valign="top" >“dithering mode”</td>
3153 <td valign="top" >ENUM</td>
3154 <td valign="top" >{ "auto", "off", "on" }</td>
3155 <td valign="top" >Connector</td>
3156 <td valign="top" >TBD</td>
3157 </tr>
3158 <tr>
3159 <td valign="top" >“dithering depth”</td>
3160 <td valign="top" >ENUM</td>
3161 <td valign="top" >{ "auto", "off", "on", "static 2x2", "dynamic 2x2", "temporal" }</td>
3162 <td valign="top" >Connector</td>
3163 <td valign="top" >TBD</td>
3164 </tr>
3165 <tr>
3166 <td valign="top" >“underscan”</td>
3167 <td valign="top" >ENUM</td>
3168 <td valign="top" >{ "auto", "6 bpc", "8 bpc" }</td>
3169 <td valign="top" >Connector</td>
3170 <td valign="top" >TBD</td>
3171 </tr>
3172 <tr>
3173 <td valign="top" >“underscan hborder”</td>
3174 <td valign="top" >RANGE</td>
3175 <td valign="top" >Min=0, Max=128</td>
3176 <td valign="top" >Connector</td>
3177 <td valign="top" >TBD</td>
3178 </tr>
3179 <tr>
3180 <td valign="top" >“underscan vborder”</td>
3181 <td valign="top" >RANGE</td>
3182 <td valign="top" >Min=0, Max=128</td>
3183 <td valign="top" >Connector</td>
3184 <td valign="top" >TBD</td>
3185 </tr>
3186 <tr>
3187 <td valign="top" >“vibrant hue”</td>
3188 <td valign="top" >RANGE</td>
3189 <td valign="top" >Min=0, Max=180</td>
3190 <td valign="top" >Connector</td>
3191 <td valign="top" >TBD</td>
3192 </tr>
3193 <tr>
3194 <td valign="top" >“color vibrance”</td>
3195 <td valign="top" >RANGE</td>
3196 <td valign="top" >Min=0, Max=200</td>
3197 <td valign="top" >Connector</td>
3198 <td valign="top" >TBD</td>
3199 </tr>
3200 <tr>
3201 <td valign="top" >Generic</td>
3202 <td valign="top" >Standard name as in DRM</td>
3203 <td valign="top" >Standard type as in DRM</td>
3204 <td valign="top" >Standard value as in DRM</td>
3205 <td valign="top" >Standard Object as in DRM</td>
3206 <td valign="top" >TBD</td>
3207 </tr>
3208 <tr>
3209 <td rowspan="2" valign="top" >omap</td>
3210 <td rowspan="2" valign="top" >Generic</td>
3211 <td valign="top" >“rotation”</td>
3212 <td valign="top" >BITMASK</td>
3213 <td valign="top" >{ 0, "rotate-0" },
3214 { 1, "rotate-90" },
3215 { 2, "rotate-180" },
3216 { 3, "rotate-270" },
3217 { 4, "reflect-x" },
3218 { 5, "reflect-y" }</td>
3219 <td valign="top" >CRTC, Plane</td>
3220 <td valign="top" >TBD</td>
3221 </tr>
3222 <tr>
3223 <td valign="top" >“zorder”</td>
3224 <td valign="top" >RANGE</td>
3225 <td valign="top" >Min=0, Max=3</td>
3226 <td valign="top" >CRTC, Plane</td>
3227 <td valign="top" >TBD</td>
3228 </tr>
3229 <tr>
3230 <td valign="top" >qxl</td>
3231 <td valign="top" >Generic</td>
3232 <td valign="top" >“hotplug_mode_update"</td>
3233 <td valign="top" >RANGE</td>
3234 <td valign="top" >Min=0, Max=1</td>
3235 <td valign="top" >Connector</td>
3236 <td valign="top" >TBD</td>
3237 </tr>
3238 <tr>
3239 <td rowspan="10" valign="top" >radeon</td>
3240 <td valign="top" >DVI-I</td>
3241 <td valign="top" >“coherent”</td>
3242 <td valign="top" >RANGE</td>
3243 <td valign="top" >Min=0, Max=1</td>
3244 <td valign="top" >Connector</td>
3245 <td valign="top" >TBD</td>
3246 </tr>
3247 <tr>
3248 <td valign="top" >DAC enable load detect</td>
3249 <td valign="top" >“load detection”</td>
3250 <td valign="top" >RANGE</td>
3251 <td valign="top" >Min=0, Max=1</td>
3252 <td valign="top" >Connector</td>
3253 <td valign="top" >TBD</td>
3254 </tr>
3255 <tr>
3256 <td valign="top" >TV Standard</td>
3257 <td valign="top" >"tv standard"</td>
3258 <td valign="top" >ENUM</td>
3259 <td valign="top" >{ "ntsc", "pal", "pal-m", "pal-60", "ntsc-j",
3260 "scart-pal", "pal-cn", "secam" }</td>
3261 <td valign="top" >Connector</td>
3262 <td valign="top" >TBD</td>
3263 </tr>
3264 <tr>
3265 <td valign="top" >legacy TMDS PLL detect</td>
3266 <td valign="top" >"tmds_pll"</td>
3267 <td valign="top" >ENUM</td>
3268 <td valign="top" >{ "driver", "bios" }</td>
3269 <td valign="top" >-</td>
3270 <td valign="top" >TBD</td>
3271 </tr>
3272 <tr>
3273 <td rowspan="3" valign="top" >Underscan</td>
3274 <td valign="top" >"underscan"</td>
3275 <td valign="top" >ENUM</td>
3276 <td valign="top" >{ "off", "on", "auto" }</td>
3277 <td valign="top" >Connector</td>
3278 <td valign="top" >TBD</td>
3279 </tr>
3280 <tr>
3281 <td valign="top" >"underscan hborder"</td>
3282 <td valign="top" >RANGE</td>
3283 <td valign="top" >Min=0, Max=128</td>
3284 <td valign="top" >Connector</td>
3285 <td valign="top" >TBD</td>
3286 </tr>
3287 <tr>
3288 <td valign="top" >"underscan vborder"</td>
3289 <td valign="top" >RANGE</td>
3290 <td valign="top" >Min=0, Max=128</td>
3291 <td valign="top" >Connector</td>
3292 <td valign="top" >TBD</td>
3293 </tr>
3294 <tr>
3295 <td valign="top" >Audio</td>
3296 <td valign="top" >“audio”</td>
3297 <td valign="top" >ENUM</td>
3298 <td valign="top" >{ "off", "on", "auto" }</td>
3299 <td valign="top" >Connector</td>
3300 <td valign="top" >TBD</td>
3301 </tr>
3302 <tr>
3303 <td valign="top" >FMT Dithering</td>
3304 <td valign="top" >“dither”</td>
3305 <td valign="top" >ENUM</td>
3306 <td valign="top" >{ "off", "on" }</td>
3307 <td valign="top" >Connector</td>
3308 <td valign="top" >TBD</td>
3309 </tr>
3310 <tr>
3311 <td valign="top" >Generic</td>
3312 <td valign="top" >Standard name as in DRM</td>
3313 <td valign="top" >Standard type as in DRM</td>
3314 <td valign="top" >Standard value as in DRM</td>
3315 <td valign="top" >Standard Object as in DRM</td>
3316 <td valign="top" >TBD</td>
3317 </tr>
3318 <tr>
3319 <td rowspan="3" valign="top" >rcar-du</td>
3320 <td rowspan="3" valign="top" >Generic</td>
3321 <td valign="top" >"alpha"</td>
3322 <td valign="top" >RANGE</td>
3323 <td valign="top" >Min=0, Max=255</td>
3324 <td valign="top" >Plane</td>
3325 <td valign="top" >TBD</td>
3326 </tr>
3327 <tr>
3328 <td valign="top" >"colorkey"</td>
3329 <td valign="top" >RANGE</td>
3330 <td valign="top" >Min=0, Max=0x01ffffff</td>
3331 <td valign="top" >Plane</td>
3332 <td valign="top" >TBD</td>
3333 </tr>
3334 <tr>
3335 <td valign="top" >"zpos"</td>
3336 <td valign="top" >RANGE</td>
3337 <td valign="top" >Min=1, Max=7</td>
3338 <td valign="top" >Plane</td>
3339 <td valign="top" >TBD</td>
3340 </tr>
3341 </tbody>
3342 </table>
3343 </sect2>
2453 </sect1> 3344 </sect1>
2454 3345
2455 <!-- Internals: vertical blanking --> 3346 <!-- Internals: vertical blanking -->
@@ -2527,6 +3418,10 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
2527 with a call to <function>drm_vblank_cleanup</function> in the driver 3418 with a call to <function>drm_vblank_cleanup</function> in the driver
2528 <methodname>unload</methodname> operation handler. 3419 <methodname>unload</methodname> operation handler.
2529 </para> 3420 </para>
3421 <sect2>
3422 <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
3423!Edrivers/gpu/drm/drm_irq.c
3424 </sect2>
2530 </sect1> 3425 </sect1>
2531 3426
2532 <!-- Internals: open/close, file operations and ioctls --> 3427 <!-- Internals: open/close, file operations and ioctls -->
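The vblank reference added above pairs with the surrounding text: drm_vblank_init() in the driver load path is undone by drm_vblank_cleanup() in the unload handler. A minimal sketch of that pairing, assuming a hypothetical single-CRTC driver (the foo_* names are illustrative, not from this patch):

	#include <drm/drmP.h>

	static int foo_load(struct drm_device *dev, unsigned long flags)
	{
		int ret;

		ret = drm_vblank_init(dev, 1);	/* state for one CRTC */
		if (ret)
			return ret;
		/* ... modeset init, IRQ install, etc. ... */
		return 0;
	}

	static int foo_unload(struct drm_device *dev)
	{
		drm_vblank_cleanup(dev);	/* mirrors drm_vblank_init() */
		return 0;
	}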
@@ -2869,17 +3764,16 @@ int num_ioctls;</synopsis>
2869 <term>DRM_IOCTL_MODESET_CTL</term> 3764 <term>DRM_IOCTL_MODESET_CTL</term>
2870 <listitem> 3765 <listitem>
2871 <para> 3766 <para>
2872 This should be called by application level drivers before and 3767 This was only used for user-mode-setting drivers around
2873 after mode setting, since on many devices the vertical blank 3768 modesetting changes to allow the kernel to update the vblank
2874 counter is reset at that time. Internally, the DRM snapshots 3769 interrupt after mode setting, since on many devices the vertical
2875 the last vblank count when the ioctl is called with the 3770 blank counter is reset to 0 at some point during modeset. Modern
2876 _DRM_PRE_MODESET command, so that the counter won't go backwards 3771 drivers should not call this any more since with kernel mode
2877 (which is dealt with when _DRM_POST_MODESET is used). 3772 setting it is a no-op.
2878 </para> 3773 </para>
2879 </listitem> 3774 </listitem>
2880 </varlistentry> 3775 </varlistentry>
2881 </variablelist> 3776 </variablelist>
2882<!--!Edrivers/char/drm/drm_irq.c-->
2883 </para> 3777 </para>
2884 </sect1> 3778 </sect1>
2885 3779
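As background for the rewritten DRM_IOCTL_MODESET_CTL text above, this is roughly what a legacy user-mode-setting driver did: bracket the hardware modeset with _DRM_PRE_MODESET and _DRM_POST_MODESET so the kernel could keep the vblank counter from going backwards. A sketch against the uapi structures in include/uapi/drm/drm.h, illustrative only, since under kernel mode setting the ioctl is a no-op:

	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* struct drm_modeset_ctl, DRM_IOCTL_MODESET_CTL */

	static void ums_modeset(int fd, unsigned int crtc)
	{
		struct drm_modeset_ctl ctl = {
			.crtc = crtc,
			.cmd  = _DRM_PRE_MODESET,
		};

		ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* kernel snapshots the count */
		/* ... program the new mode directly on the hardware ... */
		ctl.cmd = _DRM_POST_MODESET;
		ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* counter resumes monotonically */
	}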
@@ -2942,6 +3836,96 @@ int num_ioctls;</synopsis>
2942 probing, so those sections fully apply. 3836 probing, so those sections fully apply.
2943 </para> 3837 </para>
2944 </sect2> 3838 </sect2>
3839 <sect2>
3840 <title>DPIO</title>
3841!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
3842 <table id="dpiox2">
3843 <title>Dual channel PHY (VLV/CHV)</title>
3844 <tgroup cols="8">
3845 <colspec colname="c0" />
3846 <colspec colname="c1" />
3847 <colspec colname="c2" />
3848 <colspec colname="c3" />
3849 <colspec colname="c4" />
3850 <colspec colname="c5" />
3851 <colspec colname="c6" />
3852 <colspec colname="c7" />
3853 <spanspec spanname="ch0" namest="c0" nameend="c3" />
3854 <spanspec spanname="ch1" namest="c4" nameend="c7" />
3855 <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
3856 <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
3857 <spanspec spanname="ch1pcs01" namest="c4" nameend="c5" />
3858 <spanspec spanname="ch1pcs23" namest="c6" nameend="c7" />
3859 <thead>
3860 <row>
3861 <entry spanname="ch0">CH0</entry>
3862 <entry spanname="ch1">CH1</entry>
3863 </row>
3864 </thead>
3865 <tbody valign="top" align="center">
3866 <row>
3867 <entry spanname="ch0">CMN/PLL/REF</entry>
3868 <entry spanname="ch1">CMN/PLL/REF</entry>
3869 </row>
3870 <row>
3871 <entry spanname="ch0pcs01">PCS01</entry>
3872 <entry spanname="ch0pcs23">PCS23</entry>
3873 <entry spanname="ch1pcs01">PCS01</entry>
3874 <entry spanname="ch1pcs23">PCS23</entry>
3875 </row>
3876 <row>
3877 <entry>TX0</entry>
3878 <entry>TX1</entry>
3879 <entry>TX2</entry>
3880 <entry>TX3</entry>
3881 <entry>TX0</entry>
3882 <entry>TX1</entry>
3883 <entry>TX2</entry>
3884 <entry>TX3</entry>
3885 </row>
3886 <row>
3887 <entry spanname="ch0">DDI0</entry>
3888 <entry spanname="ch1">DDI1</entry>
3889 </row>
3890 </tbody>
3891 </tgroup>
3892 </table>
3893 <table id="dpiox1">
3894 <title>Single channel PHY (CHV)</title>
3895 <tgroup cols="4">
3896 <colspec colname="c0" />
3897 <colspec colname="c1" />
3898 <colspec colname="c2" />
3899 <colspec colname="c3" />
3900 <spanspec spanname="ch0" namest="c0" nameend="c3" />
3901 <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
3902 <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
3903 <thead>
3904 <row>
3905 <entry spanname="ch0">CH0</entry>
3906 </row>
3907 </thead>
3908 <tbody valign="top" align="center">
3909 <row>
3910 <entry spanname="ch0">CMN/PLL/REF</entry>
3911 </row>
3912 <row>
3913 <entry spanname="ch0pcs01">PCS01</entry>
3914 <entry spanname="ch0pcs23">PCS23</entry>
3915 </row>
3916 <row>
3917 <entry>TX0</entry>
3918 <entry>TX1</entry>
3919 <entry>TX2</entry>
3920 <entry>TX3</entry>
3921 </row>
3922 <row>
3923 <entry spanname="ch0">DDI2</entry>
3924 </row>
3925 </tbody>
3926 </tgroup>
3927 </table>
3928 </sect2>
2945 </sect1> 3929 </sect1>
2946 3930
2947 <sect1> 3931 <sect1>
@@ -2950,6 +3934,11 @@ int num_ioctls;</synopsis>
2950 This section covers all things related to the GEM implementation in the 3934 This section covers all things related to the GEM implementation in the
2951 i915 driver. 3935 i915 driver.
2952 </para> 3936 </para>
3937 <sect2>
3938 <title>Batchbuffer Parsing</title>
3939!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
3940!Idrivers/gpu/drm/i915/i915_cmd_parser.c
3941 </sect2>
2953 </sect1> 3942 </sect1>
2954 </chapter> 3943 </chapter>
2955</part> 3944</part>
diff --git a/Documentation/EDID/1024x768.S b/Documentation/EDID/1024x768.S
index 4b486fe31b32..6f3e4b75e49e 100644
--- a/Documentation/EDID/1024x768.S
+++ b/Documentation/EDID/1024x768.S
@@ -36,7 +36,7 @@
36#define DPI 72 36#define DPI 72
37#define VFREQ 60 /* Hz */ 37#define VFREQ 60 /* Hz */
38#define TIMING_NAME "Linux XGA" 38#define TIMING_NAME "Linux XGA"
39#define ESTABLISHED_TIMINGS_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */ 39#define ESTABLISHED_TIMING2_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
40#define HSYNC_POL 0 40#define HSYNC_POL 0
41#define VSYNC_POL 0 41#define VSYNC_POL 0
42#define CRC 0x55 42#define CRC 0x55
diff --git a/Documentation/EDID/1280x1024.S b/Documentation/EDID/1280x1024.S
index a2799fe33a4d..bd9bef2a65af 100644
--- a/Documentation/EDID/1280x1024.S
+++ b/Documentation/EDID/1280x1024.S
@@ -36,7 +36,7 @@
36#define DPI 72 36#define DPI 72
37#define VFREQ 60 /* Hz */ 37#define VFREQ 60 /* Hz */
38#define TIMING_NAME "Linux SXGA" 38#define TIMING_NAME "Linux SXGA"
39#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ 39/* No ESTABLISHED_TIMINGx_BITS */
40#define HSYNC_POL 1 40#define HSYNC_POL 1
41#define VSYNC_POL 1 41#define VSYNC_POL 1
42#define CRC 0xa0 42#define CRC 0xa0
diff --git a/Documentation/EDID/1600x1200.S b/Documentation/EDID/1600x1200.S
index 0ded64cfd1f5..a45101c6160c 100644
--- a/Documentation/EDID/1600x1200.S
+++ b/Documentation/EDID/1600x1200.S
@@ -36,7 +36,7 @@
36#define DPI 72 36#define DPI 72
37#define VFREQ 60 /* Hz */ 37#define VFREQ 60 /* Hz */
38#define TIMING_NAME "Linux UXGA" 38#define TIMING_NAME "Linux UXGA"
39#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ 39/* No ESTABLISHED_TIMINGx_BITS */
40#define HSYNC_POL 1 40#define HSYNC_POL 1
41#define VSYNC_POL 1 41#define VSYNC_POL 1
42#define CRC 0x9d 42#define CRC 0x9d
diff --git a/Documentation/EDID/1680x1050.S b/Documentation/EDID/1680x1050.S
index 96f67cafcf2e..b0d7c69282b4 100644
--- a/Documentation/EDID/1680x1050.S
+++ b/Documentation/EDID/1680x1050.S
@@ -36,7 +36,7 @@
36#define DPI 96 36#define DPI 96
37#define VFREQ 60 /* Hz */ 37#define VFREQ 60 /* Hz */
38#define TIMING_NAME "Linux WSXGA" 38#define TIMING_NAME "Linux WSXGA"
39#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ 39/* No ESTABLISHED_TIMINGx_BITS */
40#define HSYNC_POL 1 40#define HSYNC_POL 1
41#define VSYNC_POL 1 41#define VSYNC_POL 1
42#define CRC 0x26 42#define CRC 0x26
diff --git a/Documentation/EDID/1920x1080.S b/Documentation/EDID/1920x1080.S
index 36ed5d571d0a..3084355e81e7 100644
--- a/Documentation/EDID/1920x1080.S
+++ b/Documentation/EDID/1920x1080.S
@@ -36,7 +36,7 @@
36#define DPI 96 36#define DPI 96
37#define VFREQ 60 /* Hz */ 37#define VFREQ 60 /* Hz */
38#define TIMING_NAME "Linux FHD" 38#define TIMING_NAME "Linux FHD"
39#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ 39/* No ESTABLISHED_TIMINGx_BITS */
40#define HSYNC_POL 1 40#define HSYNC_POL 1
41#define VSYNC_POL 1 41#define VSYNC_POL 1
42#define CRC 0x05 42#define CRC 0x05
diff --git a/Documentation/EDID/800x600.S b/Documentation/EDID/800x600.S
new file mode 100644
index 000000000000..6644e26d5801
--- /dev/null
+++ b/Documentation/EDID/800x600.S
@@ -0,0 +1,41 @@
1/*
2 800x600.S: EDID data set for standard 800x600 60 Hz monitor
3
4 Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
5 Copyright (C) 2014 Linaro Limited
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License
9 as published by the Free Software Foundation; either version 2
10 of the License, or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16*/
17
18/* EDID */
19#define VERSION 1
20#define REVISION 3
21
22/* Display */
23#define CLOCK 40000 /* kHz */
24#define XPIX 800
25#define YPIX 600
26#define XY_RATIO XY_RATIO_4_3
27#define XBLANK 256
28#define YBLANK 28
29#define XOFFSET 40
30#define XPULSE 128
31#define YOFFSET (63+1)
32#define YPULSE (63+4)
33#define DPI 72
34#define VFREQ 60 /* Hz */
35#define TIMING_NAME "Linux SVGA"
36#define ESTABLISHED_TIMING1_BITS 0x01 /* Bit 0: 800x600 @ 60Hz */
37#define HSYNC_POL 1
38#define VSYNC_POL 1
39#define CRC 0xc2
40
41#include "edid.S"
diff --git a/Documentation/EDID/HOWTO.txt b/Documentation/EDID/HOWTO.txt
index 7146db1d9e8c..835db332289b 100644
--- a/Documentation/EDID/HOWTO.txt
+++ b/Documentation/EDID/HOWTO.txt
@@ -18,7 +18,7 @@ CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows to provide an
18individually prepared or corrected EDID data set in the /lib/firmware 18individually prepared or corrected EDID data set in the /lib/firmware
19directory from where it is loaded via the firmware interface. The code 19directory from where it is loaded via the firmware interface. The code
20(see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for 20(see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
21commonly used screen resolutions (1024x768, 1280x1024, 1600x1200, 21commonly used screen resolutions (800x600, 1024x768, 1280x1024, 1600x1200,
221680x1050, 1920x1080) as binary blobs, but the kernel source tree does 221680x1050, 1920x1080) as binary blobs, but the kernel source tree does
23not contain code to create these data. In order to elucidate the origin 23not contain code to create these data. In order to elucidate the origin
24of the built-in binary EDID blobs and to facilitate the creation of 24of the built-in binary EDID blobs and to facilitate the creation of
diff --git a/Documentation/EDID/edid.S b/Documentation/EDID/edid.S
index ea97ae275fca..7ac03276d7a2 100644
--- a/Documentation/EDID/edid.S
+++ b/Documentation/EDID/edid.S
@@ -33,6 +33,17 @@
33#define XY_RATIO_5_4 0b10 33#define XY_RATIO_5_4 0b10
34#define XY_RATIO_16_9 0b11 34#define XY_RATIO_16_9 0b11
35 35
36/* Provide defaults for the timing bits */
37#ifndef ESTABLISHED_TIMING1_BITS
38#define ESTABLISHED_TIMING1_BITS 0x00
39#endif
40#ifndef ESTABLISHED_TIMING2_BITS
41#define ESTABLISHED_TIMING2_BITS 0x00
42#endif
43#ifndef ESTABLISHED_TIMING3_BITS
44#define ESTABLISHED_TIMING3_BITS 0x00
45#endif
46
36#define mfgname2id(v1,v2,v3) \ 47#define mfgname2id(v1,v2,v3) \
37 ((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f)) 48 ((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
38#define swap16(v1) ((v1>>8)+((v1&0xff)<<8)) 49#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
@@ -139,7 +150,7 @@ white_x_y_msb: .byte 0x50,0x54
139 Bit 2 640x480 @ 75 Hz 150 Bit 2 640x480 @ 75 Hz
140 Bit 1 800x600 @ 56 Hz 151 Bit 1 800x600 @ 56 Hz
141 Bit 0 800x600 @ 60 Hz */ 152 Bit 0 800x600 @ 60 Hz */
142estbl_timing1: .byte 0x00 153estbl_timing1: .byte ESTABLISHED_TIMING1_BITS
143 154
144/* Bit 7 800x600 @ 72 Hz 155/* Bit 7 800x600 @ 72 Hz
145 Bit 6 800x600 @ 75 Hz 156 Bit 6 800x600 @ 75 Hz
@@ -149,11 +160,11 @@ estbl_timing1: .byte 0x00
149 Bit 2 1024x768 @ 72 Hz 160 Bit 2 1024x768 @ 72 Hz
150 Bit 1 1024x768 @ 75 Hz 161 Bit 1 1024x768 @ 75 Hz
151 Bit 0 1280x1024 @ 75 Hz */ 162 Bit 0 1280x1024 @ 75 Hz */
152estbl_timing2: .byte ESTABLISHED_TIMINGS_BITS 163estbl_timing2: .byte ESTABLISHED_TIMING2_BITS
153 164
154/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II) 165/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
155 Bits 6-0 Other manufacturer-specific display modes */ 166 Bits 6-0 Other manufacturer-specific display modes */
156estbl_timing3: .byte 0x00 167estbl_timing3: .byte ESTABLISHED_TIMING3_BITS
157 168
158/* Standard timing */ 169/* Standard timing */
159/* X resolution, less 31, divided by 8 (256-2288 pixels) */ 170/* X resolution, less 31, divided by 8 (256-2288 pixels) */
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index efa8b8451f93..b48f4ef31d93 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -136,6 +136,7 @@ of the following host1x client modules:
136 - compatible: "nvidia,tegra<chip>-hdmi" 136 - compatible: "nvidia,tegra<chip>-hdmi"
137 - reg: Physical base address and length of the controller's registers. 137 - reg: Physical base address and length of the controller's registers.
138 - interrupts: The interrupt outputs from the controller. 138 - interrupts: The interrupt outputs from the controller.
139 - hdmi-supply: supply for the +5V HDMI connector pin
139 - vdd-supply: regulator for supply voltage 140 - vdd-supply: regulator for supply voltage
140 - pll-supply: regulator for PLL 141 - pll-supply: regulator for PLL
141 - clocks: Must contain an entry for each entry in clock-names. 142 - clocks: Must contain an entry for each entry in clock-names.
@@ -180,6 +181,7 @@ of the following host1x client modules:
180 See ../reset/reset.txt for details. 181 See ../reset/reset.txt for details.
181 - reset-names: Must include the following entries: 182 - reset-names: Must include the following entries:
182 - dsi 183 - dsi
184 - avdd-dsi-supply: phandle of a supply that powers the DSI controller
183 - nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying 185 - nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying
184 which pads are used by this DSI output and need to be calibrated. See also 186 which pads are used by this DSI output and need to be calibrated. See also
185 ../mipi/nvidia,tegra114-mipi.txt. 187 ../mipi/nvidia,tegra114-mipi.txt.
diff --git a/Documentation/devicetree/bindings/panel/auo,b133xtn01.txt b/Documentation/devicetree/bindings/panel/auo,b133xtn01.txt
new file mode 100644
index 000000000000..7443b7c76769
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b133xtn01.txt
@@ -0,0 +1,7 @@
1AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
2
3Required properties:
4- compatible: should be "auo,b133xtn01"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/edt,et057090dhu.txt b/Documentation/devicetree/bindings/panel/edt,et057090dhu.txt
new file mode 100644
index 000000000000..4903d7b1d947
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/edt,et057090dhu.txt
@@ -0,0 +1,7 @@
1Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
2
3Required properties:
4- compatible: should be "edt,et057090dhu"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/edt,et070080dh6.txt b/Documentation/devicetree/bindings/panel/edt,et070080dh6.txt
new file mode 100644
index 000000000000..20cb38e836e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/edt,et070080dh6.txt
@@ -0,0 +1,10 @@
1Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel
2
3Required properties:
4- compatible: should be "edt,et070080dh6"
5
6This panel is the same as ETM0700G0DH6 except for the touchscreen.
7ET070080DH6 is the model with resistive touch.
8
9This binding is compatible with the simple-panel binding, which is specified
10in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/edt,etm0700g0dh6.txt b/Documentation/devicetree/bindings/panel/edt,etm0700g0dh6.txt
new file mode 100644
index 000000000000..ee4b18053e40
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/edt,etm0700g0dh6.txt
@@ -0,0 +1,10 @@
1Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel
2
3Required properties:
4- compatible: should be "edt,etm0700g0dh6"
5
6This panel is the same as ET070080DH6 except for the touchscreen.
7ETM0700G0DH6 is the model with capacitive multitouch.
8
9This binding is compatible with the simple-panel binding, which is specified
10in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/video/exynos_dp.txt b/Documentation/devicetree/bindings/video/exynos_dp.txt
index 57ccdde02c3a..53dbccfa80ca 100644
--- a/Documentation/devicetree/bindings/video/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dp.txt
@@ -62,6 +62,10 @@ Optional properties for dp-controller:
62 -hsync-active-high: 62 -hsync-active-high:
63 HSYNC polarity configuration. 63 HSYNC polarity configuration.
64 High if defined, Low if not defined 64 High if defined, Low if not defined
65 -samsung,hpd-gpio:
66 Hotplug detect GPIO.
67 Indicates which GPIO should be used for hotplug
68 detection
65 69
66Example: 70Example:
67 71
diff --git a/Documentation/devicetree/bindings/video/exynos_hdmi.txt b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
index f9187a259259..1fd8cf9cbfac 100644
--- a/Documentation/devicetree/bindings/video/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
@@ -5,6 +5,7 @@ Required properties:
5 1) "samsung,exynos5-hdmi" <DEPRECATED> 5 1) "samsung,exynos5-hdmi" <DEPRECATED>
6 2) "samsung,exynos4210-hdmi" 6 2) "samsung,exynos4210-hdmi"
7 3) "samsung,exynos4212-hdmi" 7 3) "samsung,exynos4212-hdmi"
8 4) "samsung,exynos5420-hdmi"
8- reg: physical base address of the hdmi and length of memory mapped 9- reg: physical base address of the hdmi and length of memory mapped
9 region. 10 region.
10- interrupts: interrupt number to the cpu. 11- interrupts: interrupt number to the cpu.
@@ -27,6 +28,7 @@ Required properties:
27 "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi". 28 "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
28- ddc: phandle to the hdmi ddc node 29- ddc: phandle to the hdmi ddc node
29- phy: phandle to the hdmi phy node 30- phy: phandle to the hdmi phy node
31- samsung,syscon-phandle: phandle for system controller node for PMU.
30 32
31Example: 33Example:
32 34
@@ -37,4 +39,5 @@ Example:
37 hpd-gpio = <&gpx3 7 1>; 39 hpd-gpio = <&gpx3 7 1>;
38 ddc = <&hdmi_ddc_node>; 40 ddc = <&hdmi_ddc_node>;
39 phy = <&hdmi_phy_node>; 41 phy = <&hdmi_phy_node>;
42 samsung,syscon-phandle = <&pmu_system_controller>;
40 }; 43 };
diff --git a/MAINTAINERS b/MAINTAINERS
index b4a66b9d6b4d..c2297fab77c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2952,6 +2952,7 @@ L: dri-devel@lists.freedesktop.org
2952T: git git://people.freedesktop.org/~airlied/linux 2952T: git git://people.freedesktop.org/~airlied/linux
2953S: Maintained 2953S: Maintained
2954F: drivers/gpu/drm/ 2954F: drivers/gpu/drm/
2955F: drivers/gpu/vga/
2955F: include/drm/ 2956F: include/drm/
2956F: include/uapi/drm/ 2957F: include/uapi/drm/
2957 2958
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6cda0baeac9d..2e1a6853e00c 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -419,7 +419,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
419 return gmch_ctrl << 25; /* 32 MB units */ 419 return gmch_ctrl << 25; /* 32 MB units */
420} 420}
421 421
422static size_t gen8_stolen_size(int num, int slot, int func) 422static size_t __init gen8_stolen_size(int num, int slot, int func)
423{ 423{
424 u16 gmch_ctrl; 424 u16 gmch_ctrl;
425 425
@@ -429,48 +429,73 @@ static size_t gen8_stolen_size(int num, int slot, int func)
429 return gmch_ctrl << 25; /* 32 MB units */ 429 return gmch_ctrl << 25; /* 32 MB units */
430} 430}
431 431
432static size_t __init chv_stolen_size(int num, int slot, int func)
433{
434 u16 gmch_ctrl;
435
436 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
437 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
438 gmch_ctrl &= SNB_GMCH_GMS_MASK;
439
440 /*
441 * 0x0 to 0x10: 32MB increments starting at 0MB
442 * 0x11 to 0x16: 4MB increments starting at 8MB
443 * 0x17 to 0x1d: 4MB increments starting at 36MB
444 */
445 if (gmch_ctrl < 0x11)
446 return gmch_ctrl << 25;
447 else if (gmch_ctrl < 0x17)
448 return (gmch_ctrl - 0x11 + 2) << 22;
449 else
450 return (gmch_ctrl - 0x17 + 9) << 22;
451}
432 452
433struct intel_stolen_funcs { 453struct intel_stolen_funcs {
434 size_t (*size)(int num, int slot, int func); 454 size_t (*size)(int num, int slot, int func);
435 u32 (*base)(int num, int slot, int func, size_t size); 455 u32 (*base)(int num, int slot, int func, size_t size);
436}; 456};
437 457
438static const struct intel_stolen_funcs i830_stolen_funcs = { 458static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
439 .base = i830_stolen_base, 459 .base = i830_stolen_base,
440 .size = i830_stolen_size, 460 .size = i830_stolen_size,
441}; 461};
442 462
443static const struct intel_stolen_funcs i845_stolen_funcs = { 463static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
444 .base = i845_stolen_base, 464 .base = i845_stolen_base,
445 .size = i830_stolen_size, 465 .size = i830_stolen_size,
446}; 466};
447 467
448static const struct intel_stolen_funcs i85x_stolen_funcs = { 468static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
449 .base = i85x_stolen_base, 469 .base = i85x_stolen_base,
450 .size = gen3_stolen_size, 470 .size = gen3_stolen_size,
451}; 471};
452 472
453static const struct intel_stolen_funcs i865_stolen_funcs = { 473static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
454 .base = i865_stolen_base, 474 .base = i865_stolen_base,
455 .size = gen3_stolen_size, 475 .size = gen3_stolen_size,
456}; 476};
457 477
458static const struct intel_stolen_funcs gen3_stolen_funcs = { 478static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
459 .base = intel_stolen_base, 479 .base = intel_stolen_base,
460 .size = gen3_stolen_size, 480 .size = gen3_stolen_size,
461}; 481};
462 482
463static const struct intel_stolen_funcs gen6_stolen_funcs = { 483static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
464 .base = intel_stolen_base, 484 .base = intel_stolen_base,
465 .size = gen6_stolen_size, 485 .size = gen6_stolen_size,
466}; 486};
467 487
468static const struct intel_stolen_funcs gen8_stolen_funcs = { 488static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
469 .base = intel_stolen_base, 489 .base = intel_stolen_base,
470 .size = gen8_stolen_size, 490 .size = gen8_stolen_size,
471}; 491};
472 492
473static struct pci_device_id intel_stolen_ids[] __initdata = { 493static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
494 .base = intel_stolen_base,
495 .size = chv_stolen_size,
496};
497
498static const struct pci_device_id intel_stolen_ids[] __initconst = {
474 INTEL_I830_IDS(&i830_stolen_funcs), 499 INTEL_I830_IDS(&i830_stolen_funcs),
475 INTEL_I845G_IDS(&i845_stolen_funcs), 500 INTEL_I845G_IDS(&i845_stolen_funcs),
476 INTEL_I85X_IDS(&i85x_stolen_funcs), 501 INTEL_I85X_IDS(&i85x_stolen_funcs),
@@ -496,7 +521,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
496 INTEL_HSW_D_IDS(&gen6_stolen_funcs), 521 INTEL_HSW_D_IDS(&gen6_stolen_funcs),
497 INTEL_HSW_M_IDS(&gen6_stolen_funcs), 522 INTEL_HSW_M_IDS(&gen6_stolen_funcs),
498 INTEL_BDW_M_IDS(&gen8_stolen_funcs), 523 INTEL_BDW_M_IDS(&gen8_stolen_funcs),
499 INTEL_BDW_D_IDS(&gen8_stolen_funcs) 524 INTEL_BDW_D_IDS(&gen8_stolen_funcs),
525 INTEL_CHV_IDS(&chv_stolen_funcs),
500}; 526};
501 527
502static void __init intel_graphics_stolen(int num, int slot, int func) 528static void __init intel_graphics_stolen(int num, int slot, int func)
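The three encoding ranges in chv_stolen_size() above are easy to misread, so here is the same decode restated as a stand-alone userspace check of the arithmetic (chv_decode is a hypothetical name used only for this sketch):

	#include <assert.h>
	#include <stddef.h>

	static size_t chv_decode(unsigned int gms)
	{
		if (gms < 0x11)
			return (size_t)gms << 25;		/* 32 MB units from 0 */
		else if (gms < 0x17)
			return (size_t)(gms - 0x11 + 2) << 22;	/* 4 MB units from 8 MB */
		else
			return (size_t)(gms - 0x17 + 9) << 22;	/* 4 MB units from 36 MB */
	}

	int main(void)
	{
		assert(chv_decode(0x02) ==  64u << 20);	/* 2 * 32 MB */
		assert(chv_decode(0x11) ==   8u << 20);	/* start of the 4 MB range */
		assert(chv_decode(0x16) ==  28u << 20);	/* 8 MB + 5 * 4 MB */
		assert(chv_decode(0x17) ==  36u << 20);	/* start of the second 4 MB range */
		assert(chv_decode(0x1d) ==  60u << 20);	/* 36 MB + 6 * 4 MB */
		return 0;
	}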
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index d8a22c2a579d..70da9eb52a42 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,2 +1,3 @@
1obj-y += drm/ vga/ 1obj-y += drm/ vga/
2obj-$(CONFIG_TEGRA_HOST1X) += host1x/ 2obj-$(CONFIG_TEGRA_HOST1X) += host1x/
3obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d1cc2f613a78..f5120046ff80 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -83,6 +83,8 @@ config DRM_KMS_CMA_HELPER
83 83
84source "drivers/gpu/drm/i2c/Kconfig" 84source "drivers/gpu/drm/i2c/Kconfig"
85 85
86source "drivers/gpu/drm/bridge/Kconfig"
87
86config DRM_TDFX 88config DRM_TDFX
87 tristate "3dfx Banshee/Voodoo3+" 89 tristate "3dfx Banshee/Voodoo3+"
88 depends on DRM && PCI 90 depends on DRM && PCI
@@ -199,5 +201,3 @@ source "drivers/gpu/drm/msm/Kconfig"
199source "drivers/gpu/drm/tegra/Kconfig" 201source "drivers/gpu/drm/tegra/Kconfig"
200 202
201source "drivers/gpu/drm/panel/Kconfig" 203source "drivers/gpu/drm/panel/Kconfig"
202
203source "drivers/gpu/drm/bridge/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 48e38ba22783..dd2ba4269740 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
15 drm_trace_points.o drm_global.o drm_prime.o \ 15 drm_trace_points.o drm_global.o drm_prime.o \
16 drm_rect.o drm_vma_manager.o drm_flip_work.o \ 16 drm_rect.o drm_vma_manager.o drm_flip_work.o \
17 drm_plane_helper.o 17 drm_modeset_lock.o
18 18
19drm-$(CONFIG_COMPAT) += drm_ioc32.o 19drm-$(CONFIG_COMPAT) += drm_ioc32.o
20drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 20drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,8 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
23 23
24drm-usb-y := drm_usb.o 24drm-usb-y := drm_usb.o
25 25
26drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o 26drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
27 drm_plane_helper.o
27drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 28drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
28drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o 29drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
29drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 30drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 32982da82694..8ab3cd1a8cdb 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -173,7 +173,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
173 if (ret) 173 if (ret)
174 goto err_kms; 174 goto err_kms;
175 175
176 ret = drm_irq_install(dev); 176 ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
177 if (ret) 177 if (ret)
178 goto err_kms; 178 goto err_kms;
179 179
@@ -402,7 +402,7 @@ static struct platform_driver armada_drm_platform_driver = {
402 402
403static int __init armada_drm_init(void) 403static int __init armada_drm_init(void)
404{ 404{
405 armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls); 405 armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
406 return platform_driver_register(&armada_drm_platform_driver); 406 return platform_driver_register(&armada_drm_platform_driver);
407} 407}
408module_init(armada_drm_init); 408module_init(armada_drm_init);
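The change above tracks a core API update in this merge: drm_irq_install() now takes the IRQ number as an explicit argument instead of deriving it from the device. For a platform device that means platform_get_irq() as shown; a PCI-backed driver would pass the equivalent, e.g. (sketch only, foo_irq_init is an illustrative name):

	#include <drm/drmP.h>

	static int foo_irq_init(struct drm_device *dev)
	{
		/* the IRQ is now passed explicitly; PCI drivers hand in pdev->irq */
		return drm_irq_install(dev, dev->pdev->irq);
	}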
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 948cb14c561e..fd166f532ab9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -181,10 +181,8 @@ void armada_fbdev_lastclose(struct drm_device *dev)
181{ 181{
182 struct armada_private *priv = dev->dev_private; 182 struct armada_private *priv = dev->dev_private;
183 183
184 drm_modeset_lock_all(dev);
185 if (priv->fbdev) 184 if (priv->fbdev)
186 drm_fb_helper_restore_fbdev_mode(priv->fbdev); 185 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
187 drm_modeset_unlock_all(dev);
188} 186}
189 187
190void armada_fbdev_fini(struct drm_device *dev) 188void armada_fbdev_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 887816f43476..bb9b642d8485 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -433,7 +433,6 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
433 433
434 if (dobj->obj.filp) { 434 if (dobj->obj.filp) {
435 struct address_space *mapping; 435 struct address_space *mapping;
436 gfp_t gfp;
437 int count; 436 int count;
438 437
439 count = dobj->obj.size / PAGE_SIZE; 438 count = dobj->obj.size / PAGE_SIZE;
@@ -441,12 +440,11 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
441 goto free_sgt; 440 goto free_sgt;
442 441
443 mapping = file_inode(dobj->obj.filp)->i_mapping; 442 mapping = file_inode(dobj->obj.filp)->i_mapping;
444 gfp = mapping_gfp_mask(mapping);
445 443
446 for_each_sg(sgt->sgl, sg, count, i) { 444 for_each_sg(sgt->sgl, sg, count, i) {
447 struct page *page; 445 struct page *page;
448 446
449 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 447 page = shmem_read_mapping_page(mapping, i);
450 if (IS_ERR(page)) { 448 if (IS_ERR(page)) {
451 num = i; 449 num = i;
452 goto release; 450 goto release;
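The gfp_t local could be dropped above because the plain helper already applies the mapping's own mask; shmem_read_mapping_page() is, paraphrasing include/linux/shmem_fs.h, just:

	static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}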
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 8df4f284ee24..171aa0622b66 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,6 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o 7ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
8 8
9obj-$(CONFIG_DRM_AST) := ast.o \ No newline at end of file 9obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
new file mode 100644
index 000000000000..5da4b62285fa
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -0,0 +1,410 @@
1
2#include <linux/firmware.h>
3#include <drm/drmP.h>
4#include "ast_drv.h"
5MODULE_FIRMWARE("ast_dp501_fw.bin");
6
7int ast_load_dp501_microcode(struct drm_device *dev)
8{
9 struct ast_private *ast = dev->dev_private;
10 static char *fw_name = "ast_dp501_fw.bin";
11 int err;
12 err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
13 if (err)
14 return err;
15
16 return 0;
17}
18
19static void send_ack(struct ast_private *ast)
20{
21 u8 sendack;
22 sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
23 sendack |= 0x80;
24 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
25}
26
27static void send_nack(struct ast_private *ast)
28{
29 u8 sendack;
30 sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
31 sendack &= ~0x80;
32 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
33}
34
35static bool wait_ack(struct ast_private *ast)
36{
37 u8 waitack;
38 u32 retry = 0;
39 do {
40 waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
41 waitack &= 0x80;
42 udelay(100);
43 } while ((!waitack) && (retry++ < 1000));
44
45 if (retry < 1000)
46 return true;
47 else
48 return false;
49}
50
51static bool wait_nack(struct ast_private *ast)
52{
53 u8 waitack;
54 u32 retry = 0;
55 do {
56 waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
57 waitack &= 0x80;
58 udelay(100);
59 } while ((waitack) && (retry++ < 1000));
60
61 if (retry < 1000)
62 return true;
63 else
64 return false;
65}
66
67static void set_cmd_trigger(struct ast_private *ast)
68{
69 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
70}
71
72static void clear_cmd_trigger(struct ast_private *ast)
73{
74 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
75}
76
77#if 0
78static bool wait_fw_ready(struct ast_private *ast)
79{
80 u8 waitready;
81 u32 retry = 0;
82 do {
83 waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
84 waitready &= 0x40;
85 udelay(100);
86 } while ((!waitready) && (retry++ < 1000));
87
88 if (retry < 1000)
89 return true;
90 else
91 return false;
92}
93#endif
94
95static bool ast_write_cmd(struct drm_device *dev, u8 data)
96{
97 struct ast_private *ast = dev->dev_private;
98 int retry = 0;
99 if (wait_nack(ast)) {
100 send_nack(ast);
101 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
102 send_ack(ast);
103 set_cmd_trigger(ast);
104 do {
105 if (wait_ack(ast)) {
106 clear_cmd_trigger(ast);
107 send_nack(ast);
108 return true;
109 }
110 } while (retry++ < 100);
111 }
112 clear_cmd_trigger(ast);
113 send_nack(ast);
114 return false;
115}
116
117static bool ast_write_data(struct drm_device *dev, u8 data)
118{
119 struct ast_private *ast = dev->dev_private;
120
121 if (wait_nack(ast)) {
122 send_nack(ast);
123 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
124 send_ack(ast);
125 if (wait_ack(ast)) {
126 send_nack(ast);
127 return true;
128 }
129 }
130 send_nack(ast);
131 return false;
132}
133
134#if 0
135static bool ast_read_data(struct drm_device *dev, u8 *data)
136{
137 struct ast_private *ast = dev->dev_private;
138 u8 tmp;
139
140 *data = 0;
141
142 if (wait_ack(ast) == false)
143 return false;
144 tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
145 *data = tmp;
146 if (wait_nack(ast) == false) {
147 send_nack(ast);
148 return false;
149 }
150 send_nack(ast);
151 return true;
152}
153
154static void clear_cmd(struct ast_private *ast)
155{
156 send_nack(ast);
157 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
158}
159#endif
160
161void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
162{
163 ast_write_cmd(dev, 0x40);
164 ast_write_data(dev, mode);
165
166 msleep(10);
167}
168
169static u32 get_fw_base(struct ast_private *ast)
170{
171 return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
172}
173
174bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
175{
176 struct ast_private *ast = dev->dev_private;
177 u32 i, data;
178 u32 boot_address;
179
180 data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
181 if (data) {
182 boot_address = get_fw_base(ast);
183 for (i = 0; i < size; i += 4)
184 *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
185 return true;
186 }
187 return false;
188}
189
190bool ast_launch_m68k(struct drm_device *dev)
191{
192 struct ast_private *ast = dev->dev_private;
193 u32 i, data, len = 0;
194 u32 boot_address;
195 u8 *fw_addr = NULL;
196 u8 jreg;
197
198 data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
199 if (!data) {
200
201 if (ast->dp501_fw_addr) {
202 fw_addr = ast->dp501_fw_addr;
203 len = 32*1024;
204 } else if (ast->dp501_fw) {
205 fw_addr = (u8 *)ast->dp501_fw->data;
206 len = ast->dp501_fw->size;
207 }
208 /* Get BootAddress */
209 ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
210 data = ast_mindwm(ast, 0x1e6e0004);
211 switch (data & 0x03) {
212 case 0:
213 boot_address = 0x44000000;
214 break;
215 default:
216 case 1:
217 boot_address = 0x48000000;
218 break;
219 case 2:
220 boot_address = 0x50000000;
221 break;
222 case 3:
223 boot_address = 0x60000000;
224 break;
225 }
226 boot_address -= 0x200000; /* -2MB */
227
228 /* copy image to buffer */
229 for (i = 0; i < len; i += 4) {
230 data = *(u32 *)(fw_addr + i);
231 ast_moutdwm(ast, boot_address + i, data);
232 }
233
234 /* Init SCU */
235 ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
236
237 /* Launch FW */
238 ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
239 ast_moutdwm(ast, 0x1e6e2100, 1);
240
241 /* Update Scratch */
242 data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
243 data |= 0x800;
244 ast_moutdwm(ast, 0x1e6e2040, data);
245
246 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
247 jreg |= 0x02;
248 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
249 }
250 return true;
251}
252
253u8 ast_get_dp501_max_clk(struct drm_device *dev)
254{
255 struct ast_private *ast = dev->dev_private;
256 u32 boot_address, offset, data;
257 u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
258
259 boot_address = get_fw_base(ast);
260
261 /* validate FW version */
262 offset = 0xf000;
263 data = ast_mindwm(ast, boot_address + offset);
264 if ((data & 0xf0) != 0x10) /* version: 1x */
265 return maxclk;
266
267 /* Read Link Capability */
268 offset = 0xf014;
269 *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
270 if (linkcap[2] == 0) {
271 linkrate = linkcap[0];
272 linklanes = linkcap[1];
273 data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
274 if (data > 0xff)
275 data = 0xff;
276 maxclk = (u8)data;
277 }
278 return maxclk;
279}
280
281bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
282{
283 struct ast_private *ast = dev->dev_private;
284 u32 i, boot_address, offset, data;
285
286 boot_address = get_fw_base(ast);
287
288 /* validate FW version */
289 offset = 0xf000;
290 data = ast_mindwm(ast, boot_address + offset);
291 if ((data & 0xf0) != 0x10)
292 return false;
293
294 /* validate PnP Monitor */
295 offset = 0xf010;
296 data = ast_mindwm(ast, boot_address + offset);
297 if (!(data & 0x01))
298 return false;
299
300 /* Read EDID */
301 offset = 0xf020;
302 for (i = 0; i < 128; i += 4) {
303 data = ast_mindwm(ast, boot_address + offset + i);
304 *(u32 *)(ediddata + i) = data;
305 }
306
307 return true;
308}
309
310static bool ast_init_dvo(struct drm_device *dev)
311{
312 struct ast_private *ast = dev->dev_private;
313 u8 jreg;
314 u32 data;
315 ast_write32(ast, 0xf004, 0x1e6e0000);
316 ast_write32(ast, 0xf000, 0x1);
317 ast_write32(ast, 0x12000, 0x1688a8a8);
318
319 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
320 if (!(jreg & 0x80)) {
321 /* Init SCU DVO Settings */
322 data = ast_read32(ast, 0x12008);
323 /* delay phase */
324 data &= 0xfffff8ff;
325 data |= 0x00000500;
326 ast_write32(ast, 0x12008, data);
327
328 if (ast->chip == AST2300) {
329 data = ast_read32(ast, 0x12084);
330 /* multi-pins for DVO single-edge */
331 data |= 0xfffe0000;
332 ast_write32(ast, 0x12084, data);
333
334 data = ast_read32(ast, 0x12088);
335 /* multi-pins for DVO single-edge */
336 data |= 0x000fffff;
337 ast_write32(ast, 0x12088, data);
338
339 data = ast_read32(ast, 0x12090);
340 /* multi-pins for DVO single-edge */
341 data &= 0xffffffcf;
342 data |= 0x00000020;
343 ast_write32(ast, 0x12090, data);
344 } else { /* AST2400 */
345 data = ast_read32(ast, 0x12088);
346 /* multi-pins for DVO single-edge */
347 data |= 0x30000000;
348 ast_write32(ast, 0x12088, data);
349
350 data = ast_read32(ast, 0x1208c);
351 /* multi-pins for DVO single-edge */
352 data |= 0x000000cf;
353 ast_write32(ast, 0x1208c, data);
354
355 data = ast_read32(ast, 0x120a4);
356 /* multi-pins for DVO single-edge */
357 data |= 0xffff0000;
358 ast_write32(ast, 0x120a4, data);
359
360 data = ast_read32(ast, 0x120a8);
361 /* multi-pins for DVO single-edge */
362 data |= 0x0000000f;
363 ast_write32(ast, 0x120a8, data);
364
365 data = ast_read32(ast, 0x12094);
366 /* multi-pins for DVO single-edge */
367 data |= 0x00000002;
368 ast_write32(ast, 0x12094, data);
369 }
370 }
371
372 /* Force to DVO */
373 data = ast_read32(ast, 0x1202c);
374 data &= 0xfffbffff;
375 ast_write32(ast, 0x1202c, data);
376
377 /* Init VGA DVO Settings */
378 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
379 return true;
380}
381
382void ast_init_3rdtx(struct drm_device *dev)
383{
384 struct ast_private *ast = dev->dev_private;
385 u8 jreg;
386 u32 data;
387 if (ast->chip == AST2300 || ast->chip == AST2400) {
388 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
389 switch (jreg & 0x0e) {
390 case 0x04:
391 ast_init_dvo(dev);
392 break;
393 case 0x08:
394 ast_launch_m68k(dev);
395 break;
396 case 0x0c:
397 ast_init_dvo(dev);
398 break;
399 default:
400 if (ast->tx_chip_type == AST_TX_SIL164)
401 ast_init_dvo(dev);
402 else {
403 ast_write32(ast, 0x12000, 0x1688a8a8);
404 data = ast_read32(ast, 0x1202c);
405 data &= 0xfffcffff;
406 ast_write32(ast, 0x1202c, data);
407 }
408 }
409 }
410}
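Taken together, the new helpers give the driver a straightforward DP501 bring-up: fetch the firmware, boot the on-chip 68k service processor, then query the link's pixel-clock budget. The 90/54 factors in ast_get_dp501_max_clk() fall out of DisplayPort arithmetic: a 2.7 Gb/s lane (DPCD rate 0x0a) carries 2.16 Gb/s after 8b/10b coding, which at 24 bpp is about 90 MHz of pixel clock per lane; a 1.62 Gb/s lane likewise gives 54 MHz. A hypothetical caller, for illustration only (the driver actually wires these helpers into its detect and mode-setting paths):

	static int foo_enable_dp501(struct drm_device *dev)
	{
		struct ast_private *ast = dev->dev_private;

		if (ast_load_dp501_microcode(dev))	/* request ast_dp501_fw.bin */
			return -ENODEV;
		if (!ast_launch_m68k(dev))		/* copy firmware, start the MCU */
			return -EIO;
		ast->dp501_maxclk = ast_get_dp501_max_clk(dev);
		ast_set_dp501_video_output(dev, 1);	/* 1 = video on, 0 = off */
		return 0;
	}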
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 5137f15dba19..44074fbcf7ff 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,7 @@ static int ast_drm_thaw(struct drm_device *dev)
94 ast_post_gpu(dev); 94 ast_post_gpu(dev);
95 95
96 drm_mode_config_reset(dev); 96 drm_mode_config_reset(dev);
97 drm_modeset_lock_all(dev);
98 drm_helper_resume_force_mode(dev); 97 drm_helper_resume_force_mode(dev);
99 drm_modeset_unlock_all(dev);
100 98
101 console_lock(); 99 console_lock();
102 ast_fbdev_set_suspend(dev, 0); 100 ast_fbdev_set_suspend(dev, 0);
@@ -198,7 +196,6 @@ static const struct file_operations ast_fops = {
198 196
199static struct drm_driver driver = { 197static struct drm_driver driver = {
200 .driver_features = DRIVER_MODESET | DRIVER_GEM, 198 .driver_features = DRIVER_MODESET | DRIVER_GEM,
201 .dev_priv_size = 0,
202 199
203 .load = ast_driver_load, 200 .load = ast_driver_load,
204 .unload = ast_driver_unload, 201 .unload = ast_driver_unload,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 9833a1b1acc1..5d6a87573c33 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -61,9 +61,17 @@ enum ast_chip {
61 AST2200, 61 AST2200,
62 AST2150, 62 AST2150,
63 AST2300, 63 AST2300,
64 AST2400,
64 AST1180, 65 AST1180,
65}; 66};
66 67
68enum ast_tx_chip {
69 AST_TX_NONE,
70 AST_TX_SIL164,
71 AST_TX_ITE66121,
72 AST_TX_DP501,
73};
74
67#define AST_DRAM_512Mx16 0 75#define AST_DRAM_512Mx16 0
68#define AST_DRAM_1Gx16 1 76#define AST_DRAM_1Gx16 1
69#define AST_DRAM_512Mx32 2 77#define AST_DRAM_512Mx32 2
@@ -102,6 +110,12 @@ struct ast_private {
102 * we have. */ 110 * we have. */
103 struct ttm_bo_kmap_obj cache_kmap; 111 struct ttm_bo_kmap_obj cache_kmap;
104 int next_cursor; 112 int next_cursor;
113 bool support_wide_screen;
114
115 enum ast_tx_chip tx_chip_type;
116 u8 dp501_maxclk;
117 u8 *dp501_fw_addr;
118 const struct firmware *dp501_fw; /* dp501 fw */
105}; 119};
106 120
107int ast_driver_load(struct drm_device *dev, unsigned long flags); 121int ast_driver_load(struct drm_device *dev, unsigned long flags);
@@ -368,4 +382,14 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma);
368 382
369/* ast post */ 383/* ast post */
370void ast_post_gpu(struct drm_device *dev); 384void ast_post_gpu(struct drm_device *dev);
385u32 ast_mindwm(struct ast_private *ast, u32 r);
386void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
387/* ast dp501 */
388int ast_load_dp501_microcode(struct drm_device *dev);
389void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
390bool ast_launch_m68k(struct drm_device *dev);
391bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
392bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
393u8 ast_get_dp501_max_clk(struct drm_device *dev);
394void ast_init_3rdtx(struct drm_device *dev);
371#endif 395#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 50535fd5a88d..a2cc6be97983 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -66,12 +66,16 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
66static int ast_detect_chip(struct drm_device *dev) 66static int ast_detect_chip(struct drm_device *dev)
67{ 67{
68 struct ast_private *ast = dev->dev_private; 68 struct ast_private *ast = dev->dev_private;
69 uint32_t data, jreg;
69 70
70 if (dev->pdev->device == PCI_CHIP_AST1180) { 71 if (dev->pdev->device == PCI_CHIP_AST1180) {
71 ast->chip = AST1100; 72 ast->chip = AST1100;
72 DRM_INFO("AST 1180 detected\n"); 73 DRM_INFO("AST 1180 detected\n");
73 } else { 74 } else {
74 if (dev->pdev->revision >= 0x20) { 75 if (dev->pdev->revision >= 0x30) {
76 ast->chip = AST2400;
77 DRM_INFO("AST 2400 detected\n");
78 } else if (dev->pdev->revision >= 0x20) {
75 ast->chip = AST2300; 79 ast->chip = AST2300;
76 DRM_INFO("AST 2300 detected\n"); 80 DRM_INFO("AST 2300 detected\n");
77 } else if (dev->pdev->revision >= 0x10) { 81 } else if (dev->pdev->revision >= 0x10) {
@@ -104,6 +108,59 @@ static int ast_detect_chip(struct drm_device *dev)
104 DRM_INFO("AST 2000 detected\n"); 108 DRM_INFO("AST 2000 detected\n");
105 } 109 }
106 } 110 }
111
112 switch (ast->chip) {
113 case AST1180:
114 ast->support_wide_screen = true;
115 break;
116 case AST2000:
117 ast->support_wide_screen = false;
118 break;
119 default:
120 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
121 if (!(jreg & 0x80))
122 ast->support_wide_screen = true;
123 else if (jreg & 0x01)
124 ast->support_wide_screen = true;
125 else {
126 ast->support_wide_screen = false;
127 ast_write32(ast, 0xf004, 0x1e6e0000);
128 ast_write32(ast, 0xf000, 0x1);
129 data = ast_read32(ast, 0x1207c);
130 data &= 0x300;
131 if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
132 ast->support_wide_screen = true;
133 if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
134 ast->support_wide_screen = true;
135 }
136 break;
137 }
138
139 ast->tx_chip_type = AST_TX_NONE;
140 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
141 if (jreg & 0x80)
142 ast->tx_chip_type = AST_TX_SIL164;
143 if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
144 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
145 switch (jreg) {
146 case 0x04:
147 ast->tx_chip_type = AST_TX_SIL164;
148 break;
149 case 0x08:
150 ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL);
151 if (ast->dp501_fw_addr) {
152 /* backup firmware */
153 if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
154 kfree(ast->dp501_fw_addr);
155 ast->dp501_fw_addr = NULL;
156 }
157 }
158 /* fallthrough */
159 case 0x0c:
160 ast->tx_chip_type = AST_TX_DP501;
161 }
162 }
163
107 return 0; 164 return 0;
108} 165}
109 166
@@ -129,7 +186,7 @@ static int ast_get_dram_info(struct drm_device *dev)
129 else 186 else
130 ast->dram_bus_width = 32; 187 ast->dram_bus_width = 32;
131 188
132 if (ast->chip == AST2300) { 189 if (ast->chip == AST2300 || ast->chip == AST2400) {
133 switch (data & 0x03) { 190 switch (data & 0x03) {
134 case 0: 191 case 0:
135 ast->dram_type = AST_DRAM_512Mx16; 192 ast->dram_type = AST_DRAM_512Mx16;
@@ -257,17 +314,32 @@ static u32 ast_get_vram_info(struct drm_device *dev)
257{ 314{
258 struct ast_private *ast = dev->dev_private; 315 struct ast_private *ast = dev->dev_private;
259 u8 jreg; 316 u8 jreg;
260 317 u32 vram_size;
261 ast_open_key(ast); 318 ast_open_key(ast);
262 319
320 vram_size = AST_VIDMEM_DEFAULT_SIZE;
263 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff); 321 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
264 switch (jreg & 3) { 322 switch (jreg & 3) {
265 case 0: return AST_VIDMEM_SIZE_8M; 323 case 0: vram_size = AST_VIDMEM_SIZE_8M; break;
266 case 1: return AST_VIDMEM_SIZE_16M; 324 case 1: vram_size = AST_VIDMEM_SIZE_16M; break;
267 case 2: return AST_VIDMEM_SIZE_32M; 325 case 2: vram_size = AST_VIDMEM_SIZE_32M; break;
268 case 3: return AST_VIDMEM_SIZE_64M; 326 case 3: vram_size = AST_VIDMEM_SIZE_64M; break;
327 }
328
329 jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
330 switch (jreg & 0x03) {
331 case 1:
332 vram_size -= 0x100000;
333 break;
334 case 2:
335 vram_size -= 0x200000;
336 break;
337 case 3:
338 vram_size -= 0x400000;
339 break;
269 } 340 }
270 return AST_VIDMEM_DEFAULT_SIZE; 341
342 return vram_size;
271} 343}
272 344
273int ast_driver_load(struct drm_device *dev, unsigned long flags) 345int ast_driver_load(struct drm_device *dev, unsigned long flags)
@@ -316,6 +388,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
316 if (ast->chip == AST2100 || 388 if (ast->chip == AST2100 ||
317 ast->chip == AST2200 || 389 ast->chip == AST2200 ||
318 ast->chip == AST2300 || 390 ast->chip == AST2300 ||
391 ast->chip == AST2400 ||
319 ast->chip == AST1180) { 392 ast->chip == AST1180) {
320 dev->mode_config.max_width = 1920; 393 dev->mode_config.max_width = 1920;
321 dev->mode_config.max_height = 2048; 394 dev->mode_config.max_height = 2048;
@@ -343,6 +416,7 @@ int ast_driver_unload(struct drm_device *dev)
343{ 416{
344 struct ast_private *ast = dev->dev_private; 417 struct ast_private *ast = dev->dev_private;
345 418
419 kfree(ast->dp501_fw_addr);
346 ast_mode_fini(dev); 420 ast_mode_fini(dev);
347 ast_fbdev_fini(dev); 421 ast_fbdev_fini(dev);
348 drm_mode_config_cleanup(dev); 422 drm_mode_config_cleanup(dev);
@@ -411,16 +485,13 @@ static void ast_bo_unref(struct ast_bo **bo)
411 485
412 tbo = &((*bo)->bo); 486 tbo = &((*bo)->bo);
413 ttm_bo_unref(&tbo); 487 ttm_bo_unref(&tbo);
414 if (tbo == NULL) 488 *bo = NULL;
415 *bo = NULL;
416
417} 489}
490
418void ast_gem_free_object(struct drm_gem_object *obj) 491void ast_gem_free_object(struct drm_gem_object *obj)
419{ 492{
420 struct ast_bo *ast_bo = gem_to_ast_bo(obj); 493 struct ast_bo *ast_bo = gem_to_ast_bo(obj);
421 494
422 if (!ast_bo)
423 return;
424 ast_bo_unref(&ast_bo); 495 ast_bo_unref(&ast_bo);
425} 496}
426 497
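The reworked ast_get_vram_info() above now subtracts the firmware-reserved buffer encoded in scratch register 0x99[1:0] from the base size selected by 0xaa[1:0]. Restated as a stand-alone check of the arithmetic (ast_vram_bytes is a hypothetical name for this sketch):

	#include <assert.h>

	static unsigned int ast_vram_bytes(unsigned int aa, unsigned int cr99)
	{
		static const unsigned int base[4] = {
			8u << 20, 16u << 20, 32u << 20, 64u << 20
		};
		static const unsigned int reserved[4] = {
			0, 1u << 20, 2u << 20, 4u << 20
		};
		return base[aa & 3] - reserved[cr99 & 3];
	}

	int main(void)
	{
		assert(ast_vram_bytes(1, 2) == 14u << 20);	/* 16 MB - 2 MB */
		assert(ast_vram_bytes(3, 0) == 64u << 20);	/* nothing reserved */
		return 0;
	}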
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a4afdc8bb578..114aee941d46 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -115,11 +115,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
115 else 115 else
116 vbios_mode->enh_table = &res_1280x1024[refresh_rate_index]; 116 vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
117 break; 117 break;
118 case 1360:
119 vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
120 break;
118 case 1440: 121 case 1440:
119 vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; 122 vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
120 break; 123 break;
121 case 1600: 124 case 1600:
122 vbios_mode->enh_table = &res_1600x1200[refresh_rate_index]; 125 if (crtc->mode.crtc_vdisplay == 900)
126 vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
127 else
128 vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
123 break; 129 break;
124 case 1680: 130 case 1680:
125 vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; 131 vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
@@ -175,14 +181,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
175 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff); 181 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
176 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff); 182 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
177 183
178 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); 184 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
179 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel); 185 if (vbios_mode->enh_table->flags & NewModeInfo) {
180 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000); 186 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
181 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay); 187 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
182 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8); 188 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
189 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
190 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
183 191
184 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay); 192 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
185 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8); 193 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
194 }
186 } 195 }
187 196
188 return true; 197 return true;
@@ -389,7 +398,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
 
 	/* Set Threshold */
-	if (ast->chip == AST2300) {
+	if (ast->chip == AST2300 || ast->chip == AST2400) {
 		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
 		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
 	} else if (ast->chip == AST2100 ||
@@ -451,9 +460,13 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+		if (ast->tx_chip_type == AST_TX_DP501)
+			ast_set_dp501_video_output(crtc->dev, 1);
 		ast_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_OFF:
+		if (ast->tx_chip_type == AST_TX_DP501)
+			ast_set_dp501_video_output(crtc->dev, 0);
 		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
 		break;
 	}
@@ -729,10 +742,24 @@ static int ast_encoder_init(struct drm_device *dev)
 static int ast_get_modes(struct drm_connector *connector)
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);
+	struct ast_private *ast = connector->dev->dev_private;
 	struct edid *edid;
 	int ret;
-
-	edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+	bool flags = false;
+	if (ast->tx_chip_type == AST_TX_DP501) {
+		ast->dp501_maxclk = 0xff;
+		edid = kmalloc(128, GFP_KERNEL);
+		if (!edid)
+			return -ENOMEM;
+
+		flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
+		if (flags)
+			ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
+		else
+			kfree(edid);
+	}
+	if (!flags)
+		edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
 	if (edid) {
 		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
 		ret = drm_add_edid_modes(connector, edid);
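
With a DP501 transmitter the driver now sources the EDID over the chip's sideband channel and only falls back to DDC when that read fails (note the bool is named flags but is really a success flag). A minimal host-side sketch of the same try-then-fall-back shape; dp501_read_edid() and ddc_read_edid() below are hypothetical stand-ins for ast_dp501_read_edid() and drm_get_edid():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define EDID_LENGTH 128

/* Hypothetical stubs for the driver's sideband and DDC reads. */
static bool dp501_read_edid(unsigned char *buf) { (void)buf; return false; }
static unsigned char *ddc_read_edid(void) { return calloc(1, EDID_LENGTH); }

/* Sideband first, DDC second -- the shape of the patched ast_get_modes(). */
static unsigned char *get_edid_with_fallback(void)
{
	unsigned char *edid = malloc(EDID_LENGTH);

	if (edid && dp501_read_edid(edid))
		return edid;            /* DP501 sideband read succeeded */
	free(edid);                     /* mirrors the kfree() on failure */
	return ddc_read_edid();         /* fall back to the I2C/DDC path */
}

int main(void)
{
	unsigned char *edid = get_edid_with_fallback();
	printf("edid %s\n", edid ? "found" : "missing");
	free(edid);
	return 0;
}
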
@@ -746,7 +773,56 @@ static int ast_get_modes(struct drm_connector *connector)
 static int ast_mode_valid(struct drm_connector *connector,
 			  struct drm_display_mode *mode)
 {
-	return MODE_OK;
+	struct ast_private *ast = connector->dev->dev_private;
+	int flags = MODE_NOMODE;
+	uint32_t jtemp;
+
+	if (ast->support_wide_screen) {
+		if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
+			return MODE_OK;
+		if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
+			return MODE_OK;
+		if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
+			return MODE_OK;
+		if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
+			return MODE_OK;
+		if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+			return MODE_OK;
+
+		if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST1180)) {
+			if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
+				return MODE_OK;
+
+			if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
+				jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+				if (jtemp & 0x01)
+					return MODE_NOMODE;
+				else
+					return MODE_OK;
+			}
+		}
+	}
+	switch (mode->hdisplay) {
+	case 640:
+		if (mode->vdisplay == 480) flags = MODE_OK;
+		break;
+	case 800:
+		if (mode->vdisplay == 600) flags = MODE_OK;
+		break;
+	case 1024:
+		if (mode->vdisplay == 768) flags = MODE_OK;
+		break;
+	case 1280:
+		if (mode->vdisplay == 1024) flags = MODE_OK;
+		break;
+	case 1600:
+		if (mode->vdisplay == 1200) flags = MODE_OK;
+		break;
+	default:
+		return flags;
+	}
+
+	return flags;
 }
 
 static void ast_connector_destroy(struct drm_connector *connector)
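
ast_mode_valid() above is an explicit whitelist: wide-screen modes are accepted only when support_wide_screen is set, 1920x1200 additionally consults scratch register 0xd1, and everything else must match one of five classic resolutions. The same policy (minus the chip- and register-gated 1920-wide cases) can be phrased table-driven; this is an illustrative sketch, not the committed code:

#include <stdbool.h>
#include <stdio.h>

#define N(a) (sizeof(a) / sizeof((a)[0]))

struct res { int w, h; };

/* Base modes from the switch at the end of ast_mode_valid(). */
static const struct res base_modes[] = {
	{ 640, 480 }, { 800, 600 }, { 1024, 768 }, { 1280, 1024 }, { 1600, 1200 },
};

/* Wide modes gated on ast->support_wide_screen in the patch. */
static const struct res wide_modes[] = {
	{ 1280, 800 }, { 1360, 768 }, { 1440, 900 }, { 1600, 900 }, { 1680, 1050 },
};

static bool in_table(const struct res *t, unsigned n, int w, int h)
{
	for (unsigned i = 0; i < n; i++)
		if (t[i].w == w && t[i].h == h)
			return true;
	return false;
}

static bool mode_allowed(bool wide, int w, int h)
{
	if (wide && in_table(wide_modes, N(wide_modes), w, h))
		return true;
	return in_table(base_modes, N(base_modes), w, h);
}

int main(void)
{
	printf("%d %d\n", mode_allowed(true, 1360, 768), mode_allowed(false, 1360, 768));
	return 0;
}
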
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 635f6ffc27c2..38d437f3a267 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -78,7 +78,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
 	for (i = 0x81; i <= 0x8f; i++)
 		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
 
-	if (ast->chip == AST2300) {
+	if (ast->chip == AST2300 || ast->chip == AST2400) {
 		if (dev->pdev->revision >= 0x20)
 			ext_reg_info = extreginfo_ast2300;
 		else
@@ -102,23 +102,32 @@ ast_set_def_ext_reg(struct drm_device *dev)
 
 	/* Enable RAMDAC for A1 */
 	reg = 0x04;
-	if (ast->chip == AST2300)
+	if (ast->chip == AST2300 || ast->chip == AST2400)
 		reg |= 0x20;
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
 }
 
-static inline u32 mindwm(struct ast_private *ast, u32 r)
+u32 ast_mindwm(struct ast_private *ast, u32 r)
 {
+	uint32_t data;
+
 	ast_write32(ast, 0xf004, r & 0xffff0000);
 	ast_write32(ast, 0xf000, 0x1);
 
+	do {
+		data = ast_read32(ast, 0xf004) & 0xffff0000;
+	} while (data != (r & 0xffff0000));
 	return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
 }
 
-static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
 {
+	uint32_t data;
 	ast_write32(ast, 0xf004, r & 0xffff0000);
 	ast_write32(ast, 0xf000, 0x1);
+	do {
+		data = ast_read32(ast, 0xf004) & 0xffff0000;
+	} while (data != (r & 0xffff0000));
 	ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
 }
 
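
The renamed accessors lose their static/inline because other files (the new DP501 code) now call them, and both gain a read-back loop: after programming the 64 KiB window base into 0xf004 they spin until that base latches before touching 0x10000 + offset. A host-side sketch of the handshake against a fake register file; ast_read32()/ast_write32() here are stubs for the driver's MMIO accessors, and like the kernel version the wait is unbounded:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x20000 / 4];              /* fake MMIO space */

static uint32_t ast_read32(uint32_t r)          { return regs[r / 4]; }
static void ast_write32(uint32_t r, uint32_t v) { regs[r / 4] = v; }

/* Same shape as the patched ast_mindwm(): select window, wait for latch, read. */
static uint32_t mindwm(uint32_t r)
{
	ast_write32(0xf004, r & 0xffff0000);    /* window base: high 16 bits */
	ast_write32(0xf000, 0x1);               /* enable the window */
	while ((ast_read32(0xf004) & 0xffff0000) != (r & 0xffff0000))
		;                               /* spin until the base reads back */
	return ast_read32(0x10000 + (r & 0x0000ffff));
}

int main(void)
{
	regs[(0x10000 + 0x0070) / 4] = 0xdeadbeef;
	printf("0x%08x\n", mindwm(0x1e6e0070));
	return 0;
}
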
@@ -154,28 +163,28 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
 		if (++timeout > TIMEOUT_AST2150) {
-			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 			return 0xffffffff;
 		}
 	} while (!data);
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
 		if (++timeout > TIMEOUT_AST2150) {
-			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 			return 0xffffffff;
 		}
 	} while (!data);
-	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 	return data;
 }
 
@@ -184,18 +193,18 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
 		if (++timeout > TIMEOUT_AST2150) {
-			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 			return 0xffffffff;
 		}
 	} while (!data);
-	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 	return data;
 }
 #endif
@@ -215,7 +224,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
 	u32 patcnt, loop;
 
 	for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
-		moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+		ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
 		for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
 			if (cbrtest_ast2150(ast))
 				break;
@@ -237,7 +246,7 @@ cbr_start:
 	passcnt = 0;
 
 	for (dlli = 0; dlli < 100; dlli++) {
-		moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+		ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
 		data = cbrscan_ast2150(ast, busw);
 		if (data != 0) {
 			if (data & 0x1) {
@@ -254,7 +263,7 @@ cbr_start:
 		goto cbr_start;
 
 	dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
-	moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+	ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
 }
 
 
@@ -365,10 +374,12 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_open_key(ast);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300)
+	if (ast->chip == AST2300 || ast->chip == AST2400)
 		ast_init_dram_2300(dev);
 	else
 		ast_init_dram_reg(dev);
+
+	ast_init_3rdtx(dev);
 }
 
 /* AST 2300 DRAM settings */
@@ -403,6 +414,7 @@ struct ast2300_dram_param {
 /*
  * DQSI DLL CBR Setting
  */
+#define CBR_SIZE0 ((1 << 10) - 1)
 #define CBR_SIZE1 ((4 << 10) - 1)
 #define CBR_SIZE2 ((64 << 10) - 1)
 #define CBR_PASSNUM 5
@@ -423,88 +435,84 @@ static const u32 pattern[8] = {
 	0x7C61D253
 };
 
-#if 0 /* unused in DDX, included for completeness */
 static int mmc_test_burst(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
 		if (data & 0x2000) {
 			return 0;
 		}
 		if (++timeout > TIMEOUT) {
-			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 			return 0;
 		}
 	} while (!data);
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
 	return 1;
 }
-#endif
 
 static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
 		if (++timeout > TIMEOUT) {
-			moutdwm(ast, 0x1e6e0070, 0x0);
+			ast_moutdwm(ast, 0x1e6e0070, 0x0);
 			return -1;
 		}
 	} while (!data);
-	data = mindwm(ast, 0x1e6e0078);
+	data = ast_mindwm(ast, 0x1e6e0078);
 	data = (data | (data >> 16)) & 0xffff;
-	moutdwm(ast, 0x1e6e0070, 0x0);
+	ast_moutdwm(ast, 0x1e6e0070, 0x0);
 	return data;
 }
 
-#if 0 /* Unused in DDX here for completeness */
 static int mmc_test_single(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
 		if (data & 0x2000)
 			return 0;
 		if (++timeout > TIMEOUT) {
-			moutdwm(ast, 0x1e6e0070, 0x0);
+			ast_moutdwm(ast, 0x1e6e0070, 0x0);
 			return 0;
 		}
 	} while (!data);
-	moutdwm(ast, 0x1e6e0070, 0x0);
+	ast_moutdwm(ast, 0x1e6e0070, 0x0);
 	return 1;
 }
-#endif
 
 static int mmc_test_single2(struct ast_private *ast, u32 datagen)
 {
 	u32 data, timeout;
 
-	moutdwm(ast, 0x1e6e0070, 0x00000000);
-	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+	ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
 	timeout = 0;
 	do {
-		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
 		if (++timeout > TIMEOUT) {
-			moutdwm(ast, 0x1e6e0070, 0x0);
+			ast_moutdwm(ast, 0x1e6e0070, 0x0);
 			return -1;
 		}
 	} while (!data);
-	data = mindwm(ast, 0x1e6e0078);
+	data = ast_mindwm(ast, 0x1e6e0078);
 	data = (data | (data >> 16)) & 0xffff;
-	moutdwm(ast, 0x1e6e0070, 0x0);
+	ast_moutdwm(ast, 0x1e6e0070, 0x0);
 	return data;
 }
 
@@ -533,7 +541,7 @@ static int cbr_scan(struct ast_private *ast)
 
 	data2 = 3;
 	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
-		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
 		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
 			if ((data = cbr_test(ast)) != 0) {
 				data2 &= data;
@@ -568,7 +576,7 @@ static u32 cbr_scan2(struct ast_private *ast)
 
 	data2 = 0xffff;
 	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
-		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
 		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
 			if ((data = cbr_test2(ast)) != 0) {
 				data2 &= data;
@@ -583,106 +591,35 @@ static u32 cbr_scan2(struct ast_private *ast)
 	return data2;
 }
 
-#if 0 /* unused in DDX - added for completeness */
-static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
-{
-	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
-
-	gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
-	gold_sadj[1] = gold_sadj[0] >> 8;
-	gold_sadj[0] = gold_sadj[0] & 0xff;
-	gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
-	gold_sadj[1] = gold_sadj[0];
-
-	for (cnt = 0; cnt < 16; cnt++) {
-		dllmin[cnt] = 0xff;
-		dllmax[cnt] = 0x0;
-	}
-	passcnt = 0;
-	for (dlli = 0; dlli < 76; dlli++) {
-		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
-		/* Wait DQSI latch phase calibration */
-		moutdwm(ast, 0x1E6E0074, 0x00000010);
-		moutdwm(ast, 0x1E6E0070, 0x00000003);
-		do {
-			data = mindwm(ast, 0x1E6E0070);
-		} while (!(data & 0x00001000));
-		moutdwm(ast, 0x1E6E0070, 0x00000000);
-
-		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
-		data = cbr_scan2(ast);
-		if (data != 0) {
-			mask = 0x00010001;
-			for (cnt = 0; cnt < 16; cnt++) {
-				if (data & mask) {
-					if (dllmin[cnt] > dlli) {
-						dllmin[cnt] = dlli;
-					}
-					if (dllmax[cnt] < dlli) {
-						dllmax[cnt] = dlli;
-					}
-				}
-				mask <<= 1;
-			}
-			passcnt++;
-		} else if (passcnt >= CBR_THRESHOLD) {
-			break;
-		}
-	}
-	data = 0;
-	for (cnt = 0; cnt < 8; cnt++) {
-		data >>= 3;
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
-			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
-			if (gold_sadj[0] >= dlli) {
-				dlli = (gold_sadj[0] - dlli) >> 1;
-				if (dlli > 3) {
-					dlli = 3;
-				}
-			} else {
-				dlli = (dlli - gold_sadj[0]) >> 1;
-				if (dlli > 4) {
-					dlli = 4;
-				}
-				dlli = (8 - dlli) & 0x7;
-			}
-			data |= dlli << 21;
-		}
-	}
-	moutdwm(ast, 0x1E6E0080, data);
-
-	data = 0;
-	for (cnt = 8; cnt < 16; cnt++) {
-		data >>= 3;
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
-			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
-			if (gold_sadj[1] >= dlli) {
-				dlli = (gold_sadj[1] - dlli) >> 1;
-				if (dlli > 3) {
-					dlli = 3;
-				} else {
-					dlli = (dlli - 1) & 0x7;
-				}
-			} else {
-				dlli = (dlli - gold_sadj[1]) >> 1;
-				dlli += 1;
-				if (dlli > 4) {
-					dlli = 4;
-				}
-				dlli = (8 - dlli) & 0x7;
-			}
-			data |= dlli << 21;
-		}
-	}
-	moutdwm(ast, 0x1E6E0084, data);
-
-} /* finetuneDQI */
-#endif
+static u32 cbr_test3(struct ast_private *ast)
+{
+	if (!mmc_test_burst(ast, 0))
+		return 0;
+	if (!mmc_test_single(ast, 0))
+		return 0;
+	return 1;
+}
+
+static u32 cbr_scan3(struct ast_private *ast)
+{
+	u32 patcnt, loop;
+
+	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		for (loop = 0; loop < 2; loop++) {
+			if (cbr_test3(ast))
+				break;
+		}
+		if (loop == 2)
+			return 0;
+	}
+	return 1;
+}
 
-static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
 {
-	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
-
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+	bool status = false;
 FINETUNE_START:
 	for (cnt = 0; cnt < 16; cnt++) {
 		dllmin[cnt] = 0xff;
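
cbr_test3()/cbr_scan3() replace the old #if 0'd finetuneDQI(): each of the eight test patterns gets up to two attempts, and a pattern that fails both attempts fails the whole scan. (cbr_test3() requires both the burst and the single MMC test to pass, which is why mmc_test_burst() and mmc_test_single() lost their #if 0 guards earlier in this patch.) The control flow hinges on the post-loop value of loop; a compact sketch of that retry-per-pattern idiom, with the hardware test stubbed as a flaky predicate:

#include <stdlib.h>
#include <stdio.h>

#define PATNUM 8

static int test_pattern(unsigned p) { (void)p; return rand() % 2; } /* stand-in */

/* Mirrors cbr_scan3(): two tries per pattern; loop == 2 means both failed. */
static int scan_all_patterns(void)
{
	for (unsigned pat = 0; pat < PATNUM; pat++) {
		unsigned loop;
		for (loop = 0; loop < 2; loop++)
			if (test_pattern(pat))
				break;
		if (loop == 2)
			return 0;       /* both attempts failed: abort the scan */
	}
	return 1;                       /* every pattern passed at least once */
}

int main(void) { printf("%d\n", scan_all_patterns()); return 0; }
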
@@ -690,16 +627,8 @@ FINETUNE_START:
 	}
 	passcnt = 0;
 	for (dlli = 0; dlli < 76; dlli++) {
-		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
-		/* Wait DQSI latch phase calibration */
-		moutdwm(ast, 0x1E6E0074, 0x00000010);
-		moutdwm(ast, 0x1E6E0070, 0x00000003);
-		do {
-			data = mindwm(ast, 0x1E6E0070);
-		} while (!(data & 0x00001000));
-		moutdwm(ast, 0x1E6E0070, 0x00000000);
-
-		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+		ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
 		data = cbr_scan2(ast);
 		if (data != 0) {
 			mask = 0x00010001;
@@ -727,9 +656,13 @@ FINETUNE_START:
 			passcnt++;
 		}
 	}
+	if (retry++ > 10)
+		goto FINETUNE_DONE;
 	if (passcnt != 16) {
 		goto FINETUNE_START;
 	}
+	status = true;
+FINETUNE_DONE:
 	gold_sadj[0] = gold_sadj[0] >> 4;
 	gold_sadj[1] = gold_sadj[0];
 
@@ -753,7 +686,7 @@ FINETUNE_START:
 			data |= dlli << 21;
 		}
 	}
-	moutdwm(ast, 0x1E6E0080, data);
+	ast_moutdwm(ast, 0x1E6E0080, data);
 
 	data = 0;
 	for (cnt = 8; cnt < 16; cnt++) {
@@ -778,162 +711,116 @@ FINETUNE_START:
 			data |= dlli << 21;
 		}
 	}
-	moutdwm(ast, 0x1E6E0084, data);
-
+	ast_moutdwm(ast, 0x1E6E0084, data);
+	return status;
 } /* finetuneDQI_L */
 
-static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+static void finetuneDQSI(struct ast_private *ast)
 {
-	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+	u32 dlli, dqsip, dqidly;
+	u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+	u32 g_dqidly, g_dqsip, g_margin, g_side;
+	u16 pass[32][2][2];
+	char tag[2][76];
+
+	/* Disable DQI CBR */
+	reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+	reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+	reg_mcr18 &= 0x0000ffff;
+	ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
 
-	for (cnt = 0; cnt < 16; cnt++) {
-		dllmin[cnt] = 0xff;
-		dllmax[cnt] = 0x0;
-	}
-	passcnt = 0;
 	for (dlli = 0; dlli < 76; dlli++) {
-		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
-		/* Wait DQSI latch phase calibration */
-		moutdwm(ast, 0x1E6E0074, 0x00000010);
-		moutdwm(ast, 0x1E6E0070, 0x00000003);
-		do {
-			data = mindwm(ast, 0x1E6E0070);
-		} while (!(data & 0x00001000));
-		moutdwm(ast, 0x1E6E0070, 0x00000000);
-
-		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
-		data = cbr_scan2(ast);
-		if (data != 0) {
-			mask = 0x00010001;
-			for (cnt = 0; cnt < 16; cnt++) {
-				if (data & mask) {
-					if (dllmin[cnt] > dlli) {
-						dllmin[cnt] = dlli;
-					}
-					if (dllmax[cnt] < dlli) {
-						dllmax[cnt] = dlli;
-					}
-				}
-				mask <<= 1;
-			}
-			passcnt++;
-		} else if (passcnt >= CBR_THRESHOLD2) {
-			break;
-		}
+		tag[0][dlli] = 0x0;
+		tag[1][dlli] = 0x0;
 	}
-	gold_sadj[0] = 0x0;
-	gold_sadj[1] = 0xFF;
-	for (cnt = 0; cnt < 8; cnt++) {
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
-			if (gold_sadj[0] < dllmin[cnt]) {
-				gold_sadj[0] = dllmin[cnt];
-			}
-			if (gold_sadj[1] > dllmax[cnt]) {
-				gold_sadj[1] = dllmax[cnt];
-			}
-		}
+	for (dqidly = 0; dqidly < 32; dqidly++) {
+		pass[dqidly][0][0] = 0xff;
+		pass[dqidly][0][1] = 0x0;
+		pass[dqidly][1][0] = 0xff;
+		pass[dqidly][1][1] = 0x0;
 	}
-	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
-	gold_sadj[1] = mindwm(ast, 0x1E6E0080);
-
-	data = 0;
-	for (cnt = 0; cnt < 8; cnt++) {
-		data >>= 3;
-		data2 = gold_sadj[1] & 0x7;
-		gold_sadj[1] >>= 3;
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
-			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
-			if (gold_sadj[0] >= dlli) {
-				dlli = (gold_sadj[0] - dlli) >> 1;
-				if (dlli > 0) {
-					dlli = 1;
-				}
-				if (data2 != 3) {
-					data2 = (data2 + dlli) & 0x7;
-				}
-			} else {
-				dlli = (dlli - gold_sadj[0]) >> 1;
-				if (dlli > 0) {
-					dlli = 1;
-				}
-				if (data2 != 4) {
-					data2 = (data2 - dlli) & 0x7;
-				}
+	for (dqidly = 0; dqidly < 32; dqidly++) {
+		passcnt[0] = passcnt[1] = 0;
+		for (dqsip = 0; dqsip < 2; dqsip++) {
+			ast_moutdwm(ast, 0x1E6E000C, 0);
+			ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+			ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+			for (dlli = 0; dlli < 76; dlli++) {
+				ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+				ast_moutdwm(ast, 0x1E6E0070, 0);
+				ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+				if (cbr_scan3(ast)) {
+					if (dlli == 0)
+						break;
+					passcnt[dqsip]++;
+					tag[dqsip][dlli] = 'P';
+					if (dlli < pass[dqidly][dqsip][0])
+						pass[dqidly][dqsip][0] = (u16) dlli;
+					if (dlli > pass[dqidly][dqsip][1])
+						pass[dqidly][dqsip][1] = (u16) dlli;
+				} else if (passcnt[dqsip] >= 5)
+					break;
+				else {
+					pass[dqidly][dqsip][0] = 0xff;
+					pass[dqidly][dqsip][1] = 0x0;
 				}
 			}
 		}
-			data |= data2 << 21;
-		}
+		if (passcnt[0] == 0 && passcnt[1] == 0)
+			dqidly++;
-	moutdwm(ast, 0x1E6E0080, data);
-
-	gold_sadj[0] = 0x0;
-	gold_sadj[1] = 0xFF;
-	for (cnt = 8; cnt < 16; cnt++) {
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
-			if (gold_sadj[0] < dllmin[cnt]) {
-				gold_sadj[0] = dllmin[cnt];
-			}
-			if (gold_sadj[1] > dllmax[cnt]) {
-				gold_sadj[1] = dllmax[cnt];
-			}
-		}
 	}
-	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
-	gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+	/* Search margin */
+	g_dqidly = g_dqsip = g_margin = g_side = 0;
 
-	data = 0;
-	for (cnt = 8; cnt < 16; cnt++) {
-		data >>= 3;
-		data2 = gold_sadj[1] & 0x7;
-		gold_sadj[1] >>= 3;
-		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
-			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
-			if (gold_sadj[0] >= dlli) {
-				dlli = (gold_sadj[0] - dlli) >> 1;
-				if (dlli > 0) {
-					dlli = 1;
-				}
-				if (data2 != 3) {
-					data2 = (data2 + dlli) & 0x7;
-				}
-			} else {
-				dlli = (dlli - gold_sadj[0]) >> 1;
-				if (dlli > 0) {
-					dlli = 1;
-				}
-				if (data2 != 4) {
-					data2 = (data2 - dlli) & 0x7;
-				}
+	for (dqidly = 0; dqidly < 32; dqidly++) {
+		for (dqsip = 0; dqsip < 2; dqsip++) {
+			if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+				continue;
+			diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+			if ((diff+2) < g_margin)
+				continue;
+			passcnt[0] = passcnt[1] = 0;
+			for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
+			for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
+			if (passcnt[0] > passcnt[1])
+				passcnt[0] = passcnt[1];
+			passcnt[1] = 0;
+			if (passcnt[0] > g_side)
+				passcnt[1] = passcnt[0] - g_side;
+			if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+				g_margin = diff;
+				g_dqidly = dqidly;
+				g_dqsip = dqsip;
+				g_side = passcnt[0];
+			} else if (passcnt[1] > 1 && g_side < 8) {
+				if (diff > g_margin)
+					g_margin = diff;
+				g_dqidly = dqidly;
+				g_dqsip = dqsip;
+				g_side = passcnt[0];
 			}
 		}
-			data |= data2 << 21;
 	}
-	moutdwm(ast, 0x1E6E0084, data);
-
+	reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+	ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-} /* finetuneDQI_L2 */
 
-static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+}
+static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
 {
-	u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+	u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+	bool status = false;
 
-
-	finetuneDQI_L(ast, param);
-	finetuneDQI_L2(ast, param);
+	finetuneDQSI(ast);
+	if (finetuneDQI_L(ast, param) == false)
+		return status;
 
 CBR_START2:
 	dllmin[0] = dllmin[1] = 0xff;
 	dllmax[0] = dllmax[1] = 0x0;
 	passcnt = 0;
 	for (dlli = 0; dlli < 76; dlli++) {
-		moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
-		/* Wait DQSI latch phase calibration */
-		moutdwm(ast, 0x1E6E0074, 0x00000010);
-		moutdwm(ast, 0x1E6E0070, 0x00000003);
-		do {
-			data = mindwm(ast, 0x1E6E0070);
-		} while (!(data & 0x00001000));
-		moutdwm(ast, 0x1E6E0070, 0x00000000);
-
-		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+		ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+		ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
 		data = cbr_scan(ast);
 		if (data != 0) {
 			if (data & 0x1) {
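
finetuneDQSI() (new in this patch, replacing finetuneDQI_L2) sweeps 32 DQ-input delays x 2 DQS-input phases, records each combination's min/max passing DLL setting in pass[][][] and tags passing cells, then keeps the (dqidly, dqsip) pair with the widest passing window. The bookkeeping reduces to a widest-window search; a simplified sketch under that reading (the pass/fail probe is a made-up stand-in for cbr_scan3(), and the real function also weighs how far a window extends past the previous winner via the g_side/passcnt arithmetic, which this omits):

#include <stdbool.h>
#include <stdio.h>

#define DQIDLY_STEPS 32
#define DQSIP_STEPS   2
#define DLL_STEPS    76

static bool probe(int dqidly, int dqsip, int dlli)   /* hypothetical stand-in */
{
	return (dlli > 20 + dqidly / 4) && (dlli < 60 - dqsip * 5);
}

int main(void)
{
	int best_dqidly = 0, best_dqsip = 0, best_width = -1;

	for (int dqidly = 0; dqidly < DQIDLY_STEPS; dqidly++) {
		for (int dqsip = 0; dqsip < DQSIP_STEPS; dqsip++) {
			int lo = DLL_STEPS, hi = -1;
			for (int dlli = 0; dlli < DLL_STEPS; dlli++) {
				if (!probe(dqidly, dqsip, dlli))
					continue;
				if (dlli < lo) lo = dlli;   /* window start */
				if (dlli > hi) hi = dlli;   /* window end */
			}
			if (hi >= lo && hi - lo > best_width) {
				best_width = hi - lo;
				best_dqidly = dqidly;
				best_dqsip = dqsip;
			}
		}
	}
	printf("dqidly=%d dqsip=%d width=%d\n", best_dqidly, best_dqsip, best_width);
	return 0;
}
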
@@ -957,44 +844,31 @@ CBR_START2:
 			break;
 		}
 	}
+	if (retry++ > 10)
+		goto CBR_DONE2;
 	if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
 		goto CBR_START2;
 	}
 	if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
 		goto CBR_START2;
 	}
+	status = true;
+CBR_DONE2:
 	dlli = (dllmin[1] + dllmax[1]) >> 1;
 	dlli <<= 8;
 	dlli += (dllmin[0] + dllmax[0]) >> 1;
-	moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
-
-	data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
-	data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
-	moutdwm(ast, 0x1E6E0018, data2);
-	moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
-
-	/* Wait DQSI latch phase calibration */
-	moutdwm(ast, 0x1E6E0074, 0x00000010);
-	moutdwm(ast, 0x1E6E0070, 0x00000003);
-	do {
-		data = mindwm(ast, 0x1E6E0070);
-	} while (!(data & 0x00001000));
-	moutdwm(ast, 0x1E6E0070, 0x00000000);
-	moutdwm(ast, 0x1E6E0070, 0x00000003);
-	do {
-		data = mindwm(ast, 0x1E6E0070);
-	} while (!(data & 0x00001000));
-	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+	return status;
 } /* CBRDLL2 */
 
 static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
 {
 	u32 trap, trap_AC2, trap_MRS;
 
-	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+	ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
 
 	/* Ger trap info */
-	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
 	trap_AC2 = 0x00020000 + (trap << 16);
 	trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
 	trap_MRS = 0x00000010 + (trap << 4);
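
cbr_dll2() used to retry its CBR scan forever via goto CBR_START2; it now gives up after 10 retries, returns a bool, and lets the caller restart the whole DRAM init. The pattern in isolation, expressed as a loop rather than the driver's goto (calibrate_once() is a hypothetical stand-in for one scan pass):

#include <stdbool.h>
#include <stdio.h>

static bool calibrate_once(void)        /* stand-in for one CBR scan pass */
{
	static int n;
	return ++n >= 3;                /* pretend the third attempt converges */
}

/* Shape of the patched cbr_dll2(): bounded retries instead of an endless goto. */
static bool calibrate(void)
{
	for (unsigned retry = 0; retry <= 10; retry++)
		if (calibrate_once())
			return true;
	return false;                   /* caller (ddr3_init/ddr2_init) restarts */
}

int main(void) { printf("%s\n", calibrate() ? "ok" : "failed"); return 0; }
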
@@ -1008,22 +882,35 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 
 	switch (param->dram_freq) {
 	case 336:
-		moutdwm(ast, 0x1E6E2020, 0x0190);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0190);
 		param->wodt = 0;
 		param->reg_AC1 = 0x22202725;
 		param->reg_AC2 = 0xAA007613 | trap_AC2;
 		param->reg_DQSIC = 0x000000BA;
 		param->reg_MRS = 0x04001400 | trap_MRS;
 		param->reg_EMRS = 0x00000000;
-		param->reg_IOZ = 0x00000034;
+		param->reg_IOZ = 0x00000023;
 		param->reg_DQIDLY = 0x00000074;
 		param->reg_FREQ = 0x00004DC0;
 		param->madj_max = 96;
 		param->dll2_finetune_step = 3;
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2 = 0xAA007613 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2 = 0xAA00761C | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2 = 0xAA007636 | trap_AC2;
+			break;
+		}
 		break;
 	default:
 	case 396:
-		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
 		param->wodt = 1;
 		param->reg_AC1 = 0x33302825;
 		param->reg_AC2 = 0xCC009617 | trap_AC2;
@@ -1033,7 +920,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->reg_IOZ = 0x00000034;
 		param->reg_DRV = 0x000000FA;
 		param->reg_DQIDLY = 0x00000089;
-		param->reg_FREQ = 0x000050C0;
+		param->reg_FREQ = 0x00005040;
 		param->madj_max = 96;
 		param->dll2_finetune_step = 4;
 
@@ -1053,14 +940,14 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		break;
 
 	case 408:
-		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
 		param->wodt = 1;
 		param->reg_AC1 = 0x33302825;
 		param->reg_AC2 = 0xCC009617 | trap_AC2;
 		param->reg_DQSIC = 0x000000E2;
 		param->reg_MRS = 0x04001600 | trap_MRS;
 		param->reg_EMRS = 0x00000000;
-		param->reg_IOZ = 0x00000034;
+		param->reg_IOZ = 0x00000023;
 		param->reg_DRV = 0x000000FA;
 		param->reg_DQIDLY = 0x00000089;
 		param->reg_FREQ = 0x000050C0;
@@ -1083,7 +970,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 
 		break;
 	case 456:
-		moutdwm(ast, 0x1E6E2020, 0x0230);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0230);
 		param->wodt = 0;
 		param->reg_AC1 = 0x33302926;
 		param->reg_AC2 = 0xCD44961A;
@@ -1097,7 +984,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 4;
 		break;
 	case 504:
-		moutdwm(ast, 0x1E6E2020, 0x0270);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0270);
 		param->wodt = 1;
 		param->reg_AC1 = 0x33302926;
 		param->reg_AC2 = 0xDE44A61D;
@@ -1111,7 +998,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 4;
 		break;
 	case 528:
-		moutdwm(ast, 0x1E6E2020, 0x0290);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0290);
 		param->wodt = 1;
 		param->rodt = 1;
 		param->reg_AC1 = 0x33302926;
@@ -1127,7 +1014,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 576:
-		moutdwm(ast, 0x1E6E2020, 0x0140);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0140);
 		param->reg_MADJ = 0x00136868;
 		param->reg_SADJ = 0x00004534;
 		param->wodt = 1;
@@ -1145,7 +1032,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 600:
-		moutdwm(ast, 0x1E6E2020, 0x02E1);
+		ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
 		param->reg_MADJ = 0x00136868;
 		param->reg_SADJ = 0x00004534;
 		param->wodt = 1;
@@ -1163,7 +1050,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 624:
-		moutdwm(ast, 0x1E6E2020, 0x0160);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0160);
 		param->reg_MADJ = 0x00136868;
 		param->reg_SADJ = 0x00004534;
 		param->wodt = 1;
@@ -1196,7 +1083,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 	case AST_DRAM_4Gx16:
 		param->dram_config = 0x133;
 		break;
-	}; /* switch size */
+	} /* switch size */
 
 	switch (param->vram_size) {
 	default:
@@ -1218,106 +1105,98 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
 
 static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
 {
-	u32 data, data2;
+	u32 data, data2, retry = 0;
 
-	moutdwm(ast, 0x1E6E0000, 0xFC600309);
-	moutdwm(ast, 0x1E6E0018, 0x00000100);
-	moutdwm(ast, 0x1E6E0024, 0x00000000);
-	moutdwm(ast, 0x1E6E0034, 0x00000000);
+ddr3_init_start:
+	ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+	ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+	ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
 	udelay(10);
-	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
-	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+	ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
 	udelay(10);
-	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
 	udelay(10);
 
-	moutdwm(ast, 0x1E6E0004, param->dram_config);
-	moutdwm(ast, 0x1E6E0008, 0x90040f);
-	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
-	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
-	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
-	moutdwm(ast, 0x1E6E0080, 0x00000000);
-	moutdwm(ast, 0x1E6E0084, 0x00000000);
-	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
-	moutdwm(ast, 0x1E6E0018, 0x4040A170);
-	moutdwm(ast, 0x1E6E0018, 0x20402370);
-	moutdwm(ast, 0x1E6E0038, 0x00000000);
-	moutdwm(ast, 0x1E6E0040, 0xFF444444);
-	moutdwm(ast, 0x1E6E0044, 0x22222222);
-	moutdwm(ast, 0x1E6E0048, 0x22222222);
-	moutdwm(ast, 0x1E6E004C, 0x00000002);
-	moutdwm(ast, 0x1E6E0050, 0x80000000);
-	moutdwm(ast, 0x1E6E0050, 0x00000000);
-	moutdwm(ast, 0x1E6E0054, 0);
-	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
-	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
-	moutdwm(ast, 0x1E6E0070, 0x00000000);
-	moutdwm(ast, 0x1E6E0074, 0x00000000);
-	moutdwm(ast, 0x1E6E0078, 0x00000000);
-	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+	ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+	ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+	ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+	ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+	ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+	ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+	ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+	ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+	ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+	ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+	ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0054, 0);
+	ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+	ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
 	/* Wait MCLK2X lock to MCLK */
 	do {
-		data = mindwm(ast, 0x1E6E001C);
+		data = ast_mindwm(ast, 0x1E6E001C);
 	} while (!(data & 0x08000000));
-	moutdwm(ast, 0x1E6E0034, 0x00000001);
-	moutdwm(ast, 0x1E6E000C, 0x00005C04);
-	udelay(10);
-	moutdwm(ast, 0x1E6E000C, 0x00000000);
-	moutdwm(ast, 0x1E6E0034, 0x00000000);
-	data = mindwm(ast, 0x1E6E001C);
+	data = ast_mindwm(ast, 0x1E6E001C);
 	data = (data >> 8) & 0xff;
 	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
-		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+		data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
 		if ((data2 & 0xff) > param->madj_max) {
 			break;
 		}
-		moutdwm(ast, 0x1E6E0064, data2);
+		ast_moutdwm(ast, 0x1E6E0064, data2);
 		if (data2 & 0x00100000) {
 			data2 = ((data2 & 0xff) >> 3) + 3;
 		} else {
 			data2 = ((data2 & 0xff) >> 2) + 5;
 		}
-		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+		data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
 		data2 += data & 0xff;
 		data = data | (data2 << 8);
-		moutdwm(ast, 0x1E6E0068, data);
+		ast_moutdwm(ast, 0x1E6E0068, data);
 		udelay(10);
-		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+		ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
 		udelay(10);
-		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
-		moutdwm(ast, 0x1E6E0018, data);
+		data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+		ast_moutdwm(ast, 0x1E6E0018, data);
 		data = data | 0x200;
-		moutdwm(ast, 0x1E6E0018, data);
+		ast_moutdwm(ast, 0x1E6E0018, data);
 		do {
-			data = mindwm(ast, 0x1E6E001C);
+			data = ast_mindwm(ast, 0x1E6E001C);
 		} while (!(data & 0x08000000));
 
-		moutdwm(ast, 0x1E6E0034, 0x00000001);
-		moutdwm(ast, 0x1E6E000C, 0x00005C04);
-		udelay(10);
-		moutdwm(ast, 0x1E6E000C, 0x00000000);
-		moutdwm(ast, 0x1E6E0034, 0x00000000);
-		data = mindwm(ast, 0x1E6E001C);
+		data = ast_mindwm(ast, 0x1E6E001C);
 		data = (data >> 8) & 0xff;
 	}
-	data = mindwm(ast, 0x1E6E0018) | 0xC00;
-	moutdwm(ast, 0x1E6E0018, data);
+	ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+	data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+	ast_moutdwm(ast, 0x1E6E0018, data);
 
-	moutdwm(ast, 0x1E6E0034, 0x00000001);
-	moutdwm(ast, 0x1E6E000C, 0x00000040);
+	ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+	ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
 	udelay(50);
 	/* Mode Register Setting */
-	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
-	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
-	moutdwm(ast, 0x1E6E0028, 0x00000005);
-	moutdwm(ast, 0x1E6E0028, 0x00000007);
-	moutdwm(ast, 0x1E6E0028, 0x00000003);
-	moutdwm(ast, 0x1E6E0028, 0x00000001);
-	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
-	moutdwm(ast, 0x1E6E000C, 0x00005C08);
-	moutdwm(ast, 0x1E6E0028, 0x00000001);
+	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+	ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+	ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+	ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+	ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
 
-	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+	ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
 	data = 0;
 	if (param->wodt) {
 		data = 0x300;
@@ -1325,30 +1204,23 @@ static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
 	if (param->rodt) {
 		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
 	}
-	moutdwm(ast, 0x1E6E0034, data | 0x3);
+	ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
 
-	/* Wait DQI delay lock */
-	do {
-		data = mindwm(ast, 0x1E6E0080);
-	} while (!(data & 0x40000000));
-	/* Wait DQSI delay lock */
-	do {
-		data = mindwm(ast, 0x1E6E0020);
-	} while (!(data & 0x00000800));
 	/* Calibrate the DQSI delay */
-	cbr_dll2(ast, param);
+	if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+		goto ddr3_init_start;
 
-	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+	ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
 	/* ECC Memory Initialization */
 #ifdef ECC
-	moutdwm(ast, 0x1E6E007C, 0x00000000);
-	moutdwm(ast, 0x1E6E0070, 0x221);
+	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0070, 0x221);
 	do {
-		data = mindwm(ast, 0x1E6E0070);
+		data = ast_mindwm(ast, 0x1E6E0070);
 	} while (!(data & 0x00001000));
-	moutdwm(ast, 0x1E6E0070, 0x00000000);
-	moutdwm(ast, 0x1E6E0050, 0x80000000);
-	moutdwm(ast, 0x1E6E0050, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
 #endif
 
 
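
ddr3_init() now wraps the entire programming sequence in a ddr3_init_start label and re-runs it when cbr_dll2() reports failure, capped at 10 retries; ddr2_init() below gets the identical treatment. Structurally the control flow is just this (both helpers are hypothetical stand-ins):

#include <stdbool.h>

static bool cbr_dll2_stub(void) { return true; }  /* stand-in for cbr_dll2() */
static void program_controller(void) { }          /* the long register sequence */

/* Same control flow as the patched ddr3_init()/ddr2_init(). */
static void ddr_init(void)
{
	unsigned retry = 0;

restart:
	program_controller();
	if (!cbr_dll2_stub() && retry++ < 10)
		goto restart;                /* full re-init on calibration failure */
	/* then: reg_FREQ scratch write, optional ECC init */
}

int main(void) { ddr_init(); return 0; }
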
@@ -1358,10 +1230,10 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 {
 	u32 trap, trap_AC2, trap_MRS;
 
-	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+	ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
 
 	/* Ger trap info */
-	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
 	trap_AC2 = (trap << 20) | (trap << 16);
 	trap_AC2 += 0x00110000;
 	trap_MRS = 0x00000040 | (trap << 4);
@@ -1375,7 +1247,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 
 	switch (param->dram_freq) {
 	case 264:
-		moutdwm(ast, 0x1E6E2020, 0x0130);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0130);
 		param->wodt = 0;
 		param->reg_AC1 = 0x11101513;
 		param->reg_AC2 = 0x78117011;
@@ -1390,7 +1262,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 336:
-		moutdwm(ast, 0x1E6E2020, 0x0190);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0190);
 		param->wodt = 1;
 		param->reg_AC1 = 0x22202613;
 		param->reg_AC2 = 0xAA009016 | trap_AC2;
@@ -1403,10 +1275,25 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->reg_FREQ = 0x00004DC0;
 		param->madj_max = 96;
 		param->dll2_finetune_step = 3;
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+			param->reg_AC2 = 0xAA009012 | trap_AC2;
+			break;
+		case AST_DRAM_1Gx16:
+			param->reg_AC2 = 0xAA009016 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2 = 0xAA009023 | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2 = 0xAA00903B | trap_AC2;
+			break;
+		}
 		break;
 	default:
 	case 396:
-		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
 		param->wodt = 1;
 		param->rodt = 0;
 		param->reg_AC1 = 0x33302714;
@@ -1417,7 +1304,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->reg_DRV = 0x000000FA;
 		param->reg_IOZ = 0x00000034;
 		param->reg_DQIDLY = 0x00000089;
-		param->reg_FREQ = 0x000050C0;
+		param->reg_FREQ = 0x00005040;
 		param->madj_max = 96;
 		param->dll2_finetune_step = 4;
 
@@ -1440,7 +1327,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		break;
 
 	case 408:
-		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
 		param->wodt = 1;
 		param->rodt = 0;
 		param->reg_AC1 = 0x33302714;
@@ -1473,7 +1360,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 
 		break;
 	case 456:
-		moutdwm(ast, 0x1E6E2020, 0x0230);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0230);
 		param->wodt = 0;
 		param->reg_AC1 = 0x33302815;
 		param->reg_AC2 = 0xCD44B01E;
@@ -1488,7 +1375,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 504:
-		moutdwm(ast, 0x1E6E2020, 0x0261);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0261);
 		param->wodt = 1;
 		param->rodt = 1;
 		param->reg_AC1 = 0x33302815;
@@ -1504,7 +1391,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 528:
-		moutdwm(ast, 0x1E6E2020, 0x0120);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0120);
 		param->wodt = 1;
 		param->rodt = 1;
 		param->reg_AC1 = 0x33302815;
@@ -1520,7 +1407,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 552:
-		moutdwm(ast, 0x1E6E2020, 0x02A1);
+		ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
 		param->wodt = 1;
 		param->rodt = 1;
 		param->reg_AC1 = 0x43402915;
@@ -1536,7 +1423,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 		param->dll2_finetune_step = 3;
 		break;
 	case 576:
-		moutdwm(ast, 0x1E6E2020, 0x0140);
+		ast_moutdwm(ast, 0x1E6E2020, 0x0140);
 		param->wodt = 1;
 		param->rodt = 1;
 		param->reg_AC1 = 0x43402915;
@@ -1567,7 +1454,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
 	case AST_DRAM_4Gx16:
 		param->dram_config = 0x123;
 		break;
-	}; /* switch size */
+	} /* switch size */
 
 	switch (param->vram_size) {
 	default:
@@ -1588,110 +1475,102 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
1588 1475
1589static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param) 1476static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
1590{ 1477{
1591 u32 data, data2; 1478 u32 data, data2, retry = 0;
1592 1479
1593 moutdwm(ast, 0x1E6E0000, 0xFC600309); 1480ddr2_init_start:
1594 moutdwm(ast, 0x1E6E0018, 0x00000100); 1481 ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
1595 moutdwm(ast, 0x1E6E0024, 0x00000000); 1482 ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
1596 moutdwm(ast, 0x1E6E0064, param->reg_MADJ); 1483 ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
1597 moutdwm(ast, 0x1E6E0068, param->reg_SADJ); 1484 ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
1485 ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
1598 udelay(10); 1486 udelay(10);
1599 moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); 1487 ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
1600 udelay(10); 1488 udelay(10);
1601 1489
1602 moutdwm(ast, 0x1E6E0004, param->dram_config); 1490 ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
1603 moutdwm(ast, 0x1E6E0008, 0x90040f); 1491 ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
1604 moutdwm(ast, 0x1E6E0010, param->reg_AC1); 1492 ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
1605 moutdwm(ast, 0x1E6E0014, param->reg_AC2); 1493 ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
1606 moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); 1494 ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
1607 moutdwm(ast, 0x1E6E0080, 0x00000000); 1495 ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
1608 moutdwm(ast, 0x1E6E0084, 0x00000000); 1496 ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
1609 moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); 1497 ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
1610 moutdwm(ast, 0x1E6E0018, 0x4040A130); 1498 ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
1611 moutdwm(ast, 0x1E6E0018, 0x20402330); 1499 ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
1612 moutdwm(ast, 0x1E6E0038, 0x00000000); 1500 ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
1613 moutdwm(ast, 0x1E6E0040, 0xFF808000); 1501 ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
1614 moutdwm(ast, 0x1E6E0044, 0x88848466); 1502 ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
1615 moutdwm(ast, 0x1E6E0048, 0x44440008); 1503 ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
1616 moutdwm(ast, 0x1E6E004C, 0x00000000); 1504 ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
1617 moutdwm(ast, 0x1E6E0050, 0x80000000); 1505 ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
1618 moutdwm(ast, 0x1E6E0050, 0x00000000); 1506 ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
1619 moutdwm(ast, 0x1E6E0054, 0); 1507 ast_moutdwm(ast, 0x1E6E0054, 0);
1620 moutdwm(ast, 0x1E6E0060, param->reg_DRV); 1508 ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
1621 moutdwm(ast, 0x1E6E006C, param->reg_IOZ); 1509 ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
1622 moutdwm(ast, 0x1E6E0070, 0x00000000); 1510 ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
1623 moutdwm(ast, 0x1E6E0074, 0x00000000); 1511 ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
1624 moutdwm(ast, 0x1E6E0078, 0x00000000); 1512 ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
1625 moutdwm(ast, 0x1E6E007C, 0x00000000); 1513 ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
1626 1514
 1627 /* Wait for MCLK2X to lock to MCLK */ 1515 /* Wait for MCLK2X to lock to MCLK */
1628 do { 1516 do {
1629 data = mindwm(ast, 0x1E6E001C); 1517 data = ast_mindwm(ast, 0x1E6E001C);
1630 } while (!(data & 0x08000000)); 1518 } while (!(data & 0x08000000));
1631 moutdwm(ast, 0x1E6E0034, 0x00000001); 1519 data = ast_mindwm(ast, 0x1E6E001C);
1632 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1633 udelay(10);
1634 moutdwm(ast, 0x1E6E000C, 0x00000000);
1635 moutdwm(ast, 0x1E6E0034, 0x00000000);
1636 data = mindwm(ast, 0x1E6E001C);
1637 data = (data >> 8) & 0xff; 1520 data = (data >> 8) & 0xff;
1638 while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { 1521 while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
1639 data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; 1522 data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
1640 if ((data2 & 0xff) > param->madj_max) { 1523 if ((data2 & 0xff) > param->madj_max) {
1641 break; 1524 break;
1642 } 1525 }
1643 moutdwm(ast, 0x1E6E0064, data2); 1526 ast_moutdwm(ast, 0x1E6E0064, data2);
1644 if (data2 & 0x00100000) { 1527 if (data2 & 0x00100000) {
1645 data2 = ((data2 & 0xff) >> 3) + 3; 1528 data2 = ((data2 & 0xff) >> 3) + 3;
1646 } else { 1529 } else {
1647 data2 = ((data2 & 0xff) >> 2) + 5; 1530 data2 = ((data2 & 0xff) >> 2) + 5;
1648 } 1531 }
1649 data = mindwm(ast, 0x1E6E0068) & 0xffff00ff; 1532 data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
1650 data2 += data & 0xff; 1533 data2 += data & 0xff;
1651 data = data | (data2 << 8); 1534 data = data | (data2 << 8);
1652 moutdwm(ast, 0x1E6E0068, data); 1535 ast_moutdwm(ast, 0x1E6E0068, data);
1653 udelay(10); 1536 udelay(10);
1654 moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000); 1537 ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
1655 udelay(10); 1538 udelay(10);
1656 data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff; 1539 data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
1657 moutdwm(ast, 0x1E6E0018, data); 1540 ast_moutdwm(ast, 0x1E6E0018, data);
1658 data = data | 0x200; 1541 data = data | 0x200;
1659 moutdwm(ast, 0x1E6E0018, data); 1542 ast_moutdwm(ast, 0x1E6E0018, data);
1660 do { 1543 do {
1661 data = mindwm(ast, 0x1E6E001C); 1544 data = ast_mindwm(ast, 0x1E6E001C);
1662 } while (!(data & 0x08000000)); 1545 } while (!(data & 0x08000000));
1663 1546
1664 moutdwm(ast, 0x1E6E0034, 0x00000001); 1547 data = ast_mindwm(ast, 0x1E6E001C);
1665 moutdwm(ast, 0x1E6E000C, 0x00005C04);
1666 udelay(10);
1667 moutdwm(ast, 0x1E6E000C, 0x00000000);
1668 moutdwm(ast, 0x1E6E0034, 0x00000000);
1669 data = mindwm(ast, 0x1E6E001C);
1670 data = (data >> 8) & 0xff; 1548 data = (data >> 8) & 0xff;
1671 } 1549 }
1672 data = mindwm(ast, 0x1E6E0018) | 0xC00; 1550 ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
1673 moutdwm(ast, 0x1E6E0018, data); 1551 data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
1552 ast_moutdwm(ast, 0x1E6E0018, data);
1674 1553
1675 moutdwm(ast, 0x1E6E0034, 0x00000001); 1554 ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
1676 moutdwm(ast, 0x1E6E000C, 0x00000000); 1555 ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
1677 udelay(50); 1556 udelay(50);
1678 /* Mode Register Setting */ 1557 /* Mode Register Setting */
1679 moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); 1558 ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
1680 moutdwm(ast, 0x1E6E0030, param->reg_EMRS); 1559 ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
1681 moutdwm(ast, 0x1E6E0028, 0x00000005); 1560 ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
1682 moutdwm(ast, 0x1E6E0028, 0x00000007); 1561 ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
1683 moutdwm(ast, 0x1E6E0028, 0x00000003); 1562 ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
1684 moutdwm(ast, 0x1E6E0028, 0x00000001); 1563 ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
1685 1564
1686 moutdwm(ast, 0x1E6E000C, 0x00005C08); 1565 ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
1687 moutdwm(ast, 0x1E6E002C, param->reg_MRS); 1566 ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
1688 moutdwm(ast, 0x1E6E0028, 0x00000001); 1567 ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
1689 moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); 1568 ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
1690 moutdwm(ast, 0x1E6E0028, 0x00000003); 1569 ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
1691 moutdwm(ast, 0x1E6E0030, param->reg_EMRS); 1570 ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
1692 moutdwm(ast, 0x1E6E0028, 0x00000003); 1571 ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
1693 1572
1694 moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); 1573 ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
1695 data = 0; 1574 data = 0;
1696 if (param->wodt) { 1575 if (param->wodt) {
1697 data = 0x500; 1576 data = 0x500;
@@ -1699,30 +1578,23 @@ static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
1699 if (param->rodt) { 1578 if (param->rodt) {
1700 data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); 1579 data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
1701 } 1580 }
1702 moutdwm(ast, 0x1E6E0034, data | 0x3); 1581 ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
1703 moutdwm(ast, 0x1E6E0120, param->reg_FREQ); 1582 ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
1704 1583
1705 /* Wait DQI delay lock */
1706 do {
1707 data = mindwm(ast, 0x1E6E0080);
1708 } while (!(data & 0x40000000));
1709 /* Wait DQSI delay lock */
1710 do {
1711 data = mindwm(ast, 0x1E6E0020);
1712 } while (!(data & 0x00000800));
1713 /* Calibrate the DQSI delay */ 1584 /* Calibrate the DQSI delay */
1714 cbr_dll2(ast, param); 1585 if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
1586 goto ddr2_init_start;
1715 1587
1716 /* ECC Memory Initialization */ 1588 /* ECC Memory Initialization */
1717#ifdef ECC 1589#ifdef ECC
1718 moutdwm(ast, 0x1E6E007C, 0x00000000); 1590 ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
1719 moutdwm(ast, 0x1E6E0070, 0x221); 1591 ast_moutdwm(ast, 0x1E6E0070, 0x221);
1720 do { 1592 do {
1721 data = mindwm(ast, 0x1E6E0070); 1593 data = ast_mindwm(ast, 0x1E6E0070);
1722 } while (!(data & 0x00001000)); 1594 } while (!(data & 0x00001000));
1723 moutdwm(ast, 0x1E6E0070, 0x00000000); 1595 ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
1724 moutdwm(ast, 0x1E6E0050, 0x80000000); 1596 ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
1725 moutdwm(ast, 0x1E6E0050, 0x00000000); 1597 ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
1726#endif 1598#endif
1727 1599
1728} 1600}
@@ -1768,8 +1640,8 @@ static void ast_init_dram_2300(struct drm_device *dev)
1768 ddr2_init(ast, &param); 1640 ddr2_init(ast, &param);
1769 } 1641 }
1770 1642
1771 temp = mindwm(ast, 0x1e6e2040); 1643 temp = ast_mindwm(ast, 0x1e6e2040);
1772 moutdwm(ast, 0x1e6e2040, temp | 0x40); 1644 ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
1773 } 1645 }
1774 1646
1775 /* wait ready */ 1647 /* wait ready */
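Note on the ddr2_init() hunk above: the rework drops the open-coded DQI/DQSI wait loops and instead restarts the whole DDR2 init sequence whenever cbr_dll2() reports a calibration failure, bounded to ten attempts. A minimal standalone sketch of that bounded-retry shape (function names invented, not from the driver):

	static bool calibrate_ddr(void);	/* stands in for cbr_dll2() */
	static void program_ddr(void);		/* stands in for the register writes */

	static void ddr_init_sketch(void)
	{
		unsigned int retry = 0;

	restart:
		program_ddr();			/* reprogram the controller from scratch */
		if (!calibrate_ddr() && retry++ < 10)
			goto restart;		/* calibration failed: one more full pass */
		/* calibrated, or out of attempts -- continue either way, as the driver does */
	}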
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 95fa6aba26bc..4c761dcea972 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -42,7 +42,7 @@
42#define HBorder 0x00000020 42#define HBorder 0x00000020
43#define VBorder 0x00000010 43#define VBorder 0x00000010
44#define WideScreenMode 0x00000100 44#define WideScreenMode 0x00000100
45 45#define NewModeInfo 0x00000200
46 46
47/* DCLK Index */ 47/* DCLK Index */
48#define VCLK25_175 0x00 48#define VCLK25_175 0x00
@@ -67,6 +67,11 @@
67#define VCLK106_5 0x12 67#define VCLK106_5 0x12
68#define VCLK146_25 0x13 68#define VCLK146_25 0x13
69#define VCLK148_5 0x14 69#define VCLK148_5 0x14
70#define VCLK71 0x15
71#define VCLK88_75 0x16
72#define VCLK119 0x17
73#define VCLK85_5 0x18
74#define VCLK97_75 0x19
70 75
71static struct ast_vbios_dclk_info dclk_table[] = { 76static struct ast_vbios_dclk_info dclk_table[] = {
72 {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ 77 {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
@@ -90,6 +95,10 @@ static struct ast_vbios_dclk_info dclk_table[] = {
90 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 95 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
91 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 96 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
92 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 97 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
98 {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
99 {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
100 {0x77, 0x58, 0x80}, /* 17: VCLK119 */
101 {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
93}; 102};
94 103
95static struct ast_vbios_stdtable vbios_stdtable[] = { 104static struct ast_vbios_stdtable vbios_stdtable[] = {
@@ -225,41 +234,63 @@ static struct ast_vbios_enhtable res_1600x1200[] = {
225 (SyncPP | Charx8Dot), 0xFF, 1, 0x33 }, 234 (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
226}; 235};
227 236
228static struct ast_vbios_enhtable res_1920x1200[] = { 237/* 16:9 */
229 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */ 238static struct ast_vbios_enhtable res_1360x768[] = {
230 (SyncNP | Charx8Dot), 60, 1, 0x34 }, 239 {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
231 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */ 240 (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
232 (SyncNP | Charx8Dot), 0xFF, 1, 0x34 }, 241 {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */
242 (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 },
243};
244
245static struct ast_vbios_enhtable res_1600x900[] = {
246 {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
247 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
248 {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */
249 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A }
233}; 250};
234 251
252static struct ast_vbios_enhtable res_1920x1080[] = {
253 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
254 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 },
255 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
256 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 },
257};
258
259
235/* 16:10 */ 260/* 16:10 */
236static struct ast_vbios_enhtable res_1280x800[] = { 261static struct ast_vbios_enhtable res_1280x800[] = {
262 {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
 263 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
237 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ 264 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
238 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 }, 265 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
239 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ 266 {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
240 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 }, 267 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 },
241 268
242}; 269};
243 270
244static struct ast_vbios_enhtable res_1440x900[] = { 271static struct ast_vbios_enhtable res_1440x900[] = {
272 {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
273 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
245 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ 274 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
246 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 }, 275 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
247 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ 276 {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
248 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 }, 277 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 },
249}; 278};
250 279
251static struct ast_vbios_enhtable res_1680x1050[] = { 280static struct ast_vbios_enhtable res_1680x1050[] = {
281 {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
282 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
252 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ 283 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
253 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 }, 284 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
254 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ 285 {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
255 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 }, 286 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 },
256}; 287};
257 288
258/* HDTV */ 289static struct ast_vbios_enhtable res_1920x1200[] = {
259static struct ast_vbios_enhtable res_1920x1080[] = { 290 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
260 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ 291 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
261 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 }, 292 {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
262 {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ 293 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
263 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
264}; 294};
295
265#endif 296#endif
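The new res_* tables above keep the file's existing convention of ending each array with a 0xFF refresh-rate row. A sketch of how such a table is typically scanned (assumed lookup logic, not code from this patch; the refresh_rate field is taken from struct ast_vbios_enhtable):

	static const struct ast_vbios_enhtable *
	find_rate(const struct ast_vbios_enhtable *tbl, u32 rate)
	{
		/* the 0xFF row doubles as terminator and catch-all default */
		while (tbl->refresh_rate != 0xFF && tbl->refresh_rate != rate)
			tbl++;
		return tbl;
	}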
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index f488be55d650..b9a695d92792 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -434,17 +434,13 @@ static void bochs_bo_unref(struct bochs_bo **bo)
434 434
435 tbo = &((*bo)->bo); 435 tbo = &((*bo)->bo);
436 ttm_bo_unref(&tbo); 436 ttm_bo_unref(&tbo);
437 if (tbo == NULL) 437 *bo = NULL;
438 *bo = NULL;
439
440} 438}
441 439
442void bochs_gem_free_object(struct drm_gem_object *obj) 440void bochs_gem_free_object(struct drm_gem_object *obj)
443{ 441{
444 struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj); 442 struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
445 443
446 if (!bochs_bo)
447 return;
448 bochs_bo_unref(&bochs_bo); 444 bochs_bo_unref(&bochs_bo);
449} 445}
450 446
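The NULL test dropped above could never fire usefully: ttm_bo_unref() always clears the pointer it is handed, so *bo can be reset unconditionally. The resulting pattern, sketched:

	static void bo_unref_sketch(struct bochs_bo **bo)
	{
		struct ttm_buffer_object *tbo;

		if (*bo == NULL)
			return;
		tbo = &((*bo)->bo);
		ttm_bo_unref(&tbo);	/* drops the ref and NULLs tbo itself */
		*bo = NULL;		/* safe unconditionally, no need to test tbo */
	}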
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index b171901a3553..98fd17ae4916 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -225,12 +225,6 @@ out:
225 return num_modes; 225 return num_modes;
226} 226}
227 227
228static int ptn3460_mode_valid(struct drm_connector *connector,
229 struct drm_display_mode *mode)
230{
231 return MODE_OK;
232}
233
234struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) 228struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
235{ 229{
236 struct ptn3460_bridge *ptn_bridge; 230 struct ptn3460_bridge *ptn_bridge;
@@ -242,7 +236,6 @@ struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
242 236
243struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { 237struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
244 .get_modes = ptn3460_get_modes, 238 .get_modes = ptn3460_get_modes,
245 .mode_valid = ptn3460_mode_valid,
246 .best_encoder = ptn3460_best_encoder, 239 .best_encoder = ptn3460_best_encoder,
247}; 240};
248 241
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4b0170cf53fd..99c1983f99d2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -264,17 +264,13 @@ static void cirrus_bo_unref(struct cirrus_bo **bo)
264 264
265 tbo = &((*bo)->bo); 265 tbo = &((*bo)->bo);
266 ttm_bo_unref(&tbo); 266 ttm_bo_unref(&tbo);
267 if (tbo == NULL) 267 *bo = NULL;
268 *bo = NULL;
269
270} 268}
271 269
272void cirrus_gem_free_object(struct drm_gem_object *obj) 270void cirrus_gem_free_object(struct drm_gem_object *obj)
273{ 271{
274 struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj); 272 struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
275 273
276 if (!cirrus_bo)
277 return;
278 cirrus_bo_unref(&cirrus_bo); 274 cirrus_bo_unref(&cirrus_bo);
279} 275}
280 276
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index f59433b7610c..49332c5fe35b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -505,13 +505,6 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
505 return count; 505 return count;
506} 506}
507 507
508static int cirrus_vga_mode_valid(struct drm_connector *connector,
509 struct drm_display_mode *mode)
510{
511 /* Any mode we've added is valid */
512 return MODE_OK;
513}
514
515static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector 508static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
516 *connector) 509 *connector)
517{ 510{
@@ -546,7 +539,6 @@ static void cirrus_connector_destroy(struct drm_connector *connector)
546 539
547struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = { 540struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
548 .get_modes = cirrus_vga_get_modes, 541 .get_modes = cirrus_vga_get_modes,
549 .mode_valid = cirrus_vga_mode_valid,
550 .best_encoder = cirrus_connector_best_encoder, 542 .best_encoder = cirrus_connector_best_encoder,
551}; 543};
552 544
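Both mode_valid stubs deleted above (ptn3460 and cirrus) returned MODE_OK unconditionally; the probe helper treats a missing .mode_valid callback the same way (assumed core behaviour at this point in the series), so a helper table only needs the callbacks that do real work:

	static int example_get_modes(struct drm_connector *connector);
	static struct drm_encoder *example_best_encoder(struct drm_connector *connector);

	struct drm_connector_helper_funcs example_helper_funcs = {
		.get_modes    = example_get_modes,	/* hypothetical callbacks */
		.best_encoder = example_best_encoder,
		/* .mode_valid left NULL: every probed mode is accepted */
	};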
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index edec31fe3fed..68175b54504b 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -363,7 +363,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
363 list->master = dev->primary->master; 363 list->master = dev->primary->master;
364 *maplist = list; 364 *maplist = list;
365 return 0; 365 return 0;
366 } 366}
367 367
368int drm_addmap(struct drm_device * dev, resource_size_t offset, 368int drm_addmap(struct drm_device * dev, resource_size_t offset,
369 unsigned int size, enum drm_map_type type, 369 unsigned int size, enum drm_map_type type,
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
656 DRM_DEBUG("zone invalid\n"); 656 DRM_DEBUG("zone invalid\n");
657 return -EINVAL; 657 return -EINVAL;
658 } 658 }
659 spin_lock(&dev->count_lock); 659 spin_lock(&dev->buf_lock);
660 if (dev->buf_use) { 660 if (dev->buf_use) {
661 spin_unlock(&dev->count_lock); 661 spin_unlock(&dev->buf_lock);
662 return -EBUSY; 662 return -EBUSY;
663 } 663 }
664 atomic_inc(&dev->buf_alloc); 664 atomic_inc(&dev->buf_alloc);
665 spin_unlock(&dev->count_lock); 665 spin_unlock(&dev->buf_lock);
666 666
667 mutex_lock(&dev->struct_mutex); 667 mutex_lock(&dev->struct_mutex);
668 entry = &dma->bufs[order]; 668 entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
805 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 805 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
806 total = PAGE_SIZE << page_order; 806 total = PAGE_SIZE << page_order;
807 807
808 spin_lock(&dev->count_lock); 808 spin_lock(&dev->buf_lock);
809 if (dev->buf_use) { 809 if (dev->buf_use) {
810 spin_unlock(&dev->count_lock); 810 spin_unlock(&dev->buf_lock);
811 return -EBUSY; 811 return -EBUSY;
812 } 812 }
813 atomic_inc(&dev->buf_alloc); 813 atomic_inc(&dev->buf_alloc);
814 spin_unlock(&dev->count_lock); 814 spin_unlock(&dev->buf_lock);
815 815
816 mutex_lock(&dev->struct_mutex); 816 mutex_lock(&dev->struct_mutex);
817 entry = &dma->bufs[order]; 817 entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1016 return -EINVAL; 1016 return -EINVAL;
1017 1017
1018 spin_lock(&dev->count_lock); 1018 spin_lock(&dev->buf_lock);
1019 if (dev->buf_use) { 1019 if (dev->buf_use) {
1020 spin_unlock(&dev->count_lock); 1020 spin_unlock(&dev->buf_lock);
1021 return -EBUSY; 1021 return -EBUSY;
1022 } 1022 }
1023 atomic_inc(&dev->buf_alloc); 1023 atomic_inc(&dev->buf_alloc);
1024 spin_unlock(&dev->count_lock); 1024 spin_unlock(&dev->buf_lock);
1025 1025
1026 mutex_lock(&dev->struct_mutex); 1026 mutex_lock(&dev->struct_mutex);
1027 entry = &dma->bufs[order]; 1027 entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
1175 * \param arg pointer to a drm_buf_info structure. 1175 * \param arg pointer to a drm_buf_info structure.
1176 * \return zero on success or a negative number on failure. 1176 * \return zero on success or a negative number on failure.
1177 * 1177 *
1178 * Increments drm_device::buf_use while holding the drm_device::count_lock 1178 * Increments drm_device::buf_use while holding the drm_device::buf_lock
1179 * lock, preventing allocation of more buffers after this call. Information 1179 * lock, preventing allocation of more buffers after this call. Information
1180 * about each requested buffer is then copied into user space. 1180 * about each requested buffer is then copied into user space.
1181 */ 1181 */
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
1196 if (!dma) 1196 if (!dma)
1197 return -EINVAL; 1197 return -EINVAL;
1198 1198
1199 spin_lock(&dev->count_lock); 1199 spin_lock(&dev->buf_lock);
1200 if (atomic_read(&dev->buf_alloc)) { 1200 if (atomic_read(&dev->buf_alloc)) {
1201 spin_unlock(&dev->count_lock); 1201 spin_unlock(&dev->buf_lock);
1202 return -EBUSY; 1202 return -EBUSY;
1203 } 1203 }
1204 ++dev->buf_use; /* Can't allocate more after this call */ 1204 ++dev->buf_use; /* Can't allocate more after this call */
1205 spin_unlock(&dev->count_lock); 1205 spin_unlock(&dev->buf_lock);
1206 1206
1207 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1207 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1208 if (dma->bufs[i].buf_count) 1208 if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1381 if (!dma) 1381 if (!dma)
1382 return -EINVAL; 1382 return -EINVAL;
1383 1383
1384 spin_lock(&dev->count_lock); 1384 spin_lock(&dev->buf_lock);
1385 if (atomic_read(&dev->buf_alloc)) { 1385 if (atomic_read(&dev->buf_alloc)) {
1386 spin_unlock(&dev->count_lock); 1386 spin_unlock(&dev->buf_lock);
1387 return -EBUSY; 1387 return -EBUSY;
1388 } 1388 }
1389 dev->buf_use++; /* Can't allocate more after this call */ 1389 dev->buf_use++; /* Can't allocate more after this call */
1390 spin_unlock(&dev->count_lock); 1390 spin_unlock(&dev->buf_lock);
1391 1391
1392 if (request->count >= dma->buf_count) { 1392 if (request->count >= dma->buf_count) {
1393 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) 1393 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
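The rename from count_lock to buf_lock makes the lock's real scope explicit: it only arbitrates the buf_use / buf_alloc handshake repeated in every hunk above. Condensed into two hypothetical helpers:

	static int begin_buf_alloc(struct drm_device *dev)
	{
		spin_lock(&dev->buf_lock);
		if (dev->buf_use) {			/* buffers already handed out */
			spin_unlock(&dev->buf_lock);
			return -EBUSY;
		}
		atomic_inc(&dev->buf_alloc);		/* allocation now in flight */
		spin_unlock(&dev->buf_lock);
		return 0;
	}

	static int freeze_buf_alloc(struct drm_device *dev)
	{
		spin_lock(&dev->buf_lock);
		if (atomic_read(&dev->buf_alloc)) {	/* allocator still running */
			spin_unlock(&dev->buf_lock);
			return -EBUSY;
		}
		dev->buf_use++;				/* no more allocations after this */
		spin_unlock(&dev->buf_lock);
		return 0;
	}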
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 534cb89b160d..a6b690626a6b 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -131,14 +131,14 @@ drm_clflush_sg(struct sg_table *st)
131EXPORT_SYMBOL(drm_clflush_sg); 131EXPORT_SYMBOL(drm_clflush_sg);
132 132
133void 133void
134drm_clflush_virt_range(char *addr, unsigned long length) 134drm_clflush_virt_range(void *addr, unsigned long length)
135{ 135{
136#if defined(CONFIG_X86) 136#if defined(CONFIG_X86)
137 if (cpu_has_clflush) { 137 if (cpu_has_clflush) {
138 char *end = addr + length; 138 void *end = addr + length;
139 mb(); 139 mb();
140 for (; addr < end; addr += boot_cpu_data.x86_clflush_size) 140 for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
141 clflush(addr); 141 clflushopt(addr);
142 clflushopt(end - 1); 142 clflushopt(end - 1);
143 mb(); 143 mb();
144 return; 144 return;
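The void * signature above removes casts in callers, and the loop now uses clflushopt throughout. The range walk is worth spelling out: the extra flush of end - 1 covers the last cacheline when the starting address is not line aligned and the stride would otherwise skip it. Sketch (flush_line() stands in for clflushopt(); kernel-style void pointer arithmetic assumed):

	static void flush_line(void *p);	/* stands in for clflushopt(p) */

	static void flush_range_sketch(void *addr, unsigned long length,
				       unsigned long clsize)
	{
		void *end = addr + length;

		for (; addr < end; addr += clsize)
			flush_line(addr);	/* one flush per cacheline */
		flush_line(end - 1);		/* tail line when addr wasn't aligned */
	}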
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d8b7099abece..fe94cc10cd35 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,7 @@
37#include <drm/drm_crtc.h> 37#include <drm/drm_crtc.h>
38#include <drm/drm_edid.h> 38#include <drm/drm_edid.h>
39#include <drm/drm_fourcc.h> 39#include <drm/drm_fourcc.h>
40#include <drm/drm_modeset_lock.h>
40 41
41#include "drm_crtc_internal.h" 42#include "drm_crtc_internal.h"
42 43
@@ -50,12 +51,42 @@
50 */ 51 */
51void drm_modeset_lock_all(struct drm_device *dev) 52void drm_modeset_lock_all(struct drm_device *dev)
52{ 53{
53 struct drm_crtc *crtc; 54 struct drm_mode_config *config = &dev->mode_config;
55 struct drm_modeset_acquire_ctx *ctx;
56 int ret;
54 57
55 mutex_lock(&dev->mode_config.mutex); 58 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
59 if (WARN_ON(!ctx))
60 return;
56 61
57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 62 mutex_lock(&config->mutex);
58 mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); 63
64 drm_modeset_acquire_init(ctx, 0);
65
66retry:
67 ret = drm_modeset_lock(&config->connection_mutex, ctx);
68 if (ret)
69 goto fail;
70 ret = drm_modeset_lock_all_crtcs(dev, ctx);
71 if (ret)
72 goto fail;
73
74 WARN_ON(config->acquire_ctx);
75
 76 /* now we hold the locks, so it is safe to stash the
77 * ctx for drm_modeset_unlock_all():
78 */
79 config->acquire_ctx = ctx;
80
81 drm_warn_on_modeset_not_all_locked(dev);
82
83 return;
84
85fail:
86 if (ret == -EDEADLK) {
87 drm_modeset_backoff(ctx);
88 goto retry;
89 }
59} 90}
60EXPORT_SYMBOL(drm_modeset_lock_all); 91EXPORT_SYMBOL(drm_modeset_lock_all);
61 92
@@ -67,10 +98,17 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
67 */ 98 */
68void drm_modeset_unlock_all(struct drm_device *dev) 99void drm_modeset_unlock_all(struct drm_device *dev)
69{ 100{
70 struct drm_crtc *crtc; 101 struct drm_mode_config *config = &dev->mode_config;
102 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
71 103
72 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 104 if (WARN_ON(!ctx))
73 mutex_unlock(&crtc->mutex); 105 return;
106
107 config->acquire_ctx = NULL;
108 drm_modeset_drop_locks(ctx);
109 drm_modeset_acquire_fini(ctx);
110
111 kfree(ctx);
74 112
75 mutex_unlock(&dev->mode_config.mutex); 113 mutex_unlock(&dev->mode_config.mutex);
76} 114}
@@ -91,8 +129,9 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
91 return; 129 return;
92 130
93 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 131 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
94 WARN_ON(!mutex_is_locked(&crtc->mutex)); 132 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
95 133
134 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
96 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 135 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
97} 136}
98EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); 137EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
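drm_modeset_lock_all() above is the first user of the new acquire-context dance. Its core is the -EDEADLK backoff-and-retry loop, which is what makes lock acquisition order irrelevant. Condensed sketch (error paths other than deadlock omitted):

	static void lock_all_sketch(struct drm_device *dev,
				    struct drm_modeset_acquire_ctx *ctx)
	{
		int ret;

		drm_modeset_acquire_init(ctx, 0);
	retry:
		ret = drm_modeset_lock_all_crtcs(dev, ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);	/* drop every lock held so far */
			goto retry;			/* then start the sequence over */
		}
	}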
@@ -227,6 +266,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
227 { DRM_MODE_ENCODER_TVDAC, "TV" }, 266 { DRM_MODE_ENCODER_TVDAC, "TV" },
228 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, 267 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
229 { DRM_MODE_ENCODER_DSI, "DSI" }, 268 { DRM_MODE_ENCODER_DSI, "DSI" },
269 { DRM_MODE_ENCODER_DPMST, "DP MST" },
230}; 270};
231 271
232static const struct drm_prop_enum_list drm_subpixel_enum_list[] = 272static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
@@ -256,46 +296,6 @@ void drm_connector_ida_destroy(void)
256} 296}
257 297
258/** 298/**
259 * drm_get_encoder_name - return a string for encoder
260 * @encoder: encoder to compute name of
261 *
262 * Note that the buffer used by this function is globally shared and owned by
263 * the function itself.
264 *
265 * FIXME: This isn't really multithreading safe.
266 */
267const char *drm_get_encoder_name(const struct drm_encoder *encoder)
268{
269 static char buf[32];
270
271 snprintf(buf, 32, "%s-%d",
272 drm_encoder_enum_list[encoder->encoder_type].name,
273 encoder->base.id);
274 return buf;
275}
276EXPORT_SYMBOL(drm_get_encoder_name);
277
278/**
279 * drm_get_connector_name - return a string for connector
280 * @connector: connector to compute name of
281 *
282 * Note that the buffer used by this function is globally shared and owned by
283 * the function itself.
284 *
285 * FIXME: This isn't really multithreading safe.
286 */
287const char *drm_get_connector_name(const struct drm_connector *connector)
288{
289 static char buf[32];
290
291 snprintf(buf, 32, "%s-%d",
292 drm_connector_enum_list[connector->connector_type].name,
293 connector->connector_type_id);
294 return buf;
295}
296EXPORT_SYMBOL(drm_get_connector_name);
297
298/**
299 * drm_get_connector_status_name - return a string for connector status 299 * drm_get_connector_status_name - return a string for connector status
300 * @status: connector status to compute name of 300 * @status: connector status to compute name of
301 * 301 *
@@ -409,6 +409,21 @@ void drm_mode_object_put(struct drm_device *dev,
409 mutex_unlock(&dev->mode_config.idr_mutex); 409 mutex_unlock(&dev->mode_config.idr_mutex);
410} 410}
411 411
412static struct drm_mode_object *_object_find(struct drm_device *dev,
413 uint32_t id, uint32_t type)
414{
415 struct drm_mode_object *obj = NULL;
416
417 mutex_lock(&dev->mode_config.idr_mutex);
418 obj = idr_find(&dev->mode_config.crtc_idr, id);
419 if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
420 (obj->id != id))
421 obj = NULL;
422 mutex_unlock(&dev->mode_config.idr_mutex);
423
424 return obj;
425}
426
412/** 427/**
413 * drm_mode_object_find - look up a drm object with static lifetime 428 * drm_mode_object_find - look up a drm object with static lifetime
414 * @dev: drm device 429 * @dev: drm device
@@ -416,7 +431,9 @@ void drm_mode_object_put(struct drm_device *dev,
416 * @type: type of the mode object 431 * @type: type of the mode object
417 * 432 *
 418 * Note that framebuffers cannot be looked up with this function - since those 433 * Note that framebuffers cannot be looked up with this function - since those
 419 * are reference counted, they need special treatment. 434 * are reference counted, they need special treatment. This holds even for
 435 * DRM_MODE_OBJECT_ANY, which will simply return NULL
 436 * rather than WARN_ON().
 420 */ 437 */
 421struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, 438struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 422 uint32_t id, uint32_t type) 439 uint32_t id, uint32_t type)
@@ -426,13 +443,10 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
426 /* Framebuffers are reference counted and need their own lookup 443 /* Framebuffers are reference counted and need their own lookup
427 * function.*/ 444 * function.*/
428 WARN_ON(type == DRM_MODE_OBJECT_FB); 445 WARN_ON(type == DRM_MODE_OBJECT_FB);
429 446 obj = _object_find(dev, id, type);
430 mutex_lock(&dev->mode_config.idr_mutex); 447 /* don't leak out unref'd fb's */
431 obj = idr_find(&dev->mode_config.crtc_idr, id); 448 if (obj && (obj->type == DRM_MODE_OBJECT_FB))
432 if (!obj || (obj->type != type) || (obj->id != id))
433 obj = NULL; 449 obj = NULL;
434 mutex_unlock(&dev->mode_config.idr_mutex);
435
436 return obj; 450 return obj;
437} 451}
438EXPORT_SYMBOL(drm_mode_object_find); 452EXPORT_SYMBOL(drm_mode_object_find);
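With the lookup funnelled through _object_find(), drm_mode_object_find() filters framebuffers out even for DRM_MODE_OBJECT_ANY. Callers that want an fb must take the refcounted path instead -- a hypothetical helper:

	static struct drm_framebuffer *lookup_fb(struct drm_device *dev, uint32_t id)
	{
		/* drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB) would WARN and
		 * return NULL here; framebuffers are refcounted and need _lookup. */
		return drm_framebuffer_lookup(dev, id);	/* takes a reference */
	}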
@@ -538,7 +552,7 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
538 */ 552 */
539void drm_framebuffer_unreference(struct drm_framebuffer *fb) 553void drm_framebuffer_unreference(struct drm_framebuffer *fb)
540{ 554{
541 DRM_DEBUG("FB ID: %d\n", fb->base.id); 555 DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
542 kref_put(&fb->refcount, drm_framebuffer_free); 556 kref_put(&fb->refcount, drm_framebuffer_free);
543} 557}
544EXPORT_SYMBOL(drm_framebuffer_unreference); 558EXPORT_SYMBOL(drm_framebuffer_unreference);
@@ -551,7 +565,7 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
551 */ 565 */
552void drm_framebuffer_reference(struct drm_framebuffer *fb) 566void drm_framebuffer_reference(struct drm_framebuffer *fb)
553{ 567{
554 DRM_DEBUG("FB ID: %d\n", fb->base.id); 568 DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
555 kref_get(&fb->refcount); 569 kref_get(&fb->refcount);
556} 570}
557EXPORT_SYMBOL(drm_framebuffer_reference); 571EXPORT_SYMBOL(drm_framebuffer_reference);
@@ -563,7 +577,7 @@ static void drm_framebuffer_free_bug(struct kref *kref)
563 577
564static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) 578static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
565{ 579{
566 DRM_DEBUG("FB ID: %d\n", fb->base.id); 580 DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
567 kref_put(&fb->refcount, drm_framebuffer_free_bug); 581 kref_put(&fb->refcount, drm_framebuffer_free_bug);
568} 582}
569 583
@@ -691,6 +705,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
691} 705}
692EXPORT_SYMBOL(drm_framebuffer_remove); 706EXPORT_SYMBOL(drm_framebuffer_remove);
693 707
708DEFINE_WW_CLASS(crtc_ww_class);
709
694/** 710/**
695 * drm_crtc_init_with_planes - Initialise a new CRTC object with 711 * drm_crtc_init_with_planes - Initialise a new CRTC object with
696 * specified primary and cursor planes. 712 * specified primary and cursor planes.
@@ -710,6 +726,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
710 void *cursor, 726 void *cursor,
711 const struct drm_crtc_funcs *funcs) 727 const struct drm_crtc_funcs *funcs)
712{ 728{
729 struct drm_mode_config *config = &dev->mode_config;
713 int ret; 730 int ret;
714 731
715 crtc->dev = dev; 732 crtc->dev = dev;
@@ -717,8 +734,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
717 crtc->invert_dimensions = false; 734 crtc->invert_dimensions = false;
718 735
719 drm_modeset_lock_all(dev); 736 drm_modeset_lock_all(dev);
720 mutex_init(&crtc->mutex); 737 drm_modeset_lock_init(&crtc->mutex);
721 mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); 738 /* dropped by _unlock_all(): */
739 drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
722 740
723 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 741 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
724 if (ret) 742 if (ret)
@@ -726,8 +744,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
726 744
727 crtc->base.properties = &crtc->properties; 745 crtc->base.properties = &crtc->properties;
728 746
729 list_add_tail(&crtc->head, &dev->mode_config.crtc_list); 747 list_add_tail(&crtc->head, &config->crtc_list);
730 dev->mode_config.num_crtc++; 748 config->num_crtc++;
731 749
732 crtc->primary = primary; 750 crtc->primary = primary;
733 if (primary) 751 if (primary)
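All modeset locks now share the single crtc_ww_class defined above; drm_modeset_lock_init() is expected to just wrap ww_mutex_init() against that class, which is what gives every lock in the family one global acquire order. Sketch of the assumed shape:

	DEFINE_WW_CLASS(example_ww_class);	/* one class for the whole lock family */

	static void example_lock_init(struct ww_mutex *lock)
	{
		ww_mutex_init(lock, &example_ww_class);
	}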
@@ -755,6 +773,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
755 kfree(crtc->gamma_store); 773 kfree(crtc->gamma_store);
756 crtc->gamma_store = NULL; 774 crtc->gamma_store = NULL;
757 775
776 drm_modeset_lock_fini(&crtc->mutex);
777
758 drm_mode_object_put(dev, &crtc->base); 778 drm_mode_object_put(dev, &crtc->base);
759 list_del(&crtc->head); 779 list_del(&crtc->head);
760 dev->mode_config.num_crtc--; 780 dev->mode_config.num_crtc--;
@@ -824,7 +844,7 @@ int drm_connector_init(struct drm_device *dev,
824 844
825 ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); 845 ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
826 if (ret) 846 if (ret)
827 goto out; 847 goto out_unlock;
828 848
829 connector->base.properties = &connector->properties; 849 connector->base.properties = &connector->properties;
830 connector->dev = dev; 850 connector->dev = dev;
@@ -834,9 +854,17 @@ int drm_connector_init(struct drm_device *dev,
834 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 854 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
835 if (connector->connector_type_id < 0) { 855 if (connector->connector_type_id < 0) {
836 ret = connector->connector_type_id; 856 ret = connector->connector_type_id;
837 drm_mode_object_put(dev, &connector->base); 857 goto out_put;
838 goto out; 858 }
859 connector->name =
860 kasprintf(GFP_KERNEL, "%s-%d",
861 drm_connector_enum_list[connector_type].name,
862 connector->connector_type_id);
863 if (!connector->name) {
864 ret = -ENOMEM;
865 goto out_put;
839 } 866 }
867
840 INIT_LIST_HEAD(&connector->probed_modes); 868 INIT_LIST_HEAD(&connector->probed_modes);
841 INIT_LIST_HEAD(&connector->modes); 869 INIT_LIST_HEAD(&connector->modes);
842 connector->edid_blob_ptr = NULL; 870 connector->edid_blob_ptr = NULL;
@@ -853,7 +881,11 @@ int drm_connector_init(struct drm_device *dev,
853 drm_object_attach_property(&connector->base, 881 drm_object_attach_property(&connector->base,
854 dev->mode_config.dpms_property, 0); 882 dev->mode_config.dpms_property, 0);
855 883
856 out: 884out_put:
885 if (ret)
886 drm_mode_object_put(dev, &connector->base);
887
888out_unlock:
857 drm_modeset_unlock_all(dev); 889 drm_modeset_unlock_all(dev);
858 890
859 return ret; 891 return ret;
@@ -881,6 +913,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
881 connector->connector_type_id); 913 connector->connector_type_id);
882 914
883 drm_mode_object_put(dev, &connector->base); 915 drm_mode_object_put(dev, &connector->base);
916 kfree(connector->name);
917 connector->name = NULL;
884 list_del(&connector->head); 918 list_del(&connector->head);
885 dev->mode_config.num_connector--; 919 dev->mode_config.num_connector--;
886} 920}
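The connector now carries a heap-allocated name for its whole lifetime, replacing the shared static buffer that drm_get_connector_name() used to return. The generic shape of that init/cleanup pairing, sketched:

	struct named_thing {
		char *name;
	};

	static int named_thing_init(struct named_thing *t, const char *type, int id)
	{
		t->name = kasprintf(GFP_KERNEL, "%s-%d", type, id);	/* e.g. "DP-1" */
		if (!t->name)
			return -ENOMEM;		/* fail init rather than share a buffer */
		return 0;
	}

	static void named_thing_cleanup(struct named_thing *t)
	{
		kfree(t->name);			/* kfree(NULL) is a no-op */
		t->name = NULL;
	}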
@@ -982,16 +1016,27 @@ int drm_encoder_init(struct drm_device *dev,
982 1016
983 ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); 1017 ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
984 if (ret) 1018 if (ret)
985 goto out; 1019 goto out_unlock;
986 1020
987 encoder->dev = dev; 1021 encoder->dev = dev;
988 encoder->encoder_type = encoder_type; 1022 encoder->encoder_type = encoder_type;
989 encoder->funcs = funcs; 1023 encoder->funcs = funcs;
1024 encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
1025 drm_encoder_enum_list[encoder_type].name,
1026 encoder->base.id);
1027 if (!encoder->name) {
1028 ret = -ENOMEM;
1029 goto out_put;
1030 }
990 1031
991 list_add_tail(&encoder->head, &dev->mode_config.encoder_list); 1032 list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
992 dev->mode_config.num_encoder++; 1033 dev->mode_config.num_encoder++;
993 1034
994 out: 1035out_put:
1036 if (ret)
1037 drm_mode_object_put(dev, &encoder->base);
1038
1039out_unlock:
995 drm_modeset_unlock_all(dev); 1040 drm_modeset_unlock_all(dev);
996 1041
997 return ret; 1042 return ret;
@@ -1009,6 +1054,8 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1009 struct drm_device *dev = encoder->dev; 1054 struct drm_device *dev = encoder->dev;
1010 drm_modeset_lock_all(dev); 1055 drm_modeset_lock_all(dev);
1011 drm_mode_object_put(dev, &encoder->base); 1056 drm_mode_object_put(dev, &encoder->base);
1057 kfree(encoder->name);
1058 encoder->name = NULL;
1012 list_del(&encoder->head); 1059 list_del(&encoder->head);
1013 dev->mode_config.num_encoder--; 1060 dev->mode_config.num_encoder--;
1014 drm_modeset_unlock_all(dev); 1061 drm_modeset_unlock_all(dev);
@@ -1145,16 +1192,19 @@ EXPORT_SYMBOL(drm_plane_cleanup);
1145 */ 1192 */
1146void drm_plane_force_disable(struct drm_plane *plane) 1193void drm_plane_force_disable(struct drm_plane *plane)
1147{ 1194{
1195 struct drm_framebuffer *old_fb = plane->fb;
1148 int ret; 1196 int ret;
1149 1197
1150 if (!plane->fb) 1198 if (!old_fb)
1151 return; 1199 return;
1152 1200
1153 ret = plane->funcs->disable_plane(plane); 1201 ret = plane->funcs->disable_plane(plane);
1154 if (ret) 1202 if (ret) {
1155 DRM_ERROR("failed to disable plane with busy fb\n"); 1203 DRM_ERROR("failed to disable plane with busy fb\n");
1204 return;
1205 }
1156 /* disconnect the plane from the fb and crtc: */ 1206 /* disconnect the plane from the fb and crtc: */
1157 __drm_framebuffer_unreference(plane->fb); 1207 __drm_framebuffer_unreference(old_fb);
1158 plane->fb = NULL; 1208 plane->fb = NULL;
1159 plane->crtc = NULL; 1209 plane->crtc = NULL;
1160} 1210}
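The reordering above matters for refcounting: if the driver's disable_plane() fails, the plane is still scanning out the fb, so its reference must not be dropped. The fixed sequence, condensed:

	static void force_disable_sketch(struct drm_plane *plane)
	{
		struct drm_framebuffer *old_fb = plane->fb;	/* snapshot first */

		if (!old_fb)
			return;
		if (plane->funcs->disable_plane(plane))
			return;			/* still live: keep the scanout ref */
		plane->fb = NULL;
		plane->crtc = NULL;
		/* only now is it safe to drop the reference held for scanout */
	}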
@@ -1378,6 +1428,12 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
1378 return 0; 1428 return 0;
1379} 1429}
1380 1430
1431void drm_mode_group_destroy(struct drm_mode_group *group)
1432{
1433 kfree(group->id_list);
1434 group->id_list = NULL;
1435}
1436
1381/* 1437/*
1382 * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is 1438 * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
1383 * the drm core's responsibility to set up mode control groups. 1439 * the drm core's responsibility to set up mode control groups.
@@ -1614,7 +1670,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1614 &dev->mode_config.encoder_list, 1670 &dev->mode_config.encoder_list,
1615 head) { 1671 head) {
1616 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, 1672 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
1617 drm_get_encoder_name(encoder)); 1673 encoder->name);
1618 if (put_user(encoder->base.id, encoder_id + 1674 if (put_user(encoder->base.id, encoder_id +
1619 copied)) { 1675 copied)) {
1620 ret = -EFAULT; 1676 ret = -EFAULT;
@@ -1646,7 +1702,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1646 head) { 1702 head) {
1647 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1703 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1648 connector->base.id, 1704 connector->base.id,
1649 drm_get_connector_name(connector)); 1705 connector->name);
1650 if (put_user(connector->base.id, 1706 if (put_user(connector->base.id,
1651 connector_id + copied)) { 1707 connector_id + copied)) {
1652 ret = -EFAULT; 1708 ret = -EFAULT;
@@ -1695,7 +1751,6 @@ int drm_mode_getcrtc(struct drm_device *dev,
1695{ 1751{
1696 struct drm_mode_crtc *crtc_resp = data; 1752 struct drm_mode_crtc *crtc_resp = data;
1697 struct drm_crtc *crtc; 1753 struct drm_crtc *crtc;
1698 struct drm_mode_object *obj;
1699 int ret = 0; 1754 int ret = 0;
1700 1755
1701 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1756 if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1703,13 +1758,11 @@ int drm_mode_getcrtc(struct drm_device *dev,
1703 1758
1704 drm_modeset_lock_all(dev); 1759 drm_modeset_lock_all(dev);
1705 1760
1706 obj = drm_mode_object_find(dev, crtc_resp->crtc_id, 1761 crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
1707 DRM_MODE_OBJECT_CRTC); 1762 if (!crtc) {
1708 if (!obj) {
1709 ret = -ENOENT; 1763 ret = -ENOENT;
1710 goto out; 1764 goto out;
1711 } 1765 }
1712 crtc = obj_to_crtc(obj);
1713 1766
1714 crtc_resp->x = crtc->x; 1767 crtc_resp->x = crtc->x;
1715 crtc_resp->y = crtc->y; 1768 crtc_resp->y = crtc->y;
@@ -1763,7 +1816,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1763 struct drm_file *file_priv) 1816 struct drm_file *file_priv)
1764{ 1817{
1765 struct drm_mode_get_connector *out_resp = data; 1818 struct drm_mode_get_connector *out_resp = data;
1766 struct drm_mode_object *obj;
1767 struct drm_connector *connector; 1819 struct drm_connector *connector;
1768 struct drm_display_mode *mode; 1820 struct drm_display_mode *mode;
1769 int mode_count = 0; 1821 int mode_count = 0;
@@ -1787,13 +1839,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1787 1839
1788 mutex_lock(&dev->mode_config.mutex); 1840 mutex_lock(&dev->mode_config.mutex);
1789 1841
1790 obj = drm_mode_object_find(dev, out_resp->connector_id, 1842 connector = drm_connector_find(dev, out_resp->connector_id);
1791 DRM_MODE_OBJECT_CONNECTOR); 1843 if (!connector) {
1792 if (!obj) {
1793 ret = -ENOENT; 1844 ret = -ENOENT;
1794 goto out; 1845 goto out;
1795 } 1846 }
1796 connector = obj_to_connector(obj);
1797 1847
1798 props_count = connector->properties.count; 1848 props_count = connector->properties.count;
1799 1849
@@ -1821,10 +1871,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1821 out_resp->mm_height = connector->display_info.height_mm; 1871 out_resp->mm_height = connector->display_info.height_mm;
1822 out_resp->subpixel = connector->display_info.subpixel_order; 1872 out_resp->subpixel = connector->display_info.subpixel_order;
1823 out_resp->connection = connector->status; 1873 out_resp->connection = connector->status;
1874 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1824 if (connector->encoder) 1875 if (connector->encoder)
1825 out_resp->encoder_id = connector->encoder->base.id; 1876 out_resp->encoder_id = connector->encoder->base.id;
1826 else 1877 else
1827 out_resp->encoder_id = 0; 1878 out_resp->encoder_id = 0;
1879 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1828 1880
1829 /* 1881 /*
1830 * This ioctl is called twice, once to determine how much space is 1882 * This ioctl is called twice, once to determine how much space is
@@ -1908,7 +1960,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
1908 struct drm_file *file_priv) 1960 struct drm_file *file_priv)
1909{ 1961{
1910 struct drm_mode_get_encoder *enc_resp = data; 1962 struct drm_mode_get_encoder *enc_resp = data;
1911 struct drm_mode_object *obj;
1912 struct drm_encoder *encoder; 1963 struct drm_encoder *encoder;
1913 int ret = 0; 1964 int ret = 0;
1914 1965
@@ -1916,13 +1967,11 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
1916 return -EINVAL; 1967 return -EINVAL;
1917 1968
1918 drm_modeset_lock_all(dev); 1969 drm_modeset_lock_all(dev);
1919 obj = drm_mode_object_find(dev, enc_resp->encoder_id, 1970 encoder = drm_encoder_find(dev, enc_resp->encoder_id);
1920 DRM_MODE_OBJECT_ENCODER); 1971 if (!encoder) {
1921 if (!obj) {
1922 ret = -ENOENT; 1972 ret = -ENOENT;
1923 goto out; 1973 goto out;
1924 } 1974 }
1925 encoder = obj_to_encoder(obj);
1926 1975
1927 if (encoder->crtc) 1976 if (encoder->crtc)
1928 enc_resp->crtc_id = encoder->crtc->base.id; 1977 enc_resp->crtc_id = encoder->crtc->base.id;
@@ -2020,7 +2069,6 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
2020 struct drm_file *file_priv) 2069 struct drm_file *file_priv)
2021{ 2070{
2022 struct drm_mode_get_plane *plane_resp = data; 2071 struct drm_mode_get_plane *plane_resp = data;
2023 struct drm_mode_object *obj;
2024 struct drm_plane *plane; 2072 struct drm_plane *plane;
2025 uint32_t __user *format_ptr; 2073 uint32_t __user *format_ptr;
2026 int ret = 0; 2074 int ret = 0;
@@ -2029,13 +2077,11 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
2029 return -EINVAL; 2077 return -EINVAL;
2030 2078
2031 drm_modeset_lock_all(dev); 2079 drm_modeset_lock_all(dev);
2032 obj = drm_mode_object_find(dev, plane_resp->plane_id, 2080 plane = drm_plane_find(dev, plane_resp->plane_id);
2033 DRM_MODE_OBJECT_PLANE); 2081 if (!plane) {
2034 if (!obj) {
2035 ret = -ENOENT; 2082 ret = -ENOENT;
2036 goto out; 2083 goto out;
2037 } 2084 }
2038 plane = obj_to_plane(obj);
2039 2085
2040 if (plane->crtc) 2086 if (plane->crtc)
2041 plane_resp->crtc_id = plane->crtc->base.id; 2087 plane_resp->crtc_id = plane->crtc->base.id;
@@ -2088,7 +2134,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2088 struct drm_file *file_priv) 2134 struct drm_file *file_priv)
2089{ 2135{
2090 struct drm_mode_set_plane *plane_req = data; 2136 struct drm_mode_set_plane *plane_req = data;
2091 struct drm_mode_object *obj;
2092 struct drm_plane *plane; 2137 struct drm_plane *plane;
2093 struct drm_crtc *crtc; 2138 struct drm_crtc *crtc;
2094 struct drm_framebuffer *fb = NULL, *old_fb = NULL; 2139 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
@@ -2103,35 +2148,42 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2103 * First, find the plane, crtc, and fb objects. If not available, 2148 * First, find the plane, crtc, and fb objects. If not available,
2104 * we don't bother to call the driver. 2149 * we don't bother to call the driver.
2105 */ 2150 */
2106 obj = drm_mode_object_find(dev, plane_req->plane_id, 2151 plane = drm_plane_find(dev, plane_req->plane_id);
2107 DRM_MODE_OBJECT_PLANE); 2152 if (!plane) {
2108 if (!obj) {
2109 DRM_DEBUG_KMS("Unknown plane ID %d\n", 2153 DRM_DEBUG_KMS("Unknown plane ID %d\n",
2110 plane_req->plane_id); 2154 plane_req->plane_id);
2111 return -ENOENT; 2155 return -ENOENT;
2112 } 2156 }
2113 plane = obj_to_plane(obj);
2114 2157
2115 /* No fb means shut it down */ 2158 /* No fb means shut it down */
2116 if (!plane_req->fb_id) { 2159 if (!plane_req->fb_id) {
2117 drm_modeset_lock_all(dev); 2160 drm_modeset_lock_all(dev);
2118 old_fb = plane->fb; 2161 old_fb = plane->fb;
2119 plane->funcs->disable_plane(plane); 2162 ret = plane->funcs->disable_plane(plane);
2120 plane->crtc = NULL; 2163 if (!ret) {
2121 plane->fb = NULL; 2164 plane->crtc = NULL;
2165 plane->fb = NULL;
2166 } else {
2167 old_fb = NULL;
2168 }
2122 drm_modeset_unlock_all(dev); 2169 drm_modeset_unlock_all(dev);
2123 goto out; 2170 goto out;
2124 } 2171 }
2125 2172
2126 obj = drm_mode_object_find(dev, plane_req->crtc_id, 2173 crtc = drm_crtc_find(dev, plane_req->crtc_id);
2127 DRM_MODE_OBJECT_CRTC); 2174 if (!crtc) {
2128 if (!obj) {
2129 DRM_DEBUG_KMS("Unknown crtc ID %d\n", 2175 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
2130 plane_req->crtc_id); 2176 plane_req->crtc_id);
2131 ret = -ENOENT; 2177 ret = -ENOENT;
2132 goto out; 2178 goto out;
2133 } 2179 }
2134 crtc = obj_to_crtc(obj); 2180
2181 /* Check whether this plane is usable on this CRTC */
2182 if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
2183 DRM_DEBUG_KMS("Invalid crtc for plane\n");
2184 ret = -EINVAL;
2185 goto out;
2186 }
2135 2187
2136 fb = drm_framebuffer_lookup(dev, plane_req->fb_id); 2188 fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
2137 if (!fb) { 2189 if (!fb) {
@@ -2187,16 +2239,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2187 } 2239 }
2188 2240
2189 drm_modeset_lock_all(dev); 2241 drm_modeset_lock_all(dev);
2242 old_fb = plane->fb;
2190 ret = plane->funcs->update_plane(plane, crtc, fb, 2243 ret = plane->funcs->update_plane(plane, crtc, fb,
2191 plane_req->crtc_x, plane_req->crtc_y, 2244 plane_req->crtc_x, plane_req->crtc_y,
2192 plane_req->crtc_w, plane_req->crtc_h, 2245 plane_req->crtc_w, plane_req->crtc_h,
2193 plane_req->src_x, plane_req->src_y, 2246 plane_req->src_x, plane_req->src_y,
2194 plane_req->src_w, plane_req->src_h); 2247 plane_req->src_w, plane_req->src_h);
2195 if (!ret) { 2248 if (!ret) {
2196 old_fb = plane->fb;
2197 plane->crtc = crtc; 2249 plane->crtc = crtc;
2198 plane->fb = fb; 2250 plane->fb = fb;
2199 fb = NULL; 2251 fb = NULL;
2252 } else {
2253 old_fb = NULL;
2200 } 2254 }
2201 drm_modeset_unlock_all(dev); 2255 drm_modeset_unlock_all(dev);
2202 2256
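Both branches above now make the fb hand-off explicit: the old fb is released only when update_plane() actually succeeded, and the new fb keeps its reference by being moved rather than copied. A condensed, hypothetical full-screen helper showing the same protocol (source coordinates in 16.16 fixed point, as the plane API expects):

	static int commit_plane_sketch(struct drm_plane *plane, struct drm_crtc *crtc,
				       struct drm_framebuffer *fb)
	{
		struct drm_framebuffer *old_fb = plane->fb;	/* current scanout */
		int ret;

		ret = plane->funcs->update_plane(plane, crtc, fb, 0, 0,
						 fb->width, fb->height,
						 0, 0, fb->width << 16, fb->height << 16);
		if (!ret) {
			plane->crtc = crtc;
			plane->fb = fb;		/* new fb now owns the scanout ref */
			fb = NULL;		/* so don't drop it below */
		} else {
			old_fb = NULL;		/* nothing changed: keep the old ref */
		}
		if (fb)
			drm_framebuffer_unreference(fb);
		if (old_fb)
			drm_framebuffer_unreference(old_fb);
		return ret;
	}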
@@ -2239,9 +2293,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
2239 ret = crtc->funcs->set_config(set); 2293 ret = crtc->funcs->set_config(set);
2240 if (ret == 0) { 2294 if (ret == 0) {
2241 crtc->primary->crtc = crtc; 2295 crtc->primary->crtc = crtc;
2242 2296 crtc->primary->fb = fb;
2243 /* crtc->fb must be updated by ->set_config, enforces this. */
2244 WARN_ON(fb != crtc->primary->fb);
2245 } 2297 }
2246 2298
2247 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { 2299 list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
@@ -2318,7 +2370,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2318{ 2370{
2319 struct drm_mode_config *config = &dev->mode_config; 2371 struct drm_mode_config *config = &dev->mode_config;
2320 struct drm_mode_crtc *crtc_req = data; 2372 struct drm_mode_crtc *crtc_req = data;
2321 struct drm_mode_object *obj;
2322 struct drm_crtc *crtc; 2373 struct drm_crtc *crtc;
2323 struct drm_connector **connector_set = NULL, *connector; 2374 struct drm_connector **connector_set = NULL, *connector;
2324 struct drm_framebuffer *fb = NULL; 2375 struct drm_framebuffer *fb = NULL;
@@ -2336,14 +2387,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2336 return -ERANGE; 2387 return -ERANGE;
2337 2388
2338 drm_modeset_lock_all(dev); 2389 drm_modeset_lock_all(dev);
2339 obj = drm_mode_object_find(dev, crtc_req->crtc_id, 2390 crtc = drm_crtc_find(dev, crtc_req->crtc_id);
2340 DRM_MODE_OBJECT_CRTC); 2391 if (!crtc) {
2341 if (!obj) {
2342 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); 2392 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
2343 ret = -ENOENT; 2393 ret = -ENOENT;
2344 goto out; 2394 goto out;
2345 } 2395 }
2346 crtc = obj_to_crtc(obj);
2347 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 2396 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
2348 2397
2349 if (crtc_req->mode_valid) { 2398 if (crtc_req->mode_valid) {
@@ -2426,18 +2475,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2426 goto out; 2475 goto out;
2427 } 2476 }
2428 2477
2429 obj = drm_mode_object_find(dev, out_id, 2478 connector = drm_connector_find(dev, out_id);
2430 DRM_MODE_OBJECT_CONNECTOR); 2479 if (!connector) {
2431 if (!obj) {
2432 DRM_DEBUG_KMS("Connector id %d unknown\n", 2480 DRM_DEBUG_KMS("Connector id %d unknown\n",
2433 out_id); 2481 out_id);
2434 ret = -ENOENT; 2482 ret = -ENOENT;
2435 goto out; 2483 goto out;
2436 } 2484 }
2437 connector = obj_to_connector(obj);
2438 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 2485 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
2439 connector->base.id, 2486 connector->base.id,
2440 drm_get_connector_name(connector)); 2487 connector->name);
2441 2488
2442 connector_set[i] = connector; 2489 connector_set[i] = connector;
2443 } 2490 }
@@ -2466,7 +2513,6 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2466 struct drm_mode_cursor2 *req, 2513 struct drm_mode_cursor2 *req,
2467 struct drm_file *file_priv) 2514 struct drm_file *file_priv)
2468{ 2515{
2469 struct drm_mode_object *obj;
2470 struct drm_crtc *crtc; 2516 struct drm_crtc *crtc;
2471 int ret = 0; 2517 int ret = 0;
2472 2518
@@ -2476,14 +2522,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2476 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) 2522 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
2477 return -EINVAL; 2523 return -EINVAL;
2478 2524
2479 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); 2525 crtc = drm_crtc_find(dev, req->crtc_id);
2480 if (!obj) { 2526 if (!crtc) {
2481 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); 2527 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
2482 return -ENOENT; 2528 return -ENOENT;
2483 } 2529 }
2484 crtc = obj_to_crtc(obj);
2485 2530
2486 mutex_lock(&crtc->mutex); 2531 drm_modeset_lock(&crtc->mutex, NULL);
2487 if (req->flags & DRM_MODE_CURSOR_BO) { 2532 if (req->flags & DRM_MODE_CURSOR_BO) {
2488 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { 2533 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
2489 ret = -ENXIO; 2534 ret = -ENXIO;
@@ -2507,7 +2552,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2507 } 2552 }
2508 } 2553 }
2509out: 2554out:
2510 mutex_unlock(&crtc->mutex); 2555 drm_modeset_unlock(&crtc->mutex);
2511 2556
2512 return ret; 2557 return ret;
2513 2558
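Passing a NULL acquire context, as the cursor path above now does, makes drm_modeset_lock() behave like a plain blocking mutex -- acceptable here because only this one per-CRTC lock is taken, so no deadlock backoff can ever be needed:

	static void poke_cursor_sketch(struct drm_crtc *crtc)
	{
		drm_modeset_lock(&crtc->mutex, NULL);	/* single lock: no ctx */
		/* cursor state for this CRTC may be modified here */
		drm_modeset_unlock(&crtc->mutex);
	}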
@@ -3097,6 +3142,8 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
3097 if (!property) 3142 if (!property)
3098 return NULL; 3143 return NULL;
3099 3144
3145 property->dev = dev;
3146
3100 if (num_values) { 3147 if (num_values) {
3101 property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL); 3148 property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
3102 if (!property->values) 3149 if (!property->values)
@@ -3117,6 +3164,9 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
3117 } 3164 }
3118 3165
3119 list_add_tail(&property->head, &dev->mode_config.property_list); 3166 list_add_tail(&property->head, &dev->mode_config.property_list);
3167
3168 WARN_ON(!drm_property_type_valid(property));
3169
3120 return property; 3170 return property;
3121fail: 3171fail:
3122 kfree(property->values); 3172 kfree(property->values);
@@ -3217,6 +3267,22 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
3217} 3267}
3218EXPORT_SYMBOL(drm_property_create_bitmask); 3268EXPORT_SYMBOL(drm_property_create_bitmask);
3219 3269
3270static struct drm_property *property_create_range(struct drm_device *dev,
3271 int flags, const char *name,
3272 uint64_t min, uint64_t max)
3273{
3274 struct drm_property *property;
3275
3276 property = drm_property_create(dev, flags, name, 2);
3277 if (!property)
3278 return NULL;
3279
3280 property->values[0] = min;
3281 property->values[1] = max;
3282
3283 return property;
3284}
3285
3220/** 3286/**
3221 * drm_property_create_range - create a new ranged property type 3287 * drm_property_create_range - create a new ranged property type
3222 * @dev: drm device 3288 * @dev: drm device
@@ -3239,20 +3305,36 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
3239 const char *name, 3305 const char *name,
3240 uint64_t min, uint64_t max) 3306 uint64_t min, uint64_t max)
3241{ 3307{
3308 return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
3309 name, min, max);
3310}
3311EXPORT_SYMBOL(drm_property_create_range);
3312
3313struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
3314 int flags, const char *name,
3315 int64_t min, int64_t max)
3316{
3317 return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
3318 name, I642U64(min), I642U64(max));
3319}
3320EXPORT_SYMBOL(drm_property_create_signed_range);
3321
3322struct drm_property *drm_property_create_object(struct drm_device *dev,
3323 int flags, const char *name, uint32_t type)
3324{
3242 struct drm_property *property; 3325 struct drm_property *property;
3243 3326
3244 flags |= DRM_MODE_PROP_RANGE; 3327 flags |= DRM_MODE_PROP_OBJECT;
3245 3328
3246 property = drm_property_create(dev, flags, name, 2); 3329 property = drm_property_create(dev, flags, name, 1);
3247 if (!property) 3330 if (!property)
3248 return NULL; 3331 return NULL;
3249 3332
3250 property->values[0] = min; 3333 property->values[0] = type;
3251 property->values[1] = max;
3252 3334
3253 return property; 3335 return property;
3254} 3336}
3255EXPORT_SYMBOL(drm_property_create_range); 3337EXPORT_SYMBOL(drm_property_create_object);
3256 3338
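
A minimal sketch of how a driver could use the two new property flavours; the property names here are illustrative, not from this commit:

#include <drm/drm_crtc.h>

static void example_create_props(struct drm_device *dev)
{
	struct drm_property *zpos, *crtc_ref;

	/* signed range: min/max are stored as u64 via I642U64() */
	zpos = drm_property_create_signed_range(dev, 0, "example-zpos",
						-16, 16);

	/* object property: values[0] records the expected object type */
	crtc_ref = drm_property_create_object(dev, 0, "example-crtc-id",
					      DRM_MODE_OBJECT_CRTC);
}
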
3257/** 3339/**
3258 * drm_property_add_enum - add a possible value to an enumeration property 3340 * drm_property_add_enum - add a possible value to an enumeration property
@@ -3274,14 +3356,16 @@ int drm_property_add_enum(struct drm_property *property, int index,
3274{ 3356{
3275 struct drm_property_enum *prop_enum; 3357 struct drm_property_enum *prop_enum;
3276 3358
3277 if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK))) 3359 if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
3360 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
3278 return -EINVAL; 3361 return -EINVAL;
3279 3362
3280 /* 3363 /*
3281 * Bitmask enum properties have the additional constraint of values 3364 * Bitmask enum properties have the additional constraint of values
3282 * from 0 to 63 3365 * from 0 to 63
3283 */ 3366 */
3284 if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63)) 3367 if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
3368 (value > 63))
3285 return -EINVAL; 3369 return -EINVAL;
3286 3370
3287 if (!list_empty(&property->enum_blob_list)) { 3371 if (!list_empty(&property->enum_blob_list)) {
@@ -3438,7 +3522,6 @@ EXPORT_SYMBOL(drm_object_property_get_value);
3438int drm_mode_getproperty_ioctl(struct drm_device *dev, 3522int drm_mode_getproperty_ioctl(struct drm_device *dev,
3439 void *data, struct drm_file *file_priv) 3523 void *data, struct drm_file *file_priv)
3440{ 3524{
3441 struct drm_mode_object *obj;
3442 struct drm_mode_get_property *out_resp = data; 3525 struct drm_mode_get_property *out_resp = data;
3443 struct drm_property *property; 3526 struct drm_property *property;
3444 int enum_count = 0; 3527 int enum_count = 0;
@@ -3457,17 +3540,17 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3457 return -EINVAL; 3540 return -EINVAL;
3458 3541
3459 drm_modeset_lock_all(dev); 3542 drm_modeset_lock_all(dev);
3460 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); 3543 property = drm_property_find(dev, out_resp->prop_id);
3461 if (!obj) { 3544 if (!property) {
3462 ret = -ENOENT; 3545 ret = -ENOENT;
3463 goto done; 3546 goto done;
3464 } 3547 }
3465 property = obj_to_property(obj);
3466 3548
3467 if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) { 3549 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
3550 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
3468 list_for_each_entry(prop_enum, &property->enum_blob_list, head) 3551 list_for_each_entry(prop_enum, &property->enum_blob_list, head)
3469 enum_count++; 3552 enum_count++;
3470 } else if (property->flags & DRM_MODE_PROP_BLOB) { 3553 } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
3471 list_for_each_entry(prop_blob, &property->enum_blob_list, head) 3554 list_for_each_entry(prop_blob, &property->enum_blob_list, head)
3472 blob_count++; 3555 blob_count++;
3473 } 3556 }
@@ -3489,7 +3572,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3489 } 3572 }
3490 out_resp->count_values = value_count; 3573 out_resp->count_values = value_count;
3491 3574
3492 if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) { 3575 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
3576 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
3493 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { 3577 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
3494 copied = 0; 3578 copied = 0;
3495 enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; 3579 enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3511,7 +3595,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3511 out_resp->count_enum_blobs = enum_count; 3595 out_resp->count_enum_blobs = enum_count;
3512 } 3596 }
3513 3597
3514 if (property->flags & DRM_MODE_PROP_BLOB) { 3598 if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
3515 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { 3599 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
3516 copied = 0; 3600 copied = 0;
3517 blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr; 3601 blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3590,7 +3674,6 @@ static void drm_property_destroy_blob(struct drm_device *dev,
3590int drm_mode_getblob_ioctl(struct drm_device *dev, 3674int drm_mode_getblob_ioctl(struct drm_device *dev,
3591 void *data, struct drm_file *file_priv) 3675 void *data, struct drm_file *file_priv)
3592{ 3676{
3593 struct drm_mode_object *obj;
3594 struct drm_mode_get_blob *out_resp = data; 3677 struct drm_mode_get_blob *out_resp = data;
3595 struct drm_property_blob *blob; 3678 struct drm_property_blob *blob;
3596 int ret = 0; 3679 int ret = 0;
@@ -3600,12 +3683,11 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
3600 return -EINVAL; 3683 return -EINVAL;
3601 3684
3602 drm_modeset_lock_all(dev); 3685 drm_modeset_lock_all(dev);
3603 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); 3686 blob = drm_property_blob_find(dev, out_resp->blob_id);
3604 if (!obj) { 3687 if (!blob) {
3605 ret = -ENOENT; 3688 ret = -ENOENT;
3606 goto done; 3689 goto done;
3607 } 3690 }
3608 blob = obj_to_blob(obj);
3609 3691
3610 if (out_resp->length == blob->length) { 3692 if (out_resp->length == blob->length) {
3611 blob_ptr = (void __user *)(unsigned long)out_resp->data; 3693 blob_ptr = (void __user *)(unsigned long)out_resp->data;
@@ -3667,19 +3749,40 @@ static bool drm_property_change_is_valid(struct drm_property *property,
3667{ 3749{
3668 if (property->flags & DRM_MODE_PROP_IMMUTABLE) 3750 if (property->flags & DRM_MODE_PROP_IMMUTABLE)
3669 return false; 3751 return false;
3670 if (property->flags & DRM_MODE_PROP_RANGE) { 3752
3753 if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
3671 if (value < property->values[0] || value > property->values[1]) 3754 if (value < property->values[0] || value > property->values[1])
3672 return false; 3755 return false;
3673 return true; 3756 return true;
3674 } else if (property->flags & DRM_MODE_PROP_BITMASK) { 3757 } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
3758 int64_t svalue = U642I64(value);
3759 if (svalue < U642I64(property->values[0]) ||
3760 svalue > U642I64(property->values[1]))
3761 return false;
3762 return true;
3763 } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
3675 int i; 3764 int i;
3676 uint64_t valid_mask = 0; 3765 uint64_t valid_mask = 0;
3677 for (i = 0; i < property->num_values; i++) 3766 for (i = 0; i < property->num_values; i++)
3678 valid_mask |= (1ULL << property->values[i]); 3767 valid_mask |= (1ULL << property->values[i]);
3679 return !(value & ~valid_mask); 3768 return !(value & ~valid_mask);
3680 } else if (property->flags & DRM_MODE_PROP_BLOB) { 3769 } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
3681 /* Only the driver knows */ 3770 /* Only the driver knows */
3682 return true; 3771 return true;
3772 } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
3773 struct drm_mode_object *obj;
3774 /* a zero value for an object property translates to null: */
3775 if (value == 0)
3776 return true;
3777 /*
3778 * NOTE: use _object_find() directly to bypass restriction on
3779 * looking up refcnt'd objects (ie. fb's). For a refcnt'd
3780 * object this could race against object finalization, so it
3781 * simply tells us that the object *was* valid. Which is good
3782 * enough.
3783 */
3784 obj = _object_find(property->dev, value, property->values[0]);
3785 return obj != NULL;
3683 } else { 3786 } else {
3684 int i; 3787 int i;
3685 for (i = 0; i < property->num_values; i++) 3788 for (i = 0; i < property->num_values; i++)
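
The U642I64()/I642U64() helpers used by the new signed-range check reinterpret the 64-bit pattern rather than converting the value, so signed ordering survives the u64 storage. Roughly (a sketch of the drm_crtc.h macros):

#define U642I64(x) ((int64_t)*((int64_t *)&(x)))
#define I642U64(x) ((uint64_t)*((uint64_t *)&(x)))

static void example_roundtrip(void)
{
	int64_t min = -16;
	uint64_t stored = I642U64(min);	/* 0xfffffffffffffff0 */
	int64_t back = U642I64(stored);	/* -16 again */

	(void)back;
}
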
@@ -3987,7 +4090,6 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3987 void *data, struct drm_file *file_priv) 4090 void *data, struct drm_file *file_priv)
3988{ 4091{
3989 struct drm_mode_crtc_lut *crtc_lut = data; 4092 struct drm_mode_crtc_lut *crtc_lut = data;
3990 struct drm_mode_object *obj;
3991 struct drm_crtc *crtc; 4093 struct drm_crtc *crtc;
3992 void *r_base, *g_base, *b_base; 4094 void *r_base, *g_base, *b_base;
3993 int size; 4095 int size;
@@ -3997,12 +4099,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3997 return -EINVAL; 4099 return -EINVAL;
3998 4100
3999 drm_modeset_lock_all(dev); 4101 drm_modeset_lock_all(dev);
4000 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 4102 crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
4001 if (!obj) { 4103 if (!crtc) {
4002 ret = -ENOENT; 4104 ret = -ENOENT;
4003 goto out; 4105 goto out;
4004 } 4106 }
4005 crtc = obj_to_crtc(obj);
4006 4107
4007 if (crtc->funcs->gamma_set == NULL) { 4108 if (crtc->funcs->gamma_set == NULL) {
4008 ret = -ENOSYS; 4109 ret = -ENOSYS;
@@ -4061,7 +4162,6 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
4061 void *data, struct drm_file *file_priv) 4162 void *data, struct drm_file *file_priv)
4062{ 4163{
4063 struct drm_mode_crtc_lut *crtc_lut = data; 4164 struct drm_mode_crtc_lut *crtc_lut = data;
4064 struct drm_mode_object *obj;
4065 struct drm_crtc *crtc; 4165 struct drm_crtc *crtc;
4066 void *r_base, *g_base, *b_base; 4166 void *r_base, *g_base, *b_base;
4067 int size; 4167 int size;
@@ -4071,12 +4171,11 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
4071 return -EINVAL; 4171 return -EINVAL;
4072 4172
4073 drm_modeset_lock_all(dev); 4173 drm_modeset_lock_all(dev);
4074 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 4174 crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
4075 if (!obj) { 4175 if (!crtc) {
4076 ret = -ENOENT; 4176 ret = -ENOENT;
4077 goto out; 4177 goto out;
4078 } 4178 }
4079 crtc = obj_to_crtc(obj);
4080 4179
4081 /* memcpy into gamma store */ 4180 /* memcpy into gamma store */
4082 if (crtc_lut->gamma_size != crtc->gamma_size) { 4181 if (crtc_lut->gamma_size != crtc->gamma_size) {
@@ -4129,7 +4228,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4129 void *data, struct drm_file *file_priv) 4228 void *data, struct drm_file *file_priv)
4130{ 4229{
4131 struct drm_mode_crtc_page_flip *page_flip = data; 4230 struct drm_mode_crtc_page_flip *page_flip = data;
4132 struct drm_mode_object *obj;
4133 struct drm_crtc *crtc; 4231 struct drm_crtc *crtc;
4134 struct drm_framebuffer *fb = NULL, *old_fb = NULL; 4232 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
4135 struct drm_pending_vblank_event *e = NULL; 4233 struct drm_pending_vblank_event *e = NULL;
@@ -4143,12 +4241,11 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4143 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) 4241 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
4144 return -EINVAL; 4242 return -EINVAL;
4145 4243
4146 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 4244 crtc = drm_crtc_find(dev, page_flip->crtc_id);
4147 if (!obj) 4245 if (!crtc)
4148 return -ENOENT; 4246 return -ENOENT;
4149 crtc = obj_to_crtc(obj);
4150 4247
4151 mutex_lock(&crtc->mutex); 4248 drm_modeset_lock(&crtc->mutex, NULL);
4152 if (crtc->primary->fb == NULL) { 4249 if (crtc->primary->fb == NULL) {
4153 /* The framebuffer is currently unbound, presumably 4250 /* The framebuffer is currently unbound, presumably
4154 * due to a hotplug event, that userspace has not 4251 * due to a hotplug event, that userspace has not
@@ -4232,7 +4329,7 @@ out:
4232 drm_framebuffer_unreference(fb); 4329 drm_framebuffer_unreference(fb);
4233 if (old_fb) 4330 if (old_fb)
4234 drm_framebuffer_unreference(old_fb); 4331 drm_framebuffer_unreference(old_fb);
4235 mutex_unlock(&crtc->mutex); 4332 drm_modeset_unlock(&crtc->mutex);
4236 4333
4237 return ret; 4334 return ret;
4238} 4335}
@@ -4597,6 +4694,7 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
4597void drm_mode_config_init(struct drm_device *dev) 4694void drm_mode_config_init(struct drm_device *dev)
4598{ 4695{
4599 mutex_init(&dev->mode_config.mutex); 4696 mutex_init(&dev->mode_config.mutex);
4697 drm_modeset_lock_init(&dev->mode_config.connection_mutex);
4600 mutex_init(&dev->mode_config.idr_mutex); 4698 mutex_init(&dev->mode_config.idr_mutex);
4601 mutex_init(&dev->mode_config.fb_lock); 4699 mutex_init(&dev->mode_config.fb_lock);
4602 INIT_LIST_HEAD(&dev->mode_config.fb_list); 4700 INIT_LIST_HEAD(&dev->mode_config.fb_list);
@@ -4696,5 +4794,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
4696 } 4794 }
4697 4795
4698 idr_destroy(&dev->mode_config.crtc_idr); 4796 idr_destroy(&dev->mode_config.crtc_idr);
4797 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
4699} 4798}
4700EXPORT_SYMBOL(drm_mode_config_cleanup); 4799EXPORT_SYMBOL(drm_mode_config_cleanup);
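
The new connection_mutex is a drm_modeset_lock, so code that walks connector->encoder links takes it the same way the converted ioctls above take crtc->mutex; a minimal sketch with a NULL acquire context:

drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
/* connector->encoder links are stable here */
drm_modeset_unlock(&dev->mode_config.connection_mutex);
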
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 872ba11c4533..78b37f3febd3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -93,8 +93,10 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
93 * We can expect this mutex to be locked if we are not panicking. 93 * We can expect this mutex to be locked if we are not panicking.
94 * Locking is currently fubar in the panic handler. 94 * Locking is currently fubar in the panic handler.
95 */ 95 */
96 if (!oops_in_progress) 96 if (!oops_in_progress) {
97 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 97 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
98 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
99 }
98 100
99 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 101 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
100 if (connector->encoder == encoder) 102 if (connector->encoder == encoder)
@@ -153,20 +155,14 @@ drm_encoder_disable(struct drm_encoder *encoder)
153static void __drm_helper_disable_unused_functions(struct drm_device *dev) 155static void __drm_helper_disable_unused_functions(struct drm_device *dev)
154{ 156{
155 struct drm_encoder *encoder; 157 struct drm_encoder *encoder;
156 struct drm_connector *connector;
157 struct drm_crtc *crtc; 158 struct drm_crtc *crtc;
158 159
159 drm_warn_on_modeset_not_all_locked(dev); 160 drm_warn_on_modeset_not_all_locked(dev);
160 161
161 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
162 if (!connector->encoder)
163 continue;
164 }
165
166 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 162 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
167 if (!drm_helper_encoder_in_use(encoder)) { 163 if (!drm_helper_encoder_in_use(encoder)) {
168 drm_encoder_disable(encoder); 164 drm_encoder_disable(encoder);
169 /* disconnector encoder from any connector */ 165 /* disconnect encoder from any connector */
170 encoder->crtc = NULL; 166 encoder->crtc = NULL;
171 } 167 }
172 } 168 }
@@ -349,7 +345,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
349 continue; 345 continue;
350 346
351 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", 347 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
352 encoder->base.id, drm_get_encoder_name(encoder), 348 encoder->base.id, encoder->name,
353 mode->base.id, mode->name); 349 mode->base.id, mode->name);
354 encoder_funcs = encoder->helper_private; 350 encoder_funcs = encoder->helper_private;
355 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 351 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -400,8 +396,7 @@ done:
400} 396}
401EXPORT_SYMBOL(drm_crtc_helper_set_mode); 397EXPORT_SYMBOL(drm_crtc_helper_set_mode);
402 398
403 399static void
404static int
405drm_crtc_helper_disable(struct drm_crtc *crtc) 400drm_crtc_helper_disable(struct drm_crtc *crtc)
406{ 401{
407 struct drm_device *dev = crtc->dev; 402 struct drm_device *dev = crtc->dev;
@@ -430,7 +425,6 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
430 } 425 }
431 426
432 __drm_helper_disable_unused_functions(dev); 427 __drm_helper_disable_unused_functions(dev);
433 return 0;
434} 428}
435 429
436/** 430/**
@@ -481,7 +475,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
481 (int)set->num_connectors, set->x, set->y); 475 (int)set->num_connectors, set->x, set->y);
482 } else { 476 } else {
483 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 477 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
484 return drm_crtc_helper_disable(set->crtc); 478 drm_crtc_helper_disable(set->crtc);
479 return 0;
485 } 480 }
486 481
487 dev = set->crtc->dev; 482 dev = set->crtc->dev;
@@ -620,11 +615,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
620 } 615 }
621 if (new_crtc) { 616 if (new_crtc) {
622 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 617 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
623 connector->base.id, drm_get_connector_name(connector), 618 connector->base.id, connector->name,
624 new_crtc->base.id); 619 new_crtc->base.id);
625 } else { 620 } else {
626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 621 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
627 connector->base.id, drm_get_connector_name(connector)); 622 connector->base.id, connector->name);
628 } 623 }
629 } 624 }
630 625
@@ -650,7 +645,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
650 DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); 645 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
651 for (i = 0; i < set->num_connectors; i++) { 646 for (i = 0; i < set->num_connectors; i++) {
652 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, 647 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
653 drm_get_connector_name(set->connectors[i])); 648 set->connectors[i]->name);
654 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); 649 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
655 } 650 }
656 } 651 }
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 4b6e6f3ba0a1..08e33b8b13a4 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -206,13 +206,17 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
206 * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper 206 * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
207 * @adapter: i2c adapter to register 207 * @adapter: i2c adapter to register
208 * 208 *
209 * This registers an i2c adapater that uses dp aux channel as it's underlaying 209 * This registers an i2c adapter that uses dp aux channel as its underlying
210 * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure 210 * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
211 * and store it in the algo_data member of the @adapter argument. This will be 211 * and store it in the algo_data member of the @adapter argument. This will be
212 * used by the i2c over dp aux algorithm to drive the hardware. 212 * used by the i2c over dp aux algorithm to drive the hardware.
213 * 213 *
214 * RETURNS: 214 * RETURNS:
215 * 0 on success, -ERRNO on failure. 215 * 0 on success, -ERRNO on failure.
216 *
217 * IMPORTANT:
218 * This interface is deprecated, please switch to the new dp aux helpers and
219 * drm_dp_aux_register().
216 */ 220 */
217int 221int
218i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 222i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
@@ -378,7 +382,10 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
378 * transactions. 382 * transactions.
379 */ 383 */
380 for (retry = 0; retry < 7; retry++) { 384 for (retry = 0; retry < 7; retry++) {
385
386 mutex_lock(&aux->hw_mutex);
381 err = aux->transfer(aux, &msg); 387 err = aux->transfer(aux, &msg);
388 mutex_unlock(&aux->hw_mutex);
382 if (err < 0) { 389 if (err < 0) {
383 if (err == -EBUSY) 390 if (err == -EBUSY)
384 continue; 391 continue;
@@ -592,7 +599,9 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
592 * before giving up the AUX transaction. 599 * before giving up the AUX transaction.
593 */ 600 */
594 for (retry = 0; retry < 7; retry++) { 601 for (retry = 0; retry < 7; retry++) {
602 mutex_lock(&aux->hw_mutex);
595 err = aux->transfer(aux, msg); 603 err = aux->transfer(aux, msg);
604 mutex_unlock(&aux->hw_mutex);
596 if (err < 0) { 605 if (err < 0) {
597 if (err == -EBUSY) 606 if (err == -EBUSY)
598 continue; 607 continue;
@@ -725,13 +734,15 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
725}; 734};
726 735
727/** 736/**
728 * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX 737 * drm_dp_aux_register() - initialise and register aux channel
729 * @aux: DisplayPort AUX channel 738 * @aux: DisplayPort AUX channel
730 * 739 *
731 * Returns 0 on success or a negative error code on failure. 740 * Returns 0 on success or a negative error code on failure.
732 */ 741 */
733int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux) 742int drm_dp_aux_register(struct drm_dp_aux *aux)
734{ 743{
744 mutex_init(&aux->hw_mutex);
745
735 aux->ddc.algo = &drm_dp_i2c_algo; 746 aux->ddc.algo = &drm_dp_i2c_algo;
736 aux->ddc.algo_data = aux; 747 aux->ddc.algo_data = aux;
737 aux->ddc.retries = 3; 748 aux->ddc.retries = 3;
@@ -746,14 +757,14 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
746 757
747 return i2c_add_adapter(&aux->ddc); 758 return i2c_add_adapter(&aux->ddc);
748} 759}
749EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus); 760EXPORT_SYMBOL(drm_dp_aux_register);
750 761
751/** 762/**
752 * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter 763 * drm_dp_aux_unregister() - unregister an AUX adapter
753 * @aux: DisplayPort AUX channel 764 * @aux: DisplayPort AUX channel
754 */ 765 */
755void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux) 766void drm_dp_aux_unregister(struct drm_dp_aux *aux)
756{ 767{
757 i2c_del_adapter(&aux->ddc); 768 i2c_del_adapter(&aux->ddc);
758} 769}
759EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus); 770EXPORT_SYMBOL(drm_dp_aux_unregister);
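
A sketch of how a driver wires up the renamed helper; the foo_* names are hypothetical. Note that the core now serializes ->transfer() with aux->hw_mutex, so the callback itself no longer needs its own lock:

#include <drm/drm_dp_helper.h>

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	/* program the hardware; called with aux->hw_mutex held */
	return msg->size;	/* bytes transferred, or -errno */
}

static int foo_dp_init(struct foo_device *foo)
{
	foo->aux.name = "foo DP aux";
	foo->aux.dev = foo->dev;
	foo->aux.transfer = foo_aux_transfer;

	return drm_dp_aux_register(&foo->aux);
}
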
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d4e3f9d9370f..dfa9769b26b5 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -70,6 +70,8 @@
70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) 70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
71/* Force 8bpc */ 71/* Force 8bpc */
72#define EDID_QUIRK_FORCE_8BPC (1 << 8) 72#define EDID_QUIRK_FORCE_8BPC (1 << 8)
73/* Force 12bpc */
74#define EDID_QUIRK_FORCE_12BPC (1 << 9)
73 75
74struct detailed_mode_closure { 76struct detailed_mode_closure {
75 struct drm_connector *connector; 77 struct drm_connector *connector;
@@ -125,6 +127,9 @@ static struct edid_quirk {
125 { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, 127 { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
126 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, 128 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
127 129
130 /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
131 { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
132
128 /* ViewSonic VA2026w */ 133 /* ViewSonic VA2026w */
129 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, 134 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
130 135
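
The new Sony entry is matched like any other quirk: by 3-letter PNP vendor id plus product code. Simplified from the file's existing edid_get_quirks() logic (a sketch, assuming the list is named edid_quirk_list as in this file):

static u32 example_get_quirks(struct edid *edid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
		struct edid_quirk *quirk = &edid_quirk_list[i];

		if (edid_vendor(edid, quirk->vendor) &&
		    (EDID_PRODUCT_ID(edid) == quirk->product_id))
			return quirk->quirks;
	}

	return 0;
}
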
@@ -984,9 +989,13 @@ static const u8 edid_header[] = {
984 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 989 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
985}; 990};
986 991
987 /* 992/**
988 * Sanity check the header of the base EDID block. Return 8 if the header 993 * drm_edid_header_is_valid - sanity check the header of the base EDID block
989 * is perfect, down to 0 if it's totally wrong. 994 * @raw_edid: pointer to raw base EDID block
995 *
996 * Sanity check the header of the base EDID block.
997 *
998 * Return: 8 if the header is perfect, down to 0 if it's totally wrong.
990 */ 999 */
991int drm_edid_header_is_valid(const u8 *raw_edid) 1000int drm_edid_header_is_valid(const u8 *raw_edid)
992{ 1001{
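
The body (unchanged by this patch) simply counts matching magic bytes, which is what the new Return: line describes:

int drm_edid_header_is_valid(const u8 *raw_edid)
{
	int i, score = 0;

	for (i = 0; i < sizeof(edid_header); i++)
		if (raw_edid[i] == edid_header[i])
			score++;

	return score;
}
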
@@ -1005,9 +1014,16 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
1005MODULE_PARM_DESC(edid_fixup, 1014MODULE_PARM_DESC(edid_fixup,
1006 "Minimum number of valid EDID header bytes (0-8, default 6)"); 1015 "Minimum number of valid EDID header bytes (0-8, default 6)");
1007 1016
1008/* 1017/**
1009 * Sanity check the EDID block (base or extension). Return 0 if the block 1018 * drm_edid_block_valid - Sanity check the EDID block (base or extension)
1010 * doesn't check out, or 1 if it's valid. 1019 * @raw_edid: pointer to raw EDID block
1020 * @block: type of block to validate (0 for base, extension otherwise)
1021 * @print_bad_edid: if true, dump bad EDID blocks to the console
1022 *
1023 * Validate a base or extension EDID block and optionally dump bad blocks to
1024 * the console.
1025 *
1026 * Return: True if the block is valid, false otherwise.
1011 */ 1027 */
1012bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) 1028bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
1013{ 1029{
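
A sketch of how the block validator composes into whole-EDID validation, mirroring drm_edid_is_valid() further down:

static bool example_edid_valid(struct edid *edid)
{
	u8 *raw = (u8 *)edid;
	int i;

	/* block 0 is the base block, the rest are extensions */
	for (i = 0; i <= edid->extensions; i++)
		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
			return false;

	return true;
}
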
@@ -1077,6 +1093,8 @@ EXPORT_SYMBOL(drm_edid_block_valid);
1077 * @edid: EDID data 1093 * @edid: EDID data
1078 * 1094 *
1079 * Sanity-check an entire EDID record (including extensions) 1095 * Sanity-check an entire EDID record (including extensions)
1096 *
1097 * Return: True if the EDID data is valid, false otherwise.
1080 */ 1098 */
1081bool drm_edid_is_valid(struct edid *edid) 1099bool drm_edid_is_valid(struct edid *edid)
1082{ 1100{
@@ -1096,18 +1114,15 @@ EXPORT_SYMBOL(drm_edid_is_valid);
1096 1114
1097#define DDC_SEGMENT_ADDR 0x30 1115#define DDC_SEGMENT_ADDR 0x30
1098/** 1116/**
1099 * Get EDID information via I2C. 1117 * drm_do_probe_ddc_edid() - get EDID information via I2C
1100 * 1118 * @adapter: I2C device adaptor
1101 * @adapter : i2c device adaptor
1102 * @buf: EDID data buffer to be filled 1119 * @buf: EDID data buffer to be filled
1103 * @block: 128 byte EDID block to start fetching from 1120 * @block: 128 byte EDID block to start fetching from
1104 * @len: EDID data buffer length to fetch 1121 * @len: EDID data buffer length to fetch
1105 * 1122 *
1106 * Returns: 1123 * Try to fetch EDID information by calling I2C driver functions.
1107 *
1108 * 0 on success or -1 on failure.
1109 * 1124 *
1110 * Try to fetch EDID information by calling i2c driver function. 1125 * Return: 0 on success or -1 on failure.
1111 */ 1126 */
1112static int 1127static int
1113drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, 1128drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
@@ -1118,7 +1133,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
1118 unsigned char xfers = segment ? 3 : 2; 1133 unsigned char xfers = segment ? 3 : 2;
1119 int ret, retries = 5; 1134 int ret, retries = 5;
1120 1135
1121 /* The core i2c driver will automatically retry the transfer if the 1136 /*
1137 * The core I2C driver will automatically retry the transfer if the
1122 * adapter reports EAGAIN. However, we find that bit-banging transfers 1138 * adapter reports EAGAIN. However, we find that bit-banging transfers
1123 * are susceptible to errors under a heavily loaded machine and 1139 * are susceptible to errors under a heavily loaded machine and
1124 * generate spurious NAKs and timeouts. Retrying the transfer 1140 * generate spurious NAKs and timeouts. Retrying the transfer
@@ -1144,10 +1160,10 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
1144 } 1160 }
1145 }; 1161 };
1146 1162
1147 /* 1163 /*
1148 * Avoid sending the segment addr to not upset non-compliant ddc 1164 * Avoid sending the segment addr to not upset non-compliant
1149 * monitors. 1165 * DDC monitors.
1150 */ 1166 */
1151 ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); 1167 ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
1152 1168
1153 if (ret == -ENXIO) { 1169 if (ret == -ENXIO) {
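
For context, the "3 - xfers" indexing works against a three-message array laid out as below (a sketch using the function's own locals); skipping the first message keeps the segment-pointer address 0x30 off the bus for block 0/1 reads:

unsigned char segment = block >> 1;
unsigned char start = (block & 1) * EDID_LENGTH;
struct i2c_msg msgs[] = {
	{ .addr = DDC_SEGMENT_ADDR, .flags = 0, .len = 1, .buf = &segment },
	{ .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &start },
	{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = len, .buf = buf },
};

ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
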
@@ -1216,7 +1232,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1216 if (i == 4 && print_bad_edid) { 1232 if (i == 4 && print_bad_edid) {
1217 dev_warn(connector->dev->dev, 1233 dev_warn(connector->dev->dev,
1218 "%s: Ignoring invalid EDID block %d.\n", 1234 "%s: Ignoring invalid EDID block %d.\n",
1219 drm_get_connector_name(connector), j); 1235 connector->name, j);
1220 1236
1221 connector->bad_edid_counter++; 1237 connector->bad_edid_counter++;
1222 } 1238 }
@@ -1236,7 +1252,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1236carp: 1252carp:
1237 if (print_bad_edid) { 1253 if (print_bad_edid) {
1238 dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", 1254 dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
1239 drm_get_connector_name(connector), j); 1255 connector->name, j);
1240 } 1256 }
1241 connector->bad_edid_counter++; 1257 connector->bad_edid_counter++;
1242 1258
@@ -1246,12 +1262,10 @@ out:
1246} 1262}
1247 1263
1248/** 1264/**
1249 * Probe DDC presence. 1265 * drm_probe_ddc() - probe DDC presence
1250 * @adapter: i2c adapter to probe 1266 * @adapter: I2C adapter to probe
1251 * 1267 *
1252 * Returns: 1268 * Return: True on success, false on failure.
1253 *
1254 * 1 on success
1255 */ 1269 */
1256bool 1270bool
1257drm_probe_ddc(struct i2c_adapter *adapter) 1271drm_probe_ddc(struct i2c_adapter *adapter)
@@ -1265,12 +1279,12 @@ EXPORT_SYMBOL(drm_probe_ddc);
1265/** 1279/**
1266 * drm_get_edid - get EDID data, if available 1280 * drm_get_edid - get EDID data, if available
1267 * @connector: connector we're probing 1281 * @connector: connector we're probing
1268 * @adapter: i2c adapter to use for DDC 1282 * @adapter: I2C adapter to use for DDC
1269 * 1283 *
1270 * Poke the given i2c channel to grab EDID data if possible. If found, 1284 * Poke the given I2C channel to grab EDID data if possible. If found,
1271 * attach it to the connector. 1285 * attach it to the connector.
1272 * 1286 *
1273 * Return edid data or NULL if we couldn't find any. 1287 * Return: Pointer to valid EDID or NULL if we couldn't find any.
1274 */ 1288 */
1275struct edid *drm_get_edid(struct drm_connector *connector, 1289struct edid *drm_get_edid(struct drm_connector *connector,
1276 struct i2c_adapter *adapter) 1290 struct i2c_adapter *adapter)
@@ -1288,7 +1302,7 @@ EXPORT_SYMBOL(drm_get_edid);
1288 * drm_edid_duplicate - duplicate an EDID and the extensions 1302 * drm_edid_duplicate - duplicate an EDID and the extensions
1289 * @edid: EDID to duplicate 1303 * @edid: EDID to duplicate
1290 * 1304 *
1291 * Return duplicate edid or NULL on allocation failure. 1305 * Return: Pointer to duplicated EDID or NULL on allocation failure.
1292 */ 1306 */
1293struct edid *drm_edid_duplicate(const struct edid *edid) 1307struct edid *drm_edid_duplicate(const struct edid *edid)
1294{ 1308{
@@ -1411,7 +1425,8 @@ mode_is_rb(const struct drm_display_mode *mode)
1411 * @rb: Mode reduced-blanking-ness 1425 * @rb: Mode reduced-blanking-ness
1412 * 1426 *
1413 * Walk the DMT mode list looking for a match for the given parameters. 1427 * Walk the DMT mode list looking for a match for the given parameters.
1414 * Return a newly allocated copy of the mode, or NULL if not found. 1428 *
1429 * Return: A newly allocated copy of the mode, or NULL if not found.
1415 */ 1430 */
1416struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, 1431struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
1417 int hsize, int vsize, int fresh, 1432 int hsize, int vsize, int fresh,
@@ -1595,14 +1610,13 @@ bad_std_timing(u8 a, u8 b)
1595 * @connector: connector for the EDID block 1610
1596 * @edid: EDID block to scan 1611 * @edid: EDID block to scan
1597 * @t: standard timing params 1612 * @t: standard timing params
1598 * @revision: standard timing level
1599 * 1613 *
1600 * Take the standard timing params (in this case width, aspect, and refresh) 1614 * Take the standard timing params (in this case width, aspect, and refresh)
1601 * and convert them into a real mode using CVT/GTF/DMT. 1615 * and convert them into a real mode using CVT/GTF/DMT.
1602 */ 1616 */
1603static struct drm_display_mode * 1617static struct drm_display_mode *
1604drm_mode_std(struct drm_connector *connector, struct edid *edid, 1618drm_mode_std(struct drm_connector *connector, struct edid *edid,
1605 struct std_timing *t, int revision) 1619 struct std_timing *t)
1606{ 1620{
1607 struct drm_device *dev = connector->dev; 1621 struct drm_device *dev = connector->dev;
1608 struct drm_display_mode *m, *mode = NULL; 1622 struct drm_display_mode *m, *mode = NULL;
@@ -1623,7 +1637,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
1623 vrefresh_rate = vfreq + 60; 1637 vrefresh_rate = vfreq + 60;
1624 /* the vdisplay is calculated based on the aspect ratio */ 1638 /* the vdisplay is calculated based on the aspect ratio */
1625 if (aspect_ratio == 0) { 1639 if (aspect_ratio == 0) {
1626 if (revision < 3) 1640 if (edid->revision < 3)
1627 vsize = hsize; 1641 vsize = hsize;
1628 else 1642 else
1629 vsize = (hsize * 10) / 16; 1643 vsize = (hsize * 10) / 16;
@@ -2140,7 +2154,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
2140 2154
2141/** 2155/**
2142 * add_established_modes - get est. modes from EDID and add them 2156 * add_established_modes - get est. modes from EDID and add them
2143 * @connector: connector of for the EDID block 2157 * @connector: connector to add mode(s) to
2144 * @edid: EDID block to scan 2158 * @edid: EDID block to scan
2145 * 2159 *
2146 * Each EDID block contains a bitmap of the supported "established modes" list 2160 * Each EDID block contains a bitmap of the supported "established modes" list
@@ -2191,8 +2205,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
2191 struct drm_display_mode *newmode; 2205 struct drm_display_mode *newmode;
2192 2206
2193 std = &data->data.timings[i]; 2207 std = &data->data.timings[i];
2194 newmode = drm_mode_std(connector, edid, std, 2208 newmode = drm_mode_std(connector, edid, std);
2195 edid->revision);
2196 if (newmode) { 2209 if (newmode) {
2197 drm_mode_probed_add(connector, newmode); 2210 drm_mode_probed_add(connector, newmode);
2198 closure->modes++; 2211 closure->modes++;
@@ -2203,7 +2216,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
2203 2216
2204/** 2217/**
2205 * add_standard_modes - get std. modes from EDID and add them 2218 * add_standard_modes - get std. modes from EDID and add them
2206 * @connector: connector of for the EDID block 2219 * @connector: connector to add mode(s) to
2207 * @edid: EDID block to scan 2220 * @edid: EDID block to scan
2208 * 2221 *
2209 * Standard modes can be calculated using the appropriate standard (DMT, 2222 * Standard modes can be calculated using the appropriate standard (DMT,
@@ -2221,8 +2234,7 @@ add_standard_modes(struct drm_connector *connector, struct edid *edid)
2221 struct drm_display_mode *newmode; 2234 struct drm_display_mode *newmode;
2222 2235
2223 newmode = drm_mode_std(connector, edid, 2236 newmode = drm_mode_std(connector, edid,
2224 &edid->standard_timings[i], 2237 &edid->standard_timings[i]);
2225 edid->revision);
2226 if (newmode) { 2238 if (newmode) {
2227 drm_mode_probed_add(connector, newmode); 2239 drm_mode_probed_add(connector, newmode);
2228 modes++; 2240 modes++;
@@ -2425,7 +2437,7 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
2425 * drm_match_cea_mode - look for a CEA mode matching given mode 2437 * drm_match_cea_mode - look for a CEA mode matching given mode
2426 * @to_match: display mode 2438 * @to_match: display mode
2427 * 2439 *
2428 * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861 2440 * Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
2429 * mode. 2441 * mode.
2430 */ 2442 */
2431u8 drm_match_cea_mode(const struct drm_display_mode *to_match) 2443u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
@@ -2452,6 +2464,22 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2452} 2464}
2453EXPORT_SYMBOL(drm_match_cea_mode); 2465EXPORT_SYMBOL(drm_match_cea_mode);
2454 2466
2467/**
2468 * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
2469 * the input VIC from the CEA mode list
2470 * @video_code: ID given to each of the CEA modes
2471 *
2472 * Return: The picture aspect ratio for the given VIC.
2473 */
2474enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
2475{
2476 /* VICs are 1-based while the edid_cea_modes array is 0-based, so
2477 * index the array with video_code - 1
2478 */
2479 return edid_cea_modes[video_code-1].picture_aspect_ratio;
2480}
2481EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
2482
2455/* 2483/*
2456 * Calculate the alternate clock for HDMI modes (those from the HDMI vendor 2484 * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
2457 * specific block). 2485 * specific block).
@@ -3023,11 +3051,9 @@ monitor_name(struct detailed_timing *t, void *data)
3023 * @connector: connector corresponding to the HDMI/DP sink 3051 * @connector: connector corresponding to the HDMI/DP sink
3024 * @edid: EDID to parse 3052 * @edid: EDID to parse
3025 * 3053 *
3026 * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. 3054 * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
3027 * Some ELD fields are left to the graphics driver caller: 3055 * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
3028 * - Conn_Type 3056 * fill in.
3029 * - HDCP
3030 * - Port_ID
3031 */ 3057 */
3032void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) 3058void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
3033{ 3059{
@@ -3111,9 +3137,10 @@ EXPORT_SYMBOL(drm_edid_to_eld);
3111 * @sads: pointer that will be set to the extracted SADs 3137 * @sads: pointer that will be set to the extracted SADs
3112 * 3138 *
3113 * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it. 3139 * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
3114 * Note: returned pointer needs to be kfreed
3115 * 3140 *
3116 * Return number of found SADs or negative number on error. 3141 * Note: The returned pointer needs to be freed using kfree().
3142 *
3143 * Return: The number of found SADs or negative number on error.
3117 */ 3144 */
3118int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) 3145int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
3119{ 3146{
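
A minimal consumer of the helper, showing the kfree() the note above requires (the printout is illustrative; cea_sad stores max channels minus one):

static int example_dump_sads(struct edid *edid)
{
	struct cea_sad *sads;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++)
		pr_info("SAD %d: format %u, up to %u channels\n",
			i, sads[i].format, sads[i].channels + 1);

	kfree(sads);	/* the helper allocated this for us */
	return 0;
}
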
@@ -3170,9 +3197,11 @@ EXPORT_SYMBOL(drm_edid_to_sad);
3170 * @sadb: pointer to the speaker block 3197 * @sadb: pointer to the speaker block
3171 * 3198 *
3172 * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it. 3199 * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
3173 * Note: returned pointer needs to be kfreed
3174 * 3200 *
3175 * Return number of found Speaker Allocation Blocks or negative number on error. 3201 * Note: The returned pointer needs to be freed using kfree().
3202 *
3203 * Return: The number of found Speaker Allocation Blocks or negative number on
3204 * error.
3176 */ 3205 */
3177int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) 3206int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
3178{ 3207{
@@ -3204,10 +3233,9 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
3204 3233
3205 /* Speaker Allocation Data Block */ 3234 /* Speaker Allocation Data Block */
3206 if (dbl == 3) { 3235 if (dbl == 3) {
3207 *sadb = kmalloc(dbl, GFP_KERNEL); 3236 *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
3208 if (!*sadb) 3237 if (!*sadb)
3209 return -ENOMEM; 3238 return -ENOMEM;
3210 memcpy(*sadb, &db[1], dbl);
3211 count = dbl; 3239 count = dbl;
3212 break; 3240 break;
3213 } 3241 }
@@ -3219,9 +3247,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
3219EXPORT_SYMBOL(drm_edid_to_speaker_allocation); 3247EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
3220 3248
3221/** 3249/**
3222 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond 3250 * drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
3223 * @connector: connector associated with the HDMI/DP sink 3251 * @connector: connector associated with the HDMI/DP sink
3224 * @mode: the display mode 3252 * @mode: the display mode
3253 *
3254 * Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
3255 * the sink doesn't support audio or video.
3225 */ 3256 */
3226int drm_av_sync_delay(struct drm_connector *connector, 3257int drm_av_sync_delay(struct drm_connector *connector,
3227 struct drm_display_mode *mode) 3258 struct drm_display_mode *mode)
@@ -3263,6 +3294,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
3263 * 3294 *
3264 * It's possible for one encoder to be associated with multiple HDMI/DP sinks. 3295 * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
3265 * The policy is now hard coded to simply use the first HDMI/DP sink's ELD. 3296 * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
3297 *
3298 * Return: The connector associated with the first HDMI/DP sink that has ELD
3299 * attached to it.
3266 */ 3300 */
3267struct drm_connector *drm_select_eld(struct drm_encoder *encoder, 3301struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
3268 struct drm_display_mode *mode) 3302 struct drm_display_mode *mode)
@@ -3270,6 +3304,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
3270 struct drm_connector *connector; 3304 struct drm_connector *connector;
3271 struct drm_device *dev = encoder->dev; 3305 struct drm_device *dev = encoder->dev;
3272 3306
3307 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
3308
3273 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 3309 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
3274 if (connector->encoder == encoder && connector->eld[0]) 3310 if (connector->encoder == encoder && connector->eld[0])
3275 return connector; 3311 return connector;
@@ -3279,11 +3315,12 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
3279EXPORT_SYMBOL(drm_select_eld); 3315EXPORT_SYMBOL(drm_select_eld);
3280 3316
3281/** 3317/**
3282 * drm_detect_hdmi_monitor - detect whether monitor is hdmi. 3318 * drm_detect_hdmi_monitor - detect whether monitor is HDMI
3283 * @edid: monitor EDID information 3319 * @edid: monitor EDID information
3284 * 3320 *
3285 * Parse the CEA extension according to CEA-861-B. 3321 * Parse the CEA extension according to CEA-861-B.
3286 * Return true if HDMI, false if not or unknown. 3322 *
3323 * Return: True if the monitor is HDMI, false if not or unknown.
3287 */ 3324 */
3288bool drm_detect_hdmi_monitor(struct edid *edid) 3325bool drm_detect_hdmi_monitor(struct edid *edid)
3289{ 3326{
@@ -3321,6 +3358,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
3321 * audio format, assume at least 'basic audio' support, even if 'basic 3358 * audio format, assume at least 'basic audio' support, even if 'basic
3322 * audio' is not defined in EDID. 3359 * audio' is not defined in EDID.
3323 * 3360 *
3361 * Return: True if the monitor supports audio, false otherwise.
3324 */ 3362 */
3325bool drm_detect_monitor_audio(struct edid *edid) 3363bool drm_detect_monitor_audio(struct edid *edid)
3326{ 3364{
@@ -3364,6 +3402,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
3364 * Check whether the monitor reports the RGB quantization range selection 3402 * Check whether the monitor reports the RGB quantization range selection
3365 * as supported. The AVI infoframe can then be used to inform the monitor 3403 * as supported. The AVI infoframe can then be used to inform the monitor
3366 * which quantization range (full or limited) is used. 3404 * which quantization range (full or limited) is used.
3405 *
3406 * Return: True if the RGB quantization range is selectable, false otherwise.
3367 */ 3407 */
3368bool drm_rgb_quant_range_selectable(struct edid *edid) 3408bool drm_rgb_quant_range_selectable(struct edid *edid)
3369{ 3409{
@@ -3390,16 +3430,119 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
3390EXPORT_SYMBOL(drm_rgb_quant_range_selectable); 3430EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
3391 3431
3392/** 3432/**
3433 * drm_assign_hdmi_deep_color_info - detect whether monitor supports
3434 * HDMI deep color modes and update drm_display_info if so.
3435 *
3436 * @edid: monitor EDID information
3437 * @info: Updated with maximum supported deep color bpc and color format
3438 * if deep color supported.
3439 *
3440 * Parse the CEA extension according to CEA-861-B.
3441 * Return: True if HDMI deep color is supported, false if not or unknown.
3442 */
3443static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
3444 struct drm_display_info *info,
3445 struct drm_connector *connector)
3446{
3447 u8 *edid_ext, *hdmi;
3448 int i;
3449 int start_offset, end_offset;
3450 unsigned int dc_bpc = 0;
3451
3452 edid_ext = drm_find_cea_extension(edid);
3453 if (!edid_ext)
3454 return false;
3455
3456 if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
3457 return false;
3458
3459 /*
3460 * Because the HDMI identifier is in the Vendor Specific Block,
3461 * search for it in all data blocks of the CEA extension.
3462 */
3463 for_each_cea_db(edid_ext, i, start_offset, end_offset) {
3464 if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
3465 /* HDMI supports at least 8 bpc */
3466 info->bpc = 8;
3467
3468 hdmi = &edid_ext[i];
3469 if (cea_db_payload_len(hdmi) < 6)
3470 return false;
3471
3472 if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
3473 dc_bpc = 10;
3474 info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
3475 DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
3476 connector->name);
3477 }
3478
3479 if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
3480 dc_bpc = 12;
3481 info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
3482 DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
3483 connector->name);
3484 }
3485
3486 if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
3487 dc_bpc = 16;
3488 info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
3489 DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
3490 connector->name);
3491 }
3492
3493 if (dc_bpc > 0) {
3494 DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
3495 connector->name, dc_bpc);
3496 info->bpc = dc_bpc;
3497
3498 /*
3499 * Deep color support mandates RGB444 support for all video
3500 * modes and forbids YCRCB422 support for all video modes per
3501 * HDMI 1.3 spec.
3502 */
3503 info->color_formats = DRM_COLOR_FORMAT_RGB444;
3504
3505 /* YCRCB444 is optional according to spec. */
3506 if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
3507 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
3508 DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
3509 connector->name);
3510 }
3511
3512 /*
3513 * Spec says that if any deep color mode is supported at all,
3514 * then deep color 36 bit must be supported.
3515 */
3516 if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
3517 DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
3518 connector->name);
3519 }
3520
3521 return true;
3522 } else {
3524 DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
3525 connector->name);
3526 }
3527 }
3528 }
3529
3530 return false;
3531}
3532
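
Once the info is filled in, a driver can size its pipeline from it; a hypothetical consumer (the foo_* name is not from this commit):

static int foo_max_hdmi_bpc(struct drm_connector *connector)
{
	struct drm_display_info *info = &connector->display_info;

	if (info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36)
		return 12;
	if (info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30)
		return 10;
	return 8;	/* HDMI baseline */
}
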
3533/**
3393 * drm_add_display_info - pull display info out if present 3534 * drm_add_display_info - pull display info out if present
3394 * @edid: EDID data 3535 * @edid: EDID data
3395 * @info: display info (attached to connector) 3536 * @info: display info (attached to connector)
3537 * @connector: connector whose edid is used to build display info
3396 * 3538 *
3397 * Grab any available display info and stuff it into the drm_display_info 3539 * Grab any available display info and stuff it into the drm_display_info
3398 * structure that's part of the connector. Useful for tracking bpp and 3540 * structure that's part of the connector. Useful for tracking bpp and
3399 * color spaces. 3541 * color spaces.
3400 */ 3542 */
3401static void drm_add_display_info(struct edid *edid, 3543static void drm_add_display_info(struct edid *edid,
3402 struct drm_display_info *info) 3544 struct drm_display_info *info,
3545 struct drm_connector *connector)
3403{ 3546{
3404 u8 *edid_ext; 3547 u8 *edid_ext;
3405 3548
@@ -3429,6 +3572,9 @@ static void drm_add_display_info(struct edid *edid,
3429 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; 3572 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
3430 } 3573 }
3431 3574
3575 /* HDMI deep color modes supported? Assign to info, if so */
3576 drm_assign_hdmi_deep_color_info(edid, info, connector);
3577
3432 /* Only defined for 1.4 with digital displays */ 3578 /* Only defined for 1.4 with digital displays */
3433 if (edid->revision < 4) 3579 if (edid->revision < 4)
3434 return; 3580 return;
@@ -3458,6 +3604,9 @@ static void drm_add_display_info(struct edid *edid,
3458 break; 3604 break;
3459 } 3605 }
3460 3606
3607 DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
3608 connector->name, info->bpc);
3609
3461 info->color_formats |= DRM_COLOR_FORMAT_RGB444; 3610 info->color_formats |= DRM_COLOR_FORMAT_RGB444;
3462 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) 3611 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
3463 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; 3612 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
@@ -3468,11 +3617,11 @@ static void drm_add_display_info(struct edid *edid,
3468/** 3617/**
3469 * drm_add_edid_modes - add modes from EDID data, if available 3618 * drm_add_edid_modes - add modes from EDID data, if available
3470 * @connector: connector we're probing 3619 * @connector: connector we're probing
3471 * @edid: edid data 3620 * @edid: EDID data
3472 * 3621 *
3473 * Add the specified modes to the connector's mode list. 3622 * Add the specified modes to the connector's mode list.
3474 * 3623 *
3475 * Return number of modes added or 0 if we couldn't find any. 3624 * Return: The number of modes added or 0 if we couldn't find any.
3476 */ 3625 */
3477int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) 3626int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3478{ 3627{
@@ -3484,7 +3633,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3484 } 3633 }
3485 if (!drm_edid_is_valid(edid)) { 3634 if (!drm_edid_is_valid(edid)) {
3486 dev_warn(connector->dev->dev, "%s: EDID invalid.\n", 3635 dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
3487 drm_get_connector_name(connector)); 3636 connector->name);
3488 return 0; 3637 return 0;
3489 } 3638 }
3490 3639
@@ -3516,11 +3665,14 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3516 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 3665 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
3517 edid_fixup_preferred(connector, quirks); 3666 edid_fixup_preferred(connector, quirks);
3518 3667
3519 drm_add_display_info(edid, &connector->display_info); 3668 drm_add_display_info(edid, &connector->display_info, connector);
3520 3669
3521 if (quirks & EDID_QUIRK_FORCE_8BPC) 3670 if (quirks & EDID_QUIRK_FORCE_8BPC)
3522 connector->display_info.bpc = 8; 3671 connector->display_info.bpc = 8;
3523 3672
3673 if (quirks & EDID_QUIRK_FORCE_12BPC)
3674 connector->display_info.bpc = 12;
3675
3524 return num_modes; 3676 return num_modes;
3525} 3677}
3526EXPORT_SYMBOL(drm_add_edid_modes); 3678EXPORT_SYMBOL(drm_add_edid_modes);
@@ -3534,7 +3686,7 @@ EXPORT_SYMBOL(drm_add_edid_modes);
3534 * Add the specified modes to the connector's mode list. Only when the 3686 * Add the specified modes to the connector's mode list. Only when the
3535 * hdisplay/vdisplay is not beyond the given limit, it will be added. 3687 * hdisplay/vdisplay is not beyond the given limit, it will be added.
3536 * 3688 *
3537 * Return number of modes added or 0 if we couldn't find any. 3689 * Return: The number of modes added or 0 if we couldn't find any.
3538 */ 3690 */
3539int drm_add_modes_noedid(struct drm_connector *connector, 3691int drm_add_modes_noedid(struct drm_connector *connector,
3540 int hdisplay, int vdisplay) 3692 int hdisplay, int vdisplay)
@@ -3573,13 +3725,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
3573} 3725}
3574EXPORT_SYMBOL(drm_add_modes_noedid); 3726EXPORT_SYMBOL(drm_add_modes_noedid);
3575 3727
3728/**
3729 * drm_set_preferred_mode - Sets the preferred mode of a connector
3730 * @connector: connector whose mode list should be processed
3731 * @hpref: horizontal resolution of preferred mode
3732 * @vpref: vertical resolution of preferred mode
3733 *
3734 * Marks a mode as preferred if it matches the resolution specified by @hpref
3735 * and @vpref.
3736 */
3576void drm_set_preferred_mode(struct drm_connector *connector, 3737void drm_set_preferred_mode(struct drm_connector *connector,
3577 int hpref, int vpref) 3738 int hpref, int vpref)
3578{ 3739{
3579 struct drm_display_mode *mode; 3740 struct drm_display_mode *mode;
3580 3741
3581 list_for_each_entry(mode, &connector->probed_modes, head) { 3742 list_for_each_entry(mode, &connector->probed_modes, head) {
3582 if (mode->hdisplay == hpref && 3743 if (mode->hdisplay == hpref &&
3583 mode->vdisplay == vpref) 3744 mode->vdisplay == vpref)
3584 mode->type |= DRM_MODE_TYPE_PREFERRED; 3745 mode->type |= DRM_MODE_TYPE_PREFERRED;
3585 } 3746 }
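
Typical usage pairs this with drm_add_modes_noedid() on connectors without an EDID; a one-line sketch with an illustrative resolution. Only modes already on the probed list gain DRM_MODE_TYPE_PREFERRED; nothing is added or removed:

/* mark an 800x480 probed mode, if any, as preferred (illustrative) */
drm_set_preferred_mode(connector, 800, 480);
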
@@ -3592,7 +3753,7 @@ EXPORT_SYMBOL(drm_set_preferred_mode);
3592 * @frame: HDMI AVI infoframe 3753 * @frame: HDMI AVI infoframe
3593 * @mode: DRM display mode 3754 * @mode: DRM display mode
3594 * 3755 *
3595 * Returns 0 on success or a negative error code on failure. 3756 * Return: 0 on success or a negative error code on failure.
3596 */ 3757 */
3597int 3758int
3598drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, 3759drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
@@ -3613,6 +3774,12 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3613 frame->video_code = drm_match_cea_mode(mode); 3774 frame->video_code = drm_match_cea_mode(mode);
3614 3775
3615 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3776 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3777
3778 /* Populate picture aspect ratio from CEA mode list */
3779 if (frame->video_code > 0)
3780 frame->picture_aspect = drm_get_cea_aspect_ratio(
3781 frame->video_code);
3782
3616 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 3783 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
3617 frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; 3784 frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
3618 3785
@@ -3657,7 +3824,7 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode)
3657 * 4k or stereoscopic 3D mode. So when giving any other mode as input this 3824 * 4k or stereoscopic 3D mode. So when giving any other mode as input this
3658 * function will return -EINVAL, error that can be safely ignored. 3825 * function will return -EINVAL, error that can be safely ignored.
3659 * 3826 *
3660 * Returns 0 on success or a negative error code on failure. 3827 * Return: 0 on success or a negative error code on failure.
3661 */ 3828 */
3662int 3829int
3663drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, 3830drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1b4c7a5442c5..0a235fe61c9b 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,8 +31,9 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " 31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 5 34#define GENERIC_EDIDS 6
35static const char *generic_edid_name[GENERIC_EDIDS] = { 35static const char *generic_edid_name[GENERIC_EDIDS] = {
36 "edid/800x600.bin",
36 "edid/1024x768.bin", 37 "edid/1024x768.bin",
37 "edid/1280x1024.bin", 38 "edid/1280x1024.bin",
38 "edid/1600x1200.bin", 39 "edid/1600x1200.bin",
@@ -44,6 +45,24 @@ static const u8 generic_edid[GENERIC_EDIDS][128] = {
44 { 45 {
45 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 46 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
46 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 47 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
48 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
49 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
50 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
51 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
52 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
53 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
54 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
55 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
56 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
57 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
58 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
59 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
60 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
61 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
62 },
63 {
64 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
65 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
47 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78, 66 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
48 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, 67 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
49 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40, 68 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
@@ -242,7 +261,7 @@ out:
242 261
243int drm_load_edid_firmware(struct drm_connector *connector) 262int drm_load_edid_firmware(struct drm_connector *connector)
244{ 263{
245 const char *connector_name = drm_get_connector_name(connector); 264 const char *connector_name = connector->name;
246 char *edidname = edid_firmware, *last, *colon; 265 char *edidname = edid_firmware, *last, *colon;
247 int ret; 266 int ret;
248 struct edid *edid; 267 struct edid *edid;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 61b5a47ad239..f27c883be391 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -429,13 +429,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
429 */ 429 */
430void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma) 430void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
431{ 431{
432 if (fbdev_cma) { 432 if (fbdev_cma)
433 struct drm_device *dev = fbdev_cma->fb_helper.dev; 433 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
434
435 drm_modeset_lock_all(dev);
436 drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
437 drm_modeset_unlock_all(dev);
438 }
439} 434}
440EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode); 435EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
441 436
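
With the locking folded into the fb helper, a CMA-based driver's ->lastclose stays a one-liner. A hedged sketch (the foo_* names and the private struct layout are made up):

/* Hypothetical ->lastclose for a CMA-based driver: one call restores
 * the fbdev configuration, taking the modeset locks internally. */
static void foo_lastclose(struct drm_device *dev)
{
        struct foo_private *priv = dev->dev_private;

        drm_fbdev_cma_restore_mode(priv->fbdev_cma);
}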
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 04d3fd3658f3..d5d8cea1a679 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -45,13 +45,13 @@ static LIST_HEAD(kernel_fb_helper_list);
45 * DOC: fbdev helpers 45 * DOC: fbdev helpers
46 * 46 *
47 * The fb helper functions are useful to provide an fbdev on top of a drm kernel 47 * The fb helper functions are useful to provide an fbdev on top of a drm kernel
48 * mode setting driver. They can be used mostly independantely from the crtc 48 * mode setting driver. They can be used mostly independently from the crtc
49 * helper functions used by many drivers to implement the kernel mode setting 49 * helper functions used by many drivers to implement the kernel mode setting
50 * interfaces. 50 * interfaces.
51 * 51 *
52 * Initialization is done as a three-step process with drm_fb_helper_init(), 52 * Initialization is done as a three-step process with drm_fb_helper_init(),
53 * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(). 53 * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
54 * Drivers with fancier requirements than the default beheviour can override the 54 * Drivers with fancier requirements than the default behaviour can override the
55 * second step with their own code. Teardown is done with drm_fb_helper_fini(). 55 * second step with their own code. Teardown is done with drm_fb_helper_fini().
56 * 56 *
57 * At runtime drivers should restore the fbdev console by calling 57 * At runtime drivers should restore the fbdev console by calling
@@ -59,7 +59,7 @@ static LIST_HEAD(kernel_fb_helper_list);
59 * should also notify the fb helper code from updates to the output 59 * should also notify the fb helper code from updates to the output
60 * configuration by calling drm_fb_helper_hotplug_event(). For easier 60 * configuration by calling drm_fb_helper_hotplug_event(). For easier
61 * integration with the output polling code in drm_crtc_helper.c the modeset 61 * integration with the output polling code in drm_crtc_helper.c the modeset
62 * code proves a ->output_poll_changed callback. 62 * code provides a ->output_poll_changed callback.
63 * 63 *
64 * All other functions exported by the fb helper library can be used to 64 * All other functions exported by the fb helper library can be used to
65 * implement the fbdev driver interface by the driver. 65 * implement the fbdev driver interface by the driver.
@@ -120,7 +120,7 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
120 mode = &fb_helper_conn->cmdline_mode; 120 mode = &fb_helper_conn->cmdline_mode;
121 121
122 /* do something on return - turn off connector maybe */ 122 /* do something on return - turn off connector maybe */
123 if (fb_get_options(drm_get_connector_name(connector), &option)) 123 if (fb_get_options(connector->name, &option))
124 continue; 124 continue;
125 125
126 if (drm_mode_parse_command_line_for_connector(option, 126 if (drm_mode_parse_command_line_for_connector(option,
@@ -142,12 +142,12 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
142 } 142 }
143 143
144 DRM_INFO("forcing %s connector %s\n", 144 DRM_INFO("forcing %s connector %s\n",
145 drm_get_connector_name(connector), s); 145 connector->name, s);
146 connector->force = mode->force; 146 connector->force = mode->force;
147 } 147 }
148 148
149 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", 149 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
150 drm_get_connector_name(connector), 150 connector->name,
151 mode->xres, mode->yres, 151 mode->xres, mode->yres,
152 mode->refresh_specified ? mode->refresh : 60, 152 mode->refresh_specified ? mode->refresh : 60,
153 mode->rb ? " reduced blanking" : "", 153 mode->rb ? " reduced blanking" : "",
@@ -273,15 +273,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
273} 273}
274EXPORT_SYMBOL(drm_fb_helper_debug_leave); 274EXPORT_SYMBOL(drm_fb_helper_debug_leave);
275 275
276/** 276static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
277 * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
278 * @fb_helper: fbcon to restore
279 *
280 * This should be called from driver's drm ->lastclose callback
281 * when implementing an fbcon on top of kms using this helper. This ensures that
282 * the user isn't greeted with a black screen when e.g. X dies.
283 */
284bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
285{ 277{
286 struct drm_device *dev = fb_helper->dev; 278 struct drm_device *dev = fb_helper->dev;
287 struct drm_plane *plane; 279 struct drm_plane *plane;
@@ -311,7 +303,40 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
311 } 303 }
312 return error; 304 return error;
313} 305}
314EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); 306/**
307 * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
308 * @fb_helper: fbcon to restore
309 *
310 * This should be called from driver's drm ->lastclose callback
311 * when implementing an fbcon on top of kms using this helper. This ensures that
312 * the user isn't greeted with a black screen when e.g. X dies.
313 *
314 * Use this variant if you need to bypass locking (panic), or already
315 * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked().
316 */
317static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
318{
319 return restore_fbdev_mode(fb_helper);
320}
321
322/**
323 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
324 * @fb_helper: fbcon to restore
325 *
326 * This should be called from driver's drm ->lastclose callback
327 * when implementing an fbcon on top of kms using this helper. This ensures that
328 * the user isn't greeted with a black screen when e.g. X dies.
329 */
330bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
331{
332 struct drm_device *dev = fb_helper->dev;
333 bool ret;
334 drm_modeset_lock_all(dev);
335 ret = restore_fbdev_mode(fb_helper);
336 drm_modeset_unlock_all(dev);
337 return ret;
338}
339EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
315 340
316/* 341/*
317 * restore fbcon display for all kms drivers using this helper, used for sysrq 342
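
The split means drivers no longer wrap the restore in modeset locks themselves; the _unlocked variant acquires them internally. A hedged sketch of a driver using the fb helper directly (bar_* names are made up, and bar_private is assumed to embed a drm_fb_helper):

static void bar_lastclose(struct drm_device *dev)
{
        struct bar_private *priv = dev->dev_private;

        /* Takes and drops all modeset locks itself; the caller must
         * not already hold any of them. */
        drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
}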
@@ -326,12 +351,25 @@ static bool drm_fb_helper_force_kernel_mode(void)
326 return false; 351 return false;
327 352
328 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 353 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
329 if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) 354 struct drm_device *dev = helper->dev;
355
356 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
357 continue;
358
359 /* NOTE: we use lockless flag below to avoid grabbing other
360 * modeset locks. So just trylock the underlying mutex
361 * directly:
362 */
363 if (!mutex_trylock(&dev->mode_config.mutex)) {
364 error = true;
330 continue; 365 continue;
366 }
331 367
332 ret = drm_fb_helper_restore_fbdev_mode(helper); 368 ret = drm_fb_helper_restore_fbdev_mode(helper);
333 if (ret) 369 if (ret)
334 error = true; 370 error = true;
371
372 mutex_unlock(&dev->mode_config.mutex);
335 } 373 }
336 return error; 374 return error;
337} 375}
@@ -811,7 +849,6 @@ EXPORT_SYMBOL(drm_fb_helper_check_var);
811int drm_fb_helper_set_par(struct fb_info *info) 849int drm_fb_helper_set_par(struct fb_info *info)
812{ 850{
813 struct drm_fb_helper *fb_helper = info->par; 851 struct drm_fb_helper *fb_helper = info->par;
814 struct drm_device *dev = fb_helper->dev;
815 struct fb_var_screeninfo *var = &info->var; 852 struct fb_var_screeninfo *var = &info->var;
816 853
817 if (var->pixclock != 0) { 854 if (var->pixclock != 0) {
@@ -819,9 +856,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
819 return -EINVAL; 856 return -EINVAL;
820 } 857 }
821 858
822 drm_modeset_lock_all(dev); 859 drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
823 drm_fb_helper_restore_fbdev_mode(fb_helper);
824 drm_modeset_unlock_all(dev);
825 860
826 if (fb_helper->delayed_hotplug) { 861 if (fb_helper->delayed_hotplug) {
827 fb_helper->delayed_hotplug = false; 862 fb_helper->delayed_hotplug = false;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e1eba0b7cd45..021fe5d11df5 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -43,8 +43,7 @@
43DEFINE_MUTEX(drm_global_mutex); 43DEFINE_MUTEX(drm_global_mutex);
44EXPORT_SYMBOL(drm_global_mutex); 44EXPORT_SYMBOL(drm_global_mutex);
45 45
46static int drm_open_helper(struct inode *inode, struct file *filp, 46static int drm_open_helper(struct file *filp, struct drm_minor *minor);
47 struct drm_minor *minor);
48 47
49static int drm_setup(struct drm_device * dev) 48static int drm_setup(struct drm_device * dev)
50{ 49{
@@ -95,7 +94,7 @@ int drm_open(struct inode *inode, struct file *filp)
95 /* share address_space across all char-devs of a single device */ 94 /* share address_space across all char-devs of a single device */
96 filp->f_mapping = dev->anon_inode->i_mapping; 95 filp->f_mapping = dev->anon_inode->i_mapping;
97 96
98 retcode = drm_open_helper(inode, filp, minor); 97 retcode = drm_open_helper(filp, minor);
99 if (retcode) 98 if (retcode)
100 goto err_undo; 99 goto err_undo;
101 if (need_setup) { 100 if (need_setup) {
@@ -171,7 +170,6 @@ static int drm_cpu_valid(void)
171/** 170/**
172 * Called whenever a process opens /dev/drm. 171 * Called whenever a process opens /dev/drm.
173 * 172 *
174 * \param inode device inode.
175 * \param filp file pointer. 173 * \param filp file pointer.
176 * \param minor acquired minor-object. 174 * \param minor acquired minor-object.
177 * \return zero on success or a negative number on failure. 175 * \return zero on success or a negative number on failure.
@@ -179,8 +177,7 @@ static int drm_cpu_valid(void)
179 * Creates and initializes a drm_file structure for the file private data in \p 177 * Creates and initializes a drm_file structure for the file private data in \p
180 * filp and add it into the double linked list in \p dev. 178 * filp and add it into the double linked list in \p dev.
181 */ 179 */
182static int drm_open_helper(struct inode *inode, struct file *filp, 180static int drm_open_helper(struct file *filp, struct drm_minor *minor)
183 struct drm_minor *minor)
184{ 181{
185 struct drm_device *dev = minor->dev; 182 struct drm_device *dev = minor->dev;
186 struct drm_file *priv; 183 struct drm_file *priv;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9909bef59800..f7d71190aad5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -474,21 +474,10 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
474 goto fail; 474 goto fail;
475 pages[i] = p; 475 pages[i] = p;
476 476
477 /* There is a hypothetical issue w/ drivers that require 477 /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
478 * buffer memory in the low 4GB.. if the pages are un- 478 * correct region during swapin. Note that this requires
479 * pinned, and swapped out, they can end up swapped back 479 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
480 * in above 4GB. If pages are already in memory, then 480 * so shmem can relocate pages during swapin if required.
481 * shmem_read_mapping_page_gfp will ignore the gfpmask,
482 * even if the already in-memory page disobeys the mask.
483 *
484 * It is only a theoretical issue today, because none of
485 * the devices with this limitation can be populated with
486 * enough memory to trigger the issue. But this BUG_ON()
487 * is here as a reminder in case the problem with
488 * shmem_read_mapping_page_gfp() isn't solved by the time
489 * it does become a real issue.
490 *
491 * See this thread: http://lkml.org/lkml/2011/7/11/238
492 */ 481 */
493 BUG_ON((gfpmask & __GFP_DMA32) && 482 BUG_ON((gfpmask & __GFP_DMA32) &&
494 (page_to_pfn(p) >= 0x00100000UL)); 483 (page_to_pfn(p) >= 0x00100000UL));
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 7473035dd28b..86feedd5e6f6 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,18 +47,16 @@ int drm_name_info(struct seq_file *m, void *data)
47 struct drm_minor *minor = node->minor; 47 struct drm_minor *minor = node->minor;
48 struct drm_device *dev = minor->dev; 48 struct drm_device *dev = minor->dev;
49 struct drm_master *master = minor->master; 49 struct drm_master *master = minor->master;
50 const char *bus_name;
51 if (!master) 50 if (!master)
52 return 0; 51 return 0;
53 52
54 bus_name = dev->driver->bus->get_name(dev);
55 if (master->unique) { 53 if (master->unique) {
56 seq_printf(m, "%s %s %s\n", 54 seq_printf(m, "%s %s %s\n",
57 bus_name, 55 dev->driver->name,
58 dev_name(dev->dev), master->unique); 56 dev_name(dev->dev), master->unique);
59 } else { 57 } else {
60 seq_printf(m, "%s %s\n", 58 seq_printf(m, "%s %s\n",
61 bus_name, dev_name(dev->dev)); 59 dev->driver->name, dev_name(dev->dev));
62 } 60 }
63 return 0; 61 return 0;
64} 62}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 93a42040bedb..69c61f392e66 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -72,9 +72,6 @@ static void
72drm_unset_busid(struct drm_device *dev, 72drm_unset_busid(struct drm_device *dev,
73 struct drm_master *master) 73 struct drm_master *master)
74{ 74{
75 kfree(dev->devname);
76 dev->devname = NULL;
77
78 kfree(master->unique); 75 kfree(master->unique);
79 master->unique = NULL; 76 master->unique = NULL;
80 master->unique_len = 0; 77 master->unique_len = 0;
@@ -93,7 +90,8 @@ drm_unset_busid(struct drm_device *dev,
93 * Copies the bus id from userspace into drm_device::unique, and verifies that 90 * Copies the bus id from userspace into drm_device::unique, and verifies that
94 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated 91 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
95 * in interface version 1.1 and will return EBUSY when setversion has requested 92 * in interface version 1.1 and will return EBUSY when setversion has requested
96 * version 1.1 or greater. 93 * version 1.1 or greater. Also note that KMS is always version 1.1 or later, and
 94 * UMS was only ever supported on PCI devices.
97 */ 95 */
98int drm_setunique(struct drm_device *dev, void *data, 96int drm_setunique(struct drm_device *dev, void *data,
99 struct drm_file *file_priv) 97 struct drm_file *file_priv)
@@ -108,10 +106,13 @@ int drm_setunique(struct drm_device *dev, void *data,
108 if (!u->unique_len || u->unique_len > 1024) 106 if (!u->unique_len || u->unique_len > 1024)
109 return -EINVAL; 107 return -EINVAL;
110 108
111 if (!dev->driver->bus->set_unique) 109 if (drm_core_check_feature(dev, DRIVER_MODESET))
110 return 0;
111
112 if (WARN_ON(!dev->pdev))
112 return -EINVAL; 113 return -EINVAL;
113 114
114 ret = dev->driver->bus->set_unique(dev, master, u); 115 ret = drm_pci_set_unique(dev, master, u);
115 if (ret) 116 if (ret)
116 goto err; 117 goto err;
117 118
@@ -130,13 +131,25 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
130 if (master->unique != NULL) 131 if (master->unique != NULL)
131 drm_unset_busid(dev, master); 132 drm_unset_busid(dev, master);
132 133
133 ret = dev->driver->bus->set_busid(dev, master); 134 if (dev->driver->bus && dev->driver->bus->set_busid) {
134 if (ret) 135 ret = dev->driver->bus->set_busid(dev, master);
135 goto err; 136 if (ret) {
137 drm_unset_busid(dev, master);
138 return ret;
139 }
140 } else {
141 if (WARN(dev->unique == NULL,
142 "No drm_bus.set_busid() implementation provided by "
143 "%ps. Use drm_dev_set_unique() to set the unique "
144 "name explicitly.", dev->driver))
145 return -EINVAL;
146
147 master->unique = kstrdup(dev->unique, GFP_KERNEL);
148 if (master->unique)
149 master->unique_len = strlen(dev->unique);
150 }
151
136 return 0; 152 return 0;
137err:
138 drm_unset_busid(dev, master);
139 return ret;
140} 153}
141 154
142/** 155/**
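
For drivers without a drm_bus (e.g. platform devices) the WARN above points at drm_dev_set_unique(). A hedged sketch of a platform probe (foo_* names are made up; the printf-style signature is assumed for this kernel version):

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM,
};

/* Illustrative: set the unique name explicitly so SET_BUSID works
 * without a drm_bus.set_busid() implementation. */
static int foo_probe(struct platform_device *pdev)
{
        struct drm_device *ddev;
        int ret;

        ddev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
        if (!ddev)
                return -ENOMEM;

        ret = drm_dev_set_unique(ddev, "platform:%s", dev_name(&pdev->dev));
        if (ret < 0)
                goto err_unref;

        return drm_dev_register(ddev, 0);

err_unref:
        drm_dev_unref(ddev);
        return ret;
}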
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index ec5c3f4cdd01..0de123afdb34 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1,6 +1,5 @@
1/** 1/*
2 * \file drm_irq.c 2 * drm_irq.c IRQ and vblank support
3 * IRQ support
4 * 3 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 4 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com> 5 * \author Gareth Hughes <gareth@valinux.com>
@@ -56,33 +55,6 @@
56 */ 55 */
57#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 56#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
58 57
59/**
60 * Get interrupt from bus id.
61 *
62 * \param inode device inode.
63 * \param file_priv DRM file private.
64 * \param cmd command.
65 * \param arg user argument, pointing to a drm_irq_busid structure.
66 * \return zero on success or a negative number on failure.
67 *
68 * Finds the PCI device with the specified bus id and gets its IRQ number.
69 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
70 * to that of the device that this DRM instance attached to.
71 */
72int drm_irq_by_busid(struct drm_device *dev, void *data,
73 struct drm_file *file_priv)
74{
75 struct drm_irq_busid *p = data;
76
77 if (!dev->driver->bus->irq_by_busid)
78 return -EINVAL;
79
80 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
81 return -EINVAL;
82
83 return dev->driver->bus->irq_by_busid(dev, p);
84}
85
86/* 58/*
87 * Clear vblank timestamp buffer for a crtc. 59 * Clear vblank timestamp buffer for a crtc.
88 */ 60 */
@@ -167,33 +139,40 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
167 139
168static void vblank_disable_fn(unsigned long arg) 140static void vblank_disable_fn(unsigned long arg)
169{ 141{
170 struct drm_device *dev = (struct drm_device *)arg; 142 struct drm_vblank_crtc *vblank = (void *)arg;
143 struct drm_device *dev = vblank->dev;
171 unsigned long irqflags; 144 unsigned long irqflags;
172 int i; 145 int crtc = vblank->crtc;
173 146
174 if (!dev->vblank_disable_allowed) 147 if (!dev->vblank_disable_allowed)
175 return; 148 return;
176 149
177 for (i = 0; i < dev->num_crtcs; i++) { 150 spin_lock_irqsave(&dev->vbl_lock, irqflags);
178 spin_lock_irqsave(&dev->vbl_lock, irqflags); 151 if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
179 if (atomic_read(&dev->vblank[i].refcount) == 0 && 152 DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
180 dev->vblank[i].enabled) { 153 vblank_disable_and_save(dev, crtc);
181 DRM_DEBUG("disabling vblank on crtc %d\n", i);
182 vblank_disable_and_save(dev, i);
183 }
184 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
185 } 154 }
155 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
186} 156}
187 157
158/**
159 * drm_vblank_cleanup - cleanup vblank support
160 * @dev: DRM device
161 *
162 * This function cleans up any resources allocated in drm_vblank_init.
163 */
188void drm_vblank_cleanup(struct drm_device *dev) 164void drm_vblank_cleanup(struct drm_device *dev)
189{ 165{
166 int crtc;
167
190 /* Bail if the driver didn't call drm_vblank_init() */ 168 /* Bail if the driver didn't call drm_vblank_init() */
191 if (dev->num_crtcs == 0) 169 if (dev->num_crtcs == 0)
192 return; 170 return;
193 171
194 del_timer_sync(&dev->vblank_disable_timer); 172 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
195 173 del_timer_sync(&dev->vblank[crtc].disable_timer);
196 vblank_disable_fn((unsigned long)dev); 174 vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
175 }
197 176
198 kfree(dev->vblank); 177 kfree(dev->vblank);
199 178
@@ -201,12 +180,20 @@ void drm_vblank_cleanup(struct drm_device *dev)
201} 180}
202EXPORT_SYMBOL(drm_vblank_cleanup); 181EXPORT_SYMBOL(drm_vblank_cleanup);
203 182
183/**
184 * drm_vblank_init - initialize vblank support
185 * @dev: drm_device
186 * @num_crtcs: number of crtcs supported by @dev
187 *
188 * This function initializes vblank support for @num_crtcs display pipelines.
189 *
190 * Returns:
191 * Zero on success or a negative error code on failure.
192 */
204int drm_vblank_init(struct drm_device *dev, int num_crtcs) 193int drm_vblank_init(struct drm_device *dev, int num_crtcs)
205{ 194{
206 int i, ret = -ENOMEM; 195 int i, ret = -ENOMEM;
207 196
208 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
209 (unsigned long)dev);
210 spin_lock_init(&dev->vbl_lock); 197 spin_lock_init(&dev->vbl_lock);
211 spin_lock_init(&dev->vblank_time_lock); 198 spin_lock_init(&dev->vblank_time_lock);
212 199
@@ -216,8 +203,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
216 if (!dev->vblank) 203 if (!dev->vblank)
217 goto err; 204 goto err;
218 205
219 for (i = 0; i < num_crtcs; i++) 206 for (i = 0; i < num_crtcs; i++) {
207 dev->vblank[i].dev = dev;
208 dev->vblank[i].crtc = i;
220 init_waitqueue_head(&dev->vblank[i].queue); 209 init_waitqueue_head(&dev->vblank[i].queue);
210 setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
211 (unsigned long)&dev->vblank[i]);
212 }
221 213
222 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 214 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
223 215
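
Drivers keep calling drm_vblank_init() as before; the per-CRTC disable timers are set up behind the scenes. A hedged sketch of a driver ->load callback (FOO_NUM_CRTCS is made up; setting vblank_disable_allowed follows the common driver pattern):

#define FOO_NUM_CRTCS 2

static int foo_load(struct drm_device *dev, unsigned long flags)
{
        int ret;

        /* Allocates per-CRTC vblank state, including the new
         * per-CRTC disable timers initialized above. */
        ret = drm_vblank_init(dev, FOO_NUM_CRTCS);
        if (ret)
                return ret;

        dev->vblank_disable_allowed = true;

        return 0;
}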
@@ -261,42 +253,42 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
261} 253}
262 254
263/** 255/**
264 * Install IRQ handler. 256 * drm_irq_install - install IRQ handler
265 * 257 * @dev: DRM device
266 * \param dev DRM device. 258 * @irq: IRQ number to install the handler for
267 * 259 *
268 * Initializes the IRQ related data. Installs the handler, calling the driver 260 * Initializes the IRQ related data. Installs the handler, calling the driver
269 * \c irq_preinstall() and \c irq_postinstall() functions 261 * irq_preinstall() and irq_postinstall() functions before and after the
270 * before and after the installation. 262 * installation.
263 *
264 * This is the simplified helper interface provided for drivers with no special
265 * needs. Drivers which need to install interrupt handlers for multiple
266 * interrupts must instead set drm_device->irq_enabled to signal the DRM core
267 * that vblank interrupts are available.
268 *
269 * Returns:
270 * Zero on success or a negative error code on failure.
271 */ 271 */
272int drm_irq_install(struct drm_device *dev) 272int drm_irq_install(struct drm_device *dev, int irq)
273{ 273{
274 int ret; 274 int ret;
275 unsigned long sh_flags = 0; 275 unsigned long sh_flags = 0;
276 char *irqname;
277 276
278 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 277 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
279 return -EINVAL; 278 return -EINVAL;
280 279
281 if (drm_dev_to_irq(dev) == 0) 280 if (irq == 0)
282 return -EINVAL; 281 return -EINVAL;
283 282
284 mutex_lock(&dev->struct_mutex);
285
286 /* Driver must have been initialized */ 283 /* Driver must have been initialized */
287 if (!dev->dev_private) { 284 if (!dev->dev_private)
288 mutex_unlock(&dev->struct_mutex);
289 return -EINVAL; 285 return -EINVAL;
290 }
291 286
292 if (dev->irq_enabled) { 287 if (dev->irq_enabled)
293 mutex_unlock(&dev->struct_mutex);
294 return -EBUSY; 288 return -EBUSY;
295 }
296 dev->irq_enabled = true; 289 dev->irq_enabled = true;
297 mutex_unlock(&dev->struct_mutex);
298 290
299 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 291 DRM_DEBUG("irq=%d\n", irq);
300 292
301 /* Before installing handler */ 293 /* Before installing handler */
302 if (dev->driver->irq_preinstall) 294 if (dev->driver->irq_preinstall)
@@ -306,18 +298,11 @@ int drm_irq_install(struct drm_device *dev)
306 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 298 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
307 sh_flags = IRQF_SHARED; 299 sh_flags = IRQF_SHARED;
308 300
309 if (dev->devname) 301 ret = request_irq(irq, dev->driver->irq_handler,
310 irqname = dev->devname; 302 sh_flags, dev->driver->name, dev);
311 else
312 irqname = dev->driver->name;
313
314 ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
315 sh_flags, irqname, dev);
316 303
317 if (ret < 0) { 304 if (ret < 0) {
318 mutex_lock(&dev->struct_mutex);
319 dev->irq_enabled = false; 305 dev->irq_enabled = false;
320 mutex_unlock(&dev->struct_mutex);
321 return ret; 306 return ret;
322 } 307 }
323 308
@@ -329,12 +314,12 @@ int drm_irq_install(struct drm_device *dev)
329 ret = dev->driver->irq_postinstall(dev); 314 ret = dev->driver->irq_postinstall(dev);
330 315
331 if (ret < 0) { 316 if (ret < 0) {
332 mutex_lock(&dev->struct_mutex);
333 dev->irq_enabled = false; 317 dev->irq_enabled = false;
334 mutex_unlock(&dev->struct_mutex);
335 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 318 if (!drm_core_check_feature(dev, DRIVER_MODESET))
336 vga_client_register(dev->pdev, NULL, NULL, NULL); 319 vga_client_register(dev->pdev, NULL, NULL, NULL);
337 free_irq(drm_dev_to_irq(dev), dev); 320 free_irq(irq, dev);
321 } else {
322 dev->irq = irq;
338 } 323 }
339 324
340 return ret; 325 return ret;
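
Callers now pass the IRQ number explicitly: PCI drivers take it from the pci_dev, platform drivers typically from platform_get_irq(). A minimal sketch (the foo_ name is made up):

static int foo_enable_irq(struct drm_device *dev)
{
        /* The core no longer derives the number via drm_dev_to_irq();
         * the driver supplies it. */
        return drm_irq_install(dev, dev->pdev->irq);
}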
@@ -342,11 +327,20 @@ int drm_irq_install(struct drm_device *dev)
342EXPORT_SYMBOL(drm_irq_install); 327EXPORT_SYMBOL(drm_irq_install);
343 328
344/** 329/**
345 * Uninstall the IRQ handler. 330 * drm_irq_uninstall - uninstall the IRQ handler
331 * @dev: DRM device
332 *
333 * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
334 * This should only be called by drivers which used drm_irq_install() to set up
335 * their interrupt handler. Other drivers must only reset
336 * drm_device->irq_enabled to false.
346 * 337 *
347 * \param dev DRM device. 338 * Note that for kernel modesetting drivers it is a bug if this function fails.
339 * The sanity checks are only to catch buggy user modesetting drivers which call
340 * the same function through an ioctl.
348 * 341 *
349 * Calls the driver's \c irq_uninstall() function, and stops the irq. 342 * Returns:
343 * Zero on success or a negative error code on failure.
350 */ 344 */
351int drm_irq_uninstall(struct drm_device *dev) 345int drm_irq_uninstall(struct drm_device *dev)
352{ 346{
@@ -357,10 +351,8 @@ int drm_irq_uninstall(struct drm_device *dev)
357 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 351 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
358 return -EINVAL; 352 return -EINVAL;
359 353
360 mutex_lock(&dev->struct_mutex);
361 irq_enabled = dev->irq_enabled; 354 irq_enabled = dev->irq_enabled;
362 dev->irq_enabled = false; 355 dev->irq_enabled = false;
363 mutex_unlock(&dev->struct_mutex);
364 356
365 /* 357 /*
366 * Wake up any waiters so they don't hang. 358 * Wake up any waiters so they don't hang.
@@ -379,7 +371,7 @@ int drm_irq_uninstall(struct drm_device *dev)
379 if (!irq_enabled) 371 if (!irq_enabled)
380 return -EINVAL; 372 return -EINVAL;
381 373
382 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 374 DRM_DEBUG("irq=%d\n", dev->irq);
383 375
384 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 376 if (!drm_core_check_feature(dev, DRIVER_MODESET))
385 vga_client_register(dev->pdev, NULL, NULL, NULL); 377 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -387,13 +379,13 @@ int drm_irq_uninstall(struct drm_device *dev)
387 if (dev->driver->irq_uninstall) 379 if (dev->driver->irq_uninstall)
388 dev->driver->irq_uninstall(dev); 380 dev->driver->irq_uninstall(dev);
389 381
390 free_irq(drm_dev_to_irq(dev), dev); 382 free_irq(dev->irq, dev);
391 383
392 return 0; 384 return 0;
393} 385}
394EXPORT_SYMBOL(drm_irq_uninstall); 386EXPORT_SYMBOL(drm_irq_uninstall);
395 387
396/** 388/*
397 * IRQ control ioctl. 389 * IRQ control ioctl.
398 * 390 *
399 * \param inode device inode. 391 * \param inode device inode.
@@ -408,43 +400,52 @@ int drm_control(struct drm_device *dev, void *data,
408 struct drm_file *file_priv) 400 struct drm_file *file_priv)
409{ 401{
410 struct drm_control *ctl = data; 402 struct drm_control *ctl = data;
403 int ret = 0, irq;
411 404
412 /* if we don't have an irq we fall back for compatibility reasons - 405
413 * this used to be a separate function in drm_dma.h 406 * this used to be a separate function in drm_dma.h
414 */ 407 */
415 408
409 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
410 return 0;
411 if (drm_core_check_feature(dev, DRIVER_MODESET))
412 return 0;
413 /* UMS was only ever supported on PCI devices. */
414 if (WARN_ON(!dev->pdev))
415 return -EINVAL;
416 416
417 switch (ctl->func) { 417 switch (ctl->func) {
418 case DRM_INST_HANDLER: 418 case DRM_INST_HANDLER:
419 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 419 irq = dev->pdev->irq;
420 return 0; 420
421 if (drm_core_check_feature(dev, DRIVER_MODESET))
422 return 0;
423 if (dev->if_version < DRM_IF_VERSION(1, 2) && 421 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
424 ctl->irq != drm_dev_to_irq(dev)) 422 ctl->irq != irq)
425 return -EINVAL; 423 return -EINVAL;
426 return drm_irq_install(dev); 424 mutex_lock(&dev->struct_mutex);
425 ret = drm_irq_install(dev, irq);
426 mutex_unlock(&dev->struct_mutex);
427
428 return ret;
427 case DRM_UNINST_HANDLER: 429 case DRM_UNINST_HANDLER:
428 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 430 mutex_lock(&dev->struct_mutex);
429 return 0; 431 ret = drm_irq_uninstall(dev);
430 if (drm_core_check_feature(dev, DRIVER_MODESET)) 432 mutex_unlock(&dev->struct_mutex);
431 return 0; 433
432 return drm_irq_uninstall(dev); 434 return ret;
433 default: 435 default:
434 return -EINVAL; 436 return -EINVAL;
435 } 437 }
436} 438}
437 439
438/** 440/**
439 * drm_calc_timestamping_constants - Calculate vblank timestamp constants 441 * drm_calc_timestamping_constants - calculate vblank timestamp constants
440 * 442 * @crtc: drm_crtc whose timestamp constants should be updated.
441 * @crtc drm_crtc whose timestamp constants should be updated. 443 * @mode: display mode containing the scanout timings
442 * @mode display mode containing the scanout timings
443 * 444 *
444 * Calculate and store various constants which are later 445 * Calculate and store various constants which are later
445 * needed by vblank and swap-completion timestamping, e.g, 446 * needed by vblank and swap-completion timestamping, e.g,
446 * by drm_calc_vbltimestamp_from_scanoutpos(). They are 447 * by drm_calc_vbltimestamp_from_scanoutpos(). They are
447 * derived from crtc's true scanout timing, so they take 448 * derived from CRTC's true scanout timing, so they take
448 * things like panel scaling or other adjustments into account. 449 * things like panel scaling or other adjustments into account.
449 */ 450 */
450void drm_calc_timestamping_constants(struct drm_crtc *crtc, 451void drm_calc_timestamping_constants(struct drm_crtc *crtc,
@@ -489,11 +490,22 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
489EXPORT_SYMBOL(drm_calc_timestamping_constants); 490EXPORT_SYMBOL(drm_calc_timestamping_constants);
490 491
491/** 492/**
492 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms 493 * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
493 * drivers. Implements calculation of exact vblank timestamps from 494 * @dev: DRM device
494 * given drm_display_mode timings and current video scanout position 495 * @crtc: Which CRTC's vblank timestamp to retrieve
495 * of a crtc. This can be called from within get_vblank_timestamp() 496 * @max_error: Desired maximum allowable error in timestamps (nanosecs)
496 * implementation of a kms driver to implement the actual timestamping. 497 * On return contains true maximum error of timestamp
498 * @vblank_time: Pointer to struct timeval which should receive the timestamp
499 * @flags: Flags to pass to driver:
500 * 0 = Default,
501 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
502 * @refcrtc: CRTC which defines scanout timing
503 * @mode: mode which defines the scanout timings
504 *
505 * Implements calculation of exact vblank timestamps from given drm_display_mode
506 * timings and current video scanout position of a CRTC. This can be called from
507 * within get_vblank_timestamp() implementation of a kms driver to implement the
508 * actual timestamping.
497 * 509 *
498 * Should return timestamps conforming to the OML_sync_control OpenML 510 * Should return timestamps conforming to the OML_sync_control OpenML
499 * extension specification. The timestamp corresponds to the end of 511 * extension specification. The timestamp corresponds to the end of
@@ -508,21 +520,11 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
508 * returns as no operation if a doublescan or interlaced video mode is 520 * returns as no operation if a doublescan or interlaced video mode is
509 * active. Higher level code is expected to handle this. 521 * active. Higher level code is expected to handle this.
510 * 522 *
511 * @dev: DRM device. 523 * Returns:
512 * @crtc: Which crtc's vblank timestamp to retrieve. 524 * Negative value on error, failure or if not supported in current
513 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
514 * On return contains true maximum error of timestamp.
515 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
516 * @flags: Flags to pass to driver:
517 * 0 = Default.
518 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
519 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
520 * @mode: mode which defines the scanout timings
521 *
522 * Returns negative value on error, failure or if not supported in current
523 * video mode: 525 * video mode:
524 * 526 *
525 * -EINVAL - Invalid crtc. 527 * -EINVAL - Invalid CRTC.
526 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. 528 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
527 * -ENOTSUPP - Function not supported in current display mode. 529 * -ENOTSUPP - Function not supported in current display mode.
528 * -EIO - Failed, e.g., due to failed scanout position query. 530 * -EIO - Failed, e.g., due to failed scanout position query.
@@ -671,23 +673,23 @@ static struct timeval get_drm_timestamp(void)
671 673
672/** 674/**
673 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 675 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
674 * vblank interval. 676 * vblank interval
675 *
676 * @dev: DRM device 677 * @dev: DRM device
677 * @crtc: which crtc's vblank timestamp to retrieve 678 * @crtc: which CRTC's vblank timestamp to retrieve
678 * @tvblank: Pointer to target struct timeval which should receive the timestamp 679 * @tvblank: Pointer to target struct timeval which should receive the timestamp
679 * @flags: Flags to pass to driver: 680 * @flags: Flags to pass to driver:
680 * 0 = Default. 681 * 0 = Default,
681 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. 682 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
682 * 683 *
683 * Fetches the system timestamp corresponding to the time of the most recent 684 * Fetches the system timestamp corresponding to the time of the most recent
684 * vblank interval on specified crtc. May call into kms-driver to 685 * vblank interval on specified CRTC. May call into kms-driver to
685 * compute the timestamp with a high-precision GPU specific method. 686 * compute the timestamp with a high-precision GPU specific method.
686 * 687 *
687 * Returns zero if timestamp originates from uncorrected do_gettimeofday() 688 * Returns zero if timestamp originates from uncorrected do_gettimeofday()
688 * call, i.e., it isn't very precisely locked to the true vblank. 689 * call, i.e., it isn't very precisely locked to the true vblank.
689 * 690 *
690 * Returns non-zero if timestamp is considered to be very precise. 691 * Returns:
692 * Non-zero if timestamp is considered to be very precise, zero otherwise.
691 */ 693 */
692u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 694u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
693 struct timeval *tvblank, unsigned flags) 695 struct timeval *tvblank, unsigned flags)
@@ -722,6 +724,9 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
722 * Fetches the "cooked" vblank count value that represents the number of 724 * Fetches the "cooked" vblank count value that represents the number of
723 * vblank events since the system was booted, including lost events due to 725 * vblank events since the system was booted, including lost events due to
724 * modesetting activity. 726 * modesetting activity.
727 *
728 * Returns:
729 * The software vblank counter.
725 */ 730 */
726u32 drm_vblank_count(struct drm_device *dev, int crtc) 731u32 drm_vblank_count(struct drm_device *dev, int crtc)
727{ 732{
@@ -740,8 +745,7 @@ EXPORT_SYMBOL(drm_vblank_count);
740 * Fetches the "cooked" vblank count value that represents the number of 745 * Fetches the "cooked" vblank count value that represents the number of
741 * vblank events since the system was booted, including lost events due to 746 * vblank events since the system was booted, including lost events due to
742 * modesetting activity. Returns corresponding system timestamp of the time 747 * modesetting activity. Returns corresponding system timestamp of the time
743 * of the vblank interval that corresponds to the current value vblank counter 748 * of the vblank interval that corresponds to the current vblank counter value.
744 * value.
745 */ 749 */
746u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 750u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
747 struct timeval *vblanktime) 751 struct timeval *vblanktime)
@@ -870,6 +874,42 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
870} 874}
871 875
872/** 876/**
877 * drm_vblank_enable - enable the vblank interrupt on a CRTC
878 * @dev: DRM device
879 * @crtc: CRTC in question
880 */
881static int drm_vblank_enable(struct drm_device *dev, int crtc)
882{
883 int ret = 0;
884
885 assert_spin_locked(&dev->vbl_lock);
886
887 spin_lock(&dev->vblank_time_lock);
888
889 if (!dev->vblank[crtc].enabled) {
890 /*
891 * Enable vblank irqs under vblank_time_lock protection.
892 * All vblank count & timestamp updates are held off
893 * until we are done reinitializing master counter and
894 * timestamps. Filtercode in drm_handle_vblank() will
895 * prevent double-accounting of same vblank interval.
896 */
897 ret = dev->driver->enable_vblank(dev, crtc);
898 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
899 if (ret)
900 atomic_dec(&dev->vblank[crtc].refcount);
901 else {
902 dev->vblank[crtc].enabled = true;
903 drm_update_vblank_count(dev, crtc);
904 }
905 }
906
907 spin_unlock(&dev->vblank_time_lock);
908
909 return ret;
910}
911
912/**
873 * drm_vblank_get - get a reference count on vblank events 913 * drm_vblank_get - get a reference count on vblank events
874 * @dev: DRM device 914 * @dev: DRM device
875 * @crtc: which CRTC to own 915 * @crtc: which CRTC to own
@@ -877,36 +917,20 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
877 * Acquire a reference count on vblank events to avoid having them disabled 917 * Acquire a reference count on vblank events to avoid having them disabled
878 * while in use. 918 * while in use.
879 * 919 *
880 * RETURNS 920 * This is the legacy version of drm_crtc_vblank_get().
921 *
922 * Returns:
881 * Zero on success, nonzero on failure. 923 * Zero on success, nonzero on failure.
882 */ 924 */
883int drm_vblank_get(struct drm_device *dev, int crtc) 925int drm_vblank_get(struct drm_device *dev, int crtc)
884{ 926{
885 unsigned long irqflags, irqflags2; 927 unsigned long irqflags;
886 int ret = 0; 928 int ret = 0;
887 929
888 spin_lock_irqsave(&dev->vbl_lock, irqflags); 930 spin_lock_irqsave(&dev->vbl_lock, irqflags);
889 /* Going from 0->1 means we have to enable interrupts again */ 931 /* Going from 0->1 means we have to enable interrupts again */
890 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) { 932 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
891 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 933 ret = drm_vblank_enable(dev, crtc);
892 if (!dev->vblank[crtc].enabled) {
893 /* Enable vblank irqs under vblank_time_lock protection.
894 * All vblank count & timestamp updates are held off
895 * until we are done reinitializing master counter and
896 * timestamps. Filtercode in drm_handle_vblank() will
897 * prevent double-accounting of same vblank interval.
898 */
899 ret = dev->driver->enable_vblank(dev, crtc);
900 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
901 crtc, ret);
902 if (ret)
903 atomic_dec(&dev->vblank[crtc].refcount);
904 else {
905 dev->vblank[crtc].enabled = true;
906 drm_update_vblank_count(dev, crtc);
907 }
908 }
909 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
910 } else { 934 } else {
911 if (!dev->vblank[crtc].enabled) { 935 if (!dev->vblank[crtc].enabled) {
912 atomic_dec(&dev->vblank[crtc].refcount); 936 atomic_dec(&dev->vblank[crtc].refcount);
@@ -920,12 +944,32 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
920EXPORT_SYMBOL(drm_vblank_get); 944EXPORT_SYMBOL(drm_vblank_get);
921 945
922/** 946/**
947 * drm_crtc_vblank_get - get a reference count on vblank events
948 * @crtc: which CRTC to own
949 *
950 * Acquire a reference count on vblank events to avoid having them disabled
951 * while in use.
952 *
953 * This is the native kms version of drm_vblank_get().
954 *
955 * Returns:
956 * Zero on success, nonzero on failure.
957 */
958int drm_crtc_vblank_get(struct drm_crtc *crtc)
959{
960 return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
961}
962EXPORT_SYMBOL(drm_crtc_vblank_get);
963
964/**
923 * drm_vblank_put - give up ownership of vblank events 965 * drm_vblank_put - give up ownership of vblank events
924 * @dev: DRM device 966 * @dev: DRM device
925 * @crtc: which counter to give up 967 * @crtc: which counter to give up
926 * 968 *
927 * Release ownership of a given vblank counter, turning off interrupts 969 * Release ownership of a given vblank counter, turning off interrupts
928 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 970 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
971 *
972 * This is the legacy version of drm_crtc_vblank_put().
929 */ 973 */
930void drm_vblank_put(struct drm_device *dev, int crtc) 974void drm_vblank_put(struct drm_device *dev, int crtc)
931{ 975{
@@ -934,17 +978,39 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
934 /* Last user schedules interrupt disable */ 978 /* Last user schedules interrupt disable */
935 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) && 979 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
936 (drm_vblank_offdelay > 0)) 980 (drm_vblank_offdelay > 0))
937 mod_timer(&dev->vblank_disable_timer, 981 mod_timer(&dev->vblank[crtc].disable_timer,
938 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 982 jiffies + ((drm_vblank_offdelay * HZ)/1000));
939} 983}
940EXPORT_SYMBOL(drm_vblank_put); 984EXPORT_SYMBOL(drm_vblank_put);
941 985
942/** 986/**
987 * drm_crtc_vblank_put - give up ownership of vblank events
988 * @crtc: which counter to give up
989 *
990 * Release ownership of a given vblank counter, turning off interrupts
991 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
992 *
993 * This is the native kms version of drm_vblank_put().
994 */
995void drm_crtc_vblank_put(struct drm_crtc *crtc)
996{
997 drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
998}
999EXPORT_SYMBOL(drm_crtc_vblank_put);
1000
1001/**
943 * drm_vblank_off - disable vblank events on a CRTC 1002 * drm_vblank_off - disable vblank events on a CRTC
944 * @dev: DRM device 1003 * @dev: DRM device
945 * @crtc: CRTC in question 1004 * @crtc: CRTC in question
946 * 1005 *
947 * Caller must hold event lock. 1006 * Drivers can use this function to shut down the vblank interrupt handling when
1007 * disabling a crtc. This function ensures that the latest vblank frame count is
1008 * stored so that drm_vblank_on() can restore it again.
1009 *
1010 * Drivers must use this function when the hardware vblank counter can get
1011 * reset, e.g. when suspending.
1012 *
1013 * This is the legacy version of drm_crtc_vblank_off().
948 */ 1014 */
949void drm_vblank_off(struct drm_device *dev, int crtc) 1015void drm_vblank_off(struct drm_device *dev, int crtc)
950{ 1016{
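
The new drm_crtc_* wrappers let KMS drivers pass the CRTC directly instead of a (dev, index) pair. A hedged sketch of a page-flip path using them (the foo_ name is made up):

/* Illustrative: hold a vblank reference while a flip is pending so
 * the interrupt cannot be disabled underneath it. */
static int foo_queue_flip(struct drm_crtc *crtc)
{
        int ret;

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                return ret;

        /* ... program the flip; the completion handler calls
         * drm_crtc_vblank_put(crtc) once the event fires ... */

        return 0;
}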
@@ -978,12 +1044,87 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
978EXPORT_SYMBOL(drm_vblank_off); 1044EXPORT_SYMBOL(drm_vblank_off);
979 1045
980/** 1046/**
1047 * drm_crtc_vblank_off - disable vblank events on a CRTC
1048 * @crtc: CRTC in question
1049 *
1050 * Drivers can use this function to shut down the vblank interrupt handling when
1051 * disabling a crtc. This function ensures that the latest vblank frame count is
1052 * stored so that drm_vblank_on() can restore it again.
1053 *
1054 * Drivers must use this function when the hardware vblank counter can get
1055 * reset, e.g. when suspending.
1056 *
1057 * This is the native kms version of drm_vblank_off().
1058 */
1059void drm_crtc_vblank_off(struct drm_crtc *crtc)
1060{
1061 drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
1062}
1063EXPORT_SYMBOL(drm_crtc_vblank_off);
1064
1065/**
1066 * drm_vblank_on - enable vblank events on a CRTC
1067 * @dev: DRM device
1068 * @crtc: CRTC in question
1069 *
1070 * This function restores the vblank interrupt state captured with
1071 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
1072 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
1073 * in driver load code to reflect the current hardware state of the crtc.
1074 *
1075 * This is the legacy version of drm_crtc_vblank_on().
1076 */
1077void drm_vblank_on(struct drm_device *dev, int crtc)
1078{
1079 unsigned long irqflags;
1080
1081 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1082 /* re-enable interrupts if there are users left */
1083 if (atomic_read(&dev->vblank[crtc].refcount) != 0)
1084 WARN_ON(drm_vblank_enable(dev, crtc));
1085 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1086}
1087EXPORT_SYMBOL(drm_vblank_on);
1088
1089/**
1090 * drm_crtc_vblank_on - enable vblank events on a CRTC
1091 * @crtc: CRTC in question
1092 *
1093 * This function restores the vblank interrupt state captured with
1094 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
1095 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
1096 * in driver load code to reflect the current hardware state of the crtc.
1097 *
1098 * This is the native kms version of drm_vblank_on().
1099 */
1100void drm_crtc_vblank_on(struct drm_crtc *crtc)
1101{
1102 drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
1103}
1104EXPORT_SYMBOL(drm_crtc_vblank_on);
1105
1106/**
981 * drm_vblank_pre_modeset - account for vblanks across mode sets 1107 * drm_vblank_pre_modeset - account for vblanks across mode sets
982 * @dev: DRM device 1108 * @dev: DRM device
983 * @crtc: CRTC in question 1109 * @crtc: CRTC in question
984 * 1110 *
985 * Account for vblank events across mode setting events, which will likely 1111 * Account for vblank events across mode setting events, which will likely
986 * reset the hardware frame counter. 1112 * reset the hardware frame counter.
1113 *
1114 * This is done by grabbing a temporary vblank reference to ensure that the
1115 * vblank interrupt keeps running across the modeset sequence. With this the
1116 * software-side vblank frame counting will ensure that there are no jumps or
1117 * discontinuities.
1118 *
1119 * Unfortunately this approach is racy and also doesn't work when the vblank
1120 * interrupt stops running, e.g. across system suspend/resume. It is therefore
1121 * highly recommended that drivers use the newer drm_vblank_off() and
1122 * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
1123 * using "cooked" software vblank frame counters and not relying on any hardware
1124 * counters.
1125 *
1126 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
1127 * again.
987 */ 1128 */
988void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1129void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
989{ 1130{
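
Drivers pair drm_crtc_vblank_off() and drm_crtc_vblank_on() around CRTC state changes so the software frame counter stays monotonic across the transition. A hedged sketch of the hooks (foo_* names are made up):

static void foo_crtc_disable(struct drm_crtc *crtc)
{
        drm_crtc_vblank_off(crtc);
        /* ... turn the pipe off in hardware ... */
}

static void foo_crtc_enable(struct drm_crtc *crtc)
{
        /* ... turn the pipe on in hardware ... */
        drm_crtc_vblank_on(crtc);
}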
@@ -1005,6 +1146,14 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1005} 1146}
1006EXPORT_SYMBOL(drm_vblank_pre_modeset); 1147EXPORT_SYMBOL(drm_vblank_pre_modeset);
1007 1148
1149/**
1150 * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
1151 * @dev: DRM device
1152 * @crtc: CRTC in question
1153 *
1154 * This function again drops the temporary vblank reference acquired in
1155 * drm_vblank_pre_modeset.
1156 */
1008void drm_vblank_post_modeset(struct drm_device *dev, int crtc) 1157void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1009{ 1158{
1010 unsigned long irqflags; 1159 unsigned long irqflags;
@@ -1026,7 +1175,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1026} 1175}
1027EXPORT_SYMBOL(drm_vblank_post_modeset); 1176EXPORT_SYMBOL(drm_vblank_post_modeset);
1028 1177
1029/** 1178/*
1030 * drm_modeset_ctl - handle vblank event counter changes across mode switch 1179 * drm_modeset_ctl - handle vblank event counter changes across mode switch
1031 * @DRM_IOCTL_ARGS: standard ioctl arguments 1180 * @DRM_IOCTL_ARGS: standard ioctl arguments
1032 * 1181 *
@@ -1139,7 +1288,7 @@ err_put:
1139 return ret; 1288 return ret;
1140} 1289}
1141 1290
1142/** 1291/*
1143 * Wait for VBLANK. 1292 * Wait for VBLANK.
1144 * 1293 *
1145 * \param inode device inode. 1294 * \param inode device inode.
@@ -1150,7 +1299,7 @@ err_put:
1150 * 1299 *
1151 * This function enables the vblank interrupt on the pipe requested, then 1300 * This function enables the vblank interrupt on the pipe requested, then
1152 * sleeps waiting for the requested sequence number to occur, and drops 1301 * sleeps waiting for the requested sequence number to occur, and drops
1153 * the vblank interrupt refcount afterwards. (vblank irq disable follows that 1302 * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
1154 * after a timeout with no further vblank waits scheduled). 1303 * after a timeout with no further vblank waits scheduled).
1155 */ 1304 */
1156int drm_wait_vblank(struct drm_device *dev, void *data, 1305int drm_wait_vblank(struct drm_device *dev, void *data,
@@ -1160,9 +1309,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1160 int ret; 1309 int ret;
1161 unsigned int flags, seq, crtc, high_crtc; 1310 unsigned int flags, seq, crtc, high_crtc;
1162 1311
1163 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 1312 if (!dev->irq_enabled)
1164 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) 1313 return -EINVAL;
1165 return -EINVAL;
1166 1314
1167 if (vblwait->request.type & _DRM_VBLANK_SIGNAL) 1315 if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
1168 return -EINVAL; 1316 return -EINVAL;
@@ -1222,6 +1370,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1222 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ, 1370 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
1223 (((drm_vblank_count(dev, crtc) - 1371 (((drm_vblank_count(dev, crtc) -
1224 vblwait->request.sequence) <= (1 << 23)) || 1372 vblwait->request.sequence) <= (1 << 23)) ||
1373 !dev->vblank[crtc].enabled ||
1225 !dev->irq_enabled)); 1374 !dev->irq_enabled));
1226 1375
1227 if (ret != -EINTR) { 1376 if (ret != -EINTR) {
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 09821f46d768..e633df2f68d8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -282,6 +282,14 @@ static int mipi_dsi_drv_remove(struct device *dev)
282 return drv->remove(dsi); 282 return drv->remove(dsi);
283} 283}
284 284
285static void mipi_dsi_drv_shutdown(struct device *dev)
286{
287 struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
288 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
289
290 drv->shutdown(dsi);
291}
292
285/** 293/**
286 * mipi_dsi_driver_register - register a driver for DSI devices 294 * mipi_dsi_driver_register - register a driver for DSI devices
287 * @drv: DSI driver structure 295 * @drv: DSI driver structure
@@ -293,6 +301,8 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
293 drv->driver.probe = mipi_dsi_drv_probe; 301 drv->driver.probe = mipi_dsi_drv_probe;
294 if (drv->remove) 302 if (drv->remove)
295 drv->driver.remove = mipi_dsi_drv_remove; 303 drv->driver.remove = mipi_dsi_drv_remove;
304 if (drv->shutdown)
305 drv->driver.shutdown = mipi_dsi_drv_shutdown;
296 306
297 return driver_register(&drv->driver); 307 return driver_register(&drv->driver);
298} 308}
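
DSI drivers can now provide an optional shutdown hook, wired into the driver core alongside probe/remove. A hedged sketch of a panel driver using it (foo_* names are made up; the driver would be registered with mipi_dsi_driver_register()):

#include <drm/drm_mipi_dsi.h>

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
        return 0;
}

static int foo_panel_remove(struct mipi_dsi_device *dsi)
{
        return 0;
}

static void foo_panel_shutdown(struct mipi_dsi_device *dsi)
{
        /* e.g. blank the panel before the system powers off */
}

static struct mipi_dsi_driver foo_panel_driver = {
        .driver.name = "foo-panel",
        .probe = foo_panel_probe,
        .remove = foo_panel_remove,
        .shutdown = foo_panel_shutdown,
};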
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 8b410576fce4..bedf1894e17e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1013,6 +1013,7 @@ EXPORT_SYMBOL(drm_mode_sort);
1013/** 1013/**
1014 * drm_mode_connector_list_update - update the mode list for the connector 1014 * drm_mode_connector_list_update - update the mode list for the connector
1015 * @connector: the connector to update 1015 * @connector: the connector to update
 1016 * @merge_type_bits: whether to merge or overwrite type bits.
1016 * 1017 *
1017 * This moves the modes from the @connector probed_modes list 1018 * This moves the modes from the @connector probed_modes list
1018 * to the actual mode list. It compares the probed mode against the current 1019 * to the actual mode list. It compares the probed mode against the current
@@ -1021,7 +1022,8 @@ EXPORT_SYMBOL(drm_mode_sort);
 1021 * This is just a helper function; it doesn't validate any modes itself and also 1022 * This is just a helper function; it doesn't validate any modes itself and also
1022 * doesn't prune any invalid modes. Callers need to do that themselves. 1023 * doesn't prune any invalid modes. Callers need to do that themselves.
1023 */ 1024 */
1024void drm_mode_connector_list_update(struct drm_connector *connector) 1025void drm_mode_connector_list_update(struct drm_connector *connector,
1026 bool merge_type_bits)
1025{ 1027{
1026 struct drm_display_mode *mode; 1028 struct drm_display_mode *mode;
1027 struct drm_display_mode *pmode, *pt; 1029 struct drm_display_mode *pmode, *pt;
@@ -1039,7 +1041,10 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1039 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1040 mode->status = pmode->status; 1042 mode->status = pmode->status;
1041 /* Merge type bits together */ 1043 /* Merge type bits together */
1042 mode->type |= pmode->type; 1044 if (merge_type_bits)
1045 mode->type |= pmode->type;
1046 else
1047 mode->type = pmode->type;
1043 list_del(&pmode->head); 1048 list_del(&pmode->head);
1044 drm_mode_destroy(connector->dev, pmode); 1049 drm_mode_destroy(connector->dev, pmode);
1045 break; 1050 break;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
new file mode 100644
index 000000000000..7c2497dea1e9
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include <drm/drmP.h>
25#include <drm/drm_crtc.h>
26#include <drm/drm_modeset_lock.h>
27
28/**
29 * DOC: kms locking
30 *
 31 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 32 * userspace can indirectly control locking order, it becomes necessary
33 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
34 * the locking is more distributed around the driver code, we want a bit
35 * of extra utility/tracking out of our acquire-ctx. This is provided
36 * by drm_modeset_lock / drm_modeset_acquire_ctx.
37 *
38 * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
39 *
40 * The basic usage pattern is to:
41 *
42 * drm_modeset_acquire_init(&ctx)
43 * retry:
44 * foreach (lock in random_ordered_set_of_locks) {
45 * ret = drm_modeset_lock(lock, &ctx)
46 * if (ret == -EDEADLK) {
47 * drm_modeset_backoff(&ctx);
48 * goto retry;
49 * }
50 * }
51 *
52 * ... do stuff ...
53 *
54 * drm_modeset_drop_locks(&ctx);
55 * drm_modeset_acquire_fini(&ctx);
56 */
57
58
59/**
60 * drm_modeset_acquire_init - initialize acquire context
61 * @ctx: the acquire context
 62 * @flags: reserved for future use (currently unused)
63 */
64void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
65 uint32_t flags)
66{
67 ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
68 INIT_LIST_HEAD(&ctx->locked);
69}
70EXPORT_SYMBOL(drm_modeset_acquire_init);
71
72/**
73 * drm_modeset_acquire_fini - cleanup acquire context
74 * @ctx: the acquire context
75 */
76void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
77{
78 ww_acquire_fini(&ctx->ww_ctx);
79}
80EXPORT_SYMBOL(drm_modeset_acquire_fini);
81
82/**
83 * drm_modeset_drop_locks - drop all locks
84 * @ctx: the acquire context
85 *
86 * Drop all locks currently held against this acquire context.
87 */
88void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
89{
90 WARN_ON(ctx->contended);
91 while (!list_empty(&ctx->locked)) {
92 struct drm_modeset_lock *lock;
93
94 lock = list_first_entry(&ctx->locked,
95 struct drm_modeset_lock, head);
96
97 drm_modeset_unlock(lock);
98 }
99}
100EXPORT_SYMBOL(drm_modeset_drop_locks);
101
102static inline int modeset_lock(struct drm_modeset_lock *lock,
103 struct drm_modeset_acquire_ctx *ctx,
104 bool interruptible, bool slow)
105{
106 int ret;
107
108 WARN_ON(ctx->contended);
109
110 if (interruptible && slow) {
111 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
112 } else if (interruptible) {
113 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
114 } else if (slow) {
115 ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
116 ret = 0;
117 } else {
118 ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
119 }
120 if (!ret) {
121 WARN_ON(!list_empty(&lock->head));
122 list_add(&lock->head, &ctx->locked);
123 } else if (ret == -EALREADY) {
124 /* we already hold the lock.. this is fine. For atomic
125 * we will need to be able to drm_modeset_lock() things
126 * without having to keep track of what is already locked
127 * or not.
128 */
129 ret = 0;
130 } else if (ret == -EDEADLK) {
131 ctx->contended = lock;
132 }
133
134 return ret;
135}
136
137static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
138 bool interruptible)
139{
140 struct drm_modeset_lock *contended = ctx->contended;
141
142 ctx->contended = NULL;
143
144 if (WARN_ON(!contended))
145 return 0;
146
147 drm_modeset_drop_locks(ctx);
148
149 return modeset_lock(contended, ctx, interruptible, true);
150}
151
152/**
153 * drm_modeset_backoff - deadlock avoidance backoff
154 * @ctx: the acquire context
155 *
 156 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
157 * you must call this function to drop all currently held locks and
158 * block until the contended lock becomes available.
159 */
160void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
161{
162 modeset_backoff(ctx, false);
163}
164EXPORT_SYMBOL(drm_modeset_backoff);
165
166/**
167 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
168 * @ctx: the acquire context
169 *
170 * Interruptible version of drm_modeset_backoff()
171 */
172int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
173{
174 return modeset_backoff(ctx, true);
175}
176EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
177
178/**
179 * drm_modeset_lock - take modeset lock
180 * @lock: lock to take
181 * @ctx: acquire ctx
182 *
183 * If ctx is not NULL, then its ww acquire context is used and the
184 * lock will be tracked by the context and can be released by calling
185 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
186 * deadlock scenario has been detected and it is an error to attempt
187 * to take any more locks without first calling drm_modeset_backoff().
188 */
189int drm_modeset_lock(struct drm_modeset_lock *lock,
190 struct drm_modeset_acquire_ctx *ctx)
191{
192 if (ctx)
193 return modeset_lock(lock, ctx, false, false);
194
195 ww_mutex_lock(&lock->mutex, NULL);
196 return 0;
197}
198EXPORT_SYMBOL(drm_modeset_lock);
199
200/**
201 * drm_modeset_lock_interruptible - take modeset lock
202 * @lock: lock to take
203 * @ctx: acquire ctx
204 *
205 * Interruptible version of drm_modeset_lock()
206 */
207int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
208 struct drm_modeset_acquire_ctx *ctx)
209{
210 if (ctx)
211 return modeset_lock(lock, ctx, true, false);
212
213 return ww_mutex_lock_interruptible(&lock->mutex, NULL);
214}
215EXPORT_SYMBOL(drm_modeset_lock_interruptible);
216
217/**
218 * drm_modeset_unlock - drop modeset lock
219 * @lock: lock to release
220 */
221void drm_modeset_unlock(struct drm_modeset_lock *lock)
222{
223 list_del_init(&lock->head);
224 ww_mutex_unlock(&lock->mutex);
225}
226EXPORT_SYMBOL(drm_modeset_unlock);
227
 228/* Temporary: until we have sufficiently fine-grained locking, there
 229 * are a couple of scenarios where it is convenient to grab all CRTC locks.
 230 * It is planned to remove this:
231 */
232int drm_modeset_lock_all_crtcs(struct drm_device *dev,
233 struct drm_modeset_acquire_ctx *ctx)
234{
235 struct drm_mode_config *config = &dev->mode_config;
236 struct drm_crtc *crtc;
237 int ret = 0;
238
239 list_for_each_entry(crtc, &config->crtc_list, head) {
240 ret = drm_modeset_lock(&crtc->mutex, ctx);
241 if (ret)
242 return ret;
243 }
244
245 return 0;
246}
247EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
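Spelled out as compilable C, the usage pattern from the DOC comment above looks like the sketch below. This is illustrative only, assuming the caller already has an array of drm_modeset_lock pointers to take in arbitrary order:

#include <drm/drmP.h>
#include <drm/drm_modeset_lock.h>

/* Take a set of modeset locks in any order, backing off and retrying
 * whenever the ww_mutex machinery reports a potential deadlock. */
static int lock_everything(struct drm_modeset_lock **locks, int count)
{
	struct drm_modeset_acquire_ctx ctx;
	int i, ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	for (i = 0; i < count; i++) {
		ret = drm_modeset_lock(locks[i], &ctx);
		if (ret == -EDEADLK) {
			/* drop all held locks, block on the contended one,
			 * then restart in a now deadlock-free order */
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (ret)
			goto out_drop;
	}

	/* ... do stuff under the locks ... */
	ret = 0;
out_drop:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}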
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 9c696a5ad74d..020cfd934854 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -1,17 +1,3 @@
1/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
2/**
3 * \file drm_pci.c
4 * \brief Functions and ioctls to manage PCI memory
5 *
6 * \warning These interfaces aren't stable yet.
7 *
8 * \todo Implement the remaining ioctl's for the PCI pools.
9 * \todo The wrappers here are so thin that they would be better off inlined..
10 *
11 * \author José Fonseca <jrfonseca@tungstengraphics.com>
12 * \author Leif Delgass <ldelgass@retinalburn.net>
13 */
14
15/* 1/*
16 * Copyright 2003 José Fonseca. 2 * Copyright 2003 José Fonseca.
17 * Copyright 2003 Leif Delgass. 3 * Copyright 2003 Leif Delgass.
@@ -42,12 +28,14 @@
42#include <linux/export.h> 28#include <linux/export.h>
43#include <drm/drmP.h> 29#include <drm/drmP.h>
44 30
45/**********************************************************************/
46/** \name PCI memory */
47/*@{*/
48
49/** 31/**
50 * \brief Allocate a PCI consistent memory block, for DMA. 32 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
33 * @dev: DRM device
34 * @size: size of block to allocate
35 * @align: alignment of block
36 *
37 * Return: A handle to the allocated memory block on success or NULL on
38 * failure.
51 */ 39 */
52drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) 40drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
53{ 41{
@@ -88,8 +76,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
88 76
89EXPORT_SYMBOL(drm_pci_alloc); 77EXPORT_SYMBOL(drm_pci_alloc);
90 78
91/** 79/*
92 * \brief Free a PCI consistent memory block without freeing its descriptor. 80 * Free a PCI consistent memory block without freeing its descriptor.
93 * 81 *
94 * This function is for internal use in the Linux-specific DRM core code. 82 * This function is for internal use in the Linux-specific DRM core code.
95 */ 83 */
@@ -111,7 +99,9 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
111} 99}
112 100
113/** 101/**
114 * \brief Free a PCI consistent memory block 102 * drm_pci_free - Free a PCI consistent memory block
103 * @dev: DRM device
104 * @dmah: handle to memory block
115 */ 105 */
116void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 106void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
117{ 107{
@@ -137,21 +127,9 @@ static int drm_get_pci_domain(struct drm_device *dev)
137 return pci_domain_nr(dev->pdev->bus); 127 return pci_domain_nr(dev->pdev->bus);
138} 128}
139 129
140static int drm_pci_get_irq(struct drm_device *dev)
141{
142 return dev->pdev->irq;
143}
144
145static const char *drm_pci_get_name(struct drm_device *dev)
146{
147 struct pci_driver *pdriver = dev->driver->kdriver.pci;
148 return pdriver->name;
149}
150
151static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) 130static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
152{ 131{
153 int len, ret; 132 int len, ret;
154 struct pci_driver *pdriver = dev->driver->kdriver.pci;
155 master->unique_len = 40; 133 master->unique_len = 40;
156 master->unique_size = master->unique_len; 134 master->unique_size = master->unique_len;
157 master->unique = kmalloc(master->unique_size, GFP_KERNEL); 135 master->unique = kmalloc(master->unique_size, GFP_KERNEL);
@@ -173,29 +151,16 @@ static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
173 } else 151 } else
174 master->unique_len = len; 152 master->unique_len = len;
175 153
176 dev->devname =
177 kmalloc(strlen(pdriver->name) +
178 master->unique_len + 2, GFP_KERNEL);
179
180 if (dev->devname == NULL) {
181 ret = -ENOMEM;
182 goto err;
183 }
184
185 sprintf(dev->devname, "%s@%s", pdriver->name,
186 master->unique);
187
188 return 0; 154 return 0;
189err: 155err:
190 return ret; 156 return ret;
191} 157}
192 158
193static int drm_pci_set_unique(struct drm_device *dev, 159int drm_pci_set_unique(struct drm_device *dev,
194 struct drm_master *master, 160 struct drm_master *master,
195 struct drm_unique *u) 161 struct drm_unique *u)
196{ 162{
197 int domain, bus, slot, func, ret; 163 int domain, bus, slot, func, ret;
198 const char *bus_name;
199 164
200 master->unique_len = u->unique_len; 165 master->unique_len = u->unique_len;
201 master->unique_size = u->unique_len + 1; 166 master->unique_size = u->unique_len + 1;
@@ -212,17 +177,6 @@ static int drm_pci_set_unique(struct drm_device *dev,
212 177
213 master->unique[master->unique_len] = '\0'; 178 master->unique[master->unique_len] = '\0';
214 179
215 bus_name = dev->driver->bus->get_name(dev);
216 dev->devname = kmalloc(strlen(bus_name) +
217 strlen(master->unique) + 2, GFP_KERNEL);
218 if (!dev->devname) {
219 ret = -ENOMEM;
220 goto err;
221 }
222
223 sprintf(dev->devname, "%s@%s", bus_name,
224 master->unique);
225
226 /* Return error if the busid submitted doesn't match the device's actual 180 /* Return error if the busid submitted doesn't match the device's actual
227 * busid. 181 * busid.
228 */ 182 */
@@ -247,7 +201,6 @@ err:
247 return ret; 201 return ret;
248} 202}
249 203
250
251static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 204static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
252{ 205{
253 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 206 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -262,6 +215,36 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
262 return 0; 215 return 0;
263} 216}
264 217
218/**
219 * drm_irq_by_busid - Get interrupt from bus ID
220 * @dev: DRM device
221 * @data: IOCTL parameter pointing to a drm_irq_busid structure
222 * @file_priv: DRM file private.
223 *
224 * Finds the PCI device with the specified bus id and gets its IRQ number.
 225 * This IOCTL is deprecated, and will now return -EINVAL for any busid not equal
 226 * to that of the device that this DRM instance is attached to.
227 *
228 * Return: 0 on success or a negative error code on failure.
229 */
230int drm_irq_by_busid(struct drm_device *dev, void *data,
231 struct drm_file *file_priv)
232{
233 struct drm_irq_busid *p = data;
234
235 if (drm_core_check_feature(dev, DRIVER_MODESET))
236 return -EINVAL;
237
 238 /* UMS was only ever supported on PCI devices. */
239 if (WARN_ON(!dev->pdev))
240 return -EINVAL;
241
242 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
243 return -EINVAL;
244
245 return drm_pci_irq_by_busid(dev, p);
246}
247
265static void drm_pci_agp_init(struct drm_device *dev) 248static void drm_pci_agp_init(struct drm_device *dev)
266{ 249{
267 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { 250 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -287,24 +270,20 @@ void drm_pci_agp_destroy(struct drm_device *dev)
287} 270}
288 271
289static struct drm_bus drm_pci_bus = { 272static struct drm_bus drm_pci_bus = {
290 .bus_type = DRIVER_BUS_PCI,
291 .get_irq = drm_pci_get_irq,
292 .get_name = drm_pci_get_name,
293 .set_busid = drm_pci_set_busid, 273 .set_busid = drm_pci_set_busid,
294 .set_unique = drm_pci_set_unique,
295 .irq_by_busid = drm_pci_irq_by_busid,
296}; 274};
297 275
298/** 276/**
299 * Register. 277 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
300 * 278 * @pdev: PCI device
301 * \param pdev - PCI device structure 279 * @ent: entry from the PCI ID table that matches @pdev
302 * \param ent entry from the PCI ID table with device type flags 280 * @driver: DRM device driver
303 * \return zero on success or a negative number on failure.
304 * 281 *
 305 * Attempts to get inter-module "drm" information. If we are first 282 * Attempts to get inter-module "drm" information. If we are first
 306 * then register the character device and inter-module information. 283 * then register the character device and inter-module information.
 307 * Try and register; if we fail to register, back out previous work. 284 * Try and register; if we fail to register, back out previous work.
285 *
286 * Return: 0 on success or a negative error code on failure.
308 */ 287 */
309int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 288int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
310 struct drm_driver *driver) 289 struct drm_driver *driver)
@@ -357,15 +336,14 @@ err_free:
357EXPORT_SYMBOL(drm_get_pci_dev); 336EXPORT_SYMBOL(drm_get_pci_dev);
358 337
359/** 338/**
360 * PCI device initialization. Called direct from modules at load time. 339 * drm_pci_init - Register matching PCI devices with the DRM subsystem
361 * 340 * @driver: DRM device driver
362 * \return zero on success or a negative number on failure. 341 * @pdriver: PCI device driver
363 * 342 *
 364 * Initializes a drm_device structures,registering the 343 * Initializes a drm_device structure, registering the stubs and initializing
365 * stubs and initializing the AGP device. 344 * the AGP device.
366 * 345 *
367 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and 346 * Return: 0 on success or a negative error code on failure.
368 * after the initialization for driver customization.
369 */ 347 */
370int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) 348int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
371{ 349{
@@ -375,7 +353,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
375 353
376 DRM_DEBUG("\n"); 354 DRM_DEBUG("\n");
377 355
378 driver->kdriver.pci = pdriver;
379 driver->bus = &drm_pci_bus; 356 driver->bus = &drm_pci_bus;
380 357
381 if (driver->driver_features & DRIVER_MODESET) 358 if (driver->driver_features & DRIVER_MODESET)
@@ -453,11 +430,31 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
453} 430}
454 431
455void drm_pci_agp_destroy(struct drm_device *dev) {} 432void drm_pci_agp_destroy(struct drm_device *dev) {}
433
434int drm_irq_by_busid(struct drm_device *dev, void *data,
435 struct drm_file *file_priv)
436{
437 return -EINVAL;
438}
439
440int drm_pci_set_unique(struct drm_device *dev,
441 struct drm_master *master,
442 struct drm_unique *u)
443{
444 return -EINVAL;
445}
456#endif 446#endif
457 447
458EXPORT_SYMBOL(drm_pci_init); 448EXPORT_SYMBOL(drm_pci_init);
459 449
460/*@}*/ 450/**
451 * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
452 * @driver: DRM device driver
453 * @pdriver: PCI device driver
454 *
455 * Unregisters one or more devices matched by a PCI driver from the DRM
456 * subsystem.
457 */
461void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) 458void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
462{ 459{
463 struct drm_device *dev, *tmp; 460 struct drm_device *dev, *tmp;
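For reference, drm_pci_init()/drm_pci_exit() bracket a PCI DRM driver's module lifetime, with the pci_driver's probe handing off to drm_get_pci_dev(). A condensed, hypothetical skeleton; the driver name and PCI IDs are invented for illustration:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drmP.h>

static struct drm_driver my_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* fops, ioctl table, load/unload callbacks elided */
};

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device ID */
	{ }
};

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &my_drm_driver);
}

static void my_pci_remove(struct pci_dev *pdev)
{
	/* drm_get_pci_dev() stashed the drm_device in drvdata */
	drm_put_dev(pci_get_drvdata(pdev));
}

static struct pci_driver my_pci_driver = {
	.name     = "my-drm",
	.id_table = my_pci_ids,
	.probe    = my_pci_probe,
	.remove   = my_pci_remove,
};

static int __init my_init(void)
{
	return drm_pci_init(&my_drm_driver, &my_pci_driver);
}
module_init(my_init);

static void __exit my_exit(void)
{
	drm_pci_exit(&my_drm_driver, &my_pci_driver);
}
module_exit(my_exit);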
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index d2b1c03b3d71..6d133149cc74 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -25,7 +25,9 @@
25 25
26#include <linux/list.h> 26#include <linux/list.h>
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_plane_helper.h>
28#include <drm/drm_rect.h> 29#include <drm/drm_rect.h>
30#include <drm/drm_plane_helper.h>
29 31
30#define SUBPIXEL_MASK 0xffff 32#define SUBPIXEL_MASK 0xffff
31 33
@@ -36,9 +38,9 @@
36 * creating the primary plane. However drivers that still call 38 * creating the primary plane. However drivers that still call
37 * drm_plane_init() will use this minimal format list as the default. 39 * drm_plane_init() will use this minimal format list as the default.
38 */ 40 */
39const static uint32_t safe_modeset_formats[] = { 41static const uint32_t safe_modeset_formats[] = {
40 DRM_FORMAT_XRGB8888, 42 DRM_FORMAT_XRGB8888,
41 DRM_FORMAT_ARGB8888, 43 DRM_FORMAT_ARGB8888,
42}; 44};
43 45
44/* 46/*
@@ -54,6 +56,13 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
54 struct drm_connector *connector; 56 struct drm_connector *connector;
55 int count = 0; 57 int count = 0;
56 58
59 /*
60 * Note: Once we change the plane hooks to more fine-grained locking we
61 * need to grab the connection_mutex here to be able to make these
62 * checks.
63 */
64 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
65
57 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 66 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
58 if (connector->encoder && connector->encoder->crtc == crtc) { 67 if (connector->encoder && connector->encoder->crtc == crtc) {
59 if (connector_list != NULL && count < num_connectors) 68 if (connector_list != NULL && count < num_connectors)
@@ -66,6 +75,79 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
66} 75}
67 76
68/** 77/**
78 * drm_plane_helper_check_update() - Check plane update for validity
79 * @plane: plane object to update
80 * @crtc: owning CRTC of owning plane
81 * @fb: framebuffer to flip onto plane
82 * @src: source coordinates in 16.16 fixed point
83 * @dest: integer destination coordinates
84 * @clip: integer clipping coordinates
85 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
86 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
87 * @can_position: is it legal to position the plane such that it
88 * doesn't cover the entire crtc? This will generally
89 * only be false for primary planes.
90 * @can_update_disabled: can the plane be updated while the crtc
91 * is disabled?
92 * @visible: output parameter indicating whether plane is still visible after
93 * clipping
94 *
95 * Checks that a desired plane update is valid. Drivers that provide
96 * their own plane handling rather than helper-provided implementations may
97 * still wish to call this function to avoid duplication of error checking
98 * code.
99 *
100 * RETURNS:
101 * Zero if update appears valid, error code on failure
102 */
103int drm_plane_helper_check_update(struct drm_plane *plane,
104 struct drm_crtc *crtc,
105 struct drm_framebuffer *fb,
106 struct drm_rect *src,
107 struct drm_rect *dest,
108 const struct drm_rect *clip,
109 int min_scale,
110 int max_scale,
111 bool can_position,
112 bool can_update_disabled,
113 bool *visible)
114{
115 int hscale, vscale;
116
117 if (!crtc->enabled && !can_update_disabled) {
118 DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
119 return -EINVAL;
120 }
121
122 /* Check scaling */
123 hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
124 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
125 if (hscale < 0 || vscale < 0) {
126 DRM_DEBUG_KMS("Invalid scaling of plane\n");
127 return -ERANGE;
128 }
129
130 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
131 if (!*visible)
132 /*
133 * Plane isn't visible; some drivers can handle this
134 * so we just return success here. Drivers that can't
135 * (including those that use the primary plane helper's
136 * update function) will return an error from their
137 * update_plane handler.
138 */
139 return 0;
140
141 if (!can_position && !drm_rect_equals(dest, clip)) {
142 DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
143 return -EINVAL;
144 }
145
146 return 0;
147}
148EXPORT_SYMBOL(drm_plane_helper_check_update);
149
150/**
69 * drm_primary_helper_update() - Helper for primary plane update 151 * drm_primary_helper_update() - Helper for primary plane update
70 * @plane: plane object to update 152 * @plane: plane object to update
71 * @crtc: owning CRTC of owning plane 153 * @crtc: owning CRTC of owning plane
@@ -113,56 +195,42 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
113 .x = src_x >> 16, 195 .x = src_x >> 16,
114 .y = src_y >> 16, 196 .y = src_y >> 16,
115 }; 197 };
198 struct drm_rect src = {
199 .x1 = src_x,
200 .y1 = src_y,
201 .x2 = src_x + src_w,
202 .y2 = src_y + src_h,
203 };
116 struct drm_rect dest = { 204 struct drm_rect dest = {
117 .x1 = crtc_x, 205 .x1 = crtc_x,
118 .y1 = crtc_y, 206 .y1 = crtc_y,
119 .x2 = crtc_x + crtc_w, 207 .x2 = crtc_x + crtc_w,
120 .y2 = crtc_y + crtc_h, 208 .y2 = crtc_y + crtc_h,
121 }; 209 };
122 struct drm_rect clip = { 210 const struct drm_rect clip = {
123 .x2 = crtc->mode.hdisplay, 211 .x2 = crtc->mode.hdisplay,
124 .y2 = crtc->mode.vdisplay, 212 .y2 = crtc->mode.vdisplay,
125 }; 213 };
126 struct drm_connector **connector_list; 214 struct drm_connector **connector_list;
127 struct drm_framebuffer *tmpfb;
128 int num_connectors, ret; 215 int num_connectors, ret;
216 bool visible;
129 217
130 if (!crtc->enabled) { 218 ret = drm_plane_helper_check_update(plane, crtc, fb,
131 DRM_DEBUG_KMS("Cannot update primary plane of a disabled CRTC.\n"); 219 &src, &dest, &clip,
132 return -EINVAL; 220 DRM_PLANE_HELPER_NO_SCALING,
133 } 221 DRM_PLANE_HELPER_NO_SCALING,
134 222 false, false, &visible);
135 /* Disallow subpixel positioning */
136 if ((src_x | src_y | src_w | src_h) & SUBPIXEL_MASK) {
137 DRM_DEBUG_KMS("Primary plane does not support subpixel positioning\n");
138 return -EINVAL;
139 }
140
141 /* Primary planes are locked to their owning CRTC */
142 if (plane->possible_crtcs != drm_crtc_mask(crtc)) {
143 DRM_DEBUG_KMS("Cannot change primary plane CRTC\n");
144 return -EINVAL;
145 }
146
147 /* Disallow scaling */
148 if (crtc_w != src_w || crtc_h != src_h) {
149 DRM_DEBUG_KMS("Can't scale primary plane\n");
150 return -EINVAL;
151 }
152
153 /* Make sure primary plane covers entire CRTC */
154 drm_rect_intersect(&dest, &clip);
155 if (dest.x1 != 0 || dest.y1 != 0 ||
156 dest.x2 != crtc->mode.hdisplay || dest.y2 != crtc->mode.vdisplay) {
157 DRM_DEBUG_KMS("Primary plane must cover entire CRTC\n");
158 return -EINVAL;
159 }
160
161 /* Framebuffer must be big enough to cover entire plane */
162 ret = drm_crtc_check_viewport(crtc, crtc_x, crtc_y, &crtc->mode, fb);
163 if (ret) 223 if (ret)
164 return ret; 224 return ret;
165 225
226 if (!visible)
227 /*
228 * Primary plane isn't visible. Note that unless a driver
 229 * provides its own disable function, this will just
230 * wind up returning -EINVAL to userspace.
231 */
232 return plane->funcs->disable_plane(plane);
233
166 /* Find current connectors for CRTC */ 234 /* Find current connectors for CRTC */
167 num_connectors = get_connectors_for_crtc(crtc, NULL, 0); 235 num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
168 BUG_ON(num_connectors == 0); 236 BUG_ON(num_connectors == 0);
@@ -176,21 +244,14 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
176 set.num_connectors = num_connectors; 244 set.num_connectors = num_connectors;
177 245
178 /* 246 /*
179 * set_config() adjusts crtc->primary->fb; however the DRM setplane 247 * We call set_config() directly here rather than using
180 * code that called us expects to handle the framebuffer update and
181 * reference counting; save and restore the current fb before
182 * calling it.
183 *
184 * N.B., we call set_config() directly here rather than using
185 * drm_mode_set_config_internal. We're reprogramming the same 248 * drm_mode_set_config_internal. We're reprogramming the same
186 * connectors that were already in use, so we shouldn't need the extra 249 * connectors that were already in use, so we shouldn't need the extra
 187 * cross-CRTC fb refcounting to accommodate stealing connectors. 250 * cross-CRTC fb refcounting to accommodate stealing connectors.
188 * drm_mode_setplane() already handles the basic refcounting for the 251 * drm_mode_setplane() already handles the basic refcounting for the
189 * framebuffers involved in this operation. 252 * framebuffers involved in this operation.
190 */ 253 */
191 tmpfb = plane->fb;
192 ret = crtc->funcs->set_config(&set); 254 ret = crtc->funcs->set_config(&set);
193 plane->fb = tmpfb;
194 255
195 kfree(connector_list); 256 kfree(connector_list);
196 return ret; 257 return ret;
@@ -232,7 +293,6 @@ EXPORT_SYMBOL(drm_primary_helper_disable);
232 */ 293 */
233void drm_primary_helper_destroy(struct drm_plane *plane) 294void drm_primary_helper_destroy(struct drm_plane *plane)
234{ 295{
235 plane->funcs->disable_plane(plane);
236 drm_plane_cleanup(plane); 296 drm_plane_cleanup(plane);
237 kfree(plane); 297 kfree(plane);
238} 298}
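Per the kerneldoc above, a driver that keeps its own plane code can still reuse drm_plane_helper_check_update() for the common validation step. A hedged sketch of a hypothetical update_plane hook; the actual hardware programming is elided:

#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>

/* Hypothetical driver hook: validate with the helper, then program hw. */
static int my_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h)
{
	struct drm_rect src = {
		.x1 = src_x, .y1 = src_y,
		.x2 = src_x + src_w, .y2 = src_y + src_h,
	};
	struct drm_rect dest = {
		.x1 = crtc_x, .y1 = crtc_y,
		.x2 = crtc_x + crtc_w, .y2 = crtc_y + crtc_h,
	};
	const struct drm_rect clip = {
		.x2 = crtc->mode.hdisplay,
		.y2 = crtc->mode.vdisplay,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true,  /* can_position */
					    false, /* can_update_disabled */
					    &visible);
	if (ret || !visible)
		return ret;

	/* ... program the hardware with the clipped src/dest rects ... */
	return 0;
}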
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 319ff5385601..d5b76f148c12 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,16 +68,6 @@ err_free:
68 return ret; 68 return ret;
69} 69}
70 70
71static int drm_platform_get_irq(struct drm_device *dev)
72{
73 return platform_get_irq(dev->platformdev, 0);
74}
75
76static const char *drm_platform_get_name(struct drm_device *dev)
77{
78 return dev->platformdev->name;
79}
80
81static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) 71static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
82{ 72{
83 int len, ret, id; 73 int len, ret, id;
@@ -106,46 +96,30 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
106 goto err; 96 goto err;
107 } 97 }
108 98
109 dev->devname =
110 kmalloc(strlen(dev->platformdev->name) +
111 master->unique_len + 2, GFP_KERNEL);
112
113 if (dev->devname == NULL) {
114 ret = -ENOMEM;
115 goto err;
116 }
117
118 sprintf(dev->devname, "%s@%s", dev->platformdev->name,
119 master->unique);
120 return 0; 99 return 0;
121err: 100err:
122 return ret; 101 return ret;
123} 102}
124 103
125static struct drm_bus drm_platform_bus = { 104static struct drm_bus drm_platform_bus = {
126 .bus_type = DRIVER_BUS_PLATFORM,
127 .get_irq = drm_platform_get_irq,
128 .get_name = drm_platform_get_name,
129 .set_busid = drm_platform_set_busid, 105 .set_busid = drm_platform_set_busid,
130}; 106};
131 107
132/** 108/**
133 * Platform device initialization. Called direct from modules. 109 * drm_platform_init - Register a platform device with the DRM subsystem
110 * @driver: DRM device driver
111 * @platform_device: platform device to register
134 * 112 *
135 * \return zero on success or a negative number on failure. 113 * Registers the specified DRM device driver and platform device with the DRM
136 * 114 * subsystem, initializing a drm_device structure and calling the driver's
137 * Initializes a drm_device structures,registering the 115 * .load() function.
138 * stubs
139 * 116 *
140 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and 117 * Return: 0 on success or a negative error code on failure.
141 * after the initialization for driver customization.
142 */ 118 */
143
144int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device) 119int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
145{ 120{
146 DRM_DEBUG("\n"); 121 DRM_DEBUG("\n");
147 122
148 driver->kdriver.platform_device = platform_device;
149 driver->bus = &drm_platform_bus; 123 driver->bus = &drm_platform_bus;
150 return drm_get_platform_dev(platform_device, driver); 124 return drm_get_platform_dev(platform_device, driver);
151} 125}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index e70f54d4a581..d22676b89cbb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,26 +82,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
82 return; 82 return;
83} 83}
84 84
85/** 85static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
86 * drm_helper_probe_single_connector_modes - get complete set of display modes 86 uint32_t maxX, uint32_t maxY, bool merge_type_bits)
87 * @connector: connector to probe
88 * @maxX: max width for modes
89 * @maxY: max height for modes
90 *
91 * Based on the helper callbacks implemented by @connector try to detect all
92 * valid modes. Modes will first be added to the connector's probed_modes list,
93 * then culled (based on validity and the @maxX, @maxY parameters) and put into
94 * the normal modes list.
95 *
 96 * Intended to be used as a generic implementation of the ->fill_modes()
97 * @connector vfunc for drivers that use the crtc helpers for output mode
98 * filtering and detection.
99 *
100 * Returns:
101 * The number of modes found on @connector.
102 */
103int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
104 uint32_t maxX, uint32_t maxY)
105{ 87{
106 struct drm_device *dev = connector->dev; 88 struct drm_device *dev = connector->dev;
107 struct drm_display_mode *mode; 89 struct drm_display_mode *mode;
@@ -114,7 +96,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
114 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 96 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
115 97
116 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 98 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
117 drm_get_connector_name(connector)); 99 connector->name);
118 /* set all modes to the unverified state */ 100 /* set all modes to the unverified state */
119 list_for_each_entry(mode, &connector->modes, head) 101 list_for_each_entry(mode, &connector->modes, head)
120 mode->status = MODE_UNVERIFIED; 102 mode->status = MODE_UNVERIFIED;
@@ -138,7 +120,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
138 120
139 if (connector->status == connector_status_disconnected) { 121 if (connector->status == connector_status_disconnected) {
140 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 122 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
141 connector->base.id, drm_get_connector_name(connector)); 123 connector->base.id, connector->name);
142 drm_mode_connector_update_edid_property(connector, NULL); 124 drm_mode_connector_update_edid_property(connector, NULL);
143 verbose_prune = false; 125 verbose_prune = false;
144 goto prune; 126 goto prune;
@@ -155,7 +137,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
155 if (count == 0) 137 if (count == 0)
156 goto prune; 138 goto prune;
157 139
158 drm_mode_connector_list_update(connector); 140 drm_mode_connector_list_update(connector, merge_type_bits);
159 141
160 if (maxX && maxY) 142 if (maxX && maxY)
161 drm_mode_validate_size(dev, &connector->modes, maxX, maxY); 143 drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
@@ -169,7 +151,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
169 drm_mode_validate_flag(connector, mode_flags); 151 drm_mode_validate_flag(connector, mode_flags);
170 152
171 list_for_each_entry(mode, &connector->modes, head) { 153 list_for_each_entry(mode, &connector->modes, head) {
172 if (mode->status == MODE_OK) 154 if (mode->status == MODE_OK && connector_funcs->mode_valid)
173 mode->status = connector_funcs->mode_valid(connector, 155 mode->status = connector_funcs->mode_valid(connector,
174 mode); 156 mode);
175 } 157 }
@@ -186,7 +168,7 @@ prune:
186 drm_mode_sort(&connector->modes); 168 drm_mode_sort(&connector->modes);
187 169
188 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, 170 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
189 drm_get_connector_name(connector)); 171 connector->name);
190 list_for_each_entry(mode, &connector->modes, head) { 172 list_for_each_entry(mode, &connector->modes, head) {
191 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 173 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
192 drm_mode_debug_printmodeline(mode); 174 drm_mode_debug_printmodeline(mode);
@@ -194,9 +176,49 @@ prune:
194 176
195 return count; 177 return count;
196} 178}
179
180/**
181 * drm_helper_probe_single_connector_modes - get complete set of display modes
182 * @connector: connector to probe
183 * @maxX: max width for modes
184 * @maxY: max height for modes
185 *
186 * Based on the helper callbacks implemented by @connector try to detect all
187 * valid modes. Modes will first be added to the connector's probed_modes list,
188 * then culled (based on validity and the @maxX, @maxY parameters) and put into
189 * the normal modes list.
190 *
 191 * Intended to be used as a generic implementation of the ->fill_modes()
192 * @connector vfunc for drivers that use the crtc helpers for output mode
193 * filtering and detection.
194 *
195 * Returns:
196 * The number of modes found on @connector.
197 */
198int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
199 uint32_t maxX, uint32_t maxY)
200{
201 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
202}
197EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); 203EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
198 204
199/** 205/**
206 * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
207 * @connector: connector to probe
208 * @maxX: max width for modes
209 * @maxY: max height for modes
210 *
 211 * This operates like drm_helper_probe_single_connector_modes() except that it
 212 * replaces the mode type bits instead of merging them for preferred modes.
213 */
214int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
215 uint32_t maxX, uint32_t maxY)
216{
217 return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
218}
219EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
220
221/**
200 * drm_kms_helper_hotplug_event - fire off KMS hotplug events 222 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
201 * @dev: drm_device whose connector state changed 223 * @dev: drm_device whose connector state changed
202 * 224 *
@@ -264,7 +286,7 @@ static void output_poll_execute(struct work_struct *work)
264 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " 286 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
265 "status updated from %s to %s\n", 287 "status updated from %s to %s\n",
266 connector->base.id, 288 connector->base.id,
267 drm_get_connector_name(connector), 289 connector->name,
268 old, new); 290 old, new);
269 291
270 changed = true; 292 changed = true;
@@ -409,7 +431,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
409 connector->status = connector->funcs->detect(connector, false); 431 connector->status = connector->funcs->detect(connector, false);
410 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 432 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
411 connector->base.id, 433 connector->base.id,
412 drm_get_connector_name(connector), 434 connector->name,
413 drm_get_connector_status_name(old_status), 435 drm_get_connector_status_name(old_status),
414 drm_get_connector_status_name(connector->status)); 436 drm_get_connector_status_name(connector->status));
415 if (old_status != connector->status) 437 if (old_status != connector->status)
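Switching a connector over is then just a matter of pointing its ->fill_modes() vfunc at the new variant. A hedged sketch with minimal, hypothetical detect/destroy callbacks:

#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static enum drm_connector_status
my_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected; /* placeholder: always connected */
}

static void my_connector_destroy(struct drm_connector *connector)
{
	drm_connector_cleanup(connector);
	kfree(connector);
}

/* Identical to the usual pattern, except preferred-mode type bits are
 * replaced on each probe instead of accumulated across probes. */
static const struct drm_connector_funcs my_connector_funcs = {
	.dpms       = drm_helper_connector_dpms,
	.detect     = my_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes_nomerge,
	.destroy    = my_connector_destroy,
};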
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 4c24c3ac1efa..14d16464000a 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -1,16 +1,11 @@
1/**
2 * \file drm_stub.h
3 * Stub support
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 */
7
8/* 1/*
9 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org 2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
10 * 3 *
11 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. 4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved. 5 * All Rights Reserved.
13 * 6 *
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
8 *
14 * Permission is hereby granted, free of charge, to any person obtaining a 9 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"), 10 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation 11 * to deal in the Software without restriction, including without limitation
@@ -128,7 +123,10 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
128 kref_init(&master->refcount); 123 kref_init(&master->refcount);
129 spin_lock_init(&master->lock.spinlock); 124 spin_lock_init(&master->lock.spinlock);
130 init_waitqueue_head(&master->lock.lock_queue); 125 init_waitqueue_head(&master->lock.lock_queue);
131 drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); 126 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
127 kfree(master);
128 return NULL;
129 }
132 INIT_LIST_HEAD(&master->magicfree); 130 INIT_LIST_HEAD(&master->magicfree);
133 master->minor = minor; 131 master->minor = minor;
134 132
@@ -166,9 +164,6 @@ static void drm_master_destroy(struct kref *kref)
166 master->unique_len = 0; 164 master->unique_len = 0;
167 } 165 }
168 166
169 kfree(dev->devname);
170 dev->devname = NULL;
171
172 list_for_each_entry_safe(pt, next, &master->magicfree, head) { 167 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
173 list_del(&pt->head); 168 list_del(&pt->head);
174 drm_ht_remove_item(&master->magiclist, &pt->hash_item); 169 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
@@ -294,6 +289,7 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
294 289
295 slot = drm_minor_get_slot(dev, type); 290 slot = drm_minor_get_slot(dev, type);
296 if (*slot) { 291 if (*slot) {
292 drm_mode_group_destroy(&(*slot)->mode_group);
297 kfree(*slot); 293 kfree(*slot);
298 *slot = NULL; 294 *slot = NULL;
299 } 295 }
@@ -424,11 +420,15 @@ void drm_minor_release(struct drm_minor *minor)
424} 420}
425 421
426/** 422/**
427 * Called via drm_exit() at module unload time or when pci device is 423 * drm_put_dev - Unregister and release a DRM device
428 * unplugged. 424 * @dev: DRM device
429 * 425 *
430 * Cleans up all DRM device, calling drm_lastclose(). 426 * Called at module unload time or when a PCI device is unplugged.
431 * 427 *
428 * Use of this function is discouraged. It will eventually go away completely.
429 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
430 *
 431 * 431 * Cleans up the DRM device, calling drm_lastclose().
432 */ 432 */
433void drm_put_dev(struct drm_device *dev) 433void drm_put_dev(struct drm_device *dev)
434{ 434{
@@ -535,7 +535,7 @@ static void drm_fs_inode_free(struct inode *inode)
535} 535}
536 536
537/** 537/**
538 * drm_dev_alloc - Allocate new drm device 538 * drm_dev_alloc - Allocate new DRM device
539 * @driver: DRM driver to allocate device for 539 * @driver: DRM driver to allocate device for
540 * @parent: Parent device object 540 * @parent: Parent device object
541 * 541 *
@@ -569,7 +569,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
569 INIT_LIST_HEAD(&dev->maplist); 569 INIT_LIST_HEAD(&dev->maplist);
570 INIT_LIST_HEAD(&dev->vblank_event_list); 570 INIT_LIST_HEAD(&dev->vblank_event_list);
571 571
572 spin_lock_init(&dev->count_lock); 572 spin_lock_init(&dev->buf_lock);
573 spin_lock_init(&dev->event_lock); 573 spin_lock_init(&dev->event_lock);
574 mutex_init(&dev->struct_mutex); 574 mutex_init(&dev->struct_mutex);
575 mutex_init(&dev->ctxlist_mutex); 575 mutex_init(&dev->ctxlist_mutex);
@@ -648,9 +648,8 @@ static void drm_dev_release(struct kref *ref)
648 drm_minor_free(dev, DRM_MINOR_RENDER); 648 drm_minor_free(dev, DRM_MINOR_RENDER);
649 drm_minor_free(dev, DRM_MINOR_CONTROL); 649 drm_minor_free(dev, DRM_MINOR_CONTROL);
650 650
651 kfree(dev->devname);
652
653 mutex_destroy(&dev->master_mutex); 651 mutex_destroy(&dev->master_mutex);
652 kfree(dev->unique);
654 kfree(dev); 653 kfree(dev);
655} 654}
656 655
@@ -690,6 +689,7 @@ EXPORT_SYMBOL(drm_dev_unref);
690/** 689/**
691 * drm_dev_register - Register DRM device 690 * drm_dev_register - Register DRM device
692 * @dev: Device to register 691 * @dev: Device to register
692 * @flags: Flags passed to the driver's .load() function
693 * 693 *
694 * Register the DRM device @dev with the system, advertise device to user-space 694 * Register the DRM device @dev with the system, advertise device to user-space
695 * and start normal device operation. @dev must be allocated via drm_dev_alloc() 695 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
@@ -778,3 +778,28 @@ void drm_dev_unregister(struct drm_device *dev)
778 drm_minor_unregister(dev, DRM_MINOR_CONTROL); 778 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
779} 779}
780EXPORT_SYMBOL(drm_dev_unregister); 780EXPORT_SYMBOL(drm_dev_unregister);
781
782/**
783 * drm_dev_set_unique - Set the unique name of a DRM device
784 * @dev: device of which to set the unique name
785 * @fmt: format string for unique name
786 *
787 * Sets the unique name of a DRM device using the specified format string and
788 * a variable list of arguments. Drivers can use this at driver probe time if
789 * the unique name of the devices they drive is static.
790 *
791 * Return: 0 on success or a negative error code on failure.
792 */
793int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
794{
795 va_list ap;
796
797 kfree(dev->unique);
798
799 va_start(ap, fmt);
800 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
801 va_end(ap);
802
803 return dev->unique ? 0 : -ENOMEM;
804}
805EXPORT_SYMBOL(drm_dev_set_unique);
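Typical use of drm_dev_set_unique() is at probe time, between drm_dev_alloc() and drm_dev_register(). A hedged sketch for a hypothetical platform-device-backed driver; my_drm_driver is assumed to be defined elsewhere:

#include <linux/platform_device.h>
#include <drm/drmP.h>

static int my_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&my_drm_driver, &pdev->dev); /* driver assumed */
	if (!ddev)
		return -ENOMEM;

	/* the device name is static for the life of the device, so it can
	 * double as the DRM unique name */
	ret = drm_dev_set_unique(ddev, "%s", dev_name(&pdev->dev));
	if (ret)
		goto err_unref;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(ddev);
	return ret;
}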
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index c22c3097c3e8..369b26278e76 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -380,9 +380,9 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
380 380
381 connector->kdev = device_create(drm_class, dev->primary->kdev, 381 connector->kdev = device_create(drm_class, dev->primary->kdev,
382 0, connector, "card%d-%s", 382 0, connector, "card%d-%s",
383 dev->primary->index, drm_get_connector_name(connector)); 383 dev->primary->index, connector->name);
384 DRM_DEBUG("adding \"%s\" to sysfs\n", 384 DRM_DEBUG("adding \"%s\" to sysfs\n",
385 drm_get_connector_name(connector)); 385 connector->name);
386 386
387 if (IS_ERR(connector->kdev)) { 387 if (IS_ERR(connector->kdev)) {
388 DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev)); 388 DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
@@ -460,7 +460,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
460 if (!connector->kdev) 460 if (!connector->kdev)
461 return; 461 return;
462 DRM_DEBUG("removing \"%s\" from sysfs\n", 462 DRM_DEBUG("removing \"%s\" from sysfs\n",
463 drm_get_connector_name(connector)); 463 connector->name);
464 464
465 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) 465 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
466 device_remove_file(connector->kdev, &connector_attrs[i]); 466 device_remove_file(connector->kdev, &connector_attrs[i]);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index c3406aad2944..f2fe94aab901 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -36,16 +36,6 @@ err_free:
36} 36}
37EXPORT_SYMBOL(drm_get_usb_dev); 37EXPORT_SYMBOL(drm_get_usb_dev);
38 38
39static int drm_usb_get_irq(struct drm_device *dev)
40{
41 return 0;
42}
43
44static const char *drm_usb_get_name(struct drm_device *dev)
45{
46 return "USB";
47}
48
49static int drm_usb_set_busid(struct drm_device *dev, 39static int drm_usb_set_busid(struct drm_device *dev,
50 struct drm_master *master) 40 struct drm_master *master)
51{ 41{
@@ -53,18 +43,24 @@ static int drm_usb_set_busid(struct drm_device *dev,
53} 43}
54 44
55static struct drm_bus drm_usb_bus = { 45static struct drm_bus drm_usb_bus = {
56 .bus_type = DRIVER_BUS_USB,
57 .get_irq = drm_usb_get_irq,
58 .get_name = drm_usb_get_name,
59 .set_busid = drm_usb_set_busid, 46 .set_busid = drm_usb_set_busid,
60}; 47};
61 48
49/**
50 * drm_usb_init - Register matching USB devices with the DRM subsystem
51 * @driver: DRM device driver
52 * @udriver: USB device driver
53 *
54 * Registers one or more devices matched by a USB driver with the DRM
55 * subsystem.
56 *
57 * Return: 0 on success or a negative error code on failure.
58 */
62int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver) 59int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
63{ 60{
64 int res; 61 int res;
65 DRM_DEBUG("\n"); 62 DRM_DEBUG("\n");
66 63
67 driver->kdriver.usb = udriver;
68 driver->bus = &drm_usb_bus; 64 driver->bus = &drm_usb_bus;
69 65
70 res = usb_register(udriver); 66 res = usb_register(udriver);
@@ -72,6 +68,14 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
72} 68}
73EXPORT_SYMBOL(drm_usb_init); 69EXPORT_SYMBOL(drm_usb_init);
74 70
71/**
72 * drm_usb_exit - Unregister matching USB devices from the DRM subsystem
73 * @driver: DRM device driver
74 * @udriver: USB device driver
75 *
76 * Unregisters one or more devices matched by a USB driver from the DRM
77 * subsystem.
78 */
75void drm_usb_exit(struct drm_driver *driver, 79void drm_usb_exit(struct drm_driver *driver,
76 struct usb_driver *udriver) 80 struct usb_driver *udriver)
77{ 81{
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 5bf5bca94f56..178d2a9672a8 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -26,14 +26,14 @@ config DRM_EXYNOS_DMABUF
26 26
27config DRM_EXYNOS_FIMD 27config DRM_EXYNOS_FIMD
28 bool "Exynos DRM FIMD" 28 bool "Exynos DRM FIMD"
29 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 29 depends on DRM_EXYNOS && !FB_S3C
30 select FB_MODE_HELPERS 30 select FB_MODE_HELPERS
31 help 31 help
32 Choose this option if you want to use Exynos FIMD for DRM. 32 Choose this option if you want to use Exynos FIMD for DRM.
33 33
34config DRM_EXYNOS_DPI 34config DRM_EXYNOS_DPI
35 bool "EXYNOS DRM parallel output support" 35 bool "EXYNOS DRM parallel output support"
36 depends on DRM_EXYNOS 36 depends on DRM_EXYNOS_FIMD
37 select DRM_PANEL 37 select DRM_PANEL
38 default n 38 default n
39 help 39 help
@@ -41,7 +41,7 @@ config DRM_EXYNOS_DPI
41 41
42config DRM_EXYNOS_DSI 42config DRM_EXYNOS_DSI
43 bool "EXYNOS DRM MIPI-DSI driver support" 43 bool "EXYNOS DRM MIPI-DSI driver support"
44 depends on DRM_EXYNOS 44 depends on DRM_EXYNOS_FIMD
45 select DRM_MIPI_DSI 45 select DRM_MIPI_DSI
46 select DRM_PANEL 46 select DRM_PANEL
47 default n 47 default n
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
50 50
51config DRM_EXYNOS_DP 51config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 52 bool "EXYNOS DRM DP driver support"
53 depends on DRM_EXYNOS && ARCH_EXYNOS 53 depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 54 default DRM_EXYNOS
55 help 55 help
 56 This enables support for the DP device. 56 This enables support for the DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
deleted file mode 100644
index 6a8c84e7c839..000000000000
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Seung-Woo Kim <sw0312.kim@samsung.com>
5 * Inki Dae <inki.dae@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include <drm/drmP.h>
15
16#include <linux/kernel.h>
17#include <linux/i2c.h>
18#include <linux/of.h>
19
20#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h"
22
23static int s5p_ddc_probe(struct i2c_client *client,
24 const struct i2c_device_id *dev_id)
25{
26 hdmi_attach_ddc_client(client);
27
28 dev_info(&client->adapter->dev,
29 "attached %s into i2c adapter successfully\n",
30 client->name);
31
32 return 0;
33}
34
35static int s5p_ddc_remove(struct i2c_client *client)
36{
37 dev_info(&client->adapter->dev,
38 "detached %s from i2c adapter successfully\n",
39 client->name);
40
41 return 0;
42}
43
44static struct of_device_id hdmiddc_match_types[] = {
45 {
46 .compatible = "samsung,exynos5-hdmiddc",
47 }, {
48 .compatible = "samsung,exynos4210-hdmiddc",
49 }, {
50 /* end node */
51 }
52};
53
54struct i2c_driver ddc_driver = {
55 .driver = {
56 .name = "exynos-hdmiddc",
57 .owner = THIS_MODULE,
58 .of_match_table = hdmiddc_match_types,
59 },
60 .probe = s5p_ddc_probe,
61 .remove = s5p_ddc_remove,
62 .command = NULL,
63};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index aed533bbfd31..a8ffc8c1477b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -18,6 +18,9 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/component.h>
 #include <linux/phy/phy.h>
 #include <video/of_display_timing.h>
 #include <video/of_videomode.h>
@@ -141,15 +144,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
 			return -EIO;
 		}

-		exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_TEST_REQUEST,
+		exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
 					&test_vector);
-		if (test_vector & DPCD_TEST_EDID_READ) {
+		if (test_vector & DP_TEST_LINK_EDID_READ) {
 			exynos_dp_write_byte_to_dpcd(dp,
-				DPCD_ADDR_TEST_EDID_CHECKSUM,
+				DP_TEST_EDID_CHECKSUM,
 				edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
 			exynos_dp_write_byte_to_dpcd(dp,
-				DPCD_ADDR_TEST_RESPONSE,
-				DPCD_TEST_EDID_CHECKSUM_WRITE);
+				DP_TEST_RESPONSE,
+				DP_TEST_EDID_CHECKSUM_WRITE);
 		}
 	} else {
 		dev_info(dp->dev, "EDID data does not include any extensions.\n");
@@ -171,15 +174,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
 		}

 		exynos_dp_read_byte_from_dpcd(dp,
-			DPCD_ADDR_TEST_REQUEST,
+			DP_TEST_REQUEST,
 			&test_vector);
-		if (test_vector & DPCD_TEST_EDID_READ) {
+		if (test_vector & DP_TEST_LINK_EDID_READ) {
 			exynos_dp_write_byte_to_dpcd(dp,
-				DPCD_ADDR_TEST_EDID_CHECKSUM,
+				DP_TEST_EDID_CHECKSUM,
 				edid[EDID_CHECKSUM]);
 			exynos_dp_write_byte_to_dpcd(dp,
-				DPCD_ADDR_TEST_RESPONSE,
-				DPCD_TEST_EDID_CHECKSUM_WRITE);
+				DP_TEST_RESPONSE,
+				DP_TEST_EDID_CHECKSUM_WRITE);
 		}
 	}

@@ -193,8 +196,8 @@ static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
 	int i;
 	int retval;

-	/* Read DPCD DPCD_ADDR_DPCD_REV~RECEIVE_PORT1_CAP_1 */
-	retval = exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_DPCD_REV,
+	/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
+	retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
 				12, buf);
 	if (retval)
 		return retval;
@@ -214,14 +217,14 @@ static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
 {
 	u8 data;

-	exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET, &data);
+	exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);

 	if (enable)
-		exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
-			DPCD_ENHANCED_FRAME_EN |
+		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+			DP_LANE_COUNT_ENHANCED_FRAME_EN |
 			DPCD_LANE_COUNT_SET(data));
 	else
-		exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
+		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
 			DPCD_LANE_COUNT_SET(data));
 }

@@ -230,7 +233,7 @@ static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
 	u8 data;
 	int retval;

-	exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
+	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
 	retval = DPCD_ENHANCED_FRAME_CAP(data);

 	return retval;
@@ -250,8 +253,8 @@ static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
 	exynos_dp_set_training_pattern(dp, DP_NONE);

 	exynos_dp_write_byte_to_dpcd(dp,
-		DPCD_ADDR_TRAINING_PATTERN_SET,
-		DPCD_TRAINING_PATTERN_DISABLED);
+		DP_TRAINING_PATTERN_SET,
+		DP_TRAINING_PATTERN_DISABLE);
 }

 static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
@@ -295,7 +298,7 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
 	/* Setup RX configuration */
 	buf[0] = dp->link_train.link_rate;
 	buf[1] = dp->link_train.lane_count;
-	retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
+	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
 				2, buf);
 	if (retval)
 		return retval;
@@ -322,16 +325,16 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)

 	/* Set RX training pattern */
 	retval = exynos_dp_write_byte_to_dpcd(dp,
-		DPCD_ADDR_TRAINING_PATTERN_SET,
-		DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_1);
+		DP_TRAINING_PATTERN_SET,
+		DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
 	if (retval)
 		return retval;

 	for (lane = 0; lane < lane_count; lane++)
-		buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
-			DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
+		buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
+			DP_TRAIN_VOLTAGE_SWING_400;

-	retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
 		lane_count, buf);

 	return retval;
@@ -352,7 +355,7 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)

 	for (lane = 0; lane < lane_count; lane++) {
 		lane_status = exynos_dp_get_lane_status(link_status, lane);
-		if ((lane_status & DPCD_LANE_CR_DONE) == 0)
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
 			return -EINVAL;
 	}
 	return 0;
@@ -364,13 +367,13 @@ static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
 	int lane;
 	u8 lane_status;

-	if ((link_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
+	if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
 		return -EINVAL;

 	for (lane = 0; lane < lane_count; lane++) {
 		lane_status = exynos_dp_get_lane_status(link_status, lane);
-		lane_status &= DPCD_CHANNEL_EQ_BITS;
-		if (lane_status != DPCD_CHANNEL_EQ_BITS)
+		lane_status &= DP_CHANNEL_EQ_BITS;
+		if (lane_status != DP_CHANNEL_EQ_BITS)
 			return -EINVAL;
 	}

@@ -468,9 +471,9 @@ static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
 			DPCD_PRE_EMPHASIS_SET(pre_emphasis);

 	if (voltage_swing == VOLTAGE_LEVEL_3)
-		training_lane |= DPCD_MAX_SWING_REACHED;
+		training_lane |= DP_TRAIN_MAX_SWING_REACHED;
 	if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
-		training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+		training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

 	dp->link_train.training_lane[lane] = training_lane;
 }
@@ -487,12 +490,12 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
 	lane_count = dp->link_train.lane_count;

 	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+			DP_LANE0_1_STATUS, 2, link_status);
 	if (retval)
 		return retval;

 	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
 	if (retval)
 		return retval;

@@ -501,9 +504,9 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
 		exynos_dp_set_training_pattern(dp, TRAINING_PTN2);

 		retval = exynos_dp_write_byte_to_dpcd(dp,
-				DPCD_ADDR_TRAINING_PATTERN_SET,
-				DPCD_SCRAMBLING_DISABLED |
-				DPCD_TRAINING_PATTERN_2);
+				DP_TRAINING_PATTERN_SET,
+				DP_LINK_SCRAMBLING_DISABLE |
+				DP_TRAINING_PATTERN_2);
 		if (retval)
 			return retval;

@@ -543,7 +546,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
 			dp->link_train.training_lane[lane], lane);

 	retval = exynos_dp_write_bytes_to_dpcd(dp,
-			DPCD_ADDR_TRAINING_LANE0_SET, lane_count,
+			DP_TRAINING_LANE0_SET, lane_count,
 			dp->link_train.training_lane);
 	if (retval)
 		return retval;
@@ -562,7 +565,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
 	lane_count = dp->link_train.lane_count;

 	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+			DP_LANE0_1_STATUS, 2, link_status);
 	if (retval)
 		return retval;

@@ -572,12 +575,12 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
 	}

 	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
 	if (retval)
 		return retval;

 	retval = exynos_dp_read_byte_from_dpcd(dp,
-			DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED, &link_align);
+			DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
 	if (retval)
 		return retval;

@@ -619,7 +622,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
 		exynos_dp_set_lane_link_training(dp,
 			dp->link_train.training_lane[lane], lane);

-	retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
 			lane_count, dp->link_train.training_lane);

 	return retval;
@@ -634,7 +637,7 @@ static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
 	 * For DP rev.1.1, Maximum link rate of Main Link lanes
 	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
 	 */
-	exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LINK_RATE, &data);
+	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
 	*bandwidth = data;
 }

@@ -647,7 +650,7 @@ static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
 	 * For DP rev.1.1, Maximum number of Main Link lanes
 	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
 	 */
-	exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
+	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
 	*lane_count = DPCD_MAX_LANE_COUNT(data);
 }

@@ -819,20 +822,20 @@ static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
 		exynos_dp_enable_scrambling(dp);

 		exynos_dp_read_byte_from_dpcd(dp,
-			DPCD_ADDR_TRAINING_PATTERN_SET,
+			DP_TRAINING_PATTERN_SET,
 			&data);
 		exynos_dp_write_byte_to_dpcd(dp,
-			DPCD_ADDR_TRAINING_PATTERN_SET,
-			(u8)(data & ~DPCD_SCRAMBLING_DISABLED));
+			DP_TRAINING_PATTERN_SET,
+			(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
 	} else {
 		exynos_dp_disable_scrambling(dp);

 		exynos_dp_read_byte_from_dpcd(dp,
-			DPCD_ADDR_TRAINING_PATTERN_SET,
+			DP_TRAINING_PATTERN_SET,
 			&data);
 		exynos_dp_write_byte_to_dpcd(dp,
-			DPCD_ADDR_TRAINING_PATTERN_SET,
-			(u8)(data | DPCD_SCRAMBLING_DISABLED));
+			DP_TRAINING_PATTERN_SET,
+			(u8)(data | DP_LINK_SCRAMBLING_DISABLE));
 	}
 }

@@ -949,12 +952,6 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
 	return 1;
 }

-static int exynos_dp_mode_valid(struct drm_connector *connector,
-			struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
 static struct drm_encoder *exynos_dp_best_encoder(
 			struct drm_connector *connector)
 {
@@ -965,20 +962,9 @@ static struct drm_encoder *exynos_dp_best_encoder(

 static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
 	.get_modes = exynos_dp_get_modes,
-	.mode_valid = exynos_dp_mode_valid,
 	.best_encoder = exynos_dp_best_encoder,
 };

-static int exynos_dp_initialize(struct exynos_drm_display *display,
-				struct drm_device *drm_dev)
-{
-	struct exynos_dp_device *dp = display->ctx;
-
-	dp->drm_dev = drm_dev;
-
-	return 0;
-}
-
 static bool find_bridge(const char *compat, struct bridge_init *bridge)
 {
 	bridge->client = NULL;
@@ -1101,12 +1087,11 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
 		break;
 	default:
 		break;
-	};
+	}
 	dp->dpms_mode = mode;
 }

 static struct exynos_drm_display_ops exynos_dp_display_ops = {
-	.initialize = exynos_dp_initialize,
 	.create_connector = exynos_dp_create_connector,
 	.dpms = exynos_dp_dpms,
 };
@@ -1123,10 +1108,8 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)

 	dp_video_config = devm_kzalloc(dev,
 			sizeof(*dp_video_config), GFP_KERNEL);
-	if (!dp_video_config) {
-		dev_err(dev, "memory allocation for video config failed\n");
+	if (!dp_video_config)
 		return ERR_PTR(-ENOMEM);
-	}

 	dp_video_config->h_sync_polarity =
 		of_property_read_bool(dp_node, "hsync-active-high");
@@ -1185,10 +1168,7 @@ static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
 	dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
 	if (!dp_phy_node) {
 		dp->phy = devm_phy_get(dp->dev, "dp");
-		if (IS_ERR(dp->phy))
-			return PTR_ERR(dp->phy);
-		else
-			return 0;
+		return PTR_ERR_OR_ZERO(dp->phy);
 	}

 	if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
@@ -1230,19 +1210,20 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
 	return 0;
 }

-static int exynos_dp_probe(struct platform_device *pdev)
+static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 {
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm_dev = data;
 	struct resource *res;
 	struct exynos_dp_device *dp;
+	unsigned int irq_flags;

 	int ret = 0;

 	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
 				GFP_KERNEL);
-	if (!dp) {
-		dev_err(&pdev->dev, "no memory for device data\n");
+	if (!dp)
 		return -ENOMEM;
-	}

 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1273,7 +1254,30 @@ static int exynos_dp_probe(struct platform_device *pdev)
 	if (IS_ERR(dp->reg_base))
 		return PTR_ERR(dp->reg_base);

-	dp->irq = platform_get_irq(pdev, 0);
+	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		/*
+		 * Set up the hotplug GPIO from the device tree as an interrupt.
+		 * Simply specifying a different interrupt in the device tree
+		 * doesn't work since we handle hotplug rather differently when
+		 * using a GPIO.  We also need the actual GPIO specifier so
+		 * that we can get the current state of the GPIO.
+		 */
+		ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
+					    "hpd_gpio");
+		if (ret) {
+			dev_err(&pdev->dev, "failed to get hpd gpio\n");
+			return ret;
+		}
+		dp->irq = gpio_to_irq(dp->hpd_gpio);
+		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+	} else {
+		dp->hpd_gpio = -ENODEV;
+		dp->irq = platform_get_irq(pdev, 0);
+		irq_flags = 0;
+	}
+
 	if (dp->irq == -ENXIO) {
 		dev_err(&pdev->dev, "failed to get irq\n");
 		return -ENODEV;
@@ -1285,28 +1289,61 @@ static int exynos_dp_probe(struct platform_device *pdev)

 	exynos_dp_init_dp(dp);

-	ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
-			"exynos-dp", dp);
+	ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
+			irq_flags, "exynos-dp", dp);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request irq\n");
 		return ret;
 	}
 	disable_irq(dp->irq);

+	dp->drm_dev = drm_dev;
 	exynos_dp_display.ctx = dp;

 	platform_set_drvdata(pdev, &exynos_dp_display);
-	exynos_drm_display_register(&exynos_dp_display);

-	return 0;
+	return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
 }

-static int exynos_dp_remove(struct platform_device *pdev)
+static void exynos_dp_unbind(struct device *dev, struct device *master,
+				void *data)
 {
-	struct exynos_drm_display *display = platform_get_drvdata(pdev);
+	struct exynos_drm_display *display = dev_get_drvdata(dev);
+	struct exynos_dp_device *dp = display->ctx;
+	struct drm_encoder *encoder = dp->encoder;

 	exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
-	exynos_drm_display_unregister(&exynos_dp_display);
+
+	encoder->funcs->destroy(encoder);
+	drm_connector_cleanup(&dp->connector);
+}
+
+static const struct component_ops exynos_dp_ops = {
+	.bind = exynos_dp_bind,
+	.unbind = exynos_dp_unbind,
+};
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+					exynos_dp_display.type);
+	if (ret)
+		return ret;
+
+	ret = component_add(&pdev->dev, &exynos_dp_ops);
+	if (ret)
+		exynos_drm_component_del(&pdev->dev,
+					EXYNOS_DEVICE_TYPE_CONNECTOR);

+	return ret;
+}
+
+static int exynos_dp_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &exynos_dp_ops);
+	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);

 	return 0;
 }
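The exynos_dp_core.c changes above convert the DP driver to the kernel's component framework: platform probe() now only advertises the device with component_add(), and the old probe body moves into a bind() callback that runs once the DRM master assembles all components. A minimal, self-contained sketch of that pattern (the example_ names are illustrative; the component_* calls are the stock <linux/component.h> API):

    #include <linux/component.h>
    #include <linux/platform_device.h>
    #include <drm/drmP.h>

    /* Called back from the master's component_bind_all(); real setup here. */
    static int example_bind(struct device *dev, struct device *master, void *data)
    {
    	struct drm_device *drm_dev = data;	/* the master's drm_device */

    	/* map registers, create encoder/connector, etc. */
    	dev_set_drvdata(dev, drm_dev);
    	return 0;
    }

    static void example_unbind(struct device *dev, struct device *master,
    			   void *data)
    {
    	/* tear down whatever bind() created */
    }

    static const struct component_ops example_component_ops = {
    	.bind	= example_bind,
    	.unbind	= example_unbind,
    };

    static int example_probe(struct platform_device *pdev)
    {
    	/* probe only advertises the device; binding happens later */
    	return component_add(&pdev->dev, &example_component_ops);
    }

    static int example_remove(struct platform_device *pdev)
    {
    	component_del(&pdev->dev, &example_component_ops);
    	return 0;
    }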
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index d6a900d4ee40..02cc4f9ab903 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -14,6 +14,7 @@
 #define _EXYNOS_DP_CORE_H

 #include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
 #include <drm/exynos_drm.h>

 #define DP_TIMEOUT_LOOP_COUNT 100
@@ -159,6 +160,7 @@ struct exynos_dp_device {
 	struct work_struct hotplug_work;
 	struct phy *phy;
 	int dpms_mode;
+	int hpd_gpio;

 	struct exynos_drm_panel_info panel;
 };
@@ -261,69 +263,17 @@ void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
 #define EDID_EXTENSION_FLAG	0x7e
 #define EDID_CHECKSUM		0x7f

-/* Definition for DPCD Register */
-#define DPCD_ADDR_DPCD_REV			0x0000
-#define DPCD_ADDR_MAX_LINK_RATE			0x0001
-#define DPCD_ADDR_MAX_LANE_COUNT		0x0002
-#define DPCD_ADDR_LINK_BW_SET			0x0100
-#define DPCD_ADDR_LANE_COUNT_SET		0x0101
-#define DPCD_ADDR_TRAINING_PATTERN_SET		0x0102
-#define DPCD_ADDR_TRAINING_LANE0_SET		0x0103
-#define DPCD_ADDR_LANE0_1_STATUS		0x0202
-#define DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED	0x0204
-#define DPCD_ADDR_ADJUST_REQUEST_LANE0_1	0x0206
-#define DPCD_ADDR_ADJUST_REQUEST_LANE2_3	0x0207
-#define DPCD_ADDR_TEST_REQUEST			0x0218
-#define DPCD_ADDR_TEST_RESPONSE			0x0260
-#define DPCD_ADDR_TEST_EDID_CHECKSUM		0x0261
-#define DPCD_ADDR_SINK_POWER_STATE		0x0600
-
-/* DPCD_ADDR_MAX_LANE_COUNT */
+/* DP_MAX_LANE_COUNT */
 #define DPCD_ENHANCED_FRAME_CAP(x)		(((x) >> 7) & 0x1)
 #define DPCD_MAX_LANE_COUNT(x)			((x) & 0x1f)

-/* DPCD_ADDR_LANE_COUNT_SET */
-#define DPCD_ENHANCED_FRAME_EN			(0x1 << 7)
+/* DP_LANE_COUNT_SET */
 #define DPCD_LANE_COUNT_SET(x)			((x) & 0x1f)

-/* DPCD_ADDR_TRAINING_PATTERN_SET */
-#define DPCD_SCRAMBLING_DISABLED		(0x1 << 5)
-#define DPCD_SCRAMBLING_ENABLED			(0x0 << 5)
-#define DPCD_TRAINING_PATTERN_2			(0x2 << 0)
-#define DPCD_TRAINING_PATTERN_1			(0x1 << 0)
-#define DPCD_TRAINING_PATTERN_DISABLED		(0x0 << 0)
-
-/* DPCD_ADDR_TRAINING_LANE0_SET */
-#define DPCD_MAX_PRE_EMPHASIS_REACHED		(0x1 << 5)
+/* DP_TRAINING_LANE0_SET */
 #define DPCD_PRE_EMPHASIS_SET(x)		(((x) & 0x3) << 3)
 #define DPCD_PRE_EMPHASIS_GET(x)		(((x) >> 3) & 0x3)
-#define DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0	(0x0 << 3)
-#define DPCD_MAX_SWING_REACHED			(0x1 << 2)
 #define DPCD_VOLTAGE_SWING_SET(x)		(((x) & 0x3) << 0)
 #define DPCD_VOLTAGE_SWING_GET(x)		(((x) >> 0) & 0x3)
-#define DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0	(0x0 << 0)
-
-/* DPCD_ADDR_LANE0_1_STATUS */
-#define DPCD_LANE_SYMBOL_LOCKED			(0x1 << 2)
-#define DPCD_LANE_CHANNEL_EQ_DONE		(0x1 << 1)
-#define DPCD_LANE_CR_DONE			(0x1 << 0)
-#define DPCD_CHANNEL_EQ_BITS			(DPCD_LANE_CR_DONE|	\
-						 DPCD_LANE_CHANNEL_EQ_DONE|\
-						 DPCD_LANE_SYMBOL_LOCKED)
-
-/* DPCD_ADDR_LANE_ALIGN__STATUS_UPDATED */
-#define DPCD_LINK_STATUS_UPDATED		(0x1 << 7)
-#define DPCD_DOWNSTREAM_PORT_STATUS_CHANGED	(0x1 << 6)
-#define DPCD_INTERLANE_ALIGN_DONE		(0x1 << 0)
-
-/* DPCD_ADDR_TEST_REQUEST */
-#define DPCD_TEST_EDID_READ			(0x1 << 2)
-
-/* DPCD_ADDR_TEST_RESPONSE */
-#define DPCD_TEST_EDID_CHECKSUM_WRITE		(0x1 << 2)
-
-/* DPCD_ADDR_SINK_POWER_STATE */
-#define DPCD_SET_POWER_STATE_D0			(0x1 << 0)
-#define DPCD_SET_POWER_STATE_D4			(0x2 << 0)

 #endif /* _EXYNOS_DP_CORE_H */
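The header diff above drops the driver's private DPCD_ADDR_* register map in favour of the shared DP_* definitions from <drm/drm_dp_helper.h>; only the exynos-specific bitfield accessors survive. A short sketch of reading sink capabilities with the shared names (it assumes the driver context from exynos_dp_core.h; exynos_dp_read_byte_from_dpcd() is the driver's own AUX helper seen throughout this diff, and example_max_lanes is an illustrative name):

    #include <linux/device.h>
    #include <drm/drm_dp_helper.h>
    #include "exynos_dp_core.h"

    static int example_max_lanes(struct exynos_dp_device *dp)
    {
    	u8 data;

    	/* DPCD 0x002: bits 4:0 = lane count, bit 7 = enhanced framing */
    	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);

    	if (data & DP_ENHANCED_FRAME_CAP)
    		dev_dbg(dp->dev, "sink supports enhanced framing\n");

    	return data & DP_MAX_LANE_COUNT_MASK;	/* 0x01, 0x02 or 0x04 */
    }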
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
index b70da5052ff0..c1f87a2a9284 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>

 #include "exynos_dp_core.h"
 #include "exynos_dp_reg.h"
@@ -326,6 +327,9 @@ void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
 {
 	u32 reg;

+	if (gpio_is_valid(dp->hpd_gpio))
+		return;
+
 	reg = HOTPLUG_CHG | HPD_LOST | PLUG;
 	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);

@@ -337,6 +341,9 @@ void exynos_dp_init_hpd(struct exynos_dp_device *dp)
 {
 	u32 reg;

+	if (gpio_is_valid(dp->hpd_gpio))
+		return;
+
 	exynos_dp_clear_hotplug_interrupts(dp);

 	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
@@ -348,19 +355,27 @@ enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
 {
 	u32 reg;

-	/* Parse hotplug interrupt status register */
-	reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		reg = gpio_get_value(dp->hpd_gpio);
+		if (reg)
+			return DP_IRQ_TYPE_HP_CABLE_IN;
+		else
+			return DP_IRQ_TYPE_HP_CABLE_OUT;
+	} else {
+		/* Parse hotplug interrupt status register */
+		reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);

 	if (reg & PLUG)
 		return DP_IRQ_TYPE_HP_CABLE_IN;

 	if (reg & HPD_LOST)
 		return DP_IRQ_TYPE_HP_CABLE_OUT;

 	if (reg & HOTPLUG_CHG)
 		return DP_IRQ_TYPE_HP_CHANGE;

 	return DP_IRQ_TYPE_UNKNOWN;
+	}
 }

 void exynos_dp_reset_aux(struct exynos_dp_device *dp)
@@ -386,7 +401,7 @@ void exynos_dp_init_aux(struct exynos_dp_device *dp)
 	/* Disable AUX transaction H/W retry */
 	reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
 	      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL) ;
+	writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);

 	/* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */
 	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
@@ -402,9 +417,14 @@ int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
 {
 	u32 reg;

-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-	if (reg & HPD_STATUS)
-		return 0;
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		if (gpio_get_value(dp->hpd_gpio))
+			return 0;
+	} else {
+		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+		if (reg & HPD_STATUS)
+			return 0;
+	}

 	return -EINVAL;
 }
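Every register-touching HPD helper above now short-circuits when a hotplug GPIO is in use: plug state comes from gpio_get_value() instead of the EXYNOS_DP_COMMON_INT_STA_4 / EXYNOS_DP_SYS_CTL_3 registers. The probe-side wiring this relies on, recapped as a hedged sketch (the example_ prefix is illustrative; the calls are the standard GPIO/IRQ APIs used by the probe hunk earlier in this diff):

    #include <linux/gpio.h>
    #include <linux/interrupt.h>
    #include <linux/of_gpio.h>

    static int example_setup_hpd_irq(struct device *dev, int *irq,
    				 unsigned long *irq_flags)
    {
    	int gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
    	int ret;

    	if (!gpio_is_valid(gpio))
    		return -ENODEV;

    	ret = devm_gpio_request_one(dev, gpio, GPIOF_IN, "hpd_gpio");
    	if (ret)
    		return ret;

    	/* fire on both edges so plug and unplug are both seen */
    	*irq = gpio_to_irq(gpio);
    	*irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;

    	return gpio;
    }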
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 0e9e06ce36b8..4c9f972eaa07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -19,21 +19,19 @@
 #include "exynos_drm_fbdev.h"

 static LIST_HEAD(exynos_drm_subdrv_list);
-static LIST_HEAD(exynos_drm_manager_list);
-static LIST_HEAD(exynos_drm_display_list);

-static int exynos_drm_create_enc_conn(struct drm_device *dev,
+int exynos_drm_create_enc_conn(struct drm_device *dev,
 					struct exynos_drm_display *display)
 {
 	struct drm_encoder *encoder;
-	struct exynos_drm_manager *manager;
 	int ret;
 	unsigned long possible_crtcs = 0;

-	/* Find possible crtcs for this display */
-	list_for_each_entry(manager, &exynos_drm_manager_list, list)
-		if (manager->type == display->type)
-			possible_crtcs |= 1 << manager->pipe;
+	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
+	if (ret < 0)
+		return ret;
+
+	possible_crtcs |= 1 << ret;

 	/* create and initialize a encoder for this sub driver. */
 	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
@@ -57,127 +55,29 @@ err_destroy_encoder:
 	return ret;
 }

-static int exynos_drm_subdrv_probe(struct drm_device *dev,
-					struct exynos_drm_subdrv *subdrv)
-{
-	if (subdrv->probe) {
-		int ret;
-
-		subdrv->drm_dev = dev;
-
-		/*
-		 * this probe callback would be called by sub driver
-		 * after setting of all resources to this sub driver,
-		 * such as clock, irq and register map are done or by load()
-		 * of exynos drm driver.
-		 *
-		 * P.S. note that this driver is considered for modularization.
-		 */
-		ret = subdrv->probe(dev, subdrv->dev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void exynos_drm_subdrv_remove(struct drm_device *dev,
-					struct exynos_drm_subdrv *subdrv)
-{
-	if (subdrv->remove)
-		subdrv->remove(dev, subdrv->dev);
-}
-
-int exynos_drm_initialize_managers(struct drm_device *dev)
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
-	struct exynos_drm_manager *manager, *n;
-	int ret, pipe = 0;
-
-	list_for_each_entry(manager, &exynos_drm_manager_list, list) {
-		if (manager->ops->initialize) {
-			ret = manager->ops->initialize(manager, dev, pipe);
-			if (ret) {
-				DRM_ERROR("Mgr init [%d] failed with %d\n",
-						manager->type, ret);
-				goto err;
-			}
-		}
+	if (!subdrv)
+		return -EINVAL;

-		manager->drm_dev = dev;
-		manager->pipe = pipe++;
+	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);

-		ret = exynos_drm_crtc_create(manager);
-		if (ret) {
-			DRM_ERROR("CRTC create [%d] failed with %d\n",
-					manager->type, ret);
-			goto err;
-		}
-	}
 	return 0;
-
-err:
-	list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list) {
-		if (pipe-- > 0)
-			exynos_drm_manager_unregister(manager);
-		else
-			list_del(&manager->list);
-	}
-	return ret;
-}
-
-void exynos_drm_remove_managers(struct drm_device *dev)
-{
-	struct exynos_drm_manager *manager, *n;
-
-	list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list)
-		exynos_drm_manager_unregister(manager);
 }
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);

-int exynos_drm_initialize_displays(struct drm_device *dev)
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
-	struct exynos_drm_display *display, *n;
-	int ret, initialized = 0;
-
-	list_for_each_entry(display, &exynos_drm_display_list, list) {
-		if (display->ops->initialize) {
-			ret = display->ops->initialize(display, dev);
-			if (ret) {
-				DRM_ERROR("Display init [%d] failed with %d\n",
-						display->type, ret);
-				goto err;
-			}
-		}
+	if (!subdrv)
+		return -EINVAL;

-		initialized++;
+	list_del(&subdrv->list);

-		ret = exynos_drm_create_enc_conn(dev, display);
-		if (ret) {
-			DRM_ERROR("Encoder create [%d] failed with %d\n",
-					display->type, ret);
-			goto err;
-		}
-	}
 	return 0;
-
-err:
-	list_for_each_entry_safe(display, n, &exynos_drm_display_list, list) {
-		if (initialized-- > 0)
-			exynos_drm_display_unregister(display);
-		else
-			list_del(&display->list);
-	}
-	return ret;
-}
-
-void exynos_drm_remove_displays(struct drm_device *dev)
-{
-	struct exynos_drm_display *display, *n;
-
-	list_for_each_entry_safe(display, n, &exynos_drm_display_list, list)
-		exynos_drm_display_unregister(display);
 }
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);

-int exynos_drm_device_register(struct drm_device *dev)
+int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
 	struct exynos_drm_subdrv *subdrv, *n;
 	int err;
@@ -186,19 +86,28 @@ int exynos_drm_device_register(struct drm_device *dev)
 		return -EINVAL;

 	list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
-		err = exynos_drm_subdrv_probe(dev, subdrv);
-		if (err) {
-			DRM_DEBUG("exynos drm subdrv probe failed.\n");
-			list_del(&subdrv->list);
-			continue;
+		if (subdrv->probe) {
+			subdrv->drm_dev = dev;
+
+			/*
+			 * this probe callback would be called by sub driver
+			 * after setting of all resources to this sub driver,
+			 * such as clock, irq and register map are done.
+			 */
+			err = subdrv->probe(dev, subdrv->dev);
+			if (err) {
+				DRM_DEBUG("exynos drm subdrv probe failed.\n");
+				list_del(&subdrv->list);
+				continue;
+			}
 		}
 	}

 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);

-int exynos_drm_device_unregister(struct drm_device *dev)
+int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
 	struct exynos_drm_subdrv *subdrv;

@@ -208,66 +117,13 @@ int exynos_drm_device_unregister(struct drm_device *dev)
 	}

 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
-		exynos_drm_subdrv_remove(dev, subdrv);
+		if (subdrv->remove)
+			subdrv->remove(dev, subdrv->dev);
 	}

 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
-
-int exynos_drm_manager_register(struct exynos_drm_manager *manager)
-{
-	BUG_ON(!manager->ops);
-	list_add_tail(&manager->list, &exynos_drm_manager_list);
-	return 0;
-}
-
-int exynos_drm_manager_unregister(struct exynos_drm_manager *manager)
-{
-	if (manager->ops->remove)
-		manager->ops->remove(manager);
-
-	list_del(&manager->list);
-	return 0;
-}
-
-int exynos_drm_display_register(struct exynos_drm_display *display)
-{
-	BUG_ON(!display->ops);
-	list_add_tail(&display->list, &exynos_drm_display_list);
-	return 0;
-}
-
-int exynos_drm_display_unregister(struct exynos_drm_display *display)
-{
-	if (display->ops->remove)
-		display->ops->remove(display);
-
-	list_del(&display->list);
-	return 0;
-}
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
-	if (!subdrv)
-		return -EINVAL;
-
-	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
-	if (!subdrv)
-		return -EINVAL;
-
-	list_del(&subdrv->list);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);

 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
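exynos_drm_core.c shrinks to just the subdrv interface: the global manager/display registration lists move to the component framework, while non-KMS sub drivers (g2d, ipp, vidi and friends) keep registering on exynos_drm_subdrv_list and have their probe callback invoked once from exynos_drm_device_subdrv_probe() during load. A hedged sketch of a sub driver using the surviving interface (field names follow struct exynos_drm_subdrv as used above; the example_ names are illustrative):

    #include <drm/drmP.h>
    #include "exynos_drm_drv.h"

    static int example_subdrv_probe(struct drm_device *drm_dev,
    				struct device *dev)
    {
    	/* drm_dev is valid here; allocate per-drm resources */
    	return 0;
    }

    static void example_subdrv_remove(struct drm_device *drm_dev,
    				  struct device *dev)
    {
    	/* undo example_subdrv_probe() */
    }

    static struct exynos_drm_subdrv example_subdrv = {
    	.probe	= example_subdrv_probe,
    	.remove	= example_subdrv_remove,
    };

    static int example_register(struct device *dev)
    {
    	example_subdrv.dev = dev;
    	/* queued now, probed later from exynos_drm_device_subdrv_probe() */
    	return exynos_drm_subdrv_register(&example_subdrv);
    }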
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 1ef5ab9c9d51..95c9435d0266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -368,6 +368,7 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
 		return -ENOMEM;
 	}

+	manager->crtc = &exynos_crtc->drm_crtc;
 	crtc = &exynos_crtc->drm_crtc;

 	private->crtc[manager->pipe] = crtc;
@@ -491,3 +492,19 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
 			manager->ops->wait_for_vblank(manager);
 	}
 }
+
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+					unsigned int out_type)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		struct exynos_drm_crtc *exynos_crtc;
+
+		exynos_crtc = to_exynos_crtc(crtc);
+		if (exynos_crtc->manager->type == out_type)
+			return exynos_crtc->manager->pipe;
+	}
+
+	return -EPERM;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c27b66cc5d24..9f74b10a8a01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -32,4 +32,8 @@ void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
 void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
 void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);

+/* This function gets pipe value to crtc device matched with out_type. */
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+					unsigned int out_type);
+
 #endif
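The new exynos_drm_crtc_get_pipe_from_type() replaces the old walk of a global manager list: a connector-side driver asks the already-registered CRTCs for the pipe matching its output type and derives possible_crtcs from it, exactly as exynos_drm_create_enc_conn() does above. A minimal usage sketch (the example_ name is illustrative):

    #include <drm/drmP.h>
    #include "exynos_drm_crtc.h"
    #include "exynos_drm_drv.h"

    static int example_possible_crtcs(struct drm_device *drm_dev,
    				  unsigned long *possible_crtcs)
    {
    	int pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
    					EXYNOS_DISPLAY_TYPE_LCD);

    	if (pipe < 0)
    		return pipe;		/* no matching crtc bound yet */

    	*possible_crtcs = 1UL << pipe;	/* one bit per crtc pipe */
    	return 0;
    }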
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 2b09c7c0bfcc..482127f633c5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,20 +40,10 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
 	struct exynos_dpi *ctx = connector_to_dpi(connector);

-	/* panels supported only by boot-loader are always connected */
-	if (!ctx->panel_node)
-		return connector_status_connected;
-
-	if (!ctx->panel) {
-		ctx->panel = of_drm_find_panel(ctx->panel_node);
-		if (ctx->panel)
-			drm_panel_attach(ctx->panel, &ctx->connector);
-	}
-
-	if (ctx->panel)
-		return connector_status_connected;
+	if (!ctx->panel->connector)
+		drm_panel_attach(ctx->panel, &ctx->connector);

-	return connector_status_disconnected;
+	return connector_status_connected;
 }

 static void exynos_dpi_connector_destroy(struct drm_connector *connector)
@@ -94,12 +84,6 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
 	return 0;
 }

-static int exynos_dpi_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
 static struct drm_encoder *
 exynos_dpi_best_encoder(struct drm_connector *connector)
 {
@@ -110,7 +94,6 @@ exynos_dpi_best_encoder(struct drm_connector *connector)

 static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
 	.get_modes = exynos_dpi_get_modes,
-	.mode_valid = exynos_dpi_mode_valid,
 	.best_encoder = exynos_dpi_best_encoder,
 };

@@ -123,10 +106,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,

 	ctx->encoder = encoder;

-	if (ctx->panel_node)
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-	else
-		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;

 	ret = drm_connector_init(encoder->dev, connector,
 				&exynos_dpi_connector_funcs,
@@ -172,7 +152,7 @@ static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
 		break;
 	default:
 		break;
-	};
+	}
 	ctx->dpms_mode = mode;
 }

@@ -294,8 +274,10 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
 		return -ENOMEM;

 	ret = of_get_videomode(dn, vm, 0);
-	if (ret < 0)
+	if (ret < 0) {
+		devm_kfree(dev, vm);
 		return ret;
+	}

 	ctx->vm = vm;

@@ -308,32 +290,58 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
 	return 0;
 }

-int exynos_dpi_probe(struct device *dev)
+struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
 {
 	struct exynos_dpi *ctx;
 	int ret;

+	ret = exynos_drm_component_add(dev,
+					EXYNOS_DEVICE_TYPE_CONNECTOR,
+					exynos_dpi_display.type);
+	if (ret)
+		return ERR_PTR(ret);
+
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return -ENOMEM;
+		goto err_del_component;

 	ctx->dev = dev;
 	exynos_dpi_display.ctx = ctx;
 	ctx->dpms_mode = DRM_MODE_DPMS_OFF;

 	ret = exynos_dpi_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		devm_kfree(dev, ctx);
+		goto err_del_component;
+	}

-	exynos_drm_display_register(&exynos_dpi_display);
+	if (ctx->panel_node) {
+		ctx->panel = of_drm_find_panel(ctx->panel_node);
+		if (!ctx->panel) {
+			exynos_drm_component_del(dev,
+						EXYNOS_DEVICE_TYPE_CONNECTOR);
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+	}

-	return 0;
+	return &exynos_dpi_display;
+
+err_del_component:
+	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+	return NULL;
 }

 int exynos_dpi_remove(struct device *dev)
 {
+	struct drm_encoder *encoder = exynos_dpi_display.encoder;
+	struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+
 	exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
-	exynos_drm_display_unregister(&exynos_dpi_display);
+	encoder->funcs->destroy(encoder);
+	drm_connector_cleanup(&ctx->connector);
+
+	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);

 	return 0;
 }
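Note the probe-deferral rule introduced above: when the device tree names a panel but no drm_panel has registered yet, exynos_dpi_probe() returns ERR_PTR(-EPROBE_DEFER) so the device core retries after the panel driver loads. The lookup reduces to this hedged sketch (example_ name is illustrative):

    #include <linux/err.h>
    #include <linux/of.h>
    #include <drm/drm_panel.h>

    static struct drm_panel *example_find_panel(struct device_node *panel_node)
    {
    	struct drm_panel *panel = of_drm_find_panel(panel_node);

    	/*
    	 * of_drm_find_panel() returns NULL until the panel driver has
    	 * registered itself; -EPROBE_DEFER makes the device core retry
    	 * this probe later.
    	 */
    	if (!panel)
    		return ERR_PTR(-EPROBE_DEFER);

    	return panel;
    }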
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2d27ba23a6a8..d91f27777537 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,6 +16,7 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include <linux/anon_inodes.h> 18#include <linux/anon_inodes.h>
19#include <linux/component.h>
19 20
20#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
21 22
@@ -40,9 +41,19 @@
40 41
41#define VBLANK_OFF_DELAY 50000 42#define VBLANK_OFF_DELAY 50000
42 43
43/* platform device pointer for eynos drm device. */
44static struct platform_device *exynos_drm_pdev; 44static struct platform_device *exynos_drm_pdev;
45 45
46static DEFINE_MUTEX(drm_component_lock);
47static LIST_HEAD(drm_component_list);
48
49struct component_dev {
50 struct list_head list;
51 struct device *crtc_dev;
52 struct device *conn_dev;
53 enum exynos_drm_output_type out_type;
54 unsigned int dev_type_flag;
55};
56
46static int exynos_drm_load(struct drm_device *dev, unsigned long flags) 57static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
47{ 58{
48 struct exynos_drm_private *private; 59 struct exynos_drm_private *private;
@@ -73,38 +84,21 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
73 84
74 exynos_drm_mode_config_init(dev); 85 exynos_drm_mode_config_init(dev);
75 86
76 ret = exynos_drm_initialize_managers(dev);
77 if (ret)
78 goto err_mode_config_cleanup;
79
80 for (nr = 0; nr < MAX_PLANE; nr++) { 87 for (nr = 0; nr < MAX_PLANE; nr++) {
81 struct drm_plane *plane; 88 struct drm_plane *plane;
82 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1; 89 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
83 90
84 plane = exynos_plane_init(dev, possible_crtcs, false); 91 plane = exynos_plane_init(dev, possible_crtcs, false);
85 if (!plane) 92 if (!plane)
86 goto err_manager_cleanup; 93 goto err_mode_config_cleanup;
87 } 94 }
88 95
89 ret = exynos_drm_initialize_displays(dev);
90 if (ret)
91 goto err_manager_cleanup;
92
93 /* init kms poll for handling hpd */ 96 /* init kms poll for handling hpd */
94 drm_kms_helper_poll_init(dev); 97 drm_kms_helper_poll_init(dev);
95 98
96 ret = drm_vblank_init(dev, MAX_CRTC); 99 ret = drm_vblank_init(dev, MAX_CRTC);
97 if (ret) 100 if (ret)
98 goto err_display_cleanup; 101 goto err_mode_config_cleanup;
99
100 /*
101 * probe sub drivers such as display controller and hdmi driver,
102 * that were registered at probe() of platform driver
103 * to the sub driver and create encoder and connector for them.
104 */
105 ret = exynos_drm_device_register(dev);
106 if (ret)
107 goto err_vblank;
108 102
109 /* setup possible_clones. */ 103 /* setup possible_clones. */
110 exynos_drm_encoder_setup(dev); 104 exynos_drm_encoder_setup(dev);
@@ -113,17 +107,25 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
113 107
114 platform_set_drvdata(dev->platformdev, dev); 108 platform_set_drvdata(dev->platformdev, dev);
115 109
110 /* Try to bind all sub drivers. */
111 ret = component_bind_all(dev->dev, dev);
112 if (ret)
113 goto err_cleanup_vblank;
114
115 /* Probe non kms sub drivers and virtual display driver. */
116 ret = exynos_drm_device_subdrv_probe(dev);
117 if (ret)
118 goto err_unbind_all;
119
116 /* force connectors detection */ 120 /* force connectors detection */
117 drm_helper_hpd_irq_event(dev); 121 drm_helper_hpd_irq_event(dev);
118 122
119 return 0; 123 return 0;
120 124
121err_vblank: 125err_unbind_all:
126 component_unbind_all(dev->dev, dev);
127err_cleanup_vblank:
122 drm_vblank_cleanup(dev); 128 drm_vblank_cleanup(dev);
123err_display_cleanup:
124 exynos_drm_remove_displays(dev);
125err_manager_cleanup:
126 exynos_drm_remove_managers(dev);
127err_mode_config_cleanup: 129err_mode_config_cleanup:
128 drm_mode_config_cleanup(dev); 130 drm_mode_config_cleanup(dev);
129 drm_release_iommu_mapping(dev); 131 drm_release_iommu_mapping(dev);
@@ -135,17 +137,17 @@ err_free_private:
135 137
136static int exynos_drm_unload(struct drm_device *dev) 138static int exynos_drm_unload(struct drm_device *dev)
137{ 139{
140 exynos_drm_device_subdrv_remove(dev);
141
138 exynos_drm_fbdev_fini(dev); 142 exynos_drm_fbdev_fini(dev);
139 exynos_drm_device_unregister(dev);
140 drm_vblank_cleanup(dev); 143 drm_vblank_cleanup(dev);
141 drm_kms_helper_poll_fini(dev); 144 drm_kms_helper_poll_fini(dev);
142 exynos_drm_remove_displays(dev);
143 exynos_drm_remove_managers(dev);
144 drm_mode_config_cleanup(dev); 145 drm_mode_config_cleanup(dev);
145 146
146 drm_release_iommu_mapping(dev); 147 drm_release_iommu_mapping(dev);
147 kfree(dev->dev_private); 148 kfree(dev->dev_private);
148 149
150 component_unbind_all(dev->dev, dev);
149 dev->dev_private = NULL; 151 dev->dev_private = NULL;
150 152
151 return 0; 153 return 0;
@@ -183,9 +185,9 @@ static int exynos_drm_resume(struct drm_device *dev)
183 if (connector->funcs->dpms) 185 if (connector->funcs->dpms)
184 connector->funcs->dpms(connector, connector->dpms); 186 connector->funcs->dpms(connector, connector->dpms);
185 } 187 }
188 drm_modeset_unlock_all(dev);
186 189
187 drm_helper_resume_force_mode(dev); 190 drm_helper_resume_force_mode(dev);
188 drm_modeset_unlock_all(dev);
189 191
190 return 0; 192 return 0;
191} 193}
@@ -323,8 +325,7 @@ static const struct file_operations exynos_drm_driver_fops = {
323}; 325};
324 326
325static struct drm_driver exynos_drm_driver = { 327static struct drm_driver exynos_drm_driver = {
326 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 328 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
327 DRIVER_GEM | DRIVER_PRIME,
328 .load = exynos_drm_load, 329 .load = exynos_drm_load,
329 .unload = exynos_drm_unload, 330 .unload = exynos_drm_unload,
330 .suspend = exynos_drm_suspend, 331 .suspend = exynos_drm_suspend,
@@ -355,27 +356,6 @@ static struct drm_driver exynos_drm_driver = {
355 .minor = DRIVER_MINOR, 356 .minor = DRIVER_MINOR,
356}; 357};
357 358
358static int exynos_drm_platform_probe(struct platform_device *pdev)
359{
360 int ret;
361
362 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
363 if (ret)
364 return ret;
365
366 pm_runtime_enable(&pdev->dev);
367 pm_runtime_get_sync(&pdev->dev);
368
369 return drm_platform_init(&exynos_drm_driver, pdev);
370}
371
372static int exynos_drm_platform_remove(struct platform_device *pdev)
373{
374 drm_put_dev(platform_get_drvdata(pdev));
375
376 return 0;
377}
378
379#ifdef CONFIG_PM_SLEEP 359#ifdef CONFIG_PM_SLEEP
380static int exynos_drm_sys_suspend(struct device *dev) 360static int exynos_drm_sys_suspend(struct device *dev)
381{ 361{
@@ -400,196 +380,319 @@ static int exynos_drm_sys_resume(struct device *dev)
400} 380}
401#endif 381#endif
402 382
403#ifdef CONFIG_PM_RUNTIME 383static const struct dev_pm_ops exynos_drm_pm_ops = {
404static int exynos_drm_runtime_suspend(struct device *dev) 384 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
385};
386
387int exynos_drm_component_add(struct device *dev,
388 enum exynos_drm_device_type dev_type,
389 enum exynos_drm_output_type out_type)
405{ 390{
406 struct drm_device *drm_dev = dev_get_drvdata(dev); 391 struct component_dev *cdev;
407 pm_message_t message;
408 392
409 if (pm_runtime_suspended(dev)) 393 if (dev_type != EXYNOS_DEVICE_TYPE_CRTC &&
410 return 0; 394 dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) {
395 DRM_ERROR("invalid device type.\n");
396 return -EINVAL;
397 }
411 398
412 message.event = PM_EVENT_SUSPEND; 399 mutex_lock(&drm_component_lock);
413 return exynos_drm_suspend(drm_dev, message); 400
401 /*
402 * Make sure to check if there is a component which has two device
403 * objects, for connector and for encoder/connector.
404 * It should make sure that crtc and encoder/connector drivers are
405 * ready before exynos drm core binds them.
406 */
407 list_for_each_entry(cdev, &drm_component_list, list) {
408 if (cdev->out_type == out_type) {
409 /*
410 * If crtc and encoder/connector device objects are
411 * added already just return.
412 */
413 if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC |
414 EXYNOS_DEVICE_TYPE_CONNECTOR)) {
415 mutex_unlock(&drm_component_lock);
416 return 0;
417 }
418
419 if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
420 cdev->crtc_dev = dev;
421 cdev->dev_type_flag |= dev_type;
422 }
423
424 if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
425 cdev->conn_dev = dev;
426 cdev->dev_type_flag |= dev_type;
427 }
428
429 mutex_unlock(&drm_component_lock);
430 return 0;
431 }
432 }
433
434 mutex_unlock(&drm_component_lock);
435
436 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
437 if (!cdev)
438 return -ENOMEM;
439
440 if (dev_type == EXYNOS_DEVICE_TYPE_CRTC)
441 cdev->crtc_dev = dev;
442 if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR)
443 cdev->conn_dev = dev;
444
445 cdev->out_type = out_type;
446 cdev->dev_type_flag = dev_type;
447
448 mutex_lock(&drm_component_lock);
449 list_add_tail(&cdev->list, &drm_component_list);
450 mutex_unlock(&drm_component_lock);
451
452 return 0;
414} 453}
415 454
416static int exynos_drm_runtime_resume(struct device *dev) 455void exynos_drm_component_del(struct device *dev,
456 enum exynos_drm_device_type dev_type)
417{ 457{
418 struct drm_device *drm_dev = dev_get_drvdata(dev); 458 struct component_dev *cdev, *next;
419 459
420 if (!pm_runtime_suspended(dev)) 460 mutex_lock(&drm_component_lock);
421 return 0;
422 461
423 return exynos_drm_resume(drm_dev); 462 list_for_each_entry_safe(cdev, next, &drm_component_list, list) {
463 if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
464 if (cdev->crtc_dev == dev) {
465 cdev->crtc_dev = NULL;
466 cdev->dev_type_flag &= ~dev_type;
467 }
468 }
469
470 if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
471 if (cdev->conn_dev == dev) {
472 cdev->conn_dev = NULL;
473 cdev->dev_type_flag &= ~dev_type;
474 }
475 }
476
477 /*
478 * Release cdev object only in case that both of crtc and
479 * encoder/connector device objects are NULL.
480 */
481 if (!cdev->crtc_dev && !cdev->conn_dev) {
482 list_del(&cdev->list);
483 kfree(cdev);
484 }
485
486 break;
487 }
488
489 mutex_unlock(&drm_component_lock);
424} 490}
425#endif
426 491
427static const struct dev_pm_ops exynos_drm_pm_ops = { 492static int compare_of(struct device *dev, void *data)
428 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume) 493{
429 SET_RUNTIME_PM_OPS(exynos_drm_runtime_suspend, 494 return dev == (struct device *)data;
430 exynos_drm_runtime_resume, NULL) 495}
431};
432 496
433static struct platform_driver exynos_drm_platform_driver = { 497static int exynos_drm_add_components(struct device *dev, struct master *m)
434 .probe = exynos_drm_platform_probe, 498{
435 .remove = exynos_drm_platform_remove, 499 struct component_dev *cdev;
436 .driver = { 500 unsigned int attach_cnt = 0;
437 .owner = THIS_MODULE, 501
438 .name = "exynos-drm", 502 mutex_lock(&drm_component_lock);
439 .pm = &exynos_drm_pm_ops, 503
440 }, 504 list_for_each_entry(cdev, &drm_component_list, list) {
505 int ret;
506
507 /*
508 * Add components to master only in case that crtc and
509 * encoder/connector device objects exist.
510 */
511 if (!cdev->crtc_dev || !cdev->conn_dev)
512 continue;
513
514 attach_cnt++;
515
516 mutex_unlock(&drm_component_lock);
517
518 /*
519 * fimd and dpi modules have same device object so add
520 * only crtc device object in this case.
521 *
522 * TODO. if dpi module follows driver-model driver then
523 * below codes can be removed.
524 */
525 if (cdev->crtc_dev == cdev->conn_dev) {
526 ret = component_master_add_child(m, compare_of,
527 cdev->crtc_dev);
528 if (ret < 0)
529 return ret;
530
531 goto out_lock;
532 }
533
534 /*
535 * Do not chage below call order.
536 * crtc device first should be added to master because
537 * connector/encoder need pipe number of crtc when they
538 * are created.
539 */
540 ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
541 ret |= component_master_add_child(m, compare_of,
542 cdev->conn_dev);
543 if (ret < 0)
544 return ret;
545
546out_lock:
547 mutex_lock(&drm_component_lock);
548 }
549
550 mutex_unlock(&drm_component_lock);
551
552 return attach_cnt ? 0 : -ENODEV;
553}
554
555static int exynos_drm_bind(struct device *dev)
556{
557 return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
558}
559
560static void exynos_drm_unbind(struct device *dev)
561{
562 drm_put_dev(dev_get_drvdata(dev));
563}
564
565static const struct component_master_ops exynos_drm_ops = {
566 .add_components = exynos_drm_add_components,
567 .bind = exynos_drm_bind,
568 .unbind = exynos_drm_unbind,
441}; 569};
442 570
443static int __init exynos_drm_init(void) 571static int exynos_drm_platform_probe(struct platform_device *pdev)
444{ 572{
445 int ret; 573 int ret;
446 574
575 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
576 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
577
578#ifdef CONFIG_DRM_EXYNOS_FIMD
579 ret = platform_driver_register(&fimd_driver);
580 if (ret < 0)
581 return ret;
582#endif
583
447#ifdef CONFIG_DRM_EXYNOS_DP 584#ifdef CONFIG_DRM_EXYNOS_DP
448 ret = platform_driver_register(&dp_driver); 585 ret = platform_driver_register(&dp_driver);
449 if (ret < 0) 586 if (ret < 0)
450 goto out_dp; 587 goto err_unregister_fimd_drv;
451#endif 588#endif
452 589
453#ifdef CONFIG_DRM_EXYNOS_DSI 590#ifdef CONFIG_DRM_EXYNOS_DSI
454 ret = platform_driver_register(&dsi_driver); 591 ret = platform_driver_register(&dsi_driver);
455 if (ret < 0) 592 if (ret < 0)
456 goto out_dsi; 593 goto err_unregister_dp_drv;
457#endif
458
459#ifdef CONFIG_DRM_EXYNOS_FIMD
460 ret = platform_driver_register(&fimd_driver);
461 if (ret < 0)
462 goto out_fimd;
463#endif 594#endif
464 595
465#ifdef CONFIG_DRM_EXYNOS_HDMI 596#ifdef CONFIG_DRM_EXYNOS_HDMI
466 ret = platform_driver_register(&hdmi_driver);
467 if (ret < 0)
468 goto out_hdmi;
469 ret = platform_driver_register(&mixer_driver); 597 ret = platform_driver_register(&mixer_driver);
470 if (ret < 0) 598 if (ret < 0)
471 goto out_mixer; 599 goto err_unregister_dsi_drv;
472#endif 600 ret = platform_driver_register(&hdmi_driver);
473
474#ifdef CONFIG_DRM_EXYNOS_VIDI
475 ret = platform_driver_register(&vidi_driver);
476 if (ret < 0) 601 if (ret < 0)
477 goto out_vidi; 602 goto err_unregister_mixer_drv;
478#endif 603#endif
479 604
480#ifdef CONFIG_DRM_EXYNOS_G2D 605#ifdef CONFIG_DRM_EXYNOS_G2D
481 ret = platform_driver_register(&g2d_driver); 606 ret = platform_driver_register(&g2d_driver);
482 if (ret < 0) 607 if (ret < 0)
483 goto out_g2d; 608 goto err_unregister_hdmi_drv;
484#endif 609#endif
485 610
486#ifdef CONFIG_DRM_EXYNOS_FIMC 611#ifdef CONFIG_DRM_EXYNOS_FIMC
487 ret = platform_driver_register(&fimc_driver); 612 ret = platform_driver_register(&fimc_driver);
488 if (ret < 0) 613 if (ret < 0)
489 goto out_fimc; 614 goto err_unregister_g2d_drv;
490#endif 615#endif
491 616
492#ifdef CONFIG_DRM_EXYNOS_ROTATOR 617#ifdef CONFIG_DRM_EXYNOS_ROTATOR
493 ret = platform_driver_register(&rotator_driver); 618 ret = platform_driver_register(&rotator_driver);
494 if (ret < 0) 619 if (ret < 0)
495 goto out_rotator; 620 goto err_unregister_fimc_drv;
496#endif 621#endif
497 622
498#ifdef CONFIG_DRM_EXYNOS_GSC 623#ifdef CONFIG_DRM_EXYNOS_GSC
499 ret = platform_driver_register(&gsc_driver); 624 ret = platform_driver_register(&gsc_driver);
500 if (ret < 0) 625 if (ret < 0)
501 goto out_gsc; 626 goto err_unregister_rotator_drv;
502#endif 627#endif
503 628
504#ifdef CONFIG_DRM_EXYNOS_IPP 629#ifdef CONFIG_DRM_EXYNOS_IPP
505 ret = platform_driver_register(&ipp_driver); 630 ret = platform_driver_register(&ipp_driver);
506 if (ret < 0) 631 if (ret < 0)
507 goto out_ipp; 632 goto err_unregister_gsc_drv;
508 633
509 ret = exynos_platform_device_ipp_register(); 634 ret = exynos_platform_device_ipp_register();
510 if (ret < 0) 635 if (ret < 0)
511 goto out_ipp_dev; 636 goto err_unregister_ipp_drv;
512#endif 637#endif
513 638
514 ret = platform_driver_register(&exynos_drm_platform_driver); 639 ret = component_master_add(&pdev->dev, &exynos_drm_ops);
515 if (ret < 0) 640 if (ret < 0)
516 goto out_drm; 641 DRM_DEBUG_KMS("will be re-tried after the last sub driver is probed.\n");
517
518 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
519 NULL, 0);
520 if (IS_ERR(exynos_drm_pdev)) {
521 ret = PTR_ERR(exynos_drm_pdev);
522 goto out;
523 }
524 642
525 return 0; 643 return 0;
526 644
527out:
528 platform_driver_unregister(&exynos_drm_platform_driver);
529
530out_drm:
531#ifdef CONFIG_DRM_EXYNOS_IPP 645#ifdef CONFIG_DRM_EXYNOS_IPP
532 exynos_platform_device_ipp_unregister(); 646err_unregister_ipp_drv:
533out_ipp_dev:
534 platform_driver_unregister(&ipp_driver); 647 platform_driver_unregister(&ipp_driver);
535out_ipp: 648err_unregister_gsc_drv:
536#endif 649#endif
537 650
538#ifdef CONFIG_DRM_EXYNOS_GSC 651#ifdef CONFIG_DRM_EXYNOS_GSC
539 platform_driver_unregister(&gsc_driver); 652 platform_driver_unregister(&gsc_driver);
540out_gsc: 653err_unregister_rotator_drv:
541#endif 654#endif
542 655
543#ifdef CONFIG_DRM_EXYNOS_ROTATOR 656#ifdef CONFIG_DRM_EXYNOS_ROTATOR
544 platform_driver_unregister(&rotator_driver); 657 platform_driver_unregister(&rotator_driver);
545out_rotator: 658err_unregister_fimc_drv:
546#endif 659#endif
547 660
548#ifdef CONFIG_DRM_EXYNOS_FIMC 661#ifdef CONFIG_DRM_EXYNOS_FIMC
549 platform_driver_unregister(&fimc_driver); 662 platform_driver_unregister(&fimc_driver);
550out_fimc: 663err_unregister_g2d_drv:
551#endif 664#endif
552 665
553#ifdef CONFIG_DRM_EXYNOS_G2D 666#ifdef CONFIG_DRM_EXYNOS_G2D
554 platform_driver_unregister(&g2d_driver); 667 platform_driver_unregister(&g2d_driver);
555out_g2d: 668err_unregister_hdmi_drv:
556#endif
557
558#ifdef CONFIG_DRM_EXYNOS_VIDI
559 platform_driver_unregister(&vidi_driver);
560out_vidi:
561#endif 669#endif
562 670
563#ifdef CONFIG_DRM_EXYNOS_HDMI 671#ifdef CONFIG_DRM_EXYNOS_HDMI
564 platform_driver_unregister(&mixer_driver);
565out_mixer:
566 platform_driver_unregister(&hdmi_driver); 672 platform_driver_unregister(&hdmi_driver);
567out_hdmi: 673err_unregister_mixer_drv:
568#endif 674 platform_driver_unregister(&mixer_driver);
569 675err_unregister_dsi_drv:
570#ifdef CONFIG_DRM_EXYNOS_FIMD
571 platform_driver_unregister(&fimd_driver);
572out_fimd:
573#endif 676#endif
574 677
575#ifdef CONFIG_DRM_EXYNOS_DSI 678#ifdef CONFIG_DRM_EXYNOS_DSI
576 platform_driver_unregister(&dsi_driver); 679 platform_driver_unregister(&dsi_driver);
577out_dsi: 680err_unregister_dp_drv:
578#endif 681#endif
579 682
580#ifdef CONFIG_DRM_EXYNOS_DP 683#ifdef CONFIG_DRM_EXYNOS_DP
581 platform_driver_unregister(&dp_driver); 684 platform_driver_unregister(&dp_driver);
582out_dp: 685err_unregister_fimd_drv:
686#endif
687
688#ifdef CONFIG_DRM_EXYNOS_FIMD
689 platform_driver_unregister(&fimd_driver);
583#endif 690#endif
584 return ret; 691 return ret;
585} 692}
586 693
587static void __exit exynos_drm_exit(void) 694static int exynos_drm_platform_remove(struct platform_device *pdev)
588{ 695{
589 platform_device_unregister(exynos_drm_pdev);
590
591 platform_driver_unregister(&exynos_drm_platform_driver);
592
593#ifdef CONFIG_DRM_EXYNOS_IPP 696#ifdef CONFIG_DRM_EXYNOS_IPP
594 exynos_platform_device_ipp_unregister(); 697 exynos_platform_device_ipp_unregister();
595 platform_driver_unregister(&ipp_driver); 698 platform_driver_unregister(&ipp_driver);
@@ -616,10 +719,6 @@ static void __exit exynos_drm_exit(void)
616 platform_driver_unregister(&hdmi_driver); 719 platform_driver_unregister(&hdmi_driver);
617#endif 720#endif
618 721
619#ifdef CONFIG_DRM_EXYNOS_VIDI
620 platform_driver_unregister(&vidi_driver);
621#endif
622
623#ifdef CONFIG_DRM_EXYNOS_FIMD 722#ifdef CONFIG_DRM_EXYNOS_FIMD
624 platform_driver_unregister(&fimd_driver); 723 platform_driver_unregister(&fimd_driver);
625#endif 724#endif
@@ -631,6 +730,59 @@ static void __exit exynos_drm_exit(void)
631#ifdef CONFIG_DRM_EXYNOS_DP 730#ifdef CONFIG_DRM_EXYNOS_DP
632 platform_driver_unregister(&dp_driver); 731 platform_driver_unregister(&dp_driver);
633#endif 732#endif
733 component_master_del(&pdev->dev, &exynos_drm_ops);
734 return 0;
735}
736
737static struct platform_driver exynos_drm_platform_driver = {
738 .probe = exynos_drm_platform_probe,
739 .remove = exynos_drm_platform_remove,
740 .driver = {
741 .owner = THIS_MODULE,
742 .name = "exynos-drm",
743 .pm = &exynos_drm_pm_ops,
744 },
745};
746
747static int exynos_drm_init(void)
748{
749 int ret;
750
751 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
752 NULL, 0);
753 if (IS_ERR(exynos_drm_pdev))
754 return PTR_ERR(exynos_drm_pdev);
755
756#ifdef CONFIG_DRM_EXYNOS_VIDI
757 ret = exynos_drm_probe_vidi();
758 if (ret < 0)
759 goto err_unregister_pd;
760#endif
761
762 ret = platform_driver_register(&exynos_drm_platform_driver);
763 if (ret)
764 goto err_remove_vidi;
765
766 return 0;
767
768err_remove_vidi:
769#ifdef CONFIG_DRM_EXYNOS_VIDI
770 exynos_drm_remove_vidi();
771err_unregister_pd:
772#endif
773
774 platform_device_unregister(exynos_drm_pdev);
775
776 return ret;
777}
778
779static void exynos_drm_exit(void)
780{
781#ifdef CONFIG_DRM_EXYNOS_VIDI
782 exynos_drm_remove_vidi();
783#endif
784 platform_device_unregister(exynos_drm_pdev);
785 platform_driver_unregister(&exynos_drm_platform_driver);
634} 786}
635 787
636module_init(exynos_drm_init); 788module_init(exynos_drm_init);
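
The probe path above is the core of this rework: instead of one monolithic init that registers every sub driver and the top-level platform driver in a fixed order, each sub device becomes a component and the exynos-drm platform device acts as the component master, creating the DRM device only once every announced component has probed. A minimal sketch of that pattern against the 3.16-era component API, using hypothetical "foo" names rather than the exynos code itself:

	#include <linux/component.h>
	#include <linux/platform_device.h>

	static int foo_compare(struct device *dev, void *data)
	{
		return dev == data;	/* match a component by device pointer */
	}

	static int foo_add_components(struct device *dev, struct master *m)
	{
		/* hand each expected child to the master; if one is missing
		 * the master stays unbound and this runs again when the
		 * next component registers */
		return component_master_add_child(m, foo_compare, dev);
	}

	static int foo_bind(struct device *dev)
	{
		return 0;	/* all components present: create the DRM device */
	}

	static void foo_unbind(struct device *dev)
	{
	}

	static const struct component_master_ops foo_master_ops = {
		.add_components	= foo_add_components,
		.bind		= foo_bind,
		.unbind		= foo_unbind,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* may fail until the last sub driver has probed, hence the
		 * DRM_DEBUG_KMS note in exynos_drm_platform_probe above */
		return component_master_add(&pdev->dev, &foo_master_ops);
	}
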
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ce3e6a30deaa..36535f398848 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -42,6 +42,13 @@ struct drm_connector;
42 42
43extern unsigned int drm_vblank_offdelay; 43extern unsigned int drm_vblank_offdelay;
44 44
45/* This enumerates device type. */
46enum exynos_drm_device_type {
47 EXYNOS_DEVICE_TYPE_NONE,
48 EXYNOS_DEVICE_TYPE_CRTC,
49 EXYNOS_DEVICE_TYPE_CONNECTOR,
50};
51
45/* this enumerates display type. */ 52/* this enumerates display type. */
46enum exynos_drm_output_type { 53enum exynos_drm_output_type {
47 EXYNOS_DISPLAY_TYPE_NONE, 54 EXYNOS_DISPLAY_TYPE_NONE,
@@ -122,7 +129,6 @@ struct exynos_drm_overlay {
122 * Exynos DRM Display Structure. 129 * Exynos DRM Display Structure.
123 * - this structure is common to analog tv, digital tv and lcd panel. 130 * - this structure is common to analog tv, digital tv and lcd panel.
124 * 131 *
125 * @initialize: initializes the display with drm_dev
126 * @remove: cleans up the display for removal 132 * @remove: cleans up the display for removal
127 * @mode_fixup: fix mode data comparing to hw specific display mode. 133 * @mode_fixup: fix mode data comparing to hw specific display mode.
128 * @mode_set: convert drm_display_mode to hw specific display mode and 134 * @mode_set: convert drm_display_mode to hw specific display mode and
@@ -133,8 +139,6 @@ struct exynos_drm_overlay {
133 */ 139 */
134struct exynos_drm_display; 140struct exynos_drm_display;
135struct exynos_drm_display_ops { 141struct exynos_drm_display_ops {
136 int (*initialize)(struct exynos_drm_display *display,
137 struct drm_device *drm_dev);
138 int (*create_connector)(struct exynos_drm_display *display, 142 int (*create_connector)(struct exynos_drm_display *display,
139 struct drm_encoder *encoder); 143 struct drm_encoder *encoder);
140 void (*remove)(struct exynos_drm_display *display); 144 void (*remove)(struct exynos_drm_display *display);
@@ -172,8 +176,6 @@ struct exynos_drm_display {
172/* 176/*
173 * Exynos drm manager ops 177 * Exynos drm manager ops
174 * 178 *
175 * @initialize: initializes the manager with drm_dev
176 * @remove: cleans up the manager for removal
177 * @dpms: control device power. 179 * @dpms: control device power.
178 * @mode_fixup: fix mode data before applying it 180 * @mode_fixup: fix mode data before applying it
179 * @mode_set: set the given mode to the manager 181 * @mode_set: set the given mode to the manager
@@ -189,9 +191,6 @@ struct exynos_drm_display {
189 */ 191 */
190struct exynos_drm_manager; 192struct exynos_drm_manager;
191struct exynos_drm_manager_ops { 193struct exynos_drm_manager_ops {
192 int (*initialize)(struct exynos_drm_manager *mgr,
193 struct drm_device *drm_dev, int pipe);
194 void (*remove)(struct exynos_drm_manager *mgr);
195 void (*dpms)(struct exynos_drm_manager *mgr, int mode); 194 void (*dpms)(struct exynos_drm_manager *mgr, int mode);
196 bool (*mode_fixup)(struct exynos_drm_manager *mgr, 195 bool (*mode_fixup)(struct exynos_drm_manager *mgr,
197 const struct drm_display_mode *mode, 196 const struct drm_display_mode *mode,
@@ -215,6 +214,7 @@ struct exynos_drm_manager_ops {
215 * @list: the list entry for this manager 214 * @list: the list entry for this manager
216 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. 215 * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
217 * @drm_dev: pointer to the drm device 216 * @drm_dev: pointer to the drm device
217 * @crtc: crtc object.
218 * @pipe: the pipe number for this crtc/manager 218 * @pipe: the pipe number for this crtc/manager
219 * @ops: pointer to callbacks for exynos drm specific functionality 219 * @ops: pointer to callbacks for exynos drm specific functionality
220 * @ctx: A pointer to the manager's implementation specific context 220 * @ctx: A pointer to the manager's implementation specific context
@@ -223,6 +223,7 @@ struct exynos_drm_manager {
223 struct list_head list; 223 struct list_head list;
224 enum exynos_drm_output_type type; 224 enum exynos_drm_output_type type;
225 struct drm_device *drm_dev; 225 struct drm_device *drm_dev;
226 struct drm_crtc *crtc;
226 int pipe; 227 int pipe;
227 struct exynos_drm_manager_ops *ops; 228 struct exynos_drm_manager_ops *ops;
228 void *ctx; 229 void *ctx;
@@ -254,6 +255,7 @@ struct drm_exynos_file_private {
254 * otherwise default one. 255 * otherwise default one.
255 * @da_space_size: size of device address space. 256 * @da_space_size: size of device address space.
256 * if 0 then default value is used for it. 257 * if 0 then default value is used for it.
258 * @pipe: the pipe number for this crtc/manager.
257 */ 259 */
258struct exynos_drm_private { 260struct exynos_drm_private {
259 struct drm_fb_helper *fb_helper; 261 struct drm_fb_helper *fb_helper;
@@ -271,6 +273,8 @@ struct exynos_drm_private {
271 273
272 unsigned long da_start; 274 unsigned long da_start;
273 unsigned long da_space_size; 275 unsigned long da_space_size;
276
277 unsigned int pipe;
274}; 278};
275 279
276/* 280/*
@@ -281,11 +285,11 @@ struct exynos_drm_private {
281 * @drm_dev: pointer to drm_device and this pointer would be set 285 * @drm_dev: pointer to drm_device and this pointer would be set
282 * when sub driver calls exynos_drm_subdrv_register(). 286 * when sub driver calls exynos_drm_subdrv_register().
283 * @manager: subdrv has its own manager to control a hardware appropriately 287 * @manager: subdrv has its own manager to control a hardware appropriately
284 * and we can access a hardware drawing on this manager. 288 * and we can access a hardware drawing on this manager.
285 * @probe: this callback would be called by exynos drm driver after 289 * @probe: this callback would be called by exynos drm driver after
286 * subdrv is registered to it. 290 * subdrv is registered to it.
287 * @remove: this callback is used to release resources created 291 * @remove: this callback is used to release resources created
288 * by probe callback. 292 * by probe callback.
289 * @open: this would be called with drm device file open. 293 * @open: this would be called with drm device file open.
290 * @close: this would be called with drm device file close. 294 * @close: this would be called with drm device file close.
291 */ 295 */
@@ -302,39 +306,14 @@ struct exynos_drm_subdrv {
302 struct drm_file *file); 306 struct drm_file *file);
303}; 307};
304 308
305/* 309 /* This function would be called by non kms drivers such as g2d and ipp. */
306 * this function calls a probe callback registered to sub driver list and
307 * create its own encoder and connector and then set drm_device object
308 * to global one.
309 */
310int exynos_drm_device_register(struct drm_device *dev);
311/*
312 * this function calls a remove callback registered to sub driver list and
313 * destroy its own encoder and connetor.
314 */
315int exynos_drm_device_unregister(struct drm_device *dev);
316
317int exynos_drm_initialize_managers(struct drm_device *dev);
318void exynos_drm_remove_managers(struct drm_device *dev);
319int exynos_drm_initialize_displays(struct drm_device *dev);
320void exynos_drm_remove_displays(struct drm_device *dev);
321
322int exynos_drm_manager_register(struct exynos_drm_manager *manager);
323int exynos_drm_manager_unregister(struct exynos_drm_manager *manager);
324int exynos_drm_display_register(struct exynos_drm_display *display);
325int exynos_drm_display_unregister(struct exynos_drm_display *display);
326
327/*
328 * this function would be called by sub drivers such as display controller
329 * or hdmi driver to register this sub driver object to exynos drm driver
330 * and when a sub driver is registered to exynos drm driver a probe callback
331 * of the sub driver is called and creates its own encoder and connector.
332 */
333int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv); 310int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
334 311
335/* this function removes subdrv list from exynos drm driver */ 312/* this function removes subdrv list from exynos drm driver */
336int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv); 313int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
337 314
315int exynos_drm_device_subdrv_probe(struct drm_device *dev);
316int exynos_drm_device_subdrv_remove(struct drm_device *dev);
338int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); 317int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
339void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); 318void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
340 319
@@ -360,18 +339,40 @@ int exynos_platform_device_ipp_register(void);
360void exynos_platform_device_ipp_unregister(void); 339void exynos_platform_device_ipp_unregister(void);
361 340
362#ifdef CONFIG_DRM_EXYNOS_DPI 341#ifdef CONFIG_DRM_EXYNOS_DPI
363int exynos_dpi_probe(struct device *dev); 342struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
364int exynos_dpi_remove(struct device *dev); 343int exynos_dpi_remove(struct device *dev);
365#else 344#else
366static inline int exynos_dpi_probe(struct device *dev) { return 0; } 345static inline struct exynos_drm_display *
346exynos_dpi_probe(struct device *dev) { return 0; }
367static inline int exynos_dpi_remove(struct device *dev) { return 0; } 347static inline int exynos_dpi_remove(struct device *dev) { return 0; }
368#endif 348#endif
369 349
350/*
351 * this function registers exynos drm vidi platform device/driver.
352 */
353int exynos_drm_probe_vidi(void);
354
355/*
 356 * this function unregisters the exynos drm vidi platform device/driver.
357 */
358void exynos_drm_remove_vidi(void);
359
 360/* This function creates an encoder and a connector, and initializes them. */
361int exynos_drm_create_enc_conn(struct drm_device *dev,
362 struct exynos_drm_display *display);
363
364int exynos_drm_component_add(struct device *dev,
365 enum exynos_drm_device_type dev_type,
366 enum exynos_drm_output_type out_type);
367
368void exynos_drm_component_del(struct device *dev,
369 enum exynos_drm_device_type dev_type);
370
371extern struct platform_driver fimd_driver;
370extern struct platform_driver dp_driver; 372extern struct platform_driver dp_driver;
371extern struct platform_driver dsi_driver; 373extern struct platform_driver dsi_driver;
372extern struct platform_driver fimd_driver;
373extern struct platform_driver hdmi_driver;
374extern struct platform_driver mixer_driver; 374extern struct platform_driver mixer_driver;
375extern struct platform_driver hdmi_driver;
375extern struct platform_driver exynos_drm_common_hdmi_driver; 376extern struct platform_driver exynos_drm_common_hdmi_driver;
376extern struct platform_driver vidi_driver; 377extern struct platform_driver vidi_driver;
377extern struct platform_driver g2d_driver; 378extern struct platform_driver g2d_driver;
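
exynos_drm_component_add()/exynos_drm_component_del(), declared above, are the glue between the sub drivers and the drm_component_list that exynos_drm_add_components() walks: each sub driver announces whether its device provides the crtc or the connector side of a pipeline (or both, as with fimd plus dpi sharing one device). A sketch of the expected call sequence in a connector-side sub driver, with placeholder "bar" names; the real usage appears in the DSI changes below:

	#include <linux/component.h>
	#include <linux/platform_device.h>
	#include "exynos_drm_drv.h"	/* exynos_drm_component_add() etc. */

	static int bar_bind(struct device *dev, struct device *master, void *data)
	{
		/* "data" is the drm_device; create the encoder and connector
		 * here, as exynos_dsi_bind() does below */
		return 0;
	}

	static void bar_unbind(struct device *dev, struct device *master, void *data)
	{
	}

	static const struct component_ops bar_component_ops = {
		.bind	= bar_bind,
		.unbind	= bar_unbind,
	};

	static int bar_probe(struct platform_device *pdev)
	{
		int ret;

		/* announce this device as the connector half of a pipeline */
		ret = exynos_drm_component_add(&pdev->dev,
					       EXYNOS_DEVICE_TYPE_CONNECTOR,
					       EXYNOS_DISPLAY_TYPE_LCD);
		if (ret)
			return ret;

		ret = component_add(&pdev->dev, &bar_component_ops);
		if (ret)
			exynos_drm_component_del(&pdev->dev,
						 EXYNOS_DEVICE_TYPE_CONNECTOR);
		return ret;
	}
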
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 4ac438187568..6302aa64f6c1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -19,6 +19,7 @@
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/phy/phy.h> 20#include <linux/phy/phy.h>
21#include <linux/regulator/consumer.h> 21#include <linux/regulator/consumer.h>
22#include <linux/component.h>
22 23
23#include <video/mipi_display.h> 24#include <video/mipi_display.h>
24#include <video/videomode.h> 25#include <video/videomode.h>
@@ -1378,16 +1379,60 @@ end:
1378 return ret; 1379 return ret;
1379} 1380}
1380 1381
1382static int exynos_dsi_bind(struct device *dev, struct device *master,
1383 void *data)
1384{
1385 struct drm_device *drm_dev = data;
1386 struct exynos_dsi *dsi;
1387 int ret;
1388
1389 ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
1390 if (ret) {
1391 DRM_ERROR("Encoder create [%d] failed with %d\n",
1392 exynos_dsi_display.type, ret);
1393 return ret;
1394 }
1395
1396 dsi = exynos_dsi_display.ctx;
1397
1398 return mipi_dsi_host_register(&dsi->dsi_host);
1399}
1400
1401static void exynos_dsi_unbind(struct device *dev, struct device *master,
1402 void *data)
1403{
1404 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1405 struct drm_encoder *encoder = dsi->encoder;
1406
1407 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1408
1409 mipi_dsi_host_unregister(&dsi->dsi_host);
1410
1411 encoder->funcs->destroy(encoder);
1412 drm_connector_cleanup(&dsi->connector);
1413}
1414
1415static const struct component_ops exynos_dsi_component_ops = {
1416 .bind = exynos_dsi_bind,
1417 .unbind = exynos_dsi_unbind,
1418};
1419
1381static int exynos_dsi_probe(struct platform_device *pdev) 1420static int exynos_dsi_probe(struct platform_device *pdev)
1382{ 1421{
1383 struct resource *res; 1422 struct resource *res;
1384 struct exynos_dsi *dsi; 1423 struct exynos_dsi *dsi;
1385 int ret; 1424 int ret;
1386 1425
1426 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
1427 exynos_dsi_display.type);
1428 if (ret)
1429 return ret;
1430
1387 dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); 1431 dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
1388 if (!dsi) { 1432 if (!dsi) {
1389 dev_err(&pdev->dev, "failed to allocate dsi object.\n"); 1433 dev_err(&pdev->dev, "failed to allocate dsi object.\n");
1390 return -ENOMEM; 1434 ret = -ENOMEM;
1435 goto err_del_component;
1391 } 1436 }
1392 1437
1393 init_completion(&dsi->completed); 1438 init_completion(&dsi->completed);
@@ -1401,7 +1446,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1401 1446
1402 ret = exynos_dsi_parse_dt(dsi); 1447 ret = exynos_dsi_parse_dt(dsi);
1403 if (ret) 1448 if (ret)
1404 return ret; 1449 goto err_del_component;
1405 1450
1406 dsi->supplies[0].supply = "vddcore"; 1451 dsi->supplies[0].supply = "vddcore";
1407 dsi->supplies[1].supply = "vddio"; 1452 dsi->supplies[1].supply = "vddio";
@@ -1415,32 +1460,37 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1415 dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk"); 1460 dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
1416 if (IS_ERR(dsi->pll_clk)) { 1461 if (IS_ERR(dsi->pll_clk)) {
1417 dev_info(&pdev->dev, "failed to get dsi pll input clock\n"); 1462 dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
1418 return -EPROBE_DEFER; 1463 ret = PTR_ERR(dsi->pll_clk);
1464 goto err_del_component;
1419 } 1465 }
1420 1466
1421 dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); 1467 dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
1422 if (IS_ERR(dsi->bus_clk)) { 1468 if (IS_ERR(dsi->bus_clk)) {
1423 dev_info(&pdev->dev, "failed to get dsi bus clock\n"); 1469 dev_info(&pdev->dev, "failed to get dsi bus clock\n");
1424 return -EPROBE_DEFER; 1470 ret = PTR_ERR(dsi->bus_clk);
1471 goto err_del_component;
1425 } 1472 }
1426 1473
1427 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1428 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); 1475 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
1429 if (IS_ERR(dsi->reg_base)) { 1476 if (IS_ERR(dsi->reg_base)) {
1430 dev_err(&pdev->dev, "failed to remap io region\n"); 1477 dev_err(&pdev->dev, "failed to remap io region\n");
1431 return PTR_ERR(dsi->reg_base); 1478 ret = PTR_ERR(dsi->reg_base);
1479 goto err_del_component;
1432 } 1480 }
1433 1481
1434 dsi->phy = devm_phy_get(&pdev->dev, "dsim"); 1482 dsi->phy = devm_phy_get(&pdev->dev, "dsim");
1435 if (IS_ERR(dsi->phy)) { 1483 if (IS_ERR(dsi->phy)) {
1436 dev_info(&pdev->dev, "failed to get dsim phy\n"); 1484 dev_info(&pdev->dev, "failed to get dsim phy\n");
1437 return -EPROBE_DEFER; 1485 ret = PTR_ERR(dsi->phy);
1486 goto err_del_component;
1438 } 1487 }
1439 1488
1440 dsi->irq = platform_get_irq(pdev, 0); 1489 dsi->irq = platform_get_irq(pdev, 0);
1441 if (dsi->irq < 0) { 1490 if (dsi->irq < 0) {
1442 dev_err(&pdev->dev, "failed to request dsi irq resource\n"); 1491 dev_err(&pdev->dev, "failed to request dsi irq resource\n");
1443 return dsi->irq; 1492 ret = dsi->irq;
1493 goto err_del_component;
1444 } 1494 }
1445 1495
1446 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN); 1496 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
@@ -1449,58 +1499,31 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1449 dev_name(&pdev->dev), dsi); 1499 dev_name(&pdev->dev), dsi);
1450 if (ret) { 1500 if (ret) {
1451 dev_err(&pdev->dev, "failed to request dsi irq\n"); 1501 dev_err(&pdev->dev, "failed to request dsi irq\n");
1452 return ret; 1502 goto err_del_component;
1453 } 1503 }
1454 1504
1455 exynos_dsi_display.ctx = dsi; 1505 exynos_dsi_display.ctx = dsi;
1456 1506
1457 platform_set_drvdata(pdev, &exynos_dsi_display); 1507 platform_set_drvdata(pdev, &exynos_dsi_display);
1458 exynos_drm_display_register(&exynos_dsi_display);
1459
1460 return mipi_dsi_host_register(&dsi->dsi_host);
1461}
1462
1463static int exynos_dsi_remove(struct platform_device *pdev)
1464{
1465 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1466
1467 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1468
1469 exynos_drm_display_unregister(&exynos_dsi_display);
1470 mipi_dsi_host_unregister(&dsi->dsi_host);
1471
1472 return 0;
1473}
1474 1508
1475#if CONFIG_PM_SLEEP 1509 ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
1476static int exynos_dsi_resume(struct device *dev) 1510 if (ret)
1477{ 1511 goto err_del_component;
1478 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1479 1512
1480 if (dsi->state & DSIM_STATE_ENABLED) { 1513 return ret;
1481 dsi->state &= ~DSIM_STATE_ENABLED;
1482 exynos_dsi_enable(dsi);
1483 }
1484 1514
1485 return 0; 1515err_del_component:
1516 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
1517 return ret;
1486} 1518}
1487 1519
1488static int exynos_dsi_suspend(struct device *dev) 1520static int exynos_dsi_remove(struct platform_device *pdev)
1489{ 1521{
1490 struct exynos_dsi *dsi = exynos_dsi_display.ctx; 1522 component_del(&pdev->dev, &exynos_dsi_component_ops);
1491 1523 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
1492 if (dsi->state & DSIM_STATE_ENABLED) {
1493 exynos_dsi_disable(dsi);
1494 dsi->state |= DSIM_STATE_ENABLED;
1495 }
1496 1524
1497 return 0; 1525 return 0;
1498} 1526}
1499#endif
1500
1501static const struct dev_pm_ops exynos_dsi_pm_ops = {
1502 SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
1503};
1504 1527
1505static struct of_device_id exynos_dsi_of_match[] = { 1528static struct of_device_id exynos_dsi_of_match[] = {
1506 { .compatible = "samsung,exynos4210-mipi-dsi" }, 1529 { .compatible = "samsung,exynos4210-mipi-dsi" },
@@ -1513,7 +1536,6 @@ struct platform_driver dsi_driver = {
1513 .driver = { 1536 .driver = {
1514 .name = "exynos-dsi", 1537 .name = "exynos-dsi",
1515 .owner = THIS_MODULE, 1538 .owner = THIS_MODULE,
1516 .pm = &exynos_dsi_pm_ops,
1517 .of_match_table = exynos_dsi_of_match, 1539 .of_match_table = exynos_dsi_of_match,
1518 }, 1540 },
1519}; 1541};
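
Two things change in exynos_dsi_probe() above: every early return is rerouted through err_del_component so the component registration made at the top of probe is always undone on failure, and the clk/phy lookups now propagate PTR_ERR() instead of hard-coding -EPROBE_DEFER; devm_clk_get() already returns -EPROBE_DEFER when the provider is not ready, while any other error should fail the probe outright. A condensed sketch of the idiom, with hypothetical helper names:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int baz_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		ret = baz_register_component(&pdev->dev);	/* side effect to undo */
		if (ret)
			return ret;

		clk = devm_clk_get(&pdev->dev, "bus_clk");
		if (IS_ERR(clk)) {
			/* may be -EPROBE_DEFER, but can also be a real
			 * error such as -ENOENT; pass it on either way */
			ret = PTR_ERR(clk);
			goto err_del_component;
		}

		return 0;

	err_del_component:
		baz_unregister_component(&pdev->dev);
		return ret;
	}
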
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index addbf7536da4..d771b467cf0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -121,16 +121,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
121 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 121 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
122 offset += fbi->var.yoffset * fb->pitches[0]; 122 offset += fbi->var.yoffset * fb->pitches[0];
123 123
124 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
125 fbi->screen_base = buffer->kvaddr + offset; 124 fbi->screen_base = buffer->kvaddr + offset;
126 if (is_drm_iommu_supported(dev))
127 fbi->fix.smem_start = (unsigned long)
128 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
129 else
130 fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
131
132 fbi->screen_size = size; 125 fbi->screen_size = size;
133 fbi->fix.smem_len = size;
134 126
135 return 0; 127 return 0;
136} 128}
@@ -237,7 +229,7 @@ static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
237 .fb_probe = exynos_drm_fbdev_create, 229 .fb_probe = exynos_drm_fbdev_create,
238}; 230};
239 231
240bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev) 232static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
241{ 233{
242 struct drm_connector *connector; 234 struct drm_connector *connector;
243 bool ret = false; 235 bool ret = false;
@@ -375,7 +367,5 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
375 if (!private || !private->fb_helper) 367 if (!private || !private->fb_helper)
376 return; 368 return;
377 369
378 drm_modeset_lock_all(dev); 370 drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
379 drm_fb_helper_restore_fbdev_mode(private->fb_helper);
380 drm_modeset_unlock_all(dev);
381} 371}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 30d76b2ff9c2..831dde9034c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -18,6 +18,7 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/spinlock.h>
21 22
22#include <drm/drmP.h> 23#include <drm/drmP.h>
23#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
@@ -57,7 +58,6 @@
57#define FIMC_SHFACTOR 10 58#define FIMC_SHFACTOR 10
58#define FIMC_BUF_STOP 1 59#define FIMC_BUF_STOP 1
59#define FIMC_BUF_START 2 60#define FIMC_BUF_START 2
60#define FIMC_REG_SZ 32
61#define FIMC_WIDTH_ITU_709 1280 61#define FIMC_WIDTH_ITU_709 1280
62#define FIMC_REFRESH_MAX 60 62#define FIMC_REFRESH_MAX 60
63#define FIMC_REFRESH_MIN 12 63#define FIMC_REFRESH_MIN 12
@@ -69,9 +69,6 @@
69#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) 69#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
70#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ 70#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
71 struct fimc_context, ippdrv); 71 struct fimc_context, ippdrv);
72#define fimc_read(offset) readl(ctx->regs + (offset))
73#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
74
75enum fimc_wb { 72enum fimc_wb {
76 FIMC_WB_NONE, 73 FIMC_WB_NONE,
77 FIMC_WB_A, 74 FIMC_WB_A,
@@ -161,7 +158,7 @@ struct fimc_context {
161 struct exynos_drm_ippdrv ippdrv; 158 struct exynos_drm_ippdrv ippdrv;
162 struct resource *regs_res; 159 struct resource *regs_res;
163 void __iomem *regs; 160 void __iomem *regs;
164 struct mutex lock; 161 spinlock_t lock;
165 struct clk *clocks[FIMC_CLKS_MAX]; 162 struct clk *clocks[FIMC_CLKS_MAX];
166 u32 clk_frequency; 163 u32 clk_frequency;
167 struct regmap *sysreg; 164 struct regmap *sysreg;
@@ -172,39 +169,53 @@ struct fimc_context {
172 bool suspended; 169 bool suspended;
173}; 170};
174 171
172static u32 fimc_read(struct fimc_context *ctx, u32 reg)
173{
174 return readl(ctx->regs + reg);
175}
176
177static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg)
178{
179 writel(val, ctx->regs + reg);
180}
181
182static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
183{
184 void __iomem *r = ctx->regs + reg;
185
186 writel(readl(r) | bits, r);
187}
188
189static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
190{
191 void __iomem *r = ctx->regs + reg;
192
193 writel(readl(r) & ~bits, r);
194}
195
175static void fimc_sw_reset(struct fimc_context *ctx) 196static void fimc_sw_reset(struct fimc_context *ctx)
176{ 197{
177 u32 cfg; 198 u32 cfg;
178 199
179 /* stop dma operation */ 200 /* stop dma operation */
180 cfg = fimc_read(EXYNOS_CISTATUS); 201 cfg = fimc_read(ctx, EXYNOS_CISTATUS);
181 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) { 202 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg))
182 cfg = fimc_read(EXYNOS_MSCTRL); 203 fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
183 cfg &= ~EXYNOS_MSCTRL_ENVID;
184 fimc_write(cfg, EXYNOS_MSCTRL);
185 }
186 204
187 cfg = fimc_read(EXYNOS_CISRCFMT); 205 fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT);
188 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
189 fimc_write(cfg, EXYNOS_CISRCFMT);
190 206
191 /* disable image capture */ 207 /* disable image capture */
192 cfg = fimc_read(EXYNOS_CIIMGCPT); 208 fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
193 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN); 209 EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
194 fimc_write(cfg, EXYNOS_CIIMGCPT);
195 210
196 /* s/w reset */ 211 /* s/w reset */
197 cfg = fimc_read(EXYNOS_CIGCTRL); 212 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
198 cfg |= (EXYNOS_CIGCTRL_SWRST);
199 fimc_write(cfg, EXYNOS_CIGCTRL);
200 213
201 /* s/w reset complete */ 214 /* s/w reset complete */
202 cfg = fimc_read(EXYNOS_CIGCTRL); 215 fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
203 cfg &= ~EXYNOS_CIGCTRL_SWRST;
204 fimc_write(cfg, EXYNOS_CIGCTRL);
205 216
206 /* reset sequence */ 217 /* reset sequence */
207 fimc_write(0x0, EXYNOS_CIFCNTSEQ); 218 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
208} 219}
209 220
210static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 221static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
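
The accessors introduced above replace macros of the form #define fimc_read(offset) readl(ctx->regs + (offset)), which compiled only where a local variable happened to be named ctx. Passing the context explicitly removes that hidden coupling, and fimc_set_bits()/fimc_clear_bits() fold away the read-modify-write sequences that fimc_sw_reset() used to spell out. The same pattern in isolation, with a hypothetical "qux" register block:

	#include <linux/io.h>
	#include <linux/types.h>

	struct qux_context {
		void __iomem *regs;
	};

	/* old style: only compiles where a variable named "ctx" is in scope */
	#define qux_read_implicit(offset)	readl(ctx->regs + (offset))

	/* new style: the dependency is explicit and type-checked */
	static u32 qux_read(struct qux_context *ctx, u32 reg)
	{
		return readl(ctx->regs + reg);
	}

	static void qux_set_bits(struct qux_context *ctx, u32 reg, u32 bits)
	{
		void __iomem *r = ctx->regs + reg;

		writel(readl(r) | bits, r);	/* read-modify-write in one place */
	}
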
@@ -220,7 +231,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
220 231
221 DRM_DEBUG_KMS("wb[%d]\n", wb); 232 DRM_DEBUG_KMS("wb[%d]\n", wb);
222 233
223 cfg = fimc_read(EXYNOS_CIGCTRL); 234 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
224 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | 235 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
225 EXYNOS_CIGCTRL_SELCAM_ITU_MASK | 236 EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
226 EXYNOS_CIGCTRL_SELCAM_MIPI_MASK | 237 EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
@@ -246,7 +257,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
246 break; 257 break;
247 } 258 }
248 259
249 fimc_write(cfg, EXYNOS_CIGCTRL); 260 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
250} 261}
251 262
252static void fimc_set_polarity(struct fimc_context *ctx, 263static void fimc_set_polarity(struct fimc_context *ctx,
@@ -259,7 +270,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
259 DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n", 270 DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
260 pol->inv_href, pol->inv_hsync); 271 pol->inv_href, pol->inv_hsync);
261 272
262 cfg = fimc_read(EXYNOS_CIGCTRL); 273 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
263 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC | 274 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
264 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC); 275 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
265 276
@@ -272,7 +283,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
272 if (pol->inv_hsync) 283 if (pol->inv_hsync)
273 cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC; 284 cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
274 285
275 fimc_write(cfg, EXYNOS_CIGCTRL); 286 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
276} 287}
277 288
278static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable) 289static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
@@ -281,70 +292,54 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
281 292
282 DRM_DEBUG_KMS("enable[%d]\n", enable); 293 DRM_DEBUG_KMS("enable[%d]\n", enable);
283 294
284 cfg = fimc_read(EXYNOS_CIGCTRL); 295 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
285 if (enable) 296 if (enable)
286 cfg |= EXYNOS_CIGCTRL_CAM_JPEG; 297 cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
287 else 298 else
288 cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG; 299 cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
289 300
290 fimc_write(cfg, EXYNOS_CIGCTRL); 301 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
291} 302}
292 303
293static void fimc_handle_irq(struct fimc_context *ctx, bool enable, 304static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
294 bool overflow, bool level)
295{ 305{
296 u32 cfg; 306 u32 cfg;
297 307
298 DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n", 308 DRM_DEBUG_KMS("enable[%d]\n", enable);
299 enable, overflow, level);
300 309
301 cfg = fimc_read(EXYNOS_CIGCTRL); 310 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
302 if (enable) { 311 if (enable) {
303 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL); 312 cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN;
304 cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE; 313 cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL;
305 if (overflow)
306 cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
307 if (level)
308 cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
309 } else 314 } else
310 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE); 315 cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE;
311 316 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
312 fimc_write(cfg, EXYNOS_CIGCTRL);
313} 317}
314 318
315static void fimc_clear_irq(struct fimc_context *ctx) 319static void fimc_clear_irq(struct fimc_context *ctx)
316{ 320{
317 u32 cfg; 321 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR);
318
319 cfg = fimc_read(EXYNOS_CIGCTRL);
320 cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
321 fimc_write(cfg, EXYNOS_CIGCTRL);
322} 322}
323 323
324static bool fimc_check_ovf(struct fimc_context *ctx) 324static bool fimc_check_ovf(struct fimc_context *ctx)
325{ 325{
326 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 326 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
327 u32 cfg, status, flag; 327 u32 status, flag;
328 328
329 status = fimc_read(EXYNOS_CISTATUS); 329 status = fimc_read(ctx, EXYNOS_CISTATUS);
330 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB | 330 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
331 EXYNOS_CISTATUS_OVFICR; 331 EXYNOS_CISTATUS_OVFICR;
332 332
333 DRM_DEBUG_KMS("flag[0x%x]\n", flag); 333 DRM_DEBUG_KMS("flag[0x%x]\n", flag);
334 334
335 if (status & flag) { 335 if (status & flag) {
336 cfg = fimc_read(EXYNOS_CIWDOFST); 336 fimc_set_bits(ctx, EXYNOS_CIWDOFST,
337 cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | 337 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
338 EXYNOS_CIWDOFST_CLROVFICR); 338 EXYNOS_CIWDOFST_CLROVFICR);
339 339 fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
340 fimc_write(cfg, EXYNOS_CIWDOFST); 340 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
341
342 cfg = fimc_read(EXYNOS_CIWDOFST);
343 cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
344 EXYNOS_CIWDOFST_CLROVFICR); 341 EXYNOS_CIWDOFST_CLROVFICR);
345 342
346 fimc_write(cfg, EXYNOS_CIWDOFST);
347
348 dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n", 343
349 ctx->id, status); 344 ctx->id, status);
350 return true; 345 return true;
@@ -357,7 +352,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
357{ 352{
358 u32 cfg; 353 u32 cfg;
359 354
360 cfg = fimc_read(EXYNOS_CISTATUS); 355 cfg = fimc_read(ctx, EXYNOS_CISTATUS);
361 356
362 DRM_DEBUG_KMS("cfg[0x%x]\n", cfg); 357 DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
363 358
@@ -365,7 +360,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
365 return false; 360 return false;
366 361
367 cfg &= ~(EXYNOS_CISTATUS_FRAMEEND); 362 cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
368 fimc_write(cfg, EXYNOS_CISTATUS); 363 fimc_write(ctx, cfg, EXYNOS_CISTATUS);
369 364
370 return true; 365 return true;
371} 366}
@@ -375,7 +370,7 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
375 u32 cfg; 370 u32 cfg;
376 int frame_cnt, buf_id; 371 int frame_cnt, buf_id;
377 372
378 cfg = fimc_read(EXYNOS_CISTATUS2); 373 cfg = fimc_read(ctx, EXYNOS_CISTATUS2);
379 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg); 374 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
380 375
381 if (frame_cnt == 0) 376 if (frame_cnt == 0)
@@ -402,13 +397,13 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
402 397
403 DRM_DEBUG_KMS("enable[%d]\n", enable); 398 DRM_DEBUG_KMS("enable[%d]\n", enable);
404 399
405 cfg = fimc_read(EXYNOS_CIOCTRL); 400 cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
406 if (enable) 401 if (enable)
407 cfg |= EXYNOS_CIOCTRL_LASTENDEN; 402 cfg |= EXYNOS_CIOCTRL_LASTENDEN;
408 else 403 else
409 cfg &= ~EXYNOS_CIOCTRL_LASTENDEN; 404 cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
410 405
411 fimc_write(cfg, EXYNOS_CIOCTRL); 406 fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
412} 407}
413 408
414 409
@@ -420,18 +415,18 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
420 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 415 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
421 416
422 /* RGB */ 417 /* RGB */
423 cfg = fimc_read(EXYNOS_CISCCTRL); 418 cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
424 cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK; 419 cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
425 420
426 switch (fmt) { 421 switch (fmt) {
427 case DRM_FORMAT_RGB565: 422 case DRM_FORMAT_RGB565:
428 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; 423 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
429 fimc_write(cfg, EXYNOS_CISCCTRL); 424 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
430 return 0; 425 return 0;
431 case DRM_FORMAT_RGB888: 426 case DRM_FORMAT_RGB888:
432 case DRM_FORMAT_XRGB8888: 427 case DRM_FORMAT_XRGB8888:
433 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888; 428 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
434 fimc_write(cfg, EXYNOS_CISCCTRL); 429 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
435 return 0; 430 return 0;
436 default: 431 default:
437 /* bypass */ 432 /* bypass */
@@ -439,7 +434,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
439 } 434 }
440 435
441 /* YUV */ 436 /* YUV */
442 cfg = fimc_read(EXYNOS_MSCTRL); 437 cfg = fimc_read(ctx, EXYNOS_MSCTRL);
443 cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK | 438 cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
444 EXYNOS_MSCTRL_C_INT_IN_2PLANE | 439 EXYNOS_MSCTRL_C_INT_IN_2PLANE |
445 EXYNOS_MSCTRL_ORDER422_YCBYCR); 440 EXYNOS_MSCTRL_ORDER422_YCBYCR);
@@ -479,7 +474,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
479 return -EINVAL; 474 return -EINVAL;
480 } 475 }
481 476
482 fimc_write(cfg, EXYNOS_MSCTRL); 477 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
483 478
484 return 0; 479 return 0;
485} 480}
@@ -492,7 +487,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
492 487
493 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 488 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
494 489
495 cfg = fimc_read(EXYNOS_MSCTRL); 490 cfg = fimc_read(ctx, EXYNOS_MSCTRL);
496 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB; 491 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
497 492
498 switch (fmt) { 493 switch (fmt) {
@@ -527,9 +522,9 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
527 return -EINVAL; 522 return -EINVAL;
528 } 523 }
529 524
530 fimc_write(cfg, EXYNOS_MSCTRL); 525 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
531 526
532 cfg = fimc_read(EXYNOS_CIDMAPARAM); 527 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
533 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; 528 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
534 529
535 if (fmt == DRM_FORMAT_NV12MT) 530 if (fmt == DRM_FORMAT_NV12MT)
@@ -537,7 +532,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
537 else 532 else
538 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; 533 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
539 534
540 fimc_write(cfg, EXYNOS_CIDMAPARAM); 535 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
541 536
542 return fimc_src_set_fmt_order(ctx, fmt); 537 return fimc_src_set_fmt_order(ctx, fmt);
543} 538}
@@ -552,11 +547,11 @@ static int fimc_src_set_transf(struct device *dev,
552 547
553 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 548 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
554 549
555 cfg1 = fimc_read(EXYNOS_MSCTRL); 550 cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
556 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | 551 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
557 EXYNOS_MSCTRL_FLIP_Y_MIRROR); 552 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
558 553
559 cfg2 = fimc_read(EXYNOS_CITRGFMT); 554 cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT);
560 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; 555 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
561 556
562 switch (degree) { 557 switch (degree) {
@@ -595,8 +590,8 @@ static int fimc_src_set_transf(struct device *dev,
595 return -EINVAL; 590 return -EINVAL;
596 } 591 }
597 592
598 fimc_write(cfg1, EXYNOS_MSCTRL); 593 fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
599 fimc_write(cfg2, EXYNOS_CITRGFMT); 594 fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
600 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0; 595 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
601 596
602 return 0; 597 return 0;
@@ -621,17 +616,17 @@ static int fimc_set_window(struct fimc_context *ctx,
621 * set window offset 1, 2 size 616 * set window offset 1, 2 size
622 * check figure 43-21 in user manual 617 * check figure 43-21 in user manual
623 */ 618 */
624 cfg = fimc_read(EXYNOS_CIWDOFST); 619 cfg = fimc_read(ctx, EXYNOS_CIWDOFST);
625 cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK | 620 cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
626 EXYNOS_CIWDOFST_WINVEROFST_MASK); 621 EXYNOS_CIWDOFST_WINVEROFST_MASK);
627 cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) | 622 cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
628 EXYNOS_CIWDOFST_WINVEROFST(v1)); 623 EXYNOS_CIWDOFST_WINVEROFST(v1));
629 cfg |= EXYNOS_CIWDOFST_WINOFSEN; 624 cfg |= EXYNOS_CIWDOFST_WINOFSEN;
630 fimc_write(cfg, EXYNOS_CIWDOFST); 625 fimc_write(ctx, cfg, EXYNOS_CIWDOFST);
631 626
632 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | 627 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
633 EXYNOS_CIWDOFST2_WINVEROFST2(v2)); 628 EXYNOS_CIWDOFST2_WINVEROFST2(v2));
634 fimc_write(cfg, EXYNOS_CIWDOFST2); 629 fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
635 630
636 return 0; 631 return 0;
637} 632}
@@ -651,7 +646,7 @@ static int fimc_src_set_size(struct device *dev, int swap,
651 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | 646 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
652 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); 647 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
653 648
654 fimc_write(cfg, EXYNOS_ORGISIZE); 649 fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
655 650
656 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 651 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
657 652
@@ -663,12 +658,12 @@ static int fimc_src_set_size(struct device *dev, int swap,
663 } 658 }
664 659
665 /* set input DMA image size */ 660 /* set input DMA image size */
666 cfg = fimc_read(EXYNOS_CIREAL_ISIZE); 661 cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
667 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | 662 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
668 EXYNOS_CIREAL_ISIZE_WIDTH_MASK); 663 EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
669 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | 664 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
670 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); 665 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
671 fimc_write(cfg, EXYNOS_CIREAL_ISIZE); 666 fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
672 667
673 /* 668 /*
674 * set input FIFO image size 669 * set input FIFO image size
@@ -677,18 +672,18 @@ static int fimc_src_set_size(struct device *dev, int swap,
677 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | 672 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
678 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) | 673 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
679 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize)); 674 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
680 fimc_write(cfg, EXYNOS_CISRCFMT); 675 fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
681 676
682 /* offset Y(RGB), Cb, Cr */ 677 /* offset Y(RGB), Cb, Cr */
683 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) | 678 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
684 EXYNOS_CIIYOFF_VERTICAL(img_pos.y)); 679 EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
685 fimc_write(cfg, EXYNOS_CIIYOFF); 680 fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
686 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) | 681 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
687 EXYNOS_CIICBOFF_VERTICAL(img_pos.y)); 682 EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
688 fimc_write(cfg, EXYNOS_CIICBOFF); 683 fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
689 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) | 684 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
690 EXYNOS_CIICROFF_VERTICAL(img_pos.y)); 685 EXYNOS_CIICROFF_VERTICAL(img_pos.y));
691 fimc_write(cfg, EXYNOS_CIICROFF); 686 fimc_write(ctx, cfg, EXYNOS_CIICROFF);
692 687
693 return fimc_set_window(ctx, &img_pos, &img_sz); 688 return fimc_set_window(ctx, &img_pos, &img_sz);
694} 689}
@@ -722,25 +717,25 @@ static int fimc_src_set_addr(struct device *dev,
722 switch (buf_type) { 717 switch (buf_type) {
723 case IPP_BUF_ENQUEUE: 718 case IPP_BUF_ENQUEUE:
724 config = &property->config[EXYNOS_DRM_OPS_SRC]; 719 config = &property->config[EXYNOS_DRM_OPS_SRC];
725 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 720 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
726 EXYNOS_CIIYSA(buf_id)); 721 EXYNOS_CIIYSA(buf_id));
727 722
728 if (config->fmt == DRM_FORMAT_YVU420) { 723 if (config->fmt == DRM_FORMAT_YVU420) {
729 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 724 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
730 EXYNOS_CIICBSA(buf_id)); 725 EXYNOS_CIICBSA(buf_id));
731 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 726 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
732 EXYNOS_CIICRSA(buf_id)); 727 EXYNOS_CIICRSA(buf_id));
733 } else { 728 } else {
734 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 729 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
735 EXYNOS_CIICBSA(buf_id)); 730 EXYNOS_CIICBSA(buf_id));
736 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 731 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
737 EXYNOS_CIICRSA(buf_id)); 732 EXYNOS_CIICRSA(buf_id));
738 } 733 }
739 break; 734 break;
740 case IPP_BUF_DEQUEUE: 735 case IPP_BUF_DEQUEUE:
741 fimc_write(0x0, EXYNOS_CIIYSA(buf_id)); 736 fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
742 fimc_write(0x0, EXYNOS_CIICBSA(buf_id)); 737 fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
743 fimc_write(0x0, EXYNOS_CIICRSA(buf_id)); 738 fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
744 break; 739 break;
745 default: 740 default:
746 /* bypass */ 741 /* bypass */
@@ -765,22 +760,22 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
765 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 760 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
766 761
767 /* RGB */ 762 /* RGB */
768 cfg = fimc_read(EXYNOS_CISCCTRL); 763 cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
769 cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK; 764 cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
770 765
771 switch (fmt) { 766 switch (fmt) {
772 case DRM_FORMAT_RGB565: 767 case DRM_FORMAT_RGB565:
773 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; 768 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
774 fimc_write(cfg, EXYNOS_CISCCTRL); 769 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
775 return 0; 770 return 0;
776 case DRM_FORMAT_RGB888: 771 case DRM_FORMAT_RGB888:
777 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; 772 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
778 fimc_write(cfg, EXYNOS_CISCCTRL); 773 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
779 return 0; 774 return 0;
780 case DRM_FORMAT_XRGB8888: 775 case DRM_FORMAT_XRGB8888:
781 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | 776 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
782 EXYNOS_CISCCTRL_EXTRGB_EXTENSION); 777 EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
783 fimc_write(cfg, EXYNOS_CISCCTRL); 778 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
784 break; 779 break;
785 default: 780 default:
786 /* bypass */ 781 /* bypass */
@@ -788,7 +783,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
788 } 783 }
789 784
790 /* YUV */ 785 /* YUV */
791 cfg = fimc_read(EXYNOS_CIOCTRL); 786 cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
792 cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK | 787 cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
793 EXYNOS_CIOCTRL_ORDER422_MASK | 788 EXYNOS_CIOCTRL_ORDER422_MASK |
794 EXYNOS_CIOCTRL_YCBCR_PLANE_MASK); 789 EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
@@ -830,7 +825,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
830 return -EINVAL; 825 return -EINVAL;
831 } 826 }
832 827
833 fimc_write(cfg, EXYNOS_CIOCTRL); 828 fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
834 829
835 return 0; 830 return 0;
836} 831}
@@ -843,16 +838,16 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
843 838
844 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); 839 DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
845 840
846 cfg = fimc_read(EXYNOS_CIEXTEN); 841 cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
847 842
848 if (fmt == DRM_FORMAT_AYUV) { 843 if (fmt == DRM_FORMAT_AYUV) {
849 cfg |= EXYNOS_CIEXTEN_YUV444_OUT; 844 cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
850 fimc_write(cfg, EXYNOS_CIEXTEN); 845 fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
851 } else { 846 } else {
852 cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT; 847 cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
853 fimc_write(cfg, EXYNOS_CIEXTEN); 848 fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
854 849
855 cfg = fimc_read(EXYNOS_CITRGFMT); 850 cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
856 cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK; 851 cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
857 852
858 switch (fmt) { 853 switch (fmt) {
@@ -885,10 +880,10 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
885 return -EINVAL; 880 return -EINVAL;
886 } 881 }
887 882
888 fimc_write(cfg, EXYNOS_CITRGFMT); 883 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
889 } 884 }
890 885
891 cfg = fimc_read(EXYNOS_CIDMAPARAM); 886 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
892 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; 887 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
893 888
894 if (fmt == DRM_FORMAT_NV12MT) 889 if (fmt == DRM_FORMAT_NV12MT)
@@ -896,7 +891,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
896 else 891 else
897 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; 892 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
898 893
899 fimc_write(cfg, EXYNOS_CIDMAPARAM); 894 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
900 895
901 return fimc_dst_set_fmt_order(ctx, fmt); 896 return fimc_dst_set_fmt_order(ctx, fmt);
902} 897}
@@ -911,7 +906,7 @@ static int fimc_dst_set_transf(struct device *dev,
911 906
912 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); 907 DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
913 908
914 cfg = fimc_read(EXYNOS_CITRGFMT); 909 cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
915 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; 910 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
916 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; 911 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
917 912
@@ -951,53 +946,23 @@ static int fimc_dst_set_transf(struct device *dev,
951 return -EINVAL; 946 return -EINVAL;
952 } 947 }
953 948
954 fimc_write(cfg, EXYNOS_CITRGFMT); 949 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
955 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0; 950 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
956 951
957 return 0; 952 return 0;
958} 953}
959 954
960static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
961{
962 DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
963
964 if (src >= dst * 64) {
965 DRM_ERROR("failed to make ratio and shift.\n");
966 return -EINVAL;
967 } else if (src >= dst * 32) {
968 *ratio = 32;
969 *shift = 5;
970 } else if (src >= dst * 16) {
971 *ratio = 16;
972 *shift = 4;
973 } else if (src >= dst * 8) {
974 *ratio = 8;
975 *shift = 3;
976 } else if (src >= dst * 4) {
977 *ratio = 4;
978 *shift = 2;
979 } else if (src >= dst * 2) {
980 *ratio = 2;
981 *shift = 1;
982 } else {
983 *ratio = 1;
984 *shift = 0;
985 }
986
987 return 0;
988}
989
990static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, 955static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
991 struct drm_exynos_pos *src, struct drm_exynos_pos *dst) 956 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
992{ 957{
993 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 958 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
994 u32 cfg, cfg_ext, shfactor; 959 u32 cfg, cfg_ext, shfactor;
995 u32 pre_dst_width, pre_dst_height; 960 u32 pre_dst_width, pre_dst_height;
996 u32 pre_hratio, hfactor, pre_vratio, vfactor; 961 u32 hfactor, vfactor;
997 int ret = 0; 962 int ret = 0;
998 u32 src_w, src_h, dst_w, dst_h; 963 u32 src_w, src_h, dst_w, dst_h;
999 964
1000 cfg_ext = fimc_read(EXYNOS_CITRGFMT); 965 cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
1001 if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) { 966 if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
1002 src_w = src->h; 967 src_w = src->h;
1003 src_h = src->w; 968 src_h = src->w;
@@ -1014,24 +979,24 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
1014 dst_h = dst->h; 979 dst_h = dst->h;
1015 } 980 }
1016 981
1017 ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor); 982 /* fimc_ippdrv_check_property assures that dividers are not null */
1018 if (ret) { 983 hfactor = fls(src_w / dst_w / 2);
984 if (hfactor > FIMC_SHFACTOR / 2) {
1019 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); 985 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
1020 return ret; 986 return -EINVAL;
1021 } 987 }
1022 988
1023 ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor); 989 vfactor = fls(src_h / dst_h / 2);
1024 if (ret) { 990 if (vfactor > FIMC_SHFACTOR / 2) {
1025 dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); 991 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
1026 return ret; 992 return -EINVAL;
1027 } 993 }
1028 994
1029 pre_dst_width = src_w / pre_hratio; 995 pre_dst_width = src_w >> hfactor;
1030 pre_dst_height = src_h / pre_vratio; 996 pre_dst_height = src_h >> vfactor;
1031 DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n", 997 DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
1032 pre_dst_width, pre_dst_height); 998 pre_dst_width, pre_dst_height);
1033 DRM_DEBUG_KMS("pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n", 999 DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
1034 pre_hratio, hfactor, pre_vratio, vfactor);
1035 1000
1036 sc->hratio = (src_w << 14) / (dst_w << hfactor); 1001 sc->hratio = (src_w << 14) / (dst_w << hfactor);
1037 sc->vratio = (src_h << 14) / (dst_h << vfactor); 1002 sc->vratio = (src_h << 14) / (dst_h << vfactor);
@@ -1044,13 +1009,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
1044 DRM_DEBUG_KMS("shfactor[%d]\n", shfactor); 1009 DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
1045 1010
1046 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) | 1011 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
1047 EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) | 1012 EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
1048 EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio)); 1013 EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor));
1049 fimc_write(cfg, EXYNOS_CISCPRERATIO); 1014 fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO);
1050 1015
1051 cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) | 1016 cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
1052 EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height)); 1017 EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
1053 fimc_write(cfg, EXYNOS_CISCPREDST); 1018 fimc_write(ctx, cfg, EXYNOS_CISCPREDST);
1054 1019
1055 return ret; 1020 return ret;
1056} 1021}
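
The hunks above replace the removed fimc_get_ratio_shift() ladder with a single fls() call: for an integer downscale ratio q = src / dst, fls(q / 2) yields exactly the shift the old if/else chain produced, and 1 << factor recovers the pre-scaler ratio written to CISCPRERATIO; factors above FIMC_SHFACTOR / 2 are rejected, which corresponds to the old src >= dst * 64 error path. A self-contained userspace sketch of that equivalence, with a local stand-in for the kernel's fls() (1-based index of the most significant set bit, 0 for 0):

	/* Self-contained check (userspace): fls(src / dst / 2) reproduces the
	 * removed fimc_get_ratio_shift() ladder for every valid ratio.
	 */
	#include <assert.h>
	#include <stdint.h>

	static uint32_t fls_like(uint32_t x)	/* stand-in for the kernel's fls() */
	{
		uint32_t n = 0;

		while (x) {
			n++;
			x >>= 1;
		}
		return n;
	}

	static void check(uint32_t src, uint32_t dst)
	{
		uint32_t shift;

		/* the removed ladder: ratio 32,16,8,4,2,1 -> shift 5..0 */
		if (src >= dst * 32)
			shift = 5;
		else if (src >= dst * 16)
			shift = 4;
		else if (src >= dst * 8)
			shift = 3;
		else if (src >= dst * 4)
			shift = 2;
		else if (src >= dst * 2)
			shift = 1;
		else
			shift = 0;

		assert(fls_like(src / dst / 2) == shift);
	}

	int main(void)
	{
		uint32_t src;

		for (src = 100; src < 6400; src++)	/* src >= dst * 64 is -EINVAL */
			check(src, 100);
		return 0;
	}
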
@@ -1064,7 +1029,7 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1064 DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n", 1029 DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
1065 sc->hratio, sc->vratio); 1030 sc->hratio, sc->vratio);
1066 1031
1067 cfg = fimc_read(EXYNOS_CISCCTRL); 1032 cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
1068 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS | 1033 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
1069 EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V | 1034 EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
1070 EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK | 1035 EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
@@ -1084,14 +1049,14 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1084 1049
1085 cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) | 1050 cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
1086 EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6))); 1051 EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
1087 fimc_write(cfg, EXYNOS_CISCCTRL); 1052 fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
1088 1053
1089 cfg_ext = fimc_read(EXYNOS_CIEXTEN); 1054 cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN);
1090 cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK; 1055 cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
1091 cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK; 1056 cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
1092 cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) | 1057 cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
1093 EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio)); 1058 EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
1094 fimc_write(cfg_ext, EXYNOS_CIEXTEN); 1059 fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
1095} 1060}
1096 1061
1097static int fimc_dst_set_size(struct device *dev, int swap, 1062static int fimc_dst_set_size(struct device *dev, int swap,
@@ -1109,12 +1074,12 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1109 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | 1074 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
1110 EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize)); 1075 EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
1111 1076
1112 fimc_write(cfg, EXYNOS_ORGOSIZE); 1077 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
1113 1078
1114 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); 1079 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
1115 1080
1116 /* CSC ITU */ 1081 /* CSC ITU */
1117 cfg = fimc_read(EXYNOS_CIGCTRL); 1082 cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
1118 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK; 1083 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
1119 1084
1120 if (sz->hsize >= FIMC_WIDTH_ITU_709) 1085 if (sz->hsize >= FIMC_WIDTH_ITU_709)
@@ -1122,7 +1087,7 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1122 else 1087 else
1123 cfg |= EXYNOS_CIGCTRL_CSC_ITU601; 1088 cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
1124 1089
1125 fimc_write(cfg, EXYNOS_CIGCTRL); 1090 fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
1126 1091
1127 if (swap) { 1092 if (swap) {
1128 img_pos.w = pos->h; 1093 img_pos.w = pos->h;
@@ -1132,41 +1097,38 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1132 } 1097 }
1133 1098
1134 /* target image size */ 1099 /* target image size */
1135 cfg = fimc_read(EXYNOS_CITRGFMT); 1100 cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
1136 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK | 1101 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
1137 EXYNOS_CITRGFMT_TARGETV_MASK); 1102 EXYNOS_CITRGFMT_TARGETV_MASK);
1138 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) | 1103 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
1139 EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h)); 1104 EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
1140 fimc_write(cfg, EXYNOS_CITRGFMT); 1105 fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
1141 1106
1142 /* target area */ 1107 /* target area */
1143 cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h); 1108 cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
1144 fimc_write(cfg, EXYNOS_CITAREA); 1109 fimc_write(ctx, cfg, EXYNOS_CITAREA);
1145 1110
1146 /* offset Y(RGB), Cb, Cr */ 1111 /* offset Y(RGB), Cb, Cr */
1147 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) | 1112 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
1148 EXYNOS_CIOYOFF_VERTICAL(img_pos.y)); 1113 EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
1149 fimc_write(cfg, EXYNOS_CIOYOFF); 1114 fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
1150 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) | 1115 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
1151 EXYNOS_CIOCBOFF_VERTICAL(img_pos.y)); 1116 EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
1152 fimc_write(cfg, EXYNOS_CIOCBOFF); 1117 fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
1153 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) | 1118 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
1154 EXYNOS_CIOCROFF_VERTICAL(img_pos.y)); 1119 EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
1155 fimc_write(cfg, EXYNOS_CIOCROFF); 1120 fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
1156 1121
1157 return 0; 1122 return 0;
1158} 1123}
1159 1124
1160static int fimc_dst_get_buf_seq(struct fimc_context *ctx) 1125static int fimc_dst_get_buf_count(struct fimc_context *ctx)
1161{ 1126{
1162 u32 cfg, i, buf_num = 0; 1127 u32 cfg, buf_num;
1163 u32 mask = 0x00000001;
1164 1128
1165 cfg = fimc_read(EXYNOS_CIFCNTSEQ); 1129 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1166 1130
1167 for (i = 0; i < FIMC_REG_SZ; i++) 1131 buf_num = hweight32(cfg);
1168 if (cfg & (mask << i))
1169 buf_num++;
1170 1132
1171 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num); 1133 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1172 1134
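
fimc_dst_get_buf_count() now counts the enabled buffer slots with hweight32(), the kernel's 32-bit population count, instead of testing each bit of CIFCNTSEQ in a loop. A minimal userspace sketch of the equivalence (FIMC_REG_SZ == 32 is an assumption here, one bit per slot in a u32):

	/* Sketch: hweight32()-style popcount equals the removed bit-test loop. */
	#include <assert.h>
	#include <stdint.h>

	#define FIMC_REG_SZ 32	/* assumption: one bit per buffer slot */

	static uint32_t hweight32_like(uint32_t w)
	{
		uint32_t n = 0;

		while (w) {
			n += w & 1;
			w >>= 1;
		}
		return n;
	}

	int main(void)
	{
		uint32_t cfg = 0x0000f00d;	/* arbitrary CIFCNTSEQ-style mask */
		uint32_t i, buf_num = 0;

		for (i = 0; i < FIMC_REG_SZ; i++)	/* the removed loop */
			if (cfg & (1u << i))
				buf_num++;

		assert(buf_num == hweight32_like(cfg));	/* both count 7 bits */
		return 0;
	}
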
@@ -1181,13 +1143,14 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1181 u32 cfg; 1143 u32 cfg;
1182 u32 mask = 0x00000001 << buf_id; 1144 u32 mask = 0x00000001 << buf_id;
1183 int ret = 0; 1145 int ret = 0;
1146 unsigned long flags;
1184 1147
1185 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 1148 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1186 1149
1187 mutex_lock(&ctx->lock); 1150 spin_lock_irqsave(&ctx->lock, flags);
1188 1151
1189 /* mask register set */ 1152 /* mask register set */
1190 cfg = fimc_read(EXYNOS_CIFCNTSEQ); 1153 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1191 1154
1192 switch (buf_type) { 1155 switch (buf_type) {
1193 case IPP_BUF_ENQUEUE: 1156 case IPP_BUF_ENQUEUE:
@@ -1205,20 +1168,20 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1205 /* sequence id */ 1168 /* sequence id */
1206 cfg &= ~mask; 1169 cfg &= ~mask;
1207 cfg |= (enable << buf_id); 1170 cfg |= (enable << buf_id);
1208 fimc_write(cfg, EXYNOS_CIFCNTSEQ); 1171 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
1209 1172
1210 /* interrupt enable */ 1173 /* interrupt enable */
1211 if (buf_type == IPP_BUF_ENQUEUE && 1174 if (buf_type == IPP_BUF_ENQUEUE &&
1212 fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START) 1175 fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
1213 fimc_handle_irq(ctx, true, false, true); 1176 fimc_mask_irq(ctx, true);
1214 1177
1215 /* interrupt disable */ 1178 /* interrupt disable */
1216 if (buf_type == IPP_BUF_DEQUEUE && 1179 if (buf_type == IPP_BUF_DEQUEUE &&
1217 fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP) 1180 fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
1218 fimc_handle_irq(ctx, false, false, true); 1181 fimc_mask_irq(ctx, false);
1219 1182
1220err_unlock: 1183err_unlock:
1221 mutex_unlock(&ctx->lock); 1184 spin_unlock_irqrestore(&ctx->lock, flags);
1222 return ret; 1185 return ret;
1223} 1186}
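
The buffer-sequence bookkeeping above switches from a mutex to a spinlock taken with spin_lock_irqsave(), presumably because this path can now run in atomic context where sleeping locks are forbidden; the probe and remove hunks further down make the matching spin_lock_init() change and drop mutex_destroy(). A hedged sketch of the pattern (demo_* names are illustrative, not the driver's code):

	/* Sketch: guarding state that is also touched from the interrupt
	 * handler. The _irqsave variant disables local interrupts so the
	 * handler cannot deadlock against us on this CPU.
	 */
	#include <linux/bitops.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_ctx {
		spinlock_t lock;	/* initialised with spin_lock_init() in probe */
		u32 seq;
	};

	static void demo_update_seq(struct demo_ctx *ctx, u32 buf_id, bool enable)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		if (enable)
			ctx->seq |= BIT(buf_id);
		else
			ctx->seq &= ~BIT(buf_id);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
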
1224 1187
@@ -1252,25 +1215,25 @@ static int fimc_dst_set_addr(struct device *dev,
1252 case IPP_BUF_ENQUEUE: 1215 case IPP_BUF_ENQUEUE:
1253 config = &property->config[EXYNOS_DRM_OPS_DST]; 1216 config = &property->config[EXYNOS_DRM_OPS_DST];
1254 1217
1255 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], 1218 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
1256 EXYNOS_CIOYSA(buf_id)); 1219 EXYNOS_CIOYSA(buf_id));
1257 1220
1258 if (config->fmt == DRM_FORMAT_YVU420) { 1221 if (config->fmt == DRM_FORMAT_YVU420) {
1259 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 1222 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
1260 EXYNOS_CIOCBSA(buf_id)); 1223 EXYNOS_CIOCBSA(buf_id));
1261 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 1224 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
1262 EXYNOS_CIOCRSA(buf_id)); 1225 EXYNOS_CIOCRSA(buf_id));
1263 } else { 1226 } else {
1264 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], 1227 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
1265 EXYNOS_CIOCBSA(buf_id)); 1228 EXYNOS_CIOCBSA(buf_id));
1266 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], 1229 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
1267 EXYNOS_CIOCRSA(buf_id)); 1230 EXYNOS_CIOCRSA(buf_id));
1268 } 1231 }
1269 break; 1232 break;
1270 case IPP_BUF_DEQUEUE: 1233 case IPP_BUF_DEQUEUE:
1271 fimc_write(0x0, EXYNOS_CIOYSA(buf_id)); 1234 fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
1272 fimc_write(0x0, EXYNOS_CIOCBSA(buf_id)); 1235 fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
1273 fimc_write(0x0, EXYNOS_CIOCRSA(buf_id)); 1236 fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
1274 break; 1237 break;
1275 default: 1238 default:
1276 /* bypass */ 1239 /* bypass */
@@ -1342,11 +1305,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1342 1305
1343static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 1306static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1344{ 1307{
1345 struct drm_exynos_ipp_prop_list *prop_list; 1308 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
1346
1347 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1348 if (!prop_list)
1349 return -ENOMEM;
1350 1309
1351 prop_list->version = 1; 1310 prop_list->version = 1;
1352 prop_list->writeback = 1; 1311 prop_list->writeback = 1;
@@ -1371,8 +1330,6 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1371 prop_list->scale_min.hsize = FIMC_SCALE_MIN; 1330 prop_list->scale_min.hsize = FIMC_SCALE_MIN;
1372 prop_list->scale_min.vsize = FIMC_SCALE_MIN; 1331 prop_list->scale_min.vsize = FIMC_SCALE_MIN;
1373 1332
1374 ippdrv->prop_list = prop_list;
1375
1376 return 0; 1333 return 0;
1377} 1334}
1378 1335
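
fimc_init_prop_list() (and its gsc twin below) now fills a prop_list embedded in struct exynos_drm_ippdrv rather than a separately devm_kzalloc()'d object, so the function can no longer fail for lack of memory and the pointer in the check_property hunks becomes a plain &ippdrv->prop_list. A small sketch of the refactor shape (names illustrative, not the driver's):

	/* Sketch: embed the sub-object instead of allocating it. */
	struct demo_prop_list {
		int version;
	};

	struct demo_ippdrv {
		struct demo_prop_list prop_list;	/* lifetime tied to the parent */
	};

	static int demo_init_prop_list(struct demo_ippdrv *drv)
	{
		drv->prop_list.version = 1;	/* no allocation left that could fail */
		return 0;
	}
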
@@ -1395,7 +1352,7 @@ static int fimc_ippdrv_check_property(struct device *dev,
1395{ 1352{
1396 struct fimc_context *ctx = get_fimc_context(dev); 1353 struct fimc_context *ctx = get_fimc_context(dev);
1397 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1354 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1398 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list; 1355 struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
1399 struct drm_exynos_ipp_config *config; 1356 struct drm_exynos_ipp_config *config;
1400 struct drm_exynos_pos *pos; 1357 struct drm_exynos_pos *pos;
1401 struct drm_exynos_sz *sz; 1358 struct drm_exynos_sz *sz;
@@ -1508,15 +1465,15 @@ static void fimc_clear_addr(struct fimc_context *ctx)
1508 int i; 1465 int i;
1509 1466
1510 for (i = 0; i < FIMC_MAX_SRC; i++) { 1467 for (i = 0; i < FIMC_MAX_SRC; i++) {
1511 fimc_write(0, EXYNOS_CIIYSA(i)); 1468 fimc_write(ctx, 0, EXYNOS_CIIYSA(i));
1512 fimc_write(0, EXYNOS_CIICBSA(i)); 1469 fimc_write(ctx, 0, EXYNOS_CIICBSA(i));
1513 fimc_write(0, EXYNOS_CIICRSA(i)); 1470 fimc_write(ctx, 0, EXYNOS_CIICRSA(i));
1514 } 1471 }
1515 1472
1516 for (i = 0; i < FIMC_MAX_DST; i++) { 1473 for (i = 0; i < FIMC_MAX_DST; i++) {
1517 fimc_write(0, EXYNOS_CIOYSA(i)); 1474 fimc_write(ctx, 0, EXYNOS_CIOYSA(i));
1518 fimc_write(0, EXYNOS_CIOCBSA(i)); 1475 fimc_write(ctx, 0, EXYNOS_CIOCBSA(i));
1519 fimc_write(0, EXYNOS_CIOCRSA(i)); 1476 fimc_write(ctx, 0, EXYNOS_CIOCRSA(i));
1520 } 1477 }
1521} 1478}
1522 1479
@@ -1556,7 +1513,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1556 1513
1557 property = &c_node->property; 1514 property = &c_node->property;
1558 1515
1559 fimc_handle_irq(ctx, true, false, true); 1516 fimc_mask_irq(ctx, true);
1560 1517
1561 for_each_ipp_ops(i) { 1518 for_each_ipp_ops(i) {
1562 config = &property->config[i]; 1519 config = &property->config[i];
@@ -1582,10 +1539,10 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1582 fimc_handle_lastend(ctx, false); 1539 fimc_handle_lastend(ctx, false);
1583 1540
1584 /* setup dma */ 1541 /* setup dma */
1585 cfg0 = fimc_read(EXYNOS_MSCTRL); 1542 cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
1586 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; 1543 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
1587 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; 1544 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
1588 fimc_write(cfg0, EXYNOS_MSCTRL); 1545 fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
1589 break; 1546 break;
1590 case IPP_CMD_WB: 1547 case IPP_CMD_WB:
1591 fimc_set_type_ctrl(ctx, FIMC_WB_A); 1548 fimc_set_type_ctrl(ctx, FIMC_WB_A);
@@ -1610,41 +1567,33 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1610 } 1567 }
1611 1568
1612 /* Reset status */ 1569 /* Reset status */
1613 fimc_write(0x0, EXYNOS_CISTATUS); 1570 fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
1614 1571
1615 cfg0 = fimc_read(EXYNOS_CIIMGCPT); 1572 cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
1616 cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC; 1573 cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
1617 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC; 1574 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
1618 1575
1619 /* Scaler */ 1576 /* Scaler */
1620 cfg1 = fimc_read(EXYNOS_CISCCTRL); 1577 cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
1621 cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK; 1578 cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
1622 cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE | 1579 cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
1623 EXYNOS_CISCCTRL_SCALERSTART); 1580 EXYNOS_CISCCTRL_SCALERSTART);
1624 1581
1625 fimc_write(cfg1, EXYNOS_CISCCTRL); 1582 fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
1626 1583
1627 /* Enable image capture */ 1584 /* Enable image capture */
1628 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN; 1585 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
1629 fimc_write(cfg0, EXYNOS_CIIMGCPT); 1586 fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
1630 1587
1631 /* Disable frame end irq */ 1588 /* Disable frame end irq */
1632 cfg0 = fimc_read(EXYNOS_CIGCTRL); 1589 fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
1633 cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1634 fimc_write(cfg0, EXYNOS_CIGCTRL);
1635 1590
1636 cfg0 = fimc_read(EXYNOS_CIOCTRL); 1591 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
1637 cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
1638 fimc_write(cfg0, EXYNOS_CIOCTRL);
1639 1592
1640 if (cmd == IPP_CMD_M2M) { 1593 if (cmd == IPP_CMD_M2M) {
1641 cfg0 = fimc_read(EXYNOS_MSCTRL); 1594 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1642 cfg0 |= EXYNOS_MSCTRL_ENVID;
1643 fimc_write(cfg0, EXYNOS_MSCTRL);
1644 1595
1645 cfg0 = fimc_read(EXYNOS_MSCTRL); 1596 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1646 cfg0 |= EXYNOS_MSCTRL_ENVID;
1647 fimc_write(cfg0, EXYNOS_MSCTRL);
1648 } 1597 }
1649 1598
1650 return 0; 1599 return 0;
@@ -1661,10 +1610,10 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1661 switch (cmd) { 1610 switch (cmd) {
1662 case IPP_CMD_M2M: 1611 case IPP_CMD_M2M:
1663 /* Source clear */ 1612 /* Source clear */
1664 cfg = fimc_read(EXYNOS_MSCTRL); 1613 cfg = fimc_read(ctx, EXYNOS_MSCTRL);
1665 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; 1614 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
1666 cfg &= ~EXYNOS_MSCTRL_ENVID; 1615 cfg &= ~EXYNOS_MSCTRL_ENVID;
1667 fimc_write(cfg, EXYNOS_MSCTRL); 1616 fimc_write(ctx, cfg, EXYNOS_MSCTRL);
1668 break; 1617 break;
1669 case IPP_CMD_WB: 1618 case IPP_CMD_WB:
1670 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); 1619 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
@@ -1675,25 +1624,20 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1675 break; 1624 break;
1676 } 1625 }
1677 1626
1678 fimc_handle_irq(ctx, false, false, true); 1627 fimc_mask_irq(ctx, false);
1679 1628
1680 /* reset sequence */ 1629 /* reset sequence */
1681 fimc_write(0x0, EXYNOS_CIFCNTSEQ); 1630 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
1682 1631
1683 /* Scaler disable */ 1632 /* Scaler disable */
1684 cfg = fimc_read(EXYNOS_CISCCTRL); 1633 fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);
1685 cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
1686 fimc_write(cfg, EXYNOS_CISCCTRL);
1687 1634
1688 /* Disable image capture */ 1635 /* Disable image capture */
1689 cfg = fimc_read(EXYNOS_CIIMGCPT); 1636 fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
1690 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN); 1637 EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
1691 fimc_write(cfg, EXYNOS_CIIMGCPT);
1692 1638
1693 /* Enable frame end irq */ 1639 /* Enable frame end irq */
1694 cfg = fimc_read(EXYNOS_CIGCTRL); 1640 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
1695 cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1696 fimc_write(cfg, EXYNOS_CIGCTRL);
1697} 1641}
1698 1642
1699static void fimc_put_clocks(struct fimc_context *ctx) 1643static void fimc_put_clocks(struct fimc_context *ctx)
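
Several open-coded read/modify/write sequences above collapse into fimc_set_bits()/fimc_clear_bits(). Those helpers are introduced elsewhere in this patch, so their definitions are not visible here; presumably they look roughly like the sketch below over the driver's MMIO base (a sketch, not a quote from the patch):

	/* Assumed shape of the new helpers; ctx->regs is the ioremapped base. */
	#include <linux/io.h>

	static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
	{
		void __iomem *r = ctx->regs + reg;

		writel(readl(r) | bits, r);
	}

	static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
	{
		void __iomem *r = ctx->regs + reg;

		writel(readl(r) & ~bits, r);
	}
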
@@ -1848,7 +1792,7 @@ static int fimc_probe(struct platform_device *pdev)
1848 1792
1849 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1793 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
1850 1794
1851 mutex_init(&ctx->lock); 1795 spin_lock_init(&ctx->lock);
1852 platform_set_drvdata(pdev, ctx); 1796 platform_set_drvdata(pdev, ctx);
1853 1797
1854 pm_runtime_set_active(dev); 1798 pm_runtime_set_active(dev);
@@ -1879,7 +1823,6 @@ static int fimc_remove(struct platform_device *pdev)
1879 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1823 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1880 1824
1881 exynos_drm_ippdrv_unregister(ippdrv); 1825 exynos_drm_ippdrv_unregister(ippdrv);
1882 mutex_destroy(&ctx->lock);
1883 1826
1884 fimc_put_clocks(ctx); 1827 fimc_put_clocks(ctx);
1885 pm_runtime_set_suspended(dev); 1828 pm_runtime_set_suspended(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 40fd6ccfcd6f..bb45ab2e7384 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -19,6 +19,7 @@
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/component.h>
22 23
23#include <video/of_display_timing.h> 24#include <video/of_display_timing.h>
24#include <video/of_videomode.h> 25#include <video/of_videomode.h>
@@ -38,6 +39,7 @@
38 */ 39 */
39 40
40#define FIMD_DEFAULT_FRAMERATE 60 41#define FIMD_DEFAULT_FRAMERATE 60
42#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
41 43
42/* position control register for hardware window 0, 2 ~ 4.*/ 44/* position control register for hardware window 0, 2 ~ 4.*/
43#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) 45#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
@@ -122,6 +124,7 @@ struct fimd_context {
122 124
123 struct exynos_drm_panel_info panel; 125 struct exynos_drm_panel_info panel;
124 struct fimd_driver_data *driver_data; 126 struct fimd_driver_data *driver_data;
127 struct exynos_drm_display *display;
125}; 128};
126 129
127static const struct of_device_id fimd_driver_dt_match[] = { 130static const struct of_device_id fimd_driver_dt_match[] = {
@@ -143,13 +146,57 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
143 return (struct fimd_driver_data *)of_id->data; 146 return (struct fimd_driver_data *)of_id->data;
144} 147}
145 148
149static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
150{
151 struct fimd_context *ctx = mgr->ctx;
152
153 if (ctx->suspended)
154 return;
155
156 atomic_set(&ctx->wait_vsync_event, 1);
157
158 /*
159 * Wait for FIMD to signal the VSYNC interrupt, or return after a
160 * timeout of 50ms (i.e. a 20 Hz refresh rate).
161 */
162 if (!wait_event_timeout(ctx->wait_vsync_queue,
163 !atomic_read(&ctx->wait_vsync_event),
164 HZ/20))
165 DRM_DEBUG_KMS("vblank wait timed out.\n");
166}
167
168
169static void fimd_clear_channel(struct exynos_drm_manager *mgr)
170{
171 struct fimd_context *ctx = mgr->ctx;
172 int win, ch_enabled = 0;
173
174 DRM_DEBUG_KMS("%s\n", __FILE__);
175
176 /* Check if any channel is enabled. */
177 for (win = 0; win < WINDOWS_NR; win++) {
178 u32 val = readl(ctx->regs + SHADOWCON);
179 if (val & SHADOWCON_CHx_ENABLE(win)) {
180 val &= ~SHADOWCON_CHx_ENABLE(win);
181 writel(val, ctx->regs + SHADOWCON);
182 ch_enabled = 1;
183 }
184 }
185
186 /* Wait for vsync, as disable channel takes effect at next vsync */
187 if (ch_enabled)
188 fimd_wait_for_vblank(mgr);
189}
190
146static int fimd_mgr_initialize(struct exynos_drm_manager *mgr, 191static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
147 struct drm_device *drm_dev, int pipe) 192 struct drm_device *drm_dev)
148{ 193{
149 struct fimd_context *ctx = mgr->ctx; 194 struct fimd_context *ctx = mgr->ctx;
195 struct exynos_drm_private *priv;
196 priv = drm_dev->dev_private;
150 197
151 ctx->drm_dev = drm_dev; 198 mgr->drm_dev = ctx->drm_dev = drm_dev;
152 ctx->pipe = pipe; 199 mgr->pipe = ctx->pipe = priv->pipe++;
153 200
154 /* 201 /*
155 * enable drm irq mode. 202 * enable drm irq mode.
@@ -169,8 +216,14 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
169 drm_dev->vblank_disable_allowed = true; 216 drm_dev->vblank_disable_allowed = true;
170 217
171 /* attach this sub driver to iommu mapping if supported. */ 218 /* attach this sub driver to iommu mapping if supported. */
172 if (is_drm_iommu_supported(ctx->drm_dev)) 219 if (is_drm_iommu_supported(ctx->drm_dev)) {
220 /*
221 * If any channel is already active, the IOMMU will throw
222 * a PAGE FAULT when enabled, so disable any active channels first.
223 */
224 fimd_clear_channel(mgr);
173 drm_iommu_attach_device(ctx->drm_dev, ctx->dev); 225 drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
226 }
174 227
175 return 0; 228 return 0;
176} 229}
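
fimd_clear_channel() has to disable every enabled hardware window and then wait one vblank, since SHADOWCON changes only latch at the next vsync; fimd_wait_for_vblank() implements that wait with the classic wait_event_timeout() handshake against the vsync interrupt handler. A hedged, self-contained sketch of that handshake (demo_* names are illustrative; the pattern mirrors the code above):

	/* Sketch of the vsync handshake between waiter and IRQ handler. */
	#include <linux/atomic.h>
	#include <linux/interrupt.h>
	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_vsync_queue);
	static atomic_t demo_vsync_pending = ATOMIC_INIT(0);

	static irqreturn_t demo_vsync_irq(int irq, void *arg)
	{
		if (atomic_read(&demo_vsync_pending)) {
			atomic_set(&demo_vsync_pending, 0);
			wake_up(&demo_vsync_queue);	/* releases demo_wait_vblank() */
		}
		return IRQ_HANDLED;
	}

	static void demo_wait_vblank(void)
	{
		atomic_set(&demo_vsync_pending, 1);
		/* HZ / 20 == 50ms, i.e. one frame at a 20 Hz refresh rate */
		if (!wait_event_timeout(demo_vsync_queue,
					!atomic_read(&demo_vsync_pending), HZ / 20))
			pr_debug("vblank wait timed out\n");
	}
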
@@ -324,25 +377,6 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
324 } 377 }
325} 378}
326 379
327static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
328{
329 struct fimd_context *ctx = mgr->ctx;
330
331 if (ctx->suspended)
332 return;
333
334 atomic_set(&ctx->wait_vsync_event, 1);
335
336 /*
337 * Wait for FIMD to signal the VSYNC interrupt, or return after a
338 * timeout of 50ms (i.e. a 20 Hz refresh rate).
339 */
340 if (!wait_event_timeout(ctx->wait_vsync_queue,
341 !atomic_read(&ctx->wait_vsync_event),
342 HZ/20))
343 DRM_DEBUG_KMS("vblank wait timed out.\n");
344}
345
346static void fimd_win_mode_set(struct exynos_drm_manager *mgr, 380static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
347 struct exynos_drm_overlay *overlay) 381 struct exynos_drm_overlay *overlay)
348{ 382{
@@ -446,6 +480,19 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
446 480
447 DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp); 481 DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
448 482
483 /*
484 * On Exynos, setting the DMA burst length to 16 words causes permanent
485 * tearing for very small buffers, e.g. the cursor buffer. Switching the
486 * burst mode based on overlay size is not recommended, as the overlay
487 * size varies a lot towards the end of the screen and rapid movement
488 * causes unstable DMA, which results in IOMMU faults and tearing.
489 */
490
491 if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
492 val &= ~WINCONx_BURSTLEN_MASK;
493 val |= WINCONx_BURSTLEN_4WORD;
494 }
495
449 writel(val, ctx->regs + WINCON(win)); 496 writel(val, ctx->regs + WINCON(win));
450} 497}
451 498
@@ -656,19 +703,6 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
656 win_data->enabled = false; 703 win_data->enabled = false;
657} 704}
658 705
659static void fimd_clear_win(struct fimd_context *ctx, int win)
660{
661 writel(0, ctx->regs + WINCON(win));
662 writel(0, ctx->regs + VIDOSD_A(win));
663 writel(0, ctx->regs + VIDOSD_B(win));
664 writel(0, ctx->regs + VIDOSD_C(win));
665
666 if (win == 1 || win == 2)
667 writel(0, ctx->regs + VIDOSD_D(win));
668
669 fimd_shadow_protect_win(ctx, win, false);
670}
671
672static void fimd_window_suspend(struct exynos_drm_manager *mgr) 706static void fimd_window_suspend(struct exynos_drm_manager *mgr)
673{ 707{
674 struct fimd_context *ctx = mgr->ctx; 708 struct fimd_context *ctx = mgr->ctx;
@@ -803,8 +837,6 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
803} 837}
804 838
805static struct exynos_drm_manager_ops fimd_manager_ops = { 839static struct exynos_drm_manager_ops fimd_manager_ops = {
806 .initialize = fimd_mgr_initialize,
807 .remove = fimd_mgr_remove,
808 .dpms = fimd_dpms, 840 .dpms = fimd_dpms,
809 .mode_fixup = fimd_mode_fixup, 841 .mode_fixup = fimd_mode_fixup,
810 .mode_set = fimd_mode_set, 842 .mode_set = fimd_mode_set,
@@ -849,20 +881,64 @@ out:
849 return IRQ_HANDLED; 881 return IRQ_HANDLED;
850} 882}
851 883
884static int fimd_bind(struct device *dev, struct device *master, void *data)
885{
886 struct fimd_context *ctx = fimd_manager.ctx;
887 struct drm_device *drm_dev = data;
888
889 fimd_mgr_initialize(&fimd_manager, drm_dev);
890 exynos_drm_crtc_create(&fimd_manager);
891 if (ctx->display)
892 exynos_drm_create_enc_conn(drm_dev, ctx->display);
893
894 return 0;
895
896}
897
898static void fimd_unbind(struct device *dev, struct device *master,
899 void *data)
900{
901 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
902 struct fimd_context *ctx = fimd_manager.ctx;
903 struct drm_crtc *crtc = mgr->crtc;
904
905 fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
906
907 if (ctx->display)
908 exynos_dpi_remove(dev);
909
910 fimd_mgr_remove(mgr);
911
912 crtc->funcs->destroy(crtc);
913}
914
915static const struct component_ops fimd_component_ops = {
916 .bind = fimd_bind,
917 .unbind = fimd_unbind,
918};
919
852static int fimd_probe(struct platform_device *pdev) 920static int fimd_probe(struct platform_device *pdev)
853{ 921{
854 struct device *dev = &pdev->dev; 922 struct device *dev = &pdev->dev;
855 struct fimd_context *ctx; 923 struct fimd_context *ctx;
856 struct resource *res; 924 struct resource *res;
857 int win;
858 int ret = -EINVAL; 925 int ret = -EINVAL;
859 926
860 if (!dev->of_node) 927 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
861 return -ENODEV; 928 fimd_manager.type);
929 if (ret)
930 return ret;
931
932 if (!dev->of_node) {
933 ret = -ENODEV;
934 goto err_del_component;
935 }
862 936
863 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 937 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
864 if (!ctx) 938 if (!ctx) {
865 return -ENOMEM; 939 ret = -ENOMEM;
940 goto err_del_component;
941 }
866 942
867 ctx->dev = dev; 943 ctx->dev = dev;
868 ctx->suspended = true; 944 ctx->suspended = true;
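
From this point on fimd participates in the kernel's component framework: probe only registers bind/unbind callbacks with component_add(), and the real DRM setup (CRTC creation, encoder/connector wiring) is deferred to fimd_bind() when the master DRM device assembles all components. A minimal sketch of such a participant (demo_* names are illustrative; component_add()/component_del() are the real API from <linux/component.h>):

	/* Minimal component-framework participant mirroring the ops above. */
	#include <linux/component.h>
	#include <linux/errno.h>
	#include <linux/platform_device.h>

	static int demo_bind(struct device *dev, struct device *master, void *data)
	{
		struct drm_device *drm = data;	/* handed down by the master bind */

		/* create CRTCs/encoders/connectors against @drm here */
		return drm ? 0 : -EINVAL;
	}

	static void demo_unbind(struct device *dev, struct device *master,
				void *data)
	{
		/* tear down everything demo_bind() created */
	}

	static const struct component_ops demo_component_ops = {
		.bind	= demo_bind,
		.unbind	= demo_unbind,
	};

	static int demo_probe(struct platform_device *pdev)
	{
		/* defer all DRM setup to demo_bind(); just register the component */
		return component_add(&pdev->dev, &demo_component_ops);
	}

	static int demo_remove(struct platform_device *pdev)
	{
		component_del(&pdev->dev, &demo_component_ops);
		return 0;
	}
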
@@ -875,32 +951,37 @@ static int fimd_probe(struct platform_device *pdev)
875 ctx->bus_clk = devm_clk_get(dev, "fimd"); 951 ctx->bus_clk = devm_clk_get(dev, "fimd");
876 if (IS_ERR(ctx->bus_clk)) { 952 if (IS_ERR(ctx->bus_clk)) {
877 dev_err(dev, "failed to get bus clock\n"); 953 dev_err(dev, "failed to get bus clock\n");
878 return PTR_ERR(ctx->bus_clk); 954 ret = PTR_ERR(ctx->bus_clk);
955 goto err_del_component;
879 } 956 }
880 957
881 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); 958 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
882 if (IS_ERR(ctx->lcd_clk)) { 959 if (IS_ERR(ctx->lcd_clk)) {
883 dev_err(dev, "failed to get lcd clock\n"); 960 dev_err(dev, "failed to get lcd clock\n");
884 return PTR_ERR(ctx->lcd_clk); 961 ret = PTR_ERR(ctx->lcd_clk);
962 goto err_del_component;
885 } 963 }
886 964
887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 965 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
888 966
889 ctx->regs = devm_ioremap_resource(dev, res); 967 ctx->regs = devm_ioremap_resource(dev, res);
890 if (IS_ERR(ctx->regs)) 968 if (IS_ERR(ctx->regs)) {
891 return PTR_ERR(ctx->regs); 969 ret = PTR_ERR(ctx->regs);
970 goto err_del_component;
971 }
892 972
893 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync"); 973 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
894 if (!res) { 974 if (!res) {
895 dev_err(dev, "failed to get vsync irq resource.\n"); 975 dev_err(dev, "failed to get vsync irq resource.\n");
896 return -ENXIO; 976 ret = -ENXIO;
977 goto err_del_component;
897 } 978 }
898 979
899 ret = devm_request_irq(dev, res->start, fimd_irq_handler, 980 ret = devm_request_irq(dev, res->start, fimd_irq_handler,
900 0, "drm_fimd", ctx); 981 0, "drm_fimd", ctx);
901 if (ret) { 982 if (ret) {
902 dev_err(dev, "irq request failed.\n"); 983 dev_err(dev, "irq request failed.\n");
903 return ret; 984 goto err_del_component;
904 } 985 }
905 986
906 ctx->driver_data = drm_fimd_get_driver_data(pdev); 987 ctx->driver_data = drm_fimd_get_driver_data(pdev);
@@ -910,30 +991,34 @@ static int fimd_probe(struct platform_device *pdev)
910 platform_set_drvdata(pdev, &fimd_manager); 991 platform_set_drvdata(pdev, &fimd_manager);
911 992
912 fimd_manager.ctx = ctx; 993 fimd_manager.ctx = ctx;
913 exynos_drm_manager_register(&fimd_manager);
914 994
915 exynos_dpi_probe(ctx->dev); 995 ctx->display = exynos_dpi_probe(dev);
996 if (IS_ERR(ctx->display))
997 return PTR_ERR(ctx->display);
916 998
917 pm_runtime_enable(dev); 999 pm_runtime_enable(&pdev->dev);
918 1000
919 for (win = 0; win < WINDOWS_NR; win++) 1001 ret = component_add(&pdev->dev, &fimd_component_ops);
920 fimd_clear_win(ctx, win); 1002 if (ret)
1003 goto err_disable_pm_runtime;
921 1004
922 return 0; 1005 return ret;
1006
1007err_disable_pm_runtime:
1008 pm_runtime_disable(&pdev->dev);
1009
1010err_del_component:
1011 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
1012 return ret;
923} 1013}
924 1014
925static int fimd_remove(struct platform_device *pdev) 1015static int fimd_remove(struct platform_device *pdev)
926{ 1016{
927 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
928
929 exynos_dpi_remove(&pdev->dev);
930
931 exynos_drm_manager_unregister(&fimd_manager);
932
933 fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
934
935 pm_runtime_disable(&pdev->dev); 1017 pm_runtime_disable(&pdev->dev);
936 1018
1019 component_del(&pdev->dev, &fimd_component_ops);
1020 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
1021
937 return 0; 1022 return 0;
938} 1023}
939 1024
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 42d2904d88c7..163a054922cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -612,22 +612,20 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
612 args->pitch = args->width * ((args->bpp + 7) / 8); 612 args->pitch = args->width * ((args->bpp + 7) / 8);
613 args->size = args->pitch * args->height; 613 args->size = args->pitch * args->height;
614 614
615 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | 615 if (is_drm_iommu_supported(dev)) {
616 EXYNOS_BO_WC, args->size); 616 exynos_gem_obj = exynos_drm_gem_create(dev,
617 /* 617 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
618 * If physically contiguous memory allocation fails and if IOMMU is 618 args->size);
619 * supported then try to get buffer from non physically contiguous 619 } else {
620 * memory area.
621 */
622 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
623 dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
624 exynos_gem_obj = exynos_drm_gem_create(dev, 620 exynos_gem_obj = exynos_drm_gem_create(dev,
625 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, 621 EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
626 args->size); 622 args->size);
627 } 623 }
628 624
629 if (IS_ERR(exynos_gem_obj)) 625 if (IS_ERR(exynos_gem_obj)) {
626 dev_warn(dev->dev, "FB allocation failed.\n");
630 return PTR_ERR(exynos_gem_obj); 627 return PTR_ERR(exynos_gem_obj);
628 }
631 629
632 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 630 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
633 &args->handle); 631 &args->handle);
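
exynos_drm_gem_dumb_create() above derives the pitch by rounding bpp up to whole bytes per pixel, then simply allocates a non-contiguous buffer whenever an IOMMU is available instead of first attempting a contiguous allocation and falling back. The pitch/size arithmetic, checked in a tiny userspace sketch:

	/* Userspace sketch of the dumb-buffer pitch/size arithmetic above. */
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t width = 1920, height = 1080, bpp = 32;
		uint32_t cpp = (bpp + 7) / 8;		/* bytes per pixel, rounded up */
		uint32_t pitch = width * cpp;		/* bytes per scanline */
		uint64_t size = (uint64_t)pitch * height;

		assert(cpp == 4);
		assert(pitch == 7680);
		assert(size == 8294400);
		return 0;
	}
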
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index fa75059a6104..9e3ff1672965 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1335,11 +1335,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1335 1335
1336static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 1336static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1337{ 1337{
1338 struct drm_exynos_ipp_prop_list *prop_list; 1338 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
1339
1340 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1341 if (!prop_list)
1342 return -ENOMEM;
1343 1339
1344 prop_list->version = 1; 1340 prop_list->version = 1;
1345 prop_list->writeback = 1; 1341 prop_list->writeback = 1;
@@ -1363,8 +1359,6 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1363 prop_list->scale_min.hsize = GSC_SCALE_MIN; 1359 prop_list->scale_min.hsize = GSC_SCALE_MIN;
1364 prop_list->scale_min.vsize = GSC_SCALE_MIN; 1360 prop_list->scale_min.vsize = GSC_SCALE_MIN;
1365 1361
1366 ippdrv->prop_list = prop_list;
1367
1368 return 0; 1362 return 0;
1369} 1363}
1370 1364
@@ -1387,7 +1381,7 @@ static int gsc_ippdrv_check_property(struct device *dev,
1387{ 1381{
1388 struct gsc_context *ctx = get_gsc_context(dev); 1382 struct gsc_context *ctx = get_gsc_context(dev);
1389 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1383 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1390 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list; 1384 struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
1391 struct drm_exynos_ipp_config *config; 1385 struct drm_exynos_ipp_config *config;
1392 struct drm_exynos_pos *pos; 1386 struct drm_exynos_pos *pos;
1393 struct drm_exynos_sz *sz; 1387 struct drm_exynos_sz *sz;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 3d78144387ac..a1888e128f1d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -167,6 +167,13 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
167 return 0; 167 return 0;
168} 168}
169 169
170static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
171{
172 mutex_lock(lock);
173 idr_remove(id_idr, id);
174 mutex_unlock(lock);
175}
176
170static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) 177static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
171{ 178{
172 void *obj; 179 void *obj;
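
The new ipp_remove_id() gives ipp_create_id() a symmetric counterpart, so error paths and ipp_clean_cmd_node() can release a prop_id under the same mutex that guards allocation. A hedged sketch of that id lifetime pairing (demo_* names are illustrative; ipp_create_id() is assumed to wrap idr_alloc() in a similar way):

	/* Sketch: idr id lifetime bracketed by one mutex. */
	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/mutex.h>

	static DEFINE_IDR(demo_idr);
	static DEFINE_MUTEX(demo_lock);

	static int demo_create_id(void *obj, u32 *idp)
	{
		int ret;

		mutex_lock(&demo_lock);
		ret = idr_alloc(&demo_idr, obj, 1, 0, GFP_KERNEL);	/* ids from 1 */
		mutex_unlock(&demo_lock);
		if (ret < 0)
			return ret;

		*idp = ret;
		return 0;
	}

	static void demo_remove_id(u32 id)
	{
		mutex_lock(&demo_lock);
		idr_remove(&demo_idr, id);	/* same lock brackets alloc and free */
		mutex_unlock(&demo_lock);
	}
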
@@ -276,11 +283,6 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
276 283
277 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); 284 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
278 285
279 if (list_empty(&exynos_drm_ippdrv_list)) {
280 DRM_DEBUG_KMS("ippdrv_list is empty.\n");
281 return ERR_PTR(-ENODEV);
282 }
283
284 /* 286 /*
285 * This searches for the ipp driver by its prop_id handle; 287 * This searches for the ipp driver by its prop_id handle;
286 * sometimes the ipp subsystem finds a driver by prop_id. 288 * sometimes the ipp subsystem finds a driver by prop_id.
@@ -289,11 +291,14 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
289 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 291 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
290 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 292 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
291 293
292 if (!list_empty(&ippdrv->cmd_list)) { 294 mutex_lock(&ippdrv->cmd_lock);
293 list_for_each_entry(c_node, &ippdrv->cmd_list, list) 295 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
294 if (c_node->property.prop_id == prop_id) 296 if (c_node->property.prop_id == prop_id) {
295 return ippdrv; 297 mutex_unlock(&ippdrv->cmd_lock);
298 return ippdrv;
299 }
296 } 300 }
301 mutex_unlock(&ippdrv->cmd_lock);
297 } 302 }
298 303
299 return ERR_PTR(-ENODEV); 304 return ERR_PTR(-ENODEV);
@@ -325,6 +330,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
325 if (!prop_list->ipp_id) { 330 if (!prop_list->ipp_id) {
326 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) 331 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
327 count++; 332 count++;
333
328 /* 334 /*
329 * Report the ippdrv list count to the user application; 335 * Report the ippdrv list count to the user application;
330 * as a first step the application queries the ippdrv count. 336 * as a first step the application queries the ippdrv count.
@@ -346,7 +352,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
346 return PTR_ERR(ippdrv); 352 return PTR_ERR(ippdrv);
347 } 353 }
348 354
349 prop_list = ippdrv->prop_list; 355 *prop_list = ippdrv->prop_list;
350 } 356 }
351 357
352 return 0; 358 return 0;
@@ -386,9 +392,11 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
386 * when we find the command node using this prop_id, we 392 * when we find the command node using this prop_id, we
387 * return the property information set in that command node. 393 * return the property information set in that command node.
388 */ 394 */
395 mutex_lock(&ippdrv->cmd_lock);
389 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 396 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
390 if ((c_node->property.prop_id == prop_id) && 397 if ((c_node->property.prop_id == prop_id) &&
391 (c_node->state == IPP_STATE_STOP)) { 398 (c_node->state == IPP_STATE_STOP)) {
399 mutex_unlock(&ippdrv->cmd_lock);
392 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n", 400 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
393 property->cmd, (int)ippdrv); 401 property->cmd, (int)ippdrv);
394 402
@@ -396,6 +404,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
396 return 0; 404 return 0;
397 } 405 }
398 } 406 }
407 mutex_unlock(&ippdrv->cmd_lock);
399 408
400 DRM_ERROR("failed to search property.\n"); 409 DRM_ERROR("failed to search property.\n");
401 410
@@ -499,7 +508,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
499 c_node->start_work = ipp_create_cmd_work(); 508 c_node->start_work = ipp_create_cmd_work();
500 if (IS_ERR(c_node->start_work)) { 509 if (IS_ERR(c_node->start_work)) {
501 DRM_ERROR("failed to create start work.\n"); 510 DRM_ERROR("failed to create start work.\n");
502 goto err_clear; 511 goto err_remove_id;
503 } 512 }
504 513
505 c_node->stop_work = ipp_create_cmd_work(); 514 c_node->stop_work = ipp_create_cmd_work();
@@ -514,7 +523,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
514 goto err_free_stop; 523 goto err_free_stop;
515 } 524 }
516 525
517 mutex_init(&c_node->cmd_lock); 526 mutex_init(&c_node->lock);
518 mutex_init(&c_node->mem_lock); 527 mutex_init(&c_node->mem_lock);
519 mutex_init(&c_node->event_lock); 528 mutex_init(&c_node->event_lock);
520 529
@@ -526,7 +535,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
526 535
527 INIT_LIST_HEAD(&c_node->event_list); 536 INIT_LIST_HEAD(&c_node->event_list);
528 list_splice_init(&priv->event_list, &c_node->event_list); 537 list_splice_init(&priv->event_list, &c_node->event_list);
538 mutex_lock(&ippdrv->cmd_lock);
529 list_add_tail(&c_node->list, &ippdrv->cmd_list); 539 list_add_tail(&c_node->list, &ippdrv->cmd_list);
540 mutex_unlock(&ippdrv->cmd_lock);
530 541
531 /* make dedicated state without m2m */ 542 /* make dedicated state without m2m */
532 if (!ipp_is_m2m_cmd(property->cmd)) 543 if (!ipp_is_m2m_cmd(property->cmd))
@@ -538,18 +549,24 @@ err_free_stop:
538 kfree(c_node->stop_work); 549 kfree(c_node->stop_work);
539err_free_start: 550err_free_start:
540 kfree(c_node->start_work); 551 kfree(c_node->start_work);
552err_remove_id:
553 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
541err_clear: 554err_clear:
542 kfree(c_node); 555 kfree(c_node);
543 return ret; 556 return ret;
544} 557}
545 558
546static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node) 559static void ipp_clean_cmd_node(struct ipp_context *ctx,
560 struct drm_exynos_ipp_cmd_node *c_node)
547{ 561{
548 /* delete list */ 562 /* delete list */
549 list_del(&c_node->list); 563 list_del(&c_node->list);
550 564
565 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
566 c_node->property.prop_id);
567
551 /* destroy mutex */ 568 /* destroy mutex */
552 mutex_destroy(&c_node->cmd_lock); 569 mutex_destroy(&c_node->lock);
553 mutex_destroy(&c_node->mem_lock); 570 mutex_destroy(&c_node->mem_lock);
554 mutex_destroy(&c_node->event_lock); 571 mutex_destroy(&c_node->event_lock);
555 572
@@ -567,17 +584,10 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
567 struct list_head *head; 584 struct list_head *head;
568 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; 585 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
569 586
570 mutex_lock(&c_node->mem_lock);
571
572 for_each_ipp_ops(i) { 587 for_each_ipp_ops(i) {
573 /* source/destination memory list */ 588 /* source/destination memory list */
574 head = &c_node->mem_list[i]; 589 head = &c_node->mem_list[i];
575 590
576 if (list_empty(head)) {
577 DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
578 continue;
579 }
580
581 /* find memory node entry */ 591 /* find memory node entry */
582 list_for_each_entry(m_node, head, list) { 592 list_for_each_entry(m_node, head, list) {
583 DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n", 593 DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
@@ -602,8 +612,6 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
602 ret = max(count[EXYNOS_DRM_OPS_SRC], 612 ret = max(count[EXYNOS_DRM_OPS_SRC],
603 count[EXYNOS_DRM_OPS_DST]); 613 count[EXYNOS_DRM_OPS_DST]);
604 614
605 mutex_unlock(&c_node->mem_lock);
606
607 return ret; 615 return ret;
608} 616}
609 617
@@ -646,16 +654,13 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
646 return -EFAULT; 654 return -EFAULT;
647 } 655 }
648 656
649 mutex_lock(&c_node->mem_lock);
650
651 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 657 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
652 658
653 /* get operations callback */ 659 /* get operations callback */
654 ops = ippdrv->ops[m_node->ops_id]; 660 ops = ippdrv->ops[m_node->ops_id];
655 if (!ops) { 661 if (!ops) {
656 DRM_ERROR("not support ops.\n"); 662 DRM_ERROR("not support ops.\n");
657 ret = -EFAULT; 663 return -EFAULT;
658 goto err_unlock;
659 } 664 }
660 665
661 /* set address and enable irq */ 666 /* set address and enable irq */
@@ -664,12 +669,10 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
664 m_node->buf_id, IPP_BUF_ENQUEUE); 669 m_node->buf_id, IPP_BUF_ENQUEUE);
665 if (ret) { 670 if (ret) {
666 DRM_ERROR("failed to set addr.\n"); 671 DRM_ERROR("failed to set addr.\n");
667 goto err_unlock; 672 return ret;
668 } 673 }
669 } 674 }
670 675
671err_unlock:
672 mutex_unlock(&c_node->mem_lock);
673 return ret; 676 return ret;
674} 677}
675 678
@@ -684,11 +687,9 @@ static struct drm_exynos_ipp_mem_node
684 void *addr; 687 void *addr;
685 int i; 688 int i;
686 689
687 mutex_lock(&c_node->mem_lock);
688
689 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
690 if (!m_node) 691 if (!m_node)
691 goto err_unlock; 692 return ERR_PTR(-ENOMEM);
692 693
693 /* clear base address for error handling */ 694 /* clear base address for error handling */
694 memset(&buf_info, 0x0, sizeof(buf_info)); 695 memset(&buf_info, 0x0, sizeof(buf_info));
@@ -722,15 +723,14 @@ static struct drm_exynos_ipp_mem_node
722 723
723 m_node->filp = file; 724 m_node->filp = file;
724 m_node->buf_info = buf_info; 725 m_node->buf_info = buf_info;
726 mutex_lock(&c_node->mem_lock);
725 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 727 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
726
727 mutex_unlock(&c_node->mem_lock); 728 mutex_unlock(&c_node->mem_lock);
729
728 return m_node; 730 return m_node;
729 731
730err_clear: 732err_clear:
731 kfree(m_node); 733 kfree(m_node);
732err_unlock:
733 mutex_unlock(&c_node->mem_lock);
734 return ERR_PTR(-EFAULT); 734 return ERR_PTR(-EFAULT);
735} 735}
736 736
@@ -747,13 +747,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
747 return -EFAULT; 747 return -EFAULT;
748 } 748 }
749 749
750 if (list_empty(&m_node->list)) {
751 DRM_ERROR("empty memory node.\n");
752 return -ENOMEM;
753 }
754
755 mutex_lock(&c_node->mem_lock);
756
757 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 750 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
758 751
759 /* put gem buffer */ 752 /* put gem buffer */
@@ -768,8 +761,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
768 list_del(&m_node->list); 761 list_del(&m_node->list);
769 kfree(m_node); 762 kfree(m_node);
770 763
771 mutex_unlock(&c_node->mem_lock);
772
773 return 0; 764 return 0;
774} 765}
775 766
@@ -805,7 +796,9 @@ static int ipp_get_event(struct drm_device *drm_dev,
805 e->base.event = &e->event.base; 796 e->base.event = &e->event.base;
806 e->base.file_priv = file; 797 e->base.file_priv = file;
807 e->base.destroy = ipp_free_event; 798 e->base.destroy = ipp_free_event;
799 mutex_lock(&c_node->event_lock);
808 list_add_tail(&e->base.link, &c_node->event_list); 800 list_add_tail(&e->base.link, &c_node->event_list);
801 mutex_unlock(&c_node->event_lock);
809 802
810 return 0; 803 return 0;
811} 804}
@@ -816,11 +809,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
816 struct drm_exynos_ipp_send_event *e, *te; 809 struct drm_exynos_ipp_send_event *e, *te;
817 int count = 0; 810 int count = 0;
818 811
819 if (list_empty(&c_node->event_list)) { 812 mutex_lock(&c_node->event_lock);
820 DRM_DEBUG_KMS("event_list is empty.\n");
821 return;
822 }
823
824 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 813 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
825 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 814 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
826 815
@@ -841,9 +830,13 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
841 /* delete list */ 830 /* delete list */
842 list_del(&e->base.link); 831 list_del(&e->base.link);
843 kfree(e); 832 kfree(e);
844 return; 833 goto out_unlock;
845 } 834 }
846 } 835 }
836
837out_unlock:
838 mutex_unlock(&c_node->event_lock);
839 return;
847} 840}
848 841
849static void ipp_handle_cmd_work(struct device *dev, 842static void ipp_handle_cmd_work(struct device *dev,
@@ -887,7 +880,9 @@ static int ipp_queue_buf_with_run(struct device *dev,
887 return 0; 880 return 0;
888 } 881 }
889 882
883 mutex_lock(&c_node->mem_lock);
890 if (!ipp_check_mem_list(c_node)) { 884 if (!ipp_check_mem_list(c_node)) {
885 mutex_unlock(&c_node->mem_lock);
891 DRM_DEBUG_KMS("empty memory.\n"); 886 DRM_DEBUG_KMS("empty memory.\n");
892 return 0; 887 return 0;
893 } 888 }
@@ -904,10 +899,12 @@ static int ipp_queue_buf_with_run(struct device *dev,
904 } else { 899 } else {
905 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 900 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
906 if (ret) { 901 if (ret) {
902 mutex_unlock(&c_node->mem_lock);
907 DRM_ERROR("failed to set m node.\n"); 903 DRM_ERROR("failed to set m node.\n");
908 return ret; 904 return ret;
909 } 905 }
910 } 906 }
907 mutex_unlock(&c_node->mem_lock);
911 908
912 return 0; 909 return 0;
913} 910}
@@ -918,15 +915,15 @@ static void ipp_clean_queue_buf(struct drm_device *drm_dev,
918{ 915{
919 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 916 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
920 917
921 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) { 918 /* delete list */
922 /* delete list */ 919 mutex_lock(&c_node->mem_lock);
923 list_for_each_entry_safe(m_node, tm_node, 920 list_for_each_entry_safe(m_node, tm_node,
924 &c_node->mem_list[qbuf->ops_id], list) { 921 &c_node->mem_list[qbuf->ops_id], list) {
925 if (m_node->buf_id == qbuf->buf_id && 922 if (m_node->buf_id == qbuf->buf_id &&
926 m_node->ops_id == qbuf->ops_id) 923 m_node->ops_id == qbuf->ops_id)
927 ipp_put_mem_node(drm_dev, c_node, m_node); 924 ipp_put_mem_node(drm_dev, c_node, m_node);
928 }
929 } 925 }
926 mutex_unlock(&c_node->mem_lock);
930} 927}
931 928
932int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, 929int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
@@ -998,7 +995,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
998 } 995 }
999 break; 996 break;
1000 case IPP_BUF_DEQUEUE: 997 case IPP_BUF_DEQUEUE:
1001 mutex_lock(&c_node->cmd_lock); 998 mutex_lock(&c_node->lock);
1002 999
1003 /* put event for destination buffer */ 1000 /* put event for destination buffer */
1004 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) 1001 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
@@ -1006,7 +1003,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
1006 1003
1007 ipp_clean_queue_buf(drm_dev, c_node, qbuf); 1004 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1008 1005
1009 mutex_unlock(&c_node->cmd_lock); 1006 mutex_unlock(&c_node->lock);
1010 break; 1007 break;
1011 default: 1008 default:
1012 DRM_ERROR("invalid buffer control.\n"); 1009 DRM_ERROR("invalid buffer control.\n");
@@ -1109,12 +1106,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1109 case IPP_CTRL_PLAY: 1106 case IPP_CTRL_PLAY:
1110 if (pm_runtime_suspended(ippdrv->dev)) 1107 if (pm_runtime_suspended(ippdrv->dev))
1111 pm_runtime_get_sync(ippdrv->dev); 1108 pm_runtime_get_sync(ippdrv->dev);
1109
1112 c_node->state = IPP_STATE_START; 1110 c_node->state = IPP_STATE_START;
1113 1111
1114 cmd_work = c_node->start_work; 1112 cmd_work = c_node->start_work;
1115 cmd_work->ctrl = cmd_ctrl->ctrl; 1113 cmd_work->ctrl = cmd_ctrl->ctrl;
1116 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 1114 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1117 c_node->state = IPP_STATE_START;
1118 break; 1115 break;
1119 case IPP_CTRL_STOP: 1116 case IPP_CTRL_STOP:
1120 cmd_work = c_node->stop_work; 1117 cmd_work = c_node->stop_work;
@@ -1129,10 +1126,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1129 1126
1130 c_node->state = IPP_STATE_STOP; 1127 c_node->state = IPP_STATE_STOP;
1131 ippdrv->dedicated = false; 1128 ippdrv->dedicated = false;
1132 ipp_clean_cmd_node(c_node); 1129 mutex_lock(&ippdrv->cmd_lock);
1130 ipp_clean_cmd_node(ctx, c_node);
1133 1131
1134 if (list_empty(&ippdrv->cmd_list)) 1132 if (list_empty(&ippdrv->cmd_list))
1135 pm_runtime_put_sync(ippdrv->dev); 1133 pm_runtime_put_sync(ippdrv->dev);
1134 mutex_unlock(&ippdrv->cmd_lock);
1136 break; 1135 break;
1137 case IPP_CTRL_PAUSE: 1136 case IPP_CTRL_PAUSE:
1138 cmd_work = c_node->stop_work; 1137 cmd_work = c_node->stop_work;
@@ -1260,9 +1259,11 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1260 /* store command info in ippdrv */ 1259 /* store command info in ippdrv */
1261 ippdrv->c_node = c_node; 1260 ippdrv->c_node = c_node;
1262 1261
1262 mutex_lock(&c_node->mem_lock);
1263 if (!ipp_check_mem_list(c_node)) { 1263 if (!ipp_check_mem_list(c_node)) {
1264 DRM_DEBUG_KMS("empty memory.\n"); 1264 DRM_DEBUG_KMS("empty memory.\n");
1265 return -ENOMEM; 1265 ret = -ENOMEM;
1266 goto err_unlock;
1266 } 1267 }
1267 1268
1268 /* set current property in ippdrv */ 1269 /* set current property in ippdrv */
@@ -1270,7 +1271,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1270 if (ret) { 1271 if (ret) {
1271 DRM_ERROR("failed to set property.\n"); 1272 DRM_ERROR("failed to set property.\n");
1272 ippdrv->c_node = NULL; 1273 ippdrv->c_node = NULL;
1273 return ret; 1274 goto err_unlock;
1274 } 1275 }
1275 1276
1276 /* check command */ 1277 /* check command */
@@ -1285,7 +1286,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1285 if (!m_node) { 1286 if (!m_node) {
1286 DRM_ERROR("failed to get node.\n"); 1287 DRM_ERROR("failed to get node.\n");
1287 ret = -EFAULT; 1288 ret = -EFAULT;
1288 return ret; 1289 goto err_unlock;
1289 } 1290 }
1290 1291
1291 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1292 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@@ -1293,7 +1294,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1293 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1294 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1294 if (ret) { 1295 if (ret) {
1295 DRM_ERROR("failed to set m node.\n"); 1296 DRM_ERROR("failed to set m node.\n");
1296 return ret; 1297 goto err_unlock;
1297 } 1298 }
1298 } 1299 }
1299 break; 1300 break;
@@ -1305,7 +1306,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1305 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1306 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1306 if (ret) { 1307 if (ret) {
1307 DRM_ERROR("failed to set m node.\n"); 1308 DRM_ERROR("failed to set m node.\n");
1308 return ret; 1309 goto err_unlock;
1309 } 1310 }
1310 } 1311 }
1311 break; 1312 break;
@@ -1317,14 +1318,16 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1317 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1318 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1318 if (ret) { 1319 if (ret) {
1319 DRM_ERROR("failed to set m node.\n"); 1320 DRM_ERROR("failed to set m node.\n");
1320 return ret; 1321 goto err_unlock;
1321 } 1322 }
1322 } 1323 }
1323 break; 1324 break;
1324 default: 1325 default:
1325 DRM_ERROR("invalid operations.\n"); 1326 DRM_ERROR("invalid operations.\n");
1326 return -EINVAL; 1327 ret = -EINVAL;
1328 goto err_unlock;
1327 } 1329 }
1330 mutex_unlock(&c_node->mem_lock);
1328 1331
1329 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); 1332 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
1330 1333
@@ -1333,11 +1336,17 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1333 ret = ippdrv->start(ippdrv->dev, property->cmd); 1336 ret = ippdrv->start(ippdrv->dev, property->cmd);
1334 if (ret) { 1337 if (ret) {
1335 DRM_ERROR("failed to start ops.\n"); 1338 DRM_ERROR("failed to start ops.\n");
1339 ippdrv->c_node = NULL;
1336 return ret; 1340 return ret;
1337 } 1341 }
1338 } 1342 }
1339 1343
1340 return 0; 1344 return 0;
1345
1346err_unlock:
1347 mutex_unlock(&c_node->mem_lock);
1348 ippdrv->c_node = NULL;
1349 return ret;
1341} 1350}
1342 1351
1343static int ipp_stop_property(struct drm_device *drm_dev, 1352static int ipp_stop_property(struct drm_device *drm_dev,
@@ -1354,6 +1363,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1354 /* put event */ 1363 /* put event */
1355 ipp_put_event(c_node, NULL); 1364 ipp_put_event(c_node, NULL);
1356 1365
1366 mutex_lock(&c_node->mem_lock);
1367
1357 /* check command */ 1368 /* check command */
1358 switch (property->cmd) { 1369 switch (property->cmd) {
1359 case IPP_CMD_M2M: 1370 case IPP_CMD_M2M:
@@ -1361,11 +1372,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1361 /* source/destination memory list */ 1372 /* source/destination memory list */
1362 head = &c_node->mem_list[i]; 1373 head = &c_node->mem_list[i];
1363 1374
1364 if (list_empty(head)) {
1365 DRM_DEBUG_KMS("mem_list is empty.\n");
1366 break;
1367 }
1368
1369 list_for_each_entry_safe(m_node, tm_node, 1375 list_for_each_entry_safe(m_node, tm_node,
1370 head, list) { 1376 head, list) {
1371 ret = ipp_put_mem_node(drm_dev, c_node, 1377 ret = ipp_put_mem_node(drm_dev, c_node,
@@ -1381,11 +1387,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1381 /* destination memory list */ 1387 /* destination memory list */
1382 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; 1388 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1383 1389
1384 if (list_empty(head)) {
1385 DRM_DEBUG_KMS("mem_list is empty.\n");
1386 break;
1387 }
1388
1389 list_for_each_entry_safe(m_node, tm_node, head, list) { 1390 list_for_each_entry_safe(m_node, tm_node, head, list) {
1390 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1391 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1391 if (ret) { 1392 if (ret) {
@@ -1398,11 +1399,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1398 /* source memory list */ 1399 /* source memory list */
1399 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1400 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1400 1401
1401 if (list_empty(head)) {
1402 DRM_DEBUG_KMS("mem_list is empty.\n");
1403 break;
1404 }
1405
1406 list_for_each_entry_safe(m_node, tm_node, head, list) { 1402 list_for_each_entry_safe(m_node, tm_node, head, list) {
1407 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1403 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1408 if (ret) { 1404 if (ret) {
@@ -1418,6 +1414,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
1418 } 1414 }
1419 1415
1420err_clear: 1416err_clear:
1417 mutex_unlock(&c_node->mem_lock);
1418
1421 /* stop operations */ 1419 /* stop operations */
1422 if (ippdrv->stop) 1420 if (ippdrv->stop)
1423 ippdrv->stop(ippdrv->dev, property->cmd); 1421 ippdrv->stop(ippdrv->dev, property->cmd);
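
The three deleted list_empty() pre-checks were pure noise: list_for_each_entry_safe() already performs zero iterations on an empty list, so the guarded break bought nothing. A fragment showing the simplified shape, with foo_put_mem_node() a hypothetical stand-in:

	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node, head, list)
		foo_put_mem_node(m_node);	/* body never runs when head is empty */
	mutex_unlock(&c_node->mem_lock);
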
@@ -1446,7 +1444,7 @@ void ipp_sched_cmd(struct work_struct *work)
1446 return; 1444 return;
1447 } 1445 }
1448 1446
1449 mutex_lock(&c_node->cmd_lock); 1447 mutex_lock(&c_node->lock);
1450 1448
1451 property = &c_node->property; 1449 property = &c_node->property;
1452 1450
@@ -1494,7 +1492,7 @@ void ipp_sched_cmd(struct work_struct *work)
1494 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl); 1492 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
1495 1493
1496err_unlock: 1494err_unlock:
1497 mutex_unlock(&c_node->cmd_lock); 1495 mutex_unlock(&c_node->lock);
1498} 1496}
1499 1497
1500static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, 1498static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
@@ -1524,14 +1522,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1524 return -EINVAL; 1522 return -EINVAL;
1525 } 1523 }
1526 1524
1525 mutex_lock(&c_node->event_lock);
1527 if (list_empty(&c_node->event_list)) { 1526 if (list_empty(&c_node->event_list)) {
1528 DRM_DEBUG_KMS("event list is empty.\n"); 1527 DRM_DEBUG_KMS("event list is empty.\n");
1529 return 0; 1528 ret = 0;
1529 goto err_event_unlock;
1530 } 1530 }
1531 1531
1532 mutex_lock(&c_node->mem_lock);
1532 if (!ipp_check_mem_list(c_node)) { 1533 if (!ipp_check_mem_list(c_node)) {
1533 DRM_DEBUG_KMS("empty memory.\n"); 1534 DRM_DEBUG_KMS("empty memory.\n");
1534 return 0; 1535 ret = 0;
1536 goto err_mem_unlock;
1535 } 1537 }
1536 1538
1537 /* check command */ 1539 /* check command */
@@ -1545,7 +1547,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1545 struct drm_exynos_ipp_mem_node, list); 1547 struct drm_exynos_ipp_mem_node, list);
1546 if (!m_node) { 1548 if (!m_node) {
1547 DRM_ERROR("empty memory node.\n"); 1549 DRM_ERROR("empty memory node.\n");
1548 return -ENOMEM; 1550 ret = -ENOMEM;
1551 goto err_mem_unlock;
1549 } 1552 }
1550 1553
1551 tbuf_id[i] = m_node->buf_id; 1554 tbuf_id[i] = m_node->buf_id;
@@ -1567,7 +1570,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1567 m_node = ipp_find_mem_node(c_node, &qbuf); 1570 m_node = ipp_find_mem_node(c_node, &qbuf);
1568 if (!m_node) { 1571 if (!m_node) {
1569 DRM_ERROR("empty memory node.\n"); 1572 DRM_ERROR("empty memory node.\n");
1570 return -ENOMEM; 1573 ret = -ENOMEM;
1574 goto err_mem_unlock;
1571 } 1575 }
1572 1576
1573 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1577 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
@@ -1584,7 +1588,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1584 struct drm_exynos_ipp_mem_node, list); 1588 struct drm_exynos_ipp_mem_node, list);
1585 if (!m_node) { 1589 if (!m_node) {
1586 DRM_ERROR("empty memory node.\n"); 1590 DRM_ERROR("empty memory node.\n");
1587 return -ENOMEM; 1591 ret = -ENOMEM;
1592 goto err_mem_unlock;
1588 } 1593 }
1589 1594
1590 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1595 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@@ -1595,8 +1600,10 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1595 break; 1600 break;
1596 default: 1601 default:
1597 DRM_ERROR("invalid operations.\n"); 1602 DRM_ERROR("invalid operations.\n");
1598 return -EINVAL; 1603 ret = -EINVAL;
1604 goto err_mem_unlock;
1599 } 1605 }
1606 mutex_unlock(&c_node->mem_lock);
1600 1607
1601 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1608 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1602 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1609 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
@@ -1611,11 +1618,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1611 e = list_first_entry(&c_node->event_list, 1618 e = list_first_entry(&c_node->event_list,
1612 struct drm_exynos_ipp_send_event, base.link); 1619 struct drm_exynos_ipp_send_event, base.link);
1613 1620
1614 if (!e) {
1615 DRM_ERROR("empty event.\n");
1616 return -EINVAL;
1617 }
1618
1619 do_gettimeofday(&now); 1621 do_gettimeofday(&now);
1620 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); 1622 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
1621 e->event.tv_sec = now.tv_sec; 1623 e->event.tv_sec = now.tv_sec;
@@ -1630,11 +1632,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1630 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1632 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1631 wake_up_interruptible(&e->base.file_priv->event_wait); 1633 wake_up_interruptible(&e->base.file_priv->event_wait);
1632 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1634 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1635 mutex_unlock(&c_node->event_lock);
1633 1636
1634 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", 1637 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
1635 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1638 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1636 1639
1637 return 0; 1640 return 0;
1641
1642err_mem_unlock:
1643 mutex_unlock(&c_node->mem_lock);
1644err_event_unlock:
1645 mutex_unlock(&c_node->event_lock);
1646 return ret;
1638} 1647}
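
ipp_send_event() now takes event_lock and then mem_lock, and unwinds them through staged labels in reverse order, so each early exit releases exactly the locks it holds. A compact sketch of the two-level error ladder, all foo_* names hypothetical:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct foo_node {
	struct mutex event_lock, mem_lock;
	struct list_head event_list, mem_list;
};

static void foo_emit(struct foo_node *node);

static int foo_send(struct foo_node *node)
{
	int ret = 0;

	mutex_lock(&node->event_lock);
	if (list_empty(&node->event_list))
		goto err_event_unlock;		/* holds event_lock only */

	mutex_lock(&node->mem_lock);
	if (list_empty(&node->mem_list)) {
		ret = -ENOMEM;
		goto err_mem_unlock;		/* holds both locks */
	}

	foo_emit(node);
	mutex_unlock(&node->mem_lock);
	mutex_unlock(&node->event_lock);
	return 0;

err_mem_unlock:
	mutex_unlock(&node->mem_lock);
err_event_unlock:
	mutex_unlock(&node->event_lock);
	return ret;
}
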
1639 1648
1640void ipp_sched_event(struct work_struct *work) 1649void ipp_sched_event(struct work_struct *work)
@@ -1676,8 +1685,6 @@ void ipp_sched_event(struct work_struct *work)
1676 goto err_completion; 1685 goto err_completion;
1677 } 1686 }
1678 1687
1679 mutex_lock(&c_node->event_lock);
1680
1681 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id); 1688 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1682 if (ret) { 1689 if (ret) {
1683 DRM_ERROR("failed to send event.\n"); 1690 DRM_ERROR("failed to send event.\n");
@@ -1687,8 +1694,6 @@ void ipp_sched_event(struct work_struct *work)
1687err_completion: 1694err_completion:
1688 if (ipp_is_m2m_cmd(c_node->property.cmd)) 1695 if (ipp_is_m2m_cmd(c_node->property.cmd))
1689 complete(&c_node->start_complete); 1696 complete(&c_node->start_complete);
1690
1691 mutex_unlock(&c_node->event_lock);
1692} 1697}
1693 1698
1694static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 1699static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -1699,23 +1704,21 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1699 1704
1700 /* get ipp driver entry */ 1705 /* get ipp driver entry */
1701 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1706 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1707 u32 ipp_id;
1708
1702 ippdrv->drm_dev = drm_dev; 1709 ippdrv->drm_dev = drm_dev;
1703 1710
1704 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv, 1711 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1705 &ippdrv->ipp_id); 1712 &ipp_id);
1706 if (ret) { 1713 if (ret || ipp_id == 0) {
1707 DRM_ERROR("failed to create id.\n"); 1714 DRM_ERROR("failed to create id.\n");
1708 goto err_idr; 1715 goto err;
1709 } 1716 }
1710 1717
1711 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1718 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
1712 count++, (int)ippdrv, ippdrv->ipp_id); 1719 count++, (int)ippdrv, ipp_id);
1713 1720
1714 if (ippdrv->ipp_id == 0) { 1721 ippdrv->prop_list.ipp_id = ipp_id;
1715 DRM_ERROR("failed to get ipp_id[%d]\n",
1716 ippdrv->ipp_id);
1717 goto err_idr;
1718 }
1719 1722
1720 /* store parent device for node */ 1723 /* store parent device for node */
1721 ippdrv->parent_dev = dev; 1724 ippdrv->parent_dev = dev;
@@ -1724,39 +1727,46 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1724 ippdrv->event_workq = ctx->event_workq; 1727 ippdrv->event_workq = ctx->event_workq;
1725 ippdrv->sched_event = ipp_sched_event; 1728 ippdrv->sched_event = ipp_sched_event;
1726 INIT_LIST_HEAD(&ippdrv->cmd_list); 1729 INIT_LIST_HEAD(&ippdrv->cmd_list);
1730 mutex_init(&ippdrv->cmd_lock);
1727 1731
1728 if (is_drm_iommu_supported(drm_dev)) { 1732 if (is_drm_iommu_supported(drm_dev)) {
1729 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); 1733 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1730 if (ret) { 1734 if (ret) {
1731 DRM_ERROR("failed to activate iommu\n"); 1735 DRM_ERROR("failed to activate iommu\n");
1732 goto err_iommu; 1736 goto err;
1733 } 1737 }
1734 } 1738 }
1735 } 1739 }
1736 1740
1737 return 0; 1741 return 0;
1738 1742
1739err_iommu: 1743err:
1740 /* get ipp driver entry */ 1744 /* get ipp driver entry */
1741 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list) 1745 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
1746 drv_list) {
1742 if (is_drm_iommu_supported(drm_dev)) 1747 if (is_drm_iommu_supported(drm_dev))
1743 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1748 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1744 1749
1745err_idr: 1750 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1746 idr_destroy(&ctx->ipp_idr); 1751 ippdrv->prop_list.ipp_id);
1747 idr_destroy(&ctx->prop_idr); 1752 }
1753
1748 return ret; 1754 return ret;
1749} 1755}
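
The probe unwind switches from list_for_each_entry_reverse(), which would walk the whole list including entries never initialized, to list_for_each_entry_continue_reverse(), which starts just before the entry that failed and tears down only what was actually set up. A sketch under hypothetical names:

#include <linux/list.h>

struct foo_drv {
	struct list_head link;
};

static int foo_setup(struct foo_drv *drv);
static void foo_teardown(struct foo_drv *drv);

static int foo_probe_all(struct list_head *drivers)
{
	struct foo_drv *drv;
	int ret;

	list_for_each_entry(drv, drivers, link) {
		ret = foo_setup(drv);
		if (ret)
			goto err;
	}
	return 0;

err:
	/* resumes from the predecessor of the entry that failed */
	list_for_each_entry_continue_reverse(drv, drivers, link)
		foo_teardown(drv);
	return ret;
}
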
1750 1756
1751static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 1757static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1752{ 1758{
1753 struct exynos_drm_ippdrv *ippdrv; 1759 struct exynos_drm_ippdrv *ippdrv;
1760 struct ipp_context *ctx = get_ipp_context(dev);
1754 1761
1755 /* get ipp driver entry */ 1762 /* get ipp driver entry */
1756 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1763 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1757 if (is_drm_iommu_supported(drm_dev)) 1764 if (is_drm_iommu_supported(drm_dev))
1758 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1765 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1759 1766
1767 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1768 ippdrv->prop_list.ipp_id);
1769
1760 ippdrv->drm_dev = NULL; 1770 ippdrv->drm_dev = NULL;
1761 exynos_drm_ippdrv_unregister(ippdrv); 1771 exynos_drm_ippdrv_unregister(ippdrv);
1762 } 1772 }
@@ -1787,20 +1797,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1787 struct drm_exynos_file_private *file_priv = file->driver_priv; 1797 struct drm_exynos_file_private *file_priv = file->driver_priv;
1788 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; 1798 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1789 struct exynos_drm_ippdrv *ippdrv = NULL; 1799 struct exynos_drm_ippdrv *ippdrv = NULL;
1800 struct ipp_context *ctx = get_ipp_context(dev);
1790 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1801 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1791 int count = 0; 1802 int count = 0;
1792 1803
1793 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv); 1804 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
1794 1805
1795 if (list_empty(&exynos_drm_ippdrv_list)) {
1796 DRM_DEBUG_KMS("ippdrv_list is empty.\n");
1797 goto err_clear;
1798 }
1799
1800 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1806 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1801 if (list_empty(&ippdrv->cmd_list)) 1807 mutex_lock(&ippdrv->cmd_lock);
1802 continue;
1803
1804 list_for_each_entry_safe(c_node, tc_node, 1808 list_for_each_entry_safe(c_node, tc_node,
1805 &ippdrv->cmd_list, list) { 1809 &ippdrv->cmd_list, list) {
1806 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1810 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
@@ -1820,14 +1824,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1820 } 1824 }
1821 1825
1822 ippdrv->dedicated = false; 1826 ippdrv->dedicated = false;
1823 ipp_clean_cmd_node(c_node); 1827 ipp_clean_cmd_node(ctx, c_node);
1824 if (list_empty(&ippdrv->cmd_list)) 1828 if (list_empty(&ippdrv->cmd_list))
1825 pm_runtime_put_sync(ippdrv->dev); 1829 pm_runtime_put_sync(ippdrv->dev);
1826 } 1830 }
1827 } 1831 }
1832 mutex_unlock(&ippdrv->cmd_lock);
1828 } 1833 }
1829 1834
1830err_clear:
1831 kfree(priv); 1835 kfree(priv);
1832 return; 1836 return;
1833} 1837}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index ab1634befc05..7aaeaae757c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -52,7 +52,7 @@ struct drm_exynos_ipp_cmd_work {
52 * @list: list head to command queue information. 52 * @list: list head to command queue information.
53 * @event_list: list head of event. 53 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information. 54 * @mem_list: list head to source,destination memory queue information.
55 * @cmd_lock: lock for synchronization of access to ioctl. 55 * @lock: lock for synchronization of access to ioctl.
56 * @mem_lock: lock for synchronization of access to memory nodes. 56 * @mem_lock: lock for synchronization of access to memory nodes.
57 * @event_lock: lock for synchronization of access to scheduled event. 57 * @event_lock: lock for synchronization of access to scheduled event.
58 * @start_complete: completion of start of command. 58 * @start_complete: completion of start of command.
@@ -68,7 +68,7 @@ struct drm_exynos_ipp_cmd_node {
68 struct list_head list; 68 struct list_head list;
69 struct list_head event_list; 69 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; 70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
71 struct mutex cmd_lock; 71 struct mutex lock;
72 struct mutex mem_lock; 72 struct mutex mem_lock;
73 struct mutex event_lock; 73 struct mutex event_lock;
74 struct completion start_complete; 74 struct completion start_complete;
@@ -83,7 +83,7 @@ struct drm_exynos_ipp_cmd_node {
83/* 83/*
84 * A structure of buffer information. 84 * A structure of buffer information.
85 * 85 *
86 * @gem_objs: Y, Cb, Cr each gem object. 86 * @handles: Y, Cb, Cr each gem object handle.
87 * @base: Y, Cb, Cr each planar address. 87 * @base: Y, Cb, Cr each planar address.
88 */ 88 */
89struct drm_exynos_ipp_buf_info { 89struct drm_exynos_ipp_buf_info {
@@ -142,12 +142,12 @@ struct exynos_drm_ipp_ops {
142 * @parent_dev: parent device information. 142 * @parent_dev: parent device information.
143 * @dev: platform device. 143 * @dev: platform device.
144 * @drm_dev: drm device. 144 * @drm_dev: drm device.
145 * @ipp_id: id of ipp driver.
146 * @dedicated: dedicated ipp device. 145 * @dedicated: dedicated ipp device.
147 * @ops: source, destination operations. 146 * @ops: source, destination operations.
148 * @event_workq: event work queue. 147 * @event_workq: event work queue.
149 * @c_node: current command information. 148 * @c_node: current command information.
150 * @cmd_list: list head for command information. 149 * @cmd_list: list head for command information.
150 * @cmd_lock: lock for synchronization of access to cmd_list.
151 * @prop_list: property information of current ipp driver. 151 * @prop_list: property information of current ipp driver.
152 * @check_property: check property about format, size, buffer. 152 * @check_property: check property about format, size, buffer.
153 * @reset: reset ipp block. 153 * @reset: reset ipp block.
@@ -160,13 +160,13 @@ struct exynos_drm_ippdrv {
160 struct device *parent_dev; 160 struct device *parent_dev;
161 struct device *dev; 161 struct device *dev;
162 struct drm_device *drm_dev; 162 struct drm_device *drm_dev;
163 u32 ipp_id;
164 bool dedicated; 163 bool dedicated;
165 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX]; 164 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
166 struct workqueue_struct *event_workq; 165 struct workqueue_struct *event_workq;
167 struct drm_exynos_ipp_cmd_node *c_node; 166 struct drm_exynos_ipp_cmd_node *c_node;
168 struct list_head cmd_list; 167 struct list_head cmd_list;
169 struct drm_exynos_ipp_prop_list *prop_list; 168 struct mutex cmd_lock;
169 struct drm_exynos_ipp_prop_list prop_list;
170 170
171 int (*check_property)(struct device *dev, 171 int (*check_property)(struct device *dev,
172 struct drm_exynos_ipp_property *property); 172 struct drm_exynos_ipp_property *property);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 7b901688defa..f01fbb6dc1f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -158,8 +158,9 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; 158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq, 159 queue_work(ippdrv->event_workq,
160 (struct work_struct *)event_work); 160 (struct work_struct *)event_work);
161 } else 161 } else {
162 DRM_ERROR("the SFR is set illegally\n"); 162 DRM_ERROR("the SFR is set illegally\n");
163 }
163 164
164 return IRQ_HANDLED; 165 return IRQ_HANDLED;
165} 166}
@@ -469,11 +470,7 @@ static struct exynos_drm_ipp_ops rot_dst_ops = {
469 470
470static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) 471static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471{ 472{
472 struct drm_exynos_ipp_prop_list *prop_list; 473 struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
473
474 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
475 if (!prop_list)
476 return -ENOMEM;
477 474
478 prop_list->version = 1; 475 prop_list->version = 1;
479 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | 476 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -486,8 +483,6 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
486 prop_list->crop = 0; 483 prop_list->crop = 0;
487 prop_list->scale = 0; 484 prop_list->scale = 0;
488 485
489 ippdrv->prop_list = prop_list;
490
491 return 0; 486 return 0;
492} 487}
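
Together with the header change above, this hunk replaces a devm_kzalloc()'d prop_list pointer with a member embedded by value in struct exynos_drm_ippdrv, removing an allocation that could fail plus a level of indirection. A before/after sketch with hypothetical names:

struct foo_caps {
	int version;
};

/* before: separately allocated, so init had a failure path */
struct foo_drv_old {
	struct foo_caps *caps;		/* devm_kzalloc()'d per driver */
};

/* after: embedded by value, init just fills fields in */
struct foo_drv {
	struct foo_caps caps;
};

static int foo_init_caps(struct foo_drv *drv)
{
	drv->caps.version = 1;		/* no allocation, cannot fail */
	return 0;
}
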
493 488
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 852f2dadaebd..2fb8705d6461 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -51,6 +51,7 @@ struct vidi_context {
51 struct drm_crtc *crtc; 51 struct drm_crtc *crtc;
52 struct drm_encoder *encoder; 52 struct drm_encoder *encoder;
53 struct drm_connector connector; 53 struct drm_connector connector;
54 struct exynos_drm_subdrv subdrv;
54 struct vidi_win_data win_data[WINDOWS_NR]; 55 struct vidi_win_data win_data[WINDOWS_NR];
55 struct edid *raw_edid; 56 struct edid *raw_edid;
56 unsigned int clkdiv; 57 unsigned int clkdiv;
@@ -294,14 +295,13 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
294} 295}
295 296
296static int vidi_mgr_initialize(struct exynos_drm_manager *mgr, 297static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
297 struct drm_device *drm_dev, int pipe) 298 struct drm_device *drm_dev)
298{ 299{
299 struct vidi_context *ctx = mgr->ctx; 300 struct vidi_context *ctx = mgr->ctx;
301 struct exynos_drm_private *priv = drm_dev->dev_private;
300 302
301 DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe); 303 mgr->drm_dev = ctx->drm_dev = drm_dev;
302 304 mgr->pipe = ctx->pipe = priv->pipe++;
303 ctx->drm_dev = drm_dev;
304 ctx->pipe = pipe;
305 305
306 /* 306 /*
307 * enable drm irq mode. 307 * enable drm irq mode.
@@ -324,7 +324,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
324} 324}
325 325
326static struct exynos_drm_manager_ops vidi_manager_ops = { 326static struct exynos_drm_manager_ops vidi_manager_ops = {
327 .initialize = vidi_mgr_initialize,
328 .dpms = vidi_dpms, 327 .dpms = vidi_dpms,
329 .commit = vidi_commit, 328 .commit = vidi_commit,
330 .enable_vblank = vidi_enable_vblank, 329 .enable_vblank = vidi_enable_vblank,
@@ -533,12 +532,6 @@ static int vidi_get_modes(struct drm_connector *connector)
533 return drm_add_edid_modes(connector, edid); 532 return drm_add_edid_modes(connector, edid);
534} 533}
535 534
536static int vidi_mode_valid(struct drm_connector *connector,
537 struct drm_display_mode *mode)
538{
539 return MODE_OK;
540}
541
542static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) 535static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
543{ 536{
544 struct vidi_context *ctx = ctx_from_connector(connector); 537 struct vidi_context *ctx = ctx_from_connector(connector);
@@ -548,7 +541,6 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
548 541
549static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 542static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
550 .get_modes = vidi_get_modes, 543 .get_modes = vidi_get_modes,
551 .mode_valid = vidi_mode_valid,
552 .best_encoder = vidi_best_encoder, 544 .best_encoder = vidi_best_encoder,
553}; 545};
554 546
@@ -586,13 +578,38 @@ static struct exynos_drm_display vidi_display = {
586 .ops = &vidi_display_ops, 578 .ops = &vidi_display_ops,
587}; 579};
588 580
581static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
582{
583 struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
584 struct vidi_context *ctx = mgr->ctx;
585 struct drm_crtc *crtc = ctx->crtc;
586 int ret;
587
588 vidi_mgr_initialize(mgr, drm_dev);
589
590 ret = exynos_drm_crtc_create(&vidi_manager);
591 if (ret) {
592 DRM_ERROR("failed to create crtc.\n");
593 return ret;
594 }
595
596 ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
597 if (ret) {
598 crtc->funcs->destroy(crtc);
599 DRM_ERROR("failed to create encoder and connector.\n");
600 return ret;
601 }
602
603 return 0;
604}
605
589static int vidi_probe(struct platform_device *pdev) 606static int vidi_probe(struct platform_device *pdev)
590{ 607{
591 struct device *dev = &pdev->dev; 608 struct exynos_drm_subdrv *subdrv;
592 struct vidi_context *ctx; 609 struct vidi_context *ctx;
593 int ret; 610 int ret;
594 611
595 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 612 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
596 if (!ctx) 613 if (!ctx)
597 return -ENOMEM; 614 return -ENOMEM;
598 615
@@ -607,28 +624,43 @@ static int vidi_probe(struct platform_device *pdev)
607 624
608 platform_set_drvdata(pdev, &vidi_manager); 625 platform_set_drvdata(pdev, &vidi_manager);
609 626
610 ret = device_create_file(dev, &dev_attr_connection); 627 subdrv = &ctx->subdrv;
611 if (ret < 0) 628 subdrv->dev = &pdev->dev;
612 DRM_INFO("failed to create connection sysfs.\n"); 629 subdrv->probe = vidi_subdrv_probe;
630
631 ret = exynos_drm_subdrv_register(subdrv);
632 if (ret < 0) {
633 dev_err(&pdev->dev, "failed to register drm vidi device\n");
634 return ret;
635 }
613 636
614 exynos_drm_manager_register(&vidi_manager); 637 ret = device_create_file(&pdev->dev, &dev_attr_connection);
615 exynos_drm_display_register(&vidi_display); 638 if (ret < 0) {
639 exynos_drm_subdrv_unregister(subdrv);
640 DRM_INFO("failed to create connection sysfs.\n");
641 }
616 642
617 return 0; 643 return 0;
618} 644}
619 645
620static int vidi_remove(struct platform_device *pdev) 646static int vidi_remove(struct platform_device *pdev)
621{ 647{
622 struct vidi_context *ctx = platform_get_drvdata(pdev); 648 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
623 649 struct vidi_context *ctx = mgr->ctx;
624 exynos_drm_display_unregister(&vidi_display); 650 struct drm_encoder *encoder = ctx->encoder;
625 exynos_drm_manager_unregister(&vidi_manager); 651 struct drm_crtc *crtc = mgr->crtc;
626 652
627 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 653 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
628 kfree(ctx->raw_edid); 654 kfree(ctx->raw_edid);
629 ctx->raw_edid = NULL; 655 ctx->raw_edid = NULL;
656
657 return -EINVAL;
630 } 658 }
631 659
660 crtc->funcs->destroy(crtc);
661 encoder->funcs->destroy(encoder);
662 drm_connector_cleanup(&ctx->connector);
663
632 return 0; 664 return 0;
633} 665}
634 666
@@ -640,3 +672,31 @@ struct platform_driver vidi_driver = {
640 .owner = THIS_MODULE, 672 .owner = THIS_MODULE,
641 }, 673 },
642}; 674};
675
676int exynos_drm_probe_vidi(void)
677{
678 struct platform_device *pdev;
679 int ret;
680
681 pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0);
682 if (IS_ERR(pdev))
683 return PTR_ERR(pdev);
684
685 ret = platform_driver_register(&vidi_driver);
686 if (ret) {
687 platform_device_unregister(pdev);
688 return ret;
689 }
690
691 return ret;
692}
693
694void exynos_drm_remove_vidi(void)
695{
696 struct vidi_context *ctx = vidi_manager.ctx;
697 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
698 struct platform_device *pdev = to_platform_device(subdrv->dev);
699
700 platform_driver_unregister(&vidi_driver);
701 platform_device_unregister(pdev);
702}
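
exynos_drm_probe_vidi() follows the usual pattern for a purely software device: register a platform device by hand, then its driver, and unregister the device again if driver registration fails. A sketch, foo names hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver;	/* defined elsewhere */
static struct platform_device *foo_pdev;

static int foo_probe_virtual(void)
{
	int ret;

	/* no hardware node to match: create the device in software */
	foo_pdev = platform_device_register_simple("foo-virtual", -1, NULL, 0);
	if (IS_ERR(foo_pdev))
		return PTR_ERR(foo_pdev);

	ret = platform_driver_register(&foo_driver);
	if (ret)
		platform_device_unregister(foo_pdev);	/* unwind on failure */

	return ret;
}
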
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 9a6d652a3ef2..c104d0c9b385 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -33,13 +33,17 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/of.h> 35#include <linux/of.h>
36#include <linux/i2c.h> 36#include <linux/of_address.h>
37#include <linux/of_gpio.h> 37#include <linux/of_gpio.h>
38#include <linux/hdmi.h> 38#include <linux/hdmi.h>
39#include <linux/component.h>
40#include <linux/mfd/syscon.h>
41#include <linux/regmap.h>
39 42
40#include <drm/exynos_drm.h> 43#include <drm/exynos_drm.h>
41 44
42#include "exynos_drm_drv.h" 45#include "exynos_drm_drv.h"
46#include "exynos_drm_crtc.h"
43#include "exynos_mixer.h" 47#include "exynos_mixer.h"
44 48
45#include <linux/gpio.h> 49#include <linux/gpio.h>
@@ -48,6 +52,8 @@
48#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev)) 52#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
49#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector) 53#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
50 54
55#define HOTPLUG_DEBOUNCE_MS 1100
56
51/* AVI header and aspect ratio */ 57/* AVI header and aspect ratio */
52#define HDMI_AVI_VERSION 0x02 58#define HDMI_AVI_VERSION 0x02
53#define HDMI_AVI_LENGTH 0x0D 59#define HDMI_AVI_LENGTH 0x0D
@@ -66,6 +72,8 @@ enum hdmi_type {
66 72
67struct hdmi_driver_data { 73struct hdmi_driver_data {
68 unsigned int type; 74 unsigned int type;
75 const struct hdmiphy_config *phy_confs;
76 unsigned int phy_conf_count;
69 unsigned int is_apb_phy:1; 77 unsigned int is_apb_phy:1;
70}; 78};
71 79
@@ -74,7 +82,6 @@ struct hdmi_resources {
74 struct clk *sclk_hdmi; 82 struct clk *sclk_hdmi;
75 struct clk *sclk_pixel; 83 struct clk *sclk_pixel;
76 struct clk *sclk_hdmiphy; 84 struct clk *sclk_hdmiphy;
77 struct clk *hdmiphy;
78 struct clk *mout_hdmi; 85 struct clk *mout_hdmi;
79 struct regulator_bulk_data *regul_bulk; 86 struct regulator_bulk_data *regul_bulk;
80 int regul_count; 87 int regul_count;
@@ -185,17 +192,23 @@ struct hdmi_context {
185 192
186 void __iomem *regs; 193 void __iomem *regs;
187 int irq; 194 int irq;
195 struct delayed_work hotplug_work;
188 196
189 struct i2c_adapter *ddc_adpt; 197 struct i2c_adapter *ddc_adpt;
190 struct i2c_client *hdmiphy_port; 198 struct i2c_client *hdmiphy_port;
191 199
192 /* current hdmiphy conf regs */ 200 /* current hdmiphy conf regs */
201 struct drm_display_mode current_mode;
193 struct hdmi_conf_regs mode_conf; 202 struct hdmi_conf_regs mode_conf;
194 203
195 struct hdmi_resources res; 204 struct hdmi_resources res;
196 205
197 int hpd_gpio; 206 int hpd_gpio;
207 void __iomem *regs_hdmiphy;
208 const struct hdmiphy_config *phy_confs;
209 unsigned int phy_conf_count;
198 210
211 struct regmap *pmureg;
199 enum hdmi_type type; 212 enum hdmi_type type;
200}; 213};
201 214
@@ -204,14 +217,6 @@ struct hdmiphy_config {
204 u8 conf[32]; 217 u8 conf[32];
205}; 218};
206 219
207struct hdmi_driver_data exynos4212_hdmi_driver_data = {
208 .type = HDMI_TYPE14,
209};
210
211struct hdmi_driver_data exynos5_hdmi_driver_data = {
212 .type = HDMI_TYPE14,
213};
214
215/* list of phy config settings */ 220/* list of phy config settings */
216static const struct hdmiphy_config hdmiphy_v13_configs[] = { 221static const struct hdmiphy_config hdmiphy_v13_configs[] = {
217 { 222 {
@@ -319,18 +324,18 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
319 { 324 {
320 .pixel_clock = 71000000, 325 .pixel_clock = 71000000,
321 .conf = { 326 .conf = {
322 0x01, 0x91, 0x1e, 0x15, 0x40, 0x3c, 0xce, 0x08, 327 0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08,
323 0x04, 0x20, 0xb2, 0xd8, 0x45, 0xa0, 0xac, 0x80, 328 0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80,
324 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 329 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
325 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, 330 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
326 }, 331 },
327 }, 332 },
328 { 333 {
329 .pixel_clock = 73250000, 334 .pixel_clock = 73250000,
330 .conf = { 335 .conf = {
331 0x01, 0xd1, 0x1f, 0x15, 0x40, 0x18, 0xe9, 0x08, 336 0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08,
332 0x02, 0xa0, 0xb7, 0xd8, 0x45, 0xa0, 0xac, 0x80, 337 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
333 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 338 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
334 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, 339 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
335 }, 340 },
336 }, 341 },
@@ -362,15 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
362 }, 367 },
363 }, 368 },
364 { 369 {
365 .pixel_clock = 88750000,
366 .conf = {
367 0x01, 0x91, 0x25, 0x17, 0x40, 0x30, 0xfe, 0x08,
368 0x06, 0x20, 0xde, 0xd8, 0x45, 0xa0, 0xac, 0x80,
369 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
370 0x54, 0x8a, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
371 },
372 },
373 {
374 .pixel_clock = 106500000, 370 .pixel_clock = 106500000,
375 .conf = { 371 .conf = {
376 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, 372 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -391,18 +387,18 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
391 { 387 {
392 .pixel_clock = 115500000, 388 .pixel_clock = 115500000,
393 .conf = { 389 .conf = {
394 0x01, 0xd1, 0x30, 0x1a, 0x40, 0x40, 0x10, 0x04, 390 0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08,
395 0x04, 0xa0, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80, 391 0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
396 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 392 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
397 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, 393 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
398 }, 394 },
399 }, 395 },
400 { 396 {
401 .pixel_clock = 119000000, 397 .pixel_clock = 119000000,
402 .conf = { 398 .conf = {
403 0x01, 0x91, 0x32, 0x14, 0x40, 0x60, 0xd8, 0x08, 399 0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08,
404 0x06, 0x20, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80, 400 0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
405 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 401 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
406 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, 402 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
407 }, 403 },
408 }, 404 },
@@ -426,6 +422,183 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
426 }, 422 },
427}; 423};
428 424
425static const struct hdmiphy_config hdmiphy_5420_configs[] = {
426 {
427 .pixel_clock = 25200000,
428 .conf = {
429 0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8,
430 0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
431 0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66,
432 0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
433 },
434 },
435 {
436 .pixel_clock = 27000000,
437 .conf = {
438 0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0,
439 0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
440 0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
441 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
442 },
443 },
444 {
445 .pixel_clock = 27027000,
446 .conf = {
447 0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8,
448 0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
449 0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
450 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
451 },
452 },
453 {
454 .pixel_clock = 36000000,
455 .conf = {
456 0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8,
457 0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
458 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
459 0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
460 },
461 },
462 {
463 .pixel_clock = 40000000,
464 .conf = {
465 0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8,
466 0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
467 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
468 0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
469 },
470 },
471 {
472 .pixel_clock = 65000000,
473 .conf = {
474 0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8,
475 0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
476 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
477 0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
478 },
479 },
480 {
481 .pixel_clock = 71000000,
482 .conf = {
483 0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8,
484 0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
485 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
486 0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
487 },
488 },
489 {
490 .pixel_clock = 73250000,
491 .conf = {
492 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8,
493 0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
494 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
495 0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
496 },
497 },
498 {
499 .pixel_clock = 74176000,
500 .conf = {
501 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8,
502 0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
503 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
504 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
505 },
506 },
507 {
508 .pixel_clock = 74250000,
509 .conf = {
510 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
511 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
512 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
513 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
514 },
515 },
516 {
517 .pixel_clock = 83500000,
518 .conf = {
519 0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8,
520 0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
521 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
522 0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
523 },
524 },
525 {
526 .pixel_clock = 88750000,
527 .conf = {
528 0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8,
529 0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
530 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
531 0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
532 },
533 },
534 {
535 .pixel_clock = 106500000,
536 .conf = {
537 0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8,
538 0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
539 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
540 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
541 },
542 },
543 {
544 .pixel_clock = 108000000,
545 .conf = {
546 0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8,
547 0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
548 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
549 0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
550 },
551 },
552 {
553 .pixel_clock = 115500000,
554 .conf = {
555 0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8,
556 0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
557 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
558 0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
559 },
560 },
561 {
562 .pixel_clock = 146250000,
563 .conf = {
564 0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8,
565 0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
566 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
567 0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
568 },
569 },
570 {
571 .pixel_clock = 148500000,
572 .conf = {
573 0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
574 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
575 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
576 0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80,
577 },
578 },
579};
580
581static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
582 .type = HDMI_TYPE14,
583 .phy_confs = hdmiphy_5420_configs,
584 .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs),
585 .is_apb_phy = 1,
586};
587
588static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
589 .type = HDMI_TYPE14,
590 .phy_confs = hdmiphy_v14_configs,
591 .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs),
592 .is_apb_phy = 0,
593};
594
595static struct hdmi_driver_data exynos5_hdmi_driver_data = {
596 .type = HDMI_TYPE14,
597 .phy_confs = hdmiphy_v13_configs,
598 .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
599 .is_apb_phy = 0,
600};
601
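
The per-SoC tables above feed the standard OF driver-data pattern: each compatible string carries a pointer to its variant description, so probe stays a single code path. A sketch under hypothetical names ("vendor,foo-v1" included):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_data {
	unsigned int phy_conf_count;
};

static const struct foo_data foo_v1_data = { .phy_conf_count = 14 };

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct foo_data *data;

	match = of_match_node(foo_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	data = match->data;	/* per-variant tables, one probe path */
	return data->phy_conf_count ? 0 : -EINVAL;
}
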
429static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 602static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
430{ 603{
431 return readl(hdata->regs + reg_id); 604 return readl(hdata->regs + reg_id);
@@ -445,6 +618,48 @@ static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
445 writel(value, hdata->regs + reg_id); 618 writel(value, hdata->regs + reg_id);
446} 619}
447 620
621static int hdmiphy_reg_writeb(struct hdmi_context *hdata,
622 u32 reg_offset, u8 value)
623{
624 if (hdata->hdmiphy_port) {
625 u8 buffer[2];
626 int ret;
627
628 buffer[0] = reg_offset;
629 buffer[1] = value;
630
631 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2);
632 if (ret == 2)
633 return 0;
634 return ret;
635 } else {
636 writeb(value, hdata->regs_hdmiphy + (reg_offset<<2));
637 return 0;
638 }
639}
640
641static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
642 u32 reg_offset, const u8 *buf, u32 len)
643{
644 if ((reg_offset + len) > 32)
645 return -EINVAL;
646
647 if (hdata->hdmiphy_port) {
648 int ret;
649
650 ret = i2c_master_send(hdata->hdmiphy_port, buf, len);
651 if (ret == len)
652 return 0;
653 return ret;
654 } else {
655 int i;
656 for (i = 0; i < len; i++)
657 writeb(buf[i], hdata->regs_hdmiphy +
658 ((reg_offset + i)<<2));
659 return 0;
660 }
661}
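
These two helpers hide the PHY's bus from its callers: older SoCs reach the PHY over I2C, while the newer parts use a memory-mapped APB window, where the << 2 turns a byte register index into a 32-bit-aligned MMIO offset. Callers then stay bus-agnostic, roughly as in this fragment:

	/* send the whole 32-byte config, then flag mode-set complete;
	 * the helpers pick i2c_master_send() or writeb() internally */
	ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
	if (!ret)
		ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
					 HDMI_PHY_DISABLE_MODE_SET);
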
662
448static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) 663static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
449{ 664{
450#define DUMPREG(reg_id) \ 665#define DUMPREG(reg_id) \
@@ -809,6 +1024,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
809{ 1024{
810 struct hdmi_context *hdata = ctx_from_connector(connector); 1025 struct hdmi_context *hdata = ctx_from_connector(connector);
811 1026
1027 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
1028
812 return hdata->hpd ? connector_status_connected : 1029 return hdata->hpd ? connector_status_connected :
813 connector_status_disconnected; 1030 connector_status_disconnected;
814} 1031}
@@ -848,20 +1065,10 @@ static int hdmi_get_modes(struct drm_connector *connector)
848 1065
849static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 1066static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
850{ 1067{
851 const struct hdmiphy_config *confs; 1068 int i;
852 int count, i;
853
854 if (hdata->type == HDMI_TYPE13) {
855 confs = hdmiphy_v13_configs;
856 count = ARRAY_SIZE(hdmiphy_v13_configs);
857 } else if (hdata->type == HDMI_TYPE14) {
858 confs = hdmiphy_v14_configs;
859 count = ARRAY_SIZE(hdmiphy_v14_configs);
860 } else
861 return -EINVAL;
862 1069
863 for (i = 0; i < count; i++) 1070 for (i = 0; i < hdata->phy_conf_count; i++)
864 if (confs[i].pixel_clock == pixel_clock) 1071 if (hdata->phy_confs[i].pixel_clock == pixel_clock)
865 return i; 1072 return i;
866 1073
867 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); 1074 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -928,16 +1135,6 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
928 return 0; 1135 return 0;
929} 1136}
930 1137
931static int hdmi_initialize(struct exynos_drm_display *display,
932 struct drm_device *drm_dev)
933{
934 struct hdmi_context *hdata = display->ctx;
935
936 hdata->drm_dev = drm_dev;
937
938 return 0;
939}
940
941static void hdmi_mode_fixup(struct exynos_drm_display *display, 1138static void hdmi_mode_fixup(struct exynos_drm_display *display,
942 struct drm_connector *connector, 1139 struct drm_connector *connector,
943 const struct drm_display_mode *mode, 1140 const struct drm_display_mode *mode,
@@ -1136,20 +1333,15 @@ static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
1136 HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); 1333 HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
1137} 1334}
1138 1335
1139static void hdmi_conf_reset(struct hdmi_context *hdata) 1336static void hdmi_start(struct hdmi_context *hdata, bool start)
1140{ 1337{
1141 u32 reg; 1338 u32 val = start ? HDMI_TG_EN : 0;
1142 1339
1143 if (hdata->type == HDMI_TYPE13) 1340 if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE)
1144 reg = HDMI_V13_CORE_RSTOUT; 1341 val |= HDMI_FIELD_EN;
1145 else
1146 reg = HDMI_CORE_RSTOUT;
1147 1342
1148 /* resetting HDMI core */ 1343 hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
1149 hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT); 1344 hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN);
1150 usleep_range(10000, 12000);
1151 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
1152 usleep_range(10000, 12000);
1153} 1345}
1154 1346
1155static void hdmi_conf_init(struct hdmi_context *hdata) 1347static void hdmi_conf_init(struct hdmi_context *hdata)
@@ -1163,6 +1355,8 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1163 /* choose HDMI mode */ 1355 /* choose HDMI mode */
1164 hdmi_reg_writemask(hdata, HDMI_MODE_SEL, 1356 hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
1165 HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); 1357 HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
1358 /* Apply Video preamble and Guard band in HDMI mode only */
1359 hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
1166 /* disable bluescreen */ 1360 /* disable bluescreen */
1167 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); 1361 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
1168 1362
@@ -1286,12 +1480,7 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
1286 clk_prepare_enable(hdata->res.sclk_hdmi); 1480 clk_prepare_enable(hdata->res.sclk_hdmi);
1287 1481
1288 /* enable HDMI and timing generator */ 1482 /* enable HDMI and timing generator */
1289 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); 1483 hdmi_start(hdata, true);
1290 if (core->int_pro_mode[0])
1291 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
1292 HDMI_FIELD_EN);
1293 else
1294 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
1295} 1484}
1296 1485
1297static void hdmi_v14_mode_apply(struct hdmi_context *hdata) 1486static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
@@ -1453,12 +1642,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
1453 clk_prepare_enable(hdata->res.sclk_hdmi); 1642 clk_prepare_enable(hdata->res.sclk_hdmi);
1454 1643
1455 /* enable HDMI and timing generator */ 1644 /* enable HDMI and timing generator */
1456 hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); 1645 hdmi_start(hdata, true);
1457 if (core->int_pro_mode[0])
1458 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
1459 HDMI_FIELD_EN);
1460 else
1461 hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
1462} 1646}
1463 1647
1464static void hdmi_mode_apply(struct hdmi_context *hdata) 1648static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1499,32 +1683,51 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1499 1683
1500static void hdmiphy_poweron(struct hdmi_context *hdata) 1684static void hdmiphy_poweron(struct hdmi_context *hdata)
1501{ 1685{
1502 if (hdata->type == HDMI_TYPE14) 1686 if (hdata->type != HDMI_TYPE14)
1503 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, 1687 return;
1504 HDMI_PHY_POWER_OFF_EN); 1688
1689 DRM_DEBUG_KMS("\n");
1690
1691 /* For PHY Mode Setting */
1692 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1693 HDMI_PHY_ENABLE_MODE_SET);
1694 /* Phy Power On */
1695 hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
1696 HDMI_PHY_POWER_ON);
1697 /* For PHY Mode Setting */
1698 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1699 HDMI_PHY_DISABLE_MODE_SET);
1700 /* PHY SW Reset */
1701 hdmiphy_conf_reset(hdata);
1505} 1702}
1506 1703
1507static void hdmiphy_poweroff(struct hdmi_context *hdata) 1704static void hdmiphy_poweroff(struct hdmi_context *hdata)
1508{ 1705{
1509 if (hdata->type == HDMI_TYPE14) 1706 if (hdata->type != HDMI_TYPE14)
1510 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, 1707 return;
1511 HDMI_PHY_POWER_OFF_EN); 1708
1709 DRM_DEBUG_KMS("\n");
1710
1711 /* PHY SW Reset */
1712 hdmiphy_conf_reset(hdata);
1713 /* For PHY Mode Setting */
1714 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1715 HDMI_PHY_ENABLE_MODE_SET);
1716
1717 /* PHY Power Off */
1718 hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
1719 HDMI_PHY_POWER_OFF);
1720
1721 /* For PHY Mode Setting */
1722 hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1723 HDMI_PHY_DISABLE_MODE_SET);
1512} 1724}
1513 1725
1514static void hdmiphy_conf_apply(struct hdmi_context *hdata) 1726static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1515{ 1727{
1516 const u8 *hdmiphy_data;
1517 u8 buffer[32];
1518 u8 operation[2];
1519 u8 read_buffer[32] = {0, };
1520 int ret; 1728 int ret;
1521 int i; 1729 int i;
1522 1730
1523 if (!hdata->hdmiphy_port) {
1524 DRM_ERROR("hdmiphy is not attached\n");
1525 return;
1526 }
1527
1528 /* pixel clock */ 1731 /* pixel clock */
1529 i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock); 1732 i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
1530 if (i < 0) { 1733 if (i < 0) {
@@ -1532,39 +1735,21 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1532 return; 1735 return;
1533 } 1736 }
1534 1737
1535 if (hdata->type == HDMI_TYPE13) 1738 ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
1536 hdmiphy_data = hdmiphy_v13_configs[i].conf; 1739 if (ret) {
1537 else 1740 DRM_ERROR("failed to configure hdmiphy\n");
1538 hdmiphy_data = hdmiphy_v14_configs[i].conf;
1539
1540 memcpy(buffer, hdmiphy_data, 32);
1541 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
1542 if (ret != 32) {
1543 DRM_ERROR("failed to configure HDMIPHY via I2C\n");
1544 return; 1741 return;
1545 } 1742 }
1546 1743
1547 usleep_range(10000, 12000); 1744 usleep_range(10000, 12000);
1548 1745
1549 /* operation mode */ 1746 ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
1550 operation[0] = 0x1f; 1747 HDMI_PHY_DISABLE_MODE_SET);
1551 operation[1] = 0x80; 1748 if (ret) {
1552
1553 ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
1554 if (ret != 2) {
1555 DRM_ERROR("failed to enable hdmiphy\n"); 1749 DRM_ERROR("failed to enable hdmiphy\n");
1556 return; 1750 return;
1557 } 1751 }
1558 1752
1559 ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
1560 if (ret < 0) {
1561 DRM_ERROR("failed to read hdmiphy config\n");
1562 return;
1563 }
1564
1565 for (i = 0; i < ret; i++)
1566 DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
1567 "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
1568} 1753}
1569 1754
1570static void hdmi_conf_apply(struct hdmi_context *hdata) 1755static void hdmi_conf_apply(struct hdmi_context *hdata)
@@ -1573,7 +1758,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1573 hdmiphy_conf_apply(hdata); 1758 hdmiphy_conf_apply(hdata);
1574 1759
1575 mutex_lock(&hdata->hdmi_mutex); 1760 mutex_lock(&hdata->hdmi_mutex);
1576 hdmi_conf_reset(hdata); 1761 hdmi_start(hdata, false);
1577 hdmi_conf_init(hdata); 1762 hdmi_conf_init(hdata);
1578 mutex_unlock(&hdata->hdmi_mutex); 1763 mutex_unlock(&hdata->hdmi_mutex);
1579 1764
@@ -1814,6 +1999,9 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
1814 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1999 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
1815 "INTERLACED" : "PROGERESSIVE"); 2000 "INTERLACED" : "PROGERESSIVE");
1816 2001
2002 /* preserve mode information for later use. */
2003 drm_mode_copy(&hdata->current_mode, mode);
2004
1817 if (hdata->type == HDMI_TYPE13) 2005 if (hdata->type == HDMI_TYPE13)
1818 hdmi_v13_mode_set(hdata, mode); 2006 hdmi_v13_mode_set(hdata, mode);
1819 else 2007 else
@@ -1854,7 +2042,10 @@ static void hdmi_poweron(struct exynos_drm_display *display)
1854 if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) 2042 if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
1855 DRM_DEBUG_KMS("failed to enable regulator bulk\n"); 2043 DRM_DEBUG_KMS("failed to enable regulator bulk\n");
1856 2044
1857 clk_prepare_enable(res->hdmiphy); 2045 /* set pmu hdmiphy control bit to enable hdmiphy */
2046 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
2047 PMU_HDMI_PHY_ENABLE_BIT, 1);
2048
1858 clk_prepare_enable(res->hdmi); 2049 clk_prepare_enable(res->hdmi);
1859 clk_prepare_enable(res->sclk_hdmi); 2050 clk_prepare_enable(res->sclk_hdmi);
1860 2051
@@ -1872,16 +2063,20 @@ static void hdmi_poweroff(struct exynos_drm_display *display)
1872 goto out; 2063 goto out;
1873 mutex_unlock(&hdata->hdmi_mutex); 2064 mutex_unlock(&hdata->hdmi_mutex);
1874 2065
1875 /* 2066 /* HDMI System Disable */
1876 * The TV power domain needs any condition of hdmiphy to turn off and 2067 hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
1877 * its reset state seems to meet the condition. 2068
1878 */
1879 hdmiphy_conf_reset(hdata);
1880 hdmiphy_poweroff(hdata); 2069 hdmiphy_poweroff(hdata);
1881 2070
2071 cancel_delayed_work(&hdata->hotplug_work);
2072
1882 clk_disable_unprepare(res->sclk_hdmi); 2073 clk_disable_unprepare(res->sclk_hdmi);
1883 clk_disable_unprepare(res->hdmi); 2074 clk_disable_unprepare(res->hdmi);
1884 clk_disable_unprepare(res->hdmiphy); 2075
2076 /* reset pmu hdmiphy control bit to disable hdmiphy */
2077 regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
2078 PMU_HDMI_PHY_ENABLE_BIT, 0);
2079
1885 regulator_bulk_disable(res->regul_count, res->regul_bulk); 2080 regulator_bulk_disable(res->regul_count, res->regul_bulk);
1886 2081
1887 pm_runtime_put_sync(hdata->dev); 2082 pm_runtime_put_sync(hdata->dev);
@@ -1913,7 +2108,6 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
1913} 2108}
1914 2109
1915static struct exynos_drm_display_ops hdmi_display_ops = { 2110static struct exynos_drm_display_ops hdmi_display_ops = {
1916 .initialize = hdmi_initialize,
1917 .create_connector = hdmi_create_connector, 2111 .create_connector = hdmi_create_connector,
1918 .mode_fixup = hdmi_mode_fixup, 2112 .mode_fixup = hdmi_mode_fixup,
1919 .mode_set = hdmi_mode_set, 2113 .mode_set = hdmi_mode_set,
@@ -1926,9 +2120,11 @@ static struct exynos_drm_display hdmi_display = {
1926 .ops = &hdmi_display_ops, 2120 .ops = &hdmi_display_ops,
1927}; 2121};
1928 2122
1929static irqreturn_t hdmi_irq_thread(int irq, void *arg) 2123static void hdmi_hotplug_work_func(struct work_struct *work)
1930{ 2124{
1931 struct hdmi_context *hdata = arg; 2125 struct hdmi_context *hdata;
2126
2127 hdata = container_of(work, struct hdmi_context, hotplug_work.work);
1932 2128
1933 mutex_lock(&hdata->hdmi_mutex); 2129 mutex_lock(&hdata->hdmi_mutex);
1934 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2130 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
@@ -1936,6 +2132,14 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
1936 2132
1937 if (hdata->drm_dev) 2133 if (hdata->drm_dev)
1938 drm_helper_hpd_irq_event(hdata->drm_dev); 2134 drm_helper_hpd_irq_event(hdata->drm_dev);
2135}
2136
2137static irqreturn_t hdmi_irq_thread(int irq, void *arg)
2138{
2139 struct hdmi_context *hdata = arg;
2140
2141 mod_delayed_work(system_wq, &hdata->hotplug_work,
2142 msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
1939 2143
1940 return IRQ_HANDLED; 2144 return IRQ_HANDLED;
1941} 2145}
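
Deferring the HPD handling through a delayed work item debounces the line: mod_delayed_work() re-arms the timer on every interrupt, so a burst of transitions during cable insertion collapses into a single hotplug event HOTPLUG_DEBOUNCE_MS later. A minimal sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

#define FOO_DEBOUNCE_MS	1100

struct foo_ctx {
	struct delayed_work hotplug_work;	/* INIT_DELAYED_WORK() in probe */
};

static irqreturn_t foo_irq_thread(int irq, void *arg)
{
	struct foo_ctx *ctx = arg;

	/* each interrupt pushes the deadline out; a burst fires once */
	mod_delayed_work(system_wq, &ctx->hotplug_work,
			 msecs_to_jiffies(FOO_DEBOUNCE_MS));
	return IRQ_HANDLED;
}
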
@@ -1954,37 +2158,35 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1954 2158
1955 DRM_DEBUG_KMS("HDMI resource init\n"); 2159 DRM_DEBUG_KMS("HDMI resource init\n");
1956 2160
1957 memset(res, 0, sizeof(*res));
1958
1959 /* get clocks, power */ 2161 /* get clocks, power */
1960 res->hdmi = devm_clk_get(dev, "hdmi"); 2162 res->hdmi = devm_clk_get(dev, "hdmi");
1961 if (IS_ERR(res->hdmi)) { 2163 if (IS_ERR(res->hdmi)) {
1962 DRM_ERROR("failed to get clock 'hdmi'\n"); 2164 DRM_ERROR("failed to get clock 'hdmi'\n");
2165 ret = PTR_ERR(res->hdmi);
1963 goto fail; 2166 goto fail;
1964 } 2167 }
1965 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 2168 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
1966 if (IS_ERR(res->sclk_hdmi)) { 2169 if (IS_ERR(res->sclk_hdmi)) {
1967 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 2170 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2171 ret = PTR_ERR(res->sclk_hdmi);
1968 goto fail; 2172 goto fail;
1969 } 2173 }
1970 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); 2174 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
1971 if (IS_ERR(res->sclk_pixel)) { 2175 if (IS_ERR(res->sclk_pixel)) {
1972 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 2176 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2177 ret = PTR_ERR(res->sclk_pixel);
1973 goto fail; 2178 goto fail;
1974 } 2179 }
1975 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); 2180 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
1976 if (IS_ERR(res->sclk_hdmiphy)) { 2181 if (IS_ERR(res->sclk_hdmiphy)) {
1977 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 2182 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
1978 goto fail; 2183 ret = PTR_ERR(res->sclk_hdmiphy);
1979 }
1980 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
1981 if (IS_ERR(res->hdmiphy)) {
1982 DRM_ERROR("failed to get clock 'hdmiphy'\n");
1983 goto fail; 2184 goto fail;
1984 } 2185 }
1985 res->mout_hdmi = devm_clk_get(dev, "mout_hdmi"); 2186 res->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
1986 if (IS_ERR(res->mout_hdmi)) { 2187 if (IS_ERR(res->mout_hdmi)) {
1987 DRM_ERROR("failed to get clock 'mout_hdmi'\n"); 2188 DRM_ERROR("failed to get clock 'mout_hdmi'\n");
2189 ret = PTR_ERR(res->mout_hdmi);
1988 goto fail; 2190 goto fail;
1989 } 2191 }
1990 2192
@@ -1992,8 +2194,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1992 2194
1993 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * 2195 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
1994 sizeof(res->regul_bulk[0]), GFP_KERNEL); 2196 sizeof(res->regul_bulk[0]), GFP_KERNEL);
1995 if (!res->regul_bulk) 2197 if (!res->regul_bulk) {
2198 ret = -ENOMEM;
1996 goto fail; 2199 goto fail;
2200 }
1997 for (i = 0; i < ARRAY_SIZE(supply); ++i) { 2201 for (i = 0; i < ARRAY_SIZE(supply); ++i) {
1998 res->regul_bulk[i].supply = supply[i]; 2202 res->regul_bulk[i].supply = supply[i];
1999 res->regul_bulk[i].consumer = NULL; 2203 res->regul_bulk[i].consumer = NULL;
@@ -2001,14 +2205,14 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
2001 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 2205 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
2002 if (ret) { 2206 if (ret) {
2003 DRM_ERROR("failed to get regulators\n"); 2207 DRM_ERROR("failed to get regulators\n");
2004 goto fail; 2208 return ret;
2005 } 2209 }
2006 res->regul_count = ARRAY_SIZE(supply); 2210 res->regul_count = ARRAY_SIZE(supply);
2007 2211
2008 return 0; 2212 return ret;
2009fail: 2213fail:
2010 DRM_ERROR("HDMI resource init - failed\n"); 2214 DRM_ERROR("HDMI resource init - failed\n");
2011 return -ENODEV; 2215 return ret;
2012} 2216}
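
Returning PTR_ERR() from the clock lookups instead of a blanket -ENODEV matters mostly for -EPROBE_DEFER: preserving it lets the driver core retry the probe once the clock provider appears. A short sketch, hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>

static int foo_get_clk(struct device *dev, struct clk **out)
{
	*out = devm_clk_get(dev, "bus");
	if (IS_ERR(*out))
		return PTR_ERR(*out);	/* keeps -EPROBE_DEFER intact */
	return 0;
}
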
2013 2217
2014static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata 2218static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
@@ -2043,42 +2247,105 @@ static struct of_device_id hdmi_match_types[] = {
2043 .compatible = "samsung,exynos4212-hdmi", 2247 .compatible = "samsung,exynos4212-hdmi",
2044 .data = &exynos4212_hdmi_driver_data, 2248 .data = &exynos4212_hdmi_driver_data,
2045 }, { 2249 }, {
2250 .compatible = "samsung,exynos5420-hdmi",
2251 .data = &exynos5420_hdmi_driver_data,
2252 }, {
2046 /* end node */ 2253 /* end node */
2047 } 2254 }
2048}; 2255};
2049 2256
2257static int hdmi_bind(struct device *dev, struct device *master, void *data)
2258{
2259 struct drm_device *drm_dev = data;
2260 struct hdmi_context *hdata;
2261
2262 hdata = hdmi_display.ctx;
2263 hdata->drm_dev = drm_dev;
2264
2265 return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
2266}
2267
2268static void hdmi_unbind(struct device *dev, struct device *master, void *data)
2269{
2270 struct exynos_drm_display *display = get_hdmi_display(dev);
2271 struct drm_encoder *encoder = display->encoder;
2272 struct hdmi_context *hdata = display->ctx;
2273
2274 encoder->funcs->destroy(encoder);
2275 drm_connector_cleanup(&hdata->connector);
2276}
2277
2278static const struct component_ops hdmi_component_ops = {
2279 .bind = hdmi_bind,
2280 .unbind = hdmi_unbind,
2281};
2282
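hdmi_bind()/hdmi_unbind() are one half of the conversion to the component framework: probe now only registers the device as a component, and the DRM-facing setup runs when the master driver binds all components with a live drm_device (passed in as the data argument). A stripped-down sketch of the registration side, with a hypothetical foo driver:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>

static int foo_bind(struct device *dev, struct device *master, void *data)
{
        struct drm_device *drm = data; /* supplied by the master's bind */

        /* create encoders/connectors against drm here */
        dev_dbg(dev, "bound against %s\n", drm->driver->name);
        return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
        /* tear down whatever foo_bind() created */
}

static const struct component_ops foo_component_ops = {
        .bind = foo_bind,
        .unbind = foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
        /* acquire clocks/regs/irqs here; DRM objects wait for bind */
        return component_add(&pdev->dev, &foo_component_ops);
}

static int foo_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &foo_component_ops);
        return 0;
}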
2283static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev)
2284{
2285 const char *compatible_str = "samsung,exynos4210-hdmiddc";
2286 struct device_node *np;
2287
2288 np = of_find_compatible_node(NULL, NULL, compatible_str);
2289 if (np)
2290 return of_get_next_parent(np);
2291
2292 return NULL;
2293}
2294
2295static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
2296{
2297 const char *compatible_str = "samsung,exynos4212-hdmiphy";
2298
2299 return of_find_compatible_node(NULL, NULL, compatible_str);
2300}
2301
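The two *_legacy_*_dt_binding() helpers keep old device trees working: of_find_compatible_node() searches the entire tree for the historic compatible string, and the DDC variant hands back that node's parent via of_get_next_parent(), which also drops the reference on the child. The lookup order the probe path implements is, in sketch form (hypothetical compatible string and property name):

#include <linux/device.h>
#include <linux/of.h>

static struct device_node *foo_find_node(struct device *dev)
{
        struct device_node *np;

        /* 1) legacy: match anywhere in the tree by compatible string */
        np = of_find_compatible_node(NULL, NULL, "vendor,foo-legacy");
        if (np)
                return np;

        /* 2) current: explicit phandle on the device's own node */
        return of_parse_phandle(dev->of_node, "foo", 0);
}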
2050static int hdmi_probe(struct platform_device *pdev) 2302static int hdmi_probe(struct platform_device *pdev)
2051{ 2303{
2304 struct device_node *ddc_node, *phy_node;
2305 struct s5p_hdmi_platform_data *pdata;
2306 struct hdmi_driver_data *drv_data;
2307 const struct of_device_id *match;
2052 struct device *dev = &pdev->dev; 2308 struct device *dev = &pdev->dev;
2053 struct hdmi_context *hdata; 2309 struct hdmi_context *hdata;
2054 struct s5p_hdmi_platform_data *pdata;
2055 struct resource *res; 2310 struct resource *res;
2056 const struct of_device_id *match;
2057 struct device_node *ddc_node, *phy_node;
2058 struct hdmi_driver_data *drv_data;
2059 int ret; 2311 int ret;
2060 2312
2061 if (!dev->of_node) 2313 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
2062 return -ENODEV; 2314 hdmi_display.type);
2315 if (ret)
2316 return ret;
2317
2318 if (!dev->of_node) {
2319 ret = -ENODEV;
2320 goto err_del_component;
2321 }
2063 2322
2064 pdata = drm_hdmi_dt_parse_pdata(dev); 2323 pdata = drm_hdmi_dt_parse_pdata(dev);
2065 if (!pdata) 2324 if (!pdata) {
2066 return -EINVAL; 2325 ret = -EINVAL;
2326 goto err_del_component;
2327 }
2067 2328
2068 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); 2329 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
2069 if (!hdata) 2330 if (!hdata) {
2070 return -ENOMEM; 2331 ret = -ENOMEM;
2332 goto err_del_component;
2333 }
2071 2334
2072 mutex_init(&hdata->hdmi_mutex); 2335 mutex_init(&hdata->hdmi_mutex);
2073 2336
2074 platform_set_drvdata(pdev, &hdmi_display); 2337 platform_set_drvdata(pdev, &hdmi_display);
2075 2338
2076 match = of_match_node(hdmi_match_types, dev->of_node); 2339 match = of_match_node(hdmi_match_types, dev->of_node);
2077 if (!match) 2340 if (!match) {
2078 return -ENODEV; 2341 ret = -ENODEV;
2342 goto err_del_component;
2343 }
2079 2344
2080 drv_data = (struct hdmi_driver_data *)match->data; 2345 drv_data = (struct hdmi_driver_data *)match->data;
2081 hdata->type = drv_data->type; 2346 hdata->type = drv_data->type;
2347 hdata->phy_confs = drv_data->phy_confs;
2348 hdata->phy_conf_count = drv_data->phy_conf_count;
2082 2349
2083 hdata->hpd_gpio = pdata->hpd_gpio; 2350 hdata->hpd_gpio = pdata->hpd_gpio;
2084 hdata->dev = dev; 2351 hdata->dev = dev;
@@ -2086,35 +2353,44 @@ static int hdmi_probe(struct platform_device *pdev)
2086 ret = hdmi_resources_init(hdata); 2353 ret = hdmi_resources_init(hdata);
2087 if (ret) { 2354 if (ret) {
2088 DRM_ERROR("hdmi_resources_init failed\n"); 2355 DRM_ERROR("hdmi_resources_init failed\n");
2089 return -EINVAL; 2356 return ret;
2090 } 2357 }
2091 2358
2092 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2359 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2093 hdata->regs = devm_ioremap_resource(dev, res); 2360 hdata->regs = devm_ioremap_resource(dev, res);
2094 if (IS_ERR(hdata->regs)) 2361 if (IS_ERR(hdata->regs)) {
2095 return PTR_ERR(hdata->regs); 2362 ret = PTR_ERR(hdata->regs);
2363 goto err_del_component;
2364 }
2096 2365
2097 ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); 2366 ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
2098 if (ret) { 2367 if (ret) {
2099 DRM_ERROR("failed to request HPD gpio\n"); 2368 DRM_ERROR("failed to request HPD gpio\n");
2100 return ret; 2369 goto err_del_component;
2101 } 2370 }
2102 2371
2372 ddc_node = hdmi_legacy_ddc_dt_binding(dev);
2373 if (ddc_node)
2374 goto out_get_ddc_adpt;
2375
2103 /* DDC i2c driver */ 2376 /* DDC i2c driver */
2104 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); 2377 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
2105 if (!ddc_node) { 2378 if (!ddc_node) {
2106 DRM_ERROR("Failed to find ddc node in device tree\n"); 2379 DRM_ERROR("Failed to find ddc node in device tree\n");
2107 return -ENODEV; 2380 ret = -ENODEV;
2381 goto err_del_component;
2108 } 2382 }
2383
2384out_get_ddc_adpt:
2109 hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); 2385 hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
2110 if (!hdata->ddc_adpt) { 2386 if (!hdata->ddc_adpt) {
2111 DRM_ERROR("Failed to get ddc i2c adapter by node\n"); 2387 DRM_ERROR("Failed to get ddc i2c adapter by node\n");
2112 return -ENODEV; 2388 return -EPROBE_DEFER;
2113 } 2389 }
2114 2390
2115 /* Not support APB PHY yet. */ 2391 phy_node = hdmi_legacy_phy_dt_binding(dev);
2116 if (drv_data->is_apb_phy) 2392 if (phy_node)
2117 return -EPERM; 2393 goto out_get_phy_port;
2118 2394
2119 /* hdmiphy i2c driver */ 2395 /* hdmiphy i2c driver */
2120 phy_node = of_parse_phandle(dev->of_node, "phy", 0); 2396 phy_node = of_parse_phandle(dev->of_node, "phy", 0);
@@ -2123,11 +2399,22 @@ static int hdmi_probe(struct platform_device *pdev)
2123 ret = -ENODEV; 2399 ret = -ENODEV;
2124 goto err_ddc; 2400 goto err_ddc;
2125 } 2401 }
2126 hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); 2402
2127 if (!hdata->hdmiphy_port) { 2403out_get_phy_port:
2128 DRM_ERROR("Failed to get hdmi phy i2c client from node\n"); 2404 if (drv_data->is_apb_phy) {
2129 ret = -ENODEV; 2405 hdata->regs_hdmiphy = of_iomap(phy_node, 0);
2130 goto err_ddc; 2406 if (!hdata->regs_hdmiphy) {
2407 DRM_ERROR("failed to ioremap hdmi phy\n");
2408 ret = -ENOMEM;
2409 goto err_ddc;
2410 }
2411 } else {
2412 hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
2413 if (!hdata->hdmiphy_port) {
2414 DRM_ERROR("Failed to get hdmi phy i2c client\n");
2415 ret = -EPROBE_DEFER;
2416 goto err_ddc;
2417 }
2131 } 2418 }
2132 2419
2133 hdata->irq = gpio_to_irq(hdata->hpd_gpio); 2420 hdata->irq = gpio_to_irq(hdata->hpd_gpio);
@@ -2139,6 +2426,8 @@ static int hdmi_probe(struct platform_device *pdev)
2139 2426
2140 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2427 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2141 2428
2429 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
2430
2142 ret = devm_request_threaded_irq(dev, hdata->irq, NULL, 2431 ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
2143 hdmi_irq_thread, IRQF_TRIGGER_RISING | 2432 hdmi_irq_thread, IRQF_TRIGGER_RISING |
2144 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2433 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -2148,30 +2437,51 @@ static int hdmi_probe(struct platform_device *pdev)
2148 goto err_hdmiphy; 2437 goto err_hdmiphy;
2149 } 2438 }
2150 2439
2151 pm_runtime_enable(dev); 2440 hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
2441 "samsung,syscon-phandle");
2442 if (IS_ERR(hdata->pmureg)) {
2443 DRM_ERROR("syscon regmap lookup failed.\n");
2444 ret = -EPROBE_DEFER;
2445 goto err_hdmiphy;
2446 }
2152 2447
2448 pm_runtime_enable(dev);
2153 hdmi_display.ctx = hdata; 2449 hdmi_display.ctx = hdata;
2154 exynos_drm_display_register(&hdmi_display);
2155 2450
2156 return 0; 2451 ret = component_add(&pdev->dev, &hdmi_component_ops);
2452 if (ret)
2453 goto err_disable_pm_runtime;
2454
2455 return ret;
2456
2457err_disable_pm_runtime:
2458 pm_runtime_disable(dev);
2157 2459
2158err_hdmiphy: 2460err_hdmiphy:
2159 put_device(&hdata->hdmiphy_port->dev); 2461 if (hdata->hdmiphy_port)
2462 put_device(&hdata->hdmiphy_port->dev);
2160err_ddc: 2463err_ddc:
2161 put_device(&hdata->ddc_adpt->dev); 2464 put_device(&hdata->ddc_adpt->dev);
2465
2466err_del_component:
2467 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
2468
2162 return ret; 2469 return ret;
2163} 2470}
2164 2471
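Several lookups in hdmi_probe() now return -EPROBE_DEFER instead of -ENODEV when the referenced i2c adapter, phy client, or syscon regmap exists in the device tree but has not been bound yet; the driver core re-runs such probes as more drivers register, so the distinction between "not described" and "not ready yet" matters. A sketch of the idiom (hypothetical wrapper; of_find_i2c_adapter_by_node() is the real helper used above):

static int foo_get_ddc(struct device *dev, struct i2c_adapter **out)
{
        struct device_node *ddc_node;

        ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
        if (!ddc_node)
                return -ENODEV;         /* DT really lacks it: hard failure */

        *out = of_find_i2c_adapter_by_node(ddc_node);
        of_node_put(ddc_node);
        if (!*out)
                return -EPROBE_DEFER;   /* described, but not probed yet */

        return 0;
}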
2165static int hdmi_remove(struct platform_device *pdev) 2472static int hdmi_remove(struct platform_device *pdev)
2166{ 2473{
2167 struct device *dev = &pdev->dev; 2474 struct hdmi_context *hdata = hdmi_display.ctx;
2168 struct exynos_drm_display *display = get_hdmi_display(dev); 2475
2169 struct hdmi_context *hdata = display->ctx; 2476 cancel_delayed_work_sync(&hdata->hotplug_work);
2170 2477
2171 put_device(&hdata->hdmiphy_port->dev); 2478 put_device(&hdata->hdmiphy_port->dev);
2172 put_device(&hdata->ddc_adpt->dev); 2479 put_device(&hdata->ddc_adpt->dev);
2480
2173 pm_runtime_disable(&pdev->dev); 2481 pm_runtime_disable(&pdev->dev);
2482 component_del(&pdev->dev, &hdmi_component_ops);
2174 2483
2484 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
2175 return 0; 2485 return 0;
2176} 2486}
2177 2487
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
deleted file mode 100644
index 0ddf3957de15..000000000000
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authors:
5 * Inki Dae <inki.dae@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_HDMI_H_
15#define _EXYNOS_HDMI_H_
16
17void hdmi_attach_ddc_client(struct i2c_client *ddc);
18void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
19
20extern struct i2c_driver hdmiphy_driver;
21extern struct i2c_driver ddc_driver;
22
23#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
deleted file mode 100644
index 59abb1494ceb..000000000000
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * Copyright (C) 2011 Samsung Electronics Co.Ltd
3 * Authors:
4 * Seung-Woo Kim <sw0312.kim@samsung.com>
5 * Inki Dae <inki.dae@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include <drm/drmP.h>
15
16#include <linux/kernel.h>
17#include <linux/i2c.h>
18#include <linux/of.h>
19
20#include "exynos_drm_drv.h"
21#include "exynos_hdmi.h"
22
23
24static int hdmiphy_probe(struct i2c_client *client,
25 const struct i2c_device_id *id)
26{
27 hdmi_attach_hdmiphy_client(client);
28
29 dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
30 "into i2c adapter successfully\n");
31
32 return 0;
33}
34
35static int hdmiphy_remove(struct i2c_client *client)
36{
37 dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
38 "from i2c adapter successfully\n");
39
40 return 0;
41}
42
43static struct of_device_id hdmiphy_match_types[] = {
44 {
45 .compatible = "samsung,exynos5-hdmiphy",
46 }, {
47 .compatible = "samsung,exynos4210-hdmiphy",
48 }, {
49 .compatible = "samsung,exynos4212-hdmiphy",
50 }, {
51 /* end node */
52 }
53};
54
55struct i2c_driver hdmiphy_driver = {
56 .driver = {
57 .name = "exynos-hdmiphy",
58 .owner = THIS_MODULE,
59 .of_match_table = hdmiphy_match_types,
60 },
61 .probe = hdmiphy_probe,
62 .remove = hdmiphy_remove,
63 .command = NULL,
64};
65EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index ce288818d2c0..4c5aed7e54c8 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -31,6 +31,7 @@
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/regulator/consumer.h> 32#include <linux/regulator/consumer.h>
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/component.h>
34 35
35#include <drm/exynos_drm.h> 36#include <drm/exynos_drm.h>
36 37
@@ -830,13 +831,15 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
830} 831}
831 832
832static int mixer_initialize(struct exynos_drm_manager *mgr, 833static int mixer_initialize(struct exynos_drm_manager *mgr,
833 struct drm_device *drm_dev, int pipe) 834 struct drm_device *drm_dev)
834{ 835{
835 int ret; 836 int ret;
836 struct mixer_context *mixer_ctx = mgr->ctx; 837 struct mixer_context *mixer_ctx = mgr->ctx;
838 struct exynos_drm_private *priv;
839 priv = drm_dev->dev_private;
837 840
838 mixer_ctx->drm_dev = drm_dev; 841 mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
839 mixer_ctx->pipe = pipe; 842 mgr->pipe = mixer_ctx->pipe = priv->pipe++;
840 843
841 /* acquire resources: regs, irqs, clocks */ 844 /* acquire resources: regs, irqs, clocks */
842 ret = mixer_resources_init(mixer_ctx); 845 ret = mixer_resources_init(mixer_ctx);
@@ -1142,8 +1145,6 @@ int mixer_check_mode(struct drm_display_mode *mode)
1142} 1145}
1143 1146
1144static struct exynos_drm_manager_ops mixer_manager_ops = { 1147static struct exynos_drm_manager_ops mixer_manager_ops = {
1145 .initialize = mixer_initialize,
1146 .remove = mixer_mgr_remove,
1147 .dpms = mixer_dpms, 1148 .dpms = mixer_dpms,
1148 .enable_vblank = mixer_enable_vblank, 1149 .enable_vblank = mixer_enable_vblank,
1149 .disable_vblank = mixer_disable_vblank, 1150 .disable_vblank = mixer_disable_vblank,
@@ -1200,11 +1201,13 @@ static struct of_device_id mixer_match_types[] = {
1200 } 1201 }
1201}; 1202};
1202 1203
1203static int mixer_probe(struct platform_device *pdev) 1204static int mixer_bind(struct device *dev, struct device *manager, void *data)
1204{ 1205{
1205 struct device *dev = &pdev->dev; 1206 struct platform_device *pdev = to_platform_device(dev);
1207 struct drm_device *drm_dev = data;
1206 struct mixer_context *ctx; 1208 struct mixer_context *ctx;
1207 struct mixer_drv_data *drv; 1209 struct mixer_drv_data *drv;
1210 int ret;
1208 1211
1209 dev_info(dev, "probe start\n"); 1212 dev_info(dev, "probe start\n");
1210 1213
@@ -1233,19 +1236,61 @@ static int mixer_probe(struct platform_device *pdev)
1233 atomic_set(&ctx->wait_vsync_event, 0); 1236 atomic_set(&ctx->wait_vsync_event, 0);
1234 1237
1235 mixer_manager.ctx = ctx; 1238 mixer_manager.ctx = ctx;
1239 ret = mixer_initialize(&mixer_manager, drm_dev);
1240 if (ret)
1241 return ret;
1242
1236 platform_set_drvdata(pdev, &mixer_manager); 1243 platform_set_drvdata(pdev, &mixer_manager);
1237 exynos_drm_manager_register(&mixer_manager); 1244 ret = exynos_drm_crtc_create(&mixer_manager);
1245 if (ret) {
1246 mixer_mgr_remove(&mixer_manager);
1247 return ret;
1248 }
1238 1249
1239 pm_runtime_enable(dev); 1250 pm_runtime_enable(dev);
1240 1251
1241 return 0; 1252 return 0;
1242} 1253}
1243 1254
1244static int mixer_remove(struct platform_device *pdev) 1255static void mixer_unbind(struct device *dev, struct device *master, void *data)
1256{
1257 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1258 struct drm_crtc *crtc = mgr->crtc;
1259
1260 dev_info(dev, "remove successful\n");
1261
1262 mixer_mgr_remove(mgr);
1263
1264 pm_runtime_disable(dev);
1265
1266 crtc->funcs->destroy(crtc);
1267}
1268
1269static const struct component_ops mixer_component_ops = {
1270 .bind = mixer_bind,
1271 .unbind = mixer_unbind,
1272};
1273
1274static int mixer_probe(struct platform_device *pdev)
1245{ 1275{
1246 dev_info(&pdev->dev, "remove successful\n"); 1276 int ret;
1247 1277
1248 pm_runtime_disable(&pdev->dev); 1278 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
1279 mixer_manager.type);
1280 if (ret)
1281 return ret;
1282
1283 ret = component_add(&pdev->dev, &mixer_component_ops);
1284 if (ret)
1285 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
1286
1287 return ret;
1288}
1289
1290static int mixer_remove(struct platform_device *pdev)
1291{
1292 component_del(&pdev->dev, &mixer_component_ops);
1293 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
1249 1294
1250 return 0; 1295 return 0;
1251} 1296}
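The mixer goes through the same probe/bind split as the HDMI path, with one extra detail: mixer_initialize() no longer receives a pipe number, it takes the next one from a counter in the drm_device private data (priv->pipe++), so CRTCs get consecutive pipe indices in the order the master binds them. In sketch form, with an exynos-style private struct:

struct foo_drm_private {
        unsigned int pipe;      /* next CRTC pipe index to hand out */
};

static int foo_crtc_bind(struct device *dev, struct device *master,
                         void *data)
{
        struct drm_device *drm = data;
        struct foo_drm_private *priv = drm->dev_private;
        int pipe = priv->pipe++;        /* consecutive, in bind order */

        /* register the CRTC for this pipe against drm */
        dev_dbg(dev, "CRTC bound as pipe %d\n", pipe);
        return 0;
}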
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index ef1b3eb3ba6e..3f35ac6d8a47 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -578,4 +578,20 @@
578#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074) 578#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
579#define HDMI_TG_3D HDMI_TG_BASE(0x00F0) 579#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
580 580
581/* HDMI PHY Register Offsets */
582#define HDMIPHY_POWER (0x74 >> 2)
583#define HDMIPHY_MODE_SET_DONE (0x7c >> 2)
584
585/* HDMI PHY Values */
586#define HDMI_PHY_POWER_ON 0x80
587#define HDMI_PHY_POWER_OFF 0xff
588
589/* HDMI PHY Mode Set Values */
590#define HDMI_PHY_DISABLE_MODE_SET 0x80
591#define HDMI_PHY_ENABLE_MODE_SET 0x00
592
593/* PMU Registers for PHY */
594#define PMU_HDMI_PHY_CONTROL 0x700
595#define PMU_HDMI_PHY_ENABLE_BIT BIT(0)
596
581#endif /* SAMSUNG_REGS_HDMI_H */ 597#endif /* SAMSUNG_REGS_HDMI_H */
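The new HDMIPHY_* offsets are stored pre-shifted, e.g. (0x74 >> 2): they are 32-bit word indices rather than byte offsets, so an accessor is expected to shift them back before adding them to the of_iomap()ed base. A sketch of that convention (hypothetical accessor, not necessarily the driver's actual helper):

static inline void foo_phy_writeb(void __iomem *base, u32 reg_word, u8 val)
{
        /* word index back to byte offset */
        writeb(val, base + (reg_word << 2));
}

/* e.g. foo_phy_writeb(base, HDMIPHY_POWER, HDMI_PHY_POWER_OFF); */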
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 489ffd2c66e5..87885d8c06e8 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -148,7 +148,7 @@ static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
148 break; 148 break;
149 case BIT(14): 149 case BIT(14):
150 /*wait for all fifo empty*/ 150 /*wait for all fifo empty*/
151 /*wait_for_all_fifos_empty(sender)*/; 151 /*wait_for_all_fifos_empty(sender)*/
152 break; 152 break;
153 case BIT(15): 153 case BIT(15):
154 dev_dbg(sender->dev->dev, "No Action required\n"); 154 dev_dbg(sender->dev->dev, "No Action required\n");
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index b686e56646eb..6e8fe9ec02b5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -112,11 +112,9 @@ static void psb_driver_lastclose(struct drm_device *dev)
112 struct drm_psb_private *dev_priv = dev->dev_private; 112 struct drm_psb_private *dev_priv = dev->dev_private;
113 struct psb_fbdev *fbdev = dev_priv->fbdev; 113 struct psb_fbdev *fbdev = dev_priv->fbdev;
114 114
115 drm_modeset_lock_all(dev); 115 ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
116 ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
117 if (ret) 116 if (ret)
118 DRM_DEBUG("failed to restore crtc mode\n"); 117 DRM_DEBUG("failed to restore crtc mode\n");
119 drm_modeset_unlock_all(dev);
120 118
121 return; 119 return;
122} 120}
@@ -354,7 +352,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
354 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); 352 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
355 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 353 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
356 354
357 drm_irq_install(dev); 355 drm_irq_install(dev, dev->pdev->irq);
358 356
359 dev->vblank_disable_allowed = true; 357 dev->vblank_disable_allowed = true;
360 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 358 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -479,7 +477,7 @@ static struct drm_driver driver = {
479 .lastclose = psb_driver_lastclose, 477 .lastclose = psb_driver_lastclose,
480 .preclose = psb_driver_preclose, 478 .preclose = psb_driver_preclose,
481 479
482 .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls), 480 .num_ioctls = ARRAY_SIZE(psb_ioctls),
483 .device_is_agp = psb_driver_device_is_agp, 481 .device_is_agp = psb_driver_device_is_agp,
484 .irq_preinstall = psb_irq_preinstall, 482 .irq_preinstall = psb_irq_preinstall,
485 .irq_postinstall = psb_irq_postinstall, 483 .irq_postinstall = psb_irq_postinstall,
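Two drm core API migrations account for the gma500 hunks: drm_fb_helper_restore_fbdev_mode_unlocked() takes the modeset locks itself, so the explicit drm_modeset_lock_all()/drm_modeset_unlock_all() pair around the old call goes away, and drm_irq_install() now takes the IRQ number as an explicit second argument instead of deriving it from the device. The call sites reduce to:

/* lastclose: the helper handles its own locking now */
ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);

/* driver load: the IRQ is passed explicitly */
ret = drm_irq_install(dev, dev->pdev->irq);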
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 48af5cac1902..240c331405b9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -568,11 +568,11 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
568 568
569static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) 569static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
570{ 570{
571 uint8_t sum = 0; 571 int sum = 0;
572 572
573 while (bytes--) 573 while (bytes--)
574 sum += *buf++; 574 sum -= *buf++;
575 return (255 - sum) + 1; 575 return sum;
576} 576}
577 577
578#define HB(x) (x) 578#define HB(x) (x)
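The tda998x checksum rewrite is arithmetically a no-op: the old code summed the bytes in a uint8_t and returned (255 - sum) + 1, the new code accumulates negated bytes in an int and lets the uint8_t return type truncate, and both yield the two's complement of the byte sum modulo 256, i.e. the value that makes the whole buffer sum to zero. A standalone check (plain C, outside the kernel):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t buf[] = { 0x10, 0x20 };
        uint8_t old_sum = 0;
        int new_sum = 0;
        unsigned int i;

        for (i = 0; i < sizeof(buf); i++) {
                old_sum += buf[i];      /* old: add into a u8 */
                new_sum -= buf[i];      /* new: subtract into an int */
        }
        /* both give 0xd0 for this buffer */
        assert((uint8_t)((255 - old_sum) + 1) == (uint8_t)new_sum);
        /* and the checksum makes the total wrap to zero */
        assert((uint8_t)(buf[0] + buf[1] + (uint8_t)new_sum) == 0);
        return 0;
}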
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index aeace37415aa..e88bac1d781f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1251,7 +1251,7 @@ const struct drm_ioctl_desc i810_ioctls[] = {
1251 DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), 1251 DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
1252}; 1252};
1253 1253
1254int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1254int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
1255 1255
1256/** 1256/**
1257 * Determine if the device really is AGP or not. 1257 * Determine if the device really is AGP or not.
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index bea2d67196fb..437e1824d0bf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -5,6 +5,7 @@ config DRM_I915
5 depends on (AGP || AGP=n) 5 depends on (AGP || AGP=n)
6 select INTEL_GTT 6 select INTEL_GTT
7 select AGP_INTEL if AGP 7 select AGP_INTEL if AGP
8 select INTERVAL_TREE
8 # we need shmfs for the swappable backing store, and in particular 9 # we need shmfs for the swappable backing store, and in particular
9 # the shmem_readpage() which depends upon tmpfs 10 # the shmem_readpage() which depends upon tmpfs
10 select SHMEM 11 select SHMEM
@@ -71,7 +72,7 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
71 72
72config DRM_I915_UMS 73config DRM_I915_UMS
73 bool "Enable userspace modesetting on Intel hardware (DEPRECATED)" 74 bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
74 depends on DRM_I915 75 depends on DRM_I915 && BROKEN
75 default n 76 default n
76 help 77 help
77 Choose this option if you still need userspace modesetting. 78 Choose this option if you still need userspace modesetting.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b1445b73465b..cad1683d8bb5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -18,6 +18,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
18# GEM code 18# GEM code
19i915-y += i915_cmd_parser.o \ 19i915-y += i915_cmd_parser.o \
20 i915_gem_context.o \ 20 i915_gem_context.o \
21 i915_gem_render_state.o \
21 i915_gem_debug.o \ 22 i915_gem_debug.o \
22 i915_gem_dmabuf.o \ 23 i915_gem_dmabuf.o \
23 i915_gem_evict.o \ 24 i915_gem_evict.o \
@@ -26,12 +27,18 @@ i915-y += i915_cmd_parser.o \
26 i915_gem.o \ 27 i915_gem.o \
27 i915_gem_stolen.o \ 28 i915_gem_stolen.o \
28 i915_gem_tiling.o \ 29 i915_gem_tiling.o \
30 i915_gem_userptr.o \
29 i915_gpu_error.o \ 31 i915_gpu_error.o \
30 i915_irq.o \ 32 i915_irq.o \
31 i915_trace_points.o \ 33 i915_trace_points.o \
32 intel_ringbuffer.o \ 34 intel_ringbuffer.o \
33 intel_uncore.o 35 intel_uncore.o
34 36
37# autogenerated null render state
38i915-y += intel_renderstate_gen6.o \
39 intel_renderstate_gen7.o \
40 intel_renderstate_gen8.o
41
35# modesetting core code 42# modesetting core code
36i915-y += intel_bios.o \ 43i915-y += intel_bios.o \
37 intel_display.o \ 44 intel_display.o \
@@ -55,6 +62,7 @@ i915-y += dvo_ch7017.o \
55 intel_dsi_cmd.o \ 62 intel_dsi_cmd.o \
56 intel_dsi.o \ 63 intel_dsi.o \
57 intel_dsi_pll.o \ 64 intel_dsi_pll.o \
65 intel_dsi_panel_vbt.o \
58 intel_dvo.o \ 66 intel_dvo.o \
59 intel_hdmi.o \ 67 intel_hdmi.o \
60 intel_i2c.o \ 68 intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a0f5bdd69491..80449f475960 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -160,7 +160,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
160 if (i2c_transfer(adapter, msgs, 2) == 2) { 160 if (i2c_transfer(adapter, msgs, 2) == 2) {
161 *ch = in_buf[0]; 161 *ch = in_buf[0];
162 return true; 162 return true;
163 }; 163 }
164 164
165 if (!ch7xxx->quiet) { 165 if (!ch7xxx->quiet) {
166 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", 166 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0f1865d7d4d8..0f2587ff347c 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -195,7 +195,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
195 if (i2c_transfer(adapter, msgs, 3) == 3) { 195 if (i2c_transfer(adapter, msgs, 3) == 3) {
196 *data = (in_buf[1] << 8) | in_buf[0]; 196 *data = (in_buf[1] << 8) | in_buf[0];
197 return true; 197 return true;
198 }; 198 }
199 199
200 if (!priv->quiet) { 200 if (!priv->quiet) {
201 DRM_DEBUG_KMS("Unable to read register 0x%02x from " 201 DRM_DEBUG_KMS("Unable to read register 0x%02x from "
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 8155ded79079..74f2af7c2d3e 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -121,7 +121,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
121 if (i2c_transfer(adapter, msgs, 2) == 2) { 121 if (i2c_transfer(adapter, msgs, 2) == 2) {
122 *ch = in_buf[0]; 122 *ch = in_buf[0];
123 return true; 123 return true;
124 }; 124 }
125 125
126 if (!ns->quiet) { 126 if (!ns->quiet) {
127 DRM_DEBUG_KMS 127 DRM_DEBUG_KMS
@@ -233,9 +233,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
233 struct drm_display_mode *mode) 233 struct drm_display_mode *mode)
234{ 234{
235 DRM_DEBUG_KMS 235 DRM_DEBUG_KMS
236 ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", 236 ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
237 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, 237 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
238 mode->vtotal);
239 238
240 /* 239 /*
241 * Currently, these are all the modes I have data from. 240 * Currently, these are all the modes I have data from.
@@ -261,9 +260,8 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
261 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 260 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
262 261
263 DRM_DEBUG_KMS 262 DRM_DEBUG_KMS
264 ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", 263 ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
265 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, 264 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
266 mode->vtotal);
267 265
268 /* 266 /*
269 * Where do I find the native resolution for which scaling is not required??? 267 * Where do I find the native resolution for which scaling is not required???
@@ -277,8 +275,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
277 if (mode->hdisplay == 800 && mode->vdisplay == 600) { 275 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
278 /* mode 277 */ 276 /* mode 277 */
279 ns->reg_8_shadow &= ~NS2501_8_BPAS; 277 ns->reg_8_shadow &= ~NS2501_8_BPAS;
280 DRM_DEBUG_KMS("%s: switching to 800x600\n", 278 DRM_DEBUG_KMS("switching to 800x600\n");
281 __FUNCTION__);
282 279
283 /* 280 /*
284 * No, I do not know where this data comes from. 281 * No, I do not know where this data comes from.
@@ -341,8 +338,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
341 338
342 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) { 339 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
343 /* mode 274 */ 340 /* mode 274 */
344 DRM_DEBUG_KMS("%s: switching to 640x480\n", 341 DRM_DEBUG_KMS("switching to 640x480\n");
345 __FUNCTION__);
346 /* 342 /*
347 * No, I do not know where this data comes from. 343 * No, I do not know where this data comes from.
348 * It is just what the video bios left in the DVO, so 344 * It is just what the video bios left in the DVO, so
@@ -406,8 +402,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
406 402
407 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { 403 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
408 /* mode 280 */ 404 /* mode 280 */
409 DRM_DEBUG_KMS("%s: switching to 1024x768\n", 405 DRM_DEBUG_KMS("switching to 1024x768\n");
410 __FUNCTION__);
411 /* 406 /*
412 * This might or might not work, actually. I'm silently 407 * This might or might not work, actually. I'm silently
413 * assuming here that the native panel resolution is 408 * assuming here that the native panel resolution is
@@ -458,8 +453,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
458 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 453 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
459 unsigned char ch; 454 unsigned char ch;
460 455
461 DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n", 456 DRM_DEBUG_KMS("Trying to set the dpms of the DVO to %i\n", enable);
462 __FUNCTION__, enable);
463 457
464 ch = ns->reg_8_shadow; 458 ch = ns->reg_8_shadow;
465 459
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 7b3e9e936200..fa0114967076 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -93,7 +93,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
93 if (i2c_transfer(adapter, msgs, 2) == 2) { 93 if (i2c_transfer(adapter, msgs, 2) == 2) {
94 *ch = in_buf[0]; 94 *ch = in_buf[0];
95 return true; 95 return true;
96 }; 96 }
97 97
98 if (!sil->quiet) { 98 if (!sil->quiet) {
99 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", 99 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 12ea4b164692..7853719a0e81 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -118,7 +118,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
118 if (i2c_transfer(adapter, msgs, 2) == 2) { 118 if (i2c_transfer(adapter, msgs, 2) == 2) {
119 *ch = in_buf[0]; 119 *ch = in_buf[0];
120 return true; 120 return true;
121 }; 121 }
122 122
123 if (!tfp->quiet) { 123 if (!tfp->quiet) {
124 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", 124 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 4cf6d020d513..9d7954366bd2 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,7 +28,7 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29 29
30/** 30/**
31 * DOC: i915 batch buffer command parser 31 * DOC: batch buffer command parser
32 * 32 *
33 * Motivation: 33 * Motivation:
34 * Certain OpenGL features (e.g. transform feedback, performance monitoring) 34 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
@@ -86,6 +86,367 @@
86 * general bitmasking mechanism. 86 * general bitmasking mechanism.
87 */ 87 */
88 88
89#define STD_MI_OPCODE_MASK 0xFF800000
90#define STD_3D_OPCODE_MASK 0xFFFF0000
91#define STD_2D_OPCODE_MASK 0xFFC00000
92#define STD_MFX_OPCODE_MASK 0xFFFF0000
93
94#define CMD(op, opm, f, lm, fl, ...) \
95 { \
96 .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
97 .cmd = { (op), (opm) }, \
98 .length = { (lm) }, \
99 __VA_ARGS__ \
100 }
101
102/* Convenience macros to compress the tables */
103#define SMI STD_MI_OPCODE_MASK
104#define S3D STD_3D_OPCODE_MASK
105#define S2D STD_2D_OPCODE_MASK
106#define SMFX STD_MFX_OPCODE_MASK
107#define F true
108#define S CMD_DESC_SKIP
109#define R CMD_DESC_REJECT
110#define W CMD_DESC_REGISTER
111#define B CMD_DESC_BITMASK
112#define M CMD_DESC_MASTER
113
114/* Command Mask Fixed Len Action
115 ---------------------------------------------------------- */
116static const struct drm_i915_cmd_descriptor common_cmds[] = {
117 CMD( MI_NOOP, SMI, F, 1, S ),
118 CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
119 CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
120 CMD( MI_ARB_CHECK, SMI, F, 1, S ),
121 CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
122 CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
123 CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
124 CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
125 CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
126 .reg = { .offset = 1, .mask = 0x007FFFFC } ),
127 CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
128 .reg = { .offset = 1, .mask = 0x007FFFFC },
129 .bits = {{
130 .offset = 0,
131 .mask = MI_GLOBAL_GTT,
132 .expected = 0,
133 }}, ),
134 CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
135 .reg = { .offset = 1, .mask = 0x007FFFFC },
136 .bits = {{
137 .offset = 0,
138 .mask = MI_GLOBAL_GTT,
139 .expected = 0,
140 }}, ),
141 CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
142};
143
144static const struct drm_i915_cmd_descriptor render_cmds[] = {
145 CMD( MI_FLUSH, SMI, F, 1, S ),
146 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
147 CMD( MI_PREDICATE, SMI, F, 1, S ),
148 CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
149 CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
150 CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
151 CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
152 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
153 .bits = {{
154 .offset = 0,
155 .mask = MI_GLOBAL_GTT,
156 .expected = 0,
157 }}, ),
158 CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
159 CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
160 .bits = {{
161 .offset = 0,
162 .mask = MI_GLOBAL_GTT,
163 .expected = 0,
164 }}, ),
165 CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
166 .bits = {{
167 .offset = 1,
168 .mask = MI_REPORT_PERF_COUNT_GGTT,
169 .expected = 0,
170 }}, ),
171 CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
172 .bits = {{
173 .offset = 0,
174 .mask = MI_GLOBAL_GTT,
175 .expected = 0,
176 }}, ),
177 CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
178 CMD( PIPELINE_SELECT, S3D, F, 1, S ),
179 CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
180 .bits = {{
181 .offset = 2,
182 .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
183 .expected = 0,
184 }}, ),
185 CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
186 CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
187 CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
188 CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
189 .bits = {{
190 .offset = 1,
191 .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
192 .expected = 0,
193 },
194 {
195 .offset = 1,
196 .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
197 PIPE_CONTROL_STORE_DATA_INDEX),
198 .expected = 0,
199 .condition_offset = 1,
200 .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
201 }}, ),
202};
203
204static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
205 CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
206 CMD( MI_RS_CONTROL, SMI, F, 1, S ),
207 CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
208 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
209 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
210 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
211 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
212 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
213 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
214 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
215 CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
216 CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
217
218 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
219 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
220 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
221 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
222 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
223};
224
225static const struct drm_i915_cmd_descriptor video_cmds[] = {
226 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
227 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
228 .bits = {{
229 .offset = 0,
230 .mask = MI_GLOBAL_GTT,
231 .expected = 0,
232 }}, ),
233 CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
234 CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
235 .bits = {{
236 .offset = 0,
237 .mask = MI_FLUSH_DW_NOTIFY,
238 .expected = 0,
239 },
240 {
241 .offset = 1,
242 .mask = MI_FLUSH_DW_USE_GTT,
243 .expected = 0,
244 .condition_offset = 0,
245 .condition_mask = MI_FLUSH_DW_OP_MASK,
246 },
247 {
248 .offset = 0,
249 .mask = MI_FLUSH_DW_STORE_INDEX,
250 .expected = 0,
251 .condition_offset = 0,
252 .condition_mask = MI_FLUSH_DW_OP_MASK,
253 }}, ),
254 CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
255 .bits = {{
256 .offset = 0,
257 .mask = MI_GLOBAL_GTT,
258 .expected = 0,
259 }}, ),
260 /*
261 * MFX_WAIT doesn't fit the way we handle length for most commands.
262 * It has a length field but it uses a non-standard length bias.
263 * It is always 1 dword though, so just treat it as fixed length.
264 */
265 CMD( MFX_WAIT, SMFX, F, 1, S ),
266};
267
268static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
269 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
270 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
271 .bits = {{
272 .offset = 0,
273 .mask = MI_GLOBAL_GTT,
274 .expected = 0,
275 }}, ),
276 CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
277 CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
278 .bits = {{
279 .offset = 0,
280 .mask = MI_FLUSH_DW_NOTIFY,
281 .expected = 0,
282 },
283 {
284 .offset = 1,
285 .mask = MI_FLUSH_DW_USE_GTT,
286 .expected = 0,
287 .condition_offset = 0,
288 .condition_mask = MI_FLUSH_DW_OP_MASK,
289 },
290 {
291 .offset = 0,
292 .mask = MI_FLUSH_DW_STORE_INDEX,
293 .expected = 0,
294 .condition_offset = 0,
295 .condition_mask = MI_FLUSH_DW_OP_MASK,
296 }}, ),
297 CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
298 .bits = {{
299 .offset = 0,
300 .mask = MI_GLOBAL_GTT,
301 .expected = 0,
302 }}, ),
303};
304
305static const struct drm_i915_cmd_descriptor blt_cmds[] = {
306 CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
307 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
308 .bits = {{
309 .offset = 0,
310 .mask = MI_GLOBAL_GTT,
311 .expected = 0,
312 }}, ),
313 CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
314 CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
315 .bits = {{
316 .offset = 0,
317 .mask = MI_FLUSH_DW_NOTIFY,
318 .expected = 0,
319 },
320 {
321 .offset = 1,
322 .mask = MI_FLUSH_DW_USE_GTT,
323 .expected = 0,
324 .condition_offset = 0,
325 .condition_mask = MI_FLUSH_DW_OP_MASK,
326 },
327 {
328 .offset = 0,
329 .mask = MI_FLUSH_DW_STORE_INDEX,
330 .expected = 0,
331 .condition_offset = 0,
332 .condition_mask = MI_FLUSH_DW_OP_MASK,
333 }}, ),
334 CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
335 CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
336};
337
338static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
339 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
340 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
341};
342
343#undef CMD
344#undef SMI
345#undef S3D
346#undef S2D
347#undef SMFX
348#undef F
349#undef S
350#undef R
351#undef W
352#undef B
353#undef M
354
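In the tables above, F marks a fixed-length command whose length member is a dword count, while !F entries carry a mask that extracts the length from the command header itself. The consumer side, mirroring the decode in i915_parse_cmds() later in this patch, is roughly:

u32 length;

if (desc->flags & CMD_DESC_FIXED)
        length = desc->length.fixed;
else
        /* LENGTH_BIAS (2) covers the header dwords the hardware
         * excludes from the encoded length field */
        length = ((*cmd & desc->length.mask) + LENGTH_BIAS);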
355static const struct drm_i915_cmd_table gen7_render_cmds[] = {
356 { common_cmds, ARRAY_SIZE(common_cmds) },
357 { render_cmds, ARRAY_SIZE(render_cmds) },
358};
359
360static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
361 { common_cmds, ARRAY_SIZE(common_cmds) },
362 { render_cmds, ARRAY_SIZE(render_cmds) },
363 { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
364};
365
366static const struct drm_i915_cmd_table gen7_video_cmds[] = {
367 { common_cmds, ARRAY_SIZE(common_cmds) },
368 { video_cmds, ARRAY_SIZE(video_cmds) },
369};
370
371static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
372 { common_cmds, ARRAY_SIZE(common_cmds) },
373 { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
374};
375
376static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
377 { common_cmds, ARRAY_SIZE(common_cmds) },
378 { blt_cmds, ARRAY_SIZE(blt_cmds) },
379};
380
381static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
382 { common_cmds, ARRAY_SIZE(common_cmds) },
383 { blt_cmds, ARRAY_SIZE(blt_cmds) },
384 { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
385};
386
387/*
388 * Register whitelists, sorted by increasing register offset.
389 *
390 * Some registers that userspace accesses are 64 bits. The register
391 * access commands only allow 32-bit accesses. Hence, we have to include
392 * entries for both halves of the 64-bit registers.
393 */
394
395/* Convenience macro for adding 64-bit registers */
396#define REG64(addr) (addr), (addr + sizeof(u32))
397
398static const u32 gen7_render_regs[] = {
399 REG64(HS_INVOCATION_COUNT),
400 REG64(DS_INVOCATION_COUNT),
401 REG64(IA_VERTICES_COUNT),
402 REG64(IA_PRIMITIVES_COUNT),
403 REG64(VS_INVOCATION_COUNT),
404 REG64(GS_INVOCATION_COUNT),
405 REG64(GS_PRIMITIVES_COUNT),
406 REG64(CL_INVOCATION_COUNT),
407 REG64(CL_PRIMITIVES_COUNT),
408 REG64(PS_INVOCATION_COUNT),
409 REG64(PS_DEPTH_COUNT),
410 OACONTROL, /* Only allowed for LRI and SRM. See below. */
411 GEN7_3DPRIM_END_OFFSET,
412 GEN7_3DPRIM_START_VERTEX,
413 GEN7_3DPRIM_VERTEX_COUNT,
414 GEN7_3DPRIM_INSTANCE_COUNT,
415 GEN7_3DPRIM_START_INSTANCE,
416 GEN7_3DPRIM_BASE_VERTEX,
417 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
418 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
419 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
420 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
421 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
422 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
423 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
424 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
425 GEN7_SO_WRITE_OFFSET(0),
426 GEN7_SO_WRITE_OFFSET(1),
427 GEN7_SO_WRITE_OFFSET(2),
428 GEN7_SO_WRITE_OFFSET(3),
429};
430
431static const u32 gen7_blt_regs[] = {
432 BCS_SWCTRL,
433};
434
435static const u32 ivb_master_regs[] = {
436 FORCEWAKE_MT,
437 DERRMR,
438 GEN7_PIPE_DE_LOAD_SL(PIPE_A),
439 GEN7_PIPE_DE_LOAD_SL(PIPE_B),
440 GEN7_PIPE_DE_LOAD_SL(PIPE_C),
441};
442
443static const u32 hsw_master_regs[] = {
444 FORCEWAKE_MT,
445 DERRMR,
446};
447
448#undef REG64
449
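REG64() exists because the whitelisted register-access commands move only 32 bits at a time, so a 64-bit counter must appear in the table as both of its halves, kept adjacent to preserve the sorted order. Expanded by hand (hypothetical address for illustration):

#define REG64(addr) (addr), (addr + sizeof(u32))

static const u32 example_regs[] = {
        REG64(0x2350),  /* expands to: 0x2350, 0x2354 */
};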
89static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) 450static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
90{ 451{
91 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; 452 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -137,15 +498,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
137 return 0; 498 return 0;
138} 499}
139 500
140static void validate_cmds_sorted(struct intel_ring_buffer *ring) 501static bool validate_cmds_sorted(struct intel_engine_cs *ring,
502 const struct drm_i915_cmd_table *cmd_tables,
503 int cmd_table_count)
141{ 504{
142 int i; 505 int i;
506 bool ret = true;
143 507
144 if (!ring->cmd_tables || ring->cmd_table_count == 0) 508 if (!cmd_tables || cmd_table_count == 0)
145 return; 509 return true;
146 510
147 for (i = 0; i < ring->cmd_table_count; i++) { 511 for (i = 0; i < cmd_table_count; i++) {
148 const struct drm_i915_cmd_table *table = &ring->cmd_tables[i]; 512 const struct drm_i915_cmd_table *table = &cmd_tables[i];
149 u32 previous = 0; 513 u32 previous = 0;
150 int j; 514 int j;
151 515
@@ -154,35 +518,107 @@ static void validate_cmds_sorted(struct intel_ring_buffer *ring)
154 &table->table[i]; 518 &table->table[i];
155 u32 curr = desc->cmd.value & desc->cmd.mask; 519 u32 curr = desc->cmd.value & desc->cmd.mask;
156 520
157 if (curr < previous) 521 if (curr < previous) {
158 DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n", 522 DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
159 ring->id, i, j, curr, previous); 523 ring->id, i, j, curr, previous);
524 ret = false;
525 }
160 526
161 previous = curr; 527 previous = curr;
162 } 528 }
163 } 529 }
530
531 return ret;
164} 532}
165 533
166static void check_sorted(int ring_id, const u32 *reg_table, int reg_count) 534static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
167{ 535{
168 int i; 536 int i;
169 u32 previous = 0; 537 u32 previous = 0;
538 bool ret = true;
170 539
171 for (i = 0; i < reg_count; i++) { 540 for (i = 0; i < reg_count; i++) {
172 u32 curr = reg_table[i]; 541 u32 curr = reg_table[i];
173 542
174 if (curr < previous) 543 if (curr < previous) {
175 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", 544 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
176 ring_id, i, curr, previous); 545 ring_id, i, curr, previous);
546 ret = false;
547 }
177 548
178 previous = curr; 549 previous = curr;
179 } 550 }
551
552 return ret;
553}
554
555static bool validate_regs_sorted(struct intel_engine_cs *ring)
556{
557 return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
558 check_sorted(ring->id, ring->master_reg_table,
559 ring->master_reg_count);
560}
561
562struct cmd_node {
563 const struct drm_i915_cmd_descriptor *desc;
564 struct hlist_node node;
565};
566
567/*
568 * Different command ranges have different numbers of bits for the opcode. For
569 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
570 * problem is that, for example, MI commands use bits 22:16 for other fields
571 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
572 * we mask a command from a batch it could hash to the wrong bucket due to
573 * non-opcode bits being set. But if we don't include those bits, some 3D
574 * commands may hash to the same bucket due to not including opcode bits that
575 * make the command unique. For now, we will risk hashing to the same bucket.
576 *
577 * If we attempt to generate a perfect hash, we should be able to look at bits
578 * 31:29 of a command from a batch buffer and use the full mask for that
579 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
580 */
581#define CMD_HASH_MASK STD_MI_OPCODE_MASK
582
583static int init_hash_table(struct intel_engine_cs *ring,
584 const struct drm_i915_cmd_table *cmd_tables,
585 int cmd_table_count)
586{
587 int i, j;
588
589 hash_init(ring->cmd_hash);
590
591 for (i = 0; i < cmd_table_count; i++) {
592 const struct drm_i915_cmd_table *table = &cmd_tables[i];
593
594 for (j = 0; j < table->count; j++) {
595 const struct drm_i915_cmd_descriptor *desc =
596 &table->table[j];
597 struct cmd_node *desc_node =
598 kmalloc(sizeof(*desc_node), GFP_KERNEL);
599
600 if (!desc_node)
601 return -ENOMEM;
602
603 desc_node->desc = desc;
604 hash_add(ring->cmd_hash, &desc_node->node,
605 desc->cmd.value & CMD_HASH_MASK);
606 }
607 }
608
609 return 0;
180} 610}
181 611
182static void validate_regs_sorted(struct intel_ring_buffer *ring) 612static void fini_hash_table(struct intel_engine_cs *ring)
183{ 613{
184 check_sorted(ring->id, ring->reg_table, ring->reg_count); 614 struct hlist_node *tmp;
185 check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count); 615 struct cmd_node *desc_node;
616 int i;
617
618 hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
619 hash_del(&desc_node->node);
620 kfree(desc_node);
621 }
186} 622}
187 623
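The hash table replacing the per-ring linear table walk is the stock <linux/hashtable.h> API: entries go in with hash_add() keyed on the masked opcode, and teardown uses hash_for_each_safe() plus hash_del(), exactly as in init_hash_table()/fini_hash_table() above. A self-contained sketch of that lifecycle on a hypothetical 3-bit (8-bucket) table:

#include <linux/hashtable.h>
#include <linux/slab.h>

struct foo_node {
        u32 key;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(foo_hash, 3);   /* 2^3 buckets */

static int foo_hash_add(u32 key)
{
        struct foo_node *n = kmalloc(sizeof(*n), GFP_KERNEL);

        if (!n)
                return -ENOMEM;
        n->key = key;
        hash_add(foo_hash, &n->node, key);      /* key -> bucket */
        return 0;
}

static void foo_hash_fini(void)
{
        struct foo_node *n;
        struct hlist_node *tmp;
        int bkt;

        hash_for_each_safe(foo_hash, bkt, tmp, n, node) {
                hash_del(&n->node);
                kfree(n);
        }
}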
188/** 624/**
@@ -190,25 +626,74 @@ static void validate_regs_sorted(struct intel_ring_buffer *ring)
190 * @ring: the ringbuffer to initialize 626 * @ring: the ringbuffer to initialize
191 * 627 *
192 * Optionally initializes fields related to batch buffer command parsing in the 628 * Optionally initializes fields related to batch buffer command parsing in the
193 * struct intel_ring_buffer based on whether the platform requires software 629 * struct intel_engine_cs based on whether the platform requires software
194 * command parsing. 630 * command parsing.
631 *
632 * Return: non-zero if initialization fails
195 */ 633 */
196void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring) 634int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
197{ 635{
636 const struct drm_i915_cmd_table *cmd_tables;
637 int cmd_table_count;
638 int ret;
639
198 if (!IS_GEN7(ring->dev)) 640 if (!IS_GEN7(ring->dev))
199 return; 641 return 0;
200 642
201 switch (ring->id) { 643 switch (ring->id) {
202 case RCS: 644 case RCS:
645 if (IS_HASWELL(ring->dev)) {
646 cmd_tables = hsw_render_ring_cmds;
647 cmd_table_count =
648 ARRAY_SIZE(hsw_render_ring_cmds);
649 } else {
650 cmd_tables = gen7_render_cmds;
651 cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
652 }
653
654 ring->reg_table = gen7_render_regs;
655 ring->reg_count = ARRAY_SIZE(gen7_render_regs);
656
657 if (IS_HASWELL(ring->dev)) {
658 ring->master_reg_table = hsw_master_regs;
659 ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
660 } else {
661 ring->master_reg_table = ivb_master_regs;
662 ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
663 }
664
203 ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask; 665 ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
204 break; 666 break;
205 case VCS: 667 case VCS:
668 cmd_tables = gen7_video_cmds;
669 cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
206 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 670 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
207 break; 671 break;
208 case BCS: 672 case BCS:
673 if (IS_HASWELL(ring->dev)) {
674 cmd_tables = hsw_blt_ring_cmds;
675 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
676 } else {
677 cmd_tables = gen7_blt_cmds;
678 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
679 }
680
681 ring->reg_table = gen7_blt_regs;
682 ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
683
684 if (IS_HASWELL(ring->dev)) {
685 ring->master_reg_table = hsw_master_regs;
686 ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
687 } else {
688 ring->master_reg_table = ivb_master_regs;
689 ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
690 }
691
209 ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; 692 ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
210 break; 693 break;
211 case VECS: 694 case VECS:
695 cmd_tables = hsw_vebox_cmds;
696 cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
212 /* VECS can use the same length_mask function as VCS */ 697 /* VECS can use the same length_mask function as VCS */
213 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 698 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
214 break; 699 break;
@@ -218,18 +703,45 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
218 BUG(); 703 BUG();
219 } 704 }
220 705
221 validate_cmds_sorted(ring); 706 BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
222 validate_regs_sorted(ring); 707 BUG_ON(!validate_regs_sorted(ring));
708
709 ret = init_hash_table(ring, cmd_tables, cmd_table_count);
710 if (ret) {
711 DRM_ERROR("CMD: cmd_parser_init failed!\n");
712 fini_hash_table(ring);
713 return ret;
714 }
715
716 ring->needs_cmd_parser = true;
717
718 return 0;
719}
720
721/**
722 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
723 * @ring: the ringbuffer to clean up
724 *
725 * Releases any resources related to command parsing that may have been
726 * initialized for the specified ring.
727 */
728void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
729{
730 if (!ring->needs_cmd_parser)
731 return;
732
733 fini_hash_table(ring);
223} 734}
224 735
225static const struct drm_i915_cmd_descriptor* 736static const struct drm_i915_cmd_descriptor*
226find_cmd_in_table(const struct drm_i915_cmd_table *table, 737find_cmd_in_table(struct intel_engine_cs *ring,
227 u32 cmd_header) 738 u32 cmd_header)
228{ 739{
229 int i; 740 struct cmd_node *desc_node;
230 741
231 for (i = 0; i < table->count; i++) { 742 hash_for_each_possible(ring->cmd_hash, desc_node, node,
232 const struct drm_i915_cmd_descriptor *desc = &table->table[i]; 743 cmd_header & CMD_HASH_MASK) {
744 const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
233 u32 masked_cmd = desc->cmd.mask & cmd_header; 745 u32 masked_cmd = desc->cmd.mask & cmd_header;
234 u32 masked_value = desc->cmd.value & desc->cmd.mask; 746 u32 masked_value = desc->cmd.value & desc->cmd.mask;
235 747
@@ -249,20 +761,16 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table,
  * ring's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_ring_buffer *ring,
+find_cmd(struct intel_engine_cs *ring,
 	 u32 cmd_header,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
+	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
-	int i;
 
-	for (i = 0; i < ring->cmd_table_count; i++) {
-		const struct drm_i915_cmd_descriptor *desc;
-
-		desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
-		if (desc)
-			return desc;
-	}
+	desc = find_cmd_in_table(ring, cmd_header);
+	if (desc)
+		return desc;
 
 	mask = ring->get_cmd_length_mask(cmd_header);
 	if (!mask)
@@ -329,15 +837,112 @@ finish:
  *
  * Return: true if the ring requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 {
-	/* No command tables indicates a platform without parsing */
-	if (!ring->cmd_tables)
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (!ring->needs_cmd_parser)
+		return false;
+
+	/*
+	 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
+	 * disabled. That will cause all of the parser's PPGTT checks to
+	 * fail. For now, disable parsing when PPGTT is off.
+	 */
+	if (!dev_priv->mm.aliasing_ppgtt)
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
 }
 
+static bool check_cmd(const struct intel_engine_cs *ring,
+		      const struct drm_i915_cmd_descriptor *desc,
+		      const u32 *cmd,
+		      const bool is_master,
+		      bool *oacontrol_set)
+{
+	if (desc->flags & CMD_DESC_REJECT) {
+		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		return false;
+	}
+
+	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+				 *cmd);
+		return false;
+	}
+
+	if (desc->flags & CMD_DESC_REGISTER) {
+		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+		/*
+		 * OACONTROL requires some special handling for writes. We
+		 * want to make sure that any batch which enables OA also
+		 * disables it before the end of the batch. The goal is to
+		 * prevent one process from snooping on the perf data from
+		 * another process. To do that, we need to check the value
+		 * that will be written to the register. Hence, limit
+		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+		 */
+		if (reg_addr == OACONTROL) {
+			if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+				return false;
+
+			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+				*oacontrol_set = (cmd[2] != 0);
+		}
+
+		if (!valid_reg(ring->reg_table,
+			       ring->reg_count, reg_addr)) {
+			if (!is_master ||
+			    !valid_reg(ring->master_reg_table,
+				       ring->master_reg_count,
+				       reg_addr)) {
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+						 reg_addr,
+						 *cmd,
+						 ring->id);
+				return false;
+			}
+		}
+	}
+
+	if (desc->flags & CMD_DESC_BITMASK) {
+		int i;
+
+		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+			u32 dword;
+
+			if (desc->bits[i].mask == 0)
+				break;
+
+			if (desc->bits[i].condition_mask != 0) {
+				u32 offset =
+					desc->bits[i].condition_offset;
+				u32 condition = cmd[offset] &
+					desc->bits[i].condition_mask;
+
+				if (condition == 0)
+					continue;
+			}
+
+			dword = cmd[desc->bits[i].offset] &
+				desc->bits[i].mask;
+
+			if (dword != desc->bits[i].expected) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+						 *cmd,
+						 desc->bits[i].mask,
+						 desc->bits[i].expected,
+						 dword, ring->id);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
 #define LENGTH_BIAS 2
 
 /**
@@ -352,7 +957,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
  *
  * Return: non-zero if the parser finds violations or otherwise fails
  */
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_parse_cmds(struct intel_engine_cs *ring,
 		    struct drm_i915_gem_object *batch_obj,
 		    u32 batch_start_offset,
 		    bool is_master)
@@ -361,6 +966,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
 	int needs_clflush = 0;
+	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
 	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
 	if (ret) {
@@ -402,76 +1008,27 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 		length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
 
 		if ((batch_end - cmd) < length) {
-			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
+			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
 					 *cmd,
 					 length,
-					 (unsigned long)(batch_end - cmd));
+					 batch_end - cmd);
 			ret = -EINVAL;
 			break;
 		}
 
-		if (desc->flags & CMD_DESC_REJECT) {
-			DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-			DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-					 *cmd);
-			ret = -EINVAL;
-			break;
-		}
-
-		if (desc->flags & CMD_DESC_REGISTER) {
-			u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
-			if (!valid_reg(ring->reg_table,
-				       ring->reg_count, reg_addr)) {
-				if (!is_master ||
-				    !valid_reg(ring->master_reg_table,
-					       ring->master_reg_count,
-					       reg_addr)) {
-					DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-							 reg_addr,
-							 *cmd,
-							 ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-		}
-
-		if (desc->flags & CMD_DESC_BITMASK) {
-			int i;
-
-			for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
-				u32 dword;
-
-				if (desc->bits[i].mask == 0)
-					break;
-
-				dword = cmd[desc->bits[i].offset] &
-					desc->bits[i].mask;
-
-				if (dword != desc->bits[i].expected) {
-					DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
-							 *cmd,
-							 desc->bits[i].mask,
-							 desc->bits[i].expected,
-							 dword, ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-
-			if (ret)
-				break;
-		}
-
 		cmd += length;
 	}
 
+	if (oacontrol_set) {
+		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+		ret = -EINVAL;
+	}
+
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
@@ -483,3 +1040,22 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 
 	return ret;
 }
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+	/*
+	 * Command parser version history
+	 *
+	 * 1. Initial version. Checks batches and reports violations, but leaves
+	 *    hardware parsing enabled (so does not allow new use cases).
+	 */
+	return 1;
+}
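[Editor's note -- illustrative, not part of the patch] The getparam hunk in
i915_dma.c further down wires i915_cmd_parser_get_version() up to userspace
as I915_PARAM_CMD_PARSER_VERSION. A minimal sketch of how a client might
query it (header paths and error handling are assumptions, not taken from
this series):

	#include <xf86drm.h>	/* drmIoctl(), from libdrm */
	#include <i915_drm.h>	/* drm_i915_getparam_t, I915_PARAM_* */

	static int cmd_parser_version(int fd)
	{
		drm_i915_getparam_t gp;
		int version = -1;

		gp.param = I915_PARAM_CMD_PARSER_VERSION;
		gp.value = &version;

		/* Kernels without this series reject the param with -EINVAL. */
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;

		return version;	/* 1 with this series applied */
	}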
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 195fe5bc0aac..601caa88c092 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,7 +79,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	const struct intel_device_info *info = INTEL_INFO(dev);
 
@@ -172,7 +172,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
 
-static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
 	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
@@ -181,7 +181,7 @@ static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
@@ -239,7 +239,7 @@ static int obj_rank_by_stolen(void *priv,
 
 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
@@ -371,7 +371,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 count, mappable_count, purgeable_count;
@@ -474,7 +474,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
 static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -509,12 +509,12 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	unsigned long flags;
 	struct intel_crtc *crtc;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, crtc) {
 		const char pipe = pipe_name(crtc->pipe);
 		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
@@ -559,10 +559,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	struct drm_i915_gem_request *gem_request;
 	int ret, count, i;
 
@@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_ring_buffer *ring)
+				 struct intel_engine_cs *ring)
 {
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %u\n",
@@ -604,10 +604,10 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -627,10 +627,10 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -638,7 +638,47 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (IS_CHERRYVIEW(dev)) {
+		int i;
+		seq_printf(m, "Master Interrupt Control:\t%08x\n",
+			   I915_READ(GEN8_MASTER_IRQ));
+
+		seq_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		seq_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		seq_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		seq_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		seq_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		seq_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+		for (i = 0; i < 4; i++) {
+			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IMR(i)));
+			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IIR(i)));
+			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IER(i)));
+		}
+
+		seq_printf(m, "PCU interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_PCU_IMR));
+		seq_printf(m, "PCU interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_PCU_IIR));
+		seq_printf(m, "PCU interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_PCU_IER));
+	} else if (INTEL_INFO(dev)->gen >= 8) {
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -768,7 +808,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
@@ -797,10 +837,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 
 static int i915_hws_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	const u32 *hws;
 	int i;
 
@@ -945,7 +985,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 crstanddelay;
@@ -966,9 +1006,9 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+static int i915_frequency_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
@@ -991,6 +1031,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rpmodectl, rpinclimit, rpdeclimit;
 		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
@@ -1011,6 +1052,10 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		reqf >>= 25;
 		reqf *= GT_FREQUENCY_MULTIPLIER;
 
+		rpmodectl = I915_READ(GEN6_RP_CONTROL);
+		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -1027,14 +1072,23 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
+		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+			   I915_READ(GEN6_PMIER),
+			   I915_READ(GEN6_PMIMR),
+			   I915_READ(GEN6_PMISR),
+			   I915_READ(GEN6_PMIIR),
+			   I915_READ(GEN6_PMINTRMSK));
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
-		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & 0xff00) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
@@ -1094,7 +1148,7 @@ out:
 
 static int i915_delayfreq_table(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 delayfreq;
@@ -1125,7 +1179,7 @@ static inline int MAP_TO_MV(int map)
 
 static int i915_inttoext_table(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 inttoext;
@@ -1149,7 +1203,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
 
 static int ironlake_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rgvmodectl, rstdbyctl;
@@ -1219,15 +1273,19 @@ static int ironlake_drpc_info(struct seq_file *m)
 static int vlv_drpc_info(struct seq_file *m)
 {
 
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rpmodectl1, rcctl1;
 	unsigned fw_rendercount = 0, fw_mediacount = 0;
 
+	intel_runtime_pm_get(dev_priv);
+
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "Turbo enabled: %s\n",
@@ -1247,6 +1305,11 @@ static int vlv_drpc_info(struct seq_file *m)
 		   (I915_READ(VLV_GTLC_PW_STATUS) &
 			VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
 
+	seq_printf(m, "Render RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_RENDER_RC6));
+	seq_printf(m, "Media RC6 residency since boot: %u\n",
+		   I915_READ(VLV_GT_MEDIA_RC6));
+
 	spin_lock_irq(&dev_priv->uncore.lock);
 	fw_rendercount = dev_priv->uncore.fw_rendercount;
 	fw_mediacount = dev_priv->uncore.fw_mediacount;
@@ -1263,7 +1326,7 @@ static int vlv_drpc_info(struct seq_file *m)
 static int gen6_drpc_info(struct seq_file *m)
 {
 
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
@@ -1362,7 +1425,7 @@ static int gen6_drpc_info(struct seq_file *m)
 
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 
 	if (IS_VALLEYVIEW(dev))
@@ -1375,7 +1438,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1437,7 +1500,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1460,7 +1523,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
@@ -1486,7 +1549,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long temp, chipset, gfx;
@@ -1514,7 +1577,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
@@ -1557,7 +1620,7 @@ out:
 
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -1577,7 +1640,7 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 
 static int i915_opregion(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1605,7 +1668,7 @@ out:
 
 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
@@ -1651,11 +1714,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	struct i915_hw_context *ctx;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1675,6 +1738,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		if (ctx->obj == NULL)
+			continue;
+
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
 		for_each_ring(ring, dev_priv, i)
@@ -1692,7 +1758,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
@@ -1740,7 +1806,7 @@ static const char *swizzle_string(unsigned swizzle)
 
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -1788,10 +1854,14 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 
 static int per_file_ctx(int id, void *ptr, void *data)
 {
-	struct i915_hw_context *ctx = ptr;
+	struct intel_context *ctx = ptr;
 	struct seq_file *m = data;
 	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
 
+	if (i915_gem_context_is_default(ctx))
+		seq_puts(m, "  default context:\n");
+	else
+		seq_printf(m, "  context %d:\n", ctx->id);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -1800,7 +1870,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int unused, i;
 
@@ -1816,8 +1886,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		u64 pdp = I915_READ(ring->mmio_base + offset + 4);
 		pdp <<= 32;
 		pdp |= I915_READ(ring->mmio_base + offset);
-		for (i = 0; i < 4; i++)
-			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+		seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
 }
@@ -1825,7 +1894,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	struct drm_file *file;
 	int i;
 
@@ -1852,12 +1921,9 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct i915_hw_ppgtt *pvt_ppgtt;
 
-		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
 		seq_printf(m, "proc: %s\n",
 			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
-		seq_puts(m, "  default context:\n");
 		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1865,7 +1931,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1885,56 +1951,9 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_dpio_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-
-	if (!IS_VALLEYVIEW(dev)) {
-		seq_puts(m, "unsupported\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
-	if (ret)
-		return ret;
-
-	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
-
-	seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
-	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
-
-	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
-	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
-
-	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
-	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
-
-	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
-	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
-
-	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
-
-	mutex_unlock(&dev_priv->dpio_lock);
-
-	return 0;
-}
-
 static int i915_llc(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2040,11 +2059,11 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 
 static int i915_pc8_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev)) {
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2115,7 +2134,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -2170,7 +2189,7 @@ static void intel_encoder_info(struct seq_file *m,
 			       struct intel_crtc *intel_crtc,
 			       struct intel_encoder *intel_encoder)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_connector *intel_connector;
@@ -2178,12 +2197,12 @@ static void intel_encoder_info(struct seq_file *m,
 
 	encoder = &intel_encoder->base;
 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
-		   encoder->base.id, drm_get_encoder_name(encoder));
+		   encoder->base.id, encoder->name);
 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
 		struct drm_connector *connector = &intel_connector->base;
 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
 			   connector->base.id,
-			   drm_get_connector_name(connector),
+			   connector->name,
 			   drm_get_connector_status_name(connector->status));
 		if (connector->status == connector_status_connected) {
 			struct drm_display_mode *mode = &crtc->mode;
@@ -2197,7 +2216,7 @@ static void intel_encoder_info(struct seq_file *m,
 
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
@@ -2254,7 +2273,7 @@ static void intel_connector_info(struct seq_file *m,
 	struct drm_display_mode *mode;
 
 	seq_printf(m, "connector %d: type %s, status: %s\n",
-		   connector->base.id, drm_get_connector_name(connector),
+		   connector->base.id, connector->name,
 		   drm_get_connector_status_name(connector->status));
 	if (connector->status == connector_status_connected) {
 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
@@ -2286,10 +2305,8 @@ static bool cursor_active(struct drm_device *dev, int pipe)
 
 	if (IS_845G(dev) || IS_I865G(dev))
 		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
-		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
 	else
-		state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
 
 	return state;
 }
@@ -2299,10 +2316,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pos;
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
-		pos = I915_READ(CURPOS_IVB(pipe));
-	else
-		pos = I915_READ(CURPOS(pipe));
+	pos = I915_READ(CURPOS(pipe));
 
 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
@@ -2317,7 +2331,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 
 static int i915_display_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc;
@@ -2327,7 +2341,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	drm_modeset_lock_all(dev);
 	seq_printf(m, "CRTC info\n");
 	seq_printf(m, "---------\n");
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, crtc) {
 		bool active;
 		int x, y;
 
@@ -2339,10 +2353,14 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
-				   yesno(crtc->cursor_visible),
+				   yesno(crtc->cursor_base),
 				   x, y, crtc->cursor_addr,
 				   yesno(active));
 		}
+
+		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
+			   yesno(!crtc->cpu_fifo_underrun_disabled),
+			   yesno(!crtc->pch_fifo_underrun_disabled));
 	}
 
 	seq_printf(m, "\n");
@@ -2595,7 +2613,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
 
 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
 		if (!encoder->base.crtc)
@@ -2631,7 +2649,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
 			break;
 		}
 	}
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 
 	return ret;
 }
@@ -3106,7 +3124,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
 {
 	struct drm_device *dev = m->private;
-	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;
 
 	drm_modeset_lock_all(dev);
@@ -3189,7 +3207,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
 	uint16_t new[5] = { 0 };
-	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int num_levels = ilk_wm_max_level(dev) + 1;
 	int level;
 	int ret;
 	char tmp[32];
@@ -3286,9 +3304,15 @@ static int
 i915_wedged_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_runtime_pm_get(dev_priv);
 
 	i915_handle_error(dev, val,
 			  "Manually setting wedged to %llu", val);
+
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
@@ -3774,7 +3798,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
-	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_frequency_info", i915_frequency_info, 0},
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
@@ -3790,7 +3814,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
-	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index eedb023af27d..4c22a5b7f4c5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -44,6 +44,7 @@
44#include <acpi/video.h> 44#include <acpi/video.h>
45#include <linux/pm.h> 45#include <linux/pm.h>
46#include <linux/pm_runtime.h> 46#include <linux/pm_runtime.h>
47#include <linux/oom.h>
47 48
48#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) 49#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
49 50
@@ -63,7 +64,7 @@
63 * has access to the ring. 64 * has access to the ring.
64 */ 65 */
65#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ 66#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
66 if (LP_RING(dev->dev_private)->obj == NULL) \ 67 if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
67 LOCK_TEST_WITH_RETURN(dev, file); \ 68 LOCK_TEST_WITH_RETURN(dev, file); \
68} while (0) 69} while (0)
69 70
@@ -119,7 +120,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
119static void i915_free_hws(struct drm_device *dev) 120static void i915_free_hws(struct drm_device *dev)
120{ 121{
121 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct intel_ring_buffer *ring = LP_RING(dev_priv); 123 struct intel_engine_cs *ring = LP_RING(dev_priv);
123 124
124 if (dev_priv->status_page_dmah) { 125 if (dev_priv->status_page_dmah) {
125 drm_pci_free(dev, dev_priv->status_page_dmah); 126 drm_pci_free(dev, dev_priv->status_page_dmah);
@@ -139,7 +140,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
139{ 140{
140 struct drm_i915_private *dev_priv = dev->dev_private; 141 struct drm_i915_private *dev_priv = dev->dev_private;
141 struct drm_i915_master_private *master_priv; 142 struct drm_i915_master_private *master_priv;
142 struct intel_ring_buffer *ring = LP_RING(dev_priv); 143 struct intel_engine_cs *ring = LP_RING(dev_priv);
144 struct intel_ringbuffer *ringbuf = ring->buffer;
143 145
144 /* 146 /*
145 * We should never lose context on the ring with modesetting 147 * We should never lose context on the ring with modesetting
@@ -148,17 +150,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
148 if (drm_core_check_feature(dev, DRIVER_MODESET)) 150 if (drm_core_check_feature(dev, DRIVER_MODESET))
149 return; 151 return;
150 152
151 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 153 ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
152 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 154 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
153 ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE); 155 ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
154 if (ring->space < 0) 156 if (ringbuf->space < 0)
155 ring->space += ring->size; 157 ringbuf->space += ringbuf->size;
156 158
157 if (!dev->primary->master) 159 if (!dev->primary->master)
158 return; 160 return;
159 161
160 master_priv = dev->primary->master->driver_priv; 162 master_priv = dev->primary->master->driver_priv;
161 if (ring->head == ring->tail && master_priv->sarea_priv) 163 if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
162 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 164 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
163} 165}
164 166
@@ -201,7 +203,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
201 } 203 }
202 204
203 if (init->ring_size != 0) { 205 if (init->ring_size != 0) {
204 if (LP_RING(dev_priv)->obj != NULL) { 206 if (LP_RING(dev_priv)->buffer->obj != NULL) {
205 i915_dma_cleanup(dev); 207 i915_dma_cleanup(dev);
206 DRM_ERROR("Client tried to initialize ringbuffer in " 208 DRM_ERROR("Client tried to initialize ringbuffer in "
207 "GEM mode\n"); 209 "GEM mode\n");
@@ -234,11 +236,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
234static int i915_dma_resume(struct drm_device * dev) 236static int i915_dma_resume(struct drm_device * dev)
235{ 237{
236 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_ring_buffer *ring = LP_RING(dev_priv); 239 struct intel_engine_cs *ring = LP_RING(dev_priv);
238 240
239 DRM_DEBUG_DRIVER("%s\n", __func__); 241 DRM_DEBUG_DRIVER("%s\n", __func__);
240 242
241 if (ring->virtual_start == NULL) { 243 if (ring->buffer->virtual_start == NULL) {
242 DRM_ERROR("can not ioremap virtual address for" 244 DRM_ERROR("can not ioremap virtual address for"
243 " ring buffer\n"); 245 " ring buffer\n");
244 return -ENOMEM; 246 return -ENOMEM;
@@ -360,7 +362,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
360 struct drm_i915_private *dev_priv = dev->dev_private; 362 struct drm_i915_private *dev_priv = dev->dev_private;
361 int i, ret; 363 int i, ret;
362 364
363 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) 365 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
364 return -EINVAL; 366 return -EINVAL;
365 367
366 for (i = 0; i < dwords;) { 368 for (i = 0; i < dwords;) {
@@ -782,7 +784,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
782 struct drm_i915_private *dev_priv = dev->dev_private; 784 struct drm_i915_private *dev_priv = dev->dev_private;
783 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 785 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
784 int ret = 0; 786 int ret = 0;
785 struct intel_ring_buffer *ring = LP_RING(dev_priv); 787 struct intel_engine_cs *ring = LP_RING(dev_priv);
786 788
787 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 789 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
788 READ_BREADCRUMB(dev_priv)); 790 READ_BREADCRUMB(dev_priv));
@@ -823,7 +825,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
823 if (drm_core_check_feature(dev, DRIVER_MODESET)) 825 if (drm_core_check_feature(dev, DRIVER_MODESET))
824 return -ENODEV; 826 return -ENODEV;
825 827
826 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { 828 if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
827 DRM_ERROR("called with no initialization\n"); 829 DRM_ERROR("called with no initialization\n");
828 return -EINVAL; 830 return -EINVAL;
829 } 831 }
@@ -1017,6 +1019,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
1017 case I915_PARAM_HAS_EXEC_HANDLE_LUT: 1019 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
1018 value = 1; 1020 value = 1;
1019 break; 1021 break;
1022 case I915_PARAM_CMD_PARSER_VERSION:
1023 value = i915_cmd_parser_get_version();
1024 break;
1020 default: 1025 default:
1021 DRM_DEBUG("Unknown parameter %d\n", param->param); 1026 DRM_DEBUG("Unknown parameter %d\n", param->param);
1022 return -EINVAL; 1027 return -EINVAL;
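For reference, the new I915_PARAM_CMD_PARSER_VERSION value is reachable from userspace through the existing GETPARAM ioctl. A minimal sketch of such a query (the helper name and error handling are illustrative, not part of this patch; assumes libdrm headers):

    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    /* Returns the kernel's command parser version, or -1 on failure. */
    static int query_cmd_parser_version(int fd)
    {
            int value = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_CMD_PARSER_VERSION,
                    .value = &value,
            };

            /* Older kernels fall through to the default case above and return -EINVAL. */
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -1;
            return value;
    }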
@@ -1070,7 +1075,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1070{ 1075{
1071 struct drm_i915_private *dev_priv = dev->dev_private; 1076 struct drm_i915_private *dev_priv = dev->dev_private;
1072 drm_i915_hws_addr_t *hws = data; 1077 drm_i915_hws_addr_t *hws = data;
1073 struct intel_ring_buffer *ring; 1078 struct intel_engine_cs *ring;
1074 1079
1075 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1080 if (drm_core_check_feature(dev, DRIVER_MODESET))
1076 return -ENODEV; 1081 return -ENODEV;
@@ -1277,12 +1282,13 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1277static bool i915_switcheroo_can_switch(struct pci_dev *pdev) 1282static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1278{ 1283{
1279 struct drm_device *dev = pci_get_drvdata(pdev); 1284 struct drm_device *dev = pci_get_drvdata(pdev);
1280 bool can_switch;
1281 1285
1282 spin_lock(&dev->count_lock); 1286 /*
1283 can_switch = (dev->open_count == 0); 1287 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1284 spin_unlock(&dev->count_lock); 1288 * locking inversion with the driver load path. And the access here is
1285 return can_switch; 1289 * completely racy anyway. So don't bother with locking for now.
1290 */
1291 return dev->open_count == 0;
1286} 1292}
1287 1293
1288static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { 1294static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
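For context, a can_switch hook like the one above is handed to vga_switcheroo as part of the client ops during driver load; a minimal sketch of the wiring (the registration point and error label are illustrative, not shown in this hunk):

    /* The final 'false' means the driver does not use switcheroo's
     * driver power control; power state is managed by runtime PM instead. */
    ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
    if (ret)
            goto cleanup_vga_client;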
@@ -1326,7 +1332,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1326 1332
1327 intel_power_domains_init_hw(dev_priv); 1333 intel_power_domains_init_hw(dev_priv);
1328 1334
1329 ret = drm_irq_install(dev); 1335 ret = drm_irq_install(dev, dev->pdev->irq);
1330 if (ret) 1336 if (ret)
1331 goto cleanup_gem_stolen; 1337 goto cleanup_gem_stolen;
1332 1338
@@ -1336,7 +1342,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1336 1342
1337 ret = i915_gem_init(dev); 1343 ret = i915_gem_init(dev);
1338 if (ret) 1344 if (ret)
1339 goto cleanup_power; 1345 goto cleanup_irq;
1340 1346
1341 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1347 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1342 1348
@@ -1345,10 +1351,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
1345 /* Always safe in the mode setting case. */ 1351 /* Always safe in the mode setting case. */
1346 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1352 /* FIXME: do pre/post-mode set stuff in core KMS code */
1347 dev->vblank_disable_allowed = true; 1353 dev->vblank_disable_allowed = true;
1348 if (INTEL_INFO(dev)->num_pipes == 0) { 1354 if (INTEL_INFO(dev)->num_pipes == 0)
1349 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
1350 return 0; 1355 return 0;
1351 }
1352 1356
1353 ret = intel_fbdev_init(dev); 1357 ret = intel_fbdev_init(dev);
1354 if (ret) 1358 if (ret)
@@ -1383,8 +1387,7 @@ cleanup_gem:
1383 mutex_unlock(&dev->struct_mutex); 1387 mutex_unlock(&dev->struct_mutex);
1384 WARN_ON(dev_priv->mm.aliasing_ppgtt); 1388 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1385 drm_mm_takedown(&dev_priv->gtt.base.mm); 1389 drm_mm_takedown(&dev_priv->gtt.base.mm);
1386cleanup_power: 1390cleanup_irq:
1387 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
1388 drm_irq_uninstall(dev); 1391 drm_irq_uninstall(dev);
1389cleanup_gem_stolen: 1392cleanup_gem_stolen:
1390 i915_gem_cleanup_stolen(dev); 1393 i915_gem_cleanup_stolen(dev);
@@ -1739,8 +1742,8 @@ out_power_well:
1739 intel_power_domains_remove(dev_priv); 1742 intel_power_domains_remove(dev_priv);
1740 drm_vblank_cleanup(dev); 1743 drm_vblank_cleanup(dev);
1741out_gem_unload: 1744out_gem_unload:
1742 if (dev_priv->mm.inactive_shrinker.scan_objects) 1745 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1743 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1746 unregister_shrinker(&dev_priv->mm.shrinker);
1744 1747
1745 if (dev->pdev->msi_enabled) 1748 if (dev->pdev->msi_enabled)
1746 pci_disable_msi(dev->pdev); 1749 pci_disable_msi(dev->pdev);
@@ -1791,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)
1791 1794
1792 i915_teardown_sysfs(dev); 1795 i915_teardown_sysfs(dev);
1793 1796
1794 if (dev_priv->mm.inactive_shrinker.scan_objects) 1797 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1795 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1798 unregister_shrinker(&dev_priv->mm.shrinker);
1796 1799
1797 io_mapping_free(dev_priv->gtt.mappable); 1800 io_mapping_free(dev_priv->gtt.mappable);
1798 arch_phys_wc_del(dev_priv->gtt.mtrr); 1801 arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1864,7 +1867,7 @@ int i915_driver_unload(struct drm_device *dev)
1864 kmem_cache_destroy(dev_priv->slab); 1867 kmem_cache_destroy(dev_priv->slab);
1865 1868
1866 pci_dev_put(dev_priv->bridge_dev); 1869 pci_dev_put(dev_priv->bridge_dev);
1867 kfree(dev->dev_private); 1870 kfree(dev_priv);
1868 1871
1869 return 0; 1872 return 0;
1870} 1873}
@@ -1925,6 +1928,8 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1925{ 1928{
1926 struct drm_i915_file_private *file_priv = file->driver_priv; 1929 struct drm_i915_file_private *file_priv = file->driver_priv;
1927 1930
1931 if (file_priv && file_priv->bsd_ring)
1932 file_priv->bsd_ring = NULL;
1928 kfree(file_priv); 1933 kfree(file_priv);
1929} 1934}
1930 1935
@@ -1978,9 +1983,10 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1978 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1983 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1979 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1984 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1980 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1985 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1986 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1981}; 1987};
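The new I915_GEM_USERPTR entry exposes userptr objects to unprivileged render clients. A rough sketch of how userspace would wrap anonymous memory in a GEM handle through it (flag choice and error handling are illustrative, assuming the uapi struct added by this series):

    struct drm_i915_gem_userptr arg = { 0 };

    arg.user_ptr = (uintptr_t)ptr;      /* page-aligned allocation */
    arg.user_size = size;               /* multiple of the page size */
    arg.flags = 0;                      /* or I915_USERPTR_READ_ONLY */

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
            return -errno;
    /* arg.handle now names a GEM object backed by the user pages. */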
1982 1988
1983int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1989int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
1984 1990
1985/* 1991/*
1986 * This is really ugly: Because old userspace abused the linux agp interface to 1992 * This is really ugly: Because old userspace abused the linux agp interface to
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 82f4d1f47d3b..651e65e051c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,6 +36,7 @@
36 36
37#include <linux/console.h> 37#include <linux/console.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/pm_runtime.h>
39#include <drm/drm_crtc_helper.h> 40#include <drm/drm_crtc_helper.h>
40 41
41static struct drm_driver driver; 42static struct drm_driver driver;
@@ -49,12 +50,30 @@ static struct drm_driver driver;
49 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \ 50 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
50 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } 51 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51 52
53#define GEN_CHV_PIPEOFFSETS \
54 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
55 CHV_PIPE_C_OFFSET }, \
56 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
57 CHV_TRANSCODER_C_OFFSET, }, \
58 .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
59 CHV_DPLL_C_OFFSET }, \
60 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
61 CHV_DPLL_C_MD_OFFSET }, \
62 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
63 CHV_PALETTE_C_OFFSET }
64
65#define CURSOR_OFFSETS \
66 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
67
68#define IVB_CURSOR_OFFSETS \
69 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
52 70
53static const struct intel_device_info intel_i830_info = { 71static const struct intel_device_info intel_i830_info = {
54 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 72 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
55 .has_overlay = 1, .overlay_needs_physical = 1, 73 .has_overlay = 1, .overlay_needs_physical = 1,
56 .ring_mask = RENDER_RING, 74 .ring_mask = RENDER_RING,
57 GEN_DEFAULT_PIPEOFFSETS, 75 GEN_DEFAULT_PIPEOFFSETS,
76 CURSOR_OFFSETS,
58}; 77};
59 78
60static const struct intel_device_info intel_845g_info = { 79static const struct intel_device_info intel_845g_info = {
@@ -62,6 +81,7 @@ static const struct intel_device_info intel_845g_info = {
62 .has_overlay = 1, .overlay_needs_physical = 1, 81 .has_overlay = 1, .overlay_needs_physical = 1,
63 .ring_mask = RENDER_RING, 82 .ring_mask = RENDER_RING,
64 GEN_DEFAULT_PIPEOFFSETS, 83 GEN_DEFAULT_PIPEOFFSETS,
84 CURSOR_OFFSETS,
65}; 85};
66 86
67static const struct intel_device_info intel_i85x_info = { 87static const struct intel_device_info intel_i85x_info = {
@@ -71,6 +91,7 @@ static const struct intel_device_info intel_i85x_info = {
71 .has_fbc = 1, 91 .has_fbc = 1,
72 .ring_mask = RENDER_RING, 92 .ring_mask = RENDER_RING,
73 GEN_DEFAULT_PIPEOFFSETS, 93 GEN_DEFAULT_PIPEOFFSETS,
94 CURSOR_OFFSETS,
74}; 95};
75 96
76static const struct intel_device_info intel_i865g_info = { 97static const struct intel_device_info intel_i865g_info = {
@@ -78,6 +99,7 @@ static const struct intel_device_info intel_i865g_info = {
78 .has_overlay = 1, .overlay_needs_physical = 1, 99 .has_overlay = 1, .overlay_needs_physical = 1,
79 .ring_mask = RENDER_RING, 100 .ring_mask = RENDER_RING,
80 GEN_DEFAULT_PIPEOFFSETS, 101 GEN_DEFAULT_PIPEOFFSETS,
102 CURSOR_OFFSETS,
81}; 103};
82 104
83static const struct intel_device_info intel_i915g_info = { 105static const struct intel_device_info intel_i915g_info = {
@@ -85,6 +107,7 @@ static const struct intel_device_info intel_i915g_info = {
85 .has_overlay = 1, .overlay_needs_physical = 1, 107 .has_overlay = 1, .overlay_needs_physical = 1,
86 .ring_mask = RENDER_RING, 108 .ring_mask = RENDER_RING,
87 GEN_DEFAULT_PIPEOFFSETS, 109 GEN_DEFAULT_PIPEOFFSETS,
110 CURSOR_OFFSETS,
88}; 111};
89static const struct intel_device_info intel_i915gm_info = { 112static const struct intel_device_info intel_i915gm_info = {
90 .gen = 3, .is_mobile = 1, .num_pipes = 2, 113 .gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -94,12 +117,14 @@ static const struct intel_device_info intel_i915gm_info = {
94 .has_fbc = 1, 117 .has_fbc = 1,
95 .ring_mask = RENDER_RING, 118 .ring_mask = RENDER_RING,
96 GEN_DEFAULT_PIPEOFFSETS, 119 GEN_DEFAULT_PIPEOFFSETS,
120 CURSOR_OFFSETS,
97}; 121};
98static const struct intel_device_info intel_i945g_info = { 122static const struct intel_device_info intel_i945g_info = {
99 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 123 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
100 .has_overlay = 1, .overlay_needs_physical = 1, 124 .has_overlay = 1, .overlay_needs_physical = 1,
101 .ring_mask = RENDER_RING, 125 .ring_mask = RENDER_RING,
102 GEN_DEFAULT_PIPEOFFSETS, 126 GEN_DEFAULT_PIPEOFFSETS,
127 CURSOR_OFFSETS,
103}; 128};
104static const struct intel_device_info intel_i945gm_info = { 129static const struct intel_device_info intel_i945gm_info = {
105 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 130 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -109,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
109 .has_fbc = 1, 134 .has_fbc = 1,
110 .ring_mask = RENDER_RING, 135 .ring_mask = RENDER_RING,
111 GEN_DEFAULT_PIPEOFFSETS, 136 GEN_DEFAULT_PIPEOFFSETS,
137 CURSOR_OFFSETS,
112}; 138};
113 139
114static const struct intel_device_info intel_i965g_info = { 140static const struct intel_device_info intel_i965g_info = {
@@ -117,6 +143,7 @@ static const struct intel_device_info intel_i965g_info = {
117 .has_overlay = 1, 143 .has_overlay = 1,
118 .ring_mask = RENDER_RING, 144 .ring_mask = RENDER_RING,
119 GEN_DEFAULT_PIPEOFFSETS, 145 GEN_DEFAULT_PIPEOFFSETS,
146 CURSOR_OFFSETS,
120}; 147};
121 148
122static const struct intel_device_info intel_i965gm_info = { 149static const struct intel_device_info intel_i965gm_info = {
@@ -126,6 +153,7 @@ static const struct intel_device_info intel_i965gm_info = {
126 .supports_tv = 1, 153 .supports_tv = 1,
127 .ring_mask = RENDER_RING, 154 .ring_mask = RENDER_RING,
128 GEN_DEFAULT_PIPEOFFSETS, 155 GEN_DEFAULT_PIPEOFFSETS,
156 CURSOR_OFFSETS,
129}; 157};
130 158
131static const struct intel_device_info intel_g33_info = { 159static const struct intel_device_info intel_g33_info = {
@@ -134,6 +162,7 @@ static const struct intel_device_info intel_g33_info = {
134 .has_overlay = 1, 162 .has_overlay = 1,
135 .ring_mask = RENDER_RING, 163 .ring_mask = RENDER_RING,
136 GEN_DEFAULT_PIPEOFFSETS, 164 GEN_DEFAULT_PIPEOFFSETS,
165 CURSOR_OFFSETS,
137}; 166};
138 167
139static const struct intel_device_info intel_g45_info = { 168static const struct intel_device_info intel_g45_info = {
@@ -141,6 +170,7 @@ static const struct intel_device_info intel_g45_info = {
141 .has_pipe_cxsr = 1, .has_hotplug = 1, 170 .has_pipe_cxsr = 1, .has_hotplug = 1,
142 .ring_mask = RENDER_RING | BSD_RING, 171 .ring_mask = RENDER_RING | BSD_RING,
143 GEN_DEFAULT_PIPEOFFSETS, 172 GEN_DEFAULT_PIPEOFFSETS,
173 CURSOR_OFFSETS,
144}; 174};
145 175
146static const struct intel_device_info intel_gm45_info = { 176static const struct intel_device_info intel_gm45_info = {
@@ -150,6 +180,7 @@ static const struct intel_device_info intel_gm45_info = {
150 .supports_tv = 1, 180 .supports_tv = 1,
151 .ring_mask = RENDER_RING | BSD_RING, 181 .ring_mask = RENDER_RING | BSD_RING,
152 GEN_DEFAULT_PIPEOFFSETS, 182 GEN_DEFAULT_PIPEOFFSETS,
183 CURSOR_OFFSETS,
153}; 184};
154 185
155static const struct intel_device_info intel_pineview_info = { 186static const struct intel_device_info intel_pineview_info = {
@@ -157,6 +188,7 @@ static const struct intel_device_info intel_pineview_info = {
157 .need_gfx_hws = 1, .has_hotplug = 1, 188 .need_gfx_hws = 1, .has_hotplug = 1,
158 .has_overlay = 1, 189 .has_overlay = 1,
159 GEN_DEFAULT_PIPEOFFSETS, 190 GEN_DEFAULT_PIPEOFFSETS,
191 CURSOR_OFFSETS,
160}; 192};
161 193
162static const struct intel_device_info intel_ironlake_d_info = { 194static const struct intel_device_info intel_ironlake_d_info = {
@@ -164,6 +196,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
164 .need_gfx_hws = 1, .has_hotplug = 1, 196 .need_gfx_hws = 1, .has_hotplug = 1,
165 .ring_mask = RENDER_RING | BSD_RING, 197 .ring_mask = RENDER_RING | BSD_RING,
166 GEN_DEFAULT_PIPEOFFSETS, 198 GEN_DEFAULT_PIPEOFFSETS,
199 CURSOR_OFFSETS,
167}; 200};
168 201
169static const struct intel_device_info intel_ironlake_m_info = { 202static const struct intel_device_info intel_ironlake_m_info = {
@@ -172,6 +205,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
172 .has_fbc = 1, 205 .has_fbc = 1,
173 .ring_mask = RENDER_RING | BSD_RING, 206 .ring_mask = RENDER_RING | BSD_RING,
174 GEN_DEFAULT_PIPEOFFSETS, 207 GEN_DEFAULT_PIPEOFFSETS,
208 CURSOR_OFFSETS,
175}; 209};
176 210
177static const struct intel_device_info intel_sandybridge_d_info = { 211static const struct intel_device_info intel_sandybridge_d_info = {
@@ -181,6 +215,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
181 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 215 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
182 .has_llc = 1, 216 .has_llc = 1,
183 GEN_DEFAULT_PIPEOFFSETS, 217 GEN_DEFAULT_PIPEOFFSETS,
218 CURSOR_OFFSETS,
184}; 219};
185 220
186static const struct intel_device_info intel_sandybridge_m_info = { 221static const struct intel_device_info intel_sandybridge_m_info = {
@@ -190,6 +225,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
190 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 225 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
191 .has_llc = 1, 226 .has_llc = 1,
192 GEN_DEFAULT_PIPEOFFSETS, 227 GEN_DEFAULT_PIPEOFFSETS,
228 CURSOR_OFFSETS,
193}; 229};
194 230
195#define GEN7_FEATURES \ 231#define GEN7_FEATURES \
@@ -203,6 +239,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
203 GEN7_FEATURES, 239 GEN7_FEATURES,
204 .is_ivybridge = 1, 240 .is_ivybridge = 1,
205 GEN_DEFAULT_PIPEOFFSETS, 241 GEN_DEFAULT_PIPEOFFSETS,
242 IVB_CURSOR_OFFSETS,
206}; 243};
207 244
208static const struct intel_device_info intel_ivybridge_m_info = { 245static const struct intel_device_info intel_ivybridge_m_info = {
@@ -210,6 +247,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
210 .is_ivybridge = 1, 247 .is_ivybridge = 1,
211 .is_mobile = 1, 248 .is_mobile = 1,
212 GEN_DEFAULT_PIPEOFFSETS, 249 GEN_DEFAULT_PIPEOFFSETS,
250 IVB_CURSOR_OFFSETS,
213}; 251};
214 252
215static const struct intel_device_info intel_ivybridge_q_info = { 253static const struct intel_device_info intel_ivybridge_q_info = {
@@ -217,6 +255,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
217 .is_ivybridge = 1, 255 .is_ivybridge = 1,
218 .num_pipes = 0, /* legal, last one wins */ 256 .num_pipes = 0, /* legal, last one wins */
219 GEN_DEFAULT_PIPEOFFSETS, 257 GEN_DEFAULT_PIPEOFFSETS,
258 IVB_CURSOR_OFFSETS,
220}; 259};
221 260
222static const struct intel_device_info intel_valleyview_m_info = { 261static const struct intel_device_info intel_valleyview_m_info = {
@@ -228,6 +267,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
228 .has_fbc = 0, /* legal, last one wins */ 267 .has_fbc = 0, /* legal, last one wins */
229 .has_llc = 0, /* legal, last one wins */ 268 .has_llc = 0, /* legal, last one wins */
230 GEN_DEFAULT_PIPEOFFSETS, 269 GEN_DEFAULT_PIPEOFFSETS,
270 CURSOR_OFFSETS,
231}; 271};
232 272
233static const struct intel_device_info intel_valleyview_d_info = { 273static const struct intel_device_info intel_valleyview_d_info = {
@@ -238,6 +278,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
238 .has_fbc = 0, /* legal, last one wins */ 278 .has_fbc = 0, /* legal, last one wins */
239 .has_llc = 0, /* legal, last one wins */ 279 .has_llc = 0, /* legal, last one wins */
240 GEN_DEFAULT_PIPEOFFSETS, 280 GEN_DEFAULT_PIPEOFFSETS,
281 CURSOR_OFFSETS,
241}; 282};
242 283
243static const struct intel_device_info intel_haswell_d_info = { 284static const struct intel_device_info intel_haswell_d_info = {
@@ -247,6 +288,7 @@ static const struct intel_device_info intel_haswell_d_info = {
247 .has_fpga_dbg = 1, 288 .has_fpga_dbg = 1,
248 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 289 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
249 GEN_DEFAULT_PIPEOFFSETS, 290 GEN_DEFAULT_PIPEOFFSETS,
291 IVB_CURSOR_OFFSETS,
250}; 292};
251 293
252static const struct intel_device_info intel_haswell_m_info = { 294static const struct intel_device_info intel_haswell_m_info = {
@@ -257,6 +299,7 @@ static const struct intel_device_info intel_haswell_m_info = {
257 .has_fpga_dbg = 1, 299 .has_fpga_dbg = 1,
258 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 300 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
259 GEN_DEFAULT_PIPEOFFSETS, 301 GEN_DEFAULT_PIPEOFFSETS,
302 IVB_CURSOR_OFFSETS,
260}; 303};
261 304
262static const struct intel_device_info intel_broadwell_d_info = { 305static const struct intel_device_info intel_broadwell_d_info = {
@@ -267,6 +310,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
267 .has_ddi = 1, 310 .has_ddi = 1,
268 .has_fbc = 1, 311 .has_fbc = 1,
269 GEN_DEFAULT_PIPEOFFSETS, 312 GEN_DEFAULT_PIPEOFFSETS,
313 IVB_CURSOR_OFFSETS,
270}; 314};
271 315
272static const struct intel_device_info intel_broadwell_m_info = { 316static const struct intel_device_info intel_broadwell_m_info = {
@@ -277,6 +321,40 @@ static const struct intel_device_info intel_broadwell_m_info = {
277 .has_ddi = 1, 321 .has_ddi = 1,
278 .has_fbc = 1, 322 .has_fbc = 1,
279 GEN_DEFAULT_PIPEOFFSETS, 323 GEN_DEFAULT_PIPEOFFSETS,
324 IVB_CURSOR_OFFSETS,
325};
326
327static const struct intel_device_info intel_broadwell_gt3d_info = {
328 .gen = 8, .num_pipes = 3,
329 .need_gfx_hws = 1, .has_hotplug = 1,
330 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
331 .has_llc = 1,
332 .has_ddi = 1,
333 .has_fbc = 1,
334 GEN_DEFAULT_PIPEOFFSETS,
335 IVB_CURSOR_OFFSETS,
336};
337
338static const struct intel_device_info intel_broadwell_gt3m_info = {
339 .gen = 8, .is_mobile = 1, .num_pipes = 3,
340 .need_gfx_hws = 1, .has_hotplug = 1,
341 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
342 .has_llc = 1,
343 .has_ddi = 1,
344 .has_fbc = 1,
345 GEN_DEFAULT_PIPEOFFSETS,
346 IVB_CURSOR_OFFSETS,
347};
348
349static const struct intel_device_info intel_cherryview_info = {
350 .is_preliminary = 1,
351 .gen = 8, .num_pipes = 3,
352 .need_gfx_hws = 1, .has_hotplug = 1,
353 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
354 .is_valleyview = 1,
355 .display_mmio_offset = VLV_DISPLAY_BASE,
356 GEN_CHV_PIPEOFFSETS,
357 CURSOR_OFFSETS,
280}; 358};
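Since GEN_CHV_PIPEOFFSETS and CURSOR_OFFSETS are plain designated-initializer fragments, the descriptor above is equivalent to spelling the per-pipe arrays out by hand; a shortened sketch of the expansion (feature flags elided for brevity):

    static const struct intel_device_info intel_cherryview_info = {
            .gen = 8, .num_pipes = 3,
            /* ... feature flags as above ... */
            .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, CHV_PIPE_C_OFFSET },
            .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET },
    };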
281 359
282/* 360/*
@@ -311,8 +389,11 @@ static const struct intel_device_info intel_broadwell_m_info = {
311 INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 389 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
312 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 390 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
313 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ 391 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
314 INTEL_BDW_M_IDS(&intel_broadwell_m_info), \ 392 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
315 INTEL_BDW_D_IDS(&intel_broadwell_d_info) 393 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
394 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
395 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
396 INTEL_CHV_IDS(&intel_cherryview_info)
316 397
317static const struct pci_device_id pciidlist[] = { /* aka */ 398static const struct pci_device_id pciidlist[] = { /* aka */
318 INTEL_PCI_IDS, 399 INTEL_PCI_IDS,
@@ -445,18 +526,20 @@ static int i915_drm_freeze(struct drm_device *dev)
445 return error; 526 return error;
446 } 527 }
447 528
448 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
449
450 drm_irq_uninstall(dev); 529 drm_irq_uninstall(dev);
451 dev_priv->enable_hotplug_processing = false; 530 dev_priv->enable_hotplug_processing = false;
531
532 intel_disable_gt_powersave(dev);
533
452 /* 534 /*
453 * Disable CRTCs directly since we want to preserve sw state 535 * Disable CRTCs directly since we want to preserve sw state
454 * for _thaw. 536 * for _thaw.
455 */ 537 */
456 mutex_lock(&dev->mode_config.mutex); 538 drm_modeset_lock_all(dev);
457 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 539 for_each_crtc(dev, crtc) {
458 dev_priv->display.crtc_disable(crtc); 540 dev_priv->display.crtc_disable(crtc);
459 mutex_unlock(&dev->mode_config.mutex); 541 }
542 drm_modeset_unlock_all(dev);
460 543
461 intel_modeset_suspend_hw(dev); 544 intel_modeset_suspend_hw(dev);
462 } 545 }
@@ -519,24 +602,6 @@ void intel_console_resume(struct work_struct *work)
519 console_unlock(); 602 console_unlock();
520} 603}
521 604
522static void intel_resume_hotplug(struct drm_device *dev)
523{
524 struct drm_mode_config *mode_config = &dev->mode_config;
525 struct intel_encoder *encoder;
526
527 mutex_lock(&mode_config->mutex);
528 DRM_DEBUG_KMS("running encoder hotplug functions\n");
529
530 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
531 if (encoder->hot_plug)
532 encoder->hot_plug(encoder);
533
534 mutex_unlock(&mode_config->mutex);
535
536 /* Just fire off a uevent and let userspace tell us what to do */
537 drm_helper_hpd_irq_event(dev);
538}
539
540static int i915_drm_thaw_early(struct drm_device *dev) 605static int i915_drm_thaw_early(struct drm_device *dev)
541{ 606{
542 struct drm_i915_private *dev_priv = dev->dev_private; 607 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -551,7 +616,6 @@ static int i915_drm_thaw_early(struct drm_device *dev)
551static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) 616static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
552{ 617{
553 struct drm_i915_private *dev_priv = dev->dev_private; 618 struct drm_i915_private *dev_priv = dev->dev_private;
554 int error = 0;
555 619
556 if (drm_core_check_feature(dev, DRIVER_MODESET) && 620 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
557 restore_gtt_mappings) { 621 restore_gtt_mappings) {
@@ -569,12 +633,14 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
569 drm_mode_config_reset(dev); 633 drm_mode_config_reset(dev);
570 634
571 mutex_lock(&dev->struct_mutex); 635 mutex_lock(&dev->struct_mutex);
572 636 if (i915_gem_init_hw(dev)) {
573 error = i915_gem_init_hw(dev); 637 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
638 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
639 }
574 mutex_unlock(&dev->struct_mutex); 640 mutex_unlock(&dev->struct_mutex);
575 641
576 /* We need working interrupts for modeset enabling ... */ 642 /* We need working interrupts for modeset enabling ... */
577 drm_irq_install(dev); 643 drm_irq_install(dev, dev->pdev->irq);
578 644
579 intel_modeset_init_hw(dev); 645 intel_modeset_init_hw(dev);
580 646
@@ -591,7 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
591 intel_hpd_init(dev); 657 intel_hpd_init(dev);
592 dev_priv->enable_hotplug_processing = true; 658 dev_priv->enable_hotplug_processing = true;
593 /* Config may have changed between suspend and resume */ 659 /* Config may have changed between suspend and resume */
594 intel_resume_hotplug(dev); 660 drm_helper_hpd_irq_event(dev);
595 } 661 }
596 662
597 intel_opregion_init(dev); 663 intel_opregion_init(dev);
@@ -613,7 +679,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
613 mutex_unlock(&dev_priv->modeset_restore_lock); 679 mutex_unlock(&dev_priv->modeset_restore_lock);
614 680
615 intel_runtime_pm_put(dev_priv); 681 intel_runtime_pm_put(dev_priv);
616 return error; 682 return 0;
617} 683}
618 684
619static int i915_drm_thaw(struct drm_device *dev) 685static int i915_drm_thaw(struct drm_device *dev)
@@ -746,18 +812,20 @@ int i915_reset(struct drm_device *dev)
746 return ret; 812 return ret;
747 } 813 }
748 814
749 drm_irq_uninstall(dev); 815 /*
750 drm_irq_install(dev); 816 * FIXME: This races pretty badly against concurrent holders of
817 * ring interrupts. This is possible since we've started to drop
818 * dev->struct_mutex in select places when waiting for the gpu.
819 */
751 820
752 /* rps/rc6 re-init is necessary to restore state lost after the 821 /*
753 * reset and the re-install of drm irq. Skip for ironlake per 822 * rps/rc6 re-init is necessary to restore state lost after the
823 * reset and the re-install of gt irqs. Skip for ironlake per
754 * previous concerns that it doesn't respond well to some forms 824 * previous concerns that it doesn't respond well to some forms
755 * of re-init after reset. */ 825 * of re-init after reset.
756 if (INTEL_INFO(dev)->gen > 5) { 826 */
757 mutex_lock(&dev->struct_mutex); 827 if (INTEL_INFO(dev)->gen > 5)
758 intel_enable_gt_powersave(dev); 828 intel_reset_gt_powersave(dev);
759 mutex_unlock(&dev->struct_mutex);
760 }
761 829
762 intel_hpd_init(dev); 830 intel_hpd_init(dev);
763 } else { 831 } else {
@@ -891,21 +959,453 @@ static int i915_pm_poweroff(struct device *dev)
891 return i915_drm_freeze(drm_dev); 959 return i915_drm_freeze(drm_dev);
892} 960}
893 961
894static int i915_runtime_suspend(struct device *device) 962static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
963{
964 hsw_enable_pc8(dev_priv);
965
966 return 0;
967}
968
969static int snb_runtime_resume(struct drm_i915_private *dev_priv)
970{
971 struct drm_device *dev = dev_priv->dev;
972
973 intel_init_pch_refclk(dev);
974
975 return 0;
976}
977
978static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
979{
980 hsw_disable_pc8(dev_priv);
981
982 return 0;
983}
984
985/*
986 * Save all Gunit registers that may be lost after a D3 and a subsequent
987 * S0i[R123] transition. The list of registers needing a save/restore is
988 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
989 * registers in the following way:
990 * - Driver: saved/restored by the driver
991 * - Punit : saved/restored by the Punit firmware
992 * - No, w/o marking: no need to save/restore, since the register is R/O or
993 * used internally by the HW in a way that doesn't depend on
994 * keeping the content across a suspend/resume.
995 * - Debug : used for debugging
996 *
997 * We save/restore all registers marked with 'Driver', with the following
998 * exceptions:
999 * - Registers out of use, including registers marked with 'Debug'.
1000 * These have no effect on the driver's operation, so we don't save/restore
1001 * them to reduce the overhead.
1002 * - Registers that are fully setup by an initialization function called from
1003 * the resume path. For example, many clock gating and RPS/RC6 registers.
1004 * - Registers that provide the right functionality with their reset defaults.
1005 *
1006 * TODO: Except for registers that, based on the above 3 criteria, can be safely
1007 * ignored, we save/restore all others, practically treating the HW context as
1008 * a black-box for the driver. Further investigation is needed to reduce the
1009 * saved/restored registers even further, by following the same 3 criteria.
1010 */
1011static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1012{
1013 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1014 int i;
1015
1016 /* GAM 0x4000-0x4770 */
1017 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1018 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1019 s->arb_mode = I915_READ(ARB_MODE);
1020 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1021 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1022
1023 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1024 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1025
1026 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1027 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1028
1029 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1030 s->ecochk = I915_READ(GAM_ECOCHK);
1031 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1032 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1033
1034 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1035
1036 /* MBC 0x9024-0x91D0, 0x8500 */
1037 s->g3dctl = I915_READ(VLV_G3DCTL);
1038 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1039 s->mbctl = I915_READ(GEN6_MBCTL);
1040
1041 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1042 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1043 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1044 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1045 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1046 s->rstctl = I915_READ(GEN6_RSTCTL);
1047 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1048
1049 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1050 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1051 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1052 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1053 s->ecobus = I915_READ(ECOBUS);
1054 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1055 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1056 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1057 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1058 s->rcedata = I915_READ(VLV_RCEDATA);
1059 s->spare2gh = I915_READ(VLV_SPAREG2H);
1060
1061 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1062 s->gt_imr = I915_READ(GTIMR);
1063 s->gt_ier = I915_READ(GTIER);
1064 s->pm_imr = I915_READ(GEN6_PMIMR);
1065 s->pm_ier = I915_READ(GEN6_PMIER);
1066
1067 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1068 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1069
1070 /* GT SA CZ domain, 0x100000-0x138124 */
1071 s->tilectl = I915_READ(TILECTL);
1072 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1073 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1074 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1075 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1076
1077 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1078 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1079 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1080 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1081
1082 /*
1083 * Not saving any of:
1084 * DFT, 0x9800-0x9EC0
1085 * SARB, 0xB000-0xB1FC
1086 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1087 * PCI CFG
1088 */
1089}
1090
1091static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1092{
1093 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1094 u32 val;
1095 int i;
1096
1097 /* GAM 0x4000-0x4770 */
1098 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1099 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1100 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1101 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1102 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1103
1104 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1105 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1106
1107 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1108 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1109
1110 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1111 I915_WRITE(GAM_ECOCHK, s->ecochk);
1112 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1113 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1114
1115 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1116
1117 /* MBC 0x9024-0x91D0, 0x8500 */
1118 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1119 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1120 I915_WRITE(GEN6_MBCTL, s->mbctl);
1121
1122 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1123 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1124 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1125 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1126 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1127 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1128 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1129
1130 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1131 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1132 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1133 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1134 I915_WRITE(ECOBUS, s->ecobus);
1135 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1136 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1137 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1138 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1139 I915_WRITE(VLV_RCEDATA, s->rcedata);
1140 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1141
1142 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1143 I915_WRITE(GTIMR, s->gt_imr);
1144 I915_WRITE(GTIER, s->gt_ier);
1145 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1146 I915_WRITE(GEN6_PMIER, s->pm_ier);
1147
1148 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1149 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1150
1151 /* GT SA CZ domain, 0x100000-0x138124 */
1152 I915_WRITE(TILECTL, s->tilectl);
1153 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1154 /*
1155 * Preserve the GT allow wake and GFX force clock bits; they are not
1156 * to be restored, as they are used to control the s0ix suspend/resume
1157 * sequence by the caller.
1158 */
1159 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1160 val &= VLV_GTLC_ALLOWWAKEREQ;
1161 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1162 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1163
1164 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1165 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1166 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1167 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1168
1169 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1170
1171 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1172 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1173 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1174 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1175}
1176
1177int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1178{
1179 u32 val;
1180 int err;
1181
1182 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1183 WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1184
1185#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1186 /* Wait for a previous force-off to settle */
1187 if (force_on) {
1188 err = wait_for(!COND, 20);
1189 if (err) {
1190 DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1191 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1192 return err;
1193 }
1194 }
1195
1196 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1197 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1198 if (force_on)
1199 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1200 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1201
1202 if (!force_on)
1203 return 0;
1204
1205 err = wait_for(COND, 20);
1206 if (err)
1207 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1208 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1209
1210 return err;
1211#undef COND
1212}
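The COND/#undef COND pattern above leans on i915's wait_for() polling helper; conceptually it behaves like the following simplified sketch (the real macro in intel_drv.h also handles atomic contexts with cpu_relax(), so this is an approximation):

    /* Poll COND every ~1 ms until it holds or MS milliseconds elapse. */
    #define wait_for(COND, MS) ({                                         \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
            int ret__ = 0;                                                \
            while (!(COND)) {                                             \
                    if (time_after(jiffies, timeout__)) {                 \
                            ret__ = -ETIMEDOUT;                           \
                            break;                                        \
                    }                                                     \
                    msleep(1);                                            \
            }                                                             \
            ret__;                                                        \
    })

Defining COND locally and #undef-ing it right after keeps the polled expression next to its use without leaking the macro into the rest of the file.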
1213
1214static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1215{
1216 u32 val;
1217 int err = 0;
1218
1219 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1220 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1221 if (allow)
1222 val |= VLV_GTLC_ALLOWWAKEREQ;
1223 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1224 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1225
1226#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1227 allow)
1228 err = wait_for(COND, 1);
1229 if (err)
1230 DRM_ERROR("timeout disabling GT waking\n");
1231 return err;
1232#undef COND
1233}
1234
1235static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1236 bool wait_for_on)
1237{
1238 u32 mask;
1239 u32 val;
1240 int err;
1241
1242 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1243 val = wait_for_on ? mask : 0;
1244#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1245 if (COND)
1246 return 0;
1247
1248 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1249 wait_for_on ? "on" : "off",
1250 I915_READ(VLV_GTLC_PW_STATUS));
1251
1252 /*
1253 * RC6 transitioning can be delayed up to 2 msec (see
1254 * valleyview_enable_rps), use 3 msec for safety.
1255 */
1256 err = wait_for(COND, 3);
1257 if (err)
1258 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1259 wait_for_on ? "on" : "off");
1260
1261 return err;
1262#undef COND
1263}
1264
1265static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1266{
1267 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1268 return;
1269
1270 DRM_ERROR("GT register access while GT waking disabled\n");
1271 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1272}
1273
1274static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
1275{
1276 u32 mask;
1277 int err;
1278
1279 /*
1280 * Bspec defines the following GT well-on flags as debug only, so
1281 * don't treat them as hard failures.
1282 */
1283 (void)vlv_wait_for_gt_wells(dev_priv, false);
1284
1285 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1286 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1287
1288 vlv_check_no_gt_access(dev_priv);
1289
1290 err = vlv_force_gfx_clock(dev_priv, true);
1291 if (err)
1292 goto err1;
1293
1294 err = vlv_allow_gt_wake(dev_priv, false);
1295 if (err)
1296 goto err2;
1297 vlv_save_gunit_s0ix_state(dev_priv);
1298
1299 err = vlv_force_gfx_clock(dev_priv, false);
1300 if (err)
1301 goto err2;
1302
1303 return 0;
1304
1305err2:
1306 /* For safety always re-enable waking and disable gfx clock forcing */
1307 vlv_allow_gt_wake(dev_priv, true);
1308err1:
1309 vlv_force_gfx_clock(dev_priv, false);
1310
1311 return err;
1312}
1313
1314static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
1315{
1316 struct drm_device *dev = dev_priv->dev;
1317 int err;
1318 int ret;
1319
1320 /*
1321 * If any of the steps fail, just try to continue; that's the best we
1322 * can do at this point. Return the first error code (which will also
1323 * leave RPM permanently disabled).
1324 */
1325 ret = vlv_force_gfx_clock(dev_priv, true);
1326
1327 vlv_restore_gunit_s0ix_state(dev_priv);
1328
1329 err = vlv_allow_gt_wake(dev_priv, true);
1330 if (!ret)
1331 ret = err;
1332
1333 err = vlv_force_gfx_clock(dev_priv, false);
1334 if (!ret)
1335 ret = err;
1336
1337 vlv_check_no_gt_access(dev_priv);
1338
1339 intel_init_clock_gating(dev);
1340 i915_gem_restore_fences(dev);
1341
1342 return ret;
1343}
1344
1345static int intel_runtime_suspend(struct device *device)
895{ 1346{
896 struct pci_dev *pdev = to_pci_dev(device); 1347 struct pci_dev *pdev = to_pci_dev(device);
897 struct drm_device *dev = pci_get_drvdata(pdev); 1348 struct drm_device *dev = pci_get_drvdata(pdev);
898 struct drm_i915_private *dev_priv = dev->dev_private; 1349 struct drm_i915_private *dev_priv = dev->dev_private;
1350 int ret;
1351
1352 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1353 return -ENODEV;
899 1354
900 WARN_ON(!HAS_RUNTIME_PM(dev)); 1355 WARN_ON(!HAS_RUNTIME_PM(dev));
901 assert_force_wake_inactive(dev_priv); 1356 assert_force_wake_inactive(dev_priv);
902 1357
903 DRM_DEBUG_KMS("Suspending device\n"); 1358 DRM_DEBUG_KMS("Suspending device\n");
904 1359
905 if (HAS_PC8(dev)) 1360 /*
906 hsw_enable_pc8(dev_priv); 1361 * We could deadlock here in case another thread holding struct_mutex
1362 * calls RPM suspend concurrently, since the RPM suspend will wait
1363 * first for this RPM suspend to finish. In this case the concurrent
1364 * RPM resume will be followed by its RPM suspend counterpart. Still
1365 * for consistency, return -EAGAIN, which will reschedule this suspend.
1366 */
1367 if (!mutex_trylock(&dev->struct_mutex)) {
1368 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1369 /*
1370 * Bump the expiration timestamp, otherwise the suspend won't
1371 * be rescheduled.
1372 */
1373 pm_runtime_mark_last_busy(device);
907 1374
1375 return -EAGAIN;
1376 }
1377 /*
1378 * We are safe here against re-faults, since the fault handler takes
1379 * an RPM reference.
1380 */
908 i915_gem_release_all_mmaps(dev_priv); 1381 i915_gem_release_all_mmaps(dev_priv);
1382 mutex_unlock(&dev->struct_mutex);
1383
1384 /*
1385 * rps.work can't be rearmed here, since we get here only after making
1386 * sure the GPU is idle and the RPS freq is set to the minimum. See
1387 * intel_mark_idle().
1388 */
1389 cancel_work_sync(&dev_priv->rps.work);
1390 intel_runtime_pm_disable_interrupts(dev);
1391
1392 if (IS_GEN6(dev)) {
1393 ret = 0;
1394 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1395 ret = hsw_runtime_suspend(dev_priv);
1396 } else if (IS_VALLEYVIEW(dev)) {
1397 ret = vlv_runtime_suspend(dev_priv);
1398 } else {
1399 ret = -ENODEV;
1400 WARN_ON(1);
1401 }
1402
1403 if (ret) {
1404 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1405 intel_runtime_pm_restore_interrupts(dev);
1406
1407 return ret;
1408 }
909 1409
910 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 1410 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
911 dev_priv->pm.suspended = true; 1411 dev_priv->pm.suspended = true;
@@ -923,11 +1423,12 @@ static int i915_runtime_suspend(struct device *device)
923 return 0; 1423 return 0;
924} 1424}
925 1425
926static int i915_runtime_resume(struct device *device) 1426static int intel_runtime_resume(struct device *device)
927{ 1427{
928 struct pci_dev *pdev = to_pci_dev(device); 1428 struct pci_dev *pdev = to_pci_dev(device);
929 struct drm_device *dev = pci_get_drvdata(pdev); 1429 struct drm_device *dev = pci_get_drvdata(pdev);
930 struct drm_i915_private *dev_priv = dev->dev_private; 1430 struct drm_i915_private *dev_priv = dev->dev_private;
1431 int ret;
931 1432
932 WARN_ON(!HAS_RUNTIME_PM(dev)); 1433 WARN_ON(!HAS_RUNTIME_PM(dev));
933 1434
@@ -936,11 +1437,33 @@ static int i915_runtime_resume(struct device *device)
936 intel_opregion_notify_adapter(dev, PCI_D0); 1437 intel_opregion_notify_adapter(dev, PCI_D0);
937 dev_priv->pm.suspended = false; 1438 dev_priv->pm.suspended = false;
938 1439
939 if (HAS_PC8(dev)) 1440 if (IS_GEN6(dev)) {
940 hsw_disable_pc8(dev_priv); 1441 ret = snb_runtime_resume(dev_priv);
1442 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1443 ret = hsw_runtime_resume(dev_priv);
1444 } else if (IS_VALLEYVIEW(dev)) {
1445 ret = vlv_runtime_resume(dev_priv);
1446 } else {
1447 WARN_ON(1);
1448 ret = -ENODEV;
1449 }
941 1450
942 DRM_DEBUG_KMS("Device resumed\n"); 1451 /*
943 return 0; 1452 * No point in rolling things back in case of an error, as the best
1453 * we can do is to hope that things will still work (and disable RPM).
1454 */
1455 i915_gem_init_swizzling(dev);
1456 gen6_update_ring_freq(dev);
1457
1458 intel_runtime_pm_restore_interrupts(dev);
1459 intel_reset_gt_powersave(dev);
1460
1461 if (ret)
1462 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1463 else
1464 DRM_DEBUG_KMS("Device resumed\n");
1465
1466 return ret;
944} 1467}
945 1468
946static const struct dev_pm_ops i915_pm_ops = { 1469static const struct dev_pm_ops i915_pm_ops = {
@@ -954,8 +1477,8 @@ static const struct dev_pm_ops i915_pm_ops = {
954 .poweroff = i915_pm_poweroff, 1477 .poweroff = i915_pm_poweroff,
955 .restore_early = i915_pm_resume_early, 1478 .restore_early = i915_pm_resume_early,
956 .restore = i915_pm_resume, 1479 .restore = i915_pm_resume,
957 .runtime_suspend = i915_runtime_suspend, 1480 .runtime_suspend = intel_runtime_suspend,
958 .runtime_resume = i915_runtime_resume, 1481 .runtime_resume = intel_runtime_resume,
959}; 1482};
960 1483
961static const struct vm_operations_struct i915_gem_vm_ops = { 1484static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -1062,6 +1585,7 @@ static int __init i915_init(void)
1062 driver.get_vblank_timestamp = NULL; 1585 driver.get_vblank_timestamp = NULL;
1063#ifndef CONFIG_DRM_I915_UMS 1586#ifndef CONFIG_DRM_I915_UMS
1064 /* Silently fail loading to not upset userspace. */ 1587 /* Silently fail loading to not upset userspace. */
1588 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1065 return 0; 1589 return 0;
1066#endif 1590#endif
1067 } 1591 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 388c028e223c..49414d30e8d4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,11 +35,13 @@
35#include "i915_reg.h" 35#include "i915_reg.h"
36#include "intel_bios.h" 36#include "intel_bios.h"
37#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
38#include "i915_gem_gtt.h"
38#include <linux/io-mapping.h> 39#include <linux/io-mapping.h>
39#include <linux/i2c.h> 40#include <linux/i2c.h>
40#include <linux/i2c-algo-bit.h> 41#include <linux/i2c-algo-bit.h>
41#include <drm/intel-gtt.h> 42#include <drm/intel-gtt.h>
42#include <linux/backlight.h> 43#include <linux/backlight.h>
44#include <linux/hashtable.h>
43#include <linux/intel-iommu.h> 45#include <linux/intel-iommu.h>
44#include <linux/kref.h> 46#include <linux/kref.h>
45#include <linux/pm_qos.h> 47#include <linux/pm_qos.h>
@@ -91,7 +93,7 @@ enum port {
91}; 93};
92#define port_name(p) ((p) + 'A') 94#define port_name(p) ((p) + 'A')
93 95
94#define I915_NUM_PHYS_VLV 1 96#define I915_NUM_PHYS_VLV 2
95 97
96enum dpio_channel { 98enum dpio_channel {
97 DPIO_CH0, 99 DPIO_CH0,
@@ -162,6 +164,12 @@ enum hpd_pin {
162#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 164#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
163#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) 165#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
164 166
167#define for_each_crtc(dev, crtc) \
168 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
169
170#define for_each_intel_crtc(dev, intel_crtc) \
171 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
172
165#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 173#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
166 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 174 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
167 if ((intel_encoder)->base.crtc == (__crtc)) 175 if ((intel_encoder)->base.crtc == (__crtc))
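Usage of the new iterators can be seen in the freeze path earlier in this pull; a minimal sketch (assuming a struct drm_crtc *crtc local):

    struct drm_crtc *crtc;

    for_each_crtc(dev, crtc) {
            dev_priv->display.crtc_disable(crtc);
    }

for_each_intel_crtc walks the same list via the embedded base.head member, handing back intel_crtc pointers directly and saving a container_of() at each call site.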
@@ -171,6 +179,7 @@ enum hpd_pin {
171 if ((intel_connector)->base.encoder == (__encoder)) 179 if ((intel_connector)->base.encoder == (__encoder))
172 180
173struct drm_i915_private; 181struct drm_i915_private;
182struct i915_mmu_object;
174 183
175enum intel_dpll_id { 184enum intel_dpll_id {
176 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ 185 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
@@ -312,7 +321,6 @@ struct drm_i915_error_state {
312 u32 gab_ctl; 321 u32 gab_ctl;
313 u32 gfx_mode; 322 u32 gfx_mode;
314 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 323 u32 extra_instdone[I915_NUM_INSTDONE_REG];
315 u32 pipestat[I915_MAX_PIPES];
316 u64 fence[I915_MAX_NUM_FENCES]; 324 u64 fence[I915_MAX_NUM_FENCES];
317 struct intel_overlay_error_state *overlay; 325 struct intel_overlay_error_state *overlay;
318 struct intel_display_error_state *display; 326 struct intel_display_error_state *display;
@@ -346,7 +354,7 @@ struct drm_i915_error_state {
346 u64 bbaddr; 354 u64 bbaddr;
347 u64 acthd; 355 u64 acthd;
348 u32 fault_reg; 356 u32 fault_reg;
349 u32 faddr; 357 u64 faddr;
350 u32 rc_psmi; /* sleep state */ 358 u32 rc_psmi; /* sleep state */
351 u32 semaphore_mboxes[I915_NUM_RINGS - 1]; 359 u32 semaphore_mboxes[I915_NUM_RINGS - 1];
352 360
@@ -385,6 +393,7 @@ struct drm_i915_error_state {
385 u32 tiling:2; 393 u32 tiling:2;
386 u32 dirty:1; 394 u32 dirty:1;
387 u32 purgeable:1; 395 u32 purgeable:1;
396 u32 userptr:1;
388 s32 ring:4; 397 s32 ring:4;
389 u32 cache_level:3; 398 u32 cache_level:3;
390 } **active_bo, **pinned_bo; 399 } **active_bo, **pinned_bo;
@@ -449,10 +458,11 @@ struct drm_i915_display_funcs {
449 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 458 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
450 struct drm_framebuffer *fb, 459 struct drm_framebuffer *fb,
451 struct drm_i915_gem_object *obj, 460 struct drm_i915_gem_object *obj,
461 struct intel_engine_cs *ring,
452 uint32_t flags); 462 uint32_t flags);
453 int (*update_primary_plane)(struct drm_crtc *crtc, 463 void (*update_primary_plane)(struct drm_crtc *crtc,
454 struct drm_framebuffer *fb, 464 struct drm_framebuffer *fb,
455 int x, int y); 465 int x, int y);
456 void (*hpd_irq_setup)(struct drm_device *dev); 466 void (*hpd_irq_setup)(struct drm_device *dev);
457 /* clock updates for mode set */ 467 /* clock updates for mode set */
458 /* cursor updates */ 468 /* cursor updates */
@@ -545,6 +555,7 @@ struct intel_device_info {
545 int dpll_offsets[I915_MAX_PIPES]; 555 int dpll_offsets[I915_MAX_PIPES];
546 int dpll_md_offsets[I915_MAX_PIPES]; 556 int dpll_md_offsets[I915_MAX_PIPES];
547 int palette_offsets[I915_MAX_PIPES]; 557 int palette_offsets[I915_MAX_PIPES];
558 int cursor_offsets[I915_MAX_PIPES];
548}; 559};
549 560
550#undef DEFINE_FLAG 561#undef DEFINE_FLAG
@@ -560,168 +571,6 @@ enum i915_cache_level {
560 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ 571 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
561}; 572};
562 573
563typedef uint32_t gen6_gtt_pte_t;
564
565/**
566 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
567 * VMA's presence cannot be guaranteed before binding, or after unbinding the
568 * object into/from the address space.
569 *
570 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
571 * will always be <= an object's lifetime. So object refcounting should cover us.
572 */
573struct i915_vma {
574 struct drm_mm_node node;
575 struct drm_i915_gem_object *obj;
576 struct i915_address_space *vm;
577
578 /** This object's place on the active/inactive lists */
579 struct list_head mm_list;
580
581 struct list_head vma_link; /* Link in the object's VMA list */
582
583 /** This vma's place in the batchbuffer or on the eviction list */
584 struct list_head exec_list;
585
586 /**
587 * Used for performing relocations during execbuffer insertion.
588 */
589 struct hlist_node exec_node;
590 unsigned long exec_handle;
591 struct drm_i915_gem_exec_object2 *exec_entry;
592
593 /**
594 * How many users have pinned this object in GTT space. The following
595 * users can each hold at most one reference: pwrite/pread, pin_ioctl
596 * (via user_pin_count), execbuffer (objects are not allowed multiple
597 * times for the same batchbuffer), and the framebuffer code. When
598 * switching/pageflipping, the framebuffer code has at most two buffers
599 * pinned per crtc.
600 *
601 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
602 * bits with absolutely no headroom. So use 4 bits. */
603 unsigned int pin_count:4;
604#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
605
606 /** Unmap an object from an address space. This usually consists of
607 * setting the valid PTE entries to a reserved scratch page. */
608 void (*unbind_vma)(struct i915_vma *vma);
609 /* Map an object into an address space with the given cache flags. */
610#define GLOBAL_BIND (1<<0)
611 void (*bind_vma)(struct i915_vma *vma,
612 enum i915_cache_level cache_level,
613 u32 flags);
614};
615
616struct i915_address_space {
617 struct drm_mm mm;
618 struct drm_device *dev;
619 struct list_head global_link;
620 unsigned long start; /* Start offset always 0 for dri2 */
621 size_t total; /* size addr space maps (ex. 2GB for ggtt) */
622
623 struct {
624 dma_addr_t addr;
625 struct page *page;
626 } scratch;
627
628 /**
629 * List of objects currently involved in rendering.
630 *
631 * Includes buffers having the contents of their GPU caches
632 * flushed, not necessarily primitives. last_rendering_seqno
633 * represents when the rendering involved will be completed.
634 *
635 * A reference is held on the buffer while on this list.
636 */
637 struct list_head active_list;
638
639 /**
640 * LRU list of objects which are not in the ringbuffer and
641 * are ready to unbind, but are still in the GTT.
642 *
643 * last_rendering_seqno is 0 while an object is in this list.
644 *
645 * A reference is not held on the buffer while on this list,
646 * as merely being GTT-bound shouldn't prevent its being
647 * freed, and we'll pull it off the list in the free path.
648 */
649 struct list_head inactive_list;
650
651 /* FIXME: Need a more generic return type */
652 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
653 enum i915_cache_level level,
654 bool valid); /* Create a valid PTE */
655 void (*clear_range)(struct i915_address_space *vm,
656 uint64_t start,
657 uint64_t length,
658 bool use_scratch);
659 void (*insert_entries)(struct i915_address_space *vm,
660 struct sg_table *st,
661 uint64_t start,
662 enum i915_cache_level cache_level);
663 void (*cleanup)(struct i915_address_space *vm);
664};
665
666/* The Graphics Translation Table is the way in which GEN hardware translates a
667 * Graphics Virtual Address into a Physical Address. In addition to the normal
668 * collateral associated with any va->pa translations GEN hardware also has a
669 * portion of the GTT which can be mapped by the CPU and remain both coherent
670 * and correct (in cases like swizzling). That region is referred to as GMADR in
671 * the spec.
672 */
673struct i915_gtt {
674 struct i915_address_space base;
675 size_t stolen_size; /* Total size of stolen memory */
676
677 unsigned long mappable_end; /* End offset that we can CPU map */
678 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
679 phys_addr_t mappable_base; /* PA of our GMADR */
680
681 /** "Graphics Stolen Memory" holds the global PTEs */
682 void __iomem *gsm;
683
684 bool do_idle_maps;
685
686 int mtrr;
687
688 /* global gtt ops */
689 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
690 size_t *stolen, phys_addr_t *mappable_base,
691 unsigned long *mappable_end);
692};
693#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
694
695#define GEN8_LEGACY_PDPS 4
696struct i915_hw_ppgtt {
697 struct i915_address_space base;
698 struct kref ref;
699 struct drm_mm_node node;
700 unsigned num_pd_entries;
701 unsigned num_pd_pages; /* gen8+ */
702 union {
703 struct page **pt_pages;
704 struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
705 };
706 struct page *pd_pages;
707 union {
708 uint32_t pd_offset;
709 dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
710 };
711 union {
712 dma_addr_t *pt_dma_addr;
713 dma_addr_t *gen8_pt_dma_addr[4];
714 };
715
716 struct i915_hw_context *ctx;
717
718 int (*enable)(struct i915_hw_ppgtt *ppgtt);
719 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
720 struct intel_ring_buffer *ring,
721 bool synchronous);
722 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
723};
724
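The unions let gen6/7 and gen8 code share one structure while viewing the same storage differently. A hypothetical accessor (not in this patch) makes the aliasing explicit:

        /* hypothetical: both union members alias the same storage */
        static struct page **ppgtt_pt_pages(struct i915_hw_ppgtt *ppgtt, int pdp)
        {
                return INTEL_INFO(ppgtt->base.dev)->gen >= 8 ?
                        ppgtt->gen8_pt_pages[pdp] : ppgtt->pt_pages;
        }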
725 574 struct i915_ctx_hang_stats {
726 575 /* This context had batch pending when hang was declared */
727 576 unsigned batch_pending;
@@ -738,13 +587,13 @@ struct i915_ctx_hang_stats {
738 587
739 588 /* This must match up with the value previously used for execbuf2.rsvd1. */
740 589 #define DEFAULT_CONTEXT_ID 0
741 struct i915_hw_context {
590 struct intel_context {
742 591 struct kref ref;
743 592 int id;
744 593 bool is_initialized;
745 594 uint8_t remap_slice;
746 595 struct drm_i915_file_private *file_priv;
747 struct intel_ring_buffer *last_ring;
596 struct intel_engine_cs *last_ring;
748 597 struct drm_i915_gem_object *obj;
749 598 struct i915_ctx_hang_stats hang_stats;
750 599 struct i915_address_space *vm;
@@ -782,6 +631,10 @@ struct i915_fbc {
782 631 } no_fbc_reason;
783 632 };
784 633
634struct i915_drrs {
635 struct intel_connector *connector;
636};
637
785 638 struct i915_psr {
786 639 bool sink_support;
787 640 bool source_ok;
@@ -965,6 +818,67 @@ struct i915_suspend_saved_registers {
965 818 u32 savePCH_PORT_HOTPLUG;
966 819 };
967 820
821struct vlv_s0ix_state {
822 /* GAM */
823 u32 wr_watermark;
824 u32 gfx_prio_ctrl;
825 u32 arb_mode;
826 u32 gfx_pend_tlb0;
827 u32 gfx_pend_tlb1;
828 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
829 u32 media_max_req_count;
830 u32 gfx_max_req_count;
831 u32 render_hwsp;
832 u32 ecochk;
833 u32 bsd_hwsp;
834 u32 blt_hwsp;
835 u32 tlb_rd_addr;
836
837 /* MBC */
838 u32 g3dctl;
839 u32 gsckgctl;
840 u32 mbctl;
841
842 /* GCP */
843 u32 ucgctl1;
844 u32 ucgctl3;
845 u32 rcgctl1;
846 u32 rcgctl2;
847 u32 rstctl;
848 u32 misccpctl;
849
850 /* GPM */
851 u32 gfxpause;
852 u32 rpdeuhwtc;
853 u32 rpdeuc;
854 u32 ecobus;
855 u32 pwrdwnupctl;
856 u32 rp_down_timeout;
857 u32 rp_deucsw;
858 u32 rcubmabdtmr;
859 u32 rcedata;
860 u32 spare2gh;
861
862 /* Display 1 CZ domain */
863 u32 gt_imr;
864 u32 gt_ier;
865 u32 pm_imr;
866 u32 pm_ier;
867 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
868
869 /* GT SA CZ domain */
870 u32 tilectl;
871 u32 gt_fifoctl;
872 u32 gtlc_wake_ctrl;
873 u32 gtlc_survive;
874 u32 pmwgicz;
875
876 /* Display 2 CZ domain */
877 u32 gu_ctl0;
878 u32 gu_ctl1;
879 u32 clock_gate_dis2;
880};
881
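The structure is a plain register snapshot: suspend reads each register once into the struct and resume writes it back. A sketch of the save half; RD_REG() and the *_REG tokens are placeholders, not the real accessors or offsets:

        static void vlv_save_s0ix_state(struct drm_i915_private *dev_priv)
        {
                struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
                int i;

                /* GAM */
                s->wr_watermark = RD_REG(WR_WATERMARK_REG);
                s->gfx_prio_ctrl = RD_REG(GFX_PRIO_CTRL_REG);
                for (i = 0; i < GEN7_LRA_LIMITS_REG_NUM; i++)
                        s->lra_limits[i] = RD_REG(LRA_LIMITS_BASE_REG + i * 4);
                /* ... every remaining field follows the same pattern ... */
        }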
968 882 struct intel_gen6_power_mgmt {
969 883 /* work and pm_iir are protected by dev_priv->irq_lock */
970 884 struct work_struct work;
@@ -1074,6 +988,7 @@ struct i915_power_domains {
1074 988 * time are on. They are kept on until after the first modeset.
1075 989 */
1076 990 bool init_power_on;
991 bool initializing;
1077 992 int power_well_count;
1078 993
1079 994 struct mutex lock;
@@ -1132,7 +1047,8 @@ struct i915_gem_mm {
1132 1047 /** PPGTT used for aliasing the PPGTT with the GTT */
1133 1048 struct i915_hw_ppgtt *aliasing_ppgtt;
1134 1049
1135 struct shrinker inactive_shrinker;
1050 struct notifier_block oom_notifier;
1051 struct shrinker shrinker;
1136 1052 bool shrinker_no_lock_stealing;
1137 1053
1138 1054 /** LRU list of objects with fence regs on them. */
@@ -1170,6 +1086,9 @@ struct i915_gem_mm {
1170 1086 */
1171 1087 bool busy;
1172 1088
1089 /* the indicator for dispatch video commands on two BSD rings */
1090 int bsd_ring_dispatch_index;
1091
1173 1092 /** Bit 6 swizzling required for X tiling */
1174 1093 uint32_t bit_6_swizzle_x;
1175 1094 /** Bit 6 swizzling required for Y tiling */
@@ -1245,8 +1164,12 @@ struct i915_gpu_error {
1245 1164 */
1246 1165 wait_queue_head_t reset_queue;
1247 1166
1248 /* For gpu hang simulation. */
1249 unsigned int stop_rings;
1167 /* Userspace knobs for gpu hang simulation;
1168 * combines both a ring mask and extra flags
1169 */
1170 u32 stop_rings;
1171#define I915_STOP_RING_ALLOW_BAN (1 << 31)
1172#define I915_STOP_RING_ALLOW_WARN (1 << 30)
1250 1173
1251 1174 /* For missed irq/seqno simulation. */
1252 1175 unsigned int test_irq_rings;
@@ -1266,6 +1189,12 @@ struct ddi_vbt_port_info {
1266 1189 uint8_t supports_dp:1;
1267 1190 };
1268 1191
1192enum drrs_support_type {
1193 DRRS_NOT_SUPPORTED = 0,
1194 STATIC_DRRS_SUPPORT = 1,
1195 SEAMLESS_DRRS_SUPPORT = 2
1196};
1197
1269 1198 struct intel_vbt_data {
1270 1199 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1271 1200 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1278,9 +1207,12 @@ struct intel_vbt_data {
1278 1207 unsigned int lvds_use_ssc:1;
1279 1208 unsigned int display_clock_mode:1;
1280 1209 unsigned int fdi_rx_polarity_inverted:1;
1210 unsigned int has_mipi:1;
1281 1211 int lvds_ssc_freq;
1282 1212 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1283 1213
1214 enum drrs_support_type drrs_type;
1215
1284 1216 /* eDP */
1285 1217 int edp_rate;
1286 1218 int edp_lanes;
@@ -1299,7 +1231,14 @@ struct intel_vbt_data {
1299 1231
1300 1232 /* MIPI DSI */
1301 1233 struct {
1234 u16 port;
1302 1235 u16 panel_id;
1236 struct mipi_config *config;
1237 struct mipi_pps_data *pps;
1238 u8 seq_version;
1239 u32 size;
1240 u8 *data;
1241 u8 *sequence[MIPI_SEQ_MAX];
1303 1242 } dsi;
1304 1243
1305 1244 int crt_ddc_pin;
@@ -1351,23 +1290,13 @@ struct ilk_wm_values {
1351 1290 * goes back to false exactly before we reenable the IRQs. We use this variable
1352 1291 * to check if someone is trying to enable/disable IRQs while they're supposed
1353 1292 * to be disabled. This shouldn't happen and we'll print some error messages in
1354 * case it happens, but if it actually happens we'll also update the variables
1355 * inside struct regsave so when we restore the IRQs they will contain the
1356 * latest expected values.
1293 * case it happens.
1357 1294 *
1358 1295 * For more, read the Documentation/power/runtime_pm.txt.
1359 1296 */
1360 1297 struct i915_runtime_pm {
1361 1298 bool suspended;
1362 1299 bool irqs_disabled;
1363
1364 struct {
1365 uint32_t deimr;
1366 uint32_t sdeimr;
1367 uint32_t gtimr;
1368 uint32_t gtier;
1369 uint32_t gen6_pmimr;
1370 } regsave;
1371 1300 };
1372 1301
1373 1302 enum intel_pipe_crc_source {
@@ -1400,7 +1329,7 @@ struct intel_pipe_crc {
1400 1329 wait_queue_head_t wq;
1401 1330 };
1402 1331
1403 typedef struct drm_i915_private {
1332 struct drm_i915_private {
1404 1333 struct drm_device *dev;
1405 1334 struct kmem_cache *slab;
1406 1335
@@ -1424,10 +1353,13 @@ typedef struct drm_i915_private {
1424 1353 */
1425 1354 uint32_t gpio_mmio_base;
1426 1355
1356 /* MMIO base address for MIPI regs */
1357 uint32_t mipi_mmio_base;
1358
1427 1359 wait_queue_head_t gmbus_wait_queue;
1428 1360
1429 1361 struct pci_dev *bridge_dev;
1430 struct intel_ring_buffer ring[I915_NUM_RINGS];
1362 struct intel_engine_cs ring[I915_NUM_RINGS];
1431 1363 uint32_t last_seqno, next_seqno;
1432 1364
1433 1365 drm_dma_handle_t *status_page_dmah;
@@ -1469,6 +1401,7 @@ typedef struct drm_i915_private {
1469 1401 struct timer_list hotplug_reenable_timer;
1470 1402
1471 1403 struct i915_fbc fbc;
1404 struct i915_drrs drrs;
1472 1405 struct intel_opregion opregion;
1473 1406 struct intel_vbt_data vbt;
1474 1407
@@ -1486,6 +1419,7 @@ typedef struct drm_i915_private {
1486 1419 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1487 1420
1488 1421 unsigned int fsb_freq, mem_freq, is_ddr3;
1422 unsigned int vlv_cdclk_freq;
1489 1423
1490 1424 /**
1491 1425 * wq - Driver workqueue for GEM.
@@ -1509,9 +1443,12 @@ typedef struct drm_i915_private {
1509 1443 struct mutex modeset_restore_lock;
1510 1444
1511 1445 struct list_head vm_list; /* Global list of all address spaces */
1512 struct i915_gtt gtt; /* VMA representing the global address space */
1446 struct i915_gtt gtt; /* VM representing the global address space */
1513 1447
1514 1448 struct i915_gem_mm mm;
1449#if defined(CONFIG_MMU_NOTIFIER)
1450 DECLARE_HASHTABLE(mmu_notifiers, 7);
1451#endif
1515 1452
1516 1453 /* Kernel Modesetting */
1517 1454
@@ -1580,6 +1517,7 @@ typedef struct drm_i915_private {
1580 1517
1581 1518 u32 suspend_count;
1582 1519 struct i915_suspend_saved_registers regfile;
1520 struct vlv_s0ix_state vlv_s0ix_state;
1583 1521
1584 1522 struct {
1585 1523 /*
@@ -1605,7 +1543,12 @@ typedef struct drm_i915_private {
1605 1543 struct i915_dri1_state dri1;
1606 1544 /* Old ums support infrastructure, same warning applies. */
1607 1545 struct i915_ums_state ums;
1608 } drm_i915_private_t;
1546
1547 /*
1548 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1549 * will be rejected. Instead look for a better place.
1550 */
1551};
1609 1552
1610 1553 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1611 1554 {
@@ -1642,6 +1585,8 @@ struct drm_i915_gem_object_ops {
1642 1585 */
1643 1586 int (*get_pages)(struct drm_i915_gem_object *);
1644 1587 void (*put_pages)(struct drm_i915_gem_object *);
1588 int (*dmabuf_export)(struct drm_i915_gem_object *);
1589 void (*release)(struct drm_i915_gem_object *);
1645 1590 };
1646 1591
1647 1592 struct drm_i915_gem_object {
@@ -1732,7 +1677,7 @@ struct drm_i915_gem_object {
1732 1677 void *dma_buf_vmapping;
1733 1678 int vmapping_count;
1734 1679
1735 struct intel_ring_buffer *ring;
1680 struct intel_engine_cs *ring;
1736 1681
1737 1682 /** Breadcrumb of last rendering to the buffer. */
1738 1683 uint32_t last_read_seqno;
@@ -1755,8 +1700,20 @@ struct drm_i915_gem_object {
1755 1700
1756 1701 /** for phy allocated objects */
1757 1702 drm_dma_handle_t *phys_handle;
1758};
1759 1703
1704 union {
1705 struct i915_gem_userptr {
1706 uintptr_t ptr;
1707 unsigned read_only :1;
1708 unsigned workers :4;
1709#define I915_GEM_USERPTR_MAX_WORKERS 15
1710
1711 struct mm_struct *mm;
1712 struct i915_mmu_object *mn;
1713 struct work_struct *work;
1714 } userptr;
1715 };
1716};
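The userptr union above is filled in by the new userptr ioctl. From userspace the call looks roughly like this; field and flag names follow the uapi added by this series, but treat the fragment as a sketch rather than a complete program (use_gem_handle() stands in for whatever the caller does next):

        struct drm_i915_gem_userptr arg = { 0 };

        arg.user_ptr = (uintptr_t)mem;        /* page-aligned buffer */
        arg.user_size = size;                 /* multiple of the page size */
        arg.flags = I915_USERPTR_READ_ONLY;
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
                use_gem_handle(arg.handle);   /* now backed by 'mem' */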
1760 1717 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1761 1718
1762 1719 /**
@@ -1771,7 +1728,7 @@ struct drm_i915_gem_object {
1771 1728 */
1772 1729 struct drm_i915_gem_request {
1773 1730 /** On which ring this request was generated */
1774 struct intel_ring_buffer *ring;
1731 struct intel_engine_cs *ring;
1775 1732
1776 1733 /** GEM sequence number associated with this request. */
1777 1734 uint32_t seqno;
@@ -1783,7 +1740,7 @@ struct drm_i915_gem_request {
1783 1740 u32 tail;
1784 1741
1785 1742 /** Context related to this request */
1786 struct i915_hw_context *ctx;
1743 struct intel_context *ctx;
1787 1744
1788 1745 /** Batch buffer related to this request if any */
1789 1746 struct drm_i915_gem_object *batch_obj;
@@ -1810,8 +1767,8 @@ struct drm_i915_file_private {
1810 1767 } mm;
1811 1768 struct idr context_idr;
1812 1769
1813 struct i915_hw_context *private_default_ctx;
1814 1770 atomic_t rps_wait_boost;
1771 struct intel_engine_cs *bsd_ring;
1815 1772 };
1816 1773
1817 1774 /*
@@ -1879,11 +1836,17 @@ struct drm_i915_cmd_descriptor {
1879 1836 * the expected value, the parser rejects it. Only valid if flags has
1880 1837 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
1881 1838 * are valid.
1839 *
1840 * If the check specifies a non-zero condition_mask then the parser
1841 * only performs the check when the bits specified by condition_mask
1842 * are non-zero.
1882 1843 */
1883 1844 struct {
1884 1845 u32 offset;
1885 1846 u32 mask;
1886 1847 u32 expected;
1848 u32 condition_offset;
1849 u32 condition_mask;
1887 1850 } bits[MAX_CMD_DESC_BITMASKS];
1888 1851 };
1889 1852
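With condition_offset/condition_mask, a bitmask check can be made conditional on another dword in the same command. A hypothetical descriptor entry, shown only to illustrate the fields:

        /* hypothetical: dword 1 bit 2 must be clear, but only when the
         * enable field in dword 2 is non-zero */
        .bits = {{
                .offset = 1,
                .mask = BIT(2),
                .expected = 0,
                .condition_offset = 2,
                .condition_mask = 0x3,
        }},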
@@ -1925,8 +1888,9 @@ struct drm_i915_cmd_table {
1925 1888 (dev)->pdev->device == 0x0106 || \
1926 1889 (dev)->pdev->device == 0x010A)
1927 1890 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1891 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
1928 1892 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1929 #define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
1893 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
1930 1894 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1931 1895 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1932 1896 ((dev)->pdev->device & 0xFF00) == 0x0C00)
@@ -1962,17 +1926,21 @@ struct drm_i915_cmd_table {
1962 1926 #define BSD_RING (1<<VCS)
1963 1927 #define BLT_RING (1<<BCS)
1964 1928 #define VEBOX_RING (1<<VECS)
1965 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
1966 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
1967 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
1968 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1969 #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1929 #define BSD2_RING (1<<VCS2)
1930 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
1931 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
1932 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
1933 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
1934 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1935 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
1936 to_i915(dev)->ellc_size)
1970 1937 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1971 1938
1972 1939 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1973 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1974 #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
1975 && !IS_BROADWELL(dev))
1940 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
1941 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
1942 #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
1943 && !IS_GEN8(dev))
1976 1944 #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
1977 1945 #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
1978 1946
@@ -2010,8 +1978,8 @@ struct drm_i915_cmd_table {
2010 1978 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
2011 1979 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
2012 1980 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
2013 #define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
2014 #define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
1981 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
1982 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
2015 1983
2016 1984 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
2017 1985 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2068,6 +2036,7 @@ struct i915_params {
2068 2036 bool prefault_disable;
2069 2037 bool reset;
2070 2038 bool disable_display;
2039 bool disable_vtd_wa;
2071 2040 };
2072 2041 extern struct i915_params i915 __read_mostly;
2073 2042
@@ -2096,6 +2065,7 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2096 2065 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2097 2066 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2098 2067 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2068int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2099 2069
2100 2070 extern void intel_console_resume(struct work_struct *work);
2101 2071
@@ -2170,6 +2140,9 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
2170 2140 struct drm_file *file_priv);
2171 2141 int i915_gem_get_tiling(struct drm_device *dev, void *data,
2172 2142 struct drm_file *file_priv);
2143int i915_gem_init_userptr(struct drm_device *dev);
2144int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2145 struct drm_file *file);
2173 2146 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2174 2147 struct drm_file *file_priv);
2175 2148 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -2227,9 +2200,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
2227 2200
2228 2201 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2229 2202 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2230 struct intel_ring_buffer *to);
2203 struct intel_engine_cs *to);
2231 2204 void i915_vma_move_to_active(struct i915_vma *vma,
2232 struct intel_ring_buffer *ring);
2205 struct intel_engine_cs *ring);
2233 2206 int i915_gem_dumb_create(struct drm_file *file_priv,
2234 2207 struct drm_device *dev,
2235 2208 struct drm_mode_create_dumb *args);
@@ -2249,31 +2222,14 @@ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
2249 2222 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
2250 2223 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2251 2224
2252 static inline bool
2253 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
2254 {
2255 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2256 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2257 dev_priv->fence_regs[obj->fence_reg].pin_count++;
2258 return true;
2259 } else
2260 return false;
2261 }
2262
2263 static inline void
2264 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
2265 {
2266 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2267 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2268 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
2269 dev_priv->fence_regs[obj->fence_reg].pin_count--;
2270 }
2271 }
2225 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
2226 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
2272 2227
2273 2228 struct drm_i915_gem_request *
2274 i915_gem_find_active_request(struct intel_ring_buffer *ring);
2229 i915_gem_find_active_request(struct intel_engine_cs *ring);
2275 2230
2276 2231 bool i915_gem_retire_requests(struct drm_device *dev);
2232 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
2277 2233 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2278 2234 bool interruptible);
2279 2235 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2292,23 +2248,35 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
2292 2248 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
2293 2249 }
2294 2250
2251static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
2252{
2253 return dev_priv->gpu_error.stop_rings == 0 ||
2254 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
2255}
2256
2257static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
2258{
2259 return dev_priv->gpu_error.stop_rings == 0 ||
2260 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
2261}
2262
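With the two flag bits carved out of the top of stop_rings, a hang test can target specific rings and still choose the ban/warn behaviour. For example (values illustrative):

        /* stop the render ring; allow banning, suppress the warning */
        dev_priv->gpu_error.stop_rings = (1 << RCS) | I915_STOP_RING_ALLOW_BAN;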
2295 2263 void i915_gem_reset(struct drm_device *dev);
2296 2264 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2297 2265 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2298 2266 int __must_check i915_gem_init(struct drm_device *dev);
2299 2267 int __must_check i915_gem_init_hw(struct drm_device *dev);
2300 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
2268 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
2301 2269 void i915_gem_init_swizzling(struct drm_device *dev);
2302 2270 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
2303 2271 int __must_check i915_gpu_idle(struct drm_device *dev);
2304 2272 int __must_check i915_gem_suspend(struct drm_device *dev);
2305 int __i915_add_request(struct intel_ring_buffer *ring,
2273 int __i915_add_request(struct intel_engine_cs *ring,
2306 2274 struct drm_file *file,
2307 2275 struct drm_i915_gem_object *batch_obj,
2308 2276 u32 *seqno);
2309 2277 #define i915_add_request(ring, seqno) \
2310 2278 __i915_add_request(ring, NULL, NULL, seqno)
2311 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
2279 int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
2312 2280 uint32_t seqno);
2313 2281 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2314 2282 int __must_check
@@ -2319,7 +2287,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2319 2287 int __must_check
2320 2288 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2321 2289 u32 alignment,
2322 struct intel_ring_buffer *pipelined);
2290 struct intel_engine_cs *pipelined);
2323 2291 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2324 2292 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2325 2293 int align);
@@ -2416,22 +2384,22 @@ void i915_gem_context_reset(struct drm_device *dev);
2416 2384 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2417 2385 int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2418 2386 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2419 int i915_switch_context(struct intel_ring_buffer *ring,
2420 struct i915_hw_context *to);
2421 struct i915_hw_context *
2387 int i915_switch_context(struct intel_engine_cs *ring,
2388 struct intel_context *to);
2389 struct intel_context *
2422 2390 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2423 2391 void i915_gem_context_free(struct kref *ctx_ref);
2424 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2392 static inline void i915_gem_context_reference(struct intel_context *ctx)
2425 2393 {
2426 2394 kref_get(&ctx->ref);
2427 2395 }
2428 2396
2429 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2397 static inline void i915_gem_context_unreference(struct intel_context *ctx)
2430 2398 {
2431 2399 kref_put(&ctx->ref, i915_gem_context_free);
2432 2400 }
2433 2401
2434 static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
2402 static inline bool i915_gem_context_is_default(const struct intel_context *c)
2435 2403 {
2436 2404 return c->id == DEFAULT_CONTEXT_ID;
2437 2405 }
@@ -2441,6 +2409,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2441 2409 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2442 2410 struct drm_file *file);
2443 2411
2412/* i915_gem_render_state.c */
2413int i915_gem_render_state_init(struct intel_engine_cs *ring);
2444 2414 /* i915_gem_evict.c */
2445 2415 int __must_check i915_gem_evict_something(struct drm_device *dev,
2446 2416 struct i915_address_space *vm,
@@ -2453,23 +2423,12 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2453 2423 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2454 2424 int i915_gem_evict_everything(struct drm_device *dev);
2455 2425
2456 /* i915_gem_gtt.c */
2426 /* belongs in i915_gem_gtt.h */
2457void i915_check_and_clear_faults(struct drm_device *dev);
2458void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2459void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2460int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2461void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
2462void i915_gem_init_global_gtt(struct drm_device *dev);
2463void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
2464 unsigned long mappable_end, unsigned long end);
2465int i915_gem_gtt_init(struct drm_device *dev);
2466 2427 static inline void i915_gem_chipset_flush(struct drm_device *dev)
2467 2428 {
2468 2429 if (INTEL_INFO(dev)->gen < 6)
2469 2430 intel_gtt_chipset_flush();
2470 2431 }
2471int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
2472bool intel_enable_ppgtt(struct drm_device *dev, bool full);
2473 2432
2474 2433 /* i915_gem_stolen.c */
2475 2434 int i915_gem_init_stolen(struct drm_device *dev);
@@ -2537,9 +2496,11 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2537 2496 const char *i915_cache_level_str(int type);
2538 2497
2539 2498 /* i915_cmd_parser.c */
2540 void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
2541 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
2542 int i915_parse_cmds(struct intel_ring_buffer *ring,
2499 int i915_cmd_parser_get_version(void);
2500 int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
2501 void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
2502 bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
2503 int i915_parse_cmds(struct intel_engine_cs *ring,
2543 2504 struct drm_i915_gem_object *batch_obj,
2544 2505 u32 batch_start_offset,
2545 2506 bool is_master);
@@ -2688,20 +2649,6 @@ void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2688 2649 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
2689 2650 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2690 2651
2691void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2692void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2693
2694#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
2695 (((reg) >= 0x2000 && (reg) < 0x4000) ||\
2696 ((reg) >= 0x5000 && (reg) < 0x8000) ||\
2697 ((reg) >= 0xB000 && (reg) < 0x12000) ||\
2698 ((reg) >= 0x2E000 && (reg) < 0x30000))
2699
2700#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
2701 (((reg) >= 0x12000 && (reg) < 0x14000) ||\
2702 ((reg) >= 0x22000 && (reg) < 0x24000) ||\
2703 ((reg) >= 0x30000 && (reg) < 0x40000))
2704
2705 2652 #define FORCEWAKE_RENDER (1 << 0)
2706 2653 #define FORCEWAKE_MEDIA (1 << 1)
2707 2654 #define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3326770c9ed2..f36126383d26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
31 31 #include "i915_drv.h"
32 32 #include "i915_trace.h"
33 33 #include "intel_drv.h"
34 #include <linux/oom.h>
34 35 #include <linux/shmem_fs.h>
35 36 #include <linux/slab.h>
36 37 #include <linux/swap.h>
@@ -43,6 +44,8 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
43 44 static __must_check int
44 45 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 46 bool readonly);
47static void
48i915_gem_object_retire(struct drm_i915_gem_object *obj);
46 49
47 50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
48 51 struct drm_i915_gem_object *obj);
@@ -50,14 +53,15 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
50 53 struct drm_i915_fence_reg *fence,
51 54 bool enable);
52 55
53 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
54 struct shrink_control *sc);
55 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
56 struct shrink_control *sc);
56 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
57 struct shrink_control *sc);
58 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
59 struct shrink_control *sc);
60static int i915_gem_shrinker_oom(struct notifier_block *nb,
61 unsigned long event,
62 void *ptr);
57 63 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
58 64 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
59static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
60static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
61 65
62 66 static bool cpu_cache_is_coherent(struct drm_device *dev,
63 67 enum i915_cache_level level)
@@ -470,6 +474,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
470 474 ret = i915_gem_object_wait_rendering(obj, true);
471 475 if (ret)
472 476 return ret;
477
478 i915_gem_object_retire(obj);
473 479 }
474 480
475 481 ret = i915_gem_object_get_pages(obj);
@@ -885,6 +891,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
885 891 ret = i915_gem_object_wait_rendering(obj, false);
886 892 if (ret)
887 893 return ret;
894
895 i915_gem_object_retire(obj);
888 896 }
889 897 /* Same trick applies to invalidate partially written cachelines read
890 898 * before writing. */
@@ -1088,7 +1096,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
1088 1096 * equal.
1089 1097 */
1090 1098 static int
1091 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1099 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1092 1100 {
1093 1101 int ret;
1094 1102
@@ -1107,7 +1115,7 @@ static void fake_irq(unsigned long data)
1107 1115 }
1108 1116
1109 1117 static bool missed_irq(struct drm_i915_private *dev_priv,
1110 struct intel_ring_buffer *ring)
1118 struct intel_engine_cs *ring)
1111 1119 {
1112 1120 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1113 1121 }
@@ -1138,7 +1146,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1138 1146 * Returns 0 if the seqno was found within the allotted time. Else returns the
1139 1147 * errno with the remaining time filled in the timeout argument.
1140 1148 */
1141 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1149 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1142 1150 unsigned reset_counter,
1143 1151 bool interruptible,
1144 1152 struct timespec *timeout,
@@ -1245,7 +1253,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1245 1253 * request and object lists appropriately for that event.
1246 1254 */
1247 1255 int
1248 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1256 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
1249 1257 {
1250 1258 struct drm_device *dev = ring->dev;
1251 1259 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1270,9 +1278,10 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1270 1278
1271 1279 static int
1272 1280 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1273 struct intel_ring_buffer *ring)
1281 struct intel_engine_cs *ring)
1274 1282 {
1275 i915_gem_retire_requests_ring(ring);
1283 if (!obj->active)
1284 return 0;
1276 1285
1277 1286 /* Manually manage the write flush as we may have not yet
1278 1287 * retired the buffer.
@@ -1282,7 +1291,6 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1282 1291 * we know we have passed the last write.
1283 1292 */
1284 1293 obj->last_write_seqno = 0;
1285 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1286 1294
1287 1295 return 0;
1288 1296 }
@@ -1295,7 +1303,7 @@ static __must_check int
1295 1303 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1296 1304 bool readonly)
1297 1305 {
1298 struct intel_ring_buffer *ring = obj->ring;
1306 struct intel_engine_cs *ring = obj->ring;
1299 1307 u32 seqno;
1300 1308 int ret;
1301 1309
@@ -1320,7 +1328,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1320 1328 {
1321 1329 struct drm_device *dev = obj->base.dev;
1322 1330 struct drm_i915_private *dev_priv = dev->dev_private;
1323 struct intel_ring_buffer *ring = obj->ring;
1331 struct intel_engine_cs *ring = obj->ring;
1324 1332 unsigned reset_counter;
1325 1333 u32 seqno;
1326 1334 int ret;
@@ -1536,7 +1544,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1536 1544
1537 1545 /* Access to snoopable pages through the GTT is incoherent. */
1538 1546 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1539 ret = -EINVAL;
1547 ret = -EFAULT;
1540 1548 goto unlock;
1541 1549 }
1542 1550
@@ -1803,12 +1811,16 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1803 1811 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1804 1812 }
1805 1813
1814static inline int
1815i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1816{
1817 return obj->madv == I915_MADV_DONTNEED;
1818}
1819
1806 1820 /* Immediately discard the backing storage */
1807 1821 static void
1808 1822 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1809 1823 {
1810 struct inode *inode;
1811
1812 1824 i915_gem_object_free_mmap_offset(obj);
1813 1825
1814 1826 if (obj->base.filp == NULL)
@@ -1819,16 +1831,28 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1819 1831 * To do this we must instruct the shmfs to drop all of its
1820 1832 * backing pages, *now*.
1821 1833 */
1822 inode = file_inode(obj->base.filp);
1823 shmem_truncate_range(inode, 0, (loff_t)-1);
1824
1834 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1825 1835 obj->madv = __I915_MADV_PURGED;
1826 1836 }
1827 1837
1828 static inline int
1829 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1830 {
1831 return obj->madv == I915_MADV_DONTNEED;
1838 /* Try to discard unwanted pages */
1839 static void
1840 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1841 {
1842 struct address_space *mapping;
1843
1844 switch (obj->madv) {
1845 case I915_MADV_DONTNEED:
1846 i915_gem_object_truncate(obj);
1847 case __I915_MADV_PURGED:
1848 return;
1849 }
1850
1851 if (obj->base.filp == NULL)
1852 return;
1853
1854 mapping = file_inode(obj->base.filp)->i_mapping;
1855 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1832 1856 }
1833 1857
1834 1858 static void
@@ -1893,8 +1917,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1893 1917 ops->put_pages(obj);
1894 1918 obj->pages = NULL;
1895 1919
1896 if (i915_gem_object_is_purgeable(obj))
1897 i915_gem_object_truncate(obj);
1920 i915_gem_object_invalidate(obj);
1898 1921
1899 1922 return 0;
1900 1923 }
@@ -1903,58 +1926,58 @@ static unsigned long
1903 1926 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1904 1927 bool purgeable_only)
1905 1928 {
1906 struct list_head still_bound_list;
1907 struct drm_i915_gem_object *obj, *next;
1929 struct list_head still_in_list;
1930 struct drm_i915_gem_object *obj;
1908 1931 unsigned long count = 0;
1909 1932
1910 list_for_each_entry_safe(obj, next,
1911 &dev_priv->mm.unbound_list,
1912 global_list) {
1913 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1914 i915_gem_object_put_pages(obj) == 0) {
1915 count += obj->base.size >> PAGE_SHIFT;
1916 if (count >= target)
1917 return count;
1918 }
1919 }
1920
1921 1933 /*
1922 * As we may completely rewrite the bound list whilst unbinding
1934 * As we may completely rewrite the (un)bound list whilst unbinding
1923 1935 * (due to retiring requests) we have to strictly process only
1924 1936 * one element of the list at a time, and recheck the list
1925 1937 * on every iteration.
1938 *
1939 * In particular, we must hold a reference whilst removing the
1940 * object as we may end up waiting for and/or retiring the objects.
1941 * This might release the final reference (held by the active list)
1942 * and result in the object being freed from under us. This is
1943 * similar to the precautions the eviction code must take whilst
1944 * removing objects.
1945 *
1946 * Also note that although these lists do not hold a reference to
1947 * the object we can safely grab one here: The final object
1948 * unreferencing and the bound_list are both protected by the
1949 * dev->struct_mutex and so we won't ever be able to observe an
1950 * object on the bound_list with a reference count equals 0.
1926 1951 */
1927 INIT_LIST_HEAD(&still_bound_list);
1952 INIT_LIST_HEAD(&still_in_list);
1953 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1954 obj = list_first_entry(&dev_priv->mm.unbound_list,
1955 typeof(*obj), global_list);
1956 list_move_tail(&obj->global_list, &still_in_list);
1957
1958 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1959 continue;
1960
1961 drm_gem_object_reference(&obj->base);
1962
1963 if (i915_gem_object_put_pages(obj) == 0)
1964 count += obj->base.size >> PAGE_SHIFT;
1965
1966 drm_gem_object_unreference(&obj->base);
1967 }
1968 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1969
1970 INIT_LIST_HEAD(&still_in_list);
1928 1971 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1929 1972 struct i915_vma *vma, *v;
1930 1973
1931 1974 obj = list_first_entry(&dev_priv->mm.bound_list,
1932 1975 typeof(*obj), global_list);
1933 list_move_tail(&obj->global_list, &still_bound_list);
1976 list_move_tail(&obj->global_list, &still_in_list);
1934 1977
1935 1978 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1936 1979 continue;
1937 1980
1938 /*
1939 * Hold a reference whilst we unbind this object, as we may
1940 * end up waiting for and retiring requests. This might
1941 * release the final reference (held by the active list)
1942 * and result in the object being freed from under us.
1944 *
1945 * Note 1: Shrinking the bound list is special since only active
1946 * (and hence bound objects) can contain such limbo objects, so
1947 * we don't need special tricks for shrinking the unbound list.
1948 * The only other place where we have to be careful with active
1949 * objects suddenly disappearing due to retiring requests is the
1950 * eviction code.
1951 *
1952 * Note 2: Even though the bound list doesn't hold a reference
1953 * to the object we can safely grab one here: The final object
1954 * unreferencing and the bound_list are both protected by the
1955 * dev->struct_mutex and so we won't ever be able to observe an
1956 * object on the bound_list with a reference count equals 0.
1957 */
1958 1981 drm_gem_object_reference(&obj->base);
1959 1982
1960 1983 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
@@ -1966,7 +1989,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1966 1989
1967 1990 drm_gem_object_unreference(&obj->base);
1968 1991 }
1969 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
1992 list_splice(&still_in_list, &dev_priv->mm.bound_list);
1970 1993
1971 1994 return count;
1972 1995 }
@@ -1980,17 +2003,8 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1980 2003 static unsigned long
1981 2004 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1982 2005 {
1983 struct drm_i915_gem_object *obj, *next;
1984 long freed = 0;
1985
1986 2006 i915_gem_evict_everything(dev_priv->dev);
1987
2007 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
1988 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1989 global_list) {
1990 if (i915_gem_object_put_pages(obj) == 0)
1991 freed += obj->base.size >> PAGE_SHIFT;
1992 }
1993 return freed;
1994 2008 }
1995 2009
1996 2010 static int
@@ -2094,7 +2108,19 @@ err_pages:
2094 2108 page_cache_release(sg_page_iter_page(&sg_iter));
2095 2109 sg_free_table(st);
2096 2110 kfree(st);
2097 return PTR_ERR(page);
2111
2112 /* shmemfs first checks if there is enough memory to allocate the page
2113 * and reports ENOSPC should there be insufficient, along with the usual
2114 * ENOMEM for a genuine allocation failure.
2115 *
2116 * We use ENOSPC in our driver to mean that we have run out of aperture
2117 * space and so want to translate the error from shmemfs back to our
2118 * usual understanding of ENOMEM.
2119 */
2120 if (PTR_ERR(page) == -ENOSPC)
2121 return -ENOMEM;
2122 else
2123 return PTR_ERR(page);
2098 2124 }
2099 2125
2100 2126 /* Ensure that the associated pages are gathered from the backing storage
@@ -2131,7 +2157,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2131 2157
2132 2158 static void
2133 2159 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2134 struct intel_ring_buffer *ring)
2160 struct intel_engine_cs *ring)
2135 2161 {
2136 2162 struct drm_device *dev = obj->base.dev;
2137 2163 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2169,7 +2195,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2169 2195 }
2170 2196
2171 2197 void i915_vma_move_to_active(struct i915_vma *vma,
2172 struct intel_ring_buffer *ring)
2198 struct intel_engine_cs *ring)
2173 2199 {
2174 2200 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2175 2201 return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2207,11 +2233,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2207 2233 WARN_ON(i915_verify_lists(dev));
2208 2234 }
2209 2235
2236static void
2237i915_gem_object_retire(struct drm_i915_gem_object *obj)
2238{
2239 struct intel_engine_cs *ring = obj->ring;
2240
2241 if (ring == NULL)
2242 return;
2243
2244 if (i915_seqno_passed(ring->get_seqno(ring, true),
2245 obj->last_read_seqno))
2246 i915_gem_object_move_to_inactive(obj);
2247}
2248
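i915_gem_object_retire() leans on i915_seqno_passed(), which elsewhere in i915_drv.h is a wrap-safe signed comparison; in essence:

        static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        {
                /* true when seq1 is at or beyond seq2, even across wrap */
                return (int32_t)(seq1 - seq2) >= 0;
        }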
2210 2249 static int
2211 2250 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2212 2251 {
2213 2252 struct drm_i915_private *dev_priv = dev->dev_private;
2214 struct intel_ring_buffer *ring;
2253 struct intel_engine_cs *ring;
2215 2254 int ret, i, j;
2216 2255
2217 2256 /* Carefully retire all requests without writing to the rings */
@@ -2226,8 +2265,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2226 2265 for_each_ring(ring, dev_priv, i) {
2227 2266 intel_ring_init_seqno(ring, seqno);
2228 2267
2229 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2230 ring->sync_seqno[j] = 0;
2268 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2269 ring->semaphore.sync_seqno[j] = 0;
2231 2270 }
2232 2271
2233 2272 return 0;
@@ -2277,7 +2316,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2277 2316 return 0;
2278 2317 }
2279 2318
2280 int __i915_add_request(struct intel_ring_buffer *ring,
2319 int __i915_add_request(struct intel_engine_cs *ring,
2281 2320 struct drm_file *file,
2282 2321 struct drm_i915_gem_object *obj,
2283 2322 u32 *out_seqno)
@@ -2382,7 +2421,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2382 2421 }
2383 2422
2384 2423 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2385 const struct i915_hw_context *ctx)
2424 const struct intel_context *ctx)
2386 2425 {
2387 2426 unsigned long elapsed;
2388 2427
@@ -2395,8 +2434,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2395 2434 if (!i915_gem_context_is_default(ctx)) {
2396 2435 DRM_DEBUG("context hanging too fast, banning!\n");
2397 2436 return true;
2398 } else if (dev_priv->gpu_error.stop_rings == 0) {
2399 DRM_ERROR("gpu hanging too fast, banning!\n");
2437 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2438 if (i915_stop_ring_allow_warn(dev_priv))
2439 DRM_ERROR("gpu hanging too fast, banning!\n");
2400 2440 return true;
2401 2441 }
2402 2442 }
@@ -2405,7 +2445,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2405 2445 }
2406 2446
2407 2447 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2408 struct i915_hw_context *ctx,
2448 struct intel_context *ctx,
2409 2449 const bool guilty)
2410 2450 {
2411 2451 struct i915_ctx_hang_stats *hs;
@@ -2436,7 +2476,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2436 2476 }
2437 2477
2438 2478 struct drm_i915_gem_request *
2439 i915_gem_find_active_request(struct intel_ring_buffer *ring)
2479 i915_gem_find_active_request(struct intel_engine_cs *ring)
2440 2480 {
2441 2481 struct drm_i915_gem_request *request;
2442 2482 u32 completed_seqno;
@@ -2454,7 +2494,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
2454 2494 }
2455 2495
2456 2496 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2457 struct intel_ring_buffer *ring)
2497 struct intel_engine_cs *ring)
2458 2498 {
2459 2499 struct drm_i915_gem_request *request;
2460 2500 bool ring_hung;
@@ -2473,7 +2513,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2473 2513 }
2474 2514
2475 2515 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2476 struct intel_ring_buffer *ring)
2516 struct intel_engine_cs *ring)
2477 2517 {
2478 2518 while (!list_empty(&ring->active_list)) {
2479 2519 struct drm_i915_gem_object *obj;
@@ -2501,6 +2541,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2501 2541
2502 i915_gem_free_request(request); 2542 i915_gem_free_request(request);
2503 } 2543 }
2544
 2545 /* These may not have been flushed before the reset; do so now */
2546 kfree(ring->preallocated_lazy_request);
2547 ring->preallocated_lazy_request = NULL;
2548 ring->outstanding_lazy_seqno = 0;
2504} 2549}
2505 2550
2506void i915_gem_restore_fences(struct drm_device *dev) 2551void i915_gem_restore_fences(struct drm_device *dev)
@@ -2527,7 +2572,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
2527void i915_gem_reset(struct drm_device *dev) 2572void i915_gem_reset(struct drm_device *dev)
2528{ 2573{
2529 struct drm_i915_private *dev_priv = dev->dev_private; 2574 struct drm_i915_private *dev_priv = dev->dev_private;
2530 struct intel_ring_buffer *ring; 2575 struct intel_engine_cs *ring;
2531 int i; 2576 int i;
2532 2577
2533 /* 2578 /*
@@ -2541,8 +2586,6 @@ void i915_gem_reset(struct drm_device *dev)
2541 for_each_ring(ring, dev_priv, i) 2586 for_each_ring(ring, dev_priv, i)
2542 i915_gem_reset_ring_cleanup(dev_priv, ring); 2587 i915_gem_reset_ring_cleanup(dev_priv, ring);
2543 2588
2544 i915_gem_cleanup_ringbuffer(dev);
2545
2546 i915_gem_context_reset(dev); 2589 i915_gem_context_reset(dev);
2547 2590
2548 i915_gem_restore_fences(dev); 2591 i915_gem_restore_fences(dev);
@@ -2551,8 +2594,8 @@ void i915_gem_reset(struct drm_device *dev)
2551/** 2594/**
2552 * This function clears the request list as sequence numbers are passed. 2595 * This function clears the request list as sequence numbers are passed.
2553 */ 2596 */
2554static void 2597void
2555i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 2598i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2556{ 2599{
2557 uint32_t seqno; 2600 uint32_t seqno;
2558 2601
@@ -2597,7 +2640,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2597 * of tail of the request to update the last known position 2640 * of tail of the request to update the last known position
2598 * of the GPU head. 2641 * of the GPU head.
2599 */ 2642 */
2600 ring->last_retired_head = request->tail; 2643 ring->buffer->last_retired_head = request->tail;
2601 2644
2602 i915_gem_free_request(request); 2645 i915_gem_free_request(request);
2603 } 2646 }
@@ -2615,7 +2658,7 @@ bool
2615i915_gem_retire_requests(struct drm_device *dev) 2658i915_gem_retire_requests(struct drm_device *dev)
2616{ 2659{
2617 struct drm_i915_private *dev_priv = dev->dev_private; 2660 struct drm_i915_private *dev_priv = dev->dev_private;
2618 struct intel_ring_buffer *ring; 2661 struct intel_engine_cs *ring;
2619 bool idle = true; 2662 bool idle = true;
2620 int i; 2663 int i;
2621 2664
@@ -2709,7 +2752,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2709 struct drm_i915_private *dev_priv = dev->dev_private; 2752 struct drm_i915_private *dev_priv = dev->dev_private;
2710 struct drm_i915_gem_wait *args = data; 2753 struct drm_i915_gem_wait *args = data;
2711 struct drm_i915_gem_object *obj; 2754 struct drm_i915_gem_object *obj;
2712 struct intel_ring_buffer *ring = NULL; 2755 struct intel_engine_cs *ring = NULL;
2713 struct timespec timeout_stack, *timeout = NULL; 2756 struct timespec timeout_stack, *timeout = NULL;
2714 unsigned reset_counter; 2757 unsigned reset_counter;
2715 u32 seqno = 0; 2758 u32 seqno = 0;
@@ -2780,9 +2823,9 @@ out:
2780 */ 2823 */
2781int 2824int
2782i915_gem_object_sync(struct drm_i915_gem_object *obj, 2825i915_gem_object_sync(struct drm_i915_gem_object *obj,
2783 struct intel_ring_buffer *to) 2826 struct intel_engine_cs *to)
2784{ 2827{
2785 struct intel_ring_buffer *from = obj->ring; 2828 struct intel_engine_cs *from = obj->ring;
2786 u32 seqno; 2829 u32 seqno;
2787 int ret, idx; 2830 int ret, idx;
2788 2831
@@ -2795,7 +2838,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2795 idx = intel_ring_sync_index(from, to); 2838 idx = intel_ring_sync_index(from, to);
2796 2839
2797 seqno = obj->last_read_seqno; 2840 seqno = obj->last_read_seqno;
2798 if (seqno <= from->sync_seqno[idx]) 2841 if (seqno <= from->semaphore.sync_seqno[idx])
2799 return 0; 2842 return 0;
2800 2843
2801 ret = i915_gem_check_olr(obj->ring, seqno); 2844 ret = i915_gem_check_olr(obj->ring, seqno);
@@ -2803,13 +2846,13 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2803 return ret; 2846 return ret;
2804 2847
2805 trace_i915_gem_ring_sync_to(from, to, seqno); 2848 trace_i915_gem_ring_sync_to(from, to, seqno);
2806 ret = to->sync_to(to, from, seqno); 2849 ret = to->semaphore.sync_to(to, from, seqno);
2807 if (!ret) 2850 if (!ret)
2808 /* We use last_read_seqno because sync_to() 2851 /* We use last_read_seqno because sync_to()
2809 * might have just caused seqno wrap under 2852 * might have just caused seqno wrap under
2810 * the radar. 2853 * the radar.
2811 */ 2854 */
2812 from->sync_seqno[idx] = obj->last_read_seqno; 2855 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2813 2856
2814 return ret; 2857 return ret;
2815} 2858}
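
The `semaphore.sync_seqno[]` bookkeeping above exists to skip redundant inter-ring waits: once a ring has waited for seqno N from another ring, any request with an older seqno is already ordered. A condensed sketch of that check (ignoring seqno wraparound, which the driver handles separately; names and the ring count are illustrative):

#include <stdint.h>
#include <stdbool.h>

#define NUM_RINGS 5

static uint32_t sync_seqno[NUM_RINGS];  /* last seqno waited for, per source ring */

/* Returns true if a semaphore wait is needed; records the seqno so a
 * later request with an older seqno skips the wait entirely. */
static bool need_semaphore_wait(int from_idx, uint32_t seqno)
{
    if (seqno <= sync_seqno[from_idx])
        return false;                   /* already ordered behind an earlier wait */
    sync_seqno[from_idx] = seqno;
    return true;
}
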
@@ -2865,12 +2908,14 @@ int i915_vma_unbind(struct i915_vma *vma)
2865 * cause memory corruption through use-after-free. 2908 * cause memory corruption through use-after-free.
2866 */ 2909 */
2867 2910
2868 i915_gem_object_finish_gtt(obj); 2911 if (i915_is_ggtt(vma->vm)) {
2912 i915_gem_object_finish_gtt(obj);
2869 2913
2870 /* release the fence reg _after_ flushing */ 2914 /* release the fence reg _after_ flushing */
2871 ret = i915_gem_object_put_fence(obj); 2915 ret = i915_gem_object_put_fence(obj);
2872 if (ret) 2916 if (ret)
2873 return ret; 2917 return ret;
2918 }
2874 2919
2875 trace_i915_vma_unbind(vma); 2920 trace_i915_vma_unbind(vma);
2876 2921
@@ -2903,7 +2948,7 @@ int i915_vma_unbind(struct i915_vma *vma)
2903int i915_gpu_idle(struct drm_device *dev) 2948int i915_gpu_idle(struct drm_device *dev)
2904{ 2949{
2905 struct drm_i915_private *dev_priv = dev->dev_private; 2950 struct drm_i915_private *dev_priv = dev->dev_private;
2906 struct intel_ring_buffer *ring; 2951 struct intel_engine_cs *ring;
2907 int ret, i; 2952 int ret, i;
2908 2953
2909 /* Flush everything onto the inactive list. */ 2954 /* Flush everything onto the inactive list. */
@@ -3144,6 +3189,9 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3144 3189
3145 fence = &dev_priv->fence_regs[obj->fence_reg]; 3190 fence = &dev_priv->fence_regs[obj->fence_reg];
3146 3191
3192 if (WARN_ON(fence->pin_count))
3193 return -EBUSY;
3194
3147 i915_gem_object_fence_lost(obj); 3195 i915_gem_object_fence_lost(obj);
3148 i915_gem_object_update_fence(obj, fence, false); 3196 i915_gem_object_update_fence(obj, fence, false);
3149 3197
@@ -3548,6 +3596,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3548 if (ret) 3596 if (ret)
3549 return ret; 3597 return ret;
3550 3598
3599 i915_gem_object_retire(obj);
3551 i915_gem_object_flush_cpu_write_domain(obj, false); 3600 i915_gem_object_flush_cpu_write_domain(obj, false);
3552 3601
3553 /* Serialise direct access to this object with the barriers for 3602 /* Serialise direct access to this object with the barriers for
@@ -3646,6 +3695,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3646 * in obj->write_domain and have been skipping the clflushes. 3695 * in obj->write_domain and have been skipping the clflushes.
3647 * Just set it to the CPU cache for now. 3696 * Just set it to the CPU cache for now.
3648 */ 3697 */
3698 i915_gem_object_retire(obj);
3649 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 3699 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3650 3700
3651 old_read_domains = obj->base.read_domains; 3701 old_read_domains = obj->base.read_domains;
@@ -3743,6 +3793,15 @@ unlock:
3743 3793
3744static bool is_pin_display(struct drm_i915_gem_object *obj) 3794static bool is_pin_display(struct drm_i915_gem_object *obj)
3745{ 3795{
3796 struct i915_vma *vma;
3797
3798 if (list_empty(&obj->vma_list))
3799 return false;
3800
3801 vma = i915_gem_obj_to_ggtt(obj);
3802 if (!vma)
3803 return false;
3804
3746 /* There are 3 sources that pin objects: 3805 /* There are 3 sources that pin objects:
3747 * 1. The display engine (scanouts, sprites, cursors); 3806 * 1. The display engine (scanouts, sprites, cursors);
3748 * 2. Reservations for execbuffer; 3807 * 2. Reservations for execbuffer;
@@ -3754,7 +3813,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
3754 * subtracting the potential reference by the user, any pin_count 3813 * subtracting the potential reference by the user, any pin_count
3755 * remains, it must be due to another use by the display engine. 3814 * remains, it must be due to another use by the display engine.
3756 */ 3815 */
3757 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count; 3816 return vma->pin_count - !!obj->user_pin_count;
3758} 3817}
3759 3818
3760/* 3819/*
@@ -3765,9 +3824,10 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
3765int 3824int
3766i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3825i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3767 u32 alignment, 3826 u32 alignment,
3768 struct intel_ring_buffer *pipelined) 3827 struct intel_engine_cs *pipelined)
3769{ 3828{
3770 u32 old_read_domains, old_write_domain; 3829 u32 old_read_domains, old_write_domain;
3830 bool was_pin_display;
3771 int ret; 3831 int ret;
3772 3832
3773 if (pipelined != obj->ring) { 3833 if (pipelined != obj->ring) {
@@ -3779,6 +3839,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3779 /* Mark the pin_display early so that we account for the 3839 /* Mark the pin_display early so that we account for the
3780 * display coherency whilst setting up the cache domains. 3840 * display coherency whilst setting up the cache domains.
3781 */ 3841 */
3842 was_pin_display = obj->pin_display;
3782 obj->pin_display = true; 3843 obj->pin_display = true;
3783 3844
3784 /* The display engine is not coherent with the LLC cache on gen6. As 3845 /* The display engine is not coherent with the LLC cache on gen6. As
@@ -3821,7 +3882,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3821 return 0; 3882 return 0;
3822 3883
3823err_unpin_display: 3884err_unpin_display:
3824 obj->pin_display = is_pin_display(obj); 3885 WARN_ON(was_pin_display != is_pin_display(obj));
3886 obj->pin_display = was_pin_display;
3825 return ret; 3887 return ret;
3826} 3888}
3827 3889
@@ -3868,6 +3930,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3868 if (ret) 3930 if (ret)
3869 return ret; 3931 return ret;
3870 3932
3933 i915_gem_object_retire(obj);
3871 i915_gem_object_flush_gtt_write_domain(obj); 3934 i915_gem_object_flush_gtt_write_domain(obj);
3872 3935
3873 old_write_domain = obj->base.write_domain; 3936 old_write_domain = obj->base.write_domain;
@@ -3917,7 +3980,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3917 struct drm_i915_file_private *file_priv = file->driver_priv; 3980 struct drm_i915_file_private *file_priv = file->driver_priv;
3918 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 3981 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3919 struct drm_i915_gem_request *request; 3982 struct drm_i915_gem_request *request;
3920 struct intel_ring_buffer *ring = NULL; 3983 struct intel_engine_cs *ring = NULL;
3921 unsigned reset_counter; 3984 unsigned reset_counter;
3922 u32 seqno = 0; 3985 u32 seqno = 0;
3923 int ret; 3986 int ret;
@@ -3976,9 +4039,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3976 uint32_t alignment, 4039 uint32_t alignment,
3977 uint64_t flags) 4040 uint64_t flags)
3978{ 4041{
4042 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3979 struct i915_vma *vma; 4043 struct i915_vma *vma;
3980 int ret; 4044 int ret;
3981 4045
4046 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4047 return -ENODEV;
4048
3982 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) 4049 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3983 return -EINVAL; 4050 return -EINVAL;
3984 4051
@@ -4032,6 +4099,32 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4032 obj->pin_mappable = false; 4099 obj->pin_mappable = false;
4033} 4100}
4034 4101
4102bool
4103i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4104{
4105 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4106 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4107 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4108
4109 WARN_ON(!ggtt_vma ||
4110 dev_priv->fence_regs[obj->fence_reg].pin_count >
4111 ggtt_vma->pin_count);
4112 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4113 return true;
4114 } else
4115 return false;
4116}
4117
4118void
4119i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4120{
4121 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4122 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4123 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4124 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4125 }
4126}
4127
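
The new pin/unpin pair gives callers a way to keep a fence register from being stolen while they access the object through a fenced GTT mapping. A hedged usage sketch; the caller and both helper functions are hypothetical:

/* Hypothetical caller keeping the fence alive across a fenced access. */
if (i915_gem_object_pin_fence(obj)) {
    do_fenced_gtt_access(obj);          /* hypothetical helper */
    i915_gem_object_unpin_fence(obj);   /* balance the pin when done */
} else {
    do_unfenced_access(obj);            /* no fence register assigned */
}
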
4035int 4128int
4036i915_gem_pin_ioctl(struct drm_device *dev, void *data, 4129i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4037 struct drm_file *file) 4130 struct drm_file *file)
@@ -4292,6 +4385,30 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4292 return obj; 4385 return obj;
4293} 4386}
4294 4387
4388static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4389{
4390 /* If we are the last user of the backing storage (be it shmemfs
4391 * pages or stolen etc), we know that the pages are going to be
4392 * immediately released. In this case, we can then skip copying
4393 * back the contents from the GPU.
4394 */
4395
4396 if (obj->madv != I915_MADV_WILLNEED)
4397 return false;
4398
4399 if (obj->base.filp == NULL)
4400 return true;
4401
 4402 /* At first glance, this looks racy, but then again so would
4403 * userspace racing mmap against close. However, the first external
4404 * reference to the filp can only be obtained through the
4405 * i915_gem_mmap_ioctl() which safeguards us against the user
4406 * acquiring such a reference whilst we are in the middle of
4407 * freeing the object.
4408 */
4409 return atomic_long_read(&obj->base.filp->f_count) == 1;
4410}
4411
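
discard_backing_storage() keys the optimization off the file reference count: if the object's shmemfs file has exactly one reference left (ours), the pages die with the object and copying their contents back from the GPU is wasted work. A standalone sketch of that "last holder" test; the type here is a stand-in for struct file:

#include <stdatomic.h>
#include <stdbool.h>

struct backing_file {
    atomic_long f_count;                /* stand-in for struct file::f_count */
};

/* True when we hold the only remaining reference, so the backing pages
 * are guaranteed to be freed immediately after we drop it. */
static bool last_reference(struct backing_file *filp)
{
    return atomic_load(&filp->f_count) == 1;
}
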
4295void i915_gem_free_object(struct drm_gem_object *gem_obj) 4412void i915_gem_free_object(struct drm_gem_object *gem_obj)
4296{ 4413{
4297 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4414 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4329,6 +4446,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4329 4446
4330 if (WARN_ON(obj->pages_pin_count)) 4447 if (WARN_ON(obj->pages_pin_count))
4331 obj->pages_pin_count = 0; 4448 obj->pages_pin_count = 0;
4449 if (discard_backing_storage(obj))
4450 obj->madv = I915_MADV_DONTNEED;
4332 i915_gem_object_put_pages(obj); 4451 i915_gem_object_put_pages(obj);
4333 i915_gem_object_free_mmap_offset(obj); 4452 i915_gem_object_free_mmap_offset(obj);
4334 i915_gem_object_release_stolen(obj); 4453 i915_gem_object_release_stolen(obj);
@@ -4338,6 +4457,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4338 if (obj->base.import_attach) 4457 if (obj->base.import_attach)
4339 drm_prime_gem_destroy(&obj->base, NULL); 4458 drm_prime_gem_destroy(&obj->base, NULL);
4340 4459
4460 if (obj->ops->release)
4461 obj->ops->release(obj);
4462
4341 drm_gem_object_release(&obj->base); 4463 drm_gem_object_release(&obj->base);
4342 i915_gem_info_remove_obj(dev_priv, obj->base.size); 4464 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4343 4465
@@ -4371,6 +4493,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
4371 kfree(vma); 4493 kfree(vma);
4372} 4494}
4373 4495
4496static void
4497i915_gem_stop_ringbuffers(struct drm_device *dev)
4498{
4499 struct drm_i915_private *dev_priv = dev->dev_private;
4500 struct intel_engine_cs *ring;
4501 int i;
4502
4503 for_each_ring(ring, dev_priv, i)
4504 intel_stop_ring_buffer(ring);
4505}
4506
4374int 4507int
4375i915_gem_suspend(struct drm_device *dev) 4508i915_gem_suspend(struct drm_device *dev)
4376{ 4509{
@@ -4392,7 +4525,7 @@ i915_gem_suspend(struct drm_device *dev)
4392 i915_gem_evict_everything(dev); 4525 i915_gem_evict_everything(dev);
4393 4526
4394 i915_kernel_lost_context(dev); 4527 i915_kernel_lost_context(dev);
4395 i915_gem_cleanup_ringbuffer(dev); 4528 i915_gem_stop_ringbuffers(dev);
4396 4529
4397 /* Hack! Don't let anybody do execbuf while we don't control the chip. 4530 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4398 * We need to replace this with a semaphore, or something. 4531 * We need to replace this with a semaphore, or something.
@@ -4413,7 +4546,7 @@ err:
4413 return ret; 4546 return ret;
4414} 4547}
4415 4548
4416int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) 4549int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4417{ 4550{
4418 struct drm_device *dev = ring->dev; 4551 struct drm_device *dev = ring->dev;
4419 struct drm_i915_private *dev_priv = dev->dev_private; 4552 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4512,13 +4645,20 @@ static int i915_gem_init_rings(struct drm_device *dev)
4512 goto cleanup_blt_ring; 4645 goto cleanup_blt_ring;
4513 } 4646 }
4514 4647
4648 if (HAS_BSD2(dev)) {
4649 ret = intel_init_bsd2_ring_buffer(dev);
4650 if (ret)
4651 goto cleanup_vebox_ring;
4652 }
4515 4653
4516 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); 4654 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4517 if (ret) 4655 if (ret)
4518 goto cleanup_vebox_ring; 4656 goto cleanup_bsd2_ring;
4519 4657
4520 return 0; 4658 return 0;
4521 4659
4660cleanup_bsd2_ring:
4661 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4522cleanup_vebox_ring: 4662cleanup_vebox_ring:
4523 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); 4663 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4524cleanup_blt_ring: 4664cleanup_blt_ring:
@@ -4576,15 +4716,11 @@ i915_gem_init_hw(struct drm_device *dev)
4576 * the do_switch), but before enabling PPGTT. So don't move this. 4716 * the do_switch), but before enabling PPGTT. So don't move this.
4577 */ 4717 */
4578 ret = i915_gem_context_enable(dev_priv); 4718 ret = i915_gem_context_enable(dev_priv);
4579 if (ret) { 4719 if (ret && ret != -EIO) {
4580 DRM_ERROR("Context enable failed %d\n", ret); 4720 DRM_ERROR("Context enable failed %d\n", ret);
4581 goto err_out; 4721 i915_gem_cleanup_ringbuffer(dev);
4582 } 4722 }
4583 4723
4584 return 0;
4585
4586err_out:
4587 i915_gem_cleanup_ringbuffer(dev);
4588 return ret; 4724 return ret;
4589} 4725}
4590 4726
@@ -4597,11 +4733,13 @@ int i915_gem_init(struct drm_device *dev)
4597 4733
4598 if (IS_VALLEYVIEW(dev)) { 4734 if (IS_VALLEYVIEW(dev)) {
4599 /* VLVA0 (potential hack), BIOS isn't actually waking us */ 4735 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4600 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1); 4736 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4601 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10)) 4737 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4738 VLV_GTLC_ALLOWWAKEACK), 10))
4602 DRM_DEBUG_DRIVER("allow wake ack timed out\n"); 4739 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4603 } 4740 }
4604 4741
4742 i915_gem_init_userptr(dev);
4605 i915_gem_init_global_gtt(dev); 4743 i915_gem_init_global_gtt(dev);
4606 4744
4607 ret = i915_gem_context_init(dev); 4745 ret = i915_gem_context_init(dev);
@@ -4611,25 +4749,28 @@ int i915_gem_init(struct drm_device *dev)
4611 } 4749 }
4612 4750
4613 ret = i915_gem_init_hw(dev); 4751 ret = i915_gem_init_hw(dev);
4614 mutex_unlock(&dev->struct_mutex); 4752 if (ret == -EIO) {
4615 if (ret) { 4753 /* Allow ring initialisation to fail by marking the GPU as
4616 WARN_ON(dev_priv->mm.aliasing_ppgtt); 4754 * wedged. But we only want to do this where the GPU is angry,
 4617 i915_gem_context_fini(dev); 4755 * for all other failures, such as an allocation failure, bail.
4618 drm_mm_takedown(&dev_priv->gtt.base.mm); 4756 */
4619 return ret; 4757 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4758 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4759 ret = 0;
4620 } 4760 }
4761 mutex_unlock(&dev->struct_mutex);
4621 4762
4622 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */ 4763 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4623 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4764 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4624 dev_priv->dri1.allow_batchbuffer = 1; 4765 dev_priv->dri1.allow_batchbuffer = 1;
4625 return 0; 4766 return ret;
4626} 4767}
4627 4768
4628void 4769void
4629i915_gem_cleanup_ringbuffer(struct drm_device *dev) 4770i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4630{ 4771{
4631 struct drm_i915_private *dev_priv = dev->dev_private; 4772 struct drm_i915_private *dev_priv = dev->dev_private;
4632 struct intel_ring_buffer *ring; 4773 struct intel_engine_cs *ring;
4633 int i; 4774 int i;
4634 4775
4635 for_each_ring(ring, dev_priv, i) 4776 for_each_ring(ring, dev_priv, i)
@@ -4661,16 +4802,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4661 } 4802 }
4662 4803
4663 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); 4804 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4664 mutex_unlock(&dev->struct_mutex);
4665 4805
4666 ret = drm_irq_install(dev); 4806 ret = drm_irq_install(dev, dev->pdev->irq);
4667 if (ret) 4807 if (ret)
4668 goto cleanup_ringbuffer; 4808 goto cleanup_ringbuffer;
4809 mutex_unlock(&dev->struct_mutex);
4669 4810
4670 return 0; 4811 return 0;
4671 4812
4672cleanup_ringbuffer: 4813cleanup_ringbuffer:
4673 mutex_lock(&dev->struct_mutex);
4674 i915_gem_cleanup_ringbuffer(dev); 4814 i915_gem_cleanup_ringbuffer(dev);
4675 dev_priv->ums.mm_suspended = 1; 4815 dev_priv->ums.mm_suspended = 1;
4676 mutex_unlock(&dev->struct_mutex); 4816 mutex_unlock(&dev->struct_mutex);
@@ -4685,7 +4825,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4685 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4825 if (drm_core_check_feature(dev, DRIVER_MODESET))
4686 return 0; 4826 return 0;
4687 4827
4828 mutex_lock(&dev->struct_mutex);
4688 drm_irq_uninstall(dev); 4829 drm_irq_uninstall(dev);
4830 mutex_unlock(&dev->struct_mutex);
4689 4831
4690 return i915_gem_suspend(dev); 4832 return i915_gem_suspend(dev);
4691} 4833}
@@ -4704,7 +4846,7 @@ i915_gem_lastclose(struct drm_device *dev)
4704} 4846}
4705 4847
4706static void 4848static void
4707init_ring_lists(struct intel_ring_buffer *ring) 4849init_ring_lists(struct intel_engine_cs *ring)
4708{ 4850{
4709 INIT_LIST_HEAD(&ring->active_list); 4851 INIT_LIST_HEAD(&ring->active_list);
4710 INIT_LIST_HEAD(&ring->request_list); 4852 INIT_LIST_HEAD(&ring->request_list);
@@ -4752,7 +4894,7 @@ i915_gem_load(struct drm_device *dev)
4752 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4894 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4753 4895
4754 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4896 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4755 if (IS_GEN3(dev)) { 4897 if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
4756 I915_WRITE(MI_ARB_STATE, 4898 I915_WRITE(MI_ARB_STATE,
4757 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 4899 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4758 } 4900 }
@@ -4779,10 +4921,13 @@ i915_gem_load(struct drm_device *dev)
4779 4921
4780 dev_priv->mm.interruptible = true; 4922 dev_priv->mm.interruptible = true;
4781 4923
4782 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan; 4924 dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
4783 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count; 4925 dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
4784 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; 4926 dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
4785 register_shrinker(&dev_priv->mm.inactive_shrinker); 4927 register_shrinker(&dev_priv->mm.shrinker);
4928
4929 dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
4930 register_oom_notifier(&dev_priv->mm.oom_notifier);
4786} 4931}
4787 4932
4788void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4933void i915_gem_release(struct drm_device *dev, struct drm_file *file)
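
The shrinker rename and the new OOM notifier above follow the standard registration pattern: a struct shrinker with count/scan callbacks for ordinary memory pressure, plus a notifier_block invoked as a last resort before the OOM killer fires. A kernel-style sketch of that wiring (callback bodies elided, error handling omitted):

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc);
static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc);
static int my_oom(struct notifier_block *nb, unsigned long event, void *ptr);

static struct shrinker my_shrinker = {
    .count_objects = my_count,          /* how much could we free? */
    .scan_objects  = my_scan,           /* actually free up to sc->nr_to_scan */
    .seeks         = DEFAULT_SEEKS,
};

static struct notifier_block my_oom_nb = {
    .notifier_call = my_oom,            /* purge everything before the OOM kill */
};

static int __init my_init(void)
{
    register_shrinker(&my_shrinker);
    register_oom_notifier(&my_oom_nb);
    return 0;
}
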
@@ -4857,27 +5002,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4857#endif 5002#endif
4858} 5003}
4859 5004
5005static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5006{
5007 if (!mutex_trylock(&dev->struct_mutex)) {
5008 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5009 return false;
5010
5011 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5012 return false;
5013
5014 *unlock = false;
5015 } else
5016 *unlock = true;
5017
5018 return true;
5019}
5020
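
i915_gem_shrinker_lock() consolidates the trylock-or-steal dance that both shrinker entry points previously open-coded: if the trylock fails but the current task already holds struct_mutex (the shrinker was entered from an allocation inside the driver itself), proceed without locking and tell the caller not to unlock. A pthreads analogue of the pattern, with the owner tracked by hand since userspace mutexes do not record it (the kernel's mutex_is_locked_by() reads the mutex owner directly):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;                 /* tracked manually in this analogue */

static bool shrinker_lock(bool *unlock)
{
    if (pthread_mutex_trylock(&lock) != 0) {
        if (!pthread_equal(owner, pthread_self()))
            return false;               /* held by another task: back off */
        *unlock = false;                /* re-entered under our own lock */
        return true;
    }
    owner = pthread_self();
    *unlock = true;
    return true;
}
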
5021static int num_vma_bound(struct drm_i915_gem_object *obj)
5022{
5023 struct i915_vma *vma;
5024 int count = 0;
5025
5026 list_for_each_entry(vma, &obj->vma_list, vma_link)
5027 if (drm_mm_node_allocated(&vma->node))
5028 count++;
5029
5030 return count;
5031}
5032
4860static unsigned long 5033static unsigned long
4861i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) 5034i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
4862{ 5035{
4863 struct drm_i915_private *dev_priv = 5036 struct drm_i915_private *dev_priv =
4864 container_of(shrinker, 5037 container_of(shrinker, struct drm_i915_private, mm.shrinker);
4865 struct drm_i915_private,
4866 mm.inactive_shrinker);
4867 struct drm_device *dev = dev_priv->dev; 5038 struct drm_device *dev = dev_priv->dev;
4868 struct drm_i915_gem_object *obj; 5039 struct drm_i915_gem_object *obj;
4869 bool unlock = true;
4870 unsigned long count; 5040 unsigned long count;
5041 bool unlock;
4871 5042
4872 if (!mutex_trylock(&dev->struct_mutex)) { 5043 if (!i915_gem_shrinker_lock(dev, &unlock))
4873 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 5044 return 0;
4874 return 0;
4875
4876 if (dev_priv->mm.shrinker_no_lock_stealing)
4877 return 0;
4878
4879 unlock = false;
4880 }
4881 5045
4882 count = 0; 5046 count = 0;
4883 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 5047 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
@@ -4885,10 +5049,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4885 count += obj->base.size >> PAGE_SHIFT; 5049 count += obj->base.size >> PAGE_SHIFT;
4886 5050
4887 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 5051 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4888 if (obj->active) 5052 if (!i915_gem_obj_is_pinned(obj) &&
4889 continue; 5053 obj->pages_pin_count == num_vma_bound(obj))
4890
4891 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
4892 count += obj->base.size >> PAGE_SHIFT; 5054 count += obj->base.size >> PAGE_SHIFT;
4893 } 5055 }
4894 5056
@@ -4961,44 +5123,99 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4961} 5123}
4962 5124
4963static unsigned long 5125static unsigned long
4964i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) 5126i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
4965{ 5127{
4966 struct drm_i915_private *dev_priv = 5128 struct drm_i915_private *dev_priv =
4967 container_of(shrinker, 5129 container_of(shrinker, struct drm_i915_private, mm.shrinker);
4968 struct drm_i915_private,
4969 mm.inactive_shrinker);
4970 struct drm_device *dev = dev_priv->dev; 5130 struct drm_device *dev = dev_priv->dev;
4971 unsigned long freed; 5131 unsigned long freed;
4972 bool unlock = true; 5132 bool unlock;
4973 5133
4974 if (!mutex_trylock(&dev->struct_mutex)) { 5134 if (!i915_gem_shrinker_lock(dev, &unlock))
4975 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 5135 return SHRINK_STOP;
4976 return SHRINK_STOP;
4977
4978 if (dev_priv->mm.shrinker_no_lock_stealing)
4979 return SHRINK_STOP;
4980
4981 unlock = false;
4982 }
4983 5136
4984 freed = i915_gem_purge(dev_priv, sc->nr_to_scan); 5137 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
4985 if (freed < sc->nr_to_scan) 5138 if (freed < sc->nr_to_scan)
4986 freed += __i915_gem_shrink(dev_priv, 5139 freed += __i915_gem_shrink(dev_priv,
4987 sc->nr_to_scan - freed, 5140 sc->nr_to_scan - freed,
4988 false); 5141 false);
4989 if (freed < sc->nr_to_scan)
4990 freed += i915_gem_shrink_all(dev_priv);
4991
4992 if (unlock) 5142 if (unlock)
4993 mutex_unlock(&dev->struct_mutex); 5143 mutex_unlock(&dev->struct_mutex);
4994 5144
4995 return freed; 5145 return freed;
4996} 5146}
4997 5147
5148static int
5149i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5150{
5151 struct drm_i915_private *dev_priv =
5152 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5153 struct drm_device *dev = dev_priv->dev;
5154 struct drm_i915_gem_object *obj;
5155 unsigned long timeout = msecs_to_jiffies(5000) + 1;
5156 unsigned long pinned, bound, unbound, freed;
5157 bool was_interruptible;
5158 bool unlock;
5159
5160 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
5161 schedule_timeout_killable(1);
5162 if (timeout == 0) {
 5163 pr_err("Unable to purge GPU memory due to lock contention.\n");
5164 return NOTIFY_DONE;
5165 }
5166
5167 was_interruptible = dev_priv->mm.interruptible;
5168 dev_priv->mm.interruptible = false;
5169
5170 freed = i915_gem_shrink_all(dev_priv);
5171
5172 dev_priv->mm.interruptible = was_interruptible;
5173
5174 /* Because we may be allocating inside our own driver, we cannot
5175 * assert that there are no objects with pinned pages that are not
5176 * being pointed to by hardware.
5177 */
5178 unbound = bound = pinned = 0;
5179 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5180 if (!obj->base.filp) /* not backed by a freeable object */
5181 continue;
5182
5183 if (obj->pages_pin_count)
5184 pinned += obj->base.size;
5185 else
5186 unbound += obj->base.size;
5187 }
5188 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5189 if (!obj->base.filp)
5190 continue;
5191
5192 if (obj->pages_pin_count)
5193 pinned += obj->base.size;
5194 else
5195 bound += obj->base.size;
5196 }
5197
5198 if (unlock)
5199 mutex_unlock(&dev->struct_mutex);
5200
5201 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5202 freed, pinned);
5203 if (unbound || bound)
5204 pr_err("%lu and %lu bytes still available in the "
5205 "bound and unbound GPU page lists.\n",
5206 bound, unbound);
5207
5208 *(unsigned long *)ptr += freed;
5209 return NOTIFY_DONE;
5210}
5211
4998struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 5212struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
4999{ 5213{
5000 struct i915_vma *vma; 5214 struct i915_vma *vma;
5001 5215
5216 /* This WARN has probably outlived its usefulness (callers already
5217 * WARN if they don't find the GGTT vma they expect). When removing,
5218 * remember to remove the pre-check in is_pin_display() as well */
5002 if (WARN_ON(list_empty(&obj->vma_list))) 5219 if (WARN_ON(list_empty(&obj->vma_list)))
5003 return NULL; 5220 return NULL;
5004 5221
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d72db15afa02..3ffe308d5893 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -178,7 +178,7 @@ static int get_context_size(struct drm_device *dev)
178 178
179void i915_gem_context_free(struct kref *ctx_ref) 179void i915_gem_context_free(struct kref *ctx_ref)
180{ 180{
181 struct i915_hw_context *ctx = container_of(ctx_ref, 181 struct intel_context *ctx = container_of(ctx_ref,
182 typeof(*ctx), ref); 182 typeof(*ctx), ref);
183 struct i915_hw_ppgtt *ppgtt = NULL; 183 struct i915_hw_ppgtt *ppgtt = NULL;
184 184
@@ -199,7 +199,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
199} 199}
200 200
201static struct i915_hw_ppgtt * 201static struct i915_hw_ppgtt *
202create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx) 202create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
203{ 203{
204 struct i915_hw_ppgtt *ppgtt; 204 struct i915_hw_ppgtt *ppgtt;
205 int ret; 205 int ret;
@@ -218,12 +218,12 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
218 return ppgtt; 218 return ppgtt;
219} 219}
220 220
221static struct i915_hw_context * 221static struct intel_context *
222__create_hw_context(struct drm_device *dev, 222__create_hw_context(struct drm_device *dev,
223 struct drm_i915_file_private *file_priv) 223 struct drm_i915_file_private *file_priv)
224{ 224{
225 struct drm_i915_private *dev_priv = dev->dev_private; 225 struct drm_i915_private *dev_priv = dev->dev_private;
226 struct i915_hw_context *ctx; 226 struct intel_context *ctx;
227 int ret; 227 int ret;
228 228
229 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 229 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -240,7 +240,15 @@ __create_hw_context(struct drm_device *dev,
240 goto err_out; 240 goto err_out;
241 } 241 }
242 242
243 if (INTEL_INFO(dev)->gen >= 7) { 243 /*
244 * Try to make the context utilize L3 as well as LLC.
245 *
246 * On VLV we don't have L3 controls in the PTEs so we
247 * shouldn't touch the cache level, especially as that
248 * would make the object snooped which might have a
249 * negative performance impact.
250 */
251 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
244 ret = i915_gem_object_set_cache_level(ctx->obj, 252 ret = i915_gem_object_set_cache_level(ctx->obj,
245 I915_CACHE_L3_LLC); 253 I915_CACHE_L3_LLC);
246 /* Failure shouldn't ever happen this early */ 254 /* Failure shouldn't ever happen this early */
@@ -277,14 +285,14 @@ err_out:
277 * context state of the GPU for applications that don't utilize HW contexts, as 285 * context state of the GPU for applications that don't utilize HW contexts, as
278 * well as an idle case. 286 * well as an idle case.
279 */ 287 */
280static struct i915_hw_context * 288static struct intel_context *
281i915_gem_create_context(struct drm_device *dev, 289i915_gem_create_context(struct drm_device *dev,
282 struct drm_i915_file_private *file_priv, 290 struct drm_i915_file_private *file_priv,
283 bool create_vm) 291 bool create_vm)
284{ 292{
285 const bool is_global_default_ctx = file_priv == NULL; 293 const bool is_global_default_ctx = file_priv == NULL;
286 struct drm_i915_private *dev_priv = dev->dev_private; 294 struct drm_i915_private *dev_priv = dev->dev_private;
287 struct i915_hw_context *ctx; 295 struct intel_context *ctx;
288 int ret = 0; 296 int ret = 0;
289 297
290 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 298 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -356,8 +364,8 @@ void i915_gem_context_reset(struct drm_device *dev)
356 /* Prevent the hardware from restoring the last context (which hung) on 364 /* Prevent the hardware from restoring the last context (which hung) on
357 * the next switch */ 365 * the next switch */
358 for (i = 0; i < I915_NUM_RINGS; i++) { 366 for (i = 0; i < I915_NUM_RINGS; i++) {
359 struct intel_ring_buffer *ring = &dev_priv->ring[i]; 367 struct intel_engine_cs *ring = &dev_priv->ring[i];
360 struct i915_hw_context *dctx = ring->default_context; 368 struct intel_context *dctx = ring->default_context;
361 369
362 /* Do a fake switch to the default context */ 370 /* Do a fake switch to the default context */
363 if (ring->last_context == dctx) 371 if (ring->last_context == dctx)
@@ -383,7 +391,7 @@ void i915_gem_context_reset(struct drm_device *dev)
383int i915_gem_context_init(struct drm_device *dev) 391int i915_gem_context_init(struct drm_device *dev)
384{ 392{
385 struct drm_i915_private *dev_priv = dev->dev_private; 393 struct drm_i915_private *dev_priv = dev->dev_private;
386 struct i915_hw_context *ctx; 394 struct intel_context *ctx;
387 int i; 395 int i;
388 396
389 /* Init should only be called once per module load. Eventually the 397 /* Init should only be called once per module load. Eventually the
@@ -418,7 +426,7 @@ int i915_gem_context_init(struct drm_device *dev)
418void i915_gem_context_fini(struct drm_device *dev) 426void i915_gem_context_fini(struct drm_device *dev)
419{ 427{
420 struct drm_i915_private *dev_priv = dev->dev_private; 428 struct drm_i915_private *dev_priv = dev->dev_private;
421 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; 429 struct intel_context *dctx = dev_priv->ring[RCS].default_context;
422 int i; 430 int i;
423 431
424 if (dctx->obj) { 432 if (dctx->obj) {
@@ -441,10 +449,12 @@ void i915_gem_context_fini(struct drm_device *dev)
441 i915_gem_context_unreference(dctx); 449 i915_gem_context_unreference(dctx);
442 dev_priv->ring[RCS].last_context = NULL; 450 dev_priv->ring[RCS].last_context = NULL;
443 } 451 }
452
453 i915_gem_object_ggtt_unpin(dctx->obj);
444 } 454 }
445 455
446 for (i = 0; i < I915_NUM_RINGS; i++) { 456 for (i = 0; i < I915_NUM_RINGS; i++) {
447 struct intel_ring_buffer *ring = &dev_priv->ring[i]; 457 struct intel_engine_cs *ring = &dev_priv->ring[i];
448 458
449 if (ring->last_context) 459 if (ring->last_context)
450 i915_gem_context_unreference(ring->last_context); 460 i915_gem_context_unreference(ring->last_context);
@@ -453,13 +463,12 @@ void i915_gem_context_fini(struct drm_device *dev)
453 ring->last_context = NULL; 463 ring->last_context = NULL;
454 } 464 }
455 465
456 i915_gem_object_ggtt_unpin(dctx->obj);
457 i915_gem_context_unreference(dctx); 466 i915_gem_context_unreference(dctx);
458} 467}
459 468
460int i915_gem_context_enable(struct drm_i915_private *dev_priv) 469int i915_gem_context_enable(struct drm_i915_private *dev_priv)
461{ 470{
462 struct intel_ring_buffer *ring; 471 struct intel_engine_cs *ring;
463 int ret, i; 472 int ret, i;
464 473
465 /* This is the only place the aliasing PPGTT gets enabled, which means 474 /* This is the only place the aliasing PPGTT gets enabled, which means
@@ -486,11 +495,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
486 495
487static int context_idr_cleanup(int id, void *p, void *data) 496static int context_idr_cleanup(int id, void *p, void *data)
488{ 497{
489 struct i915_hw_context *ctx = p; 498 struct intel_context *ctx = p;
490
491 /* Ignore the default context because close will handle it */
492 if (i915_gem_context_is_default(ctx))
493 return 0;
494 499
495 i915_gem_context_unreference(ctx); 500 i915_gem_context_unreference(ctx);
496 return 0; 501 return 0;
@@ -499,17 +504,17 @@ static int context_idr_cleanup(int id, void *p, void *data)
499int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) 504int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
500{ 505{
501 struct drm_i915_file_private *file_priv = file->driver_priv; 506 struct drm_i915_file_private *file_priv = file->driver_priv;
507 struct intel_context *ctx;
502 508
503 idr_init(&file_priv->context_idr); 509 idr_init(&file_priv->context_idr);
504 510
505 mutex_lock(&dev->struct_mutex); 511 mutex_lock(&dev->struct_mutex);
506 file_priv->private_default_ctx = 512 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
507 i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
508 mutex_unlock(&dev->struct_mutex); 513 mutex_unlock(&dev->struct_mutex);
509 514
510 if (IS_ERR(file_priv->private_default_ctx)) { 515 if (IS_ERR(ctx)) {
511 idr_destroy(&file_priv->context_idr); 516 idr_destroy(&file_priv->context_idr);
512 return PTR_ERR(file_priv->private_default_ctx); 517 return PTR_ERR(ctx);
513 } 518 }
514 519
515 return 0; 520 return 0;
@@ -521,16 +526,14 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
521 526
522 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 527 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
523 idr_destroy(&file_priv->context_idr); 528 idr_destroy(&file_priv->context_idr);
524
525 i915_gem_context_unreference(file_priv->private_default_ctx);
526} 529}
527 530
528struct i915_hw_context * 531struct intel_context *
529i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) 532i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
530{ 533{
531 struct i915_hw_context *ctx; 534 struct intel_context *ctx;
532 535
533 ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); 536 ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
534 if (!ctx) 537 if (!ctx)
535 return ERR_PTR(-ENOENT); 538 return ERR_PTR(-ENOENT);
536 539
@@ -538,8 +541,8 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
538} 541}
539 542
540static inline int 543static inline int
541mi_set_context(struct intel_ring_buffer *ring, 544mi_set_context(struct intel_engine_cs *ring,
542 struct i915_hw_context *new_context, 545 struct intel_context *new_context,
543 u32 hw_flags) 546 u32 hw_flags)
544{ 547{
545 int ret; 548 int ret;
@@ -549,7 +552,7 @@ mi_set_context(struct intel_ring_buffer *ring,
549 * explicitly, so we rely on the value at ring init, stored in 552 * explicitly, so we rely on the value at ring init, stored in
550 * itlb_before_ctx_switch. 553 * itlb_before_ctx_switch.
551 */ 554 */
552 if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { 555 if (IS_GEN6(ring->dev)) {
553 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); 556 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
554 if (ret) 557 if (ret)
555 return ret; 558 return ret;
@@ -559,8 +562,8 @@ mi_set_context(struct intel_ring_buffer *ring,
559 if (ret) 562 if (ret)
560 return ret; 563 return ret;
561 564
562 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */ 565 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
563 if (IS_GEN7(ring->dev)) 566 if (INTEL_INFO(ring->dev)->gen >= 7)
564 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); 567 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
565 else 568 else
566 intel_ring_emit(ring, MI_NOOP); 569 intel_ring_emit(ring, MI_NOOP);
@@ -578,7 +581,7 @@ mi_set_context(struct intel_ring_buffer *ring,
578 */ 581 */
579 intel_ring_emit(ring, MI_NOOP); 582 intel_ring_emit(ring, MI_NOOP);
580 583
581 if (IS_GEN7(ring->dev)) 584 if (INTEL_INFO(ring->dev)->gen >= 7)
582 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); 585 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
583 else 586 else
584 intel_ring_emit(ring, MI_NOOP); 587 intel_ring_emit(ring, MI_NOOP);
@@ -588,11 +591,11 @@ mi_set_context(struct intel_ring_buffer *ring,
588 return ret; 591 return ret;
589} 592}
590 593
591static int do_switch(struct intel_ring_buffer *ring, 594static int do_switch(struct intel_engine_cs *ring,
592 struct i915_hw_context *to) 595 struct intel_context *to)
593{ 596{
594 struct drm_i915_private *dev_priv = ring->dev->dev_private; 597 struct drm_i915_private *dev_priv = ring->dev->dev_private;
595 struct i915_hw_context *from = ring->last_context; 598 struct intel_context *from = ring->last_context;
596 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to); 599 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
597 u32 hw_flags = 0; 600 u32 hw_flags = 0;
598 int ret, i; 601 int ret, i;
@@ -693,13 +696,19 @@ static int do_switch(struct intel_ring_buffer *ring,
693 i915_gem_context_unreference(from); 696 i915_gem_context_unreference(from);
694 } 697 }
695 698
696 to->is_initialized = true;
697
698done: 699done:
699 i915_gem_context_reference(to); 700 i915_gem_context_reference(to);
700 ring->last_context = to; 701 ring->last_context = to;
701 to->last_ring = ring; 702 to->last_ring = ring;
702 703
704 if (ring->id == RCS && !to->is_initialized && from == NULL) {
705 ret = i915_gem_render_state_init(ring);
706 if (ret)
707 DRM_ERROR("init render state: %d\n", ret);
708 }
709
710 to->is_initialized = true;
711
703 return 0; 712 return 0;
704 713
705unpin_out: 714unpin_out:
@@ -718,8 +727,8 @@ unpin_out:
 718 * it will have a refcount > 1. This allows us to destroy the context abstract 727
719 * object while letting the normal object tracking destroy the backing BO. 728 * object while letting the normal object tracking destroy the backing BO.
720 */ 729 */
721int i915_switch_context(struct intel_ring_buffer *ring, 730int i915_switch_context(struct intel_engine_cs *ring,
722 struct i915_hw_context *to) 731 struct intel_context *to)
723{ 732{
724 struct drm_i915_private *dev_priv = ring->dev->dev_private; 733 struct drm_i915_private *dev_priv = ring->dev->dev_private;
725 734
@@ -748,7 +757,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
748{ 757{
749 struct drm_i915_gem_context_create *args = data; 758 struct drm_i915_gem_context_create *args = data;
750 struct drm_i915_file_private *file_priv = file->driver_priv; 759 struct drm_i915_file_private *file_priv = file->driver_priv;
751 struct i915_hw_context *ctx; 760 struct intel_context *ctx;
752 int ret; 761 int ret;
753 762
754 if (!hw_context_enabled(dev)) 763 if (!hw_context_enabled(dev))
@@ -774,7 +783,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
774{ 783{
775 struct drm_i915_gem_context_destroy *args = data; 784 struct drm_i915_gem_context_destroy *args = data;
776 struct drm_i915_file_private *file_priv = file->driver_priv; 785 struct drm_i915_file_private *file_priv = file->driver_priv;
777 struct i915_hw_context *ctx; 786 struct intel_context *ctx;
778 int ret; 787 int ret;
779 788
780 if (args->ctx_id == DEFAULT_CONTEXT_ID) 789 if (args->ctx_id == DEFAULT_CONTEXT_ID)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9bb533e0d762..580aa42443ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -161,12 +161,8 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
161{ 161{
162 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); 162 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
163 struct drm_device *dev = obj->base.dev; 163 struct drm_device *dev = obj->base.dev;
164 int ret;
165
166 ret = i915_mutex_lock_interruptible(dev);
167 if (ret)
168 return;
169 164
165 mutex_lock(&dev->struct_mutex);
170 if (--obj->vmapping_count == 0) { 166 if (--obj->vmapping_count == 0) {
171 vunmap(obj->dma_buf_vmapping); 167 vunmap(obj->dma_buf_vmapping);
172 obj->dma_buf_vmapping = NULL; 168 obj->dma_buf_vmapping = NULL;
@@ -233,6 +229,14 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
233struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 229struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
234 struct drm_gem_object *gem_obj, int flags) 230 struct drm_gem_object *gem_obj, int flags)
235{ 231{
232 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
233
234 if (obj->ops->dmabuf_export) {
235 int ret = obj->ops->dmabuf_export(obj);
236 if (ret)
237 return ERR_PTR(ret);
238 }
239
236 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); 240 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
237} 241}
238 242
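
The new dmabuf_export hook lets an object type prepare (or veto) a prime export before the dma-buf is created. A hedged sketch of what an implementation might look like for a userptr-style object that must have its pages populated at export time; i915_gem_object_get_pages() is assumed to be the era's page-population helper:

/* Hypothetical userptr-style object: populate the pages up front so
 * the importer never sees an unbacked dma-buf. */
static int my_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
    return i915_gem_object_get_pages(obj); /* 0 on success, -errno aborts export */
}

static const struct drm_i915_gem_object_ops my_userptr_ops = {
    .dmabuf_export = my_userptr_dmabuf_export,
    /* .get_pages, .put_pages, .release as for any other object type */
};
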
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20fef6c50267..3a30133f93e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -265,10 +265,12 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
265 265
266static int 266static int
267relocate_entry_cpu(struct drm_i915_gem_object *obj, 267relocate_entry_cpu(struct drm_i915_gem_object *obj,
268 struct drm_i915_gem_relocation_entry *reloc) 268 struct drm_i915_gem_relocation_entry *reloc,
269 uint64_t target_offset)
269{ 270{
270 struct drm_device *dev = obj->base.dev; 271 struct drm_device *dev = obj->base.dev;
271 uint32_t page_offset = offset_in_page(reloc->offset); 272 uint32_t page_offset = offset_in_page(reloc->offset);
273 uint64_t delta = reloc->delta + target_offset;
272 char *vaddr; 274 char *vaddr;
273 int ret; 275 int ret;
274 276
@@ -278,7 +280,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
278 280
279 vaddr = kmap_atomic(i915_gem_object_get_page(obj, 281 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
280 reloc->offset >> PAGE_SHIFT)); 282 reloc->offset >> PAGE_SHIFT));
281 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 283 *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
282 284
283 if (INTEL_INFO(dev)->gen >= 8) { 285 if (INTEL_INFO(dev)->gen >= 8) {
284 page_offset = offset_in_page(page_offset + sizeof(uint32_t)); 286 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
@@ -289,7 +291,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
289 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); 291 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
290 } 292 }
291 293
292 *(uint32_t *)(vaddr + page_offset) = 0; 294 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
293 } 295 }
294 296
295 kunmap_atomic(vaddr); 297 kunmap_atomic(vaddr);
@@ -299,10 +301,12 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
299 301
300static int 302static int
301relocate_entry_gtt(struct drm_i915_gem_object *obj, 303relocate_entry_gtt(struct drm_i915_gem_object *obj,
302 struct drm_i915_gem_relocation_entry *reloc) 304 struct drm_i915_gem_relocation_entry *reloc,
305 uint64_t target_offset)
303{ 306{
304 struct drm_device *dev = obj->base.dev; 307 struct drm_device *dev = obj->base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
309 uint64_t delta = reloc->delta + target_offset;
306 uint32_t __iomem *reloc_entry; 310 uint32_t __iomem *reloc_entry;
307 void __iomem *reloc_page; 311 void __iomem *reloc_page;
308 int ret; 312 int ret;
@@ -321,7 +325,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
321 reloc->offset & PAGE_MASK); 325 reloc->offset & PAGE_MASK);
322 reloc_entry = (uint32_t __iomem *) 326 reloc_entry = (uint32_t __iomem *)
323 (reloc_page + offset_in_page(reloc->offset)); 327 (reloc_page + offset_in_page(reloc->offset));
324 iowrite32(reloc->delta, reloc_entry); 328 iowrite32(lower_32_bits(delta), reloc_entry);
325 329
326 if (INTEL_INFO(dev)->gen >= 8) { 330 if (INTEL_INFO(dev)->gen >= 8) {
327 reloc_entry += 1; 331 reloc_entry += 1;
@@ -334,7 +338,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
334 reloc_entry = reloc_page; 338 reloc_entry = reloc_page;
335 } 339 }
336 340
337 iowrite32(0, reloc_entry); 341 iowrite32(upper_32_bits(delta), reloc_entry);
338 } 342 }
339 343
340 io_mapping_unmap_atomic(reloc_page); 344 io_mapping_unmap_atomic(reloc_page);
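
With 64-bit addressing on gen8, relocation entries are two dwords, so both paths above now write the presumed address as lower and upper halves instead of a dword plus a zeroed upper word. A standalone sketch of the split (the real code additionally handles the pair straddling a page boundary):

#include <stdint.h>
#include <stdio.h>

static void write_reloc64(uint32_t *slot, uint64_t delta, uint64_t target_offset)
{
    uint64_t value = delta + target_offset;

    slot[0] = (uint32_t)(value & 0xffffffffu);  /* lower_32_bits(value) */
    slot[1] = (uint32_t)(value >> 32);          /* upper_32_bits(value) */
}

int main(void)
{
    uint32_t entry[2];

    write_reloc64(entry, 0x10, 0x100000000ull);
    printf("0x%x 0x%x\n", entry[0], entry[1]);  /* prints "0x10 0x1" */
    return 0;
}
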
@@ -351,7 +355,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
351 struct drm_gem_object *target_obj; 355 struct drm_gem_object *target_obj;
352 struct drm_i915_gem_object *target_i915_obj; 356 struct drm_i915_gem_object *target_i915_obj;
353 struct i915_vma *target_vma; 357 struct i915_vma *target_vma;
354 uint32_t target_offset; 358 uint64_t target_offset;
355 int ret; 359 int ret;
356 360
 357 /* we already hold a reference to all valid objects */ 361
@@ -429,11 +433,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
429 if (obj->active && in_atomic()) 433 if (obj->active && in_atomic())
430 return -EFAULT; 434 return -EFAULT;
431 435
432 reloc->delta += target_offset;
433 if (use_cpu_reloc(obj)) 436 if (use_cpu_reloc(obj))
434 ret = relocate_entry_cpu(obj, reloc); 437 ret = relocate_entry_cpu(obj, reloc, target_offset);
435 else 438 else
436 ret = relocate_entry_gtt(obj, reloc); 439 ret = relocate_entry_gtt(obj, reloc, target_offset);
437 440
438 if (ret) 441 if (ret)
439 return ret; 442 return ret;
@@ -541,7 +544,7 @@ need_reloc_mappable(struct i915_vma *vma)
541 544
542static int 545static int
543i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, 546i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
544 struct intel_ring_buffer *ring, 547 struct intel_engine_cs *ring,
545 bool *need_reloc) 548 bool *need_reloc)
546{ 549{
547 struct drm_i915_gem_object *obj = vma->obj; 550 struct drm_i915_gem_object *obj = vma->obj;
@@ -628,7 +631,7 @@ eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
628} 631}
629 632
630static int 633static int
631i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 634i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
632 struct list_head *vmas, 635 struct list_head *vmas,
633 bool *need_relocs) 636 bool *need_relocs)
634{ 637{
@@ -642,6 +645,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
642 if (list_empty(vmas)) 645 if (list_empty(vmas))
643 return 0; 646 return 0;
644 647
648 i915_gem_retire_requests_ring(ring);
649
645 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; 650 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
646 651
647 INIT_LIST_HEAD(&ordered_vmas); 652 INIT_LIST_HEAD(&ordered_vmas);
@@ -727,7 +732,7 @@ static int
727i915_gem_execbuffer_relocate_slow(struct drm_device *dev, 732i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
728 struct drm_i915_gem_execbuffer2 *args, 733 struct drm_i915_gem_execbuffer2 *args,
729 struct drm_file *file, 734 struct drm_file *file,
730 struct intel_ring_buffer *ring, 735 struct intel_engine_cs *ring,
731 struct eb_vmas *eb, 736 struct eb_vmas *eb,
732 struct drm_i915_gem_exec_object2 *exec) 737 struct drm_i915_gem_exec_object2 *exec)
733{ 738{
@@ -843,7 +848,7 @@ err:
843} 848}
844 849
845static int 850static int
846i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 851i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
847 struct list_head *vmas) 852 struct list_head *vmas)
848{ 853{
849 struct i915_vma *vma; 854 struct i915_vma *vma;
@@ -926,11 +931,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
926 return 0; 931 return 0;
927} 932}
928 933
929static struct i915_hw_context * 934static struct intel_context *
930i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 935i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
931 struct intel_ring_buffer *ring, const u32 ctx_id) 936 struct intel_engine_cs *ring, const u32 ctx_id)
932{ 937{
933 struct i915_hw_context *ctx = NULL; 938 struct intel_context *ctx = NULL;
934 struct i915_ctx_hang_stats *hs; 939 struct i915_ctx_hang_stats *hs;
935 940
936 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) 941 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
@@ -951,7 +956,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
951 956
952static void 957static void
953i915_gem_execbuffer_move_to_active(struct list_head *vmas, 958i915_gem_execbuffer_move_to_active(struct list_head *vmas,
954 struct intel_ring_buffer *ring) 959 struct intel_engine_cs *ring)
955{ 960{
956 struct i915_vma *vma; 961 struct i915_vma *vma;
957 962
@@ -974,6 +979,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
974 if (i915_gem_obj_ggtt_bound(obj) && 979 if (i915_gem_obj_ggtt_bound(obj) &&
975 i915_gem_obj_to_ggtt(obj)->pin_count) 980 i915_gem_obj_to_ggtt(obj)->pin_count)
976 intel_mark_fb_busy(obj, ring); 981 intel_mark_fb_busy(obj, ring);
982
983 /* update for the implicit flush after a batch */
984 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
977 } 985 }
978 986
979 trace_i915_gem_object_change_domain(obj, old_read, old_write); 987 trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -983,7 +991,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
983static void 991static void
984i915_gem_execbuffer_retire_commands(struct drm_device *dev, 992i915_gem_execbuffer_retire_commands(struct drm_device *dev,
985 struct drm_file *file, 993 struct drm_file *file,
986 struct intel_ring_buffer *ring, 994 struct intel_engine_cs *ring,
987 struct drm_i915_gem_object *obj) 995 struct drm_i915_gem_object *obj)
988{ 996{
989 /* Unconditionally force add_request to emit a full flush. */ 997 /* Unconditionally force add_request to emit a full flush. */
@@ -995,13 +1003,15 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
995 1003
996static int 1004static int
997i915_reset_gen7_sol_offsets(struct drm_device *dev, 1005i915_reset_gen7_sol_offsets(struct drm_device *dev,
998 struct intel_ring_buffer *ring) 1006 struct intel_engine_cs *ring)
999{ 1007{
1000 struct drm_i915_private *dev_priv = dev->dev_private; 1008 struct drm_i915_private *dev_priv = dev->dev_private;
1001 int ret, i; 1009 int ret, i;
1002 1010
1003 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) 1011 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1004 return 0; 1012 DRM_DEBUG("sol reset is gen7/rcs only\n");
1013 return -EINVAL;
1014 }
1005 1015
1006 ret = intel_ring_begin(ring, 4 * 3); 1016 ret = intel_ring_begin(ring, 4 * 3);
1007 if (ret) 1017 if (ret)
@@ -1018,6 +1028,37 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1018 return 0; 1028 return 0;
1019} 1029}
1020 1030
1031/**
1032 * Find one BSD ring to dispatch the corresponding BSD command.
1033 * The Ring ID is returned.
1034 */
1035static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1036 struct drm_file *file)
1037{
1038 struct drm_i915_private *dev_priv = dev->dev_private;
1039 struct drm_i915_file_private *file_priv = file->driver_priv;
1040
1041 /* Check whether the file_priv is using one ring */
1042 if (file_priv->bsd_ring)
1043 return file_priv->bsd_ring->id;
1044 else {
1045 /* If not, use the ping-pong mechanism to select one ring */
1046 int ring_id;
1047
1048 mutex_lock(&dev->struct_mutex);
1049 if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1050 ring_id = VCS;
1051 dev_priv->mm.bsd_ring_dispatch_index = 1;
1052 } else {
1053 ring_id = VCS2;
1054 dev_priv->mm.bsd_ring_dispatch_index = 0;
1055 }
1056 file_priv->bsd_ring = &dev_priv->ring[ring_id];
1057 mutex_unlock(&dev->struct_mutex);
1058 return ring_id;
1059 }
1060}
1061
1021static struct drm_i915_gem_object * 1062static struct drm_i915_gem_object *
1022eb_get_batch(struct eb_vmas *eb) 1063eb_get_batch(struct eb_vmas *eb)
1023{ 1064{
@@ -1047,11 +1088,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1047 struct eb_vmas *eb; 1088 struct eb_vmas *eb;
1048 struct drm_i915_gem_object *batch_obj; 1089 struct drm_i915_gem_object *batch_obj;
1049 struct drm_clip_rect *cliprects = NULL; 1090 struct drm_clip_rect *cliprects = NULL;
1050 struct intel_ring_buffer *ring; 1091 struct intel_engine_cs *ring;
1051 struct i915_hw_context *ctx; 1092 struct intel_context *ctx;
1052 struct i915_address_space *vm; 1093 struct i915_address_space *vm;
1053 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1094 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1054 u32 exec_start = args->batch_start_offset, exec_len; 1095 u64 exec_start = args->batch_start_offset, exec_len;
1055 u32 mask, flags; 1096 u32 mask, flags;
1056 int ret, mode, i; 1097 int ret, mode, i;
1057 bool need_relocs; 1098 bool need_relocs;
@@ -1073,7 +1114,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1073 if (args->flags & I915_EXEC_IS_PINNED) 1114 if (args->flags & I915_EXEC_IS_PINNED)
1074 flags |= I915_DISPATCH_PINNED; 1115 flags |= I915_DISPATCH_PINNED;
1075 1116
1076 if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) { 1117 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1077 DRM_DEBUG("execbuf with unknown ring: %d\n", 1118 DRM_DEBUG("execbuf with unknown ring: %d\n",
1078 (int)(args->flags & I915_EXEC_RING_MASK)); 1119 (int)(args->flags & I915_EXEC_RING_MASK));
1079 return -EINVAL; 1120 return -EINVAL;
@@ -1081,7 +1122,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1081 1122
1082 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) 1123 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1083 ring = &dev_priv->ring[RCS]; 1124 ring = &dev_priv->ring[RCS];
1084 else 1125 else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1126 if (HAS_BSD2(dev)) {
1127 int ring_id;
1128 ring_id = gen8_dispatch_bsd_ring(dev, file);
1129 ring = &dev_priv->ring[ring_id];
1130 } else
1131 ring = &dev_priv->ring[VCS];
1132 } else
1085 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1]; 1133 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1086 1134
1087 if (!intel_ring_initialized(ring)) { 1135 if (!intel_ring_initialized(ring)) {
@@ -1096,14 +1144,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1096 case I915_EXEC_CONSTANTS_REL_GENERAL: 1144 case I915_EXEC_CONSTANTS_REL_GENERAL:
1097 case I915_EXEC_CONSTANTS_ABSOLUTE: 1145 case I915_EXEC_CONSTANTS_ABSOLUTE:
1098 case I915_EXEC_CONSTANTS_REL_SURFACE: 1146 case I915_EXEC_CONSTANTS_REL_SURFACE:
1099 if (ring == &dev_priv->ring[RCS] && 1147 if (mode != 0 && ring != &dev_priv->ring[RCS]) {
1100 mode != dev_priv->relative_constants_mode) { 1148 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1101 if (INTEL_INFO(dev)->gen < 4) 1149 return -EINVAL;
1150 }
1151
1152 if (mode != dev_priv->relative_constants_mode) {
1153 if (INTEL_INFO(dev)->gen < 4) {
1154 DRM_DEBUG("no rel constants on pre-gen4\n");
1102 return -EINVAL; 1155 return -EINVAL;
1156 }
1103 1157
1104 if (INTEL_INFO(dev)->gen > 5 && 1158 if (INTEL_INFO(dev)->gen > 5 &&
1105 mode == I915_EXEC_CONSTANTS_REL_SURFACE) 1159 mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1160 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1106 return -EINVAL; 1161 return -EINVAL;
1162 }
1107 1163
1108 /* The HW changed the meaning on this bit on gen6 */ 1164 /* The HW changed the meaning on this bit on gen6 */
1109 if (INTEL_INFO(dev)->gen >= 6) 1165 if (INTEL_INFO(dev)->gen >= 6)
@@ -1151,6 +1207,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1151 ret = -EFAULT; 1207 ret = -EFAULT;
1152 goto pre_mutex_err; 1208 goto pre_mutex_err;
1153 } 1209 }
1210 } else {
1211 if (args->DR4 == 0xffffffff) {
1212 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1213 args->DR4 = 0;
1214 }
1215
1216 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1217 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1218 return -EINVAL;
1219 }
1154 } 1220 }
1155 1221
1156 intel_runtime_pm_get(dev_priv); 1222 intel_runtime_pm_get(dev_priv);
@@ -1170,7 +1236,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1170 mutex_unlock(&dev->struct_mutex); 1236 mutex_unlock(&dev->struct_mutex);
1171 ret = PTR_ERR(ctx); 1237 ret = PTR_ERR(ctx);
1172 goto pre_mutex_err; 1238 goto pre_mutex_err;
1173 } 1239 }
1174 1240
1175 i915_gem_context_reference(ctx); 1241 i915_gem_context_reference(ctx);
1176 1242
@@ -1180,6 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1180 1246
1181 eb = eb_create(args); 1247 eb = eb_create(args);
1182 if (eb == NULL) { 1248 if (eb == NULL) {
1249 i915_gem_context_unreference(ctx);
1183 mutex_unlock(&dev->struct_mutex); 1250 mutex_unlock(&dev->struct_mutex);
1184 ret = -ENOMEM; 1251 ret = -ENOMEM;
1185 goto pre_mutex_err; 1252 goto pre_mutex_err;
@@ -1430,6 +1497,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1430 return -EINVAL; 1497 return -EINVAL;
1431 } 1498 }
1432 1499
1500 if (args->rsvd2 != 0) {
1501 DRM_DEBUG("dirty rvsd2 field\n");
1502 return -EINVAL;
1503 }
1504
1433 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 1505 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1434 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 1506 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1435 if (exec2_list == NULL) 1507 if (exec2_list == NULL)
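
The gen8_dispatch_bsd_ring() addition above balances clients across the two BSD video rings: a file that has already been assigned a ring keeps using it, while first-time submitters alternate between VCS and VCS2 through a shared ping-pong index. A minimal standalone sketch of that policy, with illustrative stand-in types rather than the driver's own structures:

/* Hypothetical stand-ins for the file_priv/dev_priv state; the policy
 * mirrors the ping-pong selection in gen8_dispatch_bsd_ring(). */
enum bsd_ring { VCS_RING, VCS2_RING };

struct balancer { int next; };  /* shared index; the driver guards it with struct_mutex */
struct client   { int assigned; enum bsd_ring ring; };

static enum bsd_ring pick_bsd_ring(struct balancer *b, struct client *c)
{
        if (!c->assigned) {     /* first submission: take the next ring in turn */
                c->ring = b->next ? VCS2_RING : VCS_RING;
                b->next ^= 1;
                c->assigned = 1;
        }
        return c->ring;         /* later submissions stick to the same ring */
}
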
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5deb22864c52..eec820aec022 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,7 +30,8 @@
30#include "i915_trace.h" 30#include "i915_trace.h"
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); 33static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
34static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
34 35
35bool intel_enable_ppgtt(struct drm_device *dev, bool full) 36bool intel_enable_ppgtt(struct drm_device *dev, bool full)
36{ 37{
@@ -65,59 +66,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
65 return HAS_ALIASING_PPGTT(dev) ? 1 : 0; 66 return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
66} 67}
67 68
68#define GEN6_PPGTT_PD_ENTRIES 512
69#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
70typedef uint64_t gen8_gtt_pte_t;
71typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
72
73/* PPGTT stuff */
74#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
75#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
76
77#define GEN6_PDE_VALID (1 << 0)
78/* gen6+ has bit 11-4 for physical addr bit 39-32 */
79#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
80
81#define GEN6_PTE_VALID (1 << 0)
82#define GEN6_PTE_UNCACHED (1 << 1)
83#define HSW_PTE_UNCACHED (0)
84#define GEN6_PTE_CACHE_LLC (2 << 1)
85#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
86#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
87#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
88
89/* Cacheability Control is a 4-bit value. The low three bits are stored in *
90 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
91 */
92#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
93 (((bits) & 0x8) << (11 - 3)))
94#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
95#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
96#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
97#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
98#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
99#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
100
101#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
102#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
103
104/* GEN8 legacy style address is defined as a 3 level page table:
105 * 31:30 | 29:21 | 20:12 | 11:0
106 * PDPE | PDE | PTE | offset
107 * The difference as compared to normal x86 3 level page table is the PDPEs are
108 * programmed via register.
109 */
110#define GEN8_PDPE_SHIFT 30
111#define GEN8_PDPE_MASK 0x3
112#define GEN8_PDE_SHIFT 21
113#define GEN8_PDE_MASK 0x1ff
114#define GEN8_PTE_SHIFT 12
115#define GEN8_PTE_MASK 0x1ff
116
117#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
118#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
119#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
120#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
121 69
122static void ppgtt_bind_vma(struct i915_vma *vma, 70static void ppgtt_bind_vma(struct i915_vma *vma,
123 enum i915_cache_level cache_level, 71 enum i915_cache_level cache_level,
@@ -131,10 +79,19 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
131{ 79{
132 gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; 80 gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
133 pte |= addr; 81 pte |= addr;
134 if (level != I915_CACHE_NONE) 82
135 pte |= PPAT_CACHED_INDEX; 83 switch (level) {
136 else 84 case I915_CACHE_NONE:
137 pte |= PPAT_UNCACHED_INDEX; 85 pte |= PPAT_UNCACHED_INDEX;
86 break;
87 case I915_CACHE_WT:
88 pte |= PPAT_DISPLAY_ELLC_INDEX;
89 break;
90 default:
91 pte |= PPAT_CACHED_INDEX;
92 break;
93 }
94
138 return pte; 95 return pte;
139} 96}
140 97
@@ -197,9 +154,6 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
197 return pte; 154 return pte;
198} 155}
199 156
200#define BYT_PTE_WRITEABLE (1 << 1)
201#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
202
203static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 157static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
204 enum i915_cache_level level, 158 enum i915_cache_level level,
205 bool valid) 159 bool valid)
@@ -253,7 +207,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
253} 207}
254 208
255/* Broadwell Page Directory Pointer Descriptors */ 209/* Broadwell Page Directory Pointer Descriptors */
256static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, 210static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
257 uint64_t val, bool synchronous) 211 uint64_t val, bool synchronous)
258{ 212{
259 struct drm_i915_private *dev_priv = ring->dev->dev_private; 213 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -283,7 +237,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
283} 237}
284 238
285static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, 239static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
286 struct intel_ring_buffer *ring, 240 struct intel_engine_cs *ring,
287 bool synchronous) 241 bool synchronous)
288{ 242{
289 int i, ret; 243 int i, ret;
@@ -332,6 +286,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
332 num_entries--; 286 num_entries--;
333 } 287 }
334 288
289 if (!HAS_LLC(ppgtt->base.dev))
290 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
335 kunmap_atomic(pt_vaddr); 291 kunmap_atomic(pt_vaddr);
336 292
337 pte = 0; 293 pte = 0;
@@ -368,6 +324,8 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
368 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), 324 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
369 cache_level, true); 325 cache_level, true);
370 if (++pte == GEN8_PTES_PER_PAGE) { 326 if (++pte == GEN8_PTES_PER_PAGE) {
327 if (!HAS_LLC(ppgtt->base.dev))
328 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
371 kunmap_atomic(pt_vaddr); 329 kunmap_atomic(pt_vaddr);
372 pt_vaddr = NULL; 330 pt_vaddr = NULL;
373 if (++pde == GEN8_PDES_PER_PAGE) { 331 if (++pde == GEN8_PDES_PER_PAGE) {
@@ -377,8 +335,11 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
377 pte = 0; 335 pte = 0;
378 } 336 }
379 } 337 }
380 if (pt_vaddr) 338 if (pt_vaddr) {
339 if (!HAS_LLC(ppgtt->base.dev))
340 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
381 kunmap_atomic(pt_vaddr); 341 kunmap_atomic(pt_vaddr);
342 }
382} 343}
383 344
384static void gen8_free_page_tables(struct page **pt_pages) 345static void gen8_free_page_tables(struct page **pt_pages)
@@ -641,6 +602,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
641 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, 602 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
642 I915_CACHE_LLC); 603 I915_CACHE_LLC);
643 } 604 }
605 if (!HAS_LLC(ppgtt->base.dev))
606 drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
644 kunmap_atomic(pd_vaddr); 607 kunmap_atomic(pd_vaddr);
645 } 608 }
646 609
@@ -753,7 +716,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
753} 716}
754 717
755static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 718static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
756 struct intel_ring_buffer *ring, 719 struct intel_engine_cs *ring,
757 bool synchronous) 720 bool synchronous)
758{ 721{
759 struct drm_device *dev = ppgtt->base.dev; 722 struct drm_device *dev = ppgtt->base.dev;
@@ -797,7 +760,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
797} 760}
798 761
799static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 762static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
800 struct intel_ring_buffer *ring, 763 struct intel_engine_cs *ring,
801 bool synchronous) 764 bool synchronous)
802{ 765{
803 struct drm_device *dev = ppgtt->base.dev; 766 struct drm_device *dev = ppgtt->base.dev;
@@ -848,7 +811,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
848} 811}
849 812
850static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 813static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
851 struct intel_ring_buffer *ring, 814 struct intel_engine_cs *ring,
852 bool synchronous) 815 bool synchronous)
853{ 816{
854 struct drm_device *dev = ppgtt->base.dev; 817 struct drm_device *dev = ppgtt->base.dev;
@@ -869,7 +832,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
869{ 832{
870 struct drm_device *dev = ppgtt->base.dev; 833 struct drm_device *dev = ppgtt->base.dev;
871 struct drm_i915_private *dev_priv = dev->dev_private; 834 struct drm_i915_private *dev_priv = dev->dev_private;
872 struct intel_ring_buffer *ring; 835 struct intel_engine_cs *ring;
873 int j, ret; 836 int j, ret;
874 837
875 for_each_ring(ring, dev_priv, j) { 838 for_each_ring(ring, dev_priv, j) {
@@ -899,7 +862,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
899{ 862{
900 struct drm_device *dev = ppgtt->base.dev; 863 struct drm_device *dev = ppgtt->base.dev;
901 struct drm_i915_private *dev_priv = dev->dev_private; 864 struct drm_i915_private *dev_priv = dev->dev_private;
902 struct intel_ring_buffer *ring; 865 struct intel_engine_cs *ring;
903 uint32_t ecochk, ecobits; 866 uint32_t ecochk, ecobits;
904 int i; 867 int i;
905 868
@@ -938,7 +901,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
938{ 901{
939 struct drm_device *dev = ppgtt->base.dev; 902 struct drm_device *dev = ppgtt->base.dev;
940 struct drm_i915_private *dev_priv = dev->dev_private; 903 struct drm_i915_private *dev_priv = dev->dev_private;
941 struct intel_ring_buffer *ring; 904 struct intel_engine_cs *ring;
942 uint32_t ecochk, gab_ctl, ecobits; 905 uint32_t ecochk, gab_ctl, ecobits;
943 int i; 906 int i;
944 907
@@ -1067,8 +1030,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1067 1030
1068static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) 1031static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1069{ 1032{
1070#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
1071#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
1072 struct drm_device *dev = ppgtt->base.dev; 1033 struct drm_device *dev = ppgtt->base.dev;
1073 struct drm_i915_private *dev_priv = dev->dev_private; 1034 struct drm_i915_private *dev_priv = dev->dev_private;
1074 bool retried = false; 1035 bool retried = false;
@@ -1084,8 +1045,7 @@ alloc:
1084 &ppgtt->node, GEN6_PD_SIZE, 1045 &ppgtt->node, GEN6_PD_SIZE,
1085 GEN6_PD_ALIGN, 0, 1046 GEN6_PD_ALIGN, 0,
1086 0, dev_priv->gtt.base.total, 1047 0, dev_priv->gtt.base.total,
1087 DRM_MM_SEARCH_DEFAULT, 1048 DRM_MM_TOPDOWN);
1088 DRM_MM_CREATE_DEFAULT);
1089 if (ret == -ENOSPC && !retried) { 1049 if (ret == -ENOSPC && !retried) {
1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 1050 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1091 GEN6_PD_SIZE, GEN6_PD_ALIGN, 1051 GEN6_PD_SIZE, GEN6_PD_ALIGN,
@@ -1311,7 +1271,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
1311void i915_check_and_clear_faults(struct drm_device *dev) 1271void i915_check_and_clear_faults(struct drm_device *dev)
1312{ 1272{
1313 struct drm_i915_private *dev_priv = dev->dev_private; 1273 struct drm_i915_private *dev_priv = dev->dev_private;
1314 struct intel_ring_buffer *ring; 1274 struct intel_engine_cs *ring;
1315 int i; 1275 int i;
1316 1276
1317 if (INTEL_INFO(dev)->gen < 6) 1277 if (INTEL_INFO(dev)->gen < 6)
@@ -1386,7 +1346,11 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1386 1346
1387 1347
1388 if (INTEL_INFO(dev)->gen >= 8) { 1348 if (INTEL_INFO(dev)->gen >= 8) {
1389 gen8_setup_private_ppat(dev_priv); 1349 if (IS_CHERRYVIEW(dev))
1350 chv_setup_private_ppat(dev_priv);
1351 else
1352 bdw_setup_private_ppat(dev_priv);
1353
1390 return; 1354 return;
1391 } 1355 }
1392 1356
@@ -1438,7 +1402,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1438 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 1402 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1439 int i = 0; 1403 int i = 0;
1440 struct sg_page_iter sg_iter; 1404 struct sg_page_iter sg_iter;
1441 dma_addr_t addr; 1405 dma_addr_t addr = 0;
1442 1406
1443 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 1407 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1444 addr = sg_dma_address(sg_iter.sg) + 1408 addr = sg_dma_address(sg_iter.sg) +
@@ -1811,9 +1775,27 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1811 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 1775 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1812 if (bdw_gmch_ctl) 1776 if (bdw_gmch_ctl)
1813 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1777 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1778
1779#ifdef CONFIG_X86_32
1780 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
1781 if (bdw_gmch_ctl > 4)
1782 bdw_gmch_ctl = 4;
1783#endif
1784
1814 return bdw_gmch_ctl << 20; 1785 return bdw_gmch_ctl << 20;
1815} 1786}
1816 1787
1788static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
1789{
1790 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
1791 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
1792
1793 if (gmch_ctrl)
1794 return 1 << (20 + gmch_ctrl);
1795
1796 return 0;
1797}
1798
1817static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 1799static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
1818{ 1800{
1819 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 1801 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -1828,6 +1810,24 @@ static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1828 return bdw_gmch_ctl << 25; /* 32 MB units */ 1810 return bdw_gmch_ctl << 25; /* 32 MB units */
1829} 1811}
1830 1812
1813static size_t chv_get_stolen_size(u16 gmch_ctrl)
1814{
1815 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
1816 gmch_ctrl &= SNB_GMCH_GMS_MASK;
1817
1818 /*
1819 * 0x0 to 0x10: 32MB increments starting at 0MB
1820 * 0x11 to 0x16: 4MB increments starting at 8MB
1821 * 0x17 to 0x1d: 4MB increments starting at 36MB
1822 */
1823 if (gmch_ctrl < 0x11)
1824 return gmch_ctrl << 25;
1825 else if (gmch_ctrl < 0x17)
1826 return (gmch_ctrl - 0x11 + 2) << 22;
1827 else
1828 return (gmch_ctrl - 0x17 + 9) << 22;
1829}
1830
1831static int ggtt_probe_common(struct drm_device *dev, 1831static int ggtt_probe_common(struct drm_device *dev,
1832 size_t gtt_size) 1832 size_t gtt_size)
1833{ 1833{
@@ -1858,19 +1858,8 @@ static int ggtt_probe_common(struct drm_device *dev,
1858/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 1858/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1859 * bits. When using advanced contexts each context stores its own PAT, but 1859 * bits. When using advanced contexts each context stores its own PAT, but
1860 * writing this data shouldn't be harmful even in those cases. */ 1860 * writing this data shouldn't be harmful even in those cases. */
1861static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv) 1861static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
1862{ 1862{
1863#define GEN8_PPAT_UC (0<<0)
1864#define GEN8_PPAT_WC (1<<0)
1865#define GEN8_PPAT_WT (2<<0)
1866#define GEN8_PPAT_WB (3<<0)
1867#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
1868/* FIXME(BDW): Bspec is completely confused about cache control bits. */
1869#define GEN8_PPAT_LLC (1<<2)
1870#define GEN8_PPAT_LLCELLC (2<<2)
1871#define GEN8_PPAT_LLCeLLC (3<<2)
1872#define GEN8_PPAT_AGE(x) (x<<4)
1873#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
1874 uint64_t pat; 1863 uint64_t pat;
1875 1864
1876 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ 1865 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
@@ -1888,6 +1877,33 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
1888 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 1877 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1889} 1878}
1890 1879
1880static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
1881{
1882 uint64_t pat;
1883
1884 /*
1885 * Map WB on BDW to snooped on CHV.
1886 *
1887 * Only the snoop bit has meaning for CHV, the rest is
1888 * ignored.
1889 *
1890 * Note that the hardware enforces snooping for all page
1891 * table accesses. The snoop bit is actually ignored for
1892 * PDEs.
1893 */
1894 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1895 GEN8_PPAT(1, 0) |
1896 GEN8_PPAT(2, 0) |
1897 GEN8_PPAT(3, 0) |
1898 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1899 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1900 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1901 GEN8_PPAT(7, CHV_PPAT_SNOOP);
1902
1903 I915_WRITE(GEN8_PRIVATE_PAT, pat);
1904 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1905}
1906
1891static int gen8_gmch_probe(struct drm_device *dev, 1907static int gen8_gmch_probe(struct drm_device *dev,
1892 size_t *gtt_total, 1908 size_t *gtt_total,
1893 size_t *stolen, 1909 size_t *stolen,
@@ -1908,12 +1924,20 @@ static int gen8_gmch_probe(struct drm_device *dev,
1908 1924
1909 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1925 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1910 1926
1911 *stolen = gen8_get_stolen_size(snb_gmch_ctl); 1927 if (IS_CHERRYVIEW(dev)) {
1928 *stolen = chv_get_stolen_size(snb_gmch_ctl);
1929 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
1930 } else {
1931 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
1932 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1933 }
1912 1934
1913 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1914 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; 1935 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
1915 1936
1916 gen8_setup_private_ppat(dev_priv); 1937 if (IS_CHERRYVIEW(dev))
1938 chv_setup_private_ppat(dev_priv);
1939 else
1940 bdw_setup_private_ppat(dev_priv);
1917 1941
1918 ret = ggtt_probe_common(dev, gtt_size); 1942 ret = ggtt_probe_common(dev, gtt_size);
1919 1943
@@ -2043,6 +2067,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
2043 gtt->base.total >> 20); 2067 gtt->base.total >> 20);
2044 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); 2068 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
2045 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 2069 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
2070#ifdef CONFIG_INTEL_IOMMU
2071 if (intel_iommu_gfx_mapped)
2072 DRM_INFO("VT-d active for gfx access\n");
2073#endif
2046 /* 2074 /*
2047 * i915.enable_ppgtt is read-only, so do an early pass to validate the 2075 * i915.enable_ppgtt is read-only, so do an early pass to validate the
2048 * user's requested state against the hardware/driver capabilities. We 2076 * user's requested state against the hardware/driver capabilities. We
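
The Cherryview probe path above decodes the stolen-memory and GGTT sizes differently from Broadwell. As a worked check of the piecewise encoding documented in chv_get_stolen_size(), here is the same decode as a small standalone program (plain C, outside the driver), printing a few sample register field values:

#include <stdio.h>

static unsigned long chv_stolen_bytes(unsigned gmch_ctrl)
{
        if (gmch_ctrl < 0x11)
                return (unsigned long)gmch_ctrl << 25;                /* 32MB steps from 0MB */
        else if (gmch_ctrl < 0x17)
                return (unsigned long)(gmch_ctrl - 0x11 + 2) << 22;   /* 4MB steps from 8MB */
        else
                return (unsigned long)(gmch_ctrl - 0x17 + 9) << 22;   /* 4MB steps from 36MB */
}

int main(void)
{
        /* expected: 0x01 -> 32MB, 0x11 -> 8MB, 0x12 -> 12MB, 0x17 -> 36MB */
        unsigned vals[] = { 0x01, 0x11, 0x12, 0x17 };
        unsigned i;

        for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
                printf("gmch_ctrl=0x%02x -> %luMB\n", vals[i],
                       chv_stolen_bytes(vals[i]) >> 20);
        return 0;
}
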
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
new file mode 100644
index 000000000000..1b96a06be3cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -0,0 +1,284 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Please try to maintain the following order within this file unless it makes
24 * sense to do otherwise. From top to bottom:
25 * 1. typedefs
26 * 2. #defines, and macros
27 * 3. structure definitions
28 * 4. function prototypes
29 *
30 * Within each section, please try to order by generation in ascending order,
31 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
32 */
33
34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__
36
37typedef uint32_t gen6_gtt_pte_t;
38typedef uint64_t gen8_gtt_pte_t;
39typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
40
41#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
42
43#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
44/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
45#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
46#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
47#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
48#define GEN6_PTE_CACHE_LLC (2 << 1)
49#define GEN6_PTE_UNCACHED (1 << 1)
50#define GEN6_PTE_VALID (1 << 0)
51
52#define GEN6_PPGTT_PD_ENTRIES 512
53#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
54#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
55#define GEN6_PDE_VALID (1 << 0)
56
57#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
58
59#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
60#define BYT_PTE_WRITEABLE (1 << 1)
61
62/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
63 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
64 */
65#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
66 (((bits) & 0x8) << (11 - 3)))
67#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
68#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
69#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
70#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
71#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
72#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
73#define HSW_PTE_UNCACHED (0)
74#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
75#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
76
77/* GEN8 legacy style address is defined as a 3 level page table:
78 * 31:30 | 29:21 | 20:12 | 11:0
79 * PDPE | PDE | PTE | offset
80 * The difference as compared to normal x86 3 level page table is the PDPEs are
81 * programmed via register.
82 */
83#define GEN8_PDPE_SHIFT 30
84#define GEN8_PDPE_MASK 0x3
85#define GEN8_PDE_SHIFT 21
86#define GEN8_PDE_MASK 0x1ff
87#define GEN8_PTE_SHIFT 12
88#define GEN8_PTE_MASK 0x1ff
89#define GEN8_LEGACY_PDPS 4
90#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
91#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
92
93#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
94#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
95#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
96#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
97
98#define CHV_PPAT_SNOOP (1<<6)
99#define GEN8_PPAT_AGE(x) (x<<4)
100#define GEN8_PPAT_LLCeLLC (3<<2)
101#define GEN8_PPAT_LLCELLC (2<<2)
102#define GEN8_PPAT_LLC (1<<2)
103#define GEN8_PPAT_WB (3<<0)
104#define GEN8_PPAT_WT (2<<0)
105#define GEN8_PPAT_WC (1<<0)
106#define GEN8_PPAT_UC (0<<0)
107#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
108#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
109
110enum i915_cache_level;
111/**
112 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
113 * VMA's presence cannot be guaranteed before binding, or after unbinding the
114 * object into/from the address space.
115 *
116 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
117 * will always be <= an object's lifetime. So object refcounting should cover us.
118 */
119struct i915_vma {
120 struct drm_mm_node node;
121 struct drm_i915_gem_object *obj;
122 struct i915_address_space *vm;
123
124 /** This object's place on the active/inactive lists */
125 struct list_head mm_list;
126
127 struct list_head vma_link; /* Link in the object's VMA list */
128
129 /** This vma's place in the batchbuffer or on the eviction list */
130 struct list_head exec_list;
131
132 /**
133 * Used for performing relocations during execbuffer insertion.
134 */
135 struct hlist_node exec_node;
136 unsigned long exec_handle;
137 struct drm_i915_gem_exec_object2 *exec_entry;
138
139 /**
140 * How many users have pinned this object in GTT space. The following
141 * users can each hold at most one reference: pwrite/pread, pin_ioctl
142 * (via user_pin_count), execbuffer (objects are not allowed multiple
143 * times for the same batchbuffer), and the framebuffer code. When
144 * switching/pageflipping, the framebuffer code has at most two buffers
145 * pinned per crtc.
146 *
147 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
148 * bits with absolutely no headroom. So use 4 bits. */
149 unsigned int pin_count:4;
150#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
151
152 /** Unmap an object from an address space. This usually consists of
153 * setting the valid PTE entries to a reserved scratch page. */
154 void (*unbind_vma)(struct i915_vma *vma);
155 /* Map an object into an address space with the given cache flags. */
156#define GLOBAL_BIND (1<<0)
157 void (*bind_vma)(struct i915_vma *vma,
158 enum i915_cache_level cache_level,
159 u32 flags);
160};
161
162struct i915_address_space {
163 struct drm_mm mm;
164 struct drm_device *dev;
165 struct list_head global_link;
166 unsigned long start; /* Start offset always 0 for dri2 */
167 size_t total; /* size addr space maps (ex. 2GB for ggtt) */
168
169 struct {
170 dma_addr_t addr;
171 struct page *page;
172 } scratch;
173
174 /**
175 * List of objects currently involved in rendering.
176 *
177 * Includes buffers having the contents of their GPU caches
178 * flushed, not necessarily primitives. last_rendering_seqno
179 * represents when the rendering involved will be completed.
180 *
181 * A reference is held on the buffer while on this list.
182 */
183 struct list_head active_list;
184
185 /**
186 * LRU list of objects which are not in the ringbuffer and
187 * are ready to unbind, but are still in the GTT.
188 *
189 * last_rendering_seqno is 0 while an object is in this list.
190 *
191 * A reference is not held on the buffer while on this list,
192 * as merely being GTT-bound shouldn't prevent its being
193 * freed, and we'll pull it off the list in the free path.
194 */
195 struct list_head inactive_list;
196
197 /* FIXME: Need a more generic return type */
198 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
199 enum i915_cache_level level,
200 bool valid); /* Create a valid PTE */
201 void (*clear_range)(struct i915_address_space *vm,
202 uint64_t start,
203 uint64_t length,
204 bool use_scratch);
205 void (*insert_entries)(struct i915_address_space *vm,
206 struct sg_table *st,
207 uint64_t start,
208 enum i915_cache_level cache_level);
209 void (*cleanup)(struct i915_address_space *vm);
210};
211
212/* The Graphics Translation Table is the way in which GEN hardware translates a
213 * Graphics Virtual Address into a Physical Address. In addition to the normal
214 * collateral associated with any va->pa translations GEN hardware also has a
215 * portion of the GTT which can be mapped by the CPU and remain both coherent
216 * and correct (in cases like swizzling). That region is referred to as GMADR in
217 * the spec.
218 */
219struct i915_gtt {
220 struct i915_address_space base;
221 size_t stolen_size; /* Total size of stolen memory */
222
223 unsigned long mappable_end; /* End offset that we can CPU map */
224 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
225 phys_addr_t mappable_base; /* PA of our GMADR */
226
227 /** "Graphics Stolen Memory" holds the global PTEs */
228 void __iomem *gsm;
229
230 bool do_idle_maps;
231
232 int mtrr;
233
234 /* global gtt ops */
235 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
236 size_t *stolen, phys_addr_t *mappable_base,
237 unsigned long *mappable_end);
238};
239
240struct i915_hw_ppgtt {
241 struct i915_address_space base;
242 struct kref ref;
243 struct drm_mm_node node;
244 unsigned num_pd_entries;
245 unsigned num_pd_pages; /* gen8+ */
246 union {
247 struct page **pt_pages;
248 struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
249 };
250 struct page *pd_pages;
251 union {
252 uint32_t pd_offset;
253 dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
254 };
255 union {
256 dma_addr_t *pt_dma_addr;
257 dma_addr_t *gen8_pt_dma_addr[4];
258 };
259
260 struct intel_context *ctx;
261
262 int (*enable)(struct i915_hw_ppgtt *ppgtt);
263 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
264 struct intel_engine_cs *ring,
265 bool synchronous);
266 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
267};
268
269int i915_gem_gtt_init(struct drm_device *dev);
270void i915_gem_init_global_gtt(struct drm_device *dev);
271void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
272 unsigned long mappable_end, unsigned long end);
273
274bool intel_enable_ppgtt(struct drm_device *dev, bool full);
275int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
276
277void i915_check_and_clear_faults(struct drm_device *dev);
278void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
279void i915_gem_restore_gtt_mappings(struct drm_device *dev);
280
281int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
282void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
283
284#endif
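
The new header documents the gen8 legacy address split (31:30 PDPE, 29:21 PDE, 20:12 PTE, 11:0 offset). A standalone illustration of how the GEN8_*_SHIFT/MASK values carve up a graphics virtual address, using local copies of the constants purely for demonstration:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the gen8 shift/mask values from i915_gem_gtt.h. */
#define PDPE_SHIFT 30
#define PDPE_MASK  0x3
#define PDE_SHIFT  21
#define PDE_MASK   0x1ff
#define PTE_SHIFT  12
#define PTE_MASK   0x1ff

int main(void)
{
        uint32_t gva = 0x5ea3d7c4;      /* arbitrary example address */

        /* 0x5ea3d7c4 -> pdpe=1, pde=245, pte=61, offset=0x7c4 */
        printf("pdpe=%u pde=%u pte=%u offset=0x%03x\n",
               (gva >> PDPE_SHIFT) & PDPE_MASK,
               (gva >> PDE_SHIFT) & PDE_MASK,
               (gva >> PTE_SHIFT) & PTE_MASK,
               gva & 0xfff);
        return 0;
}
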
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
new file mode 100644
index 000000000000..3521f998a178
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Mika Kuoppala <mika.kuoppala@intel.com>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_renderstate.h"
30
31struct i915_render_state {
32 struct drm_i915_gem_object *obj;
33 unsigned long ggtt_offset;
34 void *batch;
35 u32 size;
36 u32 len;
37};
38
39static struct i915_render_state *render_state_alloc(struct drm_device *dev)
40{
41 struct i915_render_state *so;
42 struct page *page;
43 int ret;
44
45 so = kzalloc(sizeof(*so), GFP_KERNEL);
46 if (!so)
47 return ERR_PTR(-ENOMEM);
48
49 so->obj = i915_gem_alloc_object(dev, 4096);
50 if (so->obj == NULL) {
51 ret = -ENOMEM;
52 goto free;
53 }
54 so->size = 4096;
55
56 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
57 if (ret)
58 goto free_gem;
59
60 BUG_ON(so->obj->pages->nents != 1);
61 page = sg_page(so->obj->pages->sgl);
62
63 so->batch = kmap(page);
64 if (!so->batch) {
65 ret = -ENOMEM;
66 goto unpin;
67 }
68
69 so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
70
71 return so;
72unpin:
73 i915_gem_object_ggtt_unpin(so->obj);
74free_gem:
75 drm_gem_object_unreference(&so->obj->base);
76free:
77 kfree(so);
78 return ERR_PTR(ret);
79}
80
81static void render_state_free(struct i915_render_state *so)
82{
83 kunmap(so->batch);
84 i915_gem_object_ggtt_unpin(so->obj);
85 drm_gem_object_unreference(&so->obj->base);
86 kfree(so);
87}
88
89static const struct intel_renderstate_rodata *
90render_state_get_rodata(struct drm_device *dev, const int gen)
91{
92 switch (gen) {
93 case 6:
94 return &gen6_null_state;
95 case 7:
96 return &gen7_null_state;
97 case 8:
98 return &gen8_null_state;
99 }
100
101 return NULL;
102}
103
104static int render_state_setup(const int gen,
105 const struct intel_renderstate_rodata *rodata,
106 struct i915_render_state *so)
107{
108 const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
109 u32 reloc_index = 0;
110 u32 * const d = so->batch;
111 unsigned int i = 0;
112 int ret;
113
114 if (!rodata || rodata->batch_items * 4 > so->size)
115 return -EINVAL;
116
117 ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
118 if (ret)
119 return ret;
120
121 while (i < rodata->batch_items) {
122 u32 s = rodata->batch[i];
123
124 if (reloc_index < rodata->reloc_items &&
125 i * 4 == rodata->reloc[reloc_index]) {
126
127 s += goffset & 0xffffffff;
128
129 /* We keep batch offsets max 32bit */
130 if (gen >= 8) {
131 if (i + 1 >= rodata->batch_items ||
132 rodata->batch[i + 1] != 0)
133 return -EINVAL;
134
135 d[i] = s;
136 i++;
137 s = (goffset & 0xffffffff00000000ull) >> 32;
138 }
139
140 reloc_index++;
141 }
142
143 d[i] = s;
144 i++;
145 }
146
147 ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
148 if (ret)
149 return ret;
150
151 if (rodata->reloc_items != reloc_index) {
152 DRM_ERROR("not all relocs resolved, %d out of %d\n",
153 reloc_index, rodata->reloc_items);
154 return -EINVAL;
155 }
156
157 so->len = rodata->batch_items * 4;
158
159 return 0;
160}
161
162int i915_gem_render_state_init(struct intel_engine_cs *ring)
163{
164 const int gen = INTEL_INFO(ring->dev)->gen;
165 struct i915_render_state *so;
166 const struct intel_renderstate_rodata *rodata;
167 int ret;
168
169 if (WARN_ON(ring->id != RCS))
170 return -ENOENT;
171
172 rodata = render_state_get_rodata(ring->dev, gen);
173 if (rodata == NULL)
174 return 0;
175
176 so = render_state_alloc(ring->dev);
177 if (IS_ERR(so))
178 return PTR_ERR(so);
179
180 ret = render_state_setup(gen, rodata, so);
181 if (ret)
182 goto out;
183
184 ret = ring->dispatch_execbuffer(ring,
185 i915_gem_obj_ggtt_offset(so->obj),
186 so->len,
187 I915_DISPATCH_SECURE);
188 if (ret)
189 goto out;
190
191 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
192
193 ret = __i915_add_request(ring, NULL, so->obj, NULL);
194 /* __i915_add_request moves object to inactive if it fails */
195out:
196 render_state_free(so);
197 return ret;
198}
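
render_state_setup() above patches the null-state batch relocations against the object's GGTT placement; on gen8+ each relocation spans two dwords, with the second dword (required to be zero in the rodata) receiving the upper 32 bits of the offset. A sketch of just that patching step, with hypothetical names rather than the driver's:

#include <stdint.h>

/* Sketch of the gen8+ two-dword relocation patch; 'batch' and the
 * argument names here are illustrative. */
static void patch_reloc64(uint32_t *batch, unsigned int i,
                          uint32_t value, uint64_t ggtt_offset)
{
        batch[i] = value + (uint32_t)(ggtt_offset & 0xffffffff);  /* low dword */
        batch[i + 1] = (uint32_t)(ggtt_offset >> 32);             /* high dword, 0 in the rodata */
}

Pre-gen8 a relocation is a single dword, so only the first store is performed.
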
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
new file mode 100644
index 000000000000..21ea92886a56
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -0,0 +1,711 @@
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "drmP.h"
26#include "i915_drm.h"
27#include "i915_drv.h"
28#include "i915_trace.h"
29#include "intel_drv.h"
30#include <linux/mmu_context.h>
31#include <linux/mmu_notifier.h>
32#include <linux/mempolicy.h>
33#include <linux/swap.h>
34
35#if defined(CONFIG_MMU_NOTIFIER)
36#include <linux/interval_tree.h>
37
38struct i915_mmu_notifier {
39 spinlock_t lock;
40 struct hlist_node node;
41 struct mmu_notifier mn;
42 struct rb_root objects;
43 struct drm_device *dev;
44 struct mm_struct *mm;
45 struct work_struct work;
46 unsigned long count;
47 unsigned long serial;
48};
49
50struct i915_mmu_object {
51 struct i915_mmu_notifier *mmu;
52 struct interval_tree_node it;
53 struct drm_i915_gem_object *obj;
54};
55
56static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
57 struct mm_struct *mm,
58 unsigned long start,
59 unsigned long end)
60{
61 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
62 struct interval_tree_node *it = NULL;
63 unsigned long serial = 0;
64
65 end--; /* interval ranges are inclusive, but invalidate range is exclusive */
66 while (start < end) {
67 struct drm_i915_gem_object *obj;
68
69 obj = NULL;
70 spin_lock(&mn->lock);
71 if (serial == mn->serial)
72 it = interval_tree_iter_next(it, start, end);
73 else
74 it = interval_tree_iter_first(&mn->objects, start, end);
75 if (it != NULL) {
76 obj = container_of(it, struct i915_mmu_object, it)->obj;
77 drm_gem_object_reference(&obj->base);
78 serial = mn->serial;
79 }
80 spin_unlock(&mn->lock);
81 if (obj == NULL)
82 return;
83
84 mutex_lock(&mn->dev->struct_mutex);
85 /* Cancel any active worker and force us to re-evaluate gup */
86 obj->userptr.work = NULL;
87
88 if (obj->pages != NULL) {
89 struct drm_i915_private *dev_priv = to_i915(mn->dev);
90 struct i915_vma *vma, *tmp;
91 bool was_interruptible;
92
93 was_interruptible = dev_priv->mm.interruptible;
94 dev_priv->mm.interruptible = false;
95
96 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
97 int ret = i915_vma_unbind(vma);
98 WARN_ON(ret && ret != -EIO);
99 }
100 WARN_ON(i915_gem_object_put_pages(obj));
101
102 dev_priv->mm.interruptible = was_interruptible;
103 }
104
105 start = obj->userptr.ptr + obj->base.size;
106
107 drm_gem_object_unreference(&obj->base);
108 mutex_unlock(&mn->dev->struct_mutex);
109 }
110}
111
112static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
113 .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
114};
115
116static struct i915_mmu_notifier *
117__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
118{
119 struct drm_i915_private *dev_priv = to_i915(dev);
120 struct i915_mmu_notifier *mmu;
121
122 /* Protected by dev->struct_mutex */
123 hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
124 if (mmu->mm == mm)
125 return mmu;
126
127 return NULL;
128}
129
130static struct i915_mmu_notifier *
131i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
132{
133 struct drm_i915_private *dev_priv = to_i915(dev);
134 struct i915_mmu_notifier *mmu;
135 int ret;
136
137 lockdep_assert_held(&dev->struct_mutex);
138
139 mmu = __i915_mmu_notifier_lookup(dev, mm);
140 if (mmu)
141 return mmu;
142
143 mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
144 if (mmu == NULL)
145 return ERR_PTR(-ENOMEM);
146
147 spin_lock_init(&mmu->lock);
148 mmu->dev = dev;
149 mmu->mn.ops = &i915_gem_userptr_notifier;
150 mmu->mm = mm;
151 mmu->objects = RB_ROOT;
152 mmu->count = 0;
153 mmu->serial = 0;
154
155 /* Protected by mmap_sem (write-lock) */
156 ret = __mmu_notifier_register(&mmu->mn, mm);
157 if (ret) {
158 kfree(mmu);
159 return ERR_PTR(ret);
160 }
161
162 /* Protected by dev->struct_mutex */
163 hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
164 return mmu;
165}
166
167static void
168__i915_mmu_notifier_destroy_worker(struct work_struct *work)
169{
170 struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
171 mmu_notifier_unregister(&mmu->mn, mmu->mm);
172 kfree(mmu);
173}
174
175static void
176__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
177{
178 lockdep_assert_held(&mmu->dev->struct_mutex);
179
180 /* Protected by dev->struct_mutex */
181 hash_del(&mmu->node);
182
183 /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
184 * We enter the function holding struct_mutex, therefore we need
185 * to drop our mutex prior to calling mmu_notifier_unregister in
186 * order to prevent lock inversion (and system-wide deadlock)
187 * between the mmap_sem and struct_mutex. Hence we defer the
188 * unregistration to a workqueue where we hold no locks.
189 */
190 INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
191 schedule_work(&mmu->work);
192}
193
194static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
195{
196 if (++mmu->serial == 0)
197 mmu->serial = 1;
198}
199
200static void
201i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
202 struct i915_mmu_object *mn)
203{
204 lockdep_assert_held(&mmu->dev->struct_mutex);
205
206 spin_lock(&mmu->lock);
207 interval_tree_remove(&mn->it, &mmu->objects);
208 __i915_mmu_notifier_update_serial(mmu);
209 spin_unlock(&mmu->lock);
210
211 /* Protected against _add() by dev->struct_mutex */
212 if (--mmu->count == 0)
213 __i915_mmu_notifier_destroy(mmu);
214}
215
216static int
217i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
218 struct i915_mmu_object *mn)
219{
220 struct interval_tree_node *it;
221 int ret;
222
223 ret = i915_mutex_lock_interruptible(mmu->dev);
224 if (ret)
225 return ret;
226
227 /* Make sure we drop the final active reference (and thereby
228 * remove the objects from the interval tree) before we do
229 * the check for overlapping objects.
230 */
231 i915_gem_retire_requests(mmu->dev);
232
233 /* Disallow overlapping userptr objects */
234 spin_lock(&mmu->lock);
235 it = interval_tree_iter_first(&mmu->objects,
236 mn->it.start, mn->it.last);
237 if (it) {
238 struct drm_i915_gem_object *obj;
239
240 /* We only need to check the first object in the range as it
241 * either has cancelled gup work queued and we need to
242 * return back to the user to give time for the gup-workers
243 * to flush their object references upon which the object will
244 * be removed from the interval-tree, or the range is
245 * still in use by another client and the overlap is invalid.
246 */
247
248 obj = container_of(it, struct i915_mmu_object, it)->obj;
249 ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
250 } else {
251 interval_tree_insert(&mn->it, &mmu->objects);
252 __i915_mmu_notifier_update_serial(mmu);
253 ret = 0;
254 }
255 spin_unlock(&mmu->lock);
256 mutex_unlock(&mmu->dev->struct_mutex);
257
258 return ret;
259}
260
261static void
262i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
263{
264 struct i915_mmu_object *mn;
265
266 mn = obj->userptr.mn;
267 if (mn == NULL)
268 return;
269
270 i915_mmu_notifier_del(mn->mmu, mn);
271 obj->userptr.mn = NULL;
272}
273
274static int
275i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
276 unsigned flags)
277{
278 struct i915_mmu_notifier *mmu;
279 struct i915_mmu_object *mn;
280 int ret;
281
282 if (flags & I915_USERPTR_UNSYNCHRONIZED)
283 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
284
285 down_write(&obj->userptr.mm->mmap_sem);
286 ret = i915_mutex_lock_interruptible(obj->base.dev);
287 if (ret == 0) {
288 mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
289 if (!IS_ERR(mmu))
290 mmu->count++; /* preemptive add to act as a refcount */
291 else
292 ret = PTR_ERR(mmu);
293 mutex_unlock(&obj->base.dev->struct_mutex);
294 }
295 up_write(&obj->userptr.mm->mmap_sem);
296 if (ret)
297 return ret;
298
299 mn = kzalloc(sizeof(*mn), GFP_KERNEL);
300 if (mn == NULL) {
301 ret = -ENOMEM;
302 goto destroy_mmu;
303 }
304
305 mn->mmu = mmu;
306 mn->it.start = obj->userptr.ptr;
307 mn->it.last = mn->it.start + obj->base.size - 1;
308 mn->obj = obj;
309
310 ret = i915_mmu_notifier_add(mmu, mn);
311 if (ret)
312 goto free_mn;
313
314 obj->userptr.mn = mn;
315 return 0;
316
317free_mn:
318 kfree(mn);
319destroy_mmu:
320 mutex_lock(&obj->base.dev->struct_mutex);
321 if (--mmu->count == 0)
322 __i915_mmu_notifier_destroy(mmu);
323 mutex_unlock(&obj->base.dev->struct_mutex);
324 return ret;
325}
326
327#else
328
329static void
330i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
331{
332}
333
334static int
335i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
336 unsigned flags)
337{
338 if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
339 return -ENODEV;
340
341 if (!capable(CAP_SYS_ADMIN))
342 return -EPERM;
343
344 return 0;
345}
346#endif
347
348struct get_pages_work {
349 struct work_struct work;
350 struct drm_i915_gem_object *obj;
351 struct task_struct *task;
352};
353
354
355#if IS_ENABLED(CONFIG_SWIOTLB)
356#define swiotlb_active() swiotlb_nr_tbl()
357#else
358#define swiotlb_active() 0
359#endif
360
361static int
362st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
363{
364 struct scatterlist *sg;
365 int ret, n;
366
367 *st = kmalloc(sizeof(**st), GFP_KERNEL);
368 if (*st == NULL)
369 return -ENOMEM;
370
371 if (swiotlb_active()) {
372 ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
373 if (ret)
374 goto err;
375
376 for_each_sg((*st)->sgl, sg, num_pages, n)
377 sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
378 } else {
379 ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
380 0, num_pages << PAGE_SHIFT,
381 GFP_KERNEL);
382 if (ret)
383 goto err;
384 }
385
386 return 0;
387
388err:
389 kfree(*st);
390 *st = NULL;
391 return ret;
392}
393
394static void
395__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
396{
397 struct get_pages_work *work = container_of(_work, typeof(*work), work);
398 struct drm_i915_gem_object *obj = work->obj;
399 struct drm_device *dev = obj->base.dev;
400 const int num_pages = obj->base.size >> PAGE_SHIFT;
401 struct page **pvec;
402 int pinned, ret;
403
404 ret = -ENOMEM;
405 pinned = 0;
406
407 pvec = kmalloc(num_pages*sizeof(struct page *),
408 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
409 if (pvec == NULL)
410 pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
411 if (pvec != NULL) {
412 struct mm_struct *mm = obj->userptr.mm;
413
414 down_read(&mm->mmap_sem);
415 while (pinned < num_pages) {
416 ret = get_user_pages(work->task, mm,
417 obj->userptr.ptr + pinned * PAGE_SIZE,
418 num_pages - pinned,
419 !obj->userptr.read_only, 0,
420 pvec + pinned, NULL);
421 if (ret < 0)
422 break;
423
424 pinned += ret;
425 }
426 up_read(&mm->mmap_sem);
427 }
428
429 mutex_lock(&dev->struct_mutex);
430 if (obj->userptr.work != &work->work) {
431 ret = 0;
432 } else if (pinned == num_pages) {
433 ret = st_set_pages(&obj->pages, pvec, num_pages);
434 if (ret == 0) {
435 list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
436 pinned = 0;
437 }
438 }
439
440 obj->userptr.work = ERR_PTR(ret);
441 obj->userptr.workers--;
442 drm_gem_object_unreference(&obj->base);
443 mutex_unlock(&dev->struct_mutex);
444
445 release_pages(pvec, pinned, 0);
446 drm_free_large(pvec);
447
448 put_task_struct(work->task);
449 kfree(work);
450}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}
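
/*
 * Summary of the obj->userptr.work handshake above (a reading aid,
 * assuming the simple single-worker case):
 *
 *   work == NULL          fast gup missed; spawn a worker and return
 *                         -EAGAIN so userspace retries the operation
 *   work == &work->work   a worker is still in flight; keep returning
 *                         -EAGAIN
 *   work == ERR_PTR(err)  the worker failed; report err once and reset
 *                         work to NULL
 *
 * A successful worker populates obj->pages itself, so a later retry
 * finds the backing store already present and does not re-enter here.
 */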

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};
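
/*
 * Rough guide to when each hook fires (inferred from how the common
 * i915 gem code drives drm_i915_gem_object_ops; not spelled out in
 * this file): .get_pages when the bo first needs backing storage,
 * .put_pages when that storage is dropped again, .dmabuf_export on
 * PRIME export, and .release on final object free.
 */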

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It cannot overlap any other userptr object in the same address space.
 * 3. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 4. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 5. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
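
/*
 * A minimal userspace sketch of the ioctl above. Illustrative only:
 * it assumes the struct drm_i915_gem_userptr and
 * DRM_IOCTL_I915_GEM_USERPTR definitions from the uapi header
 * i915_drm.h, and elides error handling.
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, sz);	// restriction 1: page aligned
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = sz;		// likewise a multiple of page size
 *	arg.flags = 0;			// READ_ONLY returns -ENODEV for now
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *	// on success, arg.handle names a snoopable bo backed by ptr
 */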

int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}
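
/*
 * Note: hash_init() above assumes dev_priv->mmu_notifiers is a
 * hashtable declared elsewhere in this series (presumably via
 * DECLARE_HASHTABLE() in i915_drv.h); the declaration is not part
 * of this file.
 */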
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 12f1d43b2d68..87ec60e181a7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -42,6 +42,7 @@ static const char *ring_str(int ring)
 	case VCS: return "bsd";
 	case BCS: return "blt";
 	case VECS: return "vebox";
+	case VCS2: return "bsd2";
 	default: return "";
 	}
 }
@@ -204,6 +205,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	err_puts(m, tiling_flag(err->tiling));
 	err_puts(m, dirty_flag(err->dirty));
 	err_puts(m, purgeable_flag(err->purgeable));
+	err_puts(m, err->userptr ? " userptr" : "");
 	err_puts(m, err->ring != -1 ? " " : "");
 	err_puts(m, ring_str(err->ring));
 	err_puts(m, i915_cache_level_str(err->cache_level));
@@ -257,7 +259,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
 	}
 	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
-	err_printf(m, "  FADDR: 0x%08x\n", ring->faddr);
+	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+		   lower_32_bits(ring->faddr));
 	if (INTEL_INFO(dev)->gen >= 6) {
 		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
 		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
@@ -452,16 +455,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
-			offset = 0;
-			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   obj->pages[0][elt],
-					   obj->pages[0][elt+1],
-					   obj->pages[0][elt+2],
-					   obj->pages[0][elt+3]);
-				offset += 16;
-			}
+			print_error_obj(m, obj);
 		}
 	}
 
@@ -648,6 +642,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->tiling = obj->tiling_mode;
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
+	err->userptr = obj->userptr.mm != NULL;
 	err->ring = obj->ring ? obj->ring->id : -1;
 	err->cache_level = obj->cache_level;
 }
@@ -752,7 +747,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
-				   struct intel_ring_buffer *ring,
+				   struct intel_engine_cs *ring,
 				   struct drm_i915_error_ring *ering)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -764,14 +759,14 @@ static void i915_record_ring_state(struct drm_device *dev,
 			= I915_READ(RING_SYNC_0(ring->mmio_base));
 		ering->semaphore_mboxes[1]
 			= I915_READ(RING_SYNC_1(ring->mmio_base));
-		ering->semaphore_seqno[0] = ring->sync_seqno[0];
-		ering->semaphore_seqno[1] = ring->sync_seqno[1];
+		ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+		ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
 	}
 
 	if (HAS_VEBOX(dev)) {
 		ering->semaphore_mboxes[2] =
 			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->sync_seqno[2];
+		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -781,8 +776,10 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
 		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
 		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
 			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+		}
 		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
@@ -828,8 +825,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->head;
-	ering->cpu_ring_tail = ring->tail;
+	ering->cpu_ring_head = ring->buffer->head;
+	ering->cpu_ring_tail = ring->buffer->tail;
 
 	ering->hangcheck_score = ring->hangcheck.score;
 	ering->hangcheck_action = ring->hangcheck.action;
@@ -862,7 +859,7 @@ static void i915_record_ring_state(struct drm_device *dev,
 }
 
 
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
@@ -875,10 +872,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
-			ering->ctx = i915_error_object_create_sized(dev_priv,
-								    obj,
-								    &dev_priv->gtt.base,
-								    1);
+			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
 			break;
 		}
 	}
@@ -892,7 +886,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 	int i, count;
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
 		if (ring->dev == NULL)
 			continue;
@@ -936,7 +930,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->obj);
+			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
 		if (ring->status_page.obj)
 			error->ring[i].hws_page =
@@ -1037,7 +1031,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
 	struct drm_device *dev = dev_priv->dev;
-	int pipe;
 
 	/* General organization
 	 * 1. Registers specific to a single generation
@@ -1062,9 +1055,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 		error->gfx_mode = I915_READ(GFX_MODE);
 	}
 
-	if (IS_GEN2(dev))
-		error->ier = I915_READ16(IER);
-
 	/* 2: Registers which belong to multiple generations */
 	if (INTEL_INFO(dev)->gen >= 7)
 		error->forcewake = I915_READ(FORCEWAKE_MT);
@@ -1088,9 +1078,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	if (HAS_PCH_SPLIT(dev))
 		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
 	else {
-		error->ier = I915_READ(IER);
-		for_each_pipe(pipe)
-			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+		if (IS_GEN2(dev))
+			error->ier = I915_READ16(IER);
+		else
+			error->ier = I915_READ(IER);
 	}
 
 	/* 4: Everything else */
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 3c59584161c2..2e0613e26251 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -208,7 +208,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
 		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
 
 	if (fn != NULL)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0b99de95593b..6f8017a7e937 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,17 +80,64 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
+/* IIR can theoretically queue up two events. Be paranoid. */
+#define GEN8_IRQ_RESET_NDX(type, which) do { \
+	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IMR(which)); \
+	I915_WRITE(GEN8_##type##_IER(which), 0); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+} while (0)
+
+#define GEN5_IRQ_RESET(type) do { \
+	I915_WRITE(type##IMR, 0xffffffff); \
+	POSTING_READ(type##IMR); \
+	I915_WRITE(type##IER, 0); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+} while (0)
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
+	u32 val = I915_READ(reg); \
+	if (val) { \
+		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
+		     (reg), val); \
+		I915_WRITE((reg), 0xffffffff); \
+		POSTING_READ(reg); \
+		I915_WRITE((reg), 0xffffffff); \
+		POSTING_READ(reg); \
+	} \
+} while (0)
+
+#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
+	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
+	POSTING_READ(GEN8_##type##_IER(which)); \
+} while (0)
+
+#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
+	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
+	I915_WRITE(type##IMR, (imr_val)); \
+	I915_WRITE(type##IER, (ier_val)); \
+	POSTING_READ(type##IER); \
+} while (0)
+
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.deimr &= ~mask;
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	if ((dev_priv->irq_mask & mask) != 0) {
 		dev_priv->irq_mask &= ~mask;
@@ -104,11 +151,8 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.deimr |= mask;
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	if ((dev_priv->irq_mask & mask) != mask) {
 		dev_priv->irq_mask |= mask;
@@ -129,13 +173,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
-					       interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	dev_priv->gt_irq_mask &= ~interrupt_mask;
 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
@@ -167,13 +206,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
-						    interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	new_val = dev_priv->pm_irq_mask;
 	new_val &= ~interrupt_mask;
@@ -214,6 +248,46 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
 	return true;
 }
 
+/**
+ * bdw_update_pm_irq - update GT interrupt 2
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ *
+ * Copied from the snb function, updated with relevant register offsets
+ */
+static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+			      uint32_t interrupt_mask,
+			      uint32_t enabled_irq_mask)
+{
+	uint32_t new_val;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
+		return;
+
+	new_val = dev_priv->pm_irq_mask;
+	new_val &= ~interrupt_mask;
+	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+	if (new_val != dev_priv->pm_irq_mask) {
+		dev_priv->pm_irq_mask = new_val;
+		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
+		POSTING_READ(GEN8_GT_IMR(2));
+	}
+}
+
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	bdw_update_pm_irq(dev_priv, mask, mask);
+}
+
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	bdw_update_pm_irq(dev_priv, mask, 0);
+}
+
 static bool cpt_can_enable_serr_int(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -232,16 +306,51 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
 	return true;
 }
 
-static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+void i9xx_check_fifo_underruns(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+	for_each_intel_crtc(dev, crtc) {
+		u32 reg = PIPESTAT(crtc->pipe);
+		u32 pipestat;
+
+		if (crtc->cpu_fifo_underrun_disabled)
+			continue;
+
+		pipestat = I915_READ(reg) & 0xffff0000;
+		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+			continue;
+
+		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+		POSTING_READ(reg);
+
+		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+					     enum pipe pipe,
+					     bool enable, bool old)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg = PIPESTAT(pipe);
-	u32 pipestat = I915_READ(reg) & 0x7fff0000;
+	u32 pipestat = I915_READ(reg) & 0xffff0000;
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-	POSTING_READ(reg);
+	if (enable) {
+		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+		POSTING_READ(reg);
+	} else {
+		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+	}
 }
 
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -258,7 +367,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 }
 
 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe, bool enable)
+						  enum pipe pipe,
+						  bool enable, bool old)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	if (enable) {
@@ -269,15 +379,12 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 
 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
 	} else {
-		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
-
-		/* Change the state _after_ we've read out the current one. */
 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
-		if (!was_enabled &&
-		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
-			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+		if (old &&
+		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
 				  pipe_name(pipe));
 		}
 	}
 }
@@ -313,14 +420,8 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled &&
-	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
-						interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	I915_WRITE(SDEIMR, sdeimr);
 	POSTING_READ(SDEIMR);
@@ -346,7 +447,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 					    enum transcoder pch_transcoder,
-					    bool enable)
+					    bool enable, bool old)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -359,16 +460,12 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 
 		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 	} else {
-		uint32_t tmp = I915_READ(SERR_INT);
-		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
-
-		/* Change the state _after_ we've read out the current one. */
 		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
-		if (!was_enabled &&
-		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
-			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+		if (old && I915_READ(SERR_INT) &
+		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
 				  transcoder_name(pch_transcoder));
 		}
 	}
 }
@@ -387,34 +484,29 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  *
  * Returns the previous state of underrun reporting.
  */
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 					     enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	bool ret;
+	bool old;
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	ret = !intel_crtc->cpu_fifo_underrun_disabled;
-
-	if (enable == ret)
-		goto done;
-
+	old = !intel_crtc->cpu_fifo_underrun_disabled;
 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
-	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
-		i9xx_clear_fifo_underrun(dev, pipe);
+	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
-		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (IS_GEN8(dev))
 		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
-done:
-	return ret;
+	return old;
 }
 
 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
@@ -463,7 +555,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	unsigned long flags;
-	bool ret;
+	bool old;
 
 	/*
 	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
@@ -476,21 +568,16 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
-	ret = !intel_crtc->pch_fifo_underrun_disabled;
-
-	if (enable == ret)
-		goto done;
-
+	old = !intel_crtc->pch_fifo_underrun_disabled;
 	intel_crtc->pch_fifo_underrun_disabled = !enable;
 
 	if (HAS_PCH_IBX(dev))
 		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 	else
-		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
 
-done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-	return ret;
+	return old;
 }
 
 
@@ -503,8 +590,10 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
-			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
 		return;
 
 	if ((pipestat & enable_mask) == enable_mask)
@@ -527,8 +616,10 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
-			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
 		return;
 
 	if ((pipestat & enable_mask) == 0)
@@ -546,11 +637,17 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
 	u32 enable_mask = status_mask << 16;
 
 	/*
-	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
-	 * same bit MBZ.
+	 * On pipe A we don't support the PSR interrupt yet,
+	 * on pipe B and C the same bit MBZ.
 	 */
 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
 		return 0;
+	/*
+	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
+	 * A the same bit is for perf counters which we don't use either.
+	 */
+	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+		return 0;
 
 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
@@ -637,6 +734,56 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 	}
 }
 
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ *  vblank_start >= 3
+ *  vsync_start = vblank_start + 1
+ *  vsync_end = vblank_start + 2
+ *  vtotal = vblank_start + 3
+ *
+ *           start of vblank:
+ *           latch double buffered registers
+ *           increment frame counter (ctg+)
+ *           generate start of vblank interrupt (gen4+)
+ *           |
+ *           |          frame start:
+ *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
+ *           |          may be shifted forward 1-3 extra lines via PIPECONF
+ *           |          |
+ *           |          |  start of vsync:
+ *           |          |  generate vsync interrupt
+ *           |          |  |
+ * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
+ *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ *       |          |       <----vs----->                     |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ *       |          |                                         |
+ *       last visible pixel                                   first visible pixel
+ *                  |                                         increment frame counter (gen3/4)
+ *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
+ *
+ * x  = horizontal active
+ * _  = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ *   (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ *   of horizontal active on the first line of vertical active
+ */
+
 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	/* Gen2 doesn't have a hardware frame counter */
@@ -651,7 +798,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low, pixel, vbl_start;
+	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -665,17 +812,28 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 		const struct drm_display_mode *mode =
 			&intel_crtc->config.adjusted_mode;
 
-		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+		htotal = mode->crtc_htotal;
+		hsync_start = mode->crtc_hsync_start;
+		vbl_start = mode->crtc_vblank_start;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			vbl_start = DIV_ROUND_UP(vbl_start, 2);
 	} else {
 		enum transcoder cpu_transcoder = (enum transcoder) pipe;
-		u32 htotal;
 
 		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
 		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
-
-		vbl_start *= htotal;
+		if ((I915_READ(PIPECONF(cpu_transcoder)) &
+		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
+			vbl_start = DIV_ROUND_UP(vbl_start, 2);
 	}
 
+	/* Convert to pixel count */
+	vbl_start *= htotal;
+
+	/* Start of vblank event occurs at start of hsync */
+	vbl_start -= htotal - hsync_start;
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -719,24 +877,28 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 
-static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t status;
-	int reg;
+	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+	enum pipe pipe = crtc->pipe;
+	int position, vtotal;
 
-	if (INTEL_INFO(dev)->gen >= 8) {
-		status = GEN8_PIPE_VBLANK;
-		reg = GEN8_DE_PIPE_ISR(pipe);
-	} else if (INTEL_INFO(dev)->gen >= 7) {
-		status = DE_PIPE_VBLANK_IVB(pipe);
-		reg = DEISR;
-	} else {
-		status = DE_PIPE_VBLANK(pipe);
-		reg = DEISR;
-	}
+	vtotal = mode->crtc_vtotal;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vtotal /= 2;
+
+	if (IS_GEN2(dev))
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+	else
+		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-	return __raw_i915_read32(dev_priv, reg) & status;
+	/*
+	 * See update_scanline_offset() for the details on the
+	 * scanline_offset adjustment.
+	 */
+	return (position + crtc->scanline_offset) % vtotal;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -748,7 +910,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
 	int position;
-	int vbl_start, vbl_end, htotal, vtotal;
+	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
 	unsigned long irqflags;
@@ -760,6 +922,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	}
 
 	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
 	vtotal = mode->crtc_vtotal;
 	vbl_start = mode->crtc_vblank_start;
 	vbl_end = mode->crtc_vblank_end;
@@ -778,7 +941,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	 * following code must not block on uncore.lock.
 	 */
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
+
 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
 	/* Get optional system timestamp before query. */
@@ -789,68 +952,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		if (IS_GEN2(dev))
-			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
-		else
-			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
-
-		if (HAS_DDI(dev)) {
-			/*
-			 * On HSW HDMI outputs there seems to be a 2 line
-			 * difference, whereas eDP has the normal 1 line
-			 * difference that earlier platforms have. External
-			 * DP is unknown. For now just check for the 2 line
-			 * difference case on all output types on HSW+.
-			 *
-			 * This might misinterpret the scanline counter being
-			 * one line too far along on eDP, but that's less
-			 * dangerous than the alternative since that would lead
-			 * the vblank timestamp code astray when it sees a
-			 * scanline count before vblank_start during a vblank
-			 * interrupt.
-			 */
-			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
-			if ((in_vbl && (position == vbl_start - 2 ||
-					position == vbl_start - 1)) ||
-			    (!in_vbl && (position == vbl_end - 2 ||
-					 position == vbl_end - 1)))
-				position = (position + 2) % vtotal;
-		} else if (HAS_PCH_SPLIT(dev)) {
-			/*
-			 * The scanline counter increments at the leading edge
-			 * of hsync, ie. it completely misses the active portion
-			 * of the line. Fix up the counter at both edges of vblank
-			 * to get a more accurate picture whether we're in vblank
-			 * or not.
-			 */
-			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
-			if ((in_vbl && position == vbl_start - 1) ||
-			    (!in_vbl && position == vbl_end - 1))
-				position = (position + 1) % vtotal;
-		} else {
-			/*
-			 * ISR vblank status bits don't work the way we'd want
-			 * them to work on non-PCH platforms (for
-			 * ilk_pipe_in_vblank_locked()), and there doesn't
-			 * appear any other way to determine if we're currently
-			 * in vblank.
-			 *
-			 * Instead let's assume that we're already in vblank if
-			 * we got called from the vblank interrupt and the
-			 * scanline counter value indicates that we're on the
-			 * line just prior to vblank start. This should result
-			 * in the correct answer, unless the vblank interrupt
-			 * delivery really got delayed for almost exactly one
-			 * full frame/field.
-			 */
-			if (flags & DRM_CALLED_FROM_VBLIRQ &&
-			    position == vbl_start - 1) {
-				position = (position + 1) % vtotal;
-
-				/* Signal this correction as "applied". */
-				ret |= 0x8;
-			}
-		}
+		position = __intel_get_crtc_scanline(intel_crtc);
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
@@ -862,6 +964,29 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 		vbl_start *= htotal;
 		vbl_end *= htotal;
 		vtotal *= htotal;
+
+		/*
+		 * In interlaced modes, the pixel counter counts all pixels,
+		 * so one field will have htotal more pixels. In order to avoid
+		 * the reported position from jumping backwards when the pixel
+		 * counter is beyond the length of the shorter field, just
+		 * clamp the position the length of the shorter field. This
+		 * matches how the scanline counter based position works since
+		 * the scanline counter doesn't count the two half lines.
+		 */
+		if (position >= vtotal)
+			position = vtotal - 1;
+
+		/*
+		 * Start of vblank interrupt is triggered at start of hsync,
+		 * just prior to the first active line of vblank. However we
+		 * consider lines to start at the leading edge of horizontal
+		 * active. So, should we get here before we've crossed into
+		 * the horizontal active of the first line in vblank, we would
+		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+		 * always add htotal-hsync_start to the current pixel position.
+		 */
+		position = (position + htotal - hsync_start) % vtotal;
 	}
 
 	/* Get optional system timestamp after query. */
@@ -900,6 +1025,19 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	return ret;
 }
 
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	unsigned long irqflags;
+	int position;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	position = __intel_get_crtc_scanline(crtc);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	return position;
+}
+
 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 				     int *max_error,
 				     struct timeval *vblank_time,
@@ -945,7 +1083,7 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
-		      drm_get_connector_name(connector),
+		      connector->name,
 		      drm_get_connector_status_name(old_status),
 		      drm_get_connector_status_name(connector->status));
 
@@ -990,7 +1128,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
 			DRM_INFO("HPD interrupt storm detected on connector %s: "
 				 "switching from hotplug detection to polling\n",
-				 drm_get_connector_name(connector));
+				 connector->name);
 			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
 			connector->polled = DRM_CONNECTOR_POLL_CONNECT
 				| DRM_CONNECTOR_POLL_DISCONNECT;
@@ -998,7 +1136,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 		}
 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
 			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
+				      connector->name, intel_encoder->hpd_pin);
 		}
 	}
 	/* if there were no outputs to poll, poll was disabled,
@@ -1073,9 +1211,9 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 }
 
 static void notify_ring(struct drm_device *dev,
-			struct intel_ring_buffer *ring)
+			struct intel_engine_cs *ring)
 {
-	if (ring->obj == NULL)
+	if (!intel_ring_initialized(ring))
 		return;
 
 	trace_i915_gem_request_complete(ring);
@@ -1094,8 +1232,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
-	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
-	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	if (IS_BROADWELL(dev_priv->dev))
+		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	else {
+		/* Make sure not to corrupt PMIMR state used by ringbuffer */
+		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* Make sure we didn't queue anything we're not going to process. */
@@ -1292,6 +1434,19 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 		ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
+static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+	if ((pm_iir & dev_priv->pm_rps_events) == 0)
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+	spin_unlock(&dev_priv->irq_lock);
+
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 				       struct drm_i915_private *dev_priv,
 				       u32 master_ctl)
@@ -1315,18 +1470,32 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
 
-	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(1));
 		if (tmp) {
 			ret = IRQ_HANDLED;
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
 			if (vcs & GT_RENDER_USER_INTERRUPT)
 				notify_ring(dev, &dev_priv->ring[VCS]);
+			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS2]);
 			I915_WRITE(GEN8_GT_IIR(1), tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
 
+	if (master_ctl & GEN8_GT_PM_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(2));
+		if (tmp & dev_priv->pm_rps_events) {
+			ret = IRQ_HANDLED;
+			gen8_rps_irq_handler(dev_priv, tmp);
+			I915_WRITE(GEN8_GT_IIR(2),
+				   tmp & dev_priv->pm_rps_events);
+		} else
+			DRM_ERROR("The master control interrupt lied (PM)!\n");
+	}
+
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		tmp = I915_READ(GEN8_GT_IIR(3));
 		if (tmp) {
@@ -1549,6 +1718,19 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 	}
 }
 
+static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+{
+	struct intel_crtc *crtc;
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	wake_up(&crtc->vbl_wait);
+
+	return true;
+}
+
 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1578,6 +1760,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 		case PIPE_B:
 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 			break;
+		case PIPE_C:
+			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+			break;
 		}
 		if (iir & iir_bit)
 			mask |= dev_priv->pipestat_irq_mask[pipe];
@@ -1600,7 +1785,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 
 	for_each_pipe(pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(dev, pipe);
+			intel_pipe_handle_vblank(dev, pipe);
 
 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
 			intel_prepare_page_flip(dev, pipe);
@@ -1619,9 +1804,36 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 		gmbus_irq_handler(dev);
 }
 
+static void i9xx_hpd_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+	if (IS_G4X(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+
+		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+	}
+
+	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+		dp_aux_irq_handler(dev);
+
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = (struct drm_device *) arg;
+	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 iir, gt_iir, pm_iir;
 	irqreturn_t ret = IRQ_NONE;
1627 irqreturn_t ret = IRQ_NONE; 1839 irqreturn_t ret = IRQ_NONE;
@@ -1641,19 +1853,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1641 valleyview_pipestat_irq_handler(dev, iir); 1853 valleyview_pipestat_irq_handler(dev, iir);
1642 1854
1643 /* Consume port. Then clear IIR or we'll miss events */ 1855 /* Consume port. Then clear IIR or we'll miss events */
1644 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1856 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1645 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1857 i9xx_hpd_irq_handler(dev);
1646 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1647
1648 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1649
1650 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1651 dp_aux_irq_handler(dev);
1652
1653 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1654 I915_READ(PORT_HOTPLUG_STAT);
1655 }
1656
1657 1858
1658 if (pm_iir) 1859 if (pm_iir)
1659 gen6_rps_irq_handler(dev_priv, pm_iir); 1860 gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1667,6 +1868,40 @@ out:
1667 return ret; 1868 return ret;
1668} 1869}
1669 1870
1871static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1872{
1873 struct drm_device *dev = arg;
1874 struct drm_i915_private *dev_priv = dev->dev_private;
1875 u32 master_ctl, iir;
1876 irqreturn_t ret = IRQ_NONE;
1877
1878 for (;;) {
1879 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1880 iir = I915_READ(VLV_IIR);
1881
1882 if (master_ctl == 0 && iir == 0)
1883 break;
1884
1885 I915_WRITE(GEN8_MASTER_IRQ, 0);
1886
1887 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1888
1889 valleyview_pipestat_irq_handler(dev, iir);
1890
1891 /* Consume port. Then clear IIR or we'll miss events */
1892 i9xx_hpd_irq_handler(dev);
1893
1894 I915_WRITE(VLV_IIR, iir);
1895
1896 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1897 POSTING_READ(GEN8_MASTER_IRQ);
1898
1899 ret = IRQ_HANDLED;
1900 }
1901
1902 return ret;
1903}
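
cherryview_irq_handler() follows the Broadwell-style top half: gate the master interrupt, drain and ack the subordinate IIRs, then re-enable the master and flush with a posting read so a new interrupt edge can be generated. The skeleton of that loop, with the dispatch step elided (control flow only; it mirrors the function above):

    for (;;) {
            u32 master = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;

            if (!master)
                    break;                         /* nothing pending */

            I915_WRITE(GEN8_MASTER_IRQ, 0);        /* gate further interrupts */
            /* ... read, dispatch and ack the subordinate IIRs ... */
            I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
            POSTING_READ(GEN8_MASTER_IRQ);         /* flush the re-enable */
    }
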
1904
1670static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1905static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1671{ 1906{
1672 struct drm_i915_private *dev_priv = dev->dev_private; 1907 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1827,7 +2062,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1827 2062
1828 for_each_pipe(pipe) { 2063 for_each_pipe(pipe) {
1829 if (de_iir & DE_PIPE_VBLANK(pipe)) 2064 if (de_iir & DE_PIPE_VBLANK(pipe))
1830 drm_handle_vblank(dev, pipe); 2065 intel_pipe_handle_vblank(dev, pipe);
1831 2066
1832 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2067 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1833 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2068 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
@@ -1877,7 +2112,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1877 2112
1878 for_each_pipe(pipe) { 2113 for_each_pipe(pipe) {
1879 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2114 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
1880 drm_handle_vblank(dev, pipe); 2115 intel_pipe_handle_vblank(dev, pipe);
1881 2116
1882 /* plane/pipes map 1:1 on ilk+ */ 2117 /* plane/pipes map 1:1 on ilk+ */
1883 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2118 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
@@ -1899,7 +2134,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1899 2134
1900static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2135static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1901{ 2136{
1902 struct drm_device *dev = (struct drm_device *) arg; 2137 struct drm_device *dev = arg;
1903 struct drm_i915_private *dev_priv = dev->dev_private; 2138 struct drm_i915_private *dev_priv = dev->dev_private;
1904 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2139 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1905 irqreturn_t ret = IRQ_NONE; 2140 irqreturn_t ret = IRQ_NONE;
@@ -2020,9 +2255,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2020 2255
2021 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2256 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2022 if (pipe_iir & GEN8_PIPE_VBLANK) 2257 if (pipe_iir & GEN8_PIPE_VBLANK)
2023 drm_handle_vblank(dev, pipe); 2258 intel_pipe_handle_vblank(dev, pipe);
2024 2259
2025 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 2260 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2026 intel_prepare_page_flip(dev, pipe); 2261 intel_prepare_page_flip(dev, pipe);
2027 intel_finish_page_flip_plane(dev, pipe); 2262 intel_finish_page_flip_plane(dev, pipe);
2028 } 2263 }
@@ -2075,7 +2310,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2075static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2310static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2076 bool reset_completed) 2311 bool reset_completed)
2077{ 2312{
2078 struct intel_ring_buffer *ring; 2313 struct intel_engine_cs *ring;
2079 int i; 2314 int i;
2080 2315
2081 /* 2316 /*
@@ -2137,6 +2372,14 @@ static void i915_error_work_func(struct work_struct *work)
2137 reset_event); 2372 reset_event);
2138 2373
2139 /* 2374 /*
2375 * In most cases it's guaranteed that we get here with an RPM
2376 * reference held, for example because there is a pending GPU
2377 * request that won't finish until the reset is done. This
2378 * isn't the case at least when we get here by doing a
 2379 * simulated reset via debugfs, so get an RPM reference.
2380 */
2381 intel_runtime_pm_get(dev_priv);
2382 /*
2140 * All state reset _must_ be completed before we update the 2383 * All state reset _must_ be completed before we update the
2141 * reset counter, for otherwise waiters might miss the reset 2384 * reset counter, for otherwise waiters might miss the reset
2142 * pending state and not properly drop locks, resulting in 2385 * pending state and not properly drop locks, resulting in
@@ -2146,6 +2389,8 @@ static void i915_error_work_func(struct work_struct *work)
2146 2389
2147 intel_display_handle_reset(dev); 2390 intel_display_handle_reset(dev);
2148 2391
2392 intel_runtime_pm_put(dev_priv);
2393
2149 if (ret == 0) { 2394 if (ret == 0) {
2150 /* 2395 /*
2151 * After all the gem state is reset, increment the reset 2396 * After all the gem state is reset, increment the reset
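
The new RPM comment is the whole point of this hunk: a debugfs-simulated reset can run with no pending GPU request holding a runtime-PM reference, so the error worker must pin the device awake itself for the duration of the reset. The bracket, in essence:

    intel_runtime_pm_get(dev_priv);   /* keep the hardware awake ...  */
    /* ... reset GEM state, intel_display_handle_reset(dev) ...       */
    intel_runtime_pm_put(dev_priv);   /* ... until the reset is done  */
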
@@ -2383,10 +2628,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2383 else 2628 else
2384 i915_enable_pipestat(dev_priv, pipe, 2629 i915_enable_pipestat(dev_priv, pipe,
2385 PIPE_VBLANK_INTERRUPT_STATUS); 2630 PIPE_VBLANK_INTERRUPT_STATUS);
2386
2387 /* maintain vblank delivery even in deep C-states */
2388 if (INTEL_INFO(dev)->gen == 3)
2389 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2390 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2631 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2391 2632
2392 return 0; 2633 return 0;
@@ -2450,9 +2691,6 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
2450 unsigned long irqflags; 2691 unsigned long irqflags;
2451 2692
2452 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2693 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2453 if (INTEL_INFO(dev)->gen == 3)
2454 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2455
2456 i915_disable_pipestat(dev_priv, pipe, 2694 i915_disable_pipestat(dev_priv, pipe,
2457 PIPE_VBLANK_INTERRUPT_STATUS | 2695 PIPE_VBLANK_INTERRUPT_STATUS |
2458 PIPE_START_VBLANK_INTERRUPT_STATUS); 2696 PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2498,29 +2736,77 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2498} 2736}
2499 2737
2500static u32 2738static u32
2501ring_last_seqno(struct intel_ring_buffer *ring) 2739ring_last_seqno(struct intel_engine_cs *ring)
2502{ 2740{
2503 return list_entry(ring->request_list.prev, 2741 return list_entry(ring->request_list.prev,
2504 struct drm_i915_gem_request, list)->seqno; 2742 struct drm_i915_gem_request, list)->seqno;
2505} 2743}
2506 2744
2507static bool 2745static bool
2508ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2746ring_idle(struct intel_engine_cs *ring, u32 seqno)
2509{ 2747{
2510 return (list_empty(&ring->request_list) || 2748 return (list_empty(&ring->request_list) ||
2511 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2749 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2512} 2750}
2513 2751
2514static struct intel_ring_buffer * 2752static bool
2515semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2753ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2754{
2755 if (INTEL_INFO(dev)->gen >= 8) {
2756 /*
2757 * FIXME: gen8 semaphore support - currently we don't emit
2758 * semaphores on bdw anyway, but this needs to be addressed when
2759 * we merge that code.
2760 */
2761 return false;
2762 } else {
2763 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2764 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2765 MI_SEMAPHORE_REGISTER);
2766 }
2767}
2768
2769static struct intel_engine_cs *
2770semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
2771{
2772 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2773 struct intel_engine_cs *signaller;
2774 int i;
2775
2776 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2777 /*
2778 * FIXME: gen8 semaphore support - currently we don't emit
2779 * semaphores on bdw anyway, but this needs to be addressed when
2780 * we merge that code.
2781 */
2782 return NULL;
2783 } else {
2784 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2785
2786 for_each_ring(signaller, dev_priv, i) {
 2787 if (ring == signaller)
2788 continue;
2789
2790 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2791 return signaller;
2792 }
2793 }
2794
2795 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
2796 ring->id, ipehr);
2797
2798 return NULL;
2799}
2800
2801static struct intel_engine_cs *
2802semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2516{ 2803{
2517 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2804 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2518 u32 cmd, ipehr, head; 2805 u32 cmd, ipehr, head;
2519 int i; 2806 int i;
2520 2807
2521 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2808 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2522 if ((ipehr & ~(0x3 << 16)) != 2809 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2523 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2524 return NULL; 2810 return NULL;
2525 2811
2526 /* 2812 /*
@@ -2538,10 +2824,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2538 * our ring is smaller than what the hardware (and hence 2824 * our ring is smaller than what the hardware (and hence
2539 * HEAD_ADDR) allows. Also handles wrap-around. 2825 * HEAD_ADDR) allows. Also handles wrap-around.
2540 */ 2826 */
2541 head &= ring->size - 1; 2827 head &= ring->buffer->size - 1;
2542 2828
2543 /* This here seems to blow up */ 2829 /* This here seems to blow up */
2544 cmd = ioread32(ring->virtual_start + head); 2830 cmd = ioread32(ring->buffer->virtual_start + head);
2545 if (cmd == ipehr) 2831 if (cmd == ipehr)
2546 break; 2832 break;
2547 2833
@@ -2551,14 +2837,14 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2551 if (!i) 2837 if (!i)
2552 return NULL; 2838 return NULL;
2553 2839
2554 *seqno = ioread32(ring->virtual_start + head + 4) + 1; 2840 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2555 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2841 return semaphore_wait_to_signaller_ring(ring, ipehr);
2556} 2842}
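
The head-pointer scan above works because ring buffer sizes are powers of two, so `head &= ring->buffer->size - 1` is simultaneously a cheap modulo and the wrap-around handling the comment mentions. A worked example with an illustrative size:

    u32 size = 4096;       /* hypothetical ring size; must be 2^n   */
    u32 head = 0x1010;     /* one full wrap plus 0x10               */
    head &= size - 1;      /* size - 1 == 0x0fff, so head == 0x0010 */
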
2557 2843
2558static int semaphore_passed(struct intel_ring_buffer *ring) 2844static int semaphore_passed(struct intel_engine_cs *ring)
2559{ 2845{
2560 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2846 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2561 struct intel_ring_buffer *signaller; 2847 struct intel_engine_cs *signaller;
2562 u32 seqno, ctl; 2848 u32 seqno, ctl;
2563 2849
2564 ring->hangcheck.deadlock = true; 2850 ring->hangcheck.deadlock = true;
@@ -2577,7 +2863,7 @@ static int semaphore_passed(struct intel_ring_buffer *ring)
2577 2863
2578static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2864static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2579{ 2865{
2580 struct intel_ring_buffer *ring; 2866 struct intel_engine_cs *ring;
2581 int i; 2867 int i;
2582 2868
2583 for_each_ring(ring, dev_priv, i) 2869 for_each_ring(ring, dev_priv, i)
@@ -2585,7 +2871,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2585} 2871}
2586 2872
2587static enum intel_ring_hangcheck_action 2873static enum intel_ring_hangcheck_action
2588ring_stuck(struct intel_ring_buffer *ring, u64 acthd) 2874ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2589{ 2875{
2590 struct drm_device *dev = ring->dev; 2876 struct drm_device *dev = ring->dev;
2591 struct drm_i915_private *dev_priv = dev->dev_private; 2877 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2641,7 +2927,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2641{ 2927{
2642 struct drm_device *dev = (struct drm_device *)data; 2928 struct drm_device *dev = (struct drm_device *)data;
2643 struct drm_i915_private *dev_priv = dev->dev_private; 2929 struct drm_i915_private *dev_priv = dev->dev_private;
2644 struct intel_ring_buffer *ring; 2930 struct intel_engine_cs *ring;
2645 int i; 2931 int i;
2646 int busy_count = 0, rings_hung = 0; 2932 int busy_count = 0, rings_hung = 0;
2647 bool stuck[I915_NUM_RINGS] = { 0 }; 2933 bool stuck[I915_NUM_RINGS] = { 0 };
@@ -2759,57 +3045,63 @@ void i915_queue_hangcheck(struct drm_device *dev)
2759 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3045 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2760} 3046}
2761 3047
2762static void ibx_irq_preinstall(struct drm_device *dev) 3048static void ibx_irq_reset(struct drm_device *dev)
2763{ 3049{
2764 struct drm_i915_private *dev_priv = dev->dev_private; 3050 struct drm_i915_private *dev_priv = dev->dev_private;
2765 3051
2766 if (HAS_PCH_NOP(dev)) 3052 if (HAS_PCH_NOP(dev))
2767 return; 3053 return;
2768 3054
2769 /* south display irq */ 3055 GEN5_IRQ_RESET(SDE);
2770 I915_WRITE(SDEIMR, 0xffffffff); 3056
2771 /* 3057 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2772 * SDEIER is also touched by the interrupt handler to work around missed 3058 I915_WRITE(SERR_INT, 0xffffffff);
2773 * PCH interrupts. Hence we can't update it after the interrupt handler 3059}
2774 * is enabled - instead we unconditionally enable all PCH interrupt 3060
2775 * sources here, but then only unmask them as needed with SDEIMR. 3061/*
2776 */ 3062 * SDEIER is also touched by the interrupt handler to work around missed PCH
3063 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3064 * instead we unconditionally enable all PCH interrupt sources here, but then
3065 * only unmask them as needed with SDEIMR.
3066 *
3067 * This function needs to be called before interrupts are enabled.
3068 */
3069static void ibx_irq_pre_postinstall(struct drm_device *dev)
3070{
3071 struct drm_i915_private *dev_priv = dev->dev_private;
3072
3073 if (HAS_PCH_NOP(dev))
3074 return;
3075
3076 WARN_ON(I915_READ(SDEIER) != 0);
2777 I915_WRITE(SDEIER, 0xffffffff); 3077 I915_WRITE(SDEIER, 0xffffffff);
2778 POSTING_READ(SDEIER); 3078 POSTING_READ(SDEIER);
2779} 3079}
2780 3080
2781static void gen5_gt_irq_preinstall(struct drm_device *dev) 3081static void gen5_gt_irq_reset(struct drm_device *dev)
2782{ 3082{
2783 struct drm_i915_private *dev_priv = dev->dev_private; 3083 struct drm_i915_private *dev_priv = dev->dev_private;
2784 3084
2785 /* and GT */ 3085 GEN5_IRQ_RESET(GT);
2786 I915_WRITE(GTIMR, 0xffffffff); 3086 if (INTEL_INFO(dev)->gen >= 6)
2787 I915_WRITE(GTIER, 0x0); 3087 GEN5_IRQ_RESET(GEN6_PM);
2788 POSTING_READ(GTIER);
2789
2790 if (INTEL_INFO(dev)->gen >= 6) {
2791 /* and PM */
2792 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2793 I915_WRITE(GEN6_PMIER, 0x0);
2794 POSTING_READ(GEN6_PMIER);
2795 }
2796} 3088}
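
GEN5_IRQ_RESET() is introduced elsewhere in this patch, so its definition is not visible in these hunks. Judging from the open-coded sequences it replaces, it presumably masks everything, zeroes the enable register, and double-clears IIR with posting reads (IIR can queue up two events); GEN8_IRQ_RESET_NDX used below is the same idea with an index. This is a reconstruction, not the literal macro:

    /* Presumed shape of GEN5_IRQ_RESET (reconstruction): */
    #define GEN5_IRQ_RESET(type) do { \
            I915_WRITE(type##IMR, 0xffffffff); \
            POSTING_READ(type##IMR); \
            I915_WRITE(type##IER, 0); \
            /* IIR can queue up two events, so clear it twice: */ \
            I915_WRITE(type##IIR, 0xffffffff); \
            POSTING_READ(type##IIR); \
            I915_WRITE(type##IIR, 0xffffffff); \
            POSTING_READ(type##IIR); \
    } while (0)
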
2797 3089
2798/* drm_dma.h hooks 3090/* drm_dma.h hooks
2799*/ 3091*/
2800static void ironlake_irq_preinstall(struct drm_device *dev) 3092static void ironlake_irq_reset(struct drm_device *dev)
2801{ 3093{
2802 struct drm_i915_private *dev_priv = dev->dev_private; 3094 struct drm_i915_private *dev_priv = dev->dev_private;
2803 3095
2804 I915_WRITE(HWSTAM, 0xeffe); 3096 I915_WRITE(HWSTAM, 0xffffffff);
2805 3097
2806 I915_WRITE(DEIMR, 0xffffffff); 3098 GEN5_IRQ_RESET(DE);
2807 I915_WRITE(DEIER, 0x0); 3099 if (IS_GEN7(dev))
2808 POSTING_READ(DEIER); 3100 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2809 3101
2810 gen5_gt_irq_preinstall(dev); 3102 gen5_gt_irq_reset(dev);
2811 3103
2812 ibx_irq_preinstall(dev); 3104 ibx_irq_reset(dev);
2813} 3105}
2814 3106
2815static void valleyview_irq_preinstall(struct drm_device *dev) 3107static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2827,7 +3119,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2827 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3119 I915_WRITE(GTIIR, I915_READ(GTIIR));
2828 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3120 I915_WRITE(GTIIR, I915_READ(GTIIR));
2829 3121
2830 gen5_gt_irq_preinstall(dev); 3122 gen5_gt_irq_reset(dev);
2831 3123
2832 I915_WRITE(DPINVGTT, 0xff); 3124 I915_WRITE(DPINVGTT, 0xff);
2833 3125
@@ -2841,7 +3133,15 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2841 POSTING_READ(VLV_IER); 3133 POSTING_READ(VLV_IER);
2842} 3134}
2843 3135
2844static void gen8_irq_preinstall(struct drm_device *dev) 3136static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3137{
3138 GEN8_IRQ_RESET_NDX(GT, 0);
3139 GEN8_IRQ_RESET_NDX(GT, 1);
3140 GEN8_IRQ_RESET_NDX(GT, 2);
3141 GEN8_IRQ_RESET_NDX(GT, 3);
3142}
3143
3144static void gen8_irq_reset(struct drm_device *dev)
2845{ 3145{
2846 struct drm_i915_private *dev_priv = dev->dev_private; 3146 struct drm_i915_private *dev_priv = dev->dev_private;
2847 int pipe; 3147 int pipe;
@@ -2849,43 +3149,44 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2849 I915_WRITE(GEN8_MASTER_IRQ, 0); 3149 I915_WRITE(GEN8_MASTER_IRQ, 0);
2850 POSTING_READ(GEN8_MASTER_IRQ); 3150 POSTING_READ(GEN8_MASTER_IRQ);
2851 3151
2852 /* IIR can theoretically queue up two events. Be paranoid */ 3152 gen8_gt_irq_reset(dev_priv);
2853#define GEN8_IRQ_INIT_NDX(type, which) do { \
2854 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2855 POSTING_READ(GEN8_##type##_IMR(which)); \
2856 I915_WRITE(GEN8_##type##_IER(which), 0); \
2857 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2858 POSTING_READ(GEN8_##type##_IIR(which)); \
2859 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2860 } while (0)
2861
2862#define GEN8_IRQ_INIT(type) do { \
2863 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2864 POSTING_READ(GEN8_##type##_IMR); \
2865 I915_WRITE(GEN8_##type##_IER, 0); \
2866 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2867 POSTING_READ(GEN8_##type##_IIR); \
2868 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2869 } while (0)
2870
2871 GEN8_IRQ_INIT_NDX(GT, 0);
2872 GEN8_IRQ_INIT_NDX(GT, 1);
2873 GEN8_IRQ_INIT_NDX(GT, 2);
2874 GEN8_IRQ_INIT_NDX(GT, 3);
2875 3153
2876 for_each_pipe(pipe) { 3154 for_each_pipe(pipe)
2877 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 3155 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2878 } 3156
3157 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3158 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3159 GEN5_IRQ_RESET(GEN8_PCU_);
3160
3161 ibx_irq_reset(dev);
3162}
3163
3164static void cherryview_irq_preinstall(struct drm_device *dev)
3165{
3166 struct drm_i915_private *dev_priv = dev->dev_private;
3167 int pipe;
3168
3169 I915_WRITE(GEN8_MASTER_IRQ, 0);
3170 POSTING_READ(GEN8_MASTER_IRQ);
2879 3171
2880 GEN8_IRQ_INIT(DE_PORT); 3172 gen8_gt_irq_reset(dev_priv);
2881 GEN8_IRQ_INIT(DE_MISC); 3173
2882 GEN8_IRQ_INIT(PCU); 3174 GEN5_IRQ_RESET(GEN8_PCU_);
2883#undef GEN8_IRQ_INIT
2884#undef GEN8_IRQ_INIT_NDX
2885 3175
2886 POSTING_READ(GEN8_PCU_IIR); 3176 POSTING_READ(GEN8_PCU_IIR);
2887 3177
2888 ibx_irq_preinstall(dev); 3178 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3179
3180 I915_WRITE(PORT_HOTPLUG_EN, 0);
3181 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3182
3183 for_each_pipe(pipe)
3184 I915_WRITE(PIPESTAT(pipe), 0xffff);
3185
3186 I915_WRITE(VLV_IMR, 0xffffffff);
3187 I915_WRITE(VLV_IER, 0x0);
3188 I915_WRITE(VLV_IIR, 0xffffffff);
3189 POSTING_READ(VLV_IIR);
2889} 3190}
2890 3191
2891static void ibx_hpd_irq_setup(struct drm_device *dev) 3192static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -2931,15 +3232,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2931 if (HAS_PCH_NOP(dev)) 3232 if (HAS_PCH_NOP(dev))
2932 return; 3233 return;
2933 3234
2934 if (HAS_PCH_IBX(dev)) { 3235 if (HAS_PCH_IBX(dev))
2935 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3236 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
2936 } else { 3237 else
2937 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3238 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2938 3239
2939 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3240 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
2940 }
2941
2942 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2943 I915_WRITE(SDEIMR, ~mask); 3241 I915_WRITE(SDEIMR, ~mask);
2944} 3242}
2945 3243
@@ -2965,10 +3263,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2965 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3263 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2966 } 3264 }
2967 3265
2968 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3266 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
2969 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2970 I915_WRITE(GTIER, gt_irqs);
2971 POSTING_READ(GTIER);
2972 3267
2973 if (INTEL_INFO(dev)->gen >= 6) { 3268 if (INTEL_INFO(dev)->gen >= 6) {
2974 pm_irqs |= dev_priv->pm_rps_events; 3269 pm_irqs |= dev_priv->pm_rps_events;
@@ -2977,10 +3272,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2977 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3272 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2978 3273
2979 dev_priv->pm_irq_mask = 0xffffffff; 3274 dev_priv->pm_irq_mask = 0xffffffff;
2980 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 3275 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
2981 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2982 I915_WRITE(GEN6_PMIER, pm_irqs);
2983 POSTING_READ(GEN6_PMIER);
2984 } 3276 }
2985} 3277}
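
GEN5_IRQ_INIT() likewise condenses the old clear-IIR / program-IMR / program-IER / posting-read sequence into one macro; together with the GEN5_ASSERT_IIR_IS_ZERO() check seen in ibx_irq_postinstall() it presumably looks something like this (again a reconstruction; the real definition is outside these hunks):

    /* Presumed shape of GEN5_IRQ_INIT (reconstruction): */
    #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
            GEN5_ASSERT_IIR_IS_ZERO(type##IIR); /* stale events are a bug */ \
            I915_WRITE(type##IER, (ier_val)); \
            I915_WRITE(type##IMR, (imr_val)); \
            POSTING_READ(type##IMR); \
    } while (0)
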
2986 3278
@@ -2997,8 +3289,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2997 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3289 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
2998 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3290 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2999 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3291 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3000
3001 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3002 } else { 3292 } else {
3003 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3293 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3004 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3294 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
@@ -3011,11 +3301,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3011 3301
3012 dev_priv->irq_mask = ~display_mask; 3302 dev_priv->irq_mask = ~display_mask;
3013 3303
3014 /* should always can generate irq */ 3304 I915_WRITE(HWSTAM, 0xeffe);
3015 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3305
3016 I915_WRITE(DEIMR, dev_priv->irq_mask); 3306 ibx_irq_pre_postinstall(dev);
3017 I915_WRITE(DEIER, display_mask | extra_mask); 3307
3018 POSTING_READ(DEIER); 3308 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3019 3309
3020 gen5_gt_irq_postinstall(dev); 3310 gen5_gt_irq_postinstall(dev);
3021 3311
@@ -3175,21 +3465,16 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3175 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3465 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3176 }; 3466 };
3177 3467
3178 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 3468 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3179 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 3469 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3180 if (tmp) 3470
3181 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 3471 dev_priv->pm_irq_mask = 0xffffffff;
3182 i, tmp);
3183 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
3184 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
3185 }
3186 POSTING_READ(GEN8_GT_IER(0));
3187} 3472}
3188 3473
3189static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3474static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3190{ 3475{
3191 struct drm_device *dev = dev_priv->dev; 3476 struct drm_device *dev = dev_priv->dev;
3192 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 3477 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3193 GEN8_PIPE_CDCLK_CRC_DONE | 3478 GEN8_PIPE_CDCLK_CRC_DONE |
3194 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3479 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3195 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3480 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3199,25 +3484,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3199 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3484 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3200 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3485 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3201 3486
3202 for_each_pipe(pipe) { 3487 for_each_pipe(pipe)
3203 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 3488 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3204 if (tmp) 3489 de_pipe_enables);
3205 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3206 pipe, tmp);
3207 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3208 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
3209 }
3210 POSTING_READ(GEN8_DE_PIPE_ISR(0));
3211 3490
3212 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 3491 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3213 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
3214 POSTING_READ(GEN8_DE_PORT_IER);
3215} 3492}
3216 3493
3217static int gen8_irq_postinstall(struct drm_device *dev) 3494static int gen8_irq_postinstall(struct drm_device *dev)
3218{ 3495{
3219 struct drm_i915_private *dev_priv = dev->dev_private; 3496 struct drm_i915_private *dev_priv = dev->dev_private;
3220 3497
3498 ibx_irq_pre_postinstall(dev);
3499
3221 gen8_gt_irq_postinstall(dev_priv); 3500 gen8_gt_irq_postinstall(dev_priv);
3222 gen8_de_irq_postinstall(dev_priv); 3501 gen8_de_irq_postinstall(dev_priv);
3223 3502
@@ -3229,44 +3508,55 @@ static int gen8_irq_postinstall(struct drm_device *dev)
3229 return 0; 3508 return 0;
3230} 3509}
3231 3510
3232static void gen8_irq_uninstall(struct drm_device *dev) 3511static int cherryview_irq_postinstall(struct drm_device *dev)
3233{ 3512{
3234 struct drm_i915_private *dev_priv = dev->dev_private; 3513 struct drm_i915_private *dev_priv = dev->dev_private;
3514 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3515 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3516 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3517 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3518 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3519 PIPE_CRC_DONE_INTERRUPT_STATUS;
3520 unsigned long irqflags;
3235 int pipe; 3521 int pipe;
3236 3522
3237 if (!dev_priv) 3523 /*
3238 return; 3524 * Leave vblank interrupts masked initially. enable/disable will
3525 * toggle them based on usage.
3526 */
3527 dev_priv->irq_mask = ~enable_mask;
3239 3528
3240 I915_WRITE(GEN8_MASTER_IRQ, 0); 3529 for_each_pipe(pipe)
3530 I915_WRITE(PIPESTAT(pipe), 0xffff);
3241 3531
3242#define GEN8_IRQ_FINI_NDX(type, which) do { \ 3532 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3243 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3533 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3244 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3534 for_each_pipe(pipe)
3245 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3535 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3246 } while (0) 3536 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3247 3537
3248#define GEN8_IRQ_FINI(type) do { \ 3538 I915_WRITE(VLV_IIR, 0xffffffff);
3249 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3539 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3250 I915_WRITE(GEN8_##type##_IER, 0); \ 3540 I915_WRITE(VLV_IER, enable_mask);
3251 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3252 } while (0)
3253 3541
3254 GEN8_IRQ_FINI_NDX(GT, 0); 3542 gen8_gt_irq_postinstall(dev_priv);
3255 GEN8_IRQ_FINI_NDX(GT, 1);
3256 GEN8_IRQ_FINI_NDX(GT, 2);
3257 GEN8_IRQ_FINI_NDX(GT, 3);
3258 3543
3259 for_each_pipe(pipe) { 3544 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3260 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3545 POSTING_READ(GEN8_MASTER_IRQ);
3261 }
3262 3546
3263 GEN8_IRQ_FINI(DE_PORT); 3547 return 0;
3264 GEN8_IRQ_FINI(DE_MISC); 3548}
3265 GEN8_IRQ_FINI(PCU);
3266#undef GEN8_IRQ_FINI
3267#undef GEN8_IRQ_FINI_NDX
3268 3549
3269 POSTING_READ(GEN8_PCU_IIR); 3550static void gen8_irq_uninstall(struct drm_device *dev)
3551{
3552 struct drm_i915_private *dev_priv = dev->dev_private;
3553
3554 if (!dev_priv)
3555 return;
3556
3557 intel_hpd_irq_uninstall(dev_priv);
3558
3559 gen8_irq_reset(dev);
3270} 3560}
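
With gen8_irq_uninstall() reduced to HPD teardown plus gen8_irq_reset(), preinstall and uninstall now funnel into the same register-clearing routine; the intel_irq_init() hunk further down wires this up directly:

    /* After this patch both hooks are backed by the same reset code: */
    dev->driver->irq_preinstall = gen8_irq_reset;
    dev->driver->irq_uninstall  = gen8_irq_uninstall; /* -> gen8_irq_reset() */
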
3271 3561
3272static void valleyview_irq_uninstall(struct drm_device *dev) 3562static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3278,6 +3568,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3278 if (!dev_priv) 3568 if (!dev_priv)
3279 return; 3569 return;
3280 3570
3571 I915_WRITE(VLV_MASTER_IER, 0);
3572
3281 intel_hpd_irq_uninstall(dev_priv); 3573 intel_hpd_irq_uninstall(dev_priv);
3282 3574
3283 for_each_pipe(pipe) 3575 for_each_pipe(pipe)
@@ -3300,35 +3592,67 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3300 POSTING_READ(VLV_IER); 3592 POSTING_READ(VLV_IER);
3301} 3593}
3302 3594
3303static void ironlake_irq_uninstall(struct drm_device *dev) 3595static void cherryview_irq_uninstall(struct drm_device *dev)
3304{ 3596{
3305 struct drm_i915_private *dev_priv = dev->dev_private; 3597 struct drm_i915_private *dev_priv = dev->dev_private;
3598 int pipe;
3306 3599
3307 if (!dev_priv) 3600 if (!dev_priv)
3308 return; 3601 return;
3309 3602
3310 intel_hpd_irq_uninstall(dev_priv); 3603 I915_WRITE(GEN8_MASTER_IRQ, 0);
3604 POSTING_READ(GEN8_MASTER_IRQ);
3311 3605
3312 I915_WRITE(HWSTAM, 0xffffffff); 3606#define GEN8_IRQ_FINI_NDX(type, which) \
3607do { \
3608 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3609 I915_WRITE(GEN8_##type##_IER(which), 0); \
3610 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3611 POSTING_READ(GEN8_##type##_IIR(which)); \
3612 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3613} while (0)
3614
3615#define GEN8_IRQ_FINI(type) \
3616do { \
3617 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3618 I915_WRITE(GEN8_##type##_IER, 0); \
3619 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3620 POSTING_READ(GEN8_##type##_IIR); \
3621 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3622} while (0)
3313 3623
3314 I915_WRITE(DEIMR, 0xffffffff); 3624 GEN8_IRQ_FINI_NDX(GT, 0);
3315 I915_WRITE(DEIER, 0x0); 3625 GEN8_IRQ_FINI_NDX(GT, 1);
3316 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3626 GEN8_IRQ_FINI_NDX(GT, 2);
3317 if (IS_GEN7(dev)) 3627 GEN8_IRQ_FINI_NDX(GT, 3);
3318 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3319 3628
3320 I915_WRITE(GTIMR, 0xffffffff); 3629 GEN8_IRQ_FINI(PCU);
3321 I915_WRITE(GTIER, 0x0);
3322 I915_WRITE(GTIIR, I915_READ(GTIIR));
3323 3630
3324 if (HAS_PCH_NOP(dev)) 3631#undef GEN8_IRQ_FINI
3632#undef GEN8_IRQ_FINI_NDX
3633
3634 I915_WRITE(PORT_HOTPLUG_EN, 0);
3635 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3636
3637 for_each_pipe(pipe)
3638 I915_WRITE(PIPESTAT(pipe), 0xffff);
3639
3640 I915_WRITE(VLV_IMR, 0xffffffff);
3641 I915_WRITE(VLV_IER, 0x0);
3642 I915_WRITE(VLV_IIR, 0xffffffff);
3643 POSTING_READ(VLV_IIR);
3644}
3645
3646static void ironlake_irq_uninstall(struct drm_device *dev)
3647{
3648 struct drm_i915_private *dev_priv = dev->dev_private;
3649
3650 if (!dev_priv)
3325 return; 3651 return;
3326 3652
3327 I915_WRITE(SDEIMR, 0xffffffff); 3653 intel_hpd_irq_uninstall(dev_priv);
3328 I915_WRITE(SDEIER, 0x0); 3654
3329 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3655 ironlake_irq_reset(dev);
3330 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3331 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3332} 3656}
3333 3657
3334static void i8xx_irq_preinstall(struct drm_device * dev) 3658static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -3386,7 +3710,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3386 struct drm_i915_private *dev_priv = dev->dev_private; 3710 struct drm_i915_private *dev_priv = dev->dev_private;
3387 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3711 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3388 3712
3389 if (!drm_handle_vblank(dev, pipe)) 3713 if (!intel_pipe_handle_vblank(dev, pipe))
3390 return false; 3714 return false;
3391 3715
3392 if ((iir & flip_pending) == 0) 3716 if ((iir & flip_pending) == 0)
@@ -3410,7 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3410 3734
3411static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3735static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3412{ 3736{
3413 struct drm_device *dev = (struct drm_device *) arg; 3737 struct drm_device *dev = arg;
3414 struct drm_i915_private *dev_priv = dev->dev_private; 3738 struct drm_i915_private *dev_priv = dev->dev_private;
3415 u16 iir, new_iir; 3739 u16 iir, new_iir;
3416 u32 pipe_stats[2]; 3740 u32 pipe_stats[2];
@@ -3571,7 +3895,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3571 struct drm_i915_private *dev_priv = dev->dev_private; 3895 struct drm_i915_private *dev_priv = dev->dev_private;
3572 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3896 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3573 3897
3574 if (!drm_handle_vblank(dev, pipe)) 3898 if (!intel_pipe_handle_vblank(dev, pipe))
3575 return false; 3899 return false;
3576 3900
3577 if ((iir & flip_pending) == 0) 3901 if ((iir & flip_pending) == 0)
@@ -3595,7 +3919,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3595 3919
3596static irqreturn_t i915_irq_handler(int irq, void *arg) 3920static irqreturn_t i915_irq_handler(int irq, void *arg)
3597{ 3921{
3598 struct drm_device *dev = (struct drm_device *) arg; 3922 struct drm_device *dev = arg;
3599 struct drm_i915_private *dev_priv = dev->dev_private; 3923 struct drm_i915_private *dev_priv = dev->dev_private;
3600 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3924 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3601 unsigned long irqflags; 3925 unsigned long irqflags;
@@ -3636,16 +3960,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3636 break; 3960 break;
3637 3961
3638 /* Consume port. Then clear IIR or we'll miss events */ 3962 /* Consume port. Then clear IIR or we'll miss events */
3639 if ((I915_HAS_HOTPLUG(dev)) && 3963 if (I915_HAS_HOTPLUG(dev) &&
3640 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3964 iir & I915_DISPLAY_PORT_INTERRUPT)
3641 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3965 i9xx_hpd_irq_handler(dev);
3642 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3643
3644 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3645
3646 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3647 POSTING_READ(PORT_HOTPLUG_STAT);
3648 }
3649 3966
3650 I915_WRITE(IIR, iir & ~flip_mask); 3967 I915_WRITE(IIR, iir & ~flip_mask);
3651 new_iir = I915_READ(IIR); /* Flush posted writes */ 3968 new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -3832,7 +4149,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
3832 4149
3833static irqreturn_t i965_irq_handler(int irq, void *arg) 4150static irqreturn_t i965_irq_handler(int irq, void *arg)
3834{ 4151{
3835 struct drm_device *dev = (struct drm_device *) arg; 4152 struct drm_device *dev = arg;
3836 struct drm_i915_private *dev_priv = dev->dev_private; 4153 struct drm_i915_private *dev_priv = dev->dev_private;
3837 u32 iir, new_iir; 4154 u32 iir, new_iir;
3838 u32 pipe_stats[I915_MAX_PIPES]; 4155 u32 pipe_stats[I915_MAX_PIPES];
@@ -3879,22 +4196,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3879 ret = IRQ_HANDLED; 4196 ret = IRQ_HANDLED;
3880 4197
3881 /* Consume port. Then clear IIR or we'll miss events */ 4198 /* Consume port. Then clear IIR or we'll miss events */
3882 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4199 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3883 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 4200 i9xx_hpd_irq_handler(dev);
3884 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3885 HOTPLUG_INT_STATUS_G4X :
3886 HOTPLUG_INT_STATUS_I915);
3887
3888 intel_hpd_irq_handler(dev, hotplug_trigger,
3889 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
3890
3891 if (IS_G4X(dev) &&
3892 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
3893 dp_aux_irq_handler(dev);
3894
3895 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3896 I915_READ(PORT_HOTPLUG_STAT);
3897 }
3898 4201
3899 I915_WRITE(IIR, iir & ~flip_mask); 4202 I915_WRITE(IIR, iir & ~flip_mask);
3900 new_iir = I915_READ(IIR); /* Flush posted writes */ 4203 new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -3997,7 +4300,7 @@ static void intel_hpd_irq_reenable(unsigned long data)
3997 if (intel_connector->encoder->hpd_pin == i) { 4300 if (intel_connector->encoder->hpd_pin == i) {
3998 if (connector->polled != intel_connector->polled) 4301 if (connector->polled != intel_connector->polled)
3999 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4302 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4000 drm_get_connector_name(connector)); 4303 connector->name);
4001 connector->polled = intel_connector->polled; 4304 connector->polled = intel_connector->polled;
4002 if (!connector->polled) 4305 if (!connector->polled)
4003 connector->polled = DRM_CONNECTOR_POLL_HPD; 4306 connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -4045,7 +4348,15 @@ void intel_irq_init(struct drm_device *dev)
4045 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4348 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4046 } 4349 }
4047 4350
4048 if (IS_VALLEYVIEW(dev)) { 4351 if (IS_CHERRYVIEW(dev)) {
4352 dev->driver->irq_handler = cherryview_irq_handler;
4353 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4354 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4355 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4356 dev->driver->enable_vblank = valleyview_enable_vblank;
4357 dev->driver->disable_vblank = valleyview_disable_vblank;
4358 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4359 } else if (IS_VALLEYVIEW(dev)) {
4049 dev->driver->irq_handler = valleyview_irq_handler; 4360 dev->driver->irq_handler = valleyview_irq_handler;
4050 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4361 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4051 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4362 dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4055,7 +4366,7 @@ void intel_irq_init(struct drm_device *dev)
4055 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4366 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4056 } else if (IS_GEN8(dev)) { 4367 } else if (IS_GEN8(dev)) {
4057 dev->driver->irq_handler = gen8_irq_handler; 4368 dev->driver->irq_handler = gen8_irq_handler;
4058 dev->driver->irq_preinstall = gen8_irq_preinstall; 4369 dev->driver->irq_preinstall = gen8_irq_reset;
4059 dev->driver->irq_postinstall = gen8_irq_postinstall; 4370 dev->driver->irq_postinstall = gen8_irq_postinstall;
4060 dev->driver->irq_uninstall = gen8_irq_uninstall; 4371 dev->driver->irq_uninstall = gen8_irq_uninstall;
4061 dev->driver->enable_vblank = gen8_enable_vblank; 4372 dev->driver->enable_vblank = gen8_enable_vblank;
@@ -4063,7 +4374,7 @@ void intel_irq_init(struct drm_device *dev)
4063 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4374 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4064 } else if (HAS_PCH_SPLIT(dev)) { 4375 } else if (HAS_PCH_SPLIT(dev)) {
4065 dev->driver->irq_handler = ironlake_irq_handler; 4376 dev->driver->irq_handler = ironlake_irq_handler;
4066 dev->driver->irq_preinstall = ironlake_irq_preinstall; 4377 dev->driver->irq_preinstall = ironlake_irq_reset;
4067 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4378 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4068 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4379 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4069 dev->driver->enable_vblank = ironlake_enable_vblank; 4380 dev->driver->enable_vblank = ironlake_enable_vblank;
@@ -4121,57 +4432,20 @@ void intel_hpd_init(struct drm_device *dev)
4121} 4432}
4122 4433
4123/* Disable interrupts so we can allow runtime PM. */ 4434/* Disable interrupts so we can allow runtime PM. */
4124void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) 4435void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4125{ 4436{
4126 struct drm_i915_private *dev_priv = dev->dev_private; 4437 struct drm_i915_private *dev_priv = dev->dev_private;
4127 unsigned long irqflags;
4128
4129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4130
4131 dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
4132 dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
4133 dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
4134 dev_priv->pm.regsave.gtier = I915_READ(GTIER);
4135 dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
4136
4137 ironlake_disable_display_irq(dev_priv, 0xffffffff);
4138 ibx_disable_display_interrupt(dev_priv, 0xffffffff);
4139 ilk_disable_gt_irq(dev_priv, 0xffffffff);
4140 snb_disable_pm_irq(dev_priv, 0xffffffff);
4141 4438
4439 dev->driver->irq_uninstall(dev);
4142 dev_priv->pm.irqs_disabled = true; 4440 dev_priv->pm.irqs_disabled = true;
4143
4144 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4145} 4441}
4146 4442
4147/* Restore interrupts so we can recover from runtime PM. */ 4443/* Restore interrupts so we can recover from runtime PM. */
4148void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) 4444void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4149{ 4445{
4150 struct drm_i915_private *dev_priv = dev->dev_private; 4446 struct drm_i915_private *dev_priv = dev->dev_private;
4151 unsigned long irqflags;
4152 uint32_t val;
4153
4154 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4155
4156 val = I915_READ(DEIMR);
4157 WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
4158
4159 val = I915_READ(SDEIMR);
4160 WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
4161
4162 val = I915_READ(GTIMR);
4163 WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
4164
4165 val = I915_READ(GEN6_PMIMR);
4166 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
4167 4447
4168 dev_priv->pm.irqs_disabled = false; 4448 dev_priv->pm.irqs_disabled = false;
4169 4449 dev->driver->irq_preinstall(dev);
4170 ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); 4450 dev->driver->irq_postinstall(dev);
4171 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
4172 ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
4173 snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
4174 I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
4175
4176 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4177} 4451}
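
The Haswell-only register save/restore dance is gone: disabling interrupts for runtime PM is now a full irq_uninstall(), and restoring them is a fresh preinstall + postinstall cycle, which works on any platform and motivates the hsw_ to intel_ rename. A sketch of the bracket a runtime-PM path would form around a suspend (the suspend/resume steps themselves are placeholders):

    intel_runtime_pm_disable_interrupts(dev);  /* full irq_uninstall()     */
    /* ... power the device down, and later bring it back up ...          */
    intel_runtime_pm_restore_interrupts(dev);  /* preinstall + postinstall */
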
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d1d7980f0e01..d05a2afa17dc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ struct i915_params i915 __read_mostly = {
46 .reset = true, 46 .reset = true,
47 .invert_brightness = 0, 47 .invert_brightness = 0,
48 .disable_display = 0, 48 .disable_display = 0,
49 .enable_cmd_parser = 0, 49 .enable_cmd_parser = 1,
50 .disable_vtd_wa = 0,
50}; 51};
51 52
52module_param_named(modeset, i915.modeset, int, 0400); 53module_param_named(modeset, i915.modeset, int, 0400);
@@ -149,6 +150,9 @@ MODULE_PARM_DESC(invert_brightness,
149module_param_named(disable_display, i915.disable_display, bool, 0600); 150module_param_named(disable_display, i915.disable_display, bool, 0600);
150MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); 151MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
151 152
153module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
154MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
155
152module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); 156module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
153MODULE_PARM_DESC(enable_cmd_parser, 157MODULE_PARM_DESC(enable_cmd_parser,
154 "Enable command parsing (1=enabled, 0=disabled [default])"); 158 "Enable command parsing (1=enabled [default], 0=disabled)");
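
Two user-visible knobs change here: the command parser is now on by default, and the new disable_vtd_wa parameter lets users switch off all VT-d workarounds, e.g. by booting with i915.disable_vtd_wa=1. A hedged sketch of how such a flag is typically consulted; the helper and its exact gating are illustrative, not from this patch (intel_iommu_gfx_mapped is the existing VT-d indicator from <linux/intel-iommu.h>):

    /* Illustrative consumer of the new parameter (assumption: real
     * call sites elsewhere gate workarounds on i915.disable_vtd_wa): */
    static bool example_needs_vtd_wa(void)
    {
            if (i915.disable_vtd_wa)        /* user opted out at load time */
                    return false;
            return intel_iommu_gfx_mapped;  /* VT-d is remapping GPU DMA */
    }
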
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c77af69c2d8f..e691b30b2817 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,6 +29,8 @@
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30 30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
33#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
32 34
33#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 35#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
34#define _MASKED_BIT_DISABLE(a) ((a) << 16) 36#define _MASKED_BIT_DISABLE(a) ((a) << 16)
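
_PIPE3() and _PORT3() extend the linear two-point interpolation of _PIPE()/_PORT() to a third register that does not sit at the same stride: indices 0 and 1 still evaluate a + idx*(b - a), while index 2 returns c verbatim. Note the pipe/port argument is used unparenthesized, so callers should pass a plain identifier rather than an expression. A worked expansion with made-up offsets:

    /* Hypothetical register at 0x70000/0x71000 for pipes A/B, but at
     * an irregular 0x74000 for pipe C: */
    #define EXAMPLE_REG(pipe) _PIPE3(pipe, 0x70000, 0x71000, 0x74000)
    /* EXAMPLE_REG(0) == 0x70000, EXAMPLE_REG(1) == 0x71000,
     * EXAMPLE_REG(2) == 0x74000 (not the 0x72000 a fixed stride
     * would give). */
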
@@ -77,13 +79,19 @@
77 79
78/* Graphics reset regs */ 80/* Graphics reset regs */
79#define I965_GDRST 0xc0 /* PCI config register */ 81#define I965_GDRST 0xc0 /* PCI config register */
80#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
81#define GRDOM_FULL (0<<2) 82#define GRDOM_FULL (0<<2)
82#define GRDOM_RENDER (1<<2) 83#define GRDOM_RENDER (1<<2)
83#define GRDOM_MEDIA (3<<2) 84#define GRDOM_MEDIA (3<<2)
84#define GRDOM_MASK (3<<2) 85#define GRDOM_MASK (3<<2)
85#define GRDOM_RESET_ENABLE (1<<0) 86#define GRDOM_RESET_ENABLE (1<<0)
86 87
88#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
89#define ILK_GRDOM_FULL (0<<1)
90#define ILK_GRDOM_RENDER (1<<1)
91#define ILK_GRDOM_MEDIA (3<<1)
92#define ILK_GRDOM_MASK (3<<1)
93#define ILK_GRDOM_RESET_ENABLE (1<<0)
94
87#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 95#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
88#define GEN6_MBC_SNPCR_SHIFT 21 96#define GEN6_MBC_SNPCR_SHIFT 21
89#define GEN6_MBC_SNPCR_MASK (3<<21) 97#define GEN6_MBC_SNPCR_MASK (3<<21)
@@ -92,6 +100,9 @@
92#define GEN6_MBC_SNPCR_LOW (2<<21) 100#define GEN6_MBC_SNPCR_LOW (2<<21)
93#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ 101#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
94 102
103#define VLV_G3DCTL 0x9024
104#define VLV_GSCKGCTL 0x9028
105
95#define GEN6_MBCTL 0x0907c 106#define GEN6_MBCTL 0x0907c
96#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) 107#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
97#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) 108#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
@@ -190,6 +201,8 @@
190 * Memory interface instructions used by the kernel 201 * Memory interface instructions used by the kernel
191 */ 202 */
192#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) 203#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
204/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
205#define MI_GLOBAL_GTT (1<<22)
193 206
194#define MI_NOOP MI_INSTR(0, 0) 207#define MI_NOOP MI_INSTR(0, 0)
195#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) 208#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
@@ -244,7 +257,8 @@
244#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ 257#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
245#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ 258#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
246#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ 259#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
247#define MI_SEMAPHORE_SYNC_INVALID (3<<16) 260#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
261#define MI_SEMAPHORE_SYNC_MASK (3<<16)
248#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 262#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
249#define MI_MM_SPACE_GTT (1<<8) 263#define MI_MM_SPACE_GTT (1<<8)
250#define MI_MM_SPACE_PHYSICAL (0<<8) 264#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -262,13 +276,16 @@
262 * - One can actually load arbitrary many arbitrary registers: Simply issue x 276 * - One can actually load arbitrary many arbitrary registers: Simply issue x
263 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 277 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
264 */ 278 */
265#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 279#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
266#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 280#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
281#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
267#define MI_SRM_LRM_GLOBAL_GTT (1<<22) 282#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
268#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 283#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
269#define MI_FLUSH_DW_STORE_INDEX (1<<21) 284#define MI_FLUSH_DW_STORE_INDEX (1<<21)
270#define MI_INVALIDATE_TLB (1<<18) 285#define MI_INVALIDATE_TLB (1<<18)
271#define MI_FLUSH_DW_OP_STOREDW (1<<14) 286#define MI_FLUSH_DW_OP_STOREDW (1<<14)
287#define MI_FLUSH_DW_OP_MASK (3<<14)
288#define MI_FLUSH_DW_NOTIFY (1<<8)
272#define MI_INVALIDATE_BSD (1<<7) 289#define MI_INVALIDATE_BSD (1<<7)
273#define MI_FLUSH_DW_USE_GTT (1<<2) 290#define MI_FLUSH_DW_USE_GTT (1<<2)
274#define MI_FLUSH_DW_USE_PPGTT (0<<2) 291#define MI_FLUSH_DW_USE_PPGTT (0<<2)
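
The added parentheses in MI_LOAD_REGISTER_IMM()/MI_STORE_REGISTER_MEM() matter as soon as the argument is an expression rather than a constant. With x = n + 1:

    /* old:  MI_INSTR(0x22, 2*n + 1 - 1)   -> length field 2n   (wrong)
     * new:  MI_INSTR(0x22, 2*(n + 1) - 1) -> length field 2n+1 (right:
     *       n+1 address/value pairs, biased DWORD count) */
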
@@ -330,9 +347,12 @@
330#define DISPLAY_PLANE_B (1<<20) 347#define DISPLAY_PLANE_B (1<<20)
331#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) 348#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
332#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ 349#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
350#define PIPE_CONTROL_MMIO_WRITE (1<<23)
351#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
333#define PIPE_CONTROL_CS_STALL (1<<20) 352#define PIPE_CONTROL_CS_STALL (1<<20)
334#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 353#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
335#define PIPE_CONTROL_QW_WRITE (1<<14) 354#define PIPE_CONTROL_QW_WRITE (1<<14)
355#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
336#define PIPE_CONTROL_DEPTH_STALL (1<<13) 356#define PIPE_CONTROL_DEPTH_STALL (1<<13)
337#define PIPE_CONTROL_WRITE_FLUSH (1<<12) 357#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
338#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ 358#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
@@ -347,6 +367,94 @@
347#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) 367#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
348#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ 368#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
349 369
370/*
371 * Commands used only by the command parser
372 */
373#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
374#define MI_ARB_CHECK MI_INSTR(0x05, 0)
375#define MI_RS_CONTROL MI_INSTR(0x06, 0)
376#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
377#define MI_PREDICATE MI_INSTR(0x0C, 0)
378#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
379#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
380#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
381#define MI_URB_CLEAR MI_INSTR(0x19, 0)
382#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
383#define MI_CLFLUSH MI_INSTR(0x27, 0)
384#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
385#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
386#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
387#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
388#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
389#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
390#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
391#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
392
393#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
394#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
395#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
396#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
397#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
398#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
399#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
400 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
401#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
402 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
403#define GFX_OP_3DSTATE_SO_DECL_LIST \
404 ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
405
406#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
407 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
408#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
409 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
410#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
411 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
412#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
413 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
414#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
415 ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
416
417#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
418
419#define COLOR_BLT ((0x2<<29)|(0x40<<22))
420#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
421
422/*
423 * Registers used only by the command parser
424 */
425#define BCS_SWCTRL 0x22200
426
427#define HS_INVOCATION_COUNT 0x2300
428#define DS_INVOCATION_COUNT 0x2308
429#define IA_VERTICES_COUNT 0x2310
430#define IA_PRIMITIVES_COUNT 0x2318
431#define VS_INVOCATION_COUNT 0x2320
432#define GS_INVOCATION_COUNT 0x2328
433#define GS_PRIMITIVES_COUNT 0x2330
434#define CL_INVOCATION_COUNT 0x2338
435#define CL_PRIMITIVES_COUNT 0x2340
436#define PS_INVOCATION_COUNT 0x2348
437#define PS_DEPTH_COUNT 0x2350
438
 439/* There are 4 64-bit counter registers, one for each stream output */
440#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
441
442#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
443
444#define GEN7_3DPRIM_END_OFFSET 0x2420
445#define GEN7_3DPRIM_START_VERTEX 0x2430
446#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
447#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
448#define GEN7_3DPRIM_START_INSTANCE 0x243C
449#define GEN7_3DPRIM_BASE_VERTEX 0x2440
450
451#define OACONTROL 0x2360
452
453#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
454#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
455#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
456 _GEN7_PIPEA_DE_LOAD_SL, \
457 _GEN7_PIPEB_DE_LOAD_SL)
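
Everything in this block is encoded with MI_INSTR(opcode, flags) = (opcode << 23) | flags, so the command parser can classify a batch-buffer DWORD by masking out the opcode field. A worked decode for one entry:

    u32 header = 0x11800000;            /* MI_INSTR(0x23, 0), i.e. MI_UPDATE_GTT */
    u32 opcode = (header >> 23) & 0x3f; /* == 0x23 */
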
350 458
351/* 459/*
352 * Reset registers 460 * Reset registers
@@ -370,6 +478,7 @@
370#define IOSF_PORT_PUNIT 0x4 478#define IOSF_PORT_PUNIT 0x4
371#define IOSF_PORT_NC 0x11 479#define IOSF_PORT_NC 0x11
372#define IOSF_PORT_DPIO 0x12 480#define IOSF_PORT_DPIO 0x12
481#define IOSF_PORT_DPIO_2 0x1a
373#define IOSF_PORT_GPIO_NC 0x13 482#define IOSF_PORT_GPIO_NC 0x13
374#define IOSF_PORT_CCK 0x14 483#define IOSF_PORT_CCK 0x14
375#define IOSF_PORT_CCU 0xA9 484#define IOSF_PORT_CCU 0xA9
@@ -381,9 +490,6 @@
381/* See configdb bunit SB addr map */ 490/* See configdb bunit SB addr map */
382#define BUNIT_REG_BISOC 0x11 491#define BUNIT_REG_BISOC 0x11
383 492
384#define PUNIT_OPCODE_REG_READ 6
385#define PUNIT_OPCODE_REG_WRITE 7
386
387#define PUNIT_REG_DSPFREQ 0x36 493#define PUNIT_REG_DSPFREQ 0x36
388#define DSPFREQSTAT_SHIFT 30 494#define DSPFREQSTAT_SHIFT 30
389#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) 495#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
@@ -469,16 +575,91 @@ enum punit_power_well {
469#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) 575#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
470#define CCK_DISPLAY_CLOCK_CONTROL 0x6b 576#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
471 577
472/* 578/**
473 * DPIO - a special bus for various display related registers to hide behind 579 * DOC: DPIO
580 *
581 * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
582 * ports. DPIO is the name given to such a display PHY. These PHYs
583 * don't follow the standard programming model using direct MMIO
 584 * registers, and instead their registers must be accessed through IOSF
585 * sideband. VLV has one such PHY for driving ports B and C, and CHV
586 * adds another PHY for driving port D. Each PHY responds to a
587 * specific IOSF-SB port.
588 *
589 * Each display PHY is made up of one or two channels. Each channel
590 * houses a common lane part which contains the PLL and other common
591 * logic. CH0 common lane also contains the IOSF-SB logic for the
592 * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
593 * must be running when any DPIO registers are accessed.
594 *
595 * In addition to having their own registers, the PHYs are also
596 * controlled through some dedicated signals from the display
597 * controller. These include PLL reference clock enable, PLL enable,
598 * and CRI clock selection, for example.
474 * 599 *
475 * DPIO is VLV only. 600 * Each channel also has two splines (also called data lanes), and
601 * each spline is made up of one Physical Access Coding Sub-Layer
602 * (PCS) block and two TX lanes. So each channel has two PCS blocks
603 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
604 * data/clock pairs depending on the output type.
605 *
606 * Additionally, the PHY contains an AUX lane with AUX blocks
607 * for each channel. This is used for DP AUX communication, but
608 * this fact isn't really relevant for the driver since AUX is
609 * controlled from the display controller side. No DPIO registers
610 * need to be accessed during AUX communication.
611 *
612 * Generally the common lane corresponds to the pipe and
613 * the spline (PCS/TX) corresponds to the port.
614 *
615 * For dual channel PHY (VLV/CHV):
616 *
617 * pipe A == CMN/PLL/REF CH0
618 *
619 * pipe B == CMN/PLL/REF CH1
620 *
621 * port B == PCS/TX CH0
622 *
623 * port C == PCS/TX CH1
624 *
625 * This is especially important when we cross the streams
626 * i.e. drive port B with pipe B, or port C with pipe A.
627 *
628 * For single channel PHY (CHV):
629 *
630 * pipe C == CMN/PLL/REF CH0
631 *
632 * port D == PCS/TX CH0
633 *
634 * Note: digital port B is DDI0, digital port C is DDI1,
635 * digital port D is DDI2
636 */
637/*
638 * Dual channel PHY (VLV/CHV)
639 * ---------------------------------
640 * | CH0 | CH1 |
641 * | CMN/PLL/REF | CMN/PLL/REF |
642 * |---------------|---------------| Display PHY
643 * | PCS01 | PCS23 | PCS01 | PCS23 |
644 * |-------|-------|-------|-------|
645 * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
646 * ---------------------------------
647 * | DDI0 | DDI1 | DP/HDMI ports
648 * ---------------------------------
476 * 649 *
477 * Note: digital port B is DDI0, digital pot C is DDI1 650 * Single channel PHY (CHV)
651 * -----------------
652 * | CH0 |
653 * | CMN/PLL/REF |
654 * |---------------| Display PHY
655 * | PCS01 | PCS23 |
656 * |-------|-------|
657 * |TX0|TX1|TX2|TX3|
658 * -----------------
659 * | DDI2 | DP/HDMI port
660 * -----------------
478 */ 661 */
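(Illustration, not part of the patch: because these registers sit behind IOSF-SB rather than plain MMIO, they are reached via the driver's sideband helpers instead of I915_READ(). A minimal sketch, assuming the vlv_dpio_read()/vlv_dpio_write() helpers and dpio_lock as they exist in i915 at this point; the function itself is hypothetical, using the VLV_PCS_DW1/CHV_PCS_REQ_SOFTRESET_EN definitions below.)

static void chv_pcs_request_softreset(struct drm_i915_private *dev_priv,
				      enum pipe pipe, int ch)
{
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);
	/* Read-modify-write of a PCS register over the IOSF sideband. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val);
	mutex_unlock(&dev_priv->dpio_lock);
}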
479#define DPIO_DEVFN 0 662#define DPIO_DEVFN 0
480#define DPIO_OPCODE_REG_WRITE 1
481#define DPIO_OPCODE_REG_READ 0
482 663
483#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) 664#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
484#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 665#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
@@ -555,14 +736,29 @@ enum punit_power_well {
555#define DPIO_PCS_TX_LANE1_RESET (1<<7) 736#define DPIO_PCS_TX_LANE1_RESET (1<<7)
556#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) 737#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
557 738
739#define _VLV_PCS01_DW0_CH0 0x200
740#define _VLV_PCS23_DW0_CH0 0x400
741#define _VLV_PCS01_DW0_CH1 0x2600
742#define _VLV_PCS23_DW0_CH1 0x2800
743#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
744#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
745
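(For reference: _PORT(), defined earlier in this header, picks between the two per-channel addresses; assuming the usual _PORT(port, a, b) = (a) + (port)*((b)-(a)) definition, the group-0 register for channel 1 resolves as:)

	VLV_PCS01_DW0(1)
		= _PORT(1, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
		= 0x200 + 1 * (0x2600 - 0x200)
		= 0x2600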
558#define _VLV_PCS_DW1_CH0 0x8204 746#define _VLV_PCS_DW1_CH0 0x8204
559#define _VLV_PCS_DW1_CH1 0x8404 747#define _VLV_PCS_DW1_CH1 0x8404
748#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
560#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22) 749#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
561#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21) 750#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
562#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6) 751#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
563#define DPIO_PCS_CLK_SOFT_RESET (1<<5) 752#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
564#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1) 753#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
565 754
755#define _VLV_PCS01_DW1_CH0 0x204
756#define _VLV_PCS23_DW1_CH0 0x404
757#define _VLV_PCS01_DW1_CH1 0x2604
758#define _VLV_PCS23_DW1_CH1 0x2804
759#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
760#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
761
566#define _VLV_PCS_DW8_CH0 0x8220 762#define _VLV_PCS_DW8_CH0 0x8220
567#define _VLV_PCS_DW8_CH1 0x8420 763#define _VLV_PCS_DW8_CH1 0x8420
568#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) 764#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
@@ -578,6 +774,19 @@ enum punit_power_well {
578#define _VLV_PCS_DW9_CH1 0x8424 774#define _VLV_PCS_DW9_CH1 0x8424
579#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) 775#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
580 776
777#define _CHV_PCS_DW10_CH0 0x8228
778#define _CHV_PCS_DW10_CH1 0x8428
779#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
780#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
781#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
782
783#define _VLV_PCS01_DW10_CH0 0x0228
784#define _VLV_PCS23_DW10_CH0 0x0428
785#define _VLV_PCS01_DW10_CH1 0x2628
786#define _VLV_PCS23_DW10_CH1 0x2828
787#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
788#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
789
581#define _VLV_PCS_DW11_CH0 0x822c 790#define _VLV_PCS_DW11_CH0 0x822c
582#define _VLV_PCS_DW11_CH1 0x842c 791#define _VLV_PCS_DW11_CH1 0x842c
583#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) 792#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
@@ -596,14 +805,21 @@ enum punit_power_well {
596 805
597#define _VLV_TX_DW2_CH0 0x8288 806#define _VLV_TX_DW2_CH0 0x8288
598#define _VLV_TX_DW2_CH1 0x8488 807#define _VLV_TX_DW2_CH1 0x8488
808#define DPIO_SWING_MARGIN_SHIFT 16
809#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
810#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
599#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) 811#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
600 812
601#define _VLV_TX_DW3_CH0 0x828c 813#define _VLV_TX_DW3_CH0 0x828c
602#define _VLV_TX_DW3_CH1 0x848c 814#define _VLV_TX_DW3_CH1 0x848c
815/* The following bit is for the CHV phy */
816#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
603#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) 817#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
604 818
605#define _VLV_TX_DW4_CH0 0x8290 819#define _VLV_TX_DW4_CH0 0x8290
606#define _VLV_TX_DW4_CH1 0x8490 820#define _VLV_TX_DW4_CH1 0x8490
821#define DPIO_SWING_DEEMPH9P5_SHIFT 24
822#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
607#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) 823#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
608 824
609#define _VLV_TX3_DW4_CH0 0x690 825#define _VLV_TX3_DW4_CH0 0x690
@@ -623,6 +839,73 @@ enum punit_power_well {
623#define _VLV_TX_DW14_CH1 0x84b8 839#define _VLV_TX_DW14_CH1 0x84b8
624#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1) 840#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
625 841
842/* CHV dpPhy registers */
843#define _CHV_PLL_DW0_CH0 0x8000
844#define _CHV_PLL_DW0_CH1 0x8180
845#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
846
847#define _CHV_PLL_DW1_CH0 0x8004
848#define _CHV_PLL_DW1_CH1 0x8184
849#define DPIO_CHV_N_DIV_SHIFT 8
850#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
851#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
852
853#define _CHV_PLL_DW2_CH0 0x8008
854#define _CHV_PLL_DW2_CH1 0x8188
855#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
856
857#define _CHV_PLL_DW3_CH0 0x800c
858#define _CHV_PLL_DW3_CH1 0x818c
859#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
860#define DPIO_CHV_FIRST_MOD (0 << 8)
861#define DPIO_CHV_SECOND_MOD (1 << 8)
862#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
863#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
864
865#define _CHV_PLL_DW6_CH0 0x8018
866#define _CHV_PLL_DW6_CH1 0x8198
867#define DPIO_CHV_GAIN_CTRL_SHIFT 16
868#define DPIO_CHV_INT_COEFF_SHIFT 8
869#define DPIO_CHV_PROP_COEFF_SHIFT 0
870#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
871
872#define _CHV_CMN_DW13_CH0 0x8134
873#define _CHV_CMN_DW0_CH1 0x8080
874#define DPIO_CHV_S1_DIV_SHIFT 21
875#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
876#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
877#define DPIO_CHV_K_DIV_SHIFT 4
878#define DPIO_PLL_FREQLOCK (1 << 1)
879#define DPIO_PLL_LOCK (1 << 0)
880#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
881
882#define _CHV_CMN_DW14_CH0 0x8138
883#define _CHV_CMN_DW1_CH1 0x8084
884#define DPIO_AFC_RECAL (1 << 14)
885#define DPIO_DCLKP_EN (1 << 13)
886#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
887
888#define CHV_CMN_DW30 0x8178
889#define DPIO_LRC_BYPASS (1 << 3)
890
891#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
892 (lane) * 0x200 + (offset))
893
894#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
895#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
896#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
897#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
898#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
899#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
900#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
901#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
902#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
903#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
904#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
905#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
906#define DPIO_FRC_LATENCY_SHIFT 8
907#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
908#define DPIO_UPAR_SHIFT 30
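(Worked example of the per-lane addressing above, assuming the 0x2400 channel stride and 0x200 lane stride encoded in _TXLANE():)

	CHV_TX_DW4(1, 2)
		= _TXLANE(1, 2, 0x90)
		= 0x2400 + 2 * 0x200 + 0x90
		= 0x2890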
626/* 909/*
627 * Fence registers 910 * Fence registers
628 */ 911 */
@@ -663,6 +946,7 @@ enum punit_power_well {
663#define RENDER_RING_BASE 0x02000 946#define RENDER_RING_BASE 0x02000
664#define BSD_RING_BASE 0x04000 947#define BSD_RING_BASE 0x04000
665#define GEN6_BSD_RING_BASE 0x12000 948#define GEN6_BSD_RING_BASE 0x12000
949#define GEN8_BSD2_RING_BASE 0x1c000
666#define VEBOX_RING_BASE 0x1a000 950#define VEBOX_RING_BASE 0x1a000
667#define BLT_RING_BASE 0x22000 951#define BLT_RING_BASE 0x22000
668#define RING_TAIL(base) ((base)+0x30) 952#define RING_TAIL(base) ((base)+0x30)
@@ -688,9 +972,20 @@ enum punit_power_well {
688#define RING_MAX_IDLE(base) ((base)+0x54) 972#define RING_MAX_IDLE(base) ((base)+0x54)
689#define RING_HWS_PGA(base) ((base)+0x80) 973#define RING_HWS_PGA(base) ((base)+0x80)
690#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 974#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
691#define ARB_MODE 0x04030 975
976#define GEN7_WR_WATERMARK 0x4028
977#define GEN7_GFX_PRIO_CTRL 0x402C
978#define ARB_MODE 0x4030
692#define ARB_MODE_SWIZZLE_SNB (1<<4) 979#define ARB_MODE_SWIZZLE_SNB (1<<4)
693#define ARB_MODE_SWIZZLE_IVB (1<<5) 980#define ARB_MODE_SWIZZLE_IVB (1<<5)
981#define GEN7_GFX_PEND_TLB0 0x4034
982#define GEN7_GFX_PEND_TLB1 0x4038
983/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
984#define GEN7_LRA_LIMITS_BASE 0x403C
985#define GEN7_LRA_LIMITS_REG_NUM 13
986#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
987#define GEN7_GFX_MAX_REQ_COUNT 0x4074
988
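(Aside: GEN7_LRA_LIMITS_BASE plus GEN7_LRA_LIMITS_REG_NUM describes a bank of 13 consecutive dwords; a hedged sketch of walking it, assuming the standard I915_READ() accessor — the helper name is hypothetical.)

static void read_gen7_lra_limits(struct drm_i915_private *dev_priv,
				 u32 lra[GEN7_LRA_LIMITS_REG_NUM])
{
	int i;

	/* L3/CVS/ZTLB/RCC/CASC LRA min,max values, 4 bytes apart. */
	for (i = 0; i < GEN7_LRA_LIMITS_REG_NUM; i++)
		lra[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
}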
694#define GAMTARBMODE 0x04a08 989#define GAMTARBMODE 0x04a08
695#define ARB_MODE_BWGTLB_DISABLE (1<<9) 990#define ARB_MODE_BWGTLB_DISABLE (1<<9)
696#define ARB_MODE_SWIZZLE_BDW (1<<1) 991#define ARB_MODE_SWIZZLE_BDW (1<<1)
@@ -725,6 +1020,9 @@ enum punit_power_well {
725#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ 1020#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
726#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ 1021#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
727#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ 1022#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
1023
1024#define GEN7_TLB_RD_ADDR 0x4700
1025
728#if 0 1026#if 0
729#define PRB0_TAIL 0x02030 1027#define PRB0_TAIL 0x02030
730#define PRB0_HEAD 0x02034 1028#define PRB0_HEAD 0x02034
@@ -748,6 +1046,7 @@ enum punit_power_well {
748#define RING_INSTDONE(base) ((base)+0x6c) 1046#define RING_INSTDONE(base) ((base)+0x6c)
749#define RING_INSTPS(base) ((base)+0x70) 1047#define RING_INSTPS(base) ((base)+0x70)
750#define RING_DMA_FADD(base) ((base)+0x78) 1048#define RING_DMA_FADD(base) ((base)+0x78)
1049#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
751#define RING_INSTPM(base) ((base)+0xc0) 1050#define RING_INSTPM(base) ((base)+0xc0)
752#define RING_MI_MODE(base) ((base)+0x9c) 1051#define RING_MI_MODE(base) ((base)+0x9c)
753#define INSTPS 0x02070 /* 965+ only */ 1052#define INSTPS 0x02070 /* 965+ only */
@@ -842,21 +1141,26 @@ enum punit_power_well {
842#define GFX_MODE_GEN7 0x0229c 1141#define GFX_MODE_GEN7 0x0229c
843#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) 1142#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
844#define GFX_RUN_LIST_ENABLE (1<<15) 1143#define GFX_RUN_LIST_ENABLE (1<<15)
845#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) 1144#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
846#define GFX_SURFACE_FAULT_ENABLE (1<<12) 1145#define GFX_SURFACE_FAULT_ENABLE (1<<12)
847#define GFX_REPLAY_MODE (1<<11) 1146#define GFX_REPLAY_MODE (1<<11)
848#define GFX_PSMI_GRANULARITY (1<<10) 1147#define GFX_PSMI_GRANULARITY (1<<10)
849#define GFX_PPGTT_ENABLE (1<<9) 1148#define GFX_PPGTT_ENABLE (1<<9)
850 1149
851#define VLV_DISPLAY_BASE 0x180000 1150#define VLV_DISPLAY_BASE 0x180000
1151#define VLV_MIPI_BASE VLV_DISPLAY_BASE
852 1152
1153#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
1154#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
853#define SCPD0 0x0209c /* 915+ only */ 1155#define SCPD0 0x0209c /* 915+ only */
854#define IER 0x020a0 1156#define IER 0x020a0
855#define IIR 0x020a4 1157#define IIR 0x020a4
856#define IMR 0x020a8 1158#define IMR 0x020a8
857#define ISR 0x020ac 1159#define ISR 0x020ac
858#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) 1160#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
1161#define GINT_DIS (1<<22)
859#define GCFG_DIS (1<<8) 1162#define GCFG_DIS (1<<8)
1163#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
860#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) 1164#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
861#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) 1165#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
862#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) 1166#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
@@ -875,7 +1179,7 @@ enum punit_power_well {
875#define I915_ERROR_INSTRUCTION (1<<0) 1179#define I915_ERROR_INSTRUCTION (1<<0)
876#define INSTPM 0x020c0 1180#define INSTPM 0x020c0
877#define INSTPM_SELF_EN (1<<12) /* 915GM only */ 1181#define INSTPM_SELF_EN (1<<12) /* 915GM only */
878#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts 1182#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
879 will not assert AGPBUSY# and will only 1183 will not assert AGPBUSY# and will only
880 be delivered when out of C3. */ 1184 be delivered when out of C3. */
881#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ 1185#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
@@ -956,6 +1260,10 @@ enum punit_power_well {
956#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ 1260#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
957#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 1261#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
958 1262
1263#define MI_STATE 0x020e4 /* gen2 only */
1264#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
1265#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
1266
959#define CACHE_MODE_0 0x02120 /* 915+ only */ 1267#define CACHE_MODE_0 0x02120 /* 915+ only */
960#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) 1268#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
961#define CM0_IZ_OPT_DISABLE (1<<6) 1269#define CM0_IZ_OPT_DISABLE (1<<6)
@@ -973,6 +1281,7 @@ enum punit_power_well {
973#define ECO_FLIP_DONE (1<<0) 1281#define ECO_FLIP_DONE (1<<0)
974 1282
975#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ 1283#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
1284#define RC_OP_FLUSH_ENABLE (1<<0)
976#define HIZ_RAW_STALL_OPT_DISABLE (1<<2) 1285#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
977#define CACHE_MODE_1 0x7004 /* IVB+ */ 1286#define CACHE_MODE_1 0x7004 /* IVB+ */
978#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 1287#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
@@ -984,6 +1293,7 @@ enum punit_power_well {
984 1293
985#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 1294#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
986#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 1295#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1296#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
987 1297
988#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 1298#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
989#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 1299#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
@@ -1024,24 +1334,43 @@ enum punit_power_well {
1024 1334
1025/* These are all the "old" interrupts */ 1335/* These are all the "old" interrupts */
1026#define ILK_BSD_USER_INTERRUPT (1<<5) 1336#define ILK_BSD_USER_INTERRUPT (1<<5)
1337
1338#define I915_PM_INTERRUPT (1<<31)
1339#define I915_ISP_INTERRUPT (1<<22)
1340#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
1341#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
1342#define I915_MIPIB_INTERRUPT (1<<19)
1343#define I915_MIPIA_INTERRUPT (1<<18)
1027#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 1344#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
1028#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 1345#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
1346#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
1347#define I915_MASTER_ERROR_INTERRUPT (1<<15)
1029#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 1348#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
1349#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
1030#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ 1350#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
1351#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
1031#define I915_HWB_OOM_INTERRUPT (1<<13) 1352#define I915_HWB_OOM_INTERRUPT (1<<13)
1353#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
1032#define I915_SYNC_STATUS_INTERRUPT (1<<12) 1354#define I915_SYNC_STATUS_INTERRUPT (1<<12)
1355#define I915_MISC_INTERRUPT (1<<11)
1033#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 1356#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
1357#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
1034#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) 1358#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
1359#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
1035#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) 1360#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
1361#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
1036#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) 1362#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
1037#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) 1363#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
1038#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) 1364#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
1039#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) 1365#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
1040#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) 1366#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
1367#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
1368#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
1041#define I915_DEBUG_INTERRUPT (1<<2) 1369#define I915_DEBUG_INTERRUPT (1<<2)
1370#define I915_WINVALID_INTERRUPT (1<<1)
1042#define I915_USER_INTERRUPT (1<<1) 1371#define I915_USER_INTERRUPT (1<<1)
1043#define I915_ASLE_INTERRUPT (1<<0) 1372#define I915_ASLE_INTERRUPT (1<<0)
1044#define I915_BSD_USER_INTERRUPT (1 << 25) 1373#define I915_BSD_USER_INTERRUPT (1<<25)
1045 1374
1046#define GEN6_BSD_RNCID 0x12198 1375#define GEN6_BSD_RNCID 0x12198
1047 1376
@@ -1198,6 +1527,7 @@ enum punit_power_well {
1198#define GMBUS_PORT_SSC 1 1527#define GMBUS_PORT_SSC 1
1199#define GMBUS_PORT_VGADDC 2 1528#define GMBUS_PORT_VGADDC 2
1200#define GMBUS_PORT_PANEL 3 1529#define GMBUS_PORT_PANEL 3
1530#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
1201#define GMBUS_PORT_DPC 4 /* HDMIC */ 1531#define GMBUS_PORT_DPC 4 /* HDMIC */
1202#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ 1532#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
1203#define GMBUS_PORT_DPD 6 /* HDMID */ 1533#define GMBUS_PORT_DPD 6 /* HDMID */
@@ -1239,6 +1569,7 @@ enum punit_power_well {
1239 */ 1569 */
1240#define DPLL_A_OFFSET 0x6014 1570#define DPLL_A_OFFSET 0x6014
1241#define DPLL_B_OFFSET 0x6018 1571#define DPLL_B_OFFSET 0x6018
1572#define CHV_DPLL_C_OFFSET 0x6030
1242#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ 1573#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
1243 dev_priv->info.display_mmio_offset) 1574 dev_priv->info.display_mmio_offset)
1244 1575
@@ -1273,10 +1604,23 @@ enum punit_power_well {
1273#define DPLL_LOCK_VLV (1<<15) 1604#define DPLL_LOCK_VLV (1<<15)
1274#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) 1605#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
1275#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) 1606#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
1607#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
1276#define DPLL_PORTC_READY_MASK (0xf << 4) 1608#define DPLL_PORTC_READY_MASK (0xf << 4)
1277#define DPLL_PORTB_READY_MASK (0xf) 1609#define DPLL_PORTB_READY_MASK (0xf)
1278 1610
1279#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 1611#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
1612
1613/* Additional CHV pll/phy registers */
1614#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
1615#define DPLL_PORTD_READY_MASK (0xf)
1616#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
1617#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
1618 ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
1619#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
1620 ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
1621#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
1622#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
1623
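(Illustration only: these CHV PHY control bits are read-modify-write, and power-good is polled from DISPLAY_PHY_STATUS. A sketch assuming the wait_for() helper from intel_drv.h and the DPIO_PHY0/DPIO_PHY1 identifiers these macros refer to; the function itself is hypothetical.)

static int chv_phy_com_lane_powerup(struct drm_i915_private *dev_priv, int phy)
{
	u32 val;

	val = I915_READ(DISPLAY_PHY_CONTROL);
	I915_WRITE(DISPLAY_PHY_CONTROL, PHY_COM_LANE_RESET_DEASSERT(phy, val));

	/* Wait up to 20ms for the chosen PHY to report power-good. */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 20))
		return -ETIMEDOUT;
	return 0;
}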
1280/* 1624/*
1281 * The i830 generation, in LVDS mode, defines P1 as the bit number set within 1625 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
1282 * this field (only one bit may be set). 1626 * this field (only one bit may be set).
@@ -1317,6 +1661,7 @@ enum punit_power_well {
1317 1661
1318#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ 1662#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
1319#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ 1663#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
1664#define CHV_DPLL_C_MD_OFFSET 0x603c
1320#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ 1665#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
1321 dev_priv->info.display_mmio_offset) 1666 dev_priv->info.display_mmio_offset)
1322 1667
@@ -1416,7 +1761,7 @@ enum punit_power_well {
1416# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ 1761# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
1417# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) 1762# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
1418# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) 1763# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
1419/** 1764/*
1420 * This bit must be set on the 830 to prevent hangs when turning off the 1765 * This bit must be set on the 830 to prevent hangs when turning off the
1421 * overlay scaler. 1766 * overlay scaler.
1422 */ 1767 */
@@ -1436,12 +1781,12 @@ enum punit_power_well {
1436# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) 1781# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
1437# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) 1782# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
1438# define MAG_CLOCK_GATE_DISABLE (1 << 5) 1783# define MAG_CLOCK_GATE_DISABLE (1 << 5)
1439/** This bit must be unset on 855,865 */ 1784/* This bit must be unset on 855,865 */
1440# define MECI_CLOCK_GATE_DISABLE (1 << 4) 1785# define MECI_CLOCK_GATE_DISABLE (1 << 4)
1441# define DCMP_CLOCK_GATE_DISABLE (1 << 3) 1786# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
1442# define MEC_CLOCK_GATE_DISABLE (1 << 2) 1787# define MEC_CLOCK_GATE_DISABLE (1 << 2)
1443# define MECO_CLOCK_GATE_DISABLE (1 << 1) 1788# define MECO_CLOCK_GATE_DISABLE (1 << 1)
1444/** This bit must be set on 855,865. */ 1789/* This bit must be set on 855,865. */
1445# define SV_CLOCK_GATE_DISABLE (1 << 0) 1790# define SV_CLOCK_GATE_DISABLE (1 << 0)
1446# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) 1791# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
1447# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) 1792# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
@@ -1462,14 +1807,14 @@ enum punit_power_well {
1462# define I915_BY_CLOCK_GATE_DISABLE (1 << 0) 1807# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
1463 1808
1464# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) 1809# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
1465/** This bit must always be set on 965G/965GM */ 1810/* This bit must always be set on 965G/965GM */
1466# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) 1811# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
1467# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) 1812# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
1468# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) 1813# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
1469# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) 1814# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
1470# define I965_GW_CLOCK_GATE_DISABLE (1 << 25) 1815# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
1471# define I965_TD_CLOCK_GATE_DISABLE (1 << 24) 1816# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
1472/** This bit must always be set on 965G */ 1817/* This bit must always be set on 965G */
1473# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) 1818# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
1474# define I965_IC_CLOCK_GATE_DISABLE (1 << 22) 1819# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
1475# define I965_EU_CLOCK_GATE_DISABLE (1 << 21) 1820# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
@@ -1494,6 +1839,10 @@ enum punit_power_well {
1494#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) 1839#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
1495#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) 1840#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
1496#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) 1841#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
1842
1843#define VDECCLK_GATE_D 0x620C /* g4x only */
1844#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
1845
1497#define RAMCLK_GATE_D 0x6210 /* CRL only */ 1846#define RAMCLK_GATE_D 0x6210 /* CRL only */
1498#define DEUC 0x6214 /* CRL only */ 1847#define DEUC 0x6214 /* CRL only */
1499 1848
@@ -1513,6 +1862,7 @@ enum punit_power_well {
1513 */ 1862 */
1514#define PALETTE_A_OFFSET 0xa000 1863#define PALETTE_A_OFFSET 0xa000
1515#define PALETTE_B_OFFSET 0xa800 1864#define PALETTE_B_OFFSET 0xa800
1865#define CHV_PALETTE_C_OFFSET 0xc000
1516#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ 1866#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
1517 dev_priv->info.display_mmio_offset) 1867 dev_priv->info.display_mmio_offset)
1518 1868
@@ -1535,7 +1885,7 @@ enum punit_power_well {
1535/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */ 1885/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
1536#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) 1886#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
1537 1887
1538/** 915-945 and GM965 MCH register controlling DRAM channel access */ 1888/* 915-945 and GM965 MCH register controlling DRAM channel access */
1539#define DCC 0x10200 1889#define DCC 0x10200
1540#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 1890#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
1541#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 1891#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
@@ -1544,15 +1894,15 @@ enum punit_power_well {
1544#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 1894#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
1545#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 1895#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
1546 1896
1547/** Pineview MCH register contains DDR3 setting */ 1897/* Pineview MCH register contains DDR3 setting */
1548#define CSHRDDR3CTL 0x101a8 1898#define CSHRDDR3CTL 0x101a8
1549#define CSHRDDR3CTL_DDR3 (1 << 2) 1899#define CSHRDDR3CTL_DDR3 (1 << 2)
1550 1900
1551/** 965 MCH register controlling DRAM channel configuration */ 1901/* 965 MCH register controlling DRAM channel configuration */
1552#define C0DRB3 0x10206 1902#define C0DRB3 0x10206
1553#define C1DRB3 0x10606 1903#define C1DRB3 0x10606
1554 1904
1555/** snb MCH registers for reading the DRAM channel configuration */ 1905/* snb MCH registers for reading the DRAM channel configuration */
1556#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) 1906#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
1557#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) 1907#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
1558#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) 1908#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
@@ -1574,7 +1924,7 @@ enum punit_power_well {
1574#define MAD_DIMM_A_SIZE_SHIFT 0 1924#define MAD_DIMM_A_SIZE_SHIFT 0
1575#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) 1925#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
1576 1926
1577/** snb MCH registers for priority tuning */ 1927/* snb MCH registers for priority tuning */
1578#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) 1928#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
1579#define MCH_SSKPD_WM0_MASK 0x3f 1929#define MCH_SSKPD_WM0_MASK 0x3f
1580#define MCH_SSKPD_WM0_VAL 0xc 1930#define MCH_SSKPD_WM0_VAL 0xc
@@ -2002,6 +2352,7 @@ enum punit_power_well {
2002#define TRANSCODER_A_OFFSET 0x60000 2352#define TRANSCODER_A_OFFSET 0x60000
2003#define TRANSCODER_B_OFFSET 0x61000 2353#define TRANSCODER_B_OFFSET 0x61000
2004#define TRANSCODER_C_OFFSET 0x62000 2354#define TRANSCODER_C_OFFSET 0x62000
2355#define CHV_TRANSCODER_C_OFFSET 0x63000
2005#define TRANSCODER_EDP_OFFSET 0x6f000 2356#define TRANSCODER_EDP_OFFSET 0x6f000
2006 2357
2007#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ 2358#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
@@ -2226,6 +2577,7 @@ enum punit_power_well {
2226#define GEN3_SDVOC 0x61160 2577#define GEN3_SDVOC 0x61160
2227#define GEN4_HDMIB GEN3_SDVOB 2578#define GEN4_HDMIB GEN3_SDVOB
2228#define GEN4_HDMIC GEN3_SDVOC 2579#define GEN4_HDMIC GEN3_SDVOC
2580#define CHV_HDMID 0x6116C
2229#define PCH_SDVOB 0xe1140 2581#define PCH_SDVOB 0xe1140
2230#define PCH_HDMIB PCH_SDVOB 2582#define PCH_HDMIB PCH_SDVOB
2231#define PCH_HDMIC 0xe1150 2583#define PCH_HDMIC 0xe1150
@@ -2246,7 +2598,7 @@ enum punit_power_well {
2246#define SDVO_PIPE_B_SELECT (1 << 30) 2598#define SDVO_PIPE_B_SELECT (1 << 30)
2247#define SDVO_STALL_SELECT (1 << 29) 2599#define SDVO_STALL_SELECT (1 << 29)
2248#define SDVO_INTERRUPT_ENABLE (1 << 26) 2600#define SDVO_INTERRUPT_ENABLE (1 << 26)
2249/** 2601/*
2250 * 915G/GM SDVO pixel multiplier. 2602 * 915G/GM SDVO pixel multiplier.
2251 * Programmed value is multiplier - 1, up to 5x. 2603 * Programmed value is multiplier - 1, up to 5x.
2252 * \sa DPLL_MD_UDI_MULTIPLIER_MASK 2604 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
@@ -2286,6 +2638,10 @@ enum punit_power_well {
2286#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) 2638#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
2287#define SDVO_PIPE_SEL_MASK_CPT (3 << 29) 2639#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
2288 2640
2641/* CHV SDVO/HDMI bits: */
2642#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
2643#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
2644
2289 2645
2290/* DVO port control */ 2646/* DVO port control */
2291#define DVOA 0x61120 2647#define DVOA 0x61120
@@ -2556,65 +2912,65 @@ enum punit_power_well {
2556 2912
2557/* TV port control */ 2913/* TV port control */
2558#define TV_CTL 0x68000 2914#define TV_CTL 0x68000
2559/** Enables the TV encoder */ 2915/* Enables the TV encoder */
2560# define TV_ENC_ENABLE (1 << 31) 2916# define TV_ENC_ENABLE (1 << 31)
2561/** Sources the TV encoder input from pipe B instead of A. */ 2917/* Sources the TV encoder input from pipe B instead of A. */
2562# define TV_ENC_PIPEB_SELECT (1 << 30) 2918# define TV_ENC_PIPEB_SELECT (1 << 30)
2563/** Outputs composite video (DAC A only) */ 2919/* Outputs composite video (DAC A only) */
2564# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) 2920# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
2565/** Outputs SVideo video (DAC B/C) */ 2921/* Outputs SVideo video (DAC B/C) */
2566# define TV_ENC_OUTPUT_SVIDEO (1 << 28) 2922# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
2567/** Outputs Component video (DAC A/B/C) */ 2923/* Outputs Component video (DAC A/B/C) */
2568# define TV_ENC_OUTPUT_COMPONENT (2 << 28) 2924# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
2569/** Outputs Composite and SVideo (DAC A/B/C) */ 2925/* Outputs Composite and SVideo (DAC A/B/C) */
2570# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) 2926# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
2571# define TV_TRILEVEL_SYNC (1 << 21) 2927# define TV_TRILEVEL_SYNC (1 << 21)
2572/** Enables slow sync generation (945GM only) */ 2928/* Enables slow sync generation (945GM only) */
2573# define TV_SLOW_SYNC (1 << 20) 2929# define TV_SLOW_SYNC (1 << 20)
2574/** Selects 4x oversampling for 480i and 576p */ 2930/* Selects 4x oversampling for 480i and 576p */
2575# define TV_OVERSAMPLE_4X (0 << 18) 2931# define TV_OVERSAMPLE_4X (0 << 18)
2576/** Selects 2x oversampling for 720p and 1080i */ 2932/* Selects 2x oversampling for 720p and 1080i */
2577# define TV_OVERSAMPLE_2X (1 << 18) 2933# define TV_OVERSAMPLE_2X (1 << 18)
2578/** Selects no oversampling for 1080p */ 2934/* Selects no oversampling for 1080p */
2579# define TV_OVERSAMPLE_NONE (2 << 18) 2935# define TV_OVERSAMPLE_NONE (2 << 18)
2580/** Selects 8x oversampling */ 2936/* Selects 8x oversampling */
2581# define TV_OVERSAMPLE_8X (3 << 18) 2937# define TV_OVERSAMPLE_8X (3 << 18)
2582/** Selects progressive mode rather than interlaced */ 2938/* Selects progressive mode rather than interlaced */
2583# define TV_PROGRESSIVE (1 << 17) 2939# define TV_PROGRESSIVE (1 << 17)
2584/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ 2940/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
2585# define TV_PAL_BURST (1 << 16) 2941# define TV_PAL_BURST (1 << 16)
2586/** Field for setting delay of Y compared to C */ 2942/* Field for setting delay of Y compared to C */
2587# define TV_YC_SKEW_MASK (7 << 12) 2943# define TV_YC_SKEW_MASK (7 << 12)
2588/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ 2944/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
2589# define TV_ENC_SDP_FIX (1 << 11) 2945# define TV_ENC_SDP_FIX (1 << 11)
2590/** 2946/*
2591 * Enables a fix for the 915GM only. 2947 * Enables a fix for the 915GM only.
2592 * 2948 *
2593 * Not sure what it does. 2949 * Not sure what it does.
2594 */ 2950 */
2595# define TV_ENC_C0_FIX (1 << 10) 2951# define TV_ENC_C0_FIX (1 << 10)
2596/** Bits that must be preserved by software */ 2952/* Bits that must be preserved by software */
2597# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) 2953# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
2598# define TV_FUSE_STATE_MASK (3 << 4) 2954# define TV_FUSE_STATE_MASK (3 << 4)
2599/** Read-only state that reports all features enabled */ 2955/* Read-only state that reports all features enabled */
2600# define TV_FUSE_STATE_ENABLED (0 << 4) 2956# define TV_FUSE_STATE_ENABLED (0 << 4)
2601/** Read-only state that reports that Macrovision is disabled in hardware*/ 2957/* Read-only state that reports that Macrovision is disabled in hardware*/
2602# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) 2958# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
2603/** Read-only state that reports that TV-out is disabled in hardware. */ 2959/* Read-only state that reports that TV-out is disabled in hardware. */
2604# define TV_FUSE_STATE_DISABLED (2 << 4) 2960# define TV_FUSE_STATE_DISABLED (2 << 4)
2605/** Normal operation */ 2961/* Normal operation */
2606# define TV_TEST_MODE_NORMAL (0 << 0) 2962# define TV_TEST_MODE_NORMAL (0 << 0)
2607/** Encoder test pattern 1 - combo pattern */ 2963/* Encoder test pattern 1 - combo pattern */
2608# define TV_TEST_MODE_PATTERN_1 (1 << 0) 2964# define TV_TEST_MODE_PATTERN_1 (1 << 0)
2609/** Encoder test pattern 2 - full screen vertical 75% color bars */ 2965/* Encoder test pattern 2 - full screen vertical 75% color bars */
2610# define TV_TEST_MODE_PATTERN_2 (2 << 0) 2966# define TV_TEST_MODE_PATTERN_2 (2 << 0)
2611/** Encoder test pattern 3 - full screen horizontal 75% color bars */ 2967/* Encoder test pattern 3 - full screen horizontal 75% color bars */
2612# define TV_TEST_MODE_PATTERN_3 (3 << 0) 2968# define TV_TEST_MODE_PATTERN_3 (3 << 0)
2613/** Encoder test pattern 4 - random noise */ 2969/* Encoder test pattern 4 - random noise */
2614# define TV_TEST_MODE_PATTERN_4 (4 << 0) 2970# define TV_TEST_MODE_PATTERN_4 (4 << 0)
2615/** Encoder test pattern 5 - linear color ramps */ 2971/* Encoder test pattern 5 - linear color ramps */
2616# define TV_TEST_MODE_PATTERN_5 (5 << 0) 2972# define TV_TEST_MODE_PATTERN_5 (5 << 0)
2617/** 2973/*
2618 * This test mode forces the DACs to 50% of full output. 2974 * This test mode forces the DACs to 50% of full output.
2619 * 2975 *
2620 * This is used for load detection in combination with TVDAC_SENSE_MASK 2976 * This is used for load detection in combination with TVDAC_SENSE_MASK
@@ -2624,35 +2980,35 @@ enum punit_power_well {
2624 2980
2625#define TV_DAC 0x68004 2981#define TV_DAC 0x68004
2626# define TV_DAC_SAVE 0x00ffff00 2982# define TV_DAC_SAVE 0x00ffff00
2627/** 2983/*
2628 * Reports that DAC state change logic has reported change (RO). 2984 * Reports that DAC state change logic has reported change (RO).
2629 * 2985 *
2630 * This gets cleared when TV_DAC_STATE_EN is cleared 2986 * This gets cleared when TV_DAC_STATE_EN is cleared
2631*/ 2987*/
2632# define TVDAC_STATE_CHG (1 << 31) 2988# define TVDAC_STATE_CHG (1 << 31)
2633# define TVDAC_SENSE_MASK (7 << 28) 2989# define TVDAC_SENSE_MASK (7 << 28)
2634/** Reports that DAC A voltage is above the detect threshold */ 2990/* Reports that DAC A voltage is above the detect threshold */
2635# define TVDAC_A_SENSE (1 << 30) 2991# define TVDAC_A_SENSE (1 << 30)
2636/** Reports that DAC B voltage is above the detect threshold */ 2992/* Reports that DAC B voltage is above the detect threshold */
2637# define TVDAC_B_SENSE (1 << 29) 2993# define TVDAC_B_SENSE (1 << 29)
2638/** Reports that DAC C voltage is above the detect threshold */ 2994/* Reports that DAC C voltage is above the detect threshold */
2639# define TVDAC_C_SENSE (1 << 28) 2995# define TVDAC_C_SENSE (1 << 28)
2640/** 2996/*
2641 * Enables DAC state detection logic, for load-based TV detection. 2997 * Enables DAC state detection logic, for load-based TV detection.
2642 * 2998 *
2643 * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set 2999 * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
2644 * to off, for load detection to work. 3000 * to off, for load detection to work.
2645 */ 3001 */
2646# define TVDAC_STATE_CHG_EN (1 << 27) 3002# define TVDAC_STATE_CHG_EN (1 << 27)
2647/** Sets the DAC A sense value to high */ 3003/* Sets the DAC A sense value to high */
2648# define TVDAC_A_SENSE_CTL (1 << 26) 3004# define TVDAC_A_SENSE_CTL (1 << 26)
2649/** Sets the DAC B sense value to high */ 3005/* Sets the DAC B sense value to high */
2650# define TVDAC_B_SENSE_CTL (1 << 25) 3006# define TVDAC_B_SENSE_CTL (1 << 25)
2651/** Sets the DAC C sense value to high */ 3007/* Sets the DAC C sense value to high */
2652# define TVDAC_C_SENSE_CTL (1 << 24) 3008# define TVDAC_C_SENSE_CTL (1 << 24)
2653/** Overrides the ENC_ENABLE and DAC voltage levels */ 3009/* Overrides the ENC_ENABLE and DAC voltage levels */
2654# define DAC_CTL_OVERRIDE (1 << 7) 3010# define DAC_CTL_OVERRIDE (1 << 7)
2655/** Sets the slew rate. Must be preserved in software */ 3011/* Sets the slew rate. Must be preserved in software */
2656# define ENC_TVDAC_SLEW_FAST (1 << 6) 3012# define ENC_TVDAC_SLEW_FAST (1 << 6)
2657# define DAC_A_1_3_V (0 << 4) 3013# define DAC_A_1_3_V (0 << 4)
2658# define DAC_A_1_1_V (1 << 4) 3014# define DAC_A_1_1_V (1 << 4)
@@ -2667,7 +3023,7 @@ enum punit_power_well {
2667# define DAC_C_0_7_V (2 << 0) 3023# define DAC_C_0_7_V (2 << 0)
2668# define DAC_C_MASK (3 << 0) 3024# define DAC_C_MASK (3 << 0)
2669 3025
2670/** 3026/*
2671 * CSC coefficients are stored in a floating point format with 9 bits of 3027 * CSC coefficients are stored in a floating point format with 9 bits of
2672 * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, 3028 * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
2673 * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with 3029 * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
@@ -2682,7 +3038,7 @@ enum punit_power_well {
2682#define TV_CSC_Y2 0x68014 3038#define TV_CSC_Y2 0x68014
2683# define TV_BY_MASK 0x07ff0000 3039# define TV_BY_MASK 0x07ff0000
2684# define TV_BY_SHIFT 16 3040# define TV_BY_SHIFT 16
2685/** 3041/*
2686 * Y attenuation for component video. 3042 * Y attenuation for component video.
2687 * 3043 *
2688 * Stored in 1.9 fixed point. 3044 * Stored in 1.9 fixed point.
@@ -2699,7 +3055,7 @@ enum punit_power_well {
2699#define TV_CSC_U2 0x6801c 3055#define TV_CSC_U2 0x6801c
2700# define TV_BU_MASK 0x07ff0000 3056# define TV_BU_MASK 0x07ff0000
2701# define TV_BU_SHIFT 16 3057# define TV_BU_SHIFT 16
2702/** 3058/*
2703 * U attenuation for component video. 3059 * U attenuation for component video.
2704 * 3060 *
2705 * Stored in 1.9 fixed point. 3061 * Stored in 1.9 fixed point.
@@ -2716,7 +3072,7 @@ enum punit_power_well {
2716#define TV_CSC_V2 0x68024 3072#define TV_CSC_V2 0x68024
2717# define TV_BV_MASK 0x07ff0000 3073# define TV_BV_MASK 0x07ff0000
2718# define TV_BV_SHIFT 16 3074# define TV_BV_SHIFT 16
2719/** 3075/*
2720 * V attenuation for component video. 3076 * V attenuation for component video.
2721 * 3077 *
2722 * Stored in 1.9 fixed point. 3078 * Stored in 1.9 fixed point.
@@ -2725,74 +3081,74 @@ enum punit_power_well {
2725# define TV_AV_SHIFT 0 3081# define TV_AV_SHIFT 0
2726 3082
2727#define TV_CLR_KNOBS 0x68028 3083#define TV_CLR_KNOBS 0x68028
2728/** 2s-complement brightness adjustment */ 3084/* 2s-complement brightness adjustment */
2729# define TV_BRIGHTNESS_MASK 0xff000000 3085# define TV_BRIGHTNESS_MASK 0xff000000
2730# define TV_BRIGHTNESS_SHIFT 24 3086# define TV_BRIGHTNESS_SHIFT 24
2731/** Contrast adjustment, as a 2.6 unsigned floating point number */ 3087/* Contrast adjustment, as a 2.6 unsigned floating point number */
2732# define TV_CONTRAST_MASK 0x00ff0000 3088# define TV_CONTRAST_MASK 0x00ff0000
2733# define TV_CONTRAST_SHIFT 16 3089# define TV_CONTRAST_SHIFT 16
2734/** Saturation adjustment, as a 2.6 unsigned floating point number */ 3090/* Saturation adjustment, as a 2.6 unsigned floating point number */
2735# define TV_SATURATION_MASK 0x0000ff00 3091# define TV_SATURATION_MASK 0x0000ff00
2736# define TV_SATURATION_SHIFT 8 3092# define TV_SATURATION_SHIFT 8
2737/** Hue adjustment, as an integer phase angle in degrees */ 3093/* Hue adjustment, as an integer phase angle in degrees */
2738# define TV_HUE_MASK 0x000000ff 3094# define TV_HUE_MASK 0x000000ff
2739# define TV_HUE_SHIFT 0 3095# define TV_HUE_SHIFT 0
2740 3096
2741#define TV_CLR_LEVEL 0x6802c 3097#define TV_CLR_LEVEL 0x6802c
2742/** Controls the DAC level for black */ 3098/* Controls the DAC level for black */
2743# define TV_BLACK_LEVEL_MASK 0x01ff0000 3099# define TV_BLACK_LEVEL_MASK 0x01ff0000
2744# define TV_BLACK_LEVEL_SHIFT 16 3100# define TV_BLACK_LEVEL_SHIFT 16
2745/** Controls the DAC level for blanking */ 3101/* Controls the DAC level for blanking */
2746# define TV_BLANK_LEVEL_MASK 0x000001ff 3102# define TV_BLANK_LEVEL_MASK 0x000001ff
2747# define TV_BLANK_LEVEL_SHIFT 0 3103# define TV_BLANK_LEVEL_SHIFT 0
2748 3104
2749#define TV_H_CTL_1 0x68030 3105#define TV_H_CTL_1 0x68030
2750/** Number of pixels in the hsync. */ 3106/* Number of pixels in the hsync. */
2751# define TV_HSYNC_END_MASK 0x1fff0000 3107# define TV_HSYNC_END_MASK 0x1fff0000
2752# define TV_HSYNC_END_SHIFT 16 3108# define TV_HSYNC_END_SHIFT 16
2753/** Total number of pixels minus one in the line (display and blanking). */ 3109/* Total number of pixels minus one in the line (display and blanking). */
2754# define TV_HTOTAL_MASK 0x00001fff 3110# define TV_HTOTAL_MASK 0x00001fff
2755# define TV_HTOTAL_SHIFT 0 3111# define TV_HTOTAL_SHIFT 0
2756 3112
2757#define TV_H_CTL_2 0x68034 3113#define TV_H_CTL_2 0x68034
2758/** Enables the colorburst (needed for non-component color) */ 3114/* Enables the colorburst (needed for non-component color) */
2759# define TV_BURST_ENA (1 << 31) 3115# define TV_BURST_ENA (1 << 31)
2760/** Offset of the colorburst from the start of hsync, in pixels minus one. */ 3116/* Offset of the colorburst from the start of hsync, in pixels minus one. */
2761# define TV_HBURST_START_SHIFT 16 3117# define TV_HBURST_START_SHIFT 16
2762# define TV_HBURST_START_MASK 0x1fff0000 3118# define TV_HBURST_START_MASK 0x1fff0000
2763/** Length of the colorburst */ 3119/* Length of the colorburst */
2764# define TV_HBURST_LEN_SHIFT 0 3120# define TV_HBURST_LEN_SHIFT 0
2765# define TV_HBURST_LEN_MASK 0x0001fff 3121# define TV_HBURST_LEN_MASK 0x0001fff
2766 3122
2767#define TV_H_CTL_3 0x68038 3123#define TV_H_CTL_3 0x68038
2768/** End of hblank, measured in pixels minus one from start of hsync */ 3124/* End of hblank, measured in pixels minus one from start of hsync */
2769# define TV_HBLANK_END_SHIFT 16 3125# define TV_HBLANK_END_SHIFT 16
2770# define TV_HBLANK_END_MASK 0x1fff0000 3126# define TV_HBLANK_END_MASK 0x1fff0000
2771/** Start of hblank, measured in pixels minus one from start of hsync */ 3127/* Start of hblank, measured in pixels minus one from start of hsync */
2772# define TV_HBLANK_START_SHIFT 0 3128# define TV_HBLANK_START_SHIFT 0
2773# define TV_HBLANK_START_MASK 0x0001fff 3129# define TV_HBLANK_START_MASK 0x0001fff
2774 3130
2775#define TV_V_CTL_1 0x6803c 3131#define TV_V_CTL_1 0x6803c
2776/** XXX */ 3132/* XXX */
2777# define TV_NBR_END_SHIFT 16 3133# define TV_NBR_END_SHIFT 16
2778# define TV_NBR_END_MASK 0x07ff0000 3134# define TV_NBR_END_MASK 0x07ff0000
2779/** XXX */ 3135/* XXX */
2780# define TV_VI_END_F1_SHIFT 8 3136# define TV_VI_END_F1_SHIFT 8
2781# define TV_VI_END_F1_MASK 0x00003f00 3137# define TV_VI_END_F1_MASK 0x00003f00
2782/** XXX */ 3138/* XXX */
2783# define TV_VI_END_F2_SHIFT 0 3139# define TV_VI_END_F2_SHIFT 0
2784# define TV_VI_END_F2_MASK 0x0000003f 3140# define TV_VI_END_F2_MASK 0x0000003f
2785 3141
2786#define TV_V_CTL_2 0x68040 3142#define TV_V_CTL_2 0x68040
2787/** Length of vsync, in half lines */ 3143/* Length of vsync, in half lines */
2788# define TV_VSYNC_LEN_MASK 0x07ff0000 3144# define TV_VSYNC_LEN_MASK 0x07ff0000
2789# define TV_VSYNC_LEN_SHIFT 16 3145# define TV_VSYNC_LEN_SHIFT 16
2790/** Offset of the start of vsync in field 1, measured in one less than the 3146/* Offset of the start of vsync in field 1, measured in one less than the
2791 * number of half lines. 3147 * number of half lines.
2792 */ 3148 */
2793# define TV_VSYNC_START_F1_MASK 0x00007f00 3149# define TV_VSYNC_START_F1_MASK 0x00007f00
2794# define TV_VSYNC_START_F1_SHIFT 8 3150# define TV_VSYNC_START_F1_SHIFT 8
2795/** 3151/*
2796 * Offset of the start of vsync in field 2, measured in one less than the 3152 * Offset of the start of vsync in field 2, measured in one less than the
2797 * number of half lines. 3153 * number of half lines.
2798 */ 3154 */
@@ -2800,17 +3156,17 @@ enum punit_power_well {
2800# define TV_VSYNC_START_F2_SHIFT 0 3156# define TV_VSYNC_START_F2_SHIFT 0
2801 3157
2802#define TV_V_CTL_3 0x68044 3158#define TV_V_CTL_3 0x68044
2803/** Enables generation of the equalization signal */ 3159/* Enables generation of the equalization signal */
2804# define TV_EQUAL_ENA (1 << 31) 3160# define TV_EQUAL_ENA (1 << 31)
2805/** Length of vsync, in half lines */ 3161/* Length of vsync, in half lines */
2806# define TV_VEQ_LEN_MASK 0x007f0000 3162# define TV_VEQ_LEN_MASK 0x007f0000
2807# define TV_VEQ_LEN_SHIFT 16 3163# define TV_VEQ_LEN_SHIFT 16
2808/** Offset of the start of equalization in field 1, measured in one less than 3164/* Offset of the start of equalization in field 1, measured in one less than
2809 * the number of half lines. 3165 * the number of half lines.
2810 */ 3166 */
2811# define TV_VEQ_START_F1_MASK 0x0007f00 3167# define TV_VEQ_START_F1_MASK 0x0007f00
2812# define TV_VEQ_START_F1_SHIFT 8 3168# define TV_VEQ_START_F1_SHIFT 8
2813/** 3169/*
2814 * Offset of the start of equalization in field 2, measured in one less than 3170 * Offset of the start of equalization in field 2, measured in one less than
2815 * the number of half lines. 3171 * the number of half lines.
2816 */ 3172 */
@@ -2818,13 +3174,13 @@ enum punit_power_well {
2818# define TV_VEQ_START_F2_SHIFT 0 3174# define TV_VEQ_START_F2_SHIFT 0
2819 3175
2820#define TV_V_CTL_4 0x68048 3176#define TV_V_CTL_4 0x68048
2821/** 3177/*
2822 * Offset to start of vertical colorburst, measured in one less than the 3178 * Offset to start of vertical colorburst, measured in one less than the
2823 * number of lines from vertical start. 3179 * number of lines from vertical start.
2824 */ 3180 */
2825# define TV_VBURST_START_F1_MASK 0x003f0000 3181# define TV_VBURST_START_F1_MASK 0x003f0000
2826# define TV_VBURST_START_F1_SHIFT 16 3182# define TV_VBURST_START_F1_SHIFT 16
2827/** 3183/*
2828 * Offset to the end of vertical colorburst, measured in one less than the 3184 * Offset to the end of vertical colorburst, measured in one less than the
2829 * number of lines from the start of NBR. 3185 * number of lines from the start of NBR.
2830 */ 3186 */
@@ -2832,13 +3188,13 @@ enum punit_power_well {
2832# define TV_VBURST_END_F1_SHIFT 0 3188# define TV_VBURST_END_F1_SHIFT 0
2833 3189
2834#define TV_V_CTL_5 0x6804c 3190#define TV_V_CTL_5 0x6804c
2835/** 3191/*
2836 * Offset to start of vertical colorburst, measured in one less than the 3192 * Offset to start of vertical colorburst, measured in one less than the
2837 * number of lines from vertical start. 3193 * number of lines from vertical start.
2838 */ 3194 */
2839# define TV_VBURST_START_F2_MASK 0x003f0000 3195# define TV_VBURST_START_F2_MASK 0x003f0000
2840# define TV_VBURST_START_F2_SHIFT 16 3196# define TV_VBURST_START_F2_SHIFT 16
2841/** 3197/*
2842 * Offset to the end of vertical colorburst, measured in one less than the 3198 * Offset to the end of vertical colorburst, measured in one less than the
2843 * number of lines from the start of NBR. 3199 * number of lines from the start of NBR.
2844 */ 3200 */
@@ -2846,13 +3202,13 @@ enum punit_power_well {
2846# define TV_VBURST_END_F2_SHIFT 0 3202# define TV_VBURST_END_F2_SHIFT 0
2847 3203
2848#define TV_V_CTL_6 0x68050 3204#define TV_V_CTL_6 0x68050
2849/** 3205/*
2850 * Offset to start of vertical colorburst, measured in one less than the 3206 * Offset to start of vertical colorburst, measured in one less than the
2851 * number of lines from vertical start. 3207 * number of lines from vertical start.
2852 */ 3208 */
2853# define TV_VBURST_START_F3_MASK 0x003f0000 3209# define TV_VBURST_START_F3_MASK 0x003f0000
2854# define TV_VBURST_START_F3_SHIFT 16 3210# define TV_VBURST_START_F3_SHIFT 16
2855/** 3211/*
2856 * Offset to the end of vertical colorburst, measured in one less than the 3212 * Offset to the end of vertical colorburst, measured in one less than the
2857 * number of lines from the start of NBR. 3213 * number of lines from the start of NBR.
2858 */ 3214 */
@@ -2860,13 +3216,13 @@ enum punit_power_well {
2860# define TV_VBURST_END_F3_SHIFT 0 3216# define TV_VBURST_END_F3_SHIFT 0
2861 3217
2862#define TV_V_CTL_7 0x68054 3218#define TV_V_CTL_7 0x68054
2863/** 3219/*
2864 * Offset to start of vertical colorburst, measured in one less than the 3220 * Offset to start of vertical colorburst, measured in one less than the
2865 * number of lines from vertical start. 3221 * number of lines from vertical start.
2866 */ 3222 */
2867# define TV_VBURST_START_F4_MASK 0x003f0000 3223# define TV_VBURST_START_F4_MASK 0x003f0000
2868# define TV_VBURST_START_F4_SHIFT 16 3224# define TV_VBURST_START_F4_SHIFT 16
2869/** 3225/*
2870 * Offset to the end of vertical colorburst, measured in one less than the 3226 * Offset to the end of vertical colorburst, measured in one less than the
2871 * number of lines from the start of NBR. 3227 * number of lines from the start of NBR.
2872 */ 3228 */
@@ -2874,56 +3230,56 @@ enum punit_power_well {
2874# define TV_VBURST_END_F4_SHIFT 0 3230# define TV_VBURST_END_F4_SHIFT 0
2875 3231
2876#define TV_SC_CTL_1 0x68060 3232#define TV_SC_CTL_1 0x68060
2877/** Turns on the first subcarrier phase generation DDA */ 3233/* Turns on the first subcarrier phase generation DDA */
2878# define TV_SC_DDA1_EN (1 << 31) 3234# define TV_SC_DDA1_EN (1 << 31)
2879/** Turns on the first subcarrier phase generation DDA */ 3235/* Turns on the first subcarrier phase generation DDA */
2880# define TV_SC_DDA2_EN (1 << 30) 3236# define TV_SC_DDA2_EN (1 << 30)
2881/** Turns on the first subcarrier phase generation DDA */ 3237/* Turns on the first subcarrier phase generation DDA */
2882# define TV_SC_DDA3_EN (1 << 29) 3238# define TV_SC_DDA3_EN (1 << 29)
2883/** Sets the subcarrier DDA to reset frequency every other field */ 3239/* Sets the subcarrier DDA to reset frequency every other field */
2884# define TV_SC_RESET_EVERY_2 (0 << 24) 3240# define TV_SC_RESET_EVERY_2 (0 << 24)
2885/** Sets the subcarrier DDA to reset frequency every fourth field */ 3241/* Sets the subcarrier DDA to reset frequency every fourth field */
2886# define TV_SC_RESET_EVERY_4 (1 << 24) 3242# define TV_SC_RESET_EVERY_4 (1 << 24)
2887/** Sets the subcarrier DDA to reset frequency every eighth field */ 3243/* Sets the subcarrier DDA to reset frequency every eighth field */
2888# define TV_SC_RESET_EVERY_8 (2 << 24) 3244# define TV_SC_RESET_EVERY_8 (2 << 24)
2889/** Sets the subcarrier DDA to never reset the frequency */ 3245/* Sets the subcarrier DDA to never reset the frequency */
2890# define TV_SC_RESET_NEVER (3 << 24) 3246# define TV_SC_RESET_NEVER (3 << 24)
2891/** Sets the peak amplitude of the colorburst.*/ 3247/* Sets the peak amplitude of the colorburst.*/
2892# define TV_BURST_LEVEL_MASK 0x00ff0000 3248# define TV_BURST_LEVEL_MASK 0x00ff0000
2893# define TV_BURST_LEVEL_SHIFT 16 3249# define TV_BURST_LEVEL_SHIFT 16
2894/** Sets the increment of the first subcarrier phase generation DDA */ 3250/* Sets the increment of the first subcarrier phase generation DDA */
2895# define TV_SCDDA1_INC_MASK 0x00000fff 3251# define TV_SCDDA1_INC_MASK 0x00000fff
2896# define TV_SCDDA1_INC_SHIFT 0 3252# define TV_SCDDA1_INC_SHIFT 0
2897 3253
2898#define TV_SC_CTL_2 0x68064 3254#define TV_SC_CTL_2 0x68064
2899/** Sets the rollover for the second subcarrier phase generation DDA */ 3255/* Sets the rollover for the second subcarrier phase generation DDA */
2900# define TV_SCDDA2_SIZE_MASK 0x7fff0000 3256# define TV_SCDDA2_SIZE_MASK 0x7fff0000
2901# define TV_SCDDA2_SIZE_SHIFT 16 3257# define TV_SCDDA2_SIZE_SHIFT 16
2902/** Sets the increment of the second subcarrier phase generation DDA */ 3258/* Sets the increment of the second subcarrier phase generation DDA */
2903# define TV_SCDDA2_INC_MASK 0x00007fff 3259# define TV_SCDDA2_INC_MASK 0x00007fff
2904# define TV_SCDDA2_INC_SHIFT 0 3260# define TV_SCDDA2_INC_SHIFT 0
2905 3261
2906#define TV_SC_CTL_3 0x68068 3262#define TV_SC_CTL_3 0x68068
2907/** Sets the rollover for the third subcarrier phase generation DDA */ 3263/* Sets the rollover for the third subcarrier phase generation DDA */
2908# define TV_SCDDA3_SIZE_MASK 0x7fff0000 3264# define TV_SCDDA3_SIZE_MASK 0x7fff0000
2909# define TV_SCDDA3_SIZE_SHIFT 16 3265# define TV_SCDDA3_SIZE_SHIFT 16
2910/** Sets the increment of the third subcarrier phase generation DDA */ 3266/* Sets the increment of the third subcarrier phase generation DDA */
2911# define TV_SCDDA3_INC_MASK 0x00007fff 3267# define TV_SCDDA3_INC_MASK 0x00007fff
2912# define TV_SCDDA3_INC_SHIFT 0 3268# define TV_SCDDA3_INC_SHIFT 0
2913 3269
2914#define TV_WIN_POS 0x68070 3270#define TV_WIN_POS 0x68070
2915/** X coordinate of the display from the start of horizontal active */ 3271/* X coordinate of the display from the start of horizontal active */
2916# define TV_XPOS_MASK 0x1fff0000 3272# define TV_XPOS_MASK 0x1fff0000
2917# define TV_XPOS_SHIFT 16 3273# define TV_XPOS_SHIFT 16
2918/** Y coordinate of the display from the start of vertical active (NBR) */ 3274/* Y coordinate of the display from the start of vertical active (NBR) */
2919# define TV_YPOS_MASK 0x00000fff 3275# define TV_YPOS_MASK 0x00000fff
2920# define TV_YPOS_SHIFT 0 3276# define TV_YPOS_SHIFT 0
2921 3277
2922#define TV_WIN_SIZE 0x68074 3278#define TV_WIN_SIZE 0x68074
2923/** Horizontal size of the display window, measured in pixels */ 3279/* Horizontal size of the display window, measured in pixels */
2924# define TV_XSIZE_MASK 0x1fff0000 3280# define TV_XSIZE_MASK 0x1fff0000
2925# define TV_XSIZE_SHIFT 16 3281# define TV_XSIZE_SHIFT 16
2926/** 3282/*
2927 * Vertical size of the display window, measured in pixels. 3283 * Vertical size of the display window, measured in pixels.
2928 * 3284 *
2929 * Must be even for interlaced modes. 3285 * Must be even for interlaced modes.
@@ -2932,28 +3288,28 @@ enum punit_power_well {
2932# define TV_YSIZE_SHIFT 0 3288# define TV_YSIZE_SHIFT 0
2933 3289
2934#define TV_FILTER_CTL_1 0x68080 3290#define TV_FILTER_CTL_1 0x68080
2935/** 3291/*
2936 * Enables automatic scaling calculation. 3292 * Enables automatic scaling calculation.
2937 * 3293 *
2938 * If set, the rest of the registers are ignored, and the calculated values can 3294 * If set, the rest of the registers are ignored, and the calculated values can
2939 * be read back from the register. 3295 * be read back from the register.
2940 */ 3296 */
2941# define TV_AUTO_SCALE (1 << 31) 3297# define TV_AUTO_SCALE (1 << 31)
2942/** 3298/*
2943 * Disables the vertical filter. 3299 * Disables the vertical filter.
2944 * 3300 *
2945 * This is required for modes more than 1024 pixels wide */ 3301 * This is required for modes more than 1024 pixels wide */
2946# define TV_V_FILTER_BYPASS (1 << 29) 3302# define TV_V_FILTER_BYPASS (1 << 29)
2947/** Enables adaptive vertical filtering */ 3303/* Enables adaptive vertical filtering */
2948# define TV_VADAPT (1 << 28) 3304# define TV_VADAPT (1 << 28)
2949# define TV_VADAPT_MODE_MASK (3 << 26) 3305# define TV_VADAPT_MODE_MASK (3 << 26)
2950/** Selects the least adaptive vertical filtering mode */ 3306/* Selects the least adaptive vertical filtering mode */
2951# define TV_VADAPT_MODE_LEAST (0 << 26) 3307# define TV_VADAPT_MODE_LEAST (0 << 26)
2952/** Selects the moderately adaptive vertical filtering mode */ 3308/* Selects the moderately adaptive vertical filtering mode */
2953# define TV_VADAPT_MODE_MODERATE (1 << 26) 3309# define TV_VADAPT_MODE_MODERATE (1 << 26)
2954/** Selects the most adaptive vertical filtering mode */ 3310/* Selects the most adaptive vertical filtering mode */
2955# define TV_VADAPT_MODE_MOST (3 << 26) 3311# define TV_VADAPT_MODE_MOST (3 << 26)
2956/** 3312/*
2957 * Sets the horizontal scaling factor. 3313 * Sets the horizontal scaling factor.
2958 * 3314 *
2959 * This should be the fractional part of the horizontal scaling factor divided 3315 * This should be the fractional part of the horizontal scaling factor divided
@@ -2965,14 +3321,14 @@ enum punit_power_well {
2965# define TV_HSCALE_FRAC_SHIFT 0 3321# define TV_HSCALE_FRAC_SHIFT 0
2966 3322
2967#define TV_FILTER_CTL_2 0x68084 3323#define TV_FILTER_CTL_2 0x68084
2968/** 3324/*
2969 * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 3325 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
2970 * 3326 *
2971 * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) 3327 * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
2972 */ 3328 */
2973# define TV_VSCALE_INT_MASK 0x00038000 3329# define TV_VSCALE_INT_MASK 0x00038000
2974# define TV_VSCALE_INT_SHIFT 15 3330# define TV_VSCALE_INT_SHIFT 15
2975/** 3331/*
2976 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 3332 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
2977 * 3333 *
2978 * \sa TV_VSCALE_INT_MASK 3334 * \sa TV_VSCALE_INT_MASK
@@ -2981,7 +3337,7 @@ enum punit_power_well {
2981# define TV_VSCALE_FRAC_SHIFT 0 3337# define TV_VSCALE_FRAC_SHIFT 0
2982 3338
2983#define TV_FILTER_CTL_3 0x68088 3339#define TV_FILTER_CTL_3 0x68088
2984/** 3340/*
2985 * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 3341 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
2986 * 3342 *
2987 * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) 3343 * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
@@ -2990,7 +3346,7 @@ enum punit_power_well {
2990 */ 3346 */
2991# define TV_VSCALE_IP_INT_MASK 0x00038000 3347# define TV_VSCALE_IP_INT_MASK 0x00038000
2992# define TV_VSCALE_IP_INT_SHIFT 15 3348# define TV_VSCALE_IP_INT_SHIFT 15
2993/** 3349/*
2994 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 3350 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
2995 * 3351 *
2996 * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 3352 * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
@@ -3002,26 +3358,26 @@ enum punit_power_well {
3002 3358
3003#define TV_CC_CONTROL 0x68090 3359#define TV_CC_CONTROL 0x68090
3004# define TV_CC_ENABLE (1 << 31) 3360# define TV_CC_ENABLE (1 << 31)
3005/** 3361/*
3006 * Specifies which field to send the CC data in. 3362 * Specifies which field to send the CC data in.
3007 * 3363 *
3008 * CC data is usually sent in field 0. 3364 * CC data is usually sent in field 0.
3009 */ 3365 */
3010# define TV_CC_FID_MASK (1 << 27) 3366# define TV_CC_FID_MASK (1 << 27)
3011# define TV_CC_FID_SHIFT 27 3367# define TV_CC_FID_SHIFT 27
3012/** Sets the horizontal position of the CC data. Usually 135. */ 3368/* Sets the horizontal position of the CC data. Usually 135. */
3013# define TV_CC_HOFF_MASK 0x03ff0000 3369# define TV_CC_HOFF_MASK 0x03ff0000
3014# define TV_CC_HOFF_SHIFT 16 3370# define TV_CC_HOFF_SHIFT 16
3015/** Sets the vertical position of the CC data. Usually 21. */ 3371/* Sets the vertical position of the CC data. Usually 21. */
3016# define TV_CC_LINE_MASK 0x0000003f 3372# define TV_CC_LINE_MASK 0x0000003f
3017# define TV_CC_LINE_SHIFT 0 3373# define TV_CC_LINE_SHIFT 0
3018 3374
3019#define TV_CC_DATA 0x68094 3375#define TV_CC_DATA 0x68094
3020# define TV_CC_RDY (1 << 31) 3376# define TV_CC_RDY (1 << 31)
3021/** Second word of CC data to be transmitted. */ 3377/* Second word of CC data to be transmitted. */
3022# define TV_CC_DATA_2_MASK 0x007f0000 3378# define TV_CC_DATA_2_MASK 0x007f0000
3023# define TV_CC_DATA_2_SHIFT 16 3379# define TV_CC_DATA_2_SHIFT 16
3024/** First word of CC data to be transmitted. */ 3380/* First word of CC data to be transmitted. */
3025# define TV_CC_DATA_1_MASK 0x0000007f 3381# define TV_CC_DATA_1_MASK 0x0000007f
3026# define TV_CC_DATA_1_SHIFT 0 3382# define TV_CC_DATA_1_SHIFT 0
3027 3383
@@ -3043,6 +3399,8 @@ enum punit_power_well {
3043#define DP_PORT_EN (1 << 31) 3399#define DP_PORT_EN (1 << 31)
3044#define DP_PIPEB_SELECT (1 << 30) 3400#define DP_PIPEB_SELECT (1 << 30)
3045#define DP_PIPE_MASK (1 << 30) 3401#define DP_PIPE_MASK (1 << 30)
3402#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
3403#define DP_PIPE_MASK_CHV (3 << 16)
3046 3404
3047/* Link training mode - select a suitable mode for each stage */ 3405/* Link training mode - select a suitable mode for each stage */
3048#define DP_LINK_TRAIN_PAT_1 (0 << 28) 3406#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -3090,32 +3448,32 @@ enum punit_power_well {
3090#define DP_PLL_FREQ_160MHZ (1 << 16) 3448#define DP_PLL_FREQ_160MHZ (1 << 16)
3091#define DP_PLL_FREQ_MASK (3 << 16) 3449#define DP_PLL_FREQ_MASK (3 << 16)
3092 3450
3093/** locked once port is enabled */ 3451/* locked once port is enabled */
3094#define DP_PORT_REVERSAL (1 << 15) 3452#define DP_PORT_REVERSAL (1 << 15)
3095 3453
3096/* eDP */ 3454/* eDP */
3097#define DP_PLL_ENABLE (1 << 14) 3455#define DP_PLL_ENABLE (1 << 14)
3098 3456
3099/** sends the clock on lane 15 of the PEG for debug */ 3457/* sends the clock on lane 15 of the PEG for debug */
3100#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) 3458#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
3101 3459
3102#define DP_SCRAMBLING_DISABLE (1 << 12) 3460#define DP_SCRAMBLING_DISABLE (1 << 12)
3103#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) 3461#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
3104 3462
3105/** limit RGB values to avoid confusing TVs */ 3463/* limit RGB values to avoid confusing TVs */
3106#define DP_COLOR_RANGE_16_235 (1 << 8) 3464#define DP_COLOR_RANGE_16_235 (1 << 8)
3107 3465
3108/** Turn on the audio link */ 3466/* Turn on the audio link */
3109#define DP_AUDIO_OUTPUT_ENABLE (1 << 6) 3467#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
3110 3468
3111/** vs and hs sync polarity */ 3469/* vs and hs sync polarity */
3112#define DP_SYNC_VS_HIGH (1 << 4) 3470#define DP_SYNC_VS_HIGH (1 << 4)
3113#define DP_SYNC_HS_HIGH (1 << 3) 3471#define DP_SYNC_HS_HIGH (1 << 3)
3114 3472
3115/** A fantasy */ 3473/* A fantasy */
3116#define DP_DETECTED (1 << 2) 3474#define DP_DETECTED (1 << 2)
3117 3475
3118/** The aux channel provides a way to talk to the 3476/* The aux channel provides a way to talk to the
3119 * signal sink for DDC etc. Max packet size supported 3477 * signal sink for DDC etc. Max packet size supported
3120 * is 20 bytes in each direction, hence the 5 fixed 3478 * is 20 bytes in each direction, hence the 5 fixed
3121 * data registers 3479 * data registers
@@ -3258,6 +3616,7 @@ enum punit_power_well {
3258#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ 3616#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
3259#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ 3617#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
3260#define PIPECONF_INTERLACE_MODE_MASK (7 << 21) 3618#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
3619#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
3261#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 3620#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
3262#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) 3621#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
3263#define PIPECONF_BPC_MASK (0x7 << 5) 3622#define PIPECONF_BPC_MASK (0x7 << 5)
@@ -3276,6 +3635,7 @@ enum punit_power_well {
3276#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) 3635#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
3277#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 3636#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
3278#define PIPE_CRC_DONE_ENABLE (1UL<<28) 3637#define PIPE_CRC_DONE_ENABLE (1UL<<28)
3638#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
3279#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 3639#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
3280#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) 3640#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
3281#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) 3641#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
@@ -3287,8 +3647,10 @@ enum punit_power_well {
3287#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 3647#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
3288#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 3648#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
3289#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) 3649#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
3650#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
3290#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 3651#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
3291#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 3652#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
3653#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
3292#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 3654#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
3293#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) 3655#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
3294#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 3656#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
@@ -3296,6 +3658,7 @@ enum punit_power_well {
3296#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) 3658#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
3297#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 3659#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
3298#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 3660#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
3661#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
3299#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 3662#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
3300#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) 3663#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
3301#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 3664#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
@@ -3304,20 +3667,25 @@ enum punit_power_well {
3304#define PIPE_DPST_EVENT_STATUS (1UL<<7) 3667#define PIPE_DPST_EVENT_STATUS (1UL<<7)
3305#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 3668#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3306#define PIPE_A_PSR_STATUS_VLV (1UL<<6) 3669#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
3670#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3307#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 3671#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
3308#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 3672#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
3309#define PIPE_B_PSR_STATUS_VLV (1UL<<3) 3673#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
3674#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
3310#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ 3675#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
3311#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 3676#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
3677#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
3312#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 3678#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
3679#define PIPE_HBLANK_INT_STATUS (1UL<<0)
3313#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 3680#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
3314 3681
3315#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 3682#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
3316#define PIPESTAT_INT_STATUS_MASK 0x0000ffff 3683#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
3317 3684
3318#define PIPE_A_OFFSET 0x70000 3685#define PIPE_A_OFFSET 0x70000
3319#define PIPE_B_OFFSET 0x71000 3686#define PIPE_B_OFFSET 0x71000
3320#define PIPE_C_OFFSET 0x72000 3687#define PIPE_C_OFFSET 0x72000
3688#define CHV_PIPE_C_OFFSET 0x74000
3321/* 3689/*
3322 * There's actually no pipe EDP. Some pipe registers have 3690 * There's actually no pipe EDP. Some pipe registers have
3323 * simply shifted from the pipe to the transcoder, while 3691 * simply shifted from the pipe to the transcoder, while
@@ -3355,14 +3723,25 @@ enum punit_power_well {
3355#define SPRITED_FLIP_DONE_INT_EN (1<<26) 3723#define SPRITED_FLIP_DONE_INT_EN (1<<26)
3356#define SPRITEC_FLIP_DONE_INT_EN (1<<25) 3724#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
3357#define PLANEB_FLIP_DONE_INT_EN (1<<24) 3725#define PLANEB_FLIP_DONE_INT_EN (1<<24)
3726#define PIPE_PSR_INT_EN (1<<22)
3358#define PIPEA_LINE_COMPARE_INT_EN (1<<21) 3727#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
3359#define PIPEA_HLINE_INT_EN (1<<20) 3728#define PIPEA_HLINE_INT_EN (1<<20)
3360#define PIPEA_VBLANK_INT_EN (1<<19) 3729#define PIPEA_VBLANK_INT_EN (1<<19)
3361#define SPRITEB_FLIP_DONE_INT_EN (1<<18) 3730#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
3362#define SPRITEA_FLIP_DONE_INT_EN (1<<17) 3731#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
3363#define PLANEA_FLIPDONE_INT_EN (1<<16) 3732#define PLANEA_FLIPDONE_INT_EN (1<<16)
3364 3733#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
3365#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ 3734#define PIPEC_HLINE_INT_EN (1<<12)
3735#define PIPEC_VBLANK_INT_EN (1<<11)
3736#define SPRITEF_FLIPDONE_INT_EN (1<<10)
3737#define SPRITEE_FLIPDONE_INT_EN (1<<9)
3738#define PLANEC_FLIPDONE_INT_EN (1<<8)
3739
3740#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
3741#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
3742#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
3743#define PLANEC_INVALID_GTT_INT_EN (1<<25)
3744#define CURSORC_INVALID_GTT_INT_EN (1<<24)
3366#define CURSORB_INVALID_GTT_INT_EN (1<<23) 3745#define CURSORB_INVALID_GTT_INT_EN (1<<23)
3367#define CURSORA_INVALID_GTT_INT_EN (1<<22) 3746#define CURSORA_INVALID_GTT_INT_EN (1<<22)
3368#define SPRITED_INVALID_GTT_INT_EN (1<<21) 3747#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -3372,6 +3751,11 @@ enum punit_power_well {
3372#define SPRITEA_INVALID_GTT_INT_EN (1<<17) 3751#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
3373#define PLANEA_INVALID_GTT_INT_EN (1<<16) 3752#define PLANEA_INVALID_GTT_INT_EN (1<<16)
3374#define DPINVGTT_EN_MASK 0xff0000 3753#define DPINVGTT_EN_MASK 0xff0000
3754#define DPINVGTT_EN_MASK_CHV 0xfff0000
3755#define SPRITEF_INVALID_GTT_STATUS (1<<11)
3756#define SPRITEE_INVALID_GTT_STATUS (1<<10)
3757#define PLANEC_INVALID_GTT_STATUS (1<<9)
3758#define CURSORC_INVALID_GTT_STATUS (1<<8)
3375#define CURSORB_INVALID_GTT_STATUS (1<<7) 3759#define CURSORB_INVALID_GTT_STATUS (1<<7)
3376#define CURSORA_INVALID_GTT_STATUS (1<<6) 3760#define CURSORA_INVALID_GTT_STATUS (1<<6)
3377#define SPRITED_INVALID_GTT_STATUS (1<<5) 3761#define SPRITED_INVALID_GTT_STATUS (1<<5)
@@ -3381,6 +3765,7 @@ enum punit_power_well {
3381#define SPRITEA_INVALID_GTT_STATUS (1<<1) 3765#define SPRITEA_INVALID_GTT_STATUS (1<<1)
3382#define PLANEA_INVALID_GTT_STATUS (1<<0) 3766#define PLANEA_INVALID_GTT_STATUS (1<<0)
3383#define DPINVGTT_STATUS_MASK 0xff 3767#define DPINVGTT_STATUS_MASK 0xff
3768#define DPINVGTT_STATUS_MASK_CHV 0xfff
3384 3769
3385#define DSPARB 0x70030 3770#define DSPARB 0x70030
3386#define DSPARB_CSTART_MASK (0x7f << 7) 3771#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -3420,14 +3805,43 @@ enum punit_power_well {
3420#define DDL_CURSORA_PRECISION_32 (1<<31) 3805#define DDL_CURSORA_PRECISION_32 (1<<31)
3421#define DDL_CURSORA_PRECISION_16 (0<<31) 3806#define DDL_CURSORA_PRECISION_16 (0<<31)
3422#define DDL_CURSORA_SHIFT 24 3807#define DDL_CURSORA_SHIFT 24
3808#define DDL_SPRITEB_PRECISION_32 (1<<23)
3809#define DDL_SPRITEB_PRECISION_16 (0<<23)
3810#define DDL_SPRITEB_SHIFT 16
3811#define DDL_SPRITEA_PRECISION_32 (1<<15)
3812#define DDL_SPRITEA_PRECISION_16 (0<<15)
3813#define DDL_SPRITEA_SHIFT 8
3423#define DDL_PLANEA_PRECISION_32 (1<<7) 3814#define DDL_PLANEA_PRECISION_32 (1<<7)
3424#define DDL_PLANEA_PRECISION_16 (0<<7) 3815#define DDL_PLANEA_PRECISION_16 (0<<7)
3816#define DDL_PLANEA_SHIFT 0
3817
3425#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) 3818#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
3426#define DDL_CURSORB_PRECISION_32 (1<<31) 3819#define DDL_CURSORB_PRECISION_32 (1<<31)
3427#define DDL_CURSORB_PRECISION_16 (0<<31) 3820#define DDL_CURSORB_PRECISION_16 (0<<31)
3428#define DDL_CURSORB_SHIFT 24 3821#define DDL_CURSORB_SHIFT 24
3822#define DDL_SPRITED_PRECISION_32 (1<<23)
3823#define DDL_SPRITED_PRECISION_16 (0<<23)
3824#define DDL_SPRITED_SHIFT 16
3825#define DDL_SPRITEC_PRECISION_32 (1<<15)
3826#define DDL_SPRITEC_PRECISION_16 (0<<15)
3827#define DDL_SPRITEC_SHIFT 8
3429#define DDL_PLANEB_PRECISION_32 (1<<7) 3828#define DDL_PLANEB_PRECISION_32 (1<<7)
3430#define DDL_PLANEB_PRECISION_16 (0<<7) 3829#define DDL_PLANEB_PRECISION_16 (0<<7)
3830#define DDL_PLANEB_SHIFT 0
3831
3832#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
3833#define DDL_CURSORC_PRECISION_32 (1<<31)
3834#define DDL_CURSORC_PRECISION_16 (0<<31)
3835#define DDL_CURSORC_SHIFT 24
3836#define DDL_SPRITEF_PRECISION_32 (1<<23)
3837#define DDL_SPRITEF_PRECISION_16 (0<<23)
3838#define DDL_SPRITEF_SHIFT 16
3839#define DDL_SPRITEE_PRECISION_32 (1<<15)
3840#define DDL_SPRITEE_PRECISION_16 (0<<15)
3841#define DDL_SPRITEE_SHIFT 8
3842#define DDL_PLANEC_PRECISION_32 (1<<7)
3843#define DDL_PLANEC_PRECISION_16 (0<<7)
3844#define DDL_PLANEC_SHIFT 0
3431 3845
3432/* FIFO watermark sizes etc */ 3846/* FIFO watermark sizes etc */
3433#define G4X_FIFO_LINE_SIZE 64 3847#define G4X_FIFO_LINE_SIZE 64
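Editor's note: the new VLV_DDL2/VLV_DDL3 fields above follow a regular packing. Each 32-bit drain-latency register carries four byte-wide fields (plane at bit 0, two sprites at bits 8 and 16, cursor at bit 24); the low seven bits of each field hold the latency value and the top bit selects 16- versus 32-entry precision. A standalone sketch of that packing, not kernel code; the latency values are invented:

#include <stdio.h>
#include <stdint.h>

/* top bit of each byte-wide field selects 32- over 16-entry precision */
#define DDL_PRECISION_32(shift)	(1u << ((shift) + 7))

static uint32_t pack_ddl(uint8_t plane, uint8_t sprite_lo, uint8_t sprite_hi,
			 uint8_t cursor)
{
	return (uint32_t)plane | (uint32_t)sprite_lo << 8 |
	       (uint32_t)sprite_hi << 16 | (uint32_t)cursor << 24;
}

int main(void)
{
	/* plane A latency 0x20 at 32-entry precision, cursor A latency 0x10 */
	uint32_t ddl = pack_ddl(0x20, 0, 0, 0x10) | DDL_PRECISION_32(0);

	printf("VLV_DDL1 = 0x%08x\n", ddl);	/* prints 0x100000a0 */
	return 0;
}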
@@ -3535,12 +3949,13 @@ enum punit_power_well {
3535#define PIPE_PIXEL_MASK 0x00ffffff 3949#define PIPE_PIXEL_MASK 0x00ffffff
3536#define PIPE_PIXEL_SHIFT 0 3950#define PIPE_PIXEL_SHIFT 0
3537/* GM45+ just has to be different */ 3951/* GM45+ just has to be different */
3538#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040) 3952#define _PIPEA_FRMCOUNT_GM45 0x70040
3539#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044) 3953#define _PIPEA_FLIPCOUNT_GM45 0x70044
3540#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 3954#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
3955#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
3541 3956
3542/* Cursor A & B regs */ 3957/* Cursor A & B regs */
3543#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080) 3958#define _CURACNTR 0x70080
3544/* Old style CUR*CNTR flags (desktop 8xx) */ 3959/* Old style CUR*CNTR flags (desktop 8xx) */
3545#define CURSOR_ENABLE 0x80000000 3960#define CURSOR_ENABLE 0x80000000
3546#define CURSOR_GAMMA_ENABLE 0x40000000 3961#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -3567,28 +3982,34 @@ enum punit_power_well {
3567#define MCURSOR_PIPE_B (1 << 28) 3982#define MCURSOR_PIPE_B (1 << 28)
3568#define MCURSOR_GAMMA_ENABLE (1 << 26) 3983#define MCURSOR_GAMMA_ENABLE (1 << 26)
3569#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 3984#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
3570#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084) 3985#define _CURABASE 0x70084
3571#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088) 3986#define _CURAPOS 0x70088
3572#define CURSOR_POS_MASK 0x007FF 3987#define CURSOR_POS_MASK 0x007FF
3573#define CURSOR_POS_SIGN 0x8000 3988#define CURSOR_POS_SIGN 0x8000
3574#define CURSOR_X_SHIFT 0 3989#define CURSOR_X_SHIFT 0
3575#define CURSOR_Y_SHIFT 16 3990#define CURSOR_Y_SHIFT 16
3576#define CURSIZE 0x700a0 3991#define CURSIZE 0x700a0
3577#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0) 3992#define _CURBCNTR 0x700c0
3578#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4) 3993#define _CURBBASE 0x700c4
3579#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8) 3994#define _CURBPOS 0x700c8
3580 3995
3581#define _CURBCNTR_IVB 0x71080 3996#define _CURBCNTR_IVB 0x71080
3582#define _CURBBASE_IVB 0x71084 3997#define _CURBBASE_IVB 0x71084
3583#define _CURBPOS_IVB 0x71088 3998#define _CURBPOS_IVB 0x71088
3584 3999
3585#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) 4000#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
3586#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) 4001 dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
3587#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) 4002 dev_priv->info.display_mmio_offset)
4003
4004#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
4005#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
4006#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
3588 4007
3589#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) 4008#define CURSOR_A_OFFSET 0x70080
3590#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) 4009#define CURSOR_B_OFFSET 0x700c0
3591#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) 4010#define CHV_CURSOR_C_OFFSET 0x700e0
4011#define IVB_CURSOR_B_OFFSET 0x71080
4012#define IVB_CURSOR_C_OFFSET 0x72080
3592 4013
3593/* Display A control */ 4014/* Display A control */
3594#define _DSPACNTR 0x70180 4015#define _DSPACNTR 0x70180
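Editor's note: the _CURSOR2() macro above replaces the two-entry _PIPE() lookup so that pipe C cursors (0x700e0 on CHV, 0x72080 on IVB, per the offsets just added) resolve through a per-device cursor_offsets[] table: the macro takes a pipe-A register and rebases it onto the requested pipe. A standalone sketch of that arithmetic, assuming a CHV-style offset table; everything outside the offset values is illustrative:

#include <stdio.h>
#include <stdint.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/* mirrors CURSOR_A_OFFSET, CURSOR_B_OFFSET and CHV_CURSOR_C_OFFSET above */
static const uint32_t cursor_offsets[] = {
	[PIPE_A] = 0x70080,
	[PIPE_B] = 0x700c0,
	[PIPE_C] = 0x700e0,
};

/* mimics _CURSOR2: rebase a pipe-A register onto the requested pipe */
static uint32_t cursor_reg(enum pipe pipe, uint32_t reg, uint32_t mmio_base)
{
	return cursor_offsets[pipe] - cursor_offsets[PIPE_A] + reg + mmio_base;
}

int main(void)
{
	/* CURPOS for pipe C: 0x700e0 - 0x70080 + 0x70088 = 0x700e8 */
	printf("CURPOS(PIPE_C) = 0x%x\n", cursor_reg(PIPE_C, 0x70088, 0));
	return 0;
}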
@@ -4093,6 +4514,7 @@ enum punit_power_well {
4093#define GEN8_DE_PIPE_A_IRQ (1<<16) 4514#define GEN8_DE_PIPE_A_IRQ (1<<16)
4094#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) 4515#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
4095#define GEN8_GT_VECS_IRQ (1<<6) 4516#define GEN8_GT_VECS_IRQ (1<<6)
4517#define GEN8_GT_PM_IRQ (1<<4)
4096#define GEN8_GT_VCS2_IRQ (1<<3) 4518#define GEN8_GT_VCS2_IRQ (1<<3)
4097#define GEN8_GT_VCS1_IRQ (1<<2) 4519#define GEN8_GT_VCS1_IRQ (1<<2)
4098#define GEN8_GT_BCS_IRQ (1<<1) 4520#define GEN8_GT_BCS_IRQ (1<<1)
@@ -4120,7 +4542,7 @@ enum punit_power_well {
4120#define GEN8_PIPE_SPRITE_FAULT (1 << 9) 4542#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
4121#define GEN8_PIPE_PRIMARY_FAULT (1 << 8) 4543#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
4122#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5) 4544#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
4123#define GEN8_PIPE_FLIP_DONE (1 << 4) 4545#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
4124#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) 4546#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4125#define GEN8_PIPE_VSYNC (1 << 1) 4547#define GEN8_PIPE_VSYNC (1 << 1)
4126#define GEN8_PIPE_VBLANK (1 << 0) 4548#define GEN8_PIPE_VBLANK (1 << 0)
@@ -4832,6 +5254,8 @@ enum punit_power_well {
4832#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) 5254#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
4833#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) 5255#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
4834#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) 5256#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
5257#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
5258#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
4835 5259
4836#define TRANS_DP_CTL_A 0xe0300 5260#define TRANS_DP_CTL_A 0xe0300
4837#define TRANS_DP_CTL_B 0xe1300 5261#define TRANS_DP_CTL_B 0xe1300
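Editor's note: SDVO_PORT_TO_PIPE_CHV()/DP_PORT_TO_PIPE_CHV() decode the two-bit pipe-select field that DP_PIPE_SELECT_CHV() writes at bits 17:16, widened from the single PIPEB bit so Cherryview's third pipe fits. A quick self-contained round-trip check using the DP definitions as added above:

#include <stdio.h>
#include <assert.h>

#define DP_PIPE_SELECT_CHV(pipe)	((pipe) << 16)
#define DP_PIPE_MASK_CHV		(3 << 16)
#define DP_PORT_TO_PIPE_CHV(val)	(((val) & (3 << 16)) >> 16)

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 3; pipe++) {
		unsigned int val = DP_PIPE_SELECT_CHV(pipe);

		/* select and decode must stay inverses of each other */
		assert(DP_PORT_TO_PIPE_CHV(val) == pipe);
	}
	puts("select/decode agree for pipes A..C");
	return 0;
}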
@@ -4888,6 +5312,8 @@ enum punit_power_well {
4888 5312
4889#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) 5313#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
4890 5314
5315#define VLV_PMWGICZ 0x1300a4
5316
4891#define FORCEWAKE 0xA18C 5317#define FORCEWAKE 0xA18C
4892#define FORCEWAKE_VLV 0x1300b0 5318#define FORCEWAKE_VLV 0x1300b0
4893#define FORCEWAKE_ACK_VLV 0x1300b4 5319#define FORCEWAKE_ACK_VLV 0x1300b4
@@ -4896,15 +5322,22 @@ enum punit_power_well {
4896#define FORCEWAKE_ACK_HSW 0x130044 5322#define FORCEWAKE_ACK_HSW 0x130044
4897#define FORCEWAKE_ACK 0x130090 5323#define FORCEWAKE_ACK 0x130090
4898#define VLV_GTLC_WAKE_CTRL 0x130090 5324#define VLV_GTLC_WAKE_CTRL 0x130090
5325#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
5326#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
5327#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
5328
4899#define VLV_GTLC_PW_STATUS 0x130094 5329#define VLV_GTLC_PW_STATUS 0x130094
4900#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80 5330#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
4901#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20 5331#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
5332#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
5333#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
4902#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 5334#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4903#define FORCEWAKE_KERNEL 0x1 5335#define FORCEWAKE_KERNEL 0x1
4904#define FORCEWAKE_USER 0x2 5336#define FORCEWAKE_USER 0x2
4905#define FORCEWAKE_MT_ACK 0x130040 5337#define FORCEWAKE_MT_ACK 0x130040
4906#define ECOBUS 0xa180 5338#define ECOBUS 0xa180
4907#define FORCEWAKE_MT_ENABLE (1<<5) 5339#define FORCEWAKE_MT_ENABLE (1<<5)
5340#define VLV_SPAREG2H 0xA194
4908 5341
4909#define GTFIFODBG 0x120000 5342#define GTFIFODBG 0x120000
4910#define GT_FIFO_SBDROPERR (1<<6) 5343#define GT_FIFO_SBDROPERR (1<<6)
@@ -4924,6 +5357,7 @@ enum punit_power_well {
4924#define HSW_EDRAM_PRESENT 0x120010 5357#define HSW_EDRAM_PRESENT 0x120010
4925 5358
4926#define GEN6_UCGCTL1 0x9400 5359#define GEN6_UCGCTL1 0x9400
5360# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
4927# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) 5361# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
4928# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) 5362# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
4929 5363
@@ -4934,12 +5368,19 @@ enum punit_power_well {
4934# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) 5368# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
4935# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) 5369# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
4936 5370
5371#define GEN6_UCGCTL3 0x9408
5372
4937#define GEN7_UCGCTL4 0x940c 5373#define GEN7_UCGCTL4 0x940c
4938#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) 5374#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
4939 5375
5376#define GEN6_RCGCTL1 0x9410
5377#define GEN6_RCGCTL2 0x9414
5378#define GEN6_RSTCTL 0x9420
5379
4940#define GEN8_UCGCTL6 0x9430 5380#define GEN8_UCGCTL6 0x9430
4941#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) 5381#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
4942 5382
5383#define GEN6_GFXPAUSE 0xA000
4943#define GEN6_RPNSWREQ 0xA008 5384#define GEN6_RPNSWREQ 0xA008
4944#define GEN6_TURBO_DISABLE (1<<31) 5385#define GEN6_TURBO_DISABLE (1<<31)
4945#define GEN6_FREQUENCY(x) ((x)<<25) 5386#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4992,6 +5433,9 @@ enum punit_power_well {
4992#define GEN6_RP_UP_EI 0xA068 5433#define GEN6_RP_UP_EI 0xA068
4993#define GEN6_RP_DOWN_EI 0xA06C 5434#define GEN6_RP_DOWN_EI 0xA06C
4994#define GEN6_RP_IDLE_HYSTERSIS 0xA070 5435#define GEN6_RP_IDLE_HYSTERSIS 0xA070
5436#define GEN6_RPDEUHWTC 0xA080
5437#define GEN6_RPDEUC 0xA084
5438#define GEN6_RPDEUCSW 0xA088
4995#define GEN6_RC_STATE 0xA094 5439#define GEN6_RC_STATE 0xA094
4996#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 5440#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
4997#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C 5441#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
@@ -4999,11 +5443,15 @@ enum punit_power_well {
4999#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 5443#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
5000#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC 5444#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
5001#define GEN6_RC_SLEEP 0xA0B0 5445#define GEN6_RC_SLEEP 0xA0B0
5446#define GEN6_RCUBMABDTMR 0xA0B0
5002#define GEN6_RC1e_THRESHOLD 0xA0B4 5447#define GEN6_RC1e_THRESHOLD 0xA0B4
5003#define GEN6_RC6_THRESHOLD 0xA0B8 5448#define GEN6_RC6_THRESHOLD 0xA0B8
5004#define GEN6_RC6p_THRESHOLD 0xA0BC 5449#define GEN6_RC6p_THRESHOLD 0xA0BC
5450#define VLV_RCEDATA 0xA0BC
5005#define GEN6_RC6pp_THRESHOLD 0xA0C0 5451#define GEN6_RC6pp_THRESHOLD 0xA0C0
5006#define GEN6_PMINTRMSK 0xA168 5452#define GEN6_PMINTRMSK 0xA168
5453#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
5454#define VLV_PWRDWNUPCTL 0xA294
5007 5455
5008#define GEN6_PMISR 0x44020 5456#define GEN6_PMISR 0x44020
5009#define GEN6_PMIMR 0x44024 /* rps_lock */ 5457#define GEN6_PMIMR 0x44024 /* rps_lock */
@@ -5020,6 +5468,9 @@ enum punit_power_well {
5020 GEN6_PM_RP_DOWN_THRESHOLD | \ 5468 GEN6_PM_RP_DOWN_THRESHOLD | \
5021 GEN6_PM_RP_DOWN_TIMEOUT) 5469 GEN6_PM_RP_DOWN_TIMEOUT)
5022 5470
5471#define GEN7_GT_SCRATCH_BASE 0x4F100
5472#define GEN7_GT_SCRATCH_REG_NUM 8
5473
5023#define VLV_GTLC_SURVIVABILITY_REG 0x130098 5474#define VLV_GTLC_SURVIVABILITY_REG 0x130098
5024#define VLV_GFX_CLK_STATUS_BIT (1<<3) 5475#define VLV_GFX_CLK_STATUS_BIT (1<<3)
5025#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) 5476#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
@@ -5030,6 +5481,9 @@ enum punit_power_well {
5030#define VLV_MEDIA_RC6_COUNT_EN (1<<1) 5481#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
5031#define VLV_RENDER_RC6_COUNT_EN (1<<0) 5482#define VLV_RENDER_RC6_COUNT_EN (1<<0)
5032#define GEN6_GT_GFX_RC6 0x138108 5483#define GEN6_GT_GFX_RC6 0x138108
5484#define VLV_GT_RENDER_RC6 0x138108
5485#define VLV_GT_MEDIA_RC6 0x13810C
5486
5033#define GEN6_GT_GFX_RC6p 0x13810C 5487#define GEN6_GT_GFX_RC6p 0x13810C
5034#define GEN6_GT_GFX_RC6pp 0x138110 5488#define GEN6_GT_GFX_RC6pp 0x138110
5035 5489
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 56785e8fb2eb..043123c77a1f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -328,8 +328,6 @@ int i915_save_state(struct drm_device *dev)
328 } 328 }
329 } 329 }
330 330
331 intel_disable_gt_powersave(dev);
332
333 /* Cache mode state */ 331 /* Cache mode state */
334 if (INTEL_INFO(dev)->gen < 7) 332 if (INTEL_INFO(dev)->gen < 7)
335 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 333 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 9c57029f6f4b..86ce39aad0ff 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -186,7 +186,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
186 struct drm_minor *dminor = dev_to_drm_minor(dev); 186 struct drm_minor *dminor = dev_to_drm_minor(dev);
187 struct drm_device *drm_dev = dminor->dev; 187 struct drm_device *drm_dev = dminor->dev;
188 struct drm_i915_private *dev_priv = drm_dev->dev_private; 188 struct drm_i915_private *dev_priv = drm_dev->dev_private;
189 struct i915_hw_context *ctx; 189 struct intel_context *ctx;
190 u32 *temp = NULL; /* Just here to make handling failures easy */ 190 u32 *temp = NULL; /* Just here to make handling failures easy */
191 int slice = (int)(uintptr_t)attr->private; 191 int slice = (int)(uintptr_t)attr->private;
192 int ret; 192 int ret;
@@ -263,6 +263,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
263 263
264 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 264 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
265 265
266 intel_runtime_pm_get(dev_priv);
267
266 mutex_lock(&dev_priv->rps.hw_lock); 268 mutex_lock(&dev_priv->rps.hw_lock);
267 if (IS_VALLEYVIEW(dev_priv->dev)) { 269 if (IS_VALLEYVIEW(dev_priv->dev)) {
268 u32 freq; 270 u32 freq;
@@ -273,6 +275,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
273 } 275 }
274 mutex_unlock(&dev_priv->rps.hw_lock); 276 mutex_unlock(&dev_priv->rps.hw_lock);
275 277
278 intel_runtime_pm_put(dev_priv);
279
276 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 280 return snprintf(buf, PAGE_SIZE, "%d\n", ret);
277} 281}
278 282
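Editor's note: the gt_cur_freq_mhz_show() change wraps the hardware access in an intel_runtime_pm_get()/intel_runtime_pm_put() pair so the device cannot runtime-suspend while its frequency registers are read. A schematic userspace sketch of that bracket pattern; the stub functions only stand in for the i915 helpers:

#include <stdio.h>

/* stand-ins for intel_runtime_pm_get/put and the MMIO frequency read */
static void runtime_pm_get(void) { puts("hold wakeref: device stays on"); }
static void runtime_pm_put(void) { puts("drop wakeref: may suspend again"); }
static int read_hw_freq_mhz(void) { return 350; }

int main(void)
{
	int mhz;

	runtime_pm_get();		/* device must be awake ... */
	mhz = read_hw_freq_mhz();	/* ... while registers are read */
	runtime_pm_put();

	printf("%d MHz\n", mhz);
	return 0;
}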
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 23c26f1f8b37..f5aa0067755a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
7 7
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9#include "i915_drv.h" 9#include "i915_drv.h"
10#include "intel_drv.h"
10#include "intel_ringbuffer.h" 11#include "intel_ringbuffer.h"
11 12
12#undef TRACE_SYSTEM 13#undef TRACE_SYSTEM
@@ -14,6 +15,80 @@
14#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) 15#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
15#define TRACE_INCLUDE_FILE i915_trace 16#define TRACE_INCLUDE_FILE i915_trace
16 17
18/* pipe updates */
19
20TRACE_EVENT(i915_pipe_update_start,
21 TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
22 TP_ARGS(crtc, min, max),
23
24 TP_STRUCT__entry(
25 __field(enum pipe, pipe)
26 __field(u32, frame)
27 __field(u32, scanline)
28 __field(u32, min)
29 __field(u32, max)
30 ),
31
32 TP_fast_assign(
33 __entry->pipe = crtc->pipe;
34 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
35 crtc->pipe);
36 __entry->scanline = intel_get_crtc_scanline(crtc);
37 __entry->min = min;
38 __entry->max = max;
39 ),
40
41 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
42 pipe_name(__entry->pipe), __entry->frame,
43 __entry->scanline, __entry->min, __entry->max)
44);
45
46TRACE_EVENT(i915_pipe_update_vblank_evaded,
47 TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
48 TP_ARGS(crtc, min, max, frame),
49
50 TP_STRUCT__entry(
51 __field(enum pipe, pipe)
52 __field(u32, frame)
53 __field(u32, scanline)
54 __field(u32, min)
55 __field(u32, max)
56 ),
57
58 TP_fast_assign(
59 __entry->pipe = crtc->pipe;
60 __entry->frame = frame;
61 __entry->scanline = intel_get_crtc_scanline(crtc);
62 __entry->min = min;
63 __entry->max = max;
64 ),
65
66 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
67 pipe_name(__entry->pipe), __entry->frame,
68 __entry->scanline, __entry->min, __entry->max)
69);
70
71TRACE_EVENT(i915_pipe_update_end,
72 TP_PROTO(struct intel_crtc *crtc, u32 frame),
73 TP_ARGS(crtc, frame),
74
75 TP_STRUCT__entry(
76 __field(enum pipe, pipe)
77 __field(u32, frame)
78 __field(u32, scanline)
79 ),
80
81 TP_fast_assign(
82 __entry->pipe = crtc->pipe;
83 __entry->frame = frame;
84 __entry->scanline = intel_get_crtc_scanline(crtc);
85 ),
86
87 TP_printk("pipe %c, frame=%u, scanline=%u",
88 pipe_name(__entry->pipe), __entry->frame,
89 __entry->scanline)
90);
91
17/* object tracking */ 92/* object tracking */
18 93
19TRACE_EVENT(i915_gem_object_create, 94TRACE_EVENT(i915_gem_object_create,
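Editor's note: each TRACE_EVENT() above generates a trace_i915_pipe_update_*() helper that records pipe, frame and scanline when a pipe update starts, when the vblank was successfully evaded, and when the update ends. A userspace analogue of the start/end pair to show the intended ordering and fields; struct crtc and the printf stand-ins are illustrative, not the kernel types:

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-in for the kernel's struct intel_crtc */
struct crtc { char pipe; uint32_t frame; uint32_t scanline; };

static void trace_pipe_update_start(const struct crtc *c, uint32_t min,
				    uint32_t max)
{
	printf("start: pipe %c, frame=%u, scanline=%u, min=%u, max=%u\n",
	       c->pipe, c->frame, c->scanline, min, max);
}

static void trace_pipe_update_end(const struct crtc *c, uint32_t frame)
{
	printf("end: pipe %c, frame=%u, scanline=%u\n",
	       c->pipe, frame, c->scanline);
}

int main(void)
{
	struct crtc crtc = { .pipe = 'A', .frame = 100, .scanline = 5 };

	trace_pipe_update_start(&crtc, 2, 14);	/* entering the evade window */
	/* ... pipe registers would be updated here ... */
	trace_pipe_update_end(&crtc, crtc.frame);
	return 0;
}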
@@ -251,8 +326,8 @@ TRACE_EVENT(i915_gem_evict_vm,
251); 326);
252 327
253TRACE_EVENT(i915_gem_ring_sync_to, 328TRACE_EVENT(i915_gem_ring_sync_to,
254 TP_PROTO(struct intel_ring_buffer *from, 329 TP_PROTO(struct intel_engine_cs *from,
255 struct intel_ring_buffer *to, 330 struct intel_engine_cs *to,
256 u32 seqno), 331 u32 seqno),
257 TP_ARGS(from, to, seqno), 332 TP_ARGS(from, to, seqno),
258 333
@@ -277,7 +352,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
277); 352);
278 353
279TRACE_EVENT(i915_gem_ring_dispatch, 354TRACE_EVENT(i915_gem_ring_dispatch,
280 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 355 TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
281 TP_ARGS(ring, seqno, flags), 356 TP_ARGS(ring, seqno, flags),
282 357
283 TP_STRUCT__entry( 358 TP_STRUCT__entry(
@@ -300,7 +375,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
300); 375);
301 376
302TRACE_EVENT(i915_gem_ring_flush, 377TRACE_EVENT(i915_gem_ring_flush,
303 TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), 378 TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
304 TP_ARGS(ring, invalidate, flush), 379 TP_ARGS(ring, invalidate, flush),
305 380
306 TP_STRUCT__entry( 381 TP_STRUCT__entry(
@@ -323,7 +398,7 @@ TRACE_EVENT(i915_gem_ring_flush,
323); 398);
324 399
325DECLARE_EVENT_CLASS(i915_gem_request, 400DECLARE_EVENT_CLASS(i915_gem_request,
326 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 401 TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
327 TP_ARGS(ring, seqno), 402 TP_ARGS(ring, seqno),
328 403
329 TP_STRUCT__entry( 404 TP_STRUCT__entry(
@@ -343,12 +418,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
343); 418);
344 419
345DEFINE_EVENT(i915_gem_request, i915_gem_request_add, 420DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
346 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 421 TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
347 TP_ARGS(ring, seqno) 422 TP_ARGS(ring, seqno)
348); 423);
349 424
350TRACE_EVENT(i915_gem_request_complete, 425TRACE_EVENT(i915_gem_request_complete,
351 TP_PROTO(struct intel_ring_buffer *ring), 426 TP_PROTO(struct intel_engine_cs *ring),
352 TP_ARGS(ring), 427 TP_ARGS(ring),
353 428
354 TP_STRUCT__entry( 429 TP_STRUCT__entry(
@@ -368,12 +443,12 @@ TRACE_EVENT(i915_gem_request_complete,
368); 443);
369 444
370DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 445DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
371 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 446 TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
372 TP_ARGS(ring, seqno) 447 TP_ARGS(ring, seqno)
373); 448);
374 449
375TRACE_EVENT(i915_gem_request_wait_begin, 450TRACE_EVENT(i915_gem_request_wait_begin,
376 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 451 TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
377 TP_ARGS(ring, seqno), 452 TP_ARGS(ring, seqno),
378 453
379 TP_STRUCT__entry( 454 TP_STRUCT__entry(
@@ -402,12 +477,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
402); 477);
403 478
404DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, 479DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
405 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 480 TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
406 TP_ARGS(ring, seqno) 481 TP_ARGS(ring, seqno)
407); 482);
408 483
409DECLARE_EVENT_CLASS(i915_ring, 484DECLARE_EVENT_CLASS(i915_ring,
410 TP_PROTO(struct intel_ring_buffer *ring), 485 TP_PROTO(struct intel_engine_cs *ring),
411 TP_ARGS(ring), 486 TP_ARGS(ring),
412 487
413 TP_STRUCT__entry( 488 TP_STRUCT__entry(
@@ -424,12 +499,12 @@ DECLARE_EVENT_CLASS(i915_ring,
424); 499);
425 500
426DEFINE_EVENT(i915_ring, i915_ring_wait_begin, 501DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
427 TP_PROTO(struct intel_ring_buffer *ring), 502 TP_PROTO(struct intel_engine_cs *ring),
428 TP_ARGS(ring) 503 TP_ARGS(ring)
429); 504);
430 505
431DEFINE_EVENT(i915_ring, i915_ring_wait_end, 506DEFINE_EVENT(i915_ring, i915_ring_wait_end,
432 TP_PROTO(struct intel_ring_buffer *ring), 507 TP_PROTO(struct intel_engine_cs *ring),
433 TP_ARGS(ring) 508 TP_ARGS(ring)
434); 509);
435 510
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index aff4a113cda3..1ee98f121a00 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -49,13 +49,19 @@ find_section(struct bdb_header *bdb, int section_id)
49 total = bdb->bdb_size; 49 total = bdb->bdb_size;
50 50
51 /* walk the sections looking for section_id */ 51 /* walk the sections looking for section_id */
52 while (index < total) { 52 while (index + 3 < total) {
53 current_id = *(base + index); 53 current_id = *(base + index);
54 index++; 54 index++;
55
55 current_size = *((u16 *)(base + index)); 56 current_size = *((u16 *)(base + index));
56 index += 2; 57 index += 2;
58
59 if (index + current_size > total)
60 return NULL;
61
57 if (current_id == section_id) 62 if (current_id == section_id)
58 return base + index; 63 return base + index;
64
59 index += current_size; 65 index += current_size;
60 } 66 }
61 67
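Editor's note: the hardened loop above assumes each BDB section is a 1-byte id, a 2-byte little-endian size, then 'size' bytes of payload, and now rejects both a truncated header (index + 3 < total) and a section whose declared size overruns the block. A standalone sketch of the same walk and checks; the buffer contents are invented:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static const uint8_t *find_section(const uint8_t *base, size_t total,
				   size_t start, int section_id)
{
	size_t index = start;

	while (index + 3 < total) {		/* room for id + size */
		int id = base[index++];
		uint16_t size;

		memcpy(&size, base + index, 2);	/* unaligned-safe read */
		index += 2;
		if (index + size > total)	/* truncated section */
			return NULL;
		if (id == section_id)
			return base + index;	/* points at the payload */
		index += size;
	}
	return NULL;
}

int main(void)
{
	/* one section: id 40, little-endian size 2, payload {0xaa, 0xbb} */
	uint8_t bdb[] = { 40, 2, 0, 0xaa, 0xbb };
	const uint8_t *p = find_section(bdb, sizeof(bdb), 0, 40);

	printf("%s\n", p ? "found" : "missing");
	return 0;
}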
@@ -206,7 +212,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
206 const struct lvds_dvo_timing *panel_dvo_timing; 212 const struct lvds_dvo_timing *panel_dvo_timing;
207 const struct lvds_fp_timing *fp_timing; 213 const struct lvds_fp_timing *fp_timing;
208 struct drm_display_mode *panel_fixed_mode; 214 struct drm_display_mode *panel_fixed_mode;
209 int i, downclock; 215 int i, downclock, drrs_mode;
210 216
211 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 217 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
212 if (!lvds_options) 218 if (!lvds_options)
@@ -218,6 +224,28 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
218 224
219 panel_type = lvds_options->panel_type; 225 panel_type = lvds_options->panel_type;
220 226
227 drrs_mode = (lvds_options->dps_panel_type_bits
228 >> (panel_type * 2)) & MODE_MASK;
229 /*
230 * VBT has static DRRS = 0 and seamless DRRS = 2.
231 * The code below adjusts vbt.drrs_type to match the
232 * enum drrs_support_type.
233 */
234 switch (drrs_mode) {
235 case 0:
236 dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
237 DRM_DEBUG_KMS("DRRS supported mode is static\n");
238 break;
239 case 2:
240 dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
241 DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
242 break;
243 default:
244 dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
245 DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
246 break;
247 }
248
221 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 249 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
222 if (!lvds_lfp_data) 250 if (!lvds_lfp_data)
223 return; 251 return;
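Editor's note: the DRRS parsing added above gives each panel type a two-bit field inside dps_panel_type_bits, so panel N's mode is (bits >> (N * 2)) & MODE_MASK before the VBT's 0/2 encoding is mapped onto drrs_support_type. A tiny worked example of the extraction; the register value is invented:

#include <stdio.h>
#include <stdint.h>

#define MODE_MASK 0x3

int main(void)
{
	uint32_t dps_panel_type_bits = 0x00000020;	/* panel 2 -> mode 2 */
	int panel_type = 2;
	int drrs_mode = (dps_panel_type_bits >> (panel_type * 2)) & MODE_MASK;

	/* VBT encoding: 0 = static DRRS, 2 = seamless DRRS */
	printf("drrs_mode = %d\n", drrs_mode);	/* prints 2 */
	return 0;
}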
@@ -526,6 +554,16 @@ parse_driver_features(struct drm_i915_private *dev_priv,
526 554
527 if (driver->dual_frequency) 555 if (driver->dual_frequency)
528 dev_priv->render_reclock_avail = true; 556 dev_priv->render_reclock_avail = true;
557
558 DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
559 /*
560 * If DRRS is not supported, drrs_type has to be set to 0.
561 * This is because the VBT is configured in such a way that
562 * static DRRS is 0 and DRRS not supported is represented by
563 * driver->drrs_enabled=false
564 */
565 if (!driver->drrs_enabled)
566 dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
529} 567}
530 568
531static void 569static void
@@ -628,19 +666,221 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
628 } 666 }
629} 667}
630 668
669static u8 *goto_next_sequence(u8 *data, int *size)
670{
671 u16 len;
672 int tmp = *size;
673
674 if (--tmp < 0)
675 return NULL;
676
677 /* goto first element */
678 data++;
679 while (1) {
680 switch (*data) {
681 case MIPI_SEQ_ELEM_SEND_PKT:
682 /*
683 * skip by this element payload size
684 * skip elem id, command flag and data type
685 */
686 tmp -= 5;
687 if (tmp < 0)
688 return NULL;
689
690 data += 3;
691 len = *((u16 *)data);
692
693 tmp -= len;
694 if (tmp < 0)
695 return NULL;
696
697 /* skip by len */
698 data = data + 2 + len;
699 break;
700 case MIPI_SEQ_ELEM_DELAY:
701 /* skip by elem id, and delay is 4 bytes */
702 tmp -= 5;
703 if (tmp < 0)
704 return NULL;
705
706 data += 5;
707 break;
708 case MIPI_SEQ_ELEM_GPIO:
709 tmp -= 3;
710 if (tmp < 0)
711 return NULL;
712
713 data += 3;
714 break;
715 default:
716 DRM_ERROR("Unknown element\n");
717 return NULL;
718 }
719
720 /* end of sequence ? */
721 if (*data == 0)
722 break;
723 }
724
725 /* goto next sequence or end of block byte */
726 if (--tmp < 0)
727 return NULL;
728
729 data++;
730
731 /* update amount of data left for the sequence block to be parsed */
732 *size = tmp;
733 return data;
734}
735
631static void 736static void
632parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 737parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
633{ 738{
634 struct bdb_mipi *mipi; 739 struct bdb_mipi_config *start;
740 struct bdb_mipi_sequence *sequence;
741 struct mipi_config *config;
742 struct mipi_pps_data *pps;
743 u8 *data, *seq_data;
744 int i, panel_id, seq_size;
745 u16 block_size;
746
747 /* parse MIPI blocks only if LFP type is MIPI */
748 if (!dev_priv->vbt.has_mipi)
749 return;
750
751 /* Initialize this to undefined indicating no generic MIPI support */
752 dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
753
754 /* Block #40 is already parsed and panel_fixed_mode is
755 * stored in dev_priv->lfp_lvds_vbt_mode
756 * reuse this when needed
757 */
758
759 /* Parse block #52 using the panel index taken from the
760 * already-parsed panel_type
761 */
762 start = find_section(bdb, BDB_MIPI_CONFIG);
763 if (!start) {
764 DRM_DEBUG_KMS("No MIPI config BDB found");
765 return;
766 }
767
768 DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
769 panel_type);
635 770
636 mipi = find_section(bdb, BDB_MIPI_CONFIG); 771 /*
637 if (!mipi) { 772 * get hold of the correct configuration block and pps data as per
638 DRM_DEBUG_KMS("No MIPI BDB found"); 773 * the panel_type as index
774 */
775 config = &start->config[panel_type];
776 pps = &start->pps[panel_type];
777
778 /* store the full data for now; trim it later if not all of it is needed */
779 dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
780 if (!dev_priv->vbt.dsi.config)
781 return;
782
783 dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
784 if (!dev_priv->vbt.dsi.pps) {
785 kfree(dev_priv->vbt.dsi.config);
639 return; 786 return;
640 } 787 }
641 788
642 /* XXX: add more info */ 789 /* We have mandatory mipi config blocks. Initialize as generic panel */
643 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; 790 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
791
792 /* Check if we have sequence block as well */
793 sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
794 if (!sequence) {
795 DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
796 return;
797 }
798
799 DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
800
801 block_size = get_blocksize(sequence);
802
803 /*
804 * parse the sequence block for individual sequences
805 */
806 dev_priv->vbt.dsi.seq_version = sequence->version;
807
808 seq_data = &sequence->data[0];
809
810 /*
811 * sequence block is variable length and hence we need to parse and
812 * get the sequence data for specific panel id
813 */
814 for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
815 panel_id = *seq_data;
816 seq_size = *((u16 *) (seq_data + 1));
817 if (panel_id == panel_type)
818 break;
819
820 /* skip the sequence including seq header of 3 bytes */
821 seq_data = seq_data + 3 + seq_size;
822 if ((seq_data - &sequence->data[0]) > block_size) {
823 DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
824 return;
825 }
826 }
827
828 if (i == MAX_MIPI_CONFIGURATIONS) {
829 DRM_ERROR("Sequence block detected but no valid configuration\n");
830 return;
831 }
832
833 /* check that the found sequence lies completely within the
834 * sequence block; just being paranoid */
835 if (seq_size > block_size) {
836 DRM_ERROR("Corrupted sequence/size, bailing out\n");
837 return;
838 }
839
840 /* skip the panel id(1 byte) and seq size(2 bytes) */
841 dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
842 if (!dev_priv->vbt.dsi.data)
843 return;
844
845 /*
846 * loop over the sequence data and split it into multiple sequences
847 * There are only 5 types of sequences as of now
848 */
849 data = dev_priv->vbt.dsi.data;
850 dev_priv->vbt.dsi.size = seq_size;
851
852 /* two consecutive 0x00 indicate end of all sequences */
853 while (1) {
854 int seq_id = *data;
855 if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
856 dev_priv->vbt.dsi.sequence[seq_id] = data;
857 DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
858 } else {
859 DRM_ERROR("undefined sequence\n");
860 goto err;
861 }
862
863 /* partial parsing to skip elements */
864 data = goto_next_sequence(data, &seq_size);
865
866 if (data == NULL) {
867 DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
868 goto err;
869 }
870
871 if (*data == 0)
872 break; /* end of sequence reached */
873 }
874
875 DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
876 return;
877err:
878 kfree(dev_priv->vbt.dsi.data);
879 dev_priv->vbt.dsi.data = NULL;
880
881 /* error during parsing, so clear any sequence pointers that
882 * partial parsing may already have set */
883 memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
644} 884}
645 885
646static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, 886static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
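Editor's note: goto_next_sequence() and the parse_mipi() loop above walk a byte stream shaped as: one sequence-id byte, a run of elements (a SEND_PKT element carries a 2-byte payload length after three header bytes, a DELAY element is five bytes in total, a GPIO element three), then a 0x00 terminator, with two consecutive zeros ending the whole block. A self-contained sketch of one such stream, using the MIPI_SEQ_* ids defined in intel_bios.h below; the payload bytes are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t seq[] = {
		3,				/* sequence id: MIPI_SEQ_DISPLAY_ON */
		2, 0x0a, 0x00, 0x00, 0x00,	/* MIPI_SEQ_ELEM_DELAY: id + 4-byte delay */
		3, 0x01, 0x01,			/* MIPI_SEQ_ELEM_GPIO: id + 2 payload bytes */
		0,				/* end-of-sequence marker */
	};

	printf("sequence %u, %zu bytes\n", seq[0], sizeof(seq));
	return 0;
}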
@@ -823,6 +1063,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
823 /* skip the device block if device type is invalid */ 1063 /* skip the device block if device type is invalid */
824 continue; 1064 continue;
825 } 1065 }
1066
1067 if (p_child->common.dvo_port >= DVO_PORT_MIPIA
1068 && p_child->common.dvo_port <= DVO_PORT_MIPID
1069 && p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
1070 DRM_DEBUG_KMS("Found MIPI as LFP\n");
1071 dev_priv->vbt.has_mipi = 1;
1072 dev_priv->vbt.dsi.port = p_child->common.dvo_port;
1073 }
1074
826 child_dev_ptr = dev_priv->vbt.child_dev + count; 1075 child_dev_ptr = dev_priv->vbt.child_dev + count;
827 count++; 1076 count++;
828 memcpy((void *)child_dev_ptr, (void *)p_child, 1077 memcpy((void *)child_dev_ptr, (void *)p_child,
@@ -893,6 +1142,46 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
893 { } 1142 { }
894}; 1143};
895 1144
1145static struct bdb_header *validate_vbt(char *base, size_t size,
1146 struct vbt_header *vbt,
1147 const char *source)
1148{
1149 size_t offset;
1150 struct bdb_header *bdb;
1151
1152 if (vbt == NULL) {
1153 DRM_DEBUG_DRIVER("VBT signature missing\n");
1154 return NULL;
1155 }
1156
1157 offset = (char *)vbt - base;
1158 if (offset + sizeof(struct vbt_header) > size) {
1159 DRM_DEBUG_DRIVER("VBT header incomplete\n");
1160 return NULL;
1161 }
1162
1163 if (memcmp(vbt->signature, "$VBT", 4)) {
1164 DRM_DEBUG_DRIVER("VBT invalid signature\n");
1165 return NULL;
1166 }
1167
1168 offset += vbt->bdb_offset;
1169 if (offset + sizeof(struct bdb_header) > size) {
1170 DRM_DEBUG_DRIVER("BDB header incomplete\n");
1171 return NULL;
1172 }
1173
1174 bdb = (struct bdb_header *)(base + offset);
1175 if (offset + bdb->bdb_size > size) {
1176 DRM_DEBUG_DRIVER("BDB incomplete\n");
1177 return NULL;
1178 }
1179
1180 DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
1181 source, vbt->signature);
1182 return bdb;
1183}
1184
896/** 1185/**
897 * intel_parse_bios - find VBT and initialize settings from the BIOS 1186 * intel_parse_bios - find VBT and initialize settings from the BIOS
898 * @dev: DRM device 1187 * @dev: DRM device
@@ -916,20 +1205,13 @@ intel_parse_bios(struct drm_device *dev)
916 init_vbt_defaults(dev_priv); 1205 init_vbt_defaults(dev_priv);
917 1206
918 /* XXX Should this validation be moved to intel_opregion.c? */ 1207 /* XXX Should this validation be moved to intel_opregion.c? */
919 if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { 1208 if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
920 struct vbt_header *vbt = dev_priv->opregion.vbt; 1209 bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
921 if (memcmp(vbt->signature, "$VBT", 4) == 0) { 1210 (struct vbt_header *)dev_priv->opregion.vbt,
922 DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", 1211 "OpRegion");
923 vbt->signature);
924 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
925 } else
926 dev_priv->opregion.vbt = NULL;
927 }
928 1212
929 if (bdb == NULL) { 1213 if (bdb == NULL) {
930 struct vbt_header *vbt = NULL; 1214 size_t i, size;
931 size_t size;
932 int i;
933 1215
934 bios = pci_map_rom(pdev, &size); 1216 bios = pci_map_rom(pdev, &size);
935 if (!bios) 1217 if (!bios)
@@ -937,19 +1219,18 @@ intel_parse_bios(struct drm_device *dev)
937 1219
938 /* Scour memory looking for the VBT signature */ 1220 /* Scour memory looking for the VBT signature */
939 for (i = 0; i + 4 < size; i++) { 1221 for (i = 0; i + 4 < size; i++) {
940 if (!memcmp(bios + i, "$VBT", 4)) { 1222 if (memcmp(bios + i, "$VBT", 4) == 0) {
941 vbt = (struct vbt_header *)(bios + i); 1223 bdb = validate_vbt(bios, size,
1224 (struct vbt_header *)(bios + i),
1225 "PCI ROM");
942 break; 1226 break;
943 } 1227 }
944 } 1228 }
945 1229
946 if (!vbt) { 1230 if (!bdb) {
947 DRM_DEBUG_DRIVER("VBT signature missing\n");
948 pci_unmap_rom(pdev, bios); 1231 pci_unmap_rom(pdev, bios);
949 return -1; 1232 return -1;
950 } 1233 }
951
952 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
953 } 1234 }
954 1235
955 /* Grab useful general definitions */ 1236 /* Grab useful general definitions */
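Editor's note: after the rework, intel_parse_bios() funnels both VBT sources through validate_vbt(): the OpRegion copy is tried first, and failing that the mapped PCI ROM is scanned byte-wise for the "$VBT" signature. A minimal sketch of that signature scan; the buffer and offset are invented:

#include <stdio.h>
#include <string.h>

static long find_vbt(const char *bios, size_t size)
{
	size_t i;

	for (i = 0; i + 4 < size; i++)
		if (memcmp(bios + i, "$VBT", 4) == 0)
			return (long)i;	/* offset of the VBT header */
	return -1;
}

int main(void)
{
	const char rom[] = "xxxx$VBT....";

	printf("%ld\n", find_vbt(rom, sizeof(rom) - 1));	/* prints 4 */
	return 0;
}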
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f27f7b282465..b98667796337 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -282,6 +282,9 @@ struct bdb_general_definitions {
282 union child_device_config devices[0]; 282 union child_device_config devices[0];
283} __packed; 283} __packed;
284 284
285/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
286#define MODE_MASK 0x3
287
285struct bdb_lvds_options { 288struct bdb_lvds_options {
286 u8 panel_type; 289 u8 panel_type;
287 u8 rsvd1; 290 u8 rsvd1;
@@ -294,6 +297,18 @@ struct bdb_lvds_options {
294 u8 lvds_edid:1; 297 u8 lvds_edid:1;
295 u8 rsvd2:1; 298 u8 rsvd2:1;
296 u8 rsvd4; 299 u8 rsvd4;
300 /* LVDS Panel channel bits stored here */
301 u32 lvds_panel_channel_bits;
302 /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
303 u16 ssc_bits;
304 u16 ssc_freq;
305 u16 ssc_ddt;
306 /* Panel color depth defined here */
307 u16 panel_color_depth;
308 /* LVDS panel type bits stored here */
309 u32 dps_panel_type_bits;
310 /* LVDS backlight control type bits stored here */
311 u32 blt_control_type_bits;
297} __packed; 312} __packed;
298 313
299/* LFP pointer table contains entries to the struct below */ 314/* LFP pointer table contains entries to the struct below */
@@ -482,6 +497,20 @@ struct bdb_driver_features {
482 497
483 u8 hdmi_termination; 498 u8 hdmi_termination;
484 u8 custom_vbt_version; 499 u8 custom_vbt_version;
500 /* Driver features data block */
501 u16 rmpm_enabled:1;
502 u16 s2ddt_enabled:1;
503 u16 dpst_enabled:1;
504 u16 bltclt_enabled:1;
505 u16 adb_enabled:1;
506 u16 drrs_enabled:1;
507 u16 grs_enabled:1;
508 u16 gpmt_enabled:1;
509 u16 tbt_enabled:1;
510 u16 psr_enabled:1;
511 u16 ips_enabled:1;
512 u16 reserved3:4;
513 u16 pc_feature_valid:1;
485} __packed; 514} __packed;
486 515
487#define EDP_18BPP 0 516#define EDP_18BPP 0
@@ -714,6 +743,10 @@ int intel_parse_bios(struct drm_device *dev);
714#define DVO_PORT_DPC 8 743#define DVO_PORT_DPC 8
715#define DVO_PORT_DPD 9 744#define DVO_PORT_DPD 9
716#define DVO_PORT_DPA 10 745#define DVO_PORT_DPA 10
746#define DVO_PORT_MIPIA 21
747#define DVO_PORT_MIPIB 22
748#define DVO_PORT_MIPIC 23
749#define DVO_PORT_MIPID 24
717 750
718/* Block 52 contains MIPI Panel info 751/* Block 52 contains MIPI Panel info
719 * 6 such entries will be there. Index into correct 752
@@ -870,4 +903,35 @@ struct bdb_mipi_sequence {
870 u8 data[0]; 903 u8 data[0];
871}; 904};
872 905
906/* MIPI Sequence Block definitions */
907enum mipi_seq {
908 MIPI_SEQ_UNDEFINED = 0,
909 MIPI_SEQ_ASSERT_RESET,
910 MIPI_SEQ_INIT_OTP,
911 MIPI_SEQ_DISPLAY_ON,
912 MIPI_SEQ_DISPLAY_OFF,
913 MIPI_SEQ_DEASSERT_RESET,
914 MIPI_SEQ_MAX
915};
916
917enum mipi_seq_element {
918 MIPI_SEQ_ELEM_UNDEFINED = 0,
919 MIPI_SEQ_ELEM_SEND_PKT,
920 MIPI_SEQ_ELEM_DELAY,
921 MIPI_SEQ_ELEM_GPIO,
922 MIPI_SEQ_ELEM_STATUS,
923 MIPI_SEQ_ELEM_MAX
924};
925
926enum mipi_gpio_pin_index {
927 MIPI_GPIO_UNDEFINED = 0,
928 MIPI_GPIO_PANEL_ENABLE,
929 MIPI_GPIO_BL_ENABLE,
930 MIPI_GPIO_PWM_ENABLE,
931 MIPI_GPIO_RESET_N,
932 MIPI_GPIO_PWR_DOWN_R,
933 MIPI_GPIO_STDBY_RST_N,
934 MIPI_GPIO_MAX
935};
936
873#endif /* _I830_BIOS_H_ */ 937#endif /* _I830_BIOS_H_ */
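The new enums above describe the VBT MIPI sequences: each sequence (assert/deassert reset, init OTP, display on/off) is a stream of elements such as "send packet", "delay", and "GPIO". A hedged sketch of how a driver might walk such a stream; the element framing assumed here (id byte, length byte, payload) is illustrative, not the documented VBT encoding:

    #include <stdint.h>
    #include <stdio.h>

    enum mipi_seq_element { ELEM_END = 0, ELEM_SEND_PKT, ELEM_DELAY,
                            ELEM_GPIO, ELEM_STATUS };

    /* Walk one sequence. Framing assumption: <u8 id> <u8 len> <payload>,
     * terminated by id 0. */
    static void exec_sequence(const uint8_t *p)
    {
        while (*p != ELEM_END) {
            uint8_t id = *p++;
            uint8_t len = *p++;

            switch (id) {
            case ELEM_DELAY:
                printf("delay %u us\n", p[0] | p[1] << 8);
                break;
            case ELEM_GPIO:
                printf("gpio %u -> %u\n", p[0], p[1]);
                break;
            default:
                printf("element %u, %u payload bytes\n", id, len);
            }
            p += len;
        }
    }

    int main(void)
    {
        const uint8_t seq[] = { ELEM_DELAY, 2, 100, 0,  /* delay 100 us */
                                ELEM_GPIO,  2, 2, 1,    /* gpio 2 -> 1  */
                                ELEM_END };
        exec_sequence(seq);
        return 0;
    }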
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index aa5a3dc43342..5a045d3bd77e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -144,28 +144,49 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
144 struct drm_device *dev = encoder->base.dev; 144 struct drm_device *dev = encoder->base.dev;
145 struct drm_i915_private *dev_priv = dev->dev_private; 145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct intel_crt *crt = intel_encoder_to_crt(encoder); 146 struct intel_crt *crt = intel_encoder_to_crt(encoder);
147 u32 temp; 147 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
148 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
149 u32 adpa;
150
151 if (INTEL_INFO(dev)->gen >= 5)
152 adpa = ADPA_HOTPLUG_BITS;
153 else
154 adpa = 0;
148 155
149 temp = I915_READ(crt->adpa_reg); 156 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
150 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 157 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
151 temp &= ~ADPA_DAC_ENABLE; 158 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
159 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
160
161 /* For CPT allow 3 pipe config, for others just use A or B */
162 if (HAS_PCH_LPT(dev))
163 ; /* Those bits don't exist here */
164 else if (HAS_PCH_CPT(dev))
165 adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
166 else if (crtc->pipe == 0)
167 adpa |= ADPA_PIPE_A_SELECT;
168 else
169 adpa |= ADPA_PIPE_B_SELECT;
170
171 if (!HAS_PCH_SPLIT(dev))
172 I915_WRITE(BCLRPAT(crtc->pipe), 0);
152 173
153 switch (mode) { 174 switch (mode) {
154 case DRM_MODE_DPMS_ON: 175 case DRM_MODE_DPMS_ON:
155 temp |= ADPA_DAC_ENABLE; 176 adpa |= ADPA_DAC_ENABLE;
156 break; 177 break;
157 case DRM_MODE_DPMS_STANDBY: 178 case DRM_MODE_DPMS_STANDBY:
158 temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; 179 adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
159 break; 180 break;
160 case DRM_MODE_DPMS_SUSPEND: 181 case DRM_MODE_DPMS_SUSPEND:
161 temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; 182 adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
162 break; 183 break;
163 case DRM_MODE_DPMS_OFF: 184 case DRM_MODE_DPMS_OFF:
164 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; 185 adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
165 break; 186 break;
166 } 187 }
167 188
168 I915_WRITE(crt->adpa_reg, temp); 189 I915_WRITE(crt->adpa_reg, adpa);
169} 190}
170 191
171static void intel_disable_crt(struct intel_encoder *encoder) 192static void intel_disable_crt(struct intel_encoder *encoder)
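With intel_crt_mode_set() folded in (it is removed later in this patch), intel_crt_set_dpms() now rebuilds the whole ADPA value from scratch on every DPMS change rather than read-modify-writing the register, so stale bits cannot leak between states. A compact, self-contained check of the four DPMS-to-sync-gating mappings; the bit positions are illustrative stand-ins, not the real ADPA_* values from i915_reg.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit values; the real definitions live in i915_reg.h. */
    #define DAC_ENABLE   (1u << 31)
    #define HSYNC_OFF    (1u << 11)
    #define VSYNC_OFF    (1u << 10)

    enum dpms { ON, STANDBY, SUSPEND, OFF };

    static uint32_t dpms_bits(enum dpms m)
    {
        switch (m) {
        case ON:      return DAC_ENABLE;
        case STANDBY: return DAC_ENABLE | HSYNC_OFF; /* hsync gated, DAC on */
        case SUSPEND: return DAC_ENABLE | VSYNC_OFF; /* vsync gated, DAC on */
        default:      return HSYNC_OFF | VSYNC_OFF;  /* both gated, DAC off */
        }
    }

    int main(void)
    {
        for (enum dpms m = ON; m <= OFF; m++)
            printf("mode %d -> adpa bits %#010x\n", m, dpms_bits(m));
        return 0;
    }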
@@ -274,42 +295,6 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
274 return true; 295 return true;
275} 296}
276 297
277static void intel_crt_mode_set(struct intel_encoder *encoder)
278{
279
280 struct drm_device *dev = encoder->base.dev;
281 struct intel_crt *crt = intel_encoder_to_crt(encoder);
282 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
283 struct drm_i915_private *dev_priv = dev->dev_private;
284 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
285 u32 adpa;
286
287 if (INTEL_INFO(dev)->gen >= 5)
288 adpa = ADPA_HOTPLUG_BITS;
289 else
290 adpa = 0;
291
292 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
293 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
294 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
295 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
296
297 /* For CPT allow 3 pipe config, for others just use A or B */
298 if (HAS_PCH_LPT(dev))
299 ; /* Those bits don't exist here */
300 else if (HAS_PCH_CPT(dev))
301 adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
302 else if (crtc->pipe == 0)
303 adpa |= ADPA_PIPE_A_SELECT;
304 else
305 adpa |= ADPA_PIPE_B_SELECT;
306
307 if (!HAS_PCH_SPLIT(dev))
308 I915_WRITE(BCLRPAT(crtc->pipe), 0);
309
310 I915_WRITE(crt->adpa_reg, adpa);
311}
312
313static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) 298static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
314{ 299{
315 struct drm_device *dev = connector->dev; 300 struct drm_device *dev = connector->dev;
@@ -645,11 +630,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
645 enum intel_display_power_domain power_domain; 630 enum intel_display_power_domain power_domain;
646 enum drm_connector_status status; 631 enum drm_connector_status status;
647 struct intel_load_detect_pipe tmp; 632 struct intel_load_detect_pipe tmp;
633 struct drm_modeset_acquire_ctx ctx;
648 634
649 intel_runtime_pm_get(dev_priv); 635 intel_runtime_pm_get(dev_priv);
650 636
651 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 637 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
652 connector->base.id, drm_get_connector_name(connector), 638 connector->base.id, connector->name,
653 force); 639 force);
654 640
655 power_domain = intel_display_port_power_domain(intel_encoder); 641 power_domain = intel_display_port_power_domain(intel_encoder);
@@ -688,12 +674,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
688 } 674 }
689 675
690 /* for pre-945g platforms use load detect */ 676 /* for pre-945g platforms use load detect */
691 if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { 677 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
692 if (intel_crt_detect_ddc(connector)) 678 if (intel_crt_detect_ddc(connector))
693 status = connector_status_connected; 679 status = connector_status_connected;
694 else 680 else
695 status = intel_crt_load_detect(crt); 681 status = intel_crt_load_detect(crt);
696 intel_release_load_detect_pipe(connector, &tmp); 682 intel_release_load_detect_pipe(connector, &tmp, &ctx);
697 } else 683 } else
698 status = connector_status_unknown; 684 status = connector_status_unknown;
699 685
@@ -867,7 +853,6 @@ void intel_crt_init(struct drm_device *dev)
867 crt->adpa_reg = ADPA; 853 crt->adpa_reg = ADPA;
868 854
869 crt->base.compute_config = intel_crt_compute_config; 855 crt->base.compute_config = intel_crt_compute_config;
870 crt->base.mode_set = intel_crt_mode_set;
871 crt->base.disable = intel_disable_crt; 856 crt->base.disable = intel_disable_crt;
872 crt->base.enable = intel_enable_crt; 857 crt->base.enable = intel_enable_crt;
873 if (I915_HAS_HOTPLUG(dev)) 858 if (I915_HAS_HOTPLUG(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0ad4e9600063..b17b9c7c769f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -364,55 +364,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
364 DRM_ERROR("FDI link training failed!\n"); 364 DRM_ERROR("FDI link training failed!\n");
365} 365}
366 366
367static void intel_ddi_mode_set(struct intel_encoder *encoder)
368{
369 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
370 int port = intel_ddi_get_encoder_port(encoder);
371 int pipe = crtc->pipe;
372 int type = encoder->type;
373 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
374
375 DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
376 port_name(port), pipe_name(pipe));
377
378 crtc->eld_vld = false;
379 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
380 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
381 struct intel_digital_port *intel_dig_port =
382 enc_to_dig_port(&encoder->base);
383
384 intel_dp->DP = intel_dig_port->saved_port_bits |
385 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
386 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
387
388 if (intel_dp->has_audio) {
389 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
390 pipe_name(crtc->pipe));
391
392 /* write eld */
393 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
394 intel_write_eld(&encoder->base, adjusted_mode);
395 }
396 } else if (type == INTEL_OUTPUT_HDMI) {
397 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
398
399 if (intel_hdmi->has_audio) {
400 /* Proper support for digital audio needs a new logic
401 * and a new set of registers, so we leave it for future
402 * patch bombing.
403 */
404 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
405 pipe_name(crtc->pipe));
406
407 /* write eld */
408 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
409 intel_write_eld(&encoder->base, adjusted_mode);
410 }
411
412 intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
413 }
414}
415
416static struct intel_encoder * 367static struct intel_encoder *
417intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) 368intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
418{ 369{
@@ -1062,9 +1013,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1062 } 1013 }
1063 1014
1064 if (type == INTEL_OUTPUT_HDMI) { 1015 if (type == INTEL_OUTPUT_HDMI) {
1065 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 1016 if (intel_crtc->config.has_hdmi_sink)
1066
1067 if (intel_hdmi->has_hdmi_sink)
1068 temp |= TRANS_DDI_MODE_SELECT_HDMI; 1017 temp |= TRANS_DDI_MODE_SELECT_HDMI;
1069 else 1018 else
1070 temp |= TRANS_DDI_MODE_SELECT_DVI; 1019 temp |= TRANS_DDI_MODE_SELECT_DVI;
@@ -1293,28 +1242,48 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1293static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) 1242static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1294{ 1243{
1295 struct drm_encoder *encoder = &intel_encoder->base; 1244 struct drm_encoder *encoder = &intel_encoder->base;
1296 struct drm_crtc *crtc = encoder->crtc;
1297 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 1245 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1298 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1246 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
1299 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1247 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1300 int type = intel_encoder->type; 1248 int type = intel_encoder->type;
1301 1249
1250 if (crtc->config.has_audio) {
1251 DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
1252 pipe_name(crtc->pipe));
1253
1254 /* write eld */
1255 DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
1256 intel_write_eld(encoder, &crtc->config.adjusted_mode);
1257 }
1258
1302 if (type == INTEL_OUTPUT_EDP) { 1259 if (type == INTEL_OUTPUT_EDP) {
1303 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1260 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1304 intel_edp_panel_on(intel_dp); 1261 intel_edp_panel_on(intel_dp);
1305 } 1262 }
1306 1263
1307 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); 1264 WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
1308 I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel); 1265 I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
1309 1266
1310 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1267 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1311 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1268 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1269 struct intel_digital_port *intel_dig_port =
1270 enc_to_dig_port(encoder);
1271
1272 intel_dp->DP = intel_dig_port->saved_port_bits |
1273 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
1274 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
1312 1275
1313 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1276 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1314 intel_dp_start_link_train(intel_dp); 1277 intel_dp_start_link_train(intel_dp);
1315 intel_dp_complete_link_train(intel_dp); 1278 intel_dp_complete_link_train(intel_dp);
1316 if (port != PORT_A) 1279 if (port != PORT_A)
1317 intel_dp_stop_link_train(intel_dp); 1280 intel_dp_stop_link_train(intel_dp);
1281 } else if (type == INTEL_OUTPUT_HDMI) {
1282 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1283
1284 intel_hdmi->set_infoframes(encoder,
1285 crtc->config.has_hdmi_sink,
1286 &crtc->config.adjusted_mode);
1318 } 1287 }
1319} 1288}
1320 1289
@@ -1385,7 +1354,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1385 intel_edp_psr_enable(intel_dp); 1354 intel_edp_psr_enable(intel_dp);
1386 } 1355 }
1387 1356
1388 if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { 1357 if (intel_crtc->config.has_audio) {
1358 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
1389 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1359 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1390 tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1360 tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
1391 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1361 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1403,11 +1373,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1403 struct drm_i915_private *dev_priv = dev->dev_private; 1373 struct drm_i915_private *dev_priv = dev->dev_private;
1404 uint32_t tmp; 1374 uint32_t tmp;
1405 1375
1406 if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { 1376 /* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
1377 * register is part of the power well on Haswell. */
1378 if (intel_crtc->config.has_audio) {
1407 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1379 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1408 tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << 1380 tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
1409 (pipe * 4)); 1381 (pipe * 4));
1410 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1382 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1383 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
1411 } 1384 }
1412 1385
1413 if (type == INTEL_OUTPUT_EDP) { 1386 if (type == INTEL_OUTPUT_EDP) {
@@ -1580,6 +1553,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1580 1553
1581 switch (temp & TRANS_DDI_MODE_SELECT_MASK) { 1554 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1582 case TRANS_DDI_MODE_SELECT_HDMI: 1555 case TRANS_DDI_MODE_SELECT_HDMI:
1556 pipe_config->has_hdmi_sink = true;
1583 case TRANS_DDI_MODE_SELECT_DVI: 1557 case TRANS_DDI_MODE_SELECT_DVI:
1584 case TRANS_DDI_MODE_SELECT_FDI: 1558 case TRANS_DDI_MODE_SELECT_FDI:
1585 break; 1559 break;
@@ -1592,6 +1566,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1592 break; 1566 break;
1593 } 1567 }
1594 1568
1569 if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
1570 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1571 if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
1572 pipe_config->has_audio = true;
1573 }
1574
1595 if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && 1575 if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
1596 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { 1576 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1597 /* 1577 /*
@@ -1708,7 +1688,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1708 DRM_MODE_ENCODER_TMDS); 1688 DRM_MODE_ENCODER_TMDS);
1709 1689
1710 intel_encoder->compute_config = intel_ddi_compute_config; 1690 intel_encoder->compute_config = intel_ddi_compute_config;
1711 intel_encoder->mode_set = intel_ddi_mode_set;
1712 intel_encoder->enable = intel_enable_ddi; 1691 intel_encoder->enable = intel_enable_ddi;
1713 intel_encoder->pre_enable = intel_ddi_pre_enable; 1692 intel_encoder->pre_enable = intel_ddi_pre_enable;
1714 intel_encoder->disable = intel_disable_ddi; 1693 intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5b60e25baa32..efd3cf50cb0f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,6 +41,9 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44#define DIV_ROUND_CLOSEST_ULL(ll, d) \
45 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
46
44static void intel_increase_pllclock(struct drm_crtc *crtc); 47static void intel_increase_pllclock(struct drm_crtc *crtc);
45static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
46 49
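The new DIV_ROUND_CLOSEST_ULL() macro does round-to-nearest division on a 64-bit dividend via do_div(), which is needed because a plain '/' on a u64 would pull in libgcc helpers on 32-bit kernel builds. A userspace equivalent with two worked cases:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Userspace equivalent: add half the divisor, then divide. */
    #define DIV_ROUND_CLOSEST_ULL(ll, d) (((ll) + (d) / 2) / (d))

    int main(void)
    {
        assert(DIV_ROUND_CLOSEST_ULL(7ULL, 2) == 4);  /* 3.5  rounds up   */
        assert(DIV_ROUND_CLOSEST_ULL(9ULL, 4) == 2);  /* 2.25 rounds down */
        printf("ok\n");
        return 0;
    }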
@@ -55,6 +58,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
55 struct intel_framebuffer *ifb, 58 struct intel_framebuffer *ifb,
56 struct drm_mode_fb_cmd2 *mode_cmd, 59 struct drm_mode_fb_cmd2 *mode_cmd,
57 struct drm_i915_gem_object *obj); 60 struct drm_i915_gem_object *obj);
61static void intel_dp_set_m_n(struct intel_crtc *crtc);
62static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
63static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
64static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
65 struct intel_link_m_n *m_n);
66static void ironlake_set_pipeconf(struct drm_crtc *crtc);
67static void haswell_set_pipeconf(struct drm_crtc *crtc);
68static void intel_set_pipe_csc(struct drm_crtc *crtc);
69static void vlv_prepare_pll(struct intel_crtc *crtc);
58 70
59typedef struct { 71typedef struct {
60 int min, max; 72 int min, max;
@@ -328,6 +340,22 @@ static const intel_limit_t intel_limits_vlv = {
328 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 340 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
329}; 341};
330 342
343static const intel_limit_t intel_limits_chv = {
344 /*
345 * These are the data rate limits (measured in fast clocks)
346 * since those are the strictest limits we have. The fast
347 * clock and actual rate limits are more relaxed, so checking
348 * them would make no difference.
349 */
350 .dot = { .min = 25000 * 5, .max = 540000 * 5},
351 .vco = { .min = 4860000, .max = 6700000 },
352 .n = { .min = 1, .max = 1 },
353 .m1 = { .min = 2, .max = 2 },
354 .m2 = { .min = 24 << 22, .max = 175 << 22 },
355 .p1 = { .min = 2, .max = 4 },
356 .p2 = { .p2_slow = 1, .p2_fast = 14 },
357};
358
331static void vlv_clock(int refclk, intel_clock_t *clock) 359static void vlv_clock(int refclk, intel_clock_t *clock)
332{ 360{
333 clock->m = clock->m1 * clock->m2; 361 clock->m = clock->m1 * clock->m2;
@@ -412,6 +440,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
412 limit = &intel_limits_pineview_lvds; 440 limit = &intel_limits_pineview_lvds;
413 else 441 else
414 limit = &intel_limits_pineview_sdvo; 442 limit = &intel_limits_pineview_sdvo;
443 } else if (IS_CHERRYVIEW(dev)) {
444 limit = &intel_limits_chv;
415 } else if (IS_VALLEYVIEW(dev)) { 445 } else if (IS_VALLEYVIEW(dev)) {
416 limit = &intel_limits_vlv; 446 limit = &intel_limits_vlv;
417 } else if (!IS_GEN2(dev)) { 447 } else if (!IS_GEN2(dev)) {
@@ -456,6 +486,17 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
456 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 486 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
457} 487}
458 488
489static void chv_clock(int refclk, intel_clock_t *clock)
490{
491 clock->m = clock->m1 * clock->m2;
492 clock->p = clock->p1 * clock->p2;
493 if (WARN_ON(clock->n == 0 || clock->p == 0))
494 return;
495 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
496 clock->n << 22);
497 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
498}
499
459#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 500#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
460/** 501/**
461 * Returns whether the given set of divisors are valid for a given refclk with 502 * Returns whether the given set of divisors are valid for a given refclk with
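chv_clock() mirrors vlv_clock() except that m2 carries 22 fractional bits (see the intel_limits_chv table, where m2 is shifted by 22), so the VCO computation divides refclk * m by n << 22 in 64-bit round-to-nearest math. A worked example, assuming a 19.2 MHz reference clock in kHz units (an assumption; the refclk is not stated in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t refclk = 19200;          /* assumed 19.2 MHz ref, in kHz  */
        uint64_t n = 1, m1 = 2;
        uint64_t m2 = 140ULL << 22;       /* .22 fixed point, integer part 140 */
        uint64_t m  = m1 * m2;

        /* vco = refclk * m / (n << 22), rounded to nearest as in chv_clock() */
        uint64_t d   = n << 22;
        uint64_t vco = (refclk * m + d / 2) / d;

        printf("vco = %llu kHz\n", (unsigned long long)vco); /* 5376000 kHz */
        return 0;
    }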
@@ -731,6 +772,58 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
731 return found; 772 return found;
732} 773}
733 774
775static bool
776chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
777 int target, int refclk, intel_clock_t *match_clock,
778 intel_clock_t *best_clock)
779{
780 struct drm_device *dev = crtc->dev;
781 intel_clock_t clock;
782 uint64_t m2;
783 int found = false;
784
785 memset(best_clock, 0, sizeof(*best_clock));
786
787 /*
788 * Based on the hardware doc, n is always set to 1 and m1 always
789 * set to 2. If we ever need to support a 200MHz refclk, we must
790 * revisit this because n may no longer be 1.
791 */
792 clock.n = 1, clock.m1 = 2;
793 target *= 5; /* fast clock */
794
795 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
796 for (clock.p2 = limit->p2.p2_fast;
797 clock.p2 >= limit->p2.p2_slow;
798 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
799
800 clock.p = clock.p1 * clock.p2;
801
802 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
803 clock.n) << 22, refclk * clock.m1);
804
805 if (m2 > INT_MAX/clock.m1)
806 continue;
807
808 clock.m2 = m2;
809
810 chv_clock(refclk, &clock);
811
812 if (!intel_PLL_is_valid(dev, limit, &clock))
813 continue;
814
815 /* Based on hardware requirements, prefer a bigger p
816 */
817 if (clock.p > best_clock->p) {
818 *best_clock = clock;
819 found = true;
820 }
821 }
822 }
823
824 return found;
825}
826
734bool intel_crtc_active(struct drm_crtc *crtc) 827bool intel_crtc_active(struct drm_crtc *crtc)
735{ 828{
736 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
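Unlike vlv_find_best_dpll(), which searches m2 and minimizes error, the CHV finder above solves m2 exactly for each (p1, p2) pair and simply prefers the largest post divider. A standalone check that the solved m2 reproduces the target fast clock (5x the pixel clock); the refclk and divider choices here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Round-to-nearest 64-bit division, as in the kernel macro. */
    static uint64_t div_closest(uint64_t a, uint64_t b) { return (a + b / 2) / b; }

    int main(void)
    {
        uint64_t refclk = 19200, n = 1, m1 = 2;  /* kHz; assumed 19.2 MHz ref */
        uint64_t target = 148500 * 5;            /* fast clock = 5 x pixel clock */
        uint64_t p1 = 4, p2 = 2, p = p1 * p2;

        /* Solve for m2 exactly, then verify the clock we get back. */
        uint64_t m2  = div_closest((target * p * n) << 22, refclk * m1);
        uint64_t vco = div_closest(refclk * m1 * m2, n << 22);
        uint64_t dot = div_closest(vco, p);

        printf("m2=%llu vco=%llu dot=%llu (target %llu)\n",
               (unsigned long long)m2, (unsigned long long)vco,
               (unsigned long long)dot, (unsigned long long)target);
        return 0;
    }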
@@ -765,7 +858,7 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
765 frame = I915_READ(frame_reg); 858 frame = I915_READ(frame_reg);
766 859
767 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) 860 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
768 DRM_DEBUG_KMS("vblank wait timed out\n"); 861 WARN(1, "vblank wait timed out\n");
769} 862}
770 863
771/** 864/**
@@ -878,7 +971,7 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
878 u32 bit; 971 u32 bit;
879 972
880 if (HAS_PCH_IBX(dev_priv->dev)) { 973 if (HAS_PCH_IBX(dev_priv->dev)) {
881 switch(port->port) { 974 switch (port->port) {
882 case PORT_B: 975 case PORT_B:
883 bit = SDE_PORTB_HOTPLUG; 976 bit = SDE_PORTB_HOTPLUG;
884 break; 977 break;
@@ -892,7 +985,7 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
892 return true; 985 return true;
893 } 986 }
894 } else { 987 } else {
895 switch(port->port) { 988 switch (port->port) {
896 case PORT_B: 989 case PORT_B:
897 bit = SDE_PORTB_HOTPLUG_CPT; 990 bit = SDE_PORTB_HOTPLUG_CPT;
898 break; 991 break;
@@ -1097,10 +1190,8 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
1097 1190
1098 if (IS_845G(dev) || IS_I865G(dev)) 1191 if (IS_845G(dev) || IS_I865G(dev))
1099 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1192 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1100 else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
1101 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1102 else 1193 else
1103 cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; 1194 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1104 1195
1105 WARN(cur_state != state, 1196 WARN(cur_state != state,
1106 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1197 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -1253,6 +1344,9 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1253 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1344 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1254 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1345 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1255 return false; 1346 return false;
1347 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1348 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1349 return false;
1256 } else { 1350 } else {
1257 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1351 if ((val & DP_PIPE_MASK) != (pipe << 30))
1258 return false; 1352 return false;
@@ -1269,6 +1363,9 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1269 if (HAS_PCH_CPT(dev_priv->dev)) { 1363 if (HAS_PCH_CPT(dev_priv->dev)) {
1270 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1364 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1271 return false; 1365 return false;
1366 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1367 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1368 return false;
1272 } else { 1369 } else {
1273 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1370 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1274 return false; 1371 return false;
@@ -1367,7 +1464,17 @@ static void intel_init_dpio(struct drm_device *dev)
1367 if (!IS_VALLEYVIEW(dev)) 1464 if (!IS_VALLEYVIEW(dev))
1368 return; 1465 return;
1369 1466
1370 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1467 /*
1468 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1469 * CHV x1 PHY (DP/HDMI D)
1470 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1471 */
1472 if (IS_CHERRYVIEW(dev)) {
1473 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1474 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1475 } else {
1476 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1477 }
1371} 1478}
1372 1479
1373static void intel_reset_dpio(struct drm_device *dev) 1480static void intel_reset_dpio(struct drm_device *dev)
@@ -1377,25 +1484,48 @@ static void intel_reset_dpio(struct drm_device *dev)
1377 if (!IS_VALLEYVIEW(dev)) 1484 if (!IS_VALLEYVIEW(dev))
1378 return; 1485 return;
1379 1486
1380 /* 1487 if (IS_CHERRYVIEW(dev)) {
1381 * Enable the CRI clock source so we can get at the display and the 1488 enum dpio_phy phy;
1382 * reference clock for VGA hotplug / manual detection. 1489 u32 val;
1383 */
1384 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1385 DPLL_REFA_CLK_ENABLE_VLV |
1386 DPLL_INTEGRATED_CRI_CLK_VLV);
1387 1490
1388 /* 1491 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1389 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1492 /* Poll for phypwrgood signal */
1390 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1493 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1391 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1494 PHY_POWERGOOD(phy), 1))
1392 * b. The other bits such as sfr settings / modesel may all be set 1495 DRM_ERROR("Display PHY %d is not power up\n", phy);
1393 * to 0. 1496
1394 * 1497 /*
1395 * This should only be done on init and resume from S3 with both 1498 * Deassert common lane reset for PHY.
1396 * PLLs disabled, or we risk losing DPIO and PLL synchronization. 1499 *
1397 */ 1500 * This should only be done on init and resume from S3
1398 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); 1501 * with both PLLs disabled, or we risk losing DPIO and
1502 * PLL synchronization.
1503 */
1504 val = I915_READ(DISPLAY_PHY_CONTROL);
1505 I915_WRITE(DISPLAY_PHY_CONTROL,
1506 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1507 }
1508
1509 } else {
1510 /*
1511 * If DPIO has already been reset, e.g. by BIOS, just skip all
1512 * this.
1513 */
1514 if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
1515 return;
1516
1517 /*
1518 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1519 * Need to assert and de-assert PHY SB reset by gating the
1520 * common lane power, then un-gating it.
1521 * Simply ungating isn't enough to reset the PHY enough to get
1522 * ports and lanes running.
1523 */
1524 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1525 false);
1526 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1527 true);
1528 }
1399} 1529}
1400 1530
1401static void vlv_enable_pll(struct intel_crtc *crtc) 1531static void vlv_enable_pll(struct intel_crtc *crtc)
@@ -1436,6 +1566,44 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
1436 udelay(150); /* wait for warmup */ 1566 udelay(150); /* wait for warmup */
1437} 1567}
1438 1568
1569static void chv_enable_pll(struct intel_crtc *crtc)
1570{
1571 struct drm_device *dev = crtc->base.dev;
1572 struct drm_i915_private *dev_priv = dev->dev_private;
1573 int pipe = crtc->pipe;
1574 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1575 u32 tmp;
1576
1577 assert_pipe_disabled(dev_priv, crtc->pipe);
1578
1579 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1580
1581 mutex_lock(&dev_priv->dpio_lock);
1582
1583 /* Enable back the 10bit clock to display controller */
1584 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1585 tmp |= DPIO_DCLKP_EN;
1586 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1587
1588 /*
1589 * Need to wait > 100 ns between setting the dclkp clock enable bit and enabling the PLL.
1590 */
1591 udelay(1);
1592
1593 /* Enable PLL */
1594 I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1595
1596 /* Check PLL is locked */
1597 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1598 DRM_ERROR("PLL %d failed to lock\n", pipe);
1599
1600 /* not sure when this should be written */
1601 I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1602 POSTING_READ(DPLL_MD(pipe));
1603
1604 mutex_unlock(&dev_priv->dpio_lock);
1605}
1606
1439static void i9xx_enable_pll(struct intel_crtc *crtc) 1607static void i9xx_enable_pll(struct intel_crtc *crtc)
1440{ 1608{
1441 struct drm_device *dev = crtc->base.dev; 1609 struct drm_device *dev = crtc->base.dev;
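chv_enable_pll() above follows a strict ordering: ungate the 10-bit DCLKP clock over sideband, wait more than 100 ns, write DPLL, then poll the lock bit with a 1 ms timeout via wait_for(). A userspace sketch of that polling idiom; read_lock_bit() is a stand-in for the DPLL_LOCK_VLV register read:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for reading DPLL_LOCK_VLV; reports "locked" on the 2nd poll. */
    static bool read_lock_bit(void)
    {
        static int polls;
        return ++polls >= 2;
    }

    /* Userspace sketch of the kernel's wait_for(COND, timeout_ms) idiom. */
    static bool wait_for_cond(bool (*cond)(void), int timeout_ms)
    {
        struct timespec ts = { 0, 1000000 };   /* poll roughly every 1 ms */

        for (int elapsed = 0; elapsed < timeout_ms; elapsed++) {
            if (cond())
                return true;
            nanosleep(&ts, NULL);
        }
        return cond();                         /* one last check, as wait_for does */
    }

    int main(void)
    {
        if (!wait_for_cond(read_lock_bit, 5))
            fprintf(stderr, "PLL failed to lock\n");
        else
            printf("PLL locked\n");
        return 0;
    }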
@@ -1519,45 +1687,92 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1519 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; 1687 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1520 I915_WRITE(DPLL(pipe), val); 1688 I915_WRITE(DPLL(pipe), val);
1521 POSTING_READ(DPLL(pipe)); 1689 POSTING_READ(DPLL(pipe));
1690
1691}
1692
1693static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1694{
1695 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1696 u32 val;
1697
1698 /* Make sure the pipe isn't still relying on us */
1699 assert_pipe_disabled(dev_priv, pipe);
1700
1701 /* Set PLL en = 0 */
1702 val = DPLL_SSC_REF_CLOCK_CHV;
1703 if (pipe != PIPE_A)
1704 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1705 I915_WRITE(DPLL(pipe), val);
1706 POSTING_READ(DPLL(pipe));
1707
1708 mutex_lock(&dev_priv->dpio_lock);
1709
1710 /* Disable 10bit clock to display controller */
1711 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1712 val &= ~DPIO_DCLKP_EN;
1713 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1714
1715 mutex_unlock(&dev_priv->dpio_lock);
1522} 1716}
1523 1717
1524void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1718void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1525 struct intel_digital_port *dport) 1719 struct intel_digital_port *dport)
1526{ 1720{
1527 u32 port_mask; 1721 u32 port_mask;
1722 int dpll_reg;
1528 1723
1529 switch (dport->port) { 1724 switch (dport->port) {
1530 case PORT_B: 1725 case PORT_B:
1531 port_mask = DPLL_PORTB_READY_MASK; 1726 port_mask = DPLL_PORTB_READY_MASK;
1727 dpll_reg = DPLL(0);
1532 break; 1728 break;
1533 case PORT_C: 1729 case PORT_C:
1534 port_mask = DPLL_PORTC_READY_MASK; 1730 port_mask = DPLL_PORTC_READY_MASK;
1731 dpll_reg = DPLL(0);
1732 break;
1733 case PORT_D:
1734 port_mask = DPLL_PORTD_READY_MASK;
1735 dpll_reg = DPIO_PHY_STATUS;
1535 break; 1736 break;
1536 default: 1737 default:
1537 BUG(); 1738 BUG();
1538 } 1739 }
1539 1740
1540 if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000)) 1741 if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1541 WARN(1, "timed out waiting for port %c ready: 0x%08x\n", 1742 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1542 port_name(dport->port), I915_READ(DPLL(0))); 1743 port_name(dport->port), I915_READ(dpll_reg));
1744}
1745
1746static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1747{
1748 struct drm_device *dev = crtc->base.dev;
1749 struct drm_i915_private *dev_priv = dev->dev_private;
1750 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1751
1752 WARN_ON(!pll->refcount);
1753 if (pll->active == 0) {
1754 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1755 WARN_ON(pll->on);
1756 assert_shared_dpll_disabled(dev_priv, pll);
1757
1758 pll->mode_set(dev_priv, pll);
1759 }
1543} 1760}
1544 1761
1545/** 1762/**
1546 * ironlake_enable_shared_dpll - enable PCH PLL 1763 * intel_enable_shared_dpll - enable PCH PLL
1547 * @dev_priv: i915 private structure 1764 * @dev_priv: i915 private structure
1548 * @pipe: pipe PLL to enable 1765 * @pipe: pipe PLL to enable
1549 * 1766 *
1550 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1767 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1551 * drives the transcoder clock. 1768 * drives the transcoder clock.
1552 */ 1769 */
1553static void ironlake_enable_shared_dpll(struct intel_crtc *crtc) 1770static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1554{ 1771{
1555 struct drm_device *dev = crtc->base.dev; 1772 struct drm_device *dev = crtc->base.dev;
1556 struct drm_i915_private *dev_priv = dev->dev_private; 1773 struct drm_i915_private *dev_priv = dev->dev_private;
1557 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1774 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1558 1775
1559 /* PCH PLLs only available on ILK, SNB and IVB */
1560 BUG_ON(INTEL_INFO(dev)->gen < 5);
1561 if (WARN_ON(pll == NULL)) 1776 if (WARN_ON(pll == NULL))
1562 return; 1777 return;
1563 1778
@@ -1804,16 +2019,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1804 2019
1805 I915_WRITE(reg, val | PIPECONF_ENABLE); 2020 I915_WRITE(reg, val | PIPECONF_ENABLE);
1806 POSTING_READ(reg); 2021 POSTING_READ(reg);
1807
1808 /*
1809 * There's no guarantee the pipe will really start running now. It
1810 * depends on the Gen, the output type and the relative order between
1811 * pipe and plane enabling. Avoid waiting on HSW+ since it's not
1812 * necessary.
1813 * TODO: audit the previous gens.
1814 */
1815 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
1816 intel_wait_for_vblank(dev_priv->dev, pipe);
1817} 2022}
1818 2023
1819/** 2024/**
@@ -1890,18 +2095,17 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
1890 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 2095 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1891 assert_pipe_enabled(dev_priv, pipe); 2096 assert_pipe_enabled(dev_priv, pipe);
1892 2097
1893 WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n"); 2098 if (intel_crtc->primary_enabled)
2099 return;
1894 2100
1895 intel_crtc->primary_enabled = true; 2101 intel_crtc->primary_enabled = true;
1896 2102
1897 reg = DSPCNTR(plane); 2103 reg = DSPCNTR(plane);
1898 val = I915_READ(reg); 2104 val = I915_READ(reg);
1899 if (val & DISPLAY_PLANE_ENABLE) 2105 WARN_ON(val & DISPLAY_PLANE_ENABLE);
1900 return;
1901 2106
1902 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 2107 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1903 intel_flush_primary_plane(dev_priv, plane); 2108 intel_flush_primary_plane(dev_priv, plane);
1904 intel_wait_for_vblank(dev_priv->dev, pipe);
1905} 2109}
1906 2110
1907/** 2111/**
@@ -1920,18 +2124,17 @@ static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
1920 int reg; 2124 int reg;
1921 u32 val; 2125 u32 val;
1922 2126
1923 WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n"); 2127 if (!intel_crtc->primary_enabled)
2128 return;
1924 2129
1925 intel_crtc->primary_enabled = false; 2130 intel_crtc->primary_enabled = false;
1926 2131
1927 reg = DSPCNTR(plane); 2132 reg = DSPCNTR(plane);
1928 val = I915_READ(reg); 2133 val = I915_READ(reg);
1929 if ((val & DISPLAY_PLANE_ENABLE) == 0) 2134 WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
1930 return;
1931 2135
1932 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 2136 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1933 intel_flush_primary_plane(dev_priv, plane); 2137 intel_flush_primary_plane(dev_priv, plane);
1934 intel_wait_for_vblank(dev_priv->dev, pipe);
1935} 2138}
1936 2139
1937static bool need_vtd_wa(struct drm_device *dev) 2140static bool need_vtd_wa(struct drm_device *dev)
@@ -1954,7 +2157,7 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
1954int 2157int
1955intel_pin_and_fence_fb_obj(struct drm_device *dev, 2158intel_pin_and_fence_fb_obj(struct drm_device *dev,
1956 struct drm_i915_gem_object *obj, 2159 struct drm_i915_gem_object *obj,
1957 struct intel_ring_buffer *pipelined) 2160 struct intel_engine_cs *pipelined)
1958{ 2161{
1959 struct drm_i915_private *dev_priv = dev->dev_private; 2162 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 alignment; 2163 u32 alignment;
@@ -2134,7 +2337,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2134 * Failed to alloc the obj, check to see if we should share 2337 * Failed to alloc the obj, check to see if we should share
2135 * an fb with another CRTC instead 2338 * an fb with another CRTC instead
2136 */ 2339 */
2137 list_for_each_entry(c, &dev->mode_config.crtc_list, head) { 2340 for_each_crtc(dev, c) {
2138 i = to_intel_crtc(c); 2341 i = to_intel_crtc(c);
2139 2342
2140 if (c == &intel_crtc->base) 2343 if (c == &intel_crtc->base)
@@ -2152,9 +2355,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2152 } 2355 }
2153} 2356}
2154 2357
2155static int i9xx_update_primary_plane(struct drm_crtc *crtc, 2358static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2156 struct drm_framebuffer *fb, 2359 struct drm_framebuffer *fb,
2157 int x, int y) 2360 int x, int y)
2158{ 2361{
2159 struct drm_device *dev = crtc->dev; 2362 struct drm_device *dev = crtc->dev;
2160 struct drm_i915_private *dev_priv = dev->dev_private; 2363 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2166,15 +2369,6 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
2166 u32 dspcntr; 2369 u32 dspcntr;
2167 u32 reg; 2370 u32 reg;
2168 2371
2169 switch (plane) {
2170 case 0:
2171 case 1:
2172 break;
2173 default:
2174 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2175 return -EINVAL;
2176 }
2177
2178 intel_fb = to_intel_framebuffer(fb); 2372 intel_fb = to_intel_framebuffer(fb);
2179 obj = intel_fb->obj; 2373 obj = intel_fb->obj;
2180 2374
@@ -2249,13 +2443,11 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
2249 } else 2443 } else
2250 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); 2444 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2251 POSTING_READ(reg); 2445 POSTING_READ(reg);
2252
2253 return 0;
2254} 2446}
2255 2447
2256static int ironlake_update_primary_plane(struct drm_crtc *crtc, 2448static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2257 struct drm_framebuffer *fb, 2449 struct drm_framebuffer *fb,
2258 int x, int y) 2450 int x, int y)
2259{ 2451{
2260 struct drm_device *dev = crtc->dev; 2452 struct drm_device *dev = crtc->dev;
2261 struct drm_i915_private *dev_priv = dev->dev_private; 2453 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2267,16 +2459,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
2267 u32 dspcntr; 2459 u32 dspcntr;
2268 u32 reg; 2460 u32 reg;
2269 2461
2270 switch (plane) {
2271 case 0:
2272 case 1:
2273 case 2:
2274 break;
2275 default:
2276 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2277 return -EINVAL;
2278 }
2279
2280 intel_fb = to_intel_framebuffer(fb); 2462 intel_fb = to_intel_framebuffer(fb);
2281 obj = intel_fb->obj; 2463 obj = intel_fb->obj;
2282 2464
@@ -2343,8 +2525,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
2343 I915_WRITE(DSPLINOFF(plane), linear_offset); 2525 I915_WRITE(DSPLINOFF(plane), linear_offset);
2344 } 2526 }
2345 POSTING_READ(reg); 2527 POSTING_READ(reg);
2346
2347 return 0;
2348} 2528}
2349 2529
2350/* Assume fb object is pinned & idle & fenced and just update base pointers */ 2530/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -2359,7 +2539,9 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2359 dev_priv->display.disable_fbc(dev); 2539 dev_priv->display.disable_fbc(dev);
2360 intel_increase_pllclock(crtc); 2540 intel_increase_pllclock(crtc);
2361 2541
2362 return dev_priv->display.update_primary_plane(crtc, fb, x, y); 2542 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2543
2544 return 0;
2363} 2545}
2364 2546
2365void intel_display_handle_reset(struct drm_device *dev) 2547void intel_display_handle_reset(struct drm_device *dev)
@@ -2381,7 +2563,7 @@ void intel_display_handle_reset(struct drm_device *dev)
2381 * pending_flip_queue really got woken up. 2563 * pending_flip_queue really got woken up.
2382 */ 2564 */
2383 2565
2384 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2566 for_each_crtc(dev, crtc) {
2385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2567 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2386 enum plane plane = intel_crtc->plane; 2568 enum plane plane = intel_crtc->plane;
2387 2569
@@ -2389,10 +2571,10 @@ void intel_display_handle_reset(struct drm_device *dev)
2389 intel_finish_page_flip_plane(dev, plane); 2571 intel_finish_page_flip_plane(dev, plane);
2390 } 2572 }
2391 2573
2392 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2574 for_each_crtc(dev, crtc) {
2393 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2575 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2394 2576
2395 mutex_lock(&crtc->mutex); 2577 drm_modeset_lock(&crtc->mutex, NULL);
2396 /* 2578 /*
2397 * FIXME: Once we have proper support for primary planes (and 2579 * FIXME: Once we have proper support for primary planes (and
2398 * disabling them without disabling the entire crtc) allow again 2580 * disabling them without disabling the entire crtc) allow again
@@ -2403,7 +2585,7 @@ void intel_display_handle_reset(struct drm_device *dev)
2403 crtc->primary->fb, 2585 crtc->primary->fb,
2404 crtc->x, 2586 crtc->x,
2405 crtc->y); 2587 crtc->y);
2406 mutex_unlock(&crtc->mutex); 2588 drm_modeset_unlock(&crtc->mutex);
2407 } 2589 }
2408} 2590}
2409 2591
@@ -2518,14 +2700,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2518 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; 2700 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2519 } 2701 }
2520 2702
2521 ret = dev_priv->display.update_primary_plane(crtc, fb, x, y); 2703 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2522 if (ret) {
2523 mutex_lock(&dev->struct_mutex);
2524 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2525 mutex_unlock(&dev->struct_mutex);
2526 DRM_ERROR("failed to update base address\n");
2527 return ret;
2528 }
2529 2704
2530 old_fb = crtc->primary->fb; 2705 old_fb = crtc->primary->fb;
2531 crtc->primary->fb = fb; 2706 crtc->primary->fb = fb;
@@ -2628,12 +2803,10 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2628 struct drm_i915_private *dev_priv = dev->dev_private; 2803 struct drm_i915_private *dev_priv = dev->dev_private;
2629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2804 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2630 int pipe = intel_crtc->pipe; 2805 int pipe = intel_crtc->pipe;
2631 int plane = intel_crtc->plane;
2632 u32 reg, temp, tries; 2806 u32 reg, temp, tries;
2633 2807
2634 /* FDI needs bits from pipe & plane first */ 2808 /* FDI needs bits from pipe first */
2635 assert_pipe_enabled(dev_priv, pipe); 2809 assert_pipe_enabled(dev_priv, pipe);
2636 assert_plane_enabled(dev_priv, plane);
2637 2810
2638 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2811 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2639 for train result */ 2812 for train result */
@@ -3064,9 +3237,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
3064 udelay(100); 3237 udelay(100);
3065 3238
3066 /* Ironlake workaround, disable clock pointer after downing FDI */ 3239 /* Ironlake workaround, disable clock pointer after downing FDI */
3067 if (HAS_PCH_IBX(dev)) { 3240 if (HAS_PCH_IBX(dev))
3068 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3241 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3069 }
3070 3242
3071 /* still set train pattern 1 */ 3243 /* still set train pattern 1 */
3072 reg = FDI_TX_CTL(pipe); 3244 reg = FDI_TX_CTL(pipe);
@@ -3104,7 +3276,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3104 * cannot claim and pin a new fb without at least acquiring the 3276
3105 * struct_mutex and so serialising with us. 3277 * struct_mutex and so serialising with us.
3106 */ 3278 */
3107 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 3279 for_each_intel_crtc(dev, crtc) {
3108 if (atomic_read(&crtc->unpin_work_count) == 0) 3280 if (atomic_read(&crtc->unpin_work_count) == 0)
3109 continue; 3281 continue;
3110 3282
@@ -3117,7 +3289,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3117 return false; 3289 return false;
3118} 3290}
3119 3291
3120static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3292void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3121{ 3293{
3122 struct drm_device *dev = crtc->dev; 3294 struct drm_device *dev = crtc->dev;
3123 struct drm_i915_private *dev_priv = dev->dev_private; 3295 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3127,8 +3299,9 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3127 3299
3128 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3300 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3129 3301
3130 wait_event(dev_priv->pending_flip_queue, 3302 WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3131 !intel_crtc_has_pending_flip(crtc)); 3303 !intel_crtc_has_pending_flip(crtc),
3304 60*HZ) == 0);
3132 3305
3133 mutex_lock(&dev->struct_mutex); 3306 mutex_lock(&dev->struct_mutex);
3134 intel_finish_fb(crtc->primary->fb); 3307 intel_finish_fb(crtc->primary->fb);
@@ -3341,7 +3514,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3341 * Note that enable_shared_dpll tries to do the right thing, but 3514 * Note that enable_shared_dpll tries to do the right thing, but
3342 * get_shared_dpll unconditionally resets the pll - we need that to have 3515 * get_shared_dpll unconditionally resets the pll - we need that to have
3343 * the right LVDS enable sequence. */ 3516 * the right LVDS enable sequence. */
3344 ironlake_enable_shared_dpll(intel_crtc); 3517 intel_enable_shared_dpll(intel_crtc);
3345 3518
3346 /* set transcoder timing, panel must allow it */ 3519 /* set transcoder timing, panel must allow it */
3347 assert_panel_unlocked(dev_priv, pipe); 3520 assert_panel_unlocked(dev_priv, pipe);
@@ -3445,6 +3618,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3445 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3618 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3446 crtc->base.base.id, pll->name); 3619 crtc->base.base.id, pll->name);
3447 3620
3621 WARN_ON(pll->refcount);
3622
3448 goto found; 3623 goto found;
3449 } 3624 }
3450 3625
@@ -3478,20 +3653,13 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3478 return NULL; 3653 return NULL;
3479 3654
3480found: 3655found:
3656 if (pll->refcount == 0)
3657 pll->hw_state = crtc->config.dpll_hw_state;
3658
3481 crtc->config.shared_dpll = i; 3659 crtc->config.shared_dpll = i;
3482 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 3660 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3483 pipe_name(crtc->pipe)); 3661 pipe_name(crtc->pipe));
3484 3662
3485 if (pll->active == 0) {
3486 memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3487 sizeof(pll->hw_state));
3488
3489 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3490 WARN_ON(pll->on);
3491 assert_shared_dpll_disabled(dev_priv, pll);
3492
3493 pll->mode_set(dev_priv, pll);
3494 }
3495 pll->refcount++; 3663 pll->refcount++;
3496 3664
3497 return pll; 3665 return pll;
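After this hunk, the hw_state snapshot happens in intel_get_shared_dpll() when the first CRTC takes a reference, while the actual programming moves to intel_prepare_shared_dpll() (added earlier in this patch) on first activation; refcount tracks sharing, active tracks enablement. A minimal sketch of that refcount-sharing pattern; all names here are illustrative:

    #include <stdio.h>

    /* State is captured by the first user, programmed on first activation. */
    struct shared_pll { int refcount; int active; unsigned hw_state; };

    static void pll_get(struct shared_pll *pll, unsigned wanted_state)
    {
        if (pll->refcount == 0)
            pll->hw_state = wanted_state;   /* first user defines the config */
        pll->refcount++;
    }

    static void pll_enable(struct shared_pll *pll)
    {
        if (pll->active++ == 0)             /* program hardware only once */
            printf("programming pll with state %#x\n", pll->hw_state);
    }

    int main(void)
    {
        struct shared_pll pll = { 0 };

        pll_get(&pll, 0x1234);   /* CRTC A */
        pll_get(&pll, 0x1234);   /* CRTC B shares it; state must match */
        pll_enable(&pll);        /* programs once */
        pll_enable(&pll);        /* second enable is a no-op */
        return 0;
    }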
@@ -3562,17 +3730,17 @@ static void intel_disable_planes(struct drm_crtc *crtc)
3562 3730
3563void hsw_enable_ips(struct intel_crtc *crtc) 3731void hsw_enable_ips(struct intel_crtc *crtc)
3564{ 3732{
3565 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3733 struct drm_device *dev = crtc->base.dev;
3734 struct drm_i915_private *dev_priv = dev->dev_private;
3566 3735
3567 if (!crtc->config.ips_enabled) 3736 if (!crtc->config.ips_enabled)
3568 return; 3737 return;
3569 3738
3570 /* We can only enable IPS after we enable a plane and wait for a vblank. 3739 /* We can only enable IPS after we enable a plane and wait for a vblank */
3571 * We guarantee that the plane is enabled by calling intel_enable_ips 3740 intel_wait_for_vblank(dev, crtc->pipe);
3572 * only after intel_enable_plane. And intel_enable_plane already waits 3741
3573 * for a vblank, so all we need to do here is to enable the IPS bit. */
3574 assert_plane_enabled(dev_priv, crtc->plane); 3742 assert_plane_enabled(dev_priv, crtc->plane);
3575 if (IS_BROADWELL(crtc->base.dev)) { 3743 if (IS_BROADWELL(dev)) {
3576 mutex_lock(&dev_priv->rps.hw_lock); 3744 mutex_lock(&dev_priv->rps.hw_lock);
3577 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 3745 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3578 mutex_unlock(&dev_priv->rps.hw_lock); 3746 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3602,10 +3770,13 @@ void hsw_disable_ips(struct intel_crtc *crtc)
3602 return; 3770 return;
3603 3771
3604 assert_plane_enabled(dev_priv, crtc->plane); 3772 assert_plane_enabled(dev_priv, crtc->plane);
3605 if (IS_BROADWELL(crtc->base.dev)) { 3773 if (IS_BROADWELL(dev)) {
3606 mutex_lock(&dev_priv->rps.hw_lock); 3774 mutex_lock(&dev_priv->rps.hw_lock);
3607 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 3775 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3608 mutex_unlock(&dev_priv->rps.hw_lock); 3776 mutex_unlock(&dev_priv->rps.hw_lock);
3777 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
3778 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3779 DRM_ERROR("Timed out waiting for IPS disable\n");
3609 } else { 3780 } else {
3610 I915_WRITE(IPS_CTL, 0); 3781 I915_WRITE(IPS_CTL, 0);
3611 POSTING_READ(IPS_CTL); 3782 POSTING_READ(IPS_CTL);
@@ -3662,6 +3833,94 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
3662 hsw_enable_ips(intel_crtc); 3833 hsw_enable_ips(intel_crtc);
3663} 3834}
3664 3835
3836static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3837{
3838 if (!enable && intel_crtc->overlay) {
3839 struct drm_device *dev = intel_crtc->base.dev;
3840 struct drm_i915_private *dev_priv = dev->dev_private;
3841
3842 mutex_lock(&dev->struct_mutex);
3843 dev_priv->mm.interruptible = false;
3844 (void) intel_overlay_switch_off(intel_crtc->overlay);
3845 dev_priv->mm.interruptible = true;
3846 mutex_unlock(&dev->struct_mutex);
3847 }
3848
3849 /* Let userspace switch the overlay on again. In most cases userspace
3850 * has to recompute where to put it anyway.
3851 */
3852}
3853
3854/**
3855 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3856 * cursor plane briefly if not already running after enabling the display
3857 * plane.
3858 * This workaround avoids occasional blank screens when self refresh is
3859 * enabled.
3860 */
3861static void
3862g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3863{
3864 u32 cntl = I915_READ(CURCNTR(pipe));
3865
3866 if ((cntl & CURSOR_MODE) == 0) {
3867 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3868
3869 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3870 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3871 intel_wait_for_vblank(dev_priv->dev, pipe);
3872 I915_WRITE(CURCNTR(pipe), cntl);
3873 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3874 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3875 }
3876}
3877
3878static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3879{
3880 struct drm_device *dev = crtc->dev;
3881 struct drm_i915_private *dev_priv = dev->dev_private;
3882 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3883 int pipe = intel_crtc->pipe;
3884 int plane = intel_crtc->plane;
3885
3886 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3887 intel_enable_planes(crtc);
3888 /* The fixup needs to happen before cursor is enabled */
3889 if (IS_G4X(dev))
3890 g4x_fixup_plane(dev_priv, pipe);
3891 intel_crtc_update_cursor(crtc, true);
3892 intel_crtc_dpms_overlay(intel_crtc, true);
3893
3894 hsw_enable_ips(intel_crtc);
3895
3896 mutex_lock(&dev->struct_mutex);
3897 intel_update_fbc(dev);
3898 intel_edp_psr_update(dev);
3899 mutex_unlock(&dev->struct_mutex);
3900}
3901
3902static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3903{
3904 struct drm_device *dev = crtc->dev;
3905 struct drm_i915_private *dev_priv = dev->dev_private;
3906 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3907 int pipe = intel_crtc->pipe;
3908 int plane = intel_crtc->plane;
3909
3910 intel_crtc_wait_for_pending_flips(crtc);
3911 drm_crtc_vblank_off(crtc);
3912
3913 if (dev_priv->fbc.plane == plane)
3914 intel_disable_fbc(dev);
3915
3916 hsw_disable_ips(intel_crtc);
3917
3918 intel_crtc_dpms_overlay(intel_crtc, false);
3919 intel_crtc_update_cursor(crtc, false);
3920 intel_disable_planes(crtc);
3921 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3922}
3923
3665static void ironlake_crtc_enable(struct drm_crtc *crtc) 3924static void ironlake_crtc_enable(struct drm_crtc *crtc)
3666{ 3925{
3667 struct drm_device *dev = crtc->dev; 3926 struct drm_device *dev = crtc->dev;
@@ -3669,13 +3928,35 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3669 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3928 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3670 struct intel_encoder *encoder; 3929 struct intel_encoder *encoder;
3671 int pipe = intel_crtc->pipe; 3930 int pipe = intel_crtc->pipe;
3672 int plane = intel_crtc->plane; 3931 enum plane plane = intel_crtc->plane;
3673 3932
3674 WARN_ON(!crtc->enabled); 3933 WARN_ON(!crtc->enabled);
3675 3934
3676 if (intel_crtc->active) 3935 if (intel_crtc->active)
3677 return; 3936 return;
3678 3937
3938 if (intel_crtc->config.has_pch_encoder)
3939 intel_prepare_shared_dpll(intel_crtc);
3940
3941 if (intel_crtc->config.has_dp_encoder)
3942 intel_dp_set_m_n(intel_crtc);
3943
3944 intel_set_pipe_timings(intel_crtc);
3945
3946 if (intel_crtc->config.has_pch_encoder) {
3947 intel_cpu_transcoder_set_m_n(intel_crtc,
3948 &intel_crtc->config.fdi_m_n);
3949 }
3950
3951 ironlake_set_pipeconf(crtc);
3952
3953 /* Set up the display plane register */
3954 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
3955 POSTING_READ(DSPCNTR(plane));
3956
3957 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
3958 crtc->x, crtc->y);
3959
3679 intel_crtc->active = true; 3960 intel_crtc->active = true;
3680 3961
3681 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 3962 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3705,32 +3986,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3705 3986
3706 intel_update_watermarks(crtc); 3987 intel_update_watermarks(crtc);
3707 intel_enable_pipe(intel_crtc); 3988 intel_enable_pipe(intel_crtc);
3708 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3709 intel_enable_planes(crtc);
3710 intel_crtc_update_cursor(crtc, true);
3711 3989
3712 if (intel_crtc->config.has_pch_encoder) 3990 if (intel_crtc->config.has_pch_encoder)
3713 ironlake_pch_enable(crtc); 3991 ironlake_pch_enable(crtc);
3714 3992
3715 mutex_lock(&dev->struct_mutex);
3716 intel_update_fbc(dev);
3717 mutex_unlock(&dev->struct_mutex);
3718
3719 for_each_encoder_on_crtc(dev, crtc, encoder) 3993 for_each_encoder_on_crtc(dev, crtc, encoder)
3720 encoder->enable(encoder); 3994 encoder->enable(encoder);
3721 3995
3722 if (HAS_PCH_CPT(dev)) 3996 if (HAS_PCH_CPT(dev))
3723 cpt_verify_modeset(dev, intel_crtc->pipe); 3997 cpt_verify_modeset(dev, intel_crtc->pipe);
3724 3998
3725 /* 3999 intel_crtc_enable_planes(crtc);
3726 * There seems to be a race in PCH platform hw (at least on some 4000
3727 * outputs) where an enabled pipe still completes any pageflip right 4001 drm_crtc_vblank_on(crtc);
3728 * away (as if the pipe is off) instead of waiting for vblank. As soon
3729 * as the first vblank happened, everything works as expected. Hence just
3730 * wait for one vblank before returning to avoid strange things
3731 * happening.
3732 */
3733 intel_wait_for_vblank(dev, intel_crtc->pipe);
3734} 4002}
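
This hunk moves the pipe timing, M/N, pipeconf and primary-plane programming out of ->crtc_mode_set and into ->crtc_enable, and swaps the open-coded vblank wait for drm_crtc_vblank_on(). A minimal sketch of the resulting compute-then-commit split, using made-up structures rather than the real i915 types:

    #include <stdint.h>
    #include <stdio.h>

    struct crtc_config { uint32_t pipeconf; uint32_t dspcntr; };
    struct crtc { int active; struct crtc_config config; };

    static void write_reg(const char *name, uint32_t val)
    {
            printf("WRITE %s = 0x%08x\n", name, val);
    }

    /* mode_set only computes software state; no register writes here */
    static int crtc_mode_set(struct crtc *crtc)
    {
            crtc->config.pipeconf = 0x1;        /* invented value */
            crtc->config.dspcntr = 1u << 30;    /* stand-in for DISPPLANE_GAMMA_ENABLE */
            return 0;
    }

    /* crtc_enable commits the precomputed state in the documented order */
    static void crtc_enable(struct crtc *crtc)
    {
            if (crtc->active)
                    return;
            write_reg("PIPECONF", crtc->config.pipeconf);
            write_reg("DSPCNTR", crtc->config.dspcntr);
            crtc->active = 1;
    }

    int main(void)
    {
            struct crtc c = { 0 };
            crtc_mode_set(&c);
            crtc_enable(&c);
            return 0;
    }
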
3735 4003
3736/* IPS only exists on ULT machines and is tied to pipe A. */ 4004/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -3739,47 +4007,6 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3739 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 4007 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3740} 4008}
3741 4009
3742static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3743{
3744 struct drm_device *dev = crtc->dev;
3745 struct drm_i915_private *dev_priv = dev->dev_private;
3746 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3747 int pipe = intel_crtc->pipe;
3748 int plane = intel_crtc->plane;
3749
3750 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3751 intel_enable_planes(crtc);
3752 intel_crtc_update_cursor(crtc, true);
3753
3754 hsw_enable_ips(intel_crtc);
3755
3756 mutex_lock(&dev->struct_mutex);
3757 intel_update_fbc(dev);
3758 mutex_unlock(&dev->struct_mutex);
3759}
3760
3761static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3762{
3763 struct drm_device *dev = crtc->dev;
3764 struct drm_i915_private *dev_priv = dev->dev_private;
3765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3766 int pipe = intel_crtc->pipe;
3767 int plane = intel_crtc->plane;
3768
3769 intel_crtc_wait_for_pending_flips(crtc);
3770 drm_vblank_off(dev, pipe);
3771
3772 /* FBC must be disabled before disabling the plane on HSW. */
3773 if (dev_priv->fbc.plane == plane)
3774 intel_disable_fbc(dev);
3775
3776 hsw_disable_ips(intel_crtc);
3777
3778 intel_crtc_update_cursor(crtc, false);
3779 intel_disable_planes(crtc);
3780 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3781}
3782
3783/* 4010/*
3784 * This implements the workaround described in the "notes" section of the mode 4011 * This implements the workaround described in the "notes" section of the mode
3785 * set sequence documentation. When going from no pipes or single pipe to 4012 * set sequence documentation. When going from no pipes or single pipe to
@@ -3793,7 +4020,7 @@ static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3793 4020
3794 /* We want to get the other_active_crtc only if there's only 1 other 4021 /* We want to get the other_active_crtc only if there's only 1 other
3795 * active crtc. */ 4022 * active crtc. */
3796 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) { 4023 for_each_intel_crtc(dev, crtc_it) {
3797 if (!crtc_it->active || crtc_it == crtc) 4024 if (!crtc_it->active || crtc_it == crtc)
3798 continue; 4025 continue;
3799 4026
@@ -3816,12 +4043,34 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3816 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4043 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3817 struct intel_encoder *encoder; 4044 struct intel_encoder *encoder;
3818 int pipe = intel_crtc->pipe; 4045 int pipe = intel_crtc->pipe;
4046 enum plane plane = intel_crtc->plane;
3819 4047
3820 WARN_ON(!crtc->enabled); 4048 WARN_ON(!crtc->enabled);
3821 4049
3822 if (intel_crtc->active) 4050 if (intel_crtc->active)
3823 return; 4051 return;
3824 4052
4053 if (intel_crtc->config.has_dp_encoder)
4054 intel_dp_set_m_n(intel_crtc);
4055
4056 intel_set_pipe_timings(intel_crtc);
4057
4058 if (intel_crtc->config.has_pch_encoder) {
4059 intel_cpu_transcoder_set_m_n(intel_crtc,
4060 &intel_crtc->config.fdi_m_n);
4061 }
4062
4063 haswell_set_pipeconf(crtc);
4064
4065 intel_set_pipe_csc(crtc);
4066
4067 /* Set up the display plane register */
4068 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4069 POSTING_READ(DSPCNTR(plane));
4070
4071 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4072 crtc->x, crtc->y);
4073
3825 intel_crtc->active = true; 4074 intel_crtc->active = true;
3826 4075
3827 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4076 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3862,7 +4111,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3862 /* If we change the relative order between pipe/planes enabling, we need 4111 /* If we change the relative order between pipe/planes enabling, we need
3863 * to change the workaround. */ 4112 * to change the workaround. */
3864 haswell_mode_set_planes_workaround(intel_crtc); 4113 haswell_mode_set_planes_workaround(intel_crtc);
3865 haswell_crtc_enable_planes(crtc); 4114 intel_crtc_enable_planes(crtc);
4115
4116 drm_crtc_vblank_on(crtc);
3866} 4117}
3867 4118
3868static void ironlake_pfit_disable(struct intel_crtc *crtc) 4119static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3887,26 +4138,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3887 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4138 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3888 struct intel_encoder *encoder; 4139 struct intel_encoder *encoder;
3889 int pipe = intel_crtc->pipe; 4140 int pipe = intel_crtc->pipe;
3890 int plane = intel_crtc->plane;
3891 u32 reg, temp; 4141 u32 reg, temp;
3892 4142
3893
3894 if (!intel_crtc->active) 4143 if (!intel_crtc->active)
3895 return; 4144 return;
3896 4145
4146 intel_crtc_disable_planes(crtc);
4147
3897 for_each_encoder_on_crtc(dev, crtc, encoder) 4148 for_each_encoder_on_crtc(dev, crtc, encoder)
3898 encoder->disable(encoder); 4149 encoder->disable(encoder);
3899 4150
3900 intel_crtc_wait_for_pending_flips(crtc);
3901 drm_vblank_off(dev, pipe);
3902
3903 if (dev_priv->fbc.plane == plane)
3904 intel_disable_fbc(dev);
3905
3906 intel_crtc_update_cursor(crtc, false);
3907 intel_disable_planes(crtc);
3908 intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3909
3910 if (intel_crtc->config.has_pch_encoder) 4151 if (intel_crtc->config.has_pch_encoder)
3911 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4152 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3912 4153
@@ -3950,6 +4191,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3950 4191
3951 mutex_lock(&dev->struct_mutex); 4192 mutex_lock(&dev->struct_mutex);
3952 intel_update_fbc(dev); 4193 intel_update_fbc(dev);
4194 intel_edp_psr_update(dev);
3953 mutex_unlock(&dev->struct_mutex); 4195 mutex_unlock(&dev->struct_mutex);
3954} 4196}
3955 4197
@@ -3965,7 +4207,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3965 if (!intel_crtc->active) 4207 if (!intel_crtc->active)
3966 return; 4208 return;
3967 4209
3968 haswell_crtc_disable_planes(crtc); 4210 intel_crtc_disable_planes(crtc);
3969 4211
3970 for_each_encoder_on_crtc(dev, crtc, encoder) { 4212 for_each_encoder_on_crtc(dev, crtc, encoder) {
3971 intel_opregion_notify_encoder(encoder, false); 4213 intel_opregion_notify_encoder(encoder, false);
@@ -3997,6 +4239,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3997 4239
3998 mutex_lock(&dev->struct_mutex); 4240 mutex_lock(&dev->struct_mutex);
3999 intel_update_fbc(dev); 4241 intel_update_fbc(dev);
4242 intel_edp_psr_update(dev);
4000 mutex_unlock(&dev->struct_mutex); 4243 mutex_unlock(&dev->struct_mutex);
4001} 4244}
4002 4245
@@ -4011,48 +4254,6 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
4011 intel_ddi_put_crtc_pll(crtc); 4254 intel_ddi_put_crtc_pll(crtc);
4012} 4255}
4013 4256
4014static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4015{
4016 if (!enable && intel_crtc->overlay) {
4017 struct drm_device *dev = intel_crtc->base.dev;
4018 struct drm_i915_private *dev_priv = dev->dev_private;
4019
4020 mutex_lock(&dev->struct_mutex);
4021 dev_priv->mm.interruptible = false;
4022 (void) intel_overlay_switch_off(intel_crtc->overlay);
4023 dev_priv->mm.interruptible = true;
4024 mutex_unlock(&dev->struct_mutex);
4025 }
4026
4027 /* Let userspace switch the overlay on again. In most cases userspace
4028 * has to recompute where to put it anyway.
4029 */
4030}
4031
4032/**
4033 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
4034 * cursor plane briefly if not already running after enabling the display
4035 * plane.
4036 * This workaround avoids occasional blank screens when self refresh is
4037 * enabled.
4038 */
4039static void
4040g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
4041{
4042 u32 cntl = I915_READ(CURCNTR(pipe));
4043
4044 if ((cntl & CURSOR_MODE) == 0) {
4045 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
4046
4047 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
4048 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
4049 intel_wait_for_vblank(dev_priv->dev, pipe);
4050 I915_WRITE(CURCNTR(pipe), cntl);
4051 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
4052 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
4053 }
4054}
4055
4056static void i9xx_pfit_enable(struct intel_crtc *crtc) 4257static void i9xx_pfit_enable(struct intel_crtc *crtc)
4057{ 4258{
4058 struct drm_device *dev = crtc->base.dev; 4259 struct drm_device *dev = crtc->base.dev;
@@ -4164,7 +4365,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
4164 * First get all needed power domains, then put all unneeded, to avoid 4365 * First get all needed power domains, then put all unneeded, to avoid
4165 * any unnecessary toggling of the power wells. 4366 * any unnecessary toggling of the power wells.
4166 */ 4367 */
4167 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 4368 for_each_intel_crtc(dev, crtc) {
4168 enum intel_display_power_domain domain; 4369 enum intel_display_power_domain domain;
4169 4370
4170 if (!crtc->base.enabled) 4371 if (!crtc->base.enabled)
@@ -4176,7 +4377,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
4176 intel_display_power_get(dev_priv, domain); 4377 intel_display_power_get(dev_priv, domain);
4177 } 4378 }
4178 4379
4179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 4380 for_each_intel_crtc(dev, crtc) {
4180 enum intel_display_power_domain domain; 4381 enum intel_display_power_domain domain;
4181 4382
4182 for_each_power_domain(domain, crtc->enabled_power_domains) 4383 for_each_power_domain(domain, crtc->enabled_power_domains)
@@ -4207,6 +4408,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4207 struct drm_i915_private *dev_priv = dev->dev_private; 4408 struct drm_i915_private *dev_priv = dev->dev_private;
4208 u32 val, cmd; 4409 u32 val, cmd;
4209 4410
4411 WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
4412 dev_priv->vlv_cdclk_freq = cdclk;
4413
4210 if (cdclk >= 320) /* jump to highest voltage for 400MHz too */ 4414 if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
4211 cmd = 2; 4415 cmd = 2;
4212 else if (cdclk == 266) 4416 else if (cdclk == 266)
@@ -4261,7 +4465,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4261 intel_i2c_reset(dev); 4465 intel_i2c_reset(dev);
4262} 4466}
4263 4467
4264static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) 4468int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4265{ 4469{
4266 int cur_cdclk, vco; 4470 int cur_cdclk, vco;
4267 int divider; 4471 int divider;
@@ -4282,10 +4486,6 @@ static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4282static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 4486static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4283 int max_pixclk) 4487 int max_pixclk)
4284{ 4488{
4285 int cur_cdclk;
4286
4287 cur_cdclk = valleyview_cur_cdclk(dev_priv);
4288
4289 /* 4489 /*
4290 * Really only a few cases to deal with, as only 4 CDclks are supported: 4490 * Really only a few cases to deal with, as only 4 CDclks are supported:
4291 * 200MHz 4491 * 200MHz
@@ -4311,8 +4511,7 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4311 struct intel_crtc *intel_crtc; 4511 struct intel_crtc *intel_crtc;
4312 int max_pixclk = 0; 4512 int max_pixclk = 0;
4313 4513
4314 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4514 for_each_intel_crtc(dev, intel_crtc) {
4315 base.head) {
4316 if (intel_crtc->new_enabled) 4515 if (intel_crtc->new_enabled)
4317 max_pixclk = max(max_pixclk, 4516 max_pixclk = max(max_pixclk,
4318 intel_crtc->new_config->adjusted_mode.crtc_clock); 4517 intel_crtc->new_config->adjusted_mode.crtc_clock);
@@ -4327,14 +4526,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
4327 struct drm_i915_private *dev_priv = dev->dev_private; 4526 struct drm_i915_private *dev_priv = dev->dev_private;
4328 struct intel_crtc *intel_crtc; 4527 struct intel_crtc *intel_crtc;
4329 int max_pixclk = intel_mode_max_pixclk(dev_priv); 4528 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4330 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4331 4529
4332 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) 4530 if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4531 dev_priv->vlv_cdclk_freq)
4333 return; 4532 return;
4334 4533
4335 /* disable/enable all currently active pipes while we change cdclk */ 4534 /* disable/enable all currently active pipes while we change cdclk */
4336 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4535 for_each_intel_crtc(dev, intel_crtc)
4337 base.head)
4338 if (intel_crtc->base.enabled) 4536 if (intel_crtc->base.enabled)
4339 *prepare_pipes |= (1 << intel_crtc->pipe); 4537 *prepare_pipes |= (1 << intel_crtc->pipe);
4340} 4538}
@@ -4343,10 +4541,9 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
4343{ 4541{
4344 struct drm_i915_private *dev_priv = dev->dev_private; 4542 struct drm_i915_private *dev_priv = dev->dev_private;
4345 int max_pixclk = intel_mode_max_pixclk(dev_priv); 4543 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4346 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4347 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4544 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4348 4545
4349 if (req_cdclk != cur_cdclk) 4546 if (req_cdclk != dev_priv->vlv_cdclk_freq)
4350 valleyview_set_cdclk(dev, req_cdclk); 4547 valleyview_set_cdclk(dev, req_cdclk);
4351 modeset_update_crtc_power_domains(dev); 4548 modeset_update_crtc_power_domains(dev);
4352} 4549}
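
dev_priv->vlv_cdclk_freq (a field this series introduces) caches the current display clock so the common paths no longer read it back from hardware; valleyview_set_cdclk() sanity-checks the cache against the readback before reprogramming, and the callers above skip the change entirely on a cache hit. A self-contained model of that pattern, with stand-in names throughout:

    #include <assert.h>
    #include <stdio.h>

    struct dev_priv { int vlv_cdclk_freq; };

    static int hw_cdclk = 200;                 /* stand-in for the real readback */

    static int cur_cdclk(void) { return hw_cdclk; }

    static void set_cdclk(struct dev_priv *dev_priv, int cdclk)
    {
            /* the cache must never drift from the hardware */
            assert(cur_cdclk() == dev_priv->vlv_cdclk_freq);
            dev_priv->vlv_cdclk_freq = cdclk;
            hw_cdclk = cdclk;                  /* the real code reprograms PLL, voltage, ... */
            printf("cdclk -> %d MHz\n", cdclk);
    }

    static void modeset_global_resources(struct dev_priv *dev_priv, int req_cdclk)
    {
            if (req_cdclk != dev_priv->vlv_cdclk_freq)
                    set_cdclk(dev_priv, req_cdclk);
    }

    int main(void)
    {
            struct dev_priv dp = { .vlv_cdclk_freq = 200 };

            modeset_global_resources(&dp, 320);
            modeset_global_resources(&dp, 320); /* no-op, cache hit */
            return 0;
    }
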
@@ -4360,22 +4557,55 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4360 int pipe = intel_crtc->pipe; 4557 int pipe = intel_crtc->pipe;
4361 int plane = intel_crtc->plane; 4558 int plane = intel_crtc->plane;
4362 bool is_dsi; 4559 bool is_dsi;
4560 u32 dspcntr;
4363 4561
4364 WARN_ON(!crtc->enabled); 4562 WARN_ON(!crtc->enabled);
4365 4563
4366 if (intel_crtc->active) 4564 if (intel_crtc->active)
4367 return; 4565 return;
4368 4566
4567 vlv_prepare_pll(intel_crtc);
4568
4569 /* Set up the display plane register */
4570 dspcntr = DISPPLANE_GAMMA_ENABLE;
4571
4572 if (intel_crtc->config.has_dp_encoder)
4573 intel_dp_set_m_n(intel_crtc);
4574
4575 intel_set_pipe_timings(intel_crtc);
4576
4577 /* pipesrc and dspsize control the size that is scaled from,
4578 * which should always be the user's requested size.
4579 */
4580 I915_WRITE(DSPSIZE(plane),
4581 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4582 (intel_crtc->config.pipe_src_w - 1));
4583 I915_WRITE(DSPPOS(plane), 0);
4584
4585 i9xx_set_pipeconf(intel_crtc);
4586
4587 I915_WRITE(DSPCNTR(plane), dspcntr);
4588 POSTING_READ(DSPCNTR(plane));
4589
4590 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4591 crtc->x, crtc->y);
4592
4369 intel_crtc->active = true; 4593 intel_crtc->active = true;
4370 4594
4595 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4596
4371 for_each_encoder_on_crtc(dev, crtc, encoder) 4597 for_each_encoder_on_crtc(dev, crtc, encoder)
4372 if (encoder->pre_pll_enable) 4598 if (encoder->pre_pll_enable)
4373 encoder->pre_pll_enable(encoder); 4599 encoder->pre_pll_enable(encoder);
4374 4600
4375 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); 4601 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4376 4602
4377 if (!is_dsi) 4603 if (!is_dsi) {
4378 vlv_enable_pll(intel_crtc); 4604 if (IS_CHERRYVIEW(dev))
4605 chv_enable_pll(intel_crtc);
4606 else
4607 vlv_enable_pll(intel_crtc);
4608 }
4379 4609
4380 for_each_encoder_on_crtc(dev, crtc, encoder) 4610 for_each_encoder_on_crtc(dev, crtc, encoder)
4381 if (encoder->pre_enable) 4611 if (encoder->pre_enable)
@@ -4387,15 +4617,25 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4387 4617
4388 intel_update_watermarks(crtc); 4618 intel_update_watermarks(crtc);
4389 intel_enable_pipe(intel_crtc); 4619 intel_enable_pipe(intel_crtc);
4390 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4391 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
4392 intel_enable_planes(crtc);
4393 intel_crtc_update_cursor(crtc, true);
4394
4395 intel_update_fbc(dev);
4396 4620
4397 for_each_encoder_on_crtc(dev, crtc, encoder) 4621 for_each_encoder_on_crtc(dev, crtc, encoder)
4398 encoder->enable(encoder); 4622 encoder->enable(encoder);
4623
4624 intel_crtc_enable_planes(crtc);
4625
4626 drm_crtc_vblank_on(crtc);
4627
4628 /* Underruns don't raise interrupts, so check manually. */
4629 i9xx_check_fifo_underruns(dev);
4630}
4631
4632static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4633{
4634 struct drm_device *dev = crtc->base.dev;
4635 struct drm_i915_private *dev_priv = dev->dev_private;
4636
4637 I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4638 I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4399} 4639}
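
i9xx_set_pll_dividers() is new: the FP0/FP1 writes that i9xx_update_pll_dividers() used to issue at mode-set time (see the hunk around old line 5056 below) now happen here, at enable time, from the cached dpll_hw_state. A minimal model of that cache-then-commit move, with invented register values:

    #include <stdint.h>
    #include <stdio.h>

    struct dpll_hw_state { uint32_t fp0, fp1; };

    /* mode-set time: only fill the software state */
    static void update_pll_dividers(struct dpll_hw_state *st,
                                    uint32_t fp, uint32_t fp2, int has_reduced)
    {
            st->fp0 = fp;
            st->fp1 = has_reduced ? fp2 : fp; /* fall back to fp without a low-freq clock */
    }

    /* enable time: commit the cached dividers */
    static void set_pll_dividers(const struct dpll_hw_state *st, int pipe)
    {
            printf("WRITE FP0(%d) = 0x%08x\n", pipe, st->fp0);
            printf("WRITE FP1(%d) = 0x%08x\n", pipe, st->fp1);
    }

    int main(void)
    {
            struct dpll_hw_state st;

            update_pll_dividers(&st, 0x31016, 0x21207, 0);
            set_pll_dividers(&st, 0);
            return 0;
    }
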
4400 4640
4401static void i9xx_crtc_enable(struct drm_crtc *crtc) 4641static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -4406,14 +4646,49 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4406 struct intel_encoder *encoder; 4646 struct intel_encoder *encoder;
4407 int pipe = intel_crtc->pipe; 4647 int pipe = intel_crtc->pipe;
4408 int plane = intel_crtc->plane; 4648 int plane = intel_crtc->plane;
4649 u32 dspcntr;
4409 4650
4410 WARN_ON(!crtc->enabled); 4651 WARN_ON(!crtc->enabled);
4411 4652
4412 if (intel_crtc->active) 4653 if (intel_crtc->active)
4413 return; 4654 return;
4414 4655
4656 i9xx_set_pll_dividers(intel_crtc);
4657
4658 /* Set up the display plane register */
4659 dspcntr = DISPPLANE_GAMMA_ENABLE;
4660
4661 if (pipe == 0)
4662 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4663 else
4664 dspcntr |= DISPPLANE_SEL_PIPE_B;
4665
4666 if (intel_crtc->config.has_dp_encoder)
4667 intel_dp_set_m_n(intel_crtc);
4668
4669 intel_set_pipe_timings(intel_crtc);
4670
4671 /* pipesrc and dspsize control the size that is scaled from,
4672 * which should always be the user's requested size.
4673 */
4674 I915_WRITE(DSPSIZE(plane),
4675 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4676 (intel_crtc->config.pipe_src_w - 1));
4677 I915_WRITE(DSPPOS(plane), 0);
4678
4679 i9xx_set_pipeconf(intel_crtc);
4680
4681 I915_WRITE(DSPCNTR(plane), dspcntr);
4682 POSTING_READ(DSPCNTR(plane));
4683
4684 dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4685 crtc->x, crtc->y);
4686
4415 intel_crtc->active = true; 4687 intel_crtc->active = true;
4416 4688
4689 if (!IS_GEN2(dev))
4690 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4691
4417 for_each_encoder_on_crtc(dev, crtc, encoder) 4692 for_each_encoder_on_crtc(dev, crtc, encoder)
4418 if (encoder->pre_enable) 4693 if (encoder->pre_enable)
4419 encoder->pre_enable(encoder); 4694 encoder->pre_enable(encoder);
@@ -4426,21 +4701,26 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4426 4701
4427 intel_update_watermarks(crtc); 4702 intel_update_watermarks(crtc);
4428 intel_enable_pipe(intel_crtc); 4703 intel_enable_pipe(intel_crtc);
4429 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4430 intel_enable_primary_hw_plane(dev_priv, plane, pipe);
4431 intel_enable_planes(crtc);
4432 /* The fixup needs to happen before cursor is enabled */
4433 if (IS_G4X(dev))
4434 g4x_fixup_plane(dev_priv, pipe);
4435 intel_crtc_update_cursor(crtc, true);
4436
4437 /* Give the overlay scaler a chance to enable if it's on this pipe */
4438 intel_crtc_dpms_overlay(intel_crtc, true);
4439
4440 intel_update_fbc(dev);
4441 4704
4442 for_each_encoder_on_crtc(dev, crtc, encoder) 4705 for_each_encoder_on_crtc(dev, crtc, encoder)
4443 encoder->enable(encoder); 4706 encoder->enable(encoder);
4707
4708 intel_crtc_enable_planes(crtc);
4709
4710 /*
4711 * Gen2 reports pipe underruns whenever all planes are disabled.
4712 * So don't enable underrun reporting before at least some planes
4713 * are enabled.
4714 * FIXME: Need to fix the logic to work when we turn off all planes
4715 * but leave the pipe running.
4716 */
4717 if (IS_GEN2(dev))
4718 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4719
4720 drm_crtc_vblank_on(crtc);
4721
4722 /* Underruns don't raise interrupts, so check manually. */
4723 i9xx_check_fifo_underruns(dev);
4444} 4724}
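
As the comments in this and the following disable hunk note, gen2 flags a pipe underrun whenever every plane is off, so reporting is enabled only after some planes are up and disabled again before they go down, while gen3+ keeps the early enable. A toy model of that asymmetric ordering; the helper names are illustrative:

    #include <stdio.h>

    static void set_underrun_reporting(int on)
    {
            printf("underrun reporting %s\n", on ? "on" : "off");
    }

    static void set_planes(int on)
    {
            printf("planes %s\n", on ? "on" : "off");
    }

    static void crtc_enable(int is_gen2)
    {
            if (!is_gen2)
                    set_underrun_reporting(1); /* safe before planes on gen3+ */
            set_planes(1);
            if (is_gen2)
                    set_underrun_reporting(1); /* only once some planes are up */
    }

    static void crtc_disable(int is_gen2)
    {
            if (is_gen2)
                    set_underrun_reporting(0); /* before the planes go away */
            set_planes(0);
            if (!is_gen2)
                    set_underrun_reporting(0);
    }

    int main(void)
    {
            crtc_enable(1);
            crtc_disable(1);
            return 0;
    }
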
4445 4725
4446static void i9xx_pfit_disable(struct intel_crtc *crtc) 4726static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4465,27 +4745,31 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4465 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4745 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4466 struct intel_encoder *encoder; 4746 struct intel_encoder *encoder;
4467 int pipe = intel_crtc->pipe; 4747 int pipe = intel_crtc->pipe;
4468 int plane = intel_crtc->plane;
4469 4748
4470 if (!intel_crtc->active) 4749 if (!intel_crtc->active)
4471 return; 4750 return;
4472 4751
4473 for_each_encoder_on_crtc(dev, crtc, encoder) 4752 /*
4474 encoder->disable(encoder); 4753 * Gen2 reports pipe underruns whenever all planes are disabled.
4754 * So disable underrun reporting before all the planes get disabled.
4755 * FIXME: Need to fix the logic to work when we turn off all planes
4756 * but leave the pipe running.
4757 */
4758 if (IS_GEN2(dev))
4759 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4475 4760
4476 /* Give the overlay scaler a chance to disable if it's on this pipe */ 4761 intel_crtc_disable_planes(crtc);
4477 intel_crtc_wait_for_pending_flips(crtc);
4478 drm_vblank_off(dev, pipe);
4479 4762
4480 if (dev_priv->fbc.plane == plane) 4763 for_each_encoder_on_crtc(dev, crtc, encoder)
4481 intel_disable_fbc(dev); 4764 encoder->disable(encoder);
4482 4765
4483 intel_crtc_dpms_overlay(intel_crtc, false); 4766 /*
4484 intel_crtc_update_cursor(crtc, false); 4767 * On gen2 planes are double buffered but the pipe isn't, so we must
4485 intel_disable_planes(crtc); 4768 * wait for planes to fully turn off before disabling the pipe.
4486 intel_disable_primary_hw_plane(dev_priv, plane, pipe); 4769 */
4770 if (IS_GEN2(dev))
4771 intel_wait_for_vblank(dev, pipe);
4487 4772
4488 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4489 intel_disable_pipe(dev_priv, pipe); 4773 intel_disable_pipe(dev_priv, pipe);
4490 4774
4491 i9xx_pfit_disable(intel_crtc); 4775 i9xx_pfit_disable(intel_crtc);
@@ -4494,15 +4778,25 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4494 if (encoder->post_disable) 4778 if (encoder->post_disable)
4495 encoder->post_disable(encoder); 4779 encoder->post_disable(encoder);
4496 4780
4497 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 4781 if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4498 vlv_disable_pll(dev_priv, pipe); 4782 if (IS_CHERRYVIEW(dev))
4499 else if (!IS_VALLEYVIEW(dev)) 4783 chv_disable_pll(dev_priv, pipe);
4500 i9xx_disable_pll(dev_priv, pipe); 4784 else if (IS_VALLEYVIEW(dev))
4785 vlv_disable_pll(dev_priv, pipe);
4786 else
4787 i9xx_disable_pll(dev_priv, pipe);
4788 }
4789
4790 if (!IS_GEN2(dev))
4791 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4501 4792
4502 intel_crtc->active = false; 4793 intel_crtc->active = false;
4503 intel_update_watermarks(crtc); 4794 intel_update_watermarks(crtc);
4504 4795
4796 mutex_lock(&dev->struct_mutex);
4505 intel_update_fbc(dev); 4797 intel_update_fbc(dev);
4798 intel_edp_psr_update(dev);
4799 mutex_unlock(&dev->struct_mutex);
4506} 4800}
4507 4801
4508static void i9xx_crtc_off(struct drm_crtc *crtc) 4802static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -4565,13 +4859,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
4565 struct drm_device *dev = crtc->dev; 4859 struct drm_device *dev = crtc->dev;
4566 struct drm_connector *connector; 4860 struct drm_connector *connector;
4567 struct drm_i915_private *dev_priv = dev->dev_private; 4861 struct drm_i915_private *dev_priv = dev->dev_private;
4568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4569 4862
4570 /* crtc should still be enabled when we disable it. */ 4863 /* crtc should still be enabled when we disable it. */
4571 WARN_ON(!crtc->enabled); 4864 WARN_ON(!crtc->enabled);
4572 4865
4573 dev_priv->display.crtc_disable(crtc); 4866 dev_priv->display.crtc_disable(crtc);
4574 intel_crtc->eld_vld = false;
4575 intel_crtc_update_sarea(crtc, false); 4867 intel_crtc_update_sarea(crtc, false);
4576 dev_priv->display.off(crtc); 4868 dev_priv->display.off(crtc);
4577 4869
@@ -4635,7 +4927,7 @@ static void intel_connector_check_state(struct intel_connector *connector)
4635 4927
4636 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4928 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4637 connector->base.base.id, 4929 connector->base.base.id,
4638 drm_get_connector_name(&connector->base)); 4930 connector->base.name);
4639 4931
4640 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, 4932 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4641 "wrong connector dpms state\n"); 4933 "wrong connector dpms state\n");
@@ -5039,8 +5331,6 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5039 intel_clock_t *reduced_clock) 5331 intel_clock_t *reduced_clock)
5040{ 5332{
5041 struct drm_device *dev = crtc->base.dev; 5333 struct drm_device *dev = crtc->base.dev;
5042 struct drm_i915_private *dev_priv = dev->dev_private;
5043 int pipe = crtc->pipe;
5044 u32 fp, fp2 = 0; 5334 u32 fp, fp2 = 0;
5045 5335
5046 if (IS_PINEVIEW(dev)) { 5336 if (IS_PINEVIEW(dev)) {
@@ -5053,17 +5343,14 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5053 fp2 = i9xx_dpll_compute_fp(reduced_clock); 5343 fp2 = i9xx_dpll_compute_fp(reduced_clock);
5054 } 5344 }
5055 5345
5056 I915_WRITE(FP0(pipe), fp);
5057 crtc->config.dpll_hw_state.fp0 = fp; 5346 crtc->config.dpll_hw_state.fp0 = fp;
5058 5347
5059 crtc->lowfreq_avail = false; 5348 crtc->lowfreq_avail = false;
5060 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5349 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5061 reduced_clock && i915.powersave) { 5350 reduced_clock && i915.powersave) {
5062 I915_WRITE(FP1(pipe), fp2);
5063 crtc->config.dpll_hw_state.fp1 = fp2; 5351 crtc->config.dpll_hw_state.fp1 = fp2;
5064 crtc->lowfreq_avail = true; 5352 crtc->lowfreq_avail = true;
5065 } else { 5353 } else {
5066 I915_WRITE(FP1(pipe), fp);
5067 crtc->config.dpll_hw_state.fp1 = fp; 5354 crtc->config.dpll_hw_state.fp1 = fp;
5068 } 5355 }
5069} 5356}
@@ -5141,12 +5428,34 @@ static void intel_dp_set_m_n(struct intel_crtc *crtc)
5141 5428
5142static void vlv_update_pll(struct intel_crtc *crtc) 5429static void vlv_update_pll(struct intel_crtc *crtc)
5143{ 5430{
5431 u32 dpll, dpll_md;
5432
5433 /*
5434 * Enable DPIO clock input. We should never disable the reference
5435 * clock for pipe B, since VGA hotplug / manual detection depends
5436 * on it.
5437 */
5438 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5439 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5440 /* We should never disable this, set it here for state tracking */
5441 if (crtc->pipe == PIPE_B)
5442 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5443 dpll |= DPLL_VCO_ENABLE;
5444 crtc->config.dpll_hw_state.dpll = dpll;
5445
5446 dpll_md = (crtc->config.pixel_multiplier - 1)
5447 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5448 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5449}
5450
5451static void vlv_prepare_pll(struct intel_crtc *crtc)
5452{
5144 struct drm_device *dev = crtc->base.dev; 5453 struct drm_device *dev = crtc->base.dev;
5145 struct drm_i915_private *dev_priv = dev->dev_private; 5454 struct drm_i915_private *dev_priv = dev->dev_private;
5146 int pipe = crtc->pipe; 5455 int pipe = crtc->pipe;
5147 u32 dpll, mdiv; 5456 u32 mdiv;
5148 u32 bestn, bestm1, bestm2, bestp1, bestp2; 5457 u32 bestn, bestm1, bestm2, bestp1, bestp2;
5149 u32 coreclk, reg_val, dpll_md; 5458 u32 coreclk, reg_val;
5150 5459
5151 mutex_lock(&dev_priv->dpio_lock); 5460 mutex_lock(&dev_priv->dpio_lock);
5152 5461
@@ -5159,7 +5468,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
5159 /* See eDP HDMI DPIO driver vbios notes doc */ 5468 /* See eDP HDMI DPIO driver vbios notes doc */
5160 5469
5161 /* PLL B needs special handling */ 5470 /* PLL B needs special handling */
5162 if (pipe) 5471 if (pipe == PIPE_B)
5163 vlv_pllb_recal_opamp(dev_priv, pipe); 5472 vlv_pllb_recal_opamp(dev_priv, pipe);
5164 5473
5165 /* Set up Tx target for periodic Rcomp update */ 5474 /* Set up Tx target for periodic Rcomp update */
@@ -5203,7 +5512,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
5203 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 5512 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5204 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 5513 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5205 /* Use SSC source */ 5514 /* Use SSC source */
5206 if (!pipe) 5515 if (pipe == PIPE_A)
5207 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5516 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5208 0x0df40000); 5517 0x0df40000);
5209 else 5518 else
@@ -5211,7 +5520,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
5211 0x0df70000); 5520 0x0df70000);
5212 } else { /* HDMI or VGA */ 5521 } else { /* HDMI or VGA */
5213 /* Use bend source */ 5522 /* Use bend source */
5214 if (!pipe) 5523 if (pipe == PIPE_A)
5215 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5524 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5216 0x0df70000); 5525 0x0df70000);
5217 else 5526 else
@@ -5227,26 +5536,84 @@ static void vlv_update_pll(struct intel_crtc *crtc)
5227 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 5536 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5228 5537
5229 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 5538 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5539 mutex_unlock(&dev_priv->dpio_lock);
5540}
5541
5542static void chv_update_pll(struct intel_crtc *crtc)
5543{
5544 struct drm_device *dev = crtc->base.dev;
5545 struct drm_i915_private *dev_priv = dev->dev_private;
5546 int pipe = crtc->pipe;
5547 int dpll_reg = DPLL(crtc->pipe);
5548 enum dpio_channel port = vlv_pipe_to_channel(pipe);
5549 u32 loopfilter, intcoeff;
5550 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5551 int refclk;
5552
5553 crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5554 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5555 DPLL_VCO_ENABLE;
5556 if (pipe != PIPE_A)
5557 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5558
5559 crtc->config.dpll_hw_state.dpll_md =
5560 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5561
5562 bestn = crtc->config.dpll.n;
5563 bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5564 bestm1 = crtc->config.dpll.m1;
5565 bestm2 = crtc->config.dpll.m2 >> 22;
5566 bestp1 = crtc->config.dpll.p1;
5567 bestp2 = crtc->config.dpll.p2;
5230 5568
5231 /* 5569 /*
5232 * Enable DPIO clock input. We should never disable the reference 5570 * Enable Refclk and SSC
5233 * clock for pipe B, since VGA hotplug / manual detection depends
5234 * on it.
5235 */ 5571 */
5236 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 5572 I915_WRITE(dpll_reg,
5237 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 5573 crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5238 /* We should never disable this, set it here for state tracking */
5239 if (pipe == PIPE_B)
5240 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5241 dpll |= DPLL_VCO_ENABLE;
5242 crtc->config.dpll_hw_state.dpll = dpll;
5243 5574
5244 dpll_md = (crtc->config.pixel_multiplier - 1) 5575 mutex_lock(&dev_priv->dpio_lock);
5245 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5576
5246 crtc->config.dpll_hw_state.dpll_md = dpll_md; 5577 /* p1 and p2 divider */
5578 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5579 5 << DPIO_CHV_S1_DIV_SHIFT |
5580 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5581 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5582 1 << DPIO_CHV_K_DIV_SHIFT);
5583
5584 /* Feedback post-divider - m2 */
5585 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5586
5587 /* Feedback refclk divider - n and m1 */
5588 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5589 DPIO_CHV_M1_DIV_BY_2 |
5590 1 << DPIO_CHV_N_DIV_SHIFT);
5591
5592 /* M2 fraction division */
5593 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5594
5595 /* M2 fraction division enable */
5596 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5597 DPIO_CHV_FRAC_DIV_EN |
5598 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5599
5600 /* Loop filter */
5601 refclk = i9xx_get_refclk(&crtc->base, 0);
5602 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5603 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5604 if (refclk == 100000)
5605 intcoeff = 11;
5606 else if (refclk == 38400)
5607 intcoeff = 10;
5608 else
5609 intcoeff = 9;
5610 loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5611 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5247 5612
5248 if (crtc->config.has_dp_encoder) 5613 /* AFC Recal */
5249 intel_dp_set_m_n(crtc); 5614 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5615 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5616 DPIO_AFC_RECAL);
5250 5617
5251 mutex_unlock(&dev_priv->dpio_lock); 5618 mutex_unlock(&dev_priv->dpio_lock);
5252} 5619}
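
chv_update_pll() stores M2 as a single value whose top bits are the integer divider and whose low 22 bits are the fraction: bestm2 = m2 >> 22 goes to CHV_PLL_DW0 and bestm2_frac = m2 & 0x3fffff to CHV_PLL_DW2, and the loop-filter integrator coefficient is picked from the reference clock. A small round-trip check of that packing and of the coefficient table, runnable as plain C (the sample divider values are invented):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t pack_m2(uint32_t m2_int, uint32_t m2_frac)
    {
            return (m2_int << 22) | (m2_frac & 0x3fffff);
    }

    static uint32_t intcoeff_for_refclk(int refclk_khz)
    {
            if (refclk_khz == 100000)
                    return 11;
            if (refclk_khz == 38400)
                    return 10;
            return 9;
    }

    int main(void)
    {
            uint32_t m2 = pack_m2(0xb4, 0x2d0e5);

            assert((m2 >> 22) == 0xb4);         /* integer part, CHV_PLL_DW0 */
            assert((m2 & 0x3fffff) == 0x2d0e5); /* fraction, CHV_PLL_DW2 */
            assert(intcoeff_for_refclk(100000) == 11);
            assert(intcoeff_for_refclk(38400) == 10);
            assert(intcoeff_for_refclk(19200) == 9);
            return 0;
    }
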
@@ -5325,9 +5692,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5325 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5692 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5326 crtc->config.dpll_hw_state.dpll_md = dpll_md; 5693 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5327 } 5694 }
5328
5329 if (crtc->config.has_dp_encoder)
5330 intel_dp_set_m_n(crtc);
5331} 5695}
5332 5696
5333static void i8xx_update_pll(struct intel_crtc *crtc, 5697static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -5567,16 +5931,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5567 struct drm_device *dev = crtc->dev; 5931 struct drm_device *dev = crtc->dev;
5568 struct drm_i915_private *dev_priv = dev->dev_private; 5932 struct drm_i915_private *dev_priv = dev->dev_private;
5569 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5933 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5570 int pipe = intel_crtc->pipe;
5571 int plane = intel_crtc->plane;
5572 int refclk, num_connectors = 0; 5934 int refclk, num_connectors = 0;
5573 intel_clock_t clock, reduced_clock; 5935 intel_clock_t clock, reduced_clock;
5574 u32 dspcntr;
5575 bool ok, has_reduced_clock = false; 5936 bool ok, has_reduced_clock = false;
5576 bool is_lvds = false, is_dsi = false; 5937 bool is_lvds = false, is_dsi = false;
5577 struct intel_encoder *encoder; 5938 struct intel_encoder *encoder;
5578 const intel_limit_t *limit; 5939 const intel_limit_t *limit;
5579 int ret;
5580 5940
5581 for_each_encoder_on_crtc(dev, crtc, encoder) { 5941 for_each_encoder_on_crtc(dev, crtc, encoder) {
5582 switch (encoder->type) { 5942 switch (encoder->type) {
@@ -5592,7 +5952,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5592 } 5952 }
5593 5953
5594 if (is_dsi) 5954 if (is_dsi)
5595 goto skip_dpll; 5955 return 0;
5596 5956
5597 if (!intel_crtc->config.clock_set) { 5957 if (!intel_crtc->config.clock_set) {
5598 refclk = i9xx_get_refclk(crtc, num_connectors); 5958 refclk = i9xx_get_refclk(crtc, num_connectors);
@@ -5637,43 +5997,17 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5637 i8xx_update_pll(intel_crtc, 5997 i8xx_update_pll(intel_crtc,
5638 has_reduced_clock ? &reduced_clock : NULL, 5998 has_reduced_clock ? &reduced_clock : NULL,
5639 num_connectors); 5999 num_connectors);
6000 } else if (IS_CHERRYVIEW(dev)) {
6001 chv_update_pll(intel_crtc);
5640 } else if (IS_VALLEYVIEW(dev)) { 6002 } else if (IS_VALLEYVIEW(dev)) {
5641 vlv_update_pll(intel_crtc); 6003 vlv_update_pll(intel_crtc);
5642 } else { 6004 } else {
5643 i9xx_update_pll(intel_crtc, 6005 i9xx_update_pll(intel_crtc,
5644 has_reduced_clock ? &reduced_clock : NULL, 6006 has_reduced_clock ? &reduced_clock : NULL,
5645 num_connectors); 6007 num_connectors);
5646 }
5647
5648skip_dpll:
5649 /* Set up the display plane register */
5650 dspcntr = DISPPLANE_GAMMA_ENABLE;
5651
5652 if (!IS_VALLEYVIEW(dev)) {
5653 if (pipe == 0)
5654 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5655 else
5656 dspcntr |= DISPPLANE_SEL_PIPE_B;
5657 } 6008 }
5658 6009
5659 intel_set_pipe_timings(intel_crtc); 6010 return 0;
5660
5661 /* pipesrc and dspsize control the size that is scaled from,
5662 * which should always be the user's requested size.
5663 */
5664 I915_WRITE(DSPSIZE(plane),
5665 ((intel_crtc->config.pipe_src_h - 1) << 16) |
5666 (intel_crtc->config.pipe_src_w - 1));
5667 I915_WRITE(DSPPOS(plane), 0);
5668
5669 i9xx_set_pipeconf(intel_crtc);
5670
5671 I915_WRITE(DSPCNTR(plane), dspcntr);
5672 POSTING_READ(DSPCNTR(plane));
5673
5674 ret = intel_pipe_set_base(crtc, x, y, fb);
5675
5676 return ret;
5677} 6011}
5678 6012
5679static void i9xx_get_pfit_config(struct intel_crtc *crtc, 6013static void i9xx_get_pfit_config(struct intel_crtc *crtc,
@@ -5793,6 +6127,36 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
5793 6127
5794} 6128}
5795 6129
6130static void chv_crtc_clock_get(struct intel_crtc *crtc,
6131 struct intel_crtc_config *pipe_config)
6132{
6133 struct drm_device *dev = crtc->base.dev;
6134 struct drm_i915_private *dev_priv = dev->dev_private;
6135 int pipe = pipe_config->cpu_transcoder;
6136 enum dpio_channel port = vlv_pipe_to_channel(pipe);
6137 intel_clock_t clock;
6138 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6139 int refclk = 100000;
6140
6141 mutex_lock(&dev_priv->dpio_lock);
6142 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6143 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6144 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6145 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6146 mutex_unlock(&dev_priv->dpio_lock);
6147
6148 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6149 clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6150 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6151 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6152 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6153
6154 chv_clock(refclk, &clock);
6155
6156 /* clock.dot is the fast clock */
6157 pipe_config->port_clock = clock.dot / 5;
6158}
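
The final division reflects the comment above: chv_clock() returns the 5x "fast clock", and the link symbol clock is a fifth of it. A one-line worked example under that assumption:

    #include <assert.h>

    int main(void)
    {
            int dot_khz = 1350000;              /* fast clock from chv_clock() */
            int port_clock = dot_khz / 5;

            assert(port_clock == 270000);       /* 270 MHz symbol clock, i.e. HBR DP */
            return 0;
    }
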
6159
5796static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 6160static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5797 struct intel_crtc_config *pipe_config) 6161 struct intel_crtc_config *pipe_config)
5798{ 6162{
@@ -5827,6 +6191,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5827 } 6191 }
5828 } 6192 }
5829 6193
6194 if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6195 pipe_config->limited_color_range = true;
6196
5830 if (INTEL_INFO(dev)->gen < 4) 6197 if (INTEL_INFO(dev)->gen < 4)
5831 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 6198 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5832 6199
@@ -5862,7 +6229,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5862 DPLL_PORTB_READY_MASK); 6229 DPLL_PORTB_READY_MASK);
5863 } 6230 }
5864 6231
5865 if (IS_VALLEYVIEW(dev)) 6232 if (IS_CHERRYVIEW(dev))
6233 chv_crtc_clock_get(crtc, pipe_config);
6234 else if (IS_VALLEYVIEW(dev))
5866 vlv_crtc_clock_get(crtc, pipe_config); 6235 vlv_crtc_clock_get(crtc, pipe_config);
5867 else 6236 else
5868 i9xx_crtc_clock_get(crtc, pipe_config); 6237 i9xx_crtc_clock_get(crtc, pipe_config);
@@ -5983,8 +6352,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
5983 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 6352 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5984 DRM_DEBUG_KMS("Using SSC on eDP\n"); 6353 DRM_DEBUG_KMS("Using SSC on eDP\n");
5985 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 6354 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5986 } 6355 } else
5987 else
5988 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 6356 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5989 } else 6357 } else
5990 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 6358 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
@@ -6563,10 +6931,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6563 struct drm_framebuffer *fb) 6931 struct drm_framebuffer *fb)
6564{ 6932{
6565 struct drm_device *dev = crtc->dev; 6933 struct drm_device *dev = crtc->dev;
6566 struct drm_i915_private *dev_priv = dev->dev_private;
6567 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6934 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6568 int pipe = intel_crtc->pipe;
6569 int plane = intel_crtc->plane;
6570 int num_connectors = 0; 6935 int num_connectors = 0;
6571 intel_clock_t clock, reduced_clock; 6936 intel_clock_t clock, reduced_clock;
6572 u32 dpll = 0, fp = 0, fp2 = 0; 6937 u32 dpll = 0, fp = 0, fp2 = 0;
@@ -6574,7 +6939,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6574 bool is_lvds = false; 6939 bool is_lvds = false;
6575 struct intel_encoder *encoder; 6940 struct intel_encoder *encoder;
6576 struct intel_shared_dpll *pll; 6941 struct intel_shared_dpll *pll;
6577 int ret;
6578 6942
6579 for_each_encoder_on_crtc(dev, crtc, encoder) { 6943 for_each_encoder_on_crtc(dev, crtc, encoder) {
6580 switch (encoder->type) { 6944 switch (encoder->type) {
@@ -6624,36 +6988,18 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6624 pll = intel_get_shared_dpll(intel_crtc); 6988 pll = intel_get_shared_dpll(intel_crtc);
6625 if (pll == NULL) { 6989 if (pll == NULL) {
6626 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 6990 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
6627 pipe_name(pipe)); 6991 pipe_name(intel_crtc->pipe));
6628 return -EINVAL; 6992 return -EINVAL;
6629 } 6993 }
6630 } else 6994 } else
6631 intel_put_shared_dpll(intel_crtc); 6995 intel_put_shared_dpll(intel_crtc);
6632 6996
6633 if (intel_crtc->config.has_dp_encoder)
6634 intel_dp_set_m_n(intel_crtc);
6635
6636 if (is_lvds && has_reduced_clock && i915.powersave) 6997 if (is_lvds && has_reduced_clock && i915.powersave)
6637 intel_crtc->lowfreq_avail = true; 6998 intel_crtc->lowfreq_avail = true;
6638 else 6999 else
6639 intel_crtc->lowfreq_avail = false; 7000 intel_crtc->lowfreq_avail = false;
6640 7001
6641 intel_set_pipe_timings(intel_crtc); 7002 return 0;
6642
6643 if (intel_crtc->config.has_pch_encoder) {
6644 intel_cpu_transcoder_set_m_n(intel_crtc,
6645 &intel_crtc->config.fdi_m_n);
6646 }
6647
6648 ironlake_set_pipeconf(crtc);
6649
6650 /* Set up the display plane register */
6651 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
6652 POSTING_READ(DSPCNTR(plane));
6653
6654 ret = intel_pipe_set_base(crtc, x, y, fb);
6655
6656 return ret;
6657} 7003}
6658 7004
6659static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 7005static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
@@ -6831,6 +7177,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6831 break; 7177 break;
6832 } 7178 }
6833 7179
7180 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7181 pipe_config->limited_color_range = true;
7182
6834 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 7183 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6835 struct intel_shared_dpll *pll; 7184 struct intel_shared_dpll *pll;
6836 7185
@@ -6880,10 +7229,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6880 struct drm_device *dev = dev_priv->dev; 7229 struct drm_device *dev = dev_priv->dev;
6881 struct intel_ddi_plls *plls = &dev_priv->ddi_plls; 7230 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
6882 struct intel_crtc *crtc; 7231 struct intel_crtc *crtc;
6883 unsigned long irqflags;
6884 uint32_t val;
6885 7232
6886 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) 7233 for_each_intel_crtc(dev, crtc)
6887 WARN(crtc->active, "CRTC for pipe %c enabled\n", 7234 WARN(crtc->active, "CRTC for pipe %c enabled\n",
6888 pipe_name(crtc->pipe)); 7235 pipe_name(crtc->pipe));
6889 7236
@@ -6902,14 +7249,29 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6902 "Utility pin enabled\n"); 7249 "Utility pin enabled\n");
6903 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 7250 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
6904 7251
6905 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 7252 /*
6906 val = I915_READ(DEIMR); 7253 * In theory we can still leave IRQs enabled, as long as only the HPD
6907 WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff, 7254 * interrupts remain enabled. We used to check for that, but since it's
6908 "Unexpected DEIMR bits enabled: 0x%x\n", val); 7255 * gen-specific and since we only disable LCPLL after we fully disable
6909 val = I915_READ(SDEIMR); 7256 * the interrupts, the check below should be enough.
6910 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, 7257 */
6911 "Unexpected SDEIMR bits enabled: 0x%x\n", val); 7258 WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
6912 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 7259}
7260
7261static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7262{
7263 struct drm_device *dev = dev_priv->dev;
7264
7265 if (IS_HASWELL(dev)) {
7266 mutex_lock(&dev_priv->rps.hw_lock);
7267 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7268 val))
7269 DRM_ERROR("Failed to disable D_COMP\n");
7270 mutex_unlock(&dev_priv->rps.hw_lock);
7271 } else {
7272 I915_WRITE(D_COMP, val);
7273 }
7274 POSTING_READ(D_COMP);
6913} 7275}
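
hsw_write_dcomp() is factored out because Haswell can only reach D_COMP through the pcode mailbox while later parts expose it as an ordinary register; both LCPLL paths below now share it. A sketch of the same one-register-two-transports dispatch, with stand-in mailbox and register offsets (not the real ones):

    #include <stdint.h>
    #include <stdio.h>

    static int pcode_write(uint32_t mbox, uint32_t val)
    {
            printf("pcode mbox 0x%x <- 0x%08x\n", mbox, val);
            return 0; /* 0 = success */
    }

    static void mmio_write(uint32_t reg, uint32_t val)
    {
            printf("mmio 0x%x <- 0x%08x\n", reg, val);
    }

    static void write_dcomp(int is_haswell, uint32_t val)
    {
            if (is_haswell) {
                    if (pcode_write(0x10 /* stand-in mailbox id */, val))
                            fprintf(stderr, "Failed to write D_COMP\n");
            } else {
                    mmio_write(0x140000 /* stand-in register */, val);
            }
            /* the real helper follows up with a posting read */
    }

    int main(void)
    {
            write_dcomp(1, 1u << 30);
            write_dcomp(0, 1u << 30);
            return 0;
    }
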
6914 7276
6915/* 7277/*
@@ -6949,11 +7311,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6949 7311
6950 val = I915_READ(D_COMP); 7312 val = I915_READ(D_COMP);
6951 val |= D_COMP_COMP_DISABLE; 7313 val |= D_COMP_COMP_DISABLE;
6952 mutex_lock(&dev_priv->rps.hw_lock); 7314 hsw_write_dcomp(dev_priv, val);
6953 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6954 DRM_ERROR("Failed to disable D_COMP\n");
6955 mutex_unlock(&dev_priv->rps.hw_lock);
6956 POSTING_READ(D_COMP);
6957 ndelay(100); 7315 ndelay(100);
6958 7316
6959 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 7317 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
@@ -7008,11 +7366,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7008 val = I915_READ(D_COMP); 7366 val = I915_READ(D_COMP);
7009 val |= D_COMP_COMP_FORCE; 7367 val |= D_COMP_COMP_FORCE;
7010 val &= ~D_COMP_COMP_DISABLE; 7368 val &= ~D_COMP_COMP_DISABLE;
7011 mutex_lock(&dev_priv->rps.hw_lock); 7369 hsw_write_dcomp(dev_priv, val);
7012 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
7013 DRM_ERROR("Failed to enable D_COMP\n");
7014 mutex_unlock(&dev_priv->rps.hw_lock);
7015 POSTING_READ(D_COMP);
7016 7370
7017 val = I915_READ(LCPLL_CTL); 7371 val = I915_READ(LCPLL_CTL);
7018 val &= ~LCPLL_PLL_DISABLE; 7372 val &= ~LCPLL_PLL_DISABLE;
@@ -7066,8 +7420,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7066 struct drm_device *dev = dev_priv->dev; 7420 struct drm_device *dev = dev_priv->dev;
7067 uint32_t val; 7421 uint32_t val;
7068 7422
7069 WARN_ON(!HAS_PC8(dev));
7070
7071 DRM_DEBUG_KMS("Enabling package C8+\n"); 7423 DRM_DEBUG_KMS("Enabling package C8+\n");
7072 7424
7073 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 7425 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7077,7 +7429,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7077 } 7429 }
7078 7430
7079 lpt_disable_clkout_dp(dev); 7431 lpt_disable_clkout_dp(dev);
7080 hsw_runtime_pm_disable_interrupts(dev);
7081 hsw_disable_lcpll(dev_priv, true, true); 7432 hsw_disable_lcpll(dev_priv, true, true);
7082} 7433}
7083 7434
@@ -7086,12 +7437,9 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7086 struct drm_device *dev = dev_priv->dev; 7437 struct drm_device *dev = dev_priv->dev;
7087 uint32_t val; 7438 uint32_t val;
7088 7439
7089 WARN_ON(!HAS_PC8(dev));
7090
7091 DRM_DEBUG_KMS("Disabling package C8+\n"); 7440 DRM_DEBUG_KMS("Disabling package C8+\n");
7092 7441
7093 hsw_restore_lcpll(dev_priv); 7442 hsw_restore_lcpll(dev_priv);
7094 hsw_runtime_pm_restore_interrupts(dev);
7095 lpt_init_pch_refclk(dev); 7443 lpt_init_pch_refclk(dev);
7096 7444
7097 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 7445 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7101,10 +7449,11 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7101 } 7449 }
7102 7450
7103 intel_prepare_ddi(dev); 7451 intel_prepare_ddi(dev);
7104 i915_gem_init_swizzling(dev); 7452}
7105 mutex_lock(&dev_priv->rps.hw_lock); 7453
7106 gen6_update_ring_freq(dev); 7454static void snb_modeset_global_resources(struct drm_device *dev)
7107 mutex_unlock(&dev_priv->rps.hw_lock); 7455{
7456 modeset_update_crtc_power_domains(dev);
7108} 7457}
7109 7458
7110static void haswell_modeset_global_resources(struct drm_device *dev) 7459static void haswell_modeset_global_resources(struct drm_device *dev)
@@ -7116,39 +7465,15 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7116 int x, int y, 7465 int x, int y,
7117 struct drm_framebuffer *fb) 7466 struct drm_framebuffer *fb)
7118{ 7467{
7119 struct drm_device *dev = crtc->dev;
7120 struct drm_i915_private *dev_priv = dev->dev_private;
7121 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7468 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7122 int plane = intel_crtc->plane;
7123 int ret;
7124 7469
7125 if (!intel_ddi_pll_select(intel_crtc)) 7470 if (!intel_ddi_pll_select(intel_crtc))
7126 return -EINVAL; 7471 return -EINVAL;
7127 intel_ddi_pll_enable(intel_crtc); 7472 intel_ddi_pll_enable(intel_crtc);
7128 7473
7129 if (intel_crtc->config.has_dp_encoder)
7130 intel_dp_set_m_n(intel_crtc);
7131
7132 intel_crtc->lowfreq_avail = false; 7474 intel_crtc->lowfreq_avail = false;
7133 7475
7134 intel_set_pipe_timings(intel_crtc); 7476 return 0;
7135
7136 if (intel_crtc->config.has_pch_encoder) {
7137 intel_cpu_transcoder_set_m_n(intel_crtc,
7138 &intel_crtc->config.fdi_m_n);
7139 }
7140
7141 haswell_set_pipeconf(crtc);
7142
7143 intel_set_pipe_csc(crtc);
7144
7145 /* Set up the display plane register */
7146 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
7147 POSTING_READ(DSPCNTR(plane));
7148
7149 ret = intel_pipe_set_base(crtc, x, y, fb);
7150
7151 return ret;
7152} 7477}
7153 7478
7154static bool haswell_get_pipe_config(struct intel_crtc *crtc, 7479static bool haswell_get_pipe_config(struct intel_crtc *crtc,
@@ -7228,38 +7553,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7228 return true; 7553 return true;
7229} 7554}
7230 7555
7231static int intel_crtc_mode_set(struct drm_crtc *crtc,
7232 int x, int y,
7233 struct drm_framebuffer *fb)
7234{
7235 struct drm_device *dev = crtc->dev;
7236 struct drm_i915_private *dev_priv = dev->dev_private;
7237 struct intel_encoder *encoder;
7238 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7239 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
7240 int pipe = intel_crtc->pipe;
7241 int ret;
7242
7243 drm_vblank_pre_modeset(dev, pipe);
7244
7245 ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
7246
7247 drm_vblank_post_modeset(dev, pipe);
7248
7249 if (ret != 0)
7250 return ret;
7251
7252 for_each_encoder_on_crtc(dev, crtc, encoder) {
7253 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7254 encoder->base.base.id,
7255 drm_get_encoder_name(&encoder->base),
7256 mode->base.id, mode->name);
7257 encoder->mode_set(encoder);
7258 }
7259
7260 return 0;
7261}
7262
7263static struct { 7556static struct {
7264 int clock; 7557 int clock;
7265 u32 config; 7558 u32 config;
@@ -7374,8 +7667,6 @@ static void haswell_write_eld(struct drm_connector *connector,
7374{ 7667{
7375 struct drm_i915_private *dev_priv = connector->dev->dev_private; 7668 struct drm_i915_private *dev_priv = connector->dev->dev_private;
7376 uint8_t *eld = connector->eld; 7669 uint8_t *eld = connector->eld;
7377 struct drm_device *dev = crtc->dev;
7378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7379 uint32_t eldv; 7670 uint32_t eldv;
7380 uint32_t i; 7671 uint32_t i;
7381 int len; 7672 int len;
@@ -7387,17 +7678,14 @@ static void haswell_write_eld(struct drm_connector *connector,
7387 int aud_config = HSW_AUD_CFG(pipe); 7678 int aud_config = HSW_AUD_CFG(pipe);
7388 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; 7679 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7389 7680
7390
7391 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
7392
7393 /* Audio output enable */ 7681 /* Audio output enable */
7394 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); 7682 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7395 tmp = I915_READ(aud_cntrl_st2); 7683 tmp = I915_READ(aud_cntrl_st2);
7396 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); 7684 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7397 I915_WRITE(aud_cntrl_st2, tmp); 7685 I915_WRITE(aud_cntrl_st2, tmp);
7686 POSTING_READ(aud_cntrl_st2);
7398 7687
7399 /* Wait for 1 vertical blank */ 7688 assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
7400 intel_wait_for_vblank(dev, pipe);
7401 7689
7402 /* Set ELD valid state */ 7690 /* Set ELD valid state */
7403 tmp = I915_READ(aud_cntrl_st2); 7691 tmp = I915_READ(aud_cntrl_st2);
@@ -7417,7 +7705,6 @@ static void haswell_write_eld(struct drm_connector *connector,
7417 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 7705 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7418 7706
7419 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 7707 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7420 intel_crtc->eld_vld = true;
7421 7708
7422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 7709 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7423 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 7710 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -7564,9 +7851,9 @@ void intel_write_eld(struct drm_encoder *encoder,
7564 7851
7565 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 7852 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7566 connector->base.id, 7853 connector->base.id,
7567 drm_get_connector_name(connector), 7854 connector->name,
7568 connector->encoder->base.id, 7855 connector->encoder->base.id,
7569 drm_get_encoder_name(connector->encoder)); 7856 connector->encoder->name);
7570 7857
7571 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 7858 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7572 7859
@@ -7579,29 +7866,33 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
7579 struct drm_device *dev = crtc->dev; 7866 struct drm_device *dev = crtc->dev;
7580 struct drm_i915_private *dev_priv = dev->dev_private; 7867 struct drm_i915_private *dev_priv = dev->dev_private;
7581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7868 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7582 bool visible = base != 0; 7869 uint32_t cntl;
7583 u32 cntl;
7584 7870
7585 if (intel_crtc->cursor_visible == visible) 7871 if (base != intel_crtc->cursor_base) {
7586 return;
7587
7588 cntl = I915_READ(_CURACNTR);
7589 if (visible) {
7590 /* On these chipsets we can only modify the base whilst 7872 /* On these chipsets we can only modify the base whilst
7591 * the cursor is disabled. 7873 * the cursor is disabled.
7592 */ 7874 */
7875 if (intel_crtc->cursor_cntl) {
7876 I915_WRITE(_CURACNTR, 0);
7877 POSTING_READ(_CURACNTR);
7878 intel_crtc->cursor_cntl = 0;
7879 }
7880
7593 I915_WRITE(_CURABASE, base); 7881 I915_WRITE(_CURABASE, base);
7882 POSTING_READ(_CURABASE);
7883 }
7594 7884
7595 cntl &= ~(CURSOR_FORMAT_MASK); 7885 /* XXX width must be 64, stride 256 => 0x00 << 28 */
7596 /* XXX width must be 64, stride 256 => 0x00 << 28 */ 7886 cntl = 0;
7597 cntl |= CURSOR_ENABLE | 7887 if (base)
7888 cntl = (CURSOR_ENABLE |
7598 CURSOR_GAMMA_ENABLE | 7889 CURSOR_GAMMA_ENABLE |
7599 CURSOR_FORMAT_ARGB; 7890 CURSOR_FORMAT_ARGB);
7600 } else 7891 if (intel_crtc->cursor_cntl != cntl) {
7601 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); 7892 I915_WRITE(_CURACNTR, cntl);
7602 I915_WRITE(_CURACNTR, cntl); 7893 POSTING_READ(_CURACNTR);
7603 7894 intel_crtc->cursor_cntl = cntl;
7604 intel_crtc->cursor_visible = visible; 7895 }
7605} 7896}
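Reviewer note, not part of the patch: the i845 path above replaces the old cursor_visible flag with a cache of the last programmed control value, and only touches the base register while the cursor is off. A minimal stand-alone sketch of the same write-caching idea, where reg_write(), HYP_CURACNTR and HYP_CURABASE are hypothetical stand-ins for I915_WRITE() and the real register offsets:

	enum { HYP_CURACNTR, HYP_CURABASE };	/* hypothetical register ids */
	extern void reg_write(int reg, u32 val);

	struct cursor_state {
		u32 base;	/* last base address programmed */
		u32 cntl;	/* last control value programmed */
	};

	static void update_cursor_845_sketch(struct cursor_state *s,
					     u32 base, u32 new_cntl)
	{
		if (base != s->base) {
			/* the base may only change while the cursor is disabled */
			if (s->cntl) {
				reg_write(HYP_CURACNTR, 0);
				s->cntl = 0;
			}
			reg_write(HYP_CURABASE, base);
			s->base = base;
		}
		/* skip redundant register writes, as the patch now does */
		if (s->cntl != new_cntl) {
			reg_write(HYP_CURACNTR, new_cntl);
			s->cntl = new_cntl;
		}
	}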
7606 7897
7607static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 7898static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7610,16 +7901,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7610 struct drm_i915_private *dev_priv = dev->dev_private; 7901 struct drm_i915_private *dev_priv = dev->dev_private;
7611 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7612 int pipe = intel_crtc->pipe; 7903 int pipe = intel_crtc->pipe;
7613 bool visible = base != 0; 7904 uint32_t cntl;
7614 7905
7615 if (intel_crtc->cursor_visible != visible) { 7906 cntl = 0;
7616 int16_t width = intel_crtc->cursor_width; 7907 if (base) {
7617 uint32_t cntl = I915_READ(CURCNTR(pipe)); 7908 cntl = MCURSOR_GAMMA_ENABLE;
7618 if (base) { 7909 switch (intel_crtc->cursor_width) {
7619 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
7620 cntl |= MCURSOR_GAMMA_ENABLE;
7621
7622 switch (width) {
7623 case 64: 7910 case 64:
7624 cntl |= CURSOR_MODE_64_ARGB_AX; 7911 cntl |= CURSOR_MODE_64_ARGB_AX;
7625 break; 7912 break;
@@ -7632,18 +7919,16 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7632 default: 7919 default:
7633 WARN_ON(1); 7920 WARN_ON(1);
7634 return; 7921 return;
7635 }
7636 cntl |= pipe << 28; /* Connect to correct pipe */
7637 } else {
7638 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7639 cntl |= CURSOR_MODE_DISABLE;
7640 } 7922 }
7923 cntl |= pipe << 28; /* Connect to correct pipe */
7924 }
7925 if (intel_crtc->cursor_cntl != cntl) {
7641 I915_WRITE(CURCNTR(pipe), cntl); 7926 I915_WRITE(CURCNTR(pipe), cntl);
7642 7927 POSTING_READ(CURCNTR(pipe));
7643 intel_crtc->cursor_visible = visible; 7928 intel_crtc->cursor_cntl = cntl;
7644 } 7929 }
7930
7645 /* and commit changes on next vblank */ 7931 /* and commit changes on next vblank */
7646 POSTING_READ(CURCNTR(pipe));
7647 I915_WRITE(CURBASE(pipe), base); 7932 I915_WRITE(CURBASE(pipe), base);
7648 POSTING_READ(CURBASE(pipe)); 7933 POSTING_READ(CURBASE(pipe));
7649} 7934}
@@ -7654,15 +7939,12 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7654 struct drm_i915_private *dev_priv = dev->dev_private; 7939 struct drm_i915_private *dev_priv = dev->dev_private;
7655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7940 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7656 int pipe = intel_crtc->pipe; 7941 int pipe = intel_crtc->pipe;
7657 bool visible = base != 0; 7942 uint32_t cntl;
7658 7943
7659 if (intel_crtc->cursor_visible != visible) { 7944 cntl = 0;
7660 int16_t width = intel_crtc->cursor_width; 7945 if (base) {
7661 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); 7946 cntl = MCURSOR_GAMMA_ENABLE;
7662 if (base) { 7947 switch (intel_crtc->cursor_width) {
7663 cntl &= ~CURSOR_MODE;
7664 cntl |= MCURSOR_GAMMA_ENABLE;
7665 switch (width) {
7666 case 64: 7948 case 64:
7667 cntl |= CURSOR_MODE_64_ARGB_AX; 7949 cntl |= CURSOR_MODE_64_ARGB_AX;
7668 break; 7950 break;
@@ -7675,23 +7957,20 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7675 default: 7957 default:
7676 WARN_ON(1); 7958 WARN_ON(1);
7677 return; 7959 return;
7678 }
7679 } else {
7680 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7681 cntl |= CURSOR_MODE_DISABLE;
7682 }
7683 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7684 cntl |= CURSOR_PIPE_CSC_ENABLE;
7685 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
7686 } 7960 }
7687 I915_WRITE(CURCNTR_IVB(pipe), cntl); 7961 }
7962 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
7963 cntl |= CURSOR_PIPE_CSC_ENABLE;
7688 7964
7689 intel_crtc->cursor_visible = visible; 7965 if (intel_crtc->cursor_cntl != cntl) {
7966 I915_WRITE(CURCNTR(pipe), cntl);
7967 POSTING_READ(CURCNTR(pipe));
7968 intel_crtc->cursor_cntl = cntl;
7690 } 7969 }
7970
7691 /* and commit changes on next vblank */ 7971 /* and commit changes on next vblank */
7692 POSTING_READ(CURCNTR_IVB(pipe)); 7972 I915_WRITE(CURBASE(pipe), base);
7693 I915_WRITE(CURBASE_IVB(pipe), base); 7973 POSTING_READ(CURBASE(pipe));
7694 POSTING_READ(CURBASE_IVB(pipe));
7695} 7974}
7696 7975
7697/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ 7976
@@ -7705,7 +7984,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7705 int x = intel_crtc->cursor_x; 7984 int x = intel_crtc->cursor_x;
7706 int y = intel_crtc->cursor_y; 7985 int y = intel_crtc->cursor_y;
7707 u32 base = 0, pos = 0; 7986 u32 base = 0, pos = 0;
7708 bool visible;
7709 7987
7710 if (on) 7988 if (on)
7711 base = intel_crtc->cursor_addr; 7989 base = intel_crtc->cursor_addr;
@@ -7734,20 +8012,18 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7734 } 8012 }
7735 pos |= y << CURSOR_Y_SHIFT; 8013 pos |= y << CURSOR_Y_SHIFT;
7736 8014
7737 visible = base != 0; 8015 if (base == 0 && intel_crtc->cursor_base == 0)
7738 if (!visible && !intel_crtc->cursor_visible)
7739 return; 8016 return;
7740 8017
7741 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) { 8018 I915_WRITE(CURPOS(pipe), pos);
7742 I915_WRITE(CURPOS_IVB(pipe), pos); 8019
8020 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
7743 ivb_update_cursor(crtc, base); 8021 ivb_update_cursor(crtc, base);
7744 } else { 8022 else if (IS_845G(dev) || IS_I865G(dev))
7745 I915_WRITE(CURPOS(pipe), pos); 8023 i845_update_cursor(crtc, base);
7746 if (IS_845G(dev) || IS_I865G(dev)) 8024 else
7747 i845_update_cursor(crtc, base); 8025 i9xx_update_cursor(crtc, base);
7748 else 8026 intel_crtc->cursor_base = base;
7749 i9xx_update_cursor(crtc, base);
7750 }
7751} 8027}
7752 8028
7753static int intel_crtc_cursor_set(struct drm_crtc *crtc, 8029static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -8015,7 +8291,8 @@ mode_fits_in_fbdev(struct drm_device *dev,
8015 8291
8016bool intel_get_load_detect_pipe(struct drm_connector *connector, 8292bool intel_get_load_detect_pipe(struct drm_connector *connector,
8017 struct drm_display_mode *mode, 8293 struct drm_display_mode *mode,
8018 struct intel_load_detect_pipe *old) 8294 struct intel_load_detect_pipe *old,
8295 struct drm_modeset_acquire_ctx *ctx)
8019{ 8296{
8020 struct intel_crtc *intel_crtc; 8297 struct intel_crtc *intel_crtc;
8021 struct intel_encoder *intel_encoder = 8298 struct intel_encoder *intel_encoder =
@@ -8025,11 +8302,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8025 struct drm_crtc *crtc = NULL; 8302 struct drm_crtc *crtc = NULL;
8026 struct drm_device *dev = encoder->dev; 8303 struct drm_device *dev = encoder->dev;
8027 struct drm_framebuffer *fb; 8304 struct drm_framebuffer *fb;
8028 int i = -1; 8305 struct drm_mode_config *config = &dev->mode_config;
8306 int ret, i = -1;
8029 8307
8030 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 8308 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8031 connector->base.id, drm_get_connector_name(connector), 8309 connector->base.id, connector->name,
8032 encoder->base.id, drm_get_encoder_name(encoder)); 8310 encoder->base.id, encoder->name);
8311
8312 drm_modeset_acquire_init(ctx, 0);
8313
8314retry:
8315 ret = drm_modeset_lock(&config->connection_mutex, ctx);
8316 if (ret)
8317 goto fail_unlock;
8033 8318
8034 /* 8319 /*
8035 * Algorithm gets a little messy: 8320 * Algorithm gets a little messy:
@@ -8045,7 +8330,9 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8045 if (encoder->crtc) { 8330 if (encoder->crtc) {
8046 crtc = encoder->crtc; 8331 crtc = encoder->crtc;
8047 8332
8048 mutex_lock(&crtc->mutex); 8333 ret = drm_modeset_lock(&crtc->mutex, ctx);
8334 if (ret)
8335 goto fail_unlock;
8049 8336
8050 old->dpms_mode = connector->dpms; 8337 old->dpms_mode = connector->dpms;
8051 old->load_detect_temp = false; 8338 old->load_detect_temp = false;
@@ -8058,7 +8345,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8058 } 8345 }
8059 8346
8060 /* Find an unused one (if possible) */ 8347 /* Find an unused one (if possible) */
8061 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { 8348 for_each_crtc(dev, possible_crtc) {
8062 i++; 8349 i++;
8063 if (!(encoder->possible_crtcs & (1 << i))) 8350 if (!(encoder->possible_crtcs & (1 << i)))
8064 continue; 8351 continue;
@@ -8073,10 +8360,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8073 */ 8360 */
8074 if (!crtc) { 8361 if (!crtc) {
8075 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 8362 DRM_DEBUG_KMS("no pipe available for load-detect\n");
8076 return false; 8363 goto fail_unlock;
8077 } 8364 }
8078 8365
8079 mutex_lock(&crtc->mutex); 8366 ret = drm_modeset_lock(&crtc->mutex, ctx);
8367 if (ret)
8368 goto fail_unlock;
8080 intel_encoder->new_crtc = to_intel_crtc(crtc); 8369 intel_encoder->new_crtc = to_intel_crtc(crtc);
8081 to_intel_connector(connector)->new_encoder = intel_encoder; 8370 to_intel_connector(connector)->new_encoder = intel_encoder;
8082 8371
@@ -8126,12 +8415,21 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8126 intel_crtc->new_config = &intel_crtc->config; 8415 intel_crtc->new_config = &intel_crtc->config;
8127 else 8416 else
8128 intel_crtc->new_config = NULL; 8417 intel_crtc->new_config = NULL;
8129 mutex_unlock(&crtc->mutex); 8418fail_unlock:
8419 if (ret == -EDEADLK) {
8420 drm_modeset_backoff(ctx);
8421 goto retry;
8422 }
8423
8424 drm_modeset_drop_locks(ctx);
8425 drm_modeset_acquire_fini(ctx);
8426
8130 return false; 8427 return false;
8131} 8428}
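Reviewer note: the -EDEADLK/backoff/retry dance added above is the standard acquire-context idiom from the modeset locking rework this merge pulls in. A minimal sketch of the pattern in isolation, with do_locked_work() as a hypothetical placeholder for the caller's real work:

	extern int do_locked_work(struct drm_device *dev);

	static int locked_operation_sketch(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret == -EDEADLK) {
			/* another thread holds a lock we need: drop ours, retry */
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (!ret)
			ret = do_locked_work(dev);

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}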
8132 8429
8133void intel_release_load_detect_pipe(struct drm_connector *connector, 8430void intel_release_load_detect_pipe(struct drm_connector *connector,
8134 struct intel_load_detect_pipe *old) 8431 struct intel_load_detect_pipe *old,
8432 struct drm_modeset_acquire_ctx *ctx)
8135{ 8433{
8136 struct intel_encoder *intel_encoder = 8434 struct intel_encoder *intel_encoder =
8137 intel_attached_encoder(connector); 8435 intel_attached_encoder(connector);
@@ -8140,8 +8438,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
8140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8438 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8141 8439
8142 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 8440 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8143 connector->base.id, drm_get_connector_name(connector), 8441 connector->base.id, connector->name,
8144 encoder->base.id, drm_get_encoder_name(encoder)); 8442 encoder->base.id, encoder->name);
8145 8443
8146 if (old->load_detect_temp) { 8444 if (old->load_detect_temp) {
8147 to_intel_connector(connector)->new_encoder = NULL; 8445 to_intel_connector(connector)->new_encoder = NULL;
@@ -8155,7 +8453,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
8155 drm_framebuffer_unreference(old->release_fb); 8453 drm_framebuffer_unreference(old->release_fb);
8156 } 8454 }
8157 8455
8158 mutex_unlock(&crtc->mutex); 8456 goto unlock;
8159 return; 8457 return;
8160 } 8458 }
8161 8459
@@ -8163,7 +8461,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
8163 if (old->dpms_mode != DRM_MODE_DPMS_ON) 8461 if (old->dpms_mode != DRM_MODE_DPMS_ON)
8164 connector->funcs->dpms(connector, old->dpms_mode); 8462 connector->funcs->dpms(connector, old->dpms_mode);
8165 8463
8166 mutex_unlock(&crtc->mutex); 8464unlock:
8465 drm_modeset_drop_locks(ctx);
8466 drm_modeset_acquire_fini(ctx);
8167} 8467}
8168 8468
8169static int i9xx_pll_refclk(struct drm_device *dev, 8469static int i9xx_pll_refclk(struct drm_device *dev,
@@ -8449,7 +8749,7 @@ void intel_mark_idle(struct drm_device *dev)
8449 if (!i915.powersave) 8749 if (!i915.powersave)
8450 goto out; 8750 goto out;
8451 8751
8452 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8752 for_each_crtc(dev, crtc) {
8453 if (!crtc->primary->fb) 8753 if (!crtc->primary->fb)
8454 continue; 8754 continue;
8455 8755
@@ -8464,7 +8764,7 @@ out:
8464} 8764}
8465 8765
8466void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 8766void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8467 struct intel_ring_buffer *ring) 8767 struct intel_engine_cs *ring)
8468{ 8768{
8469 struct drm_device *dev = obj->base.dev; 8769 struct drm_device *dev = obj->base.dev;
8470 struct drm_crtc *crtc; 8770 struct drm_crtc *crtc;
@@ -8472,7 +8772,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8472 if (!i915.powersave) 8772 if (!i915.powersave)
8473 return; 8773 return;
8474 8774
8475 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8775 for_each_crtc(dev, crtc) {
8476 if (!crtc->primary->fb) 8776 if (!crtc->primary->fb)
8477 continue; 8777 continue;
8478 8778
@@ -8560,7 +8860,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8560 if (work->event) 8860 if (work->event)
8561 drm_send_vblank_event(dev, intel_crtc->pipe, work->event); 8861 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
8562 8862
8563 drm_vblank_put(dev, intel_crtc->pipe); 8863 drm_crtc_vblank_put(crtc);
8564 8864
8565 spin_unlock_irqrestore(&dev->event_lock, flags); 8865 spin_unlock_irqrestore(&dev->event_lock, flags);
8566 8866
@@ -8587,6 +8887,48 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8587 do_intel_finish_page_flip(dev, crtc); 8887 do_intel_finish_page_flip(dev, crtc);
8588} 8888}
8589 8889
8890/* Is 'a' after or equal to 'b'? */
8891static bool g4x_flip_count_after_eq(u32 a, u32 b)
8892{
8893 return !((a - b) & 0x80000000);
8894}
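Reviewer note: g4x_flip_count_after_eq() is the usual wraparound-safe "after or equal" test for a free-running 32-bit counter; it holds as long as the two values are less than 2^31 apart. A hypothetical self-check, purely illustrative, showing the behaviour across the wrap point:

	static void flip_count_selftest(void)
	{
		WARN_ON(!g4x_flip_count_after_eq(5, 5));	/* equal */
		WARN_ON(!g4x_flip_count_after_eq(6, 5));	/* after */
		WARN_ON(g4x_flip_count_after_eq(5, 6));		/* before */
		/* 0x00000002 is "after" 0xfffffffe despite the 32-bit wrap */
		WARN_ON(!g4x_flip_count_after_eq(0x00000002, 0xfffffffe));
	}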
8895
8896static bool page_flip_finished(struct intel_crtc *crtc)
8897{
8898 struct drm_device *dev = crtc->base.dev;
8899 struct drm_i915_private *dev_priv = dev->dev_private;
8900
8901 /*
8902 * The relevant registers don't exist on pre-ctg.
8903 * As the flip done interrupt doesn't trigger for mmio
8904 * flips on gmch platforms, a flip count check isn't
8905 * really needed there. But since ctg has the registers,
8906 * include it in the check anyway.
8907 */
8908 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
8909 return true;
8910
8911 /*
8912 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
8913 * used the same base address. In that case the mmio flip might
8914 * have completed, but the CS hasn't even executed the flip yet.
8915 *
8916 * A flip count check isn't enough as the CS might have updated
8917 * the base address just after start of vblank, but before we
8918 * managed to process the interrupt. This means we'd complete the
8919 * CS flip too soon.
8920 *
8921 * Combining both checks should get us a good enough result. It may
8922 * still happen that the CS flip has been executed, but has not
8923 * yet actually completed. But in case the base address is the same
8924 * anyway, we don't really care.
8925 */
8926 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
8927 crtc->unpin_work->gtt_offset &&
8928 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
8929 crtc->unpin_work->flip_count);
8930}
8931
8590void intel_prepare_page_flip(struct drm_device *dev, int plane) 8932void intel_prepare_page_flip(struct drm_device *dev, int plane)
8591{ 8933{
8592 struct drm_i915_private *dev_priv = dev->dev_private; 8934 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8599,12 +8941,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
8599 * is also accompanied by a spurious intel_prepare_page_flip(). 8941 * is also accompanied by a spurious intel_prepare_page_flip().
8600 */ 8942 */
8601 spin_lock_irqsave(&dev->event_lock, flags); 8943 spin_lock_irqsave(&dev->event_lock, flags);
8602 if (intel_crtc->unpin_work) 8944 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
8603 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 8945 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
8604 spin_unlock_irqrestore(&dev->event_lock, flags); 8946 spin_unlock_irqrestore(&dev->event_lock, flags);
8605} 8947}
8606 8948
8607inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 8949static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
8608{ 8950{
8609 /* Ensure that the work item is consistent when activating it ... */ 8951 /* Ensure that the work item is consistent when activating it ... */
8610 smp_wmb(); 8952 smp_wmb();
@@ -8617,21 +8959,16 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
8617 struct drm_crtc *crtc, 8959 struct drm_crtc *crtc,
8618 struct drm_framebuffer *fb, 8960 struct drm_framebuffer *fb,
8619 struct drm_i915_gem_object *obj, 8961 struct drm_i915_gem_object *obj,
8962 struct intel_engine_cs *ring,
8620 uint32_t flags) 8963 uint32_t flags)
8621{ 8964{
8622 struct drm_i915_private *dev_priv = dev->dev_private;
8623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8965 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8624 u32 flip_mask; 8966 u32 flip_mask;
8625 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8626 int ret; 8967 int ret;
8627 8968
8628 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8629 if (ret)
8630 goto err;
8631
8632 ret = intel_ring_begin(ring, 6); 8969 ret = intel_ring_begin(ring, 6);
8633 if (ret) 8970 if (ret)
8634 goto err_unpin; 8971 return ret;
8635 8972
8636 /* Can't queue multiple flips, so wait for the previous 8973 /* Can't queue multiple flips, so wait for the previous
8637 * one to finish before executing the next. 8974 * one to finish before executing the next.
@@ -8645,38 +8982,28 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
8645 intel_ring_emit(ring, MI_DISPLAY_FLIP | 8982 intel_ring_emit(ring, MI_DISPLAY_FLIP |
8646 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 8983 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8647 intel_ring_emit(ring, fb->pitches[0]); 8984 intel_ring_emit(ring, fb->pitches[0]);
8648 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 8985 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8649 intel_ring_emit(ring, 0); /* aux display base address, unused */ 8986 intel_ring_emit(ring, 0); /* aux display base address, unused */
8650 8987
8651 intel_mark_page_flip_active(intel_crtc); 8988 intel_mark_page_flip_active(intel_crtc);
8652 __intel_ring_advance(ring); 8989 __intel_ring_advance(ring);
8653 return 0; 8990 return 0;
8654
8655err_unpin:
8656 intel_unpin_fb_obj(obj);
8657err:
8658 return ret;
8659} 8991}
8660 8992
8661static int intel_gen3_queue_flip(struct drm_device *dev, 8993static int intel_gen3_queue_flip(struct drm_device *dev,
8662 struct drm_crtc *crtc, 8994 struct drm_crtc *crtc,
8663 struct drm_framebuffer *fb, 8995 struct drm_framebuffer *fb,
8664 struct drm_i915_gem_object *obj, 8996 struct drm_i915_gem_object *obj,
8997 struct intel_engine_cs *ring,
8665 uint32_t flags) 8998 uint32_t flags)
8666{ 8999{
8667 struct drm_i915_private *dev_priv = dev->dev_private;
8668 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9000 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8669 u32 flip_mask; 9001 u32 flip_mask;
8670 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8671 int ret; 9002 int ret;
8672 9003
8673 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8674 if (ret)
8675 goto err;
8676
8677 ret = intel_ring_begin(ring, 6); 9004 ret = intel_ring_begin(ring, 6);
8678 if (ret) 9005 if (ret)
8679 goto err_unpin; 9006 return ret;
8680 9007
8681 if (intel_crtc->plane) 9008 if (intel_crtc->plane)
8682 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 9009 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
@@ -8687,38 +9014,29 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
8687 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 9014 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
8688 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9015 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8689 intel_ring_emit(ring, fb->pitches[0]); 9016 intel_ring_emit(ring, fb->pitches[0]);
8690 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 9017 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8691 intel_ring_emit(ring, MI_NOOP); 9018 intel_ring_emit(ring, MI_NOOP);
8692 9019
8693 intel_mark_page_flip_active(intel_crtc); 9020 intel_mark_page_flip_active(intel_crtc);
8694 __intel_ring_advance(ring); 9021 __intel_ring_advance(ring);
8695 return 0; 9022 return 0;
8696
8697err_unpin:
8698 intel_unpin_fb_obj(obj);
8699err:
8700 return ret;
8701} 9023}
8702 9024
8703static int intel_gen4_queue_flip(struct drm_device *dev, 9025static int intel_gen4_queue_flip(struct drm_device *dev,
8704 struct drm_crtc *crtc, 9026 struct drm_crtc *crtc,
8705 struct drm_framebuffer *fb, 9027 struct drm_framebuffer *fb,
8706 struct drm_i915_gem_object *obj, 9028 struct drm_i915_gem_object *obj,
9029 struct intel_engine_cs *ring,
8707 uint32_t flags) 9030 uint32_t flags)
8708{ 9031{
8709 struct drm_i915_private *dev_priv = dev->dev_private; 9032 struct drm_i915_private *dev_priv = dev->dev_private;
8710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8711 uint32_t pf, pipesrc; 9034 uint32_t pf, pipesrc;
8712 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8713 int ret; 9035 int ret;
8714 9036
8715 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8716 if (ret)
8717 goto err;
8718
8719 ret = intel_ring_begin(ring, 4); 9037 ret = intel_ring_begin(ring, 4);
8720 if (ret) 9038 if (ret)
8721 goto err_unpin; 9039 return ret;
8722 9040
8723 /* i965+ uses the linear or tiled offsets from the 9041 /* i965+ uses the linear or tiled offsets from the
8724 * Display Registers (which do not change across a page-flip) 9042 * Display Registers (which do not change across a page-flip)
@@ -8727,8 +9045,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
8727 intel_ring_emit(ring, MI_DISPLAY_FLIP | 9045 intel_ring_emit(ring, MI_DISPLAY_FLIP |
8728 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9046 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8729 intel_ring_emit(ring, fb->pitches[0]); 9047 intel_ring_emit(ring, fb->pitches[0]);
8730 intel_ring_emit(ring, 9048 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
8731 (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
8732 obj->tiling_mode); 9049 obj->tiling_mode);
8733 9050
8734 /* XXX Enabling the panel-fitter across page-flip is so far 9051 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -8742,37 +9059,28 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
8742 intel_mark_page_flip_active(intel_crtc); 9059 intel_mark_page_flip_active(intel_crtc);
8743 __intel_ring_advance(ring); 9060 __intel_ring_advance(ring);
8744 return 0; 9061 return 0;
8745
8746err_unpin:
8747 intel_unpin_fb_obj(obj);
8748err:
8749 return ret;
8750} 9062}
8751 9063
8752static int intel_gen6_queue_flip(struct drm_device *dev, 9064static int intel_gen6_queue_flip(struct drm_device *dev,
8753 struct drm_crtc *crtc, 9065 struct drm_crtc *crtc,
8754 struct drm_framebuffer *fb, 9066 struct drm_framebuffer *fb,
8755 struct drm_i915_gem_object *obj, 9067 struct drm_i915_gem_object *obj,
9068 struct intel_engine_cs *ring,
8756 uint32_t flags) 9069 uint32_t flags)
8757{ 9070{
8758 struct drm_i915_private *dev_priv = dev->dev_private; 9071 struct drm_i915_private *dev_priv = dev->dev_private;
8759 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8760 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8761 uint32_t pf, pipesrc; 9073 uint32_t pf, pipesrc;
8762 int ret; 9074 int ret;
8763 9075
8764 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8765 if (ret)
8766 goto err;
8767
8768 ret = intel_ring_begin(ring, 4); 9076 ret = intel_ring_begin(ring, 4);
8769 if (ret) 9077 if (ret)
8770 goto err_unpin; 9078 return ret;
8771 9079
8772 intel_ring_emit(ring, MI_DISPLAY_FLIP | 9080 intel_ring_emit(ring, MI_DISPLAY_FLIP |
8773 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9081 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8774 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 9082 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
8775 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 9083 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8776 9084
8777 /* Contrary to the suggestions in the documentation, 9085 /* Contrary to the suggestions in the documentation,
8778 * "Enable Panel Fitter" does not seem to be required when page 9086 * "Enable Panel Fitter" does not seem to be required when page
@@ -8787,34 +9095,20 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
8787 intel_mark_page_flip_active(intel_crtc); 9095 intel_mark_page_flip_active(intel_crtc);
8788 __intel_ring_advance(ring); 9096 __intel_ring_advance(ring);
8789 return 0; 9097 return 0;
8790
8791err_unpin:
8792 intel_unpin_fb_obj(obj);
8793err:
8794 return ret;
8795} 9098}
8796 9099
8797static int intel_gen7_queue_flip(struct drm_device *dev, 9100static int intel_gen7_queue_flip(struct drm_device *dev,
8798 struct drm_crtc *crtc, 9101 struct drm_crtc *crtc,
8799 struct drm_framebuffer *fb, 9102 struct drm_framebuffer *fb,
8800 struct drm_i915_gem_object *obj, 9103 struct drm_i915_gem_object *obj,
9104 struct intel_engine_cs *ring,
8801 uint32_t flags) 9105 uint32_t flags)
8802{ 9106{
8803 struct drm_i915_private *dev_priv = dev->dev_private;
8804 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9107 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8805 struct intel_ring_buffer *ring;
8806 uint32_t plane_bit = 0; 9108 uint32_t plane_bit = 0;
8807 int len, ret; 9109 int len, ret;
8808 9110
8809 ring = obj->ring; 9111 switch (intel_crtc->plane) {
8810 if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
8811 ring = &dev_priv->ring[BCS];
8812
8813 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8814 if (ret)
8815 goto err;
8816
8817 switch(intel_crtc->plane) {
8818 case PLANE_A: 9112 case PLANE_A:
8819 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 9113 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
8820 break; 9114 break;
@@ -8826,13 +9120,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8826 break; 9120 break;
8827 default: 9121 default:
8828 WARN_ONCE(1, "unknown plane in flip command\n"); 9122 WARN_ONCE(1, "unknown plane in flip command\n");
8829 ret = -ENODEV; 9123 return -ENODEV;
8830 goto err_unpin;
8831 } 9124 }
8832 9125
8833 len = 4; 9126 len = 4;
8834 if (ring->id == RCS) 9127 if (ring->id == RCS) {
8835 len += 6; 9128 len += 6;
9129 /*
9130 * On Gen 8, SRM now takes an extra dword to accommodate
9131 * 48-bit addresses, and we need a NOOP for the batch size to
9132 * stay even.
9133 */
9134 if (IS_GEN8(dev))
9135 len += 2;
9136 }
8836 9137
8837 /* 9138 /*
8838 * BSpec MI_DISPLAY_FLIP for IVB: 9139 * BSpec MI_DISPLAY_FLIP for IVB:
@@ -8846,11 +9147,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8846 */ 9147 */
8847 ret = intel_ring_cacheline_align(ring); 9148 ret = intel_ring_cacheline_align(ring);
8848 if (ret) 9149 if (ret)
8849 goto err_unpin; 9150 return ret;
8850 9151
8851 ret = intel_ring_begin(ring, len); 9152 ret = intel_ring_begin(ring, len);
8852 if (ret) 9153 if (ret)
8853 goto err_unpin; 9154 return ret;
8854 9155
8855 /* Unmask the flip-done completion message. Note that the bspec says that 9156 /* Unmask the flip-done completion message. Note that the bspec says that
8856 * we should do this for both the BCS and RCS, and that we must not unmask 9157 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -8867,31 +9168,35 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8867 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 9168 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8868 DERRMR_PIPEB_PRI_FLIP_DONE | 9169 DERRMR_PIPEB_PRI_FLIP_DONE |
8869 DERRMR_PIPEC_PRI_FLIP_DONE)); 9170 DERRMR_PIPEC_PRI_FLIP_DONE));
8870 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | 9171 if (IS_GEN8(dev))
8871 MI_SRM_LRM_GLOBAL_GTT); 9172 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9173 MI_SRM_LRM_GLOBAL_GTT);
9174 else
9175 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9176 MI_SRM_LRM_GLOBAL_GTT);
8872 intel_ring_emit(ring, DERRMR); 9177 intel_ring_emit(ring, DERRMR);
8873 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 9178 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9179 if (IS_GEN8(dev)) {
9180 intel_ring_emit(ring, 0);
9181 intel_ring_emit(ring, MI_NOOP);
9182 }
8874 } 9183 }
8875 9184
8876 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 9185 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
8877 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 9186 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
8878 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 9187 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8879 intel_ring_emit(ring, (MI_NOOP)); 9188 intel_ring_emit(ring, (MI_NOOP));
8880 9189
8881 intel_mark_page_flip_active(intel_crtc); 9190 intel_mark_page_flip_active(intel_crtc);
8882 __intel_ring_advance(ring); 9191 __intel_ring_advance(ring);
8883 return 0; 9192 return 0;
8884
8885err_unpin:
8886 intel_unpin_fb_obj(obj);
8887err:
8888 return ret;
8889} 9193}
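Reviewer note: the ring-length bookkeeping in intel_gen7_queue_flip() can be summarised as a tiny helper (purely illustrative, not in the patch): 4 dwords for the flip itself, 6 more on RCS for the DERRMR LRI+SRM dance, and 2 more on gen8 where MI_STORE_REGISTER_MEM grows a dword for the 48-bit address and a NOOP keeps the emit count even:

	static int gen7_flip_batch_len(bool rcs, bool gen8)
	{
		int len = 4;		/* MI_DISPLAY_FLIP + pitch + base + NOOP */

		if (rcs) {
			len += 6;	/* LRI(DERRMR) + SRM(DERRMR) */
			if (gen8)
				len += 2;	/* extra address dword + NOOP */
		}
		return len;
	}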
8890 9194
8891static int intel_default_queue_flip(struct drm_device *dev, 9195static int intel_default_queue_flip(struct drm_device *dev,
8892 struct drm_crtc *crtc, 9196 struct drm_crtc *crtc,
8893 struct drm_framebuffer *fb, 9197 struct drm_framebuffer *fb,
8894 struct drm_i915_gem_object *obj, 9198 struct drm_i915_gem_object *obj,
9199 struct intel_engine_cs *ring,
8895 uint32_t flags) 9200 uint32_t flags)
8896{ 9201{
8897 return -ENODEV; 9202 return -ENODEV;
@@ -8908,6 +9213,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8908 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; 9213 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
8909 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8910 struct intel_unpin_work *work; 9215 struct intel_unpin_work *work;
9216 struct intel_engine_cs *ring;
8911 unsigned long flags; 9217 unsigned long flags;
8912 int ret; 9218 int ret;
8913 9219
@@ -8936,7 +9242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8936 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; 9242 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
8937 INIT_WORK(&work->work, intel_unpin_work_fn); 9243 INIT_WORK(&work->work, intel_unpin_work_fn);
8938 9244
8939 ret = drm_vblank_get(dev, intel_crtc->pipe); 9245 ret = drm_crtc_vblank_get(crtc);
8940 if (ret) 9246 if (ret)
8941 goto free_work; 9247 goto free_work;
8942 9248
@@ -8945,7 +9251,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8945 if (intel_crtc->unpin_work) { 9251 if (intel_crtc->unpin_work) {
8946 spin_unlock_irqrestore(&dev->event_lock, flags); 9252 spin_unlock_irqrestore(&dev->event_lock, flags);
8947 kfree(work); 9253 kfree(work);
8948 drm_vblank_put(dev, intel_crtc->pipe); 9254 drm_crtc_vblank_put(crtc);
8949 9255
8950 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 9256 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
8951 return -EBUSY; 9257 return -EBUSY;
@@ -8973,10 +9279,30 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8973 atomic_inc(&intel_crtc->unpin_work_count); 9279 atomic_inc(&intel_crtc->unpin_work_count);
8974 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 9280 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
8975 9281
8976 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags); 9282 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9283 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
9284
9285 if (IS_VALLEYVIEW(dev)) {
9286 ring = &dev_priv->ring[BCS];
9287 } else if (INTEL_INFO(dev)->gen >= 7) {
9288 ring = obj->ring;
9289 if (ring == NULL || ring->id != RCS)
9290 ring = &dev_priv->ring[BCS];
9291 } else {
9292 ring = &dev_priv->ring[RCS];
9293 }
9294
9295 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8977 if (ret) 9296 if (ret)
8978 goto cleanup_pending; 9297 goto cleanup_pending;
8979 9298
9299 work->gtt_offset =
9300 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9301
9302 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
9303 if (ret)
9304 goto cleanup_unpin;
9305
8980 intel_disable_fbc(dev); 9306 intel_disable_fbc(dev);
8981 intel_mark_fb_busy(obj, NULL); 9307 intel_mark_fb_busy(obj, NULL);
8982 mutex_unlock(&dev->struct_mutex); 9308 mutex_unlock(&dev->struct_mutex);
@@ -8985,6 +9311,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8985 9311
8986 return 0; 9312 return 0;
8987 9313
9314cleanup_unpin:
9315 intel_unpin_fb_obj(obj);
8988cleanup_pending: 9316cleanup_pending:
8989 atomic_dec(&intel_crtc->unpin_work_count); 9317 atomic_dec(&intel_crtc->unpin_work_count);
8990 crtc->primary->fb = old_fb; 9318 crtc->primary->fb = old_fb;
@@ -8997,7 +9325,7 @@ cleanup:
8997 intel_crtc->unpin_work = NULL; 9325 intel_crtc->unpin_work = NULL;
8998 spin_unlock_irqrestore(&dev->event_lock, flags); 9326 spin_unlock_irqrestore(&dev->event_lock, flags);
8999 9327
9000 drm_vblank_put(dev, intel_crtc->pipe); 9328 drm_crtc_vblank_put(crtc);
9001free_work: 9329free_work:
9002 kfree(work); 9330 kfree(work);
9003 9331
@@ -9040,8 +9368,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9040 to_intel_crtc(encoder->base.crtc); 9368 to_intel_crtc(encoder->base.crtc);
9041 } 9369 }
9042 9370
9043 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 9371 for_each_intel_crtc(dev, crtc) {
9044 base.head) {
9045 crtc->new_enabled = crtc->base.enabled; 9372 crtc->new_enabled = crtc->base.enabled;
9046 9373
9047 if (crtc->new_enabled) 9374 if (crtc->new_enabled)
@@ -9072,21 +9399,20 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
9072 encoder->base.crtc = &encoder->new_crtc->base; 9399 encoder->base.crtc = &encoder->new_crtc->base;
9073 } 9400 }
9074 9401
9075 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 9402 for_each_intel_crtc(dev, crtc) {
9076 base.head) {
9077 crtc->base.enabled = crtc->new_enabled; 9403 crtc->base.enabled = crtc->new_enabled;
9078 } 9404 }
9079} 9405}
9080 9406
9081static void 9407static void
9082connected_sink_compute_bpp(struct intel_connector * connector, 9408connected_sink_compute_bpp(struct intel_connector *connector,
9083 struct intel_crtc_config *pipe_config) 9409 struct intel_crtc_config *pipe_config)
9084{ 9410{
9085 int bpp = pipe_config->pipe_bpp; 9411 int bpp = pipe_config->pipe_bpp;
9086 9412
9087 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n", 9413
9088 connector->base.base.id, 9414 connector->base.base.id,
9089 drm_get_connector_name(&connector->base)); 9415 connector->base.name);
9090 9416
9091 /* Don't use an invalid EDID bpc value */ 9417 /* Don't use an invalid EDID bpc value */
9092 if (connector->base.display_info.bpc && 9418 if (connector->base.display_info.bpc &&
@@ -9427,8 +9753,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9427 } 9753 }
9428 9754
9429 /* Check for pipes that will be enabled/disabled ... */ 9755 /* Check for pipes that will be enabled/disabled ... */
9430 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9756 for_each_intel_crtc(dev, intel_crtc) {
9431 base.head) {
9432 if (intel_crtc->base.enabled == intel_crtc->new_enabled) 9757 if (intel_crtc->base.enabled == intel_crtc->new_enabled)
9433 continue; 9758 continue;
9434 9759
@@ -9501,8 +9826,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9501 intel_modeset_commit_output_state(dev); 9826 intel_modeset_commit_output_state(dev);
9502 9827
9503 /* Double check state. */ 9828 /* Double check state. */
9504 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9829 for_each_intel_crtc(dev, intel_crtc) {
9505 base.head) {
9506 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); 9830 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
9507 WARN_ON(intel_crtc->new_config && 9831 WARN_ON(intel_crtc->new_config &&
9508 intel_crtc->new_config != &intel_crtc->config); 9832 intel_crtc->new_config != &intel_crtc->config);
@@ -9631,6 +9955,12 @@ intel_pipe_config_compare(struct drm_device *dev,
9631 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); 9955 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
9632 9956
9633 PIPE_CONF_CHECK_I(pixel_multiplier); 9957 PIPE_CONF_CHECK_I(pixel_multiplier);
9958 PIPE_CONF_CHECK_I(has_hdmi_sink);
9959 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
9960 IS_VALLEYVIEW(dev))
9961 PIPE_CONF_CHECK_I(limited_color_range);
9962
9963 PIPE_CONF_CHECK_I(has_audio);
9634 9964
9635 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 9965 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9636 DRM_MODE_FLAG_INTERLACE); 9966 DRM_MODE_FLAG_INTERLACE);
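Reviewer note: for readers without the full file at hand, the PIPE_CONF_CHECK_I() used above is roughly the following comparison macro (simplified from memory; the real definition lives earlier in intel_pipe_config_compare() and also handles the _FLAGS and clock-fuzzy variants):

	#define PIPE_CONF_CHECK_I(name) \
		if (current_config->name != pipe_config->name) { \
			DRM_ERROR("mismatch in " #name " " \
				  "(expected %i, found %i)\n", \
				  current_config->name, \
				  pipe_config->name); \
			return false; \
		}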
@@ -9728,7 +10058,7 @@ check_encoder_state(struct drm_device *dev)
9728 10058
9729 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 10059 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
9730 encoder->base.base.id, 10060 encoder->base.base.id,
9731 drm_get_encoder_name(&encoder->base)); 10061 encoder->base.name);
9732 10062
9733 WARN(&encoder->new_crtc->base != encoder->base.crtc, 10063 WARN(&encoder->new_crtc->base != encoder->base.crtc,
9734 "encoder's stage crtc doesn't match current crtc\n"); 10064 "encoder's stage crtc doesn't match current crtc\n");
@@ -9780,8 +10110,7 @@ check_crtc_state(struct drm_device *dev)
9780 struct intel_encoder *encoder; 10110 struct intel_encoder *encoder;
9781 struct intel_crtc_config pipe_config; 10111 struct intel_crtc_config pipe_config;
9782 10112
9783 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 10113 for_each_intel_crtc(dev, crtc) {
9784 base.head) {
9785 bool enabled = false; 10114 bool enabled = false;
9786 bool active = false; 10115 bool active = false;
9787 10116
@@ -9870,8 +10199,7 @@ check_shared_dpll_state(struct drm_device *dev)
9870 "pll on state mismatch (expected %i, found %i)\n", 10199 "pll on state mismatch (expected %i, found %i)\n",
9871 pll->on, active); 10200 pll->on, active);
9872 10201
9873 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 10202 for_each_intel_crtc(dev, crtc) {
9874 base.head) {
9875 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) 10203 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
9876 enabled_crtcs++; 10204 enabled_crtcs++;
9877 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 10205 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
@@ -9911,6 +10239,44 @@ void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config
9911 pipe_config->adjusted_mode.crtc_clock, dotclock); 10239 pipe_config->adjusted_mode.crtc_clock, dotclock);
9912} 10240}
9913 10241
10242static void update_scanline_offset(struct intel_crtc *crtc)
10243{
10244 struct drm_device *dev = crtc->base.dev;
10245
10246 /*
10247 * The scanline counter increments at the leading edge of hsync.
10248 *
10249 * On most platforms it starts counting from vtotal-1 on the
10250 * first active line. That means the scanline counter value is
10251 * always one less than what we would expect. Ie. just after
10252 * start of vblank, which also occurs at start of hsync (on the
10253 * last active line), the scanline counter will read vblank_start-1.
10254 *
10255 * On gen2 the scanline counter starts counting from 1 instead
10256 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10257 * to keep the value positive), instead of adding one.
10258 *
10259 * On HSW+ the behaviour of the scanline counter depends on the output
10260 * type. For DP ports it behaves like most other platforms, but on HDMI
10261 * there's an extra 1 line difference. So we need to add two instead of
10262 * one to the value.
10263 */
10264 if (IS_GEN2(dev)) {
10265 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10266 int vtotal;
10267
10268 vtotal = mode->crtc_vtotal;
10269 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10270 vtotal /= 2;
10271
10272 crtc->scanline_offset = vtotal - 1;
10273 } else if (HAS_DDI(dev) &&
10274 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
10275 crtc->scanline_offset = 2;
10276 } else
10277 crtc->scanline_offset = 1;
10278}
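Reviewer note: the offset computed above is meant to be added (mod vtotal) to the raw hardware scanline counter wherever the driver needs the logical scanline. A hypothetical sketch of that consumer, with read_hw_scanline() standing in for the real register read:

	extern int read_hw_scanline(struct intel_crtc *crtc);

	static int logical_scanline_sketch(struct intel_crtc *crtc)
	{
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
		int position = read_hw_scanline(crtc);	/* raw hw counter value */

		/* fold the per-platform fudge back in, wrapping at vtotal */
		return (position + crtc->scanline_offset) % mode->crtc_vtotal;
	}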
10279
9914static int __intel_set_mode(struct drm_crtc *crtc, 10280static int __intel_set_mode(struct drm_crtc *crtc,
9915 struct drm_display_mode *mode, 10281 struct drm_display_mode *mode,
9916 int x, int y, struct drm_framebuffer *fb) 10282 int x, int y, struct drm_framebuffer *fb)
@@ -10002,15 +10368,38 @@ static int __intel_set_mode(struct drm_crtc *crtc,
10002 * on the DPLL. 10368 * on the DPLL.
10003 */ 10369 */
10004 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 10370 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10005 ret = intel_crtc_mode_set(&intel_crtc->base, 10371 struct drm_framebuffer *old_fb;
10006 x, y, fb); 10372
10373 mutex_lock(&dev->struct_mutex);
10374 ret = intel_pin_and_fence_fb_obj(dev,
10375 to_intel_framebuffer(fb)->obj,
10376 NULL);
10377 if (ret != 0) {
10378 DRM_ERROR("pin & fence failed\n");
10379 mutex_unlock(&dev->struct_mutex);
10380 goto done;
10381 }
10382 old_fb = crtc->primary->fb;
10383 if (old_fb)
10384 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
10385 mutex_unlock(&dev->struct_mutex);
10386
10387 crtc->primary->fb = fb;
10388 crtc->x = x;
10389 crtc->y = y;
10390
10391 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
10392 x, y, fb);
10007 if (ret) 10393 if (ret)
10008 goto done; 10394 goto done;
10009 } 10395 }
10010 10396
10011 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 10397 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10012 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) 10398 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10399 update_scanline_offset(intel_crtc);
10400
10013 dev_priv->display.crtc_enable(&intel_crtc->base); 10401 dev_priv->display.crtc_enable(&intel_crtc->base);
10402 }
10014 10403
10015 /* FIXME: add subpixel order */ 10404 /* FIXME: add subpixel order */
10016done: 10405done:
@@ -10086,7 +10475,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
10086 * restored, not the drivers personal bookkeeping. 10475 * restored, not the drivers personal bookkeeping.
10087 */ 10476 */
10088 count = 0; 10477 count = 0;
10089 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 10478 for_each_crtc(dev, crtc) {
10090 config->save_crtc_enabled[count++] = crtc->enabled; 10479 config->save_crtc_enabled[count++] = crtc->enabled;
10091 } 10480 }
10092 10481
@@ -10112,7 +10501,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
10112 int count; 10501 int count;
10113 10502
10114 count = 0; 10503 count = 0;
10115 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 10504 for_each_intel_crtc(dev, crtc) {
10116 crtc->new_enabled = config->save_crtc_enabled[count++]; 10505 crtc->new_enabled = config->save_crtc_enabled[count++];
10117 10506
10118 if (crtc->new_enabled) 10507 if (crtc->new_enabled)
@@ -10236,7 +10625,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10236 10625
10237 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 10626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
10238 connector->base.base.id, 10627 connector->base.base.id,
10239 drm_get_connector_name(&connector->base)); 10628 connector->base.name);
10240 } 10629 }
10241 10630
10242 10631
@@ -10271,7 +10660,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10271 10660
10272 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 10661 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
10273 connector->base.base.id, 10662 connector->base.base.id,
10274 drm_get_connector_name(&connector->base), 10663 connector->base.name,
10275 new_crtc->base.id); 10664 new_crtc->base.id);
10276 } 10665 }
10277 10666
@@ -10302,8 +10691,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10302 } 10691 }
10303 /* Now we've also updated encoder->new_crtc for all encoders. */ 10692 /* Now we've also updated encoder->new_crtc for all encoders. */
10304 10693
10305 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 10694 for_each_intel_crtc(dev, crtc) {
10306 base.head) {
10307 crtc->new_enabled = false; 10695 crtc->new_enabled = false;
10308 10696
10309 list_for_each_entry(encoder, 10697 list_for_each_entry(encoder,
@@ -10516,7 +10904,7 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
10516 struct intel_crtc *crtc; 10904 struct intel_crtc *crtc;
10517 10905
10518 /* Make sure no transcoder is still depending on us. */ 10906
10519 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 10907 for_each_intel_crtc(dev, crtc) {
10520 if (intel_crtc_to_shared_dpll(crtc) == pll) 10908 if (intel_crtc_to_shared_dpll(crtc) == pll)
10521 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 10909 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
10522 } 10910 }
@@ -10573,16 +10961,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10573 10961
10574 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 10962 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10575 10963
10576 if (IS_GEN2(dev)) {
10577 intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
10578 intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
10579 } else {
10580 intel_crtc->max_cursor_width = CURSOR_WIDTH;
10581 intel_crtc->max_cursor_height = CURSOR_HEIGHT;
10582 }
10583 dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
10584 dev->mode_config.cursor_height = intel_crtc->max_cursor_height;
10585
10586 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 10964 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10587 for (i = 0; i < 256; i++) { 10965 for (i = 0; i < 256; i++) {
10588 intel_crtc->lut_r[i] = i; 10966 intel_crtc->lut_r[i] = i;
@@ -10601,19 +10979,27 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10601 intel_crtc->plane = !pipe; 10979 intel_crtc->plane = !pipe;
10602 } 10980 }
10603 10981
10982 intel_crtc->cursor_base = ~0;
10983 intel_crtc->cursor_cntl = ~0;
10984
10985 init_waitqueue_head(&intel_crtc->vbl_wait);
10986
10604 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 10987 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
10605 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 10988 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
10606 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 10989 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10607 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 10990 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
10608 10991
10609 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 10992 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10993
10994 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
10610} 10995}
10611 10996
10612enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) 10997enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
10613{ 10998{
10614 struct drm_encoder *encoder = connector->base.encoder; 10999 struct drm_encoder *encoder = connector->base.encoder;
11000 struct drm_device *dev = connector->base.dev;
10615 11001
10616 WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex)); 11002 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
10617 11003
10618 if (!encoder) 11004 if (!encoder)
10619 return INVALID_PIPE; 11005 return INVALID_PIPE;
@@ -10709,7 +11095,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10709 11095
10710 intel_lvds_init(dev); 11096 intel_lvds_init(dev);
10711 11097
10712 if (!IS_ULT(dev)) 11098 if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
10713 intel_crt_init(dev); 11099 intel_crt_init(dev);
10714 11100
10715 if (HAS_DDI(dev)) { 11101 if (HAS_DDI(dev)) {
@@ -10773,6 +11159,15 @@ static void intel_setup_outputs(struct drm_device *dev)
10773 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 11159 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
10774 } 11160 }
10775 11161
11162 if (IS_CHERRYVIEW(dev)) {
11163 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
11164 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
11165 PORT_D);
11166 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
11167 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
11168 }
11169 }
11170
10776 intel_dsi_init(dev); 11171 intel_dsi_init(dev);
10777 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 11172 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
10778 bool found = false; 11173 bool found = false;
@@ -11002,6 +11397,8 @@ static void intel_init_display(struct drm_device *dev)
11002 11397
11003 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) 11398 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11004 dev_priv->display.find_dpll = g4x_find_best_dpll; 11399 dev_priv->display.find_dpll = g4x_find_best_dpll;
11400 else if (IS_CHERRYVIEW(dev))
11401 dev_priv->display.find_dpll = chv_find_best_dpll;
11005 else if (IS_VALLEYVIEW(dev)) 11402 else if (IS_VALLEYVIEW(dev))
11006 dev_priv->display.find_dpll = vlv_find_best_dpll; 11403 dev_priv->display.find_dpll = vlv_find_best_dpll;
11007 else if (IS_PINEVIEW(dev)) 11404 else if (IS_PINEVIEW(dev))
@@ -11083,6 +11480,8 @@ static void intel_init_display(struct drm_device *dev)
11083 } else if (IS_GEN6(dev)) { 11480 } else if (IS_GEN6(dev)) {
11084 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 11481 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
11085 dev_priv->display.write_eld = ironlake_write_eld; 11482 dev_priv->display.write_eld = ironlake_write_eld;
11483 dev_priv->display.modeset_global_resources =
11484 snb_modeset_global_resources;
11086 } else if (IS_IVYBRIDGE(dev)) { 11485 } else if (IS_IVYBRIDGE(dev)) {
11087 /* FIXME: detect B0+ stepping and use auto training */ 11486 /* FIXME: detect B0+ stepping and use auto training */
11088 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 11487 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
@@ -11211,9 +11610,6 @@ static struct intel_quirk intel_quirks[] = {
11211 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 11610 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
11212 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 11611 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
11213 11612
11214 /* 830 needs to leave pipe A & dpll A up */
11215 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
11216
11217 /* Lenovo U160 cannot use SSC on LVDS */ 11613 /* Lenovo U160 cannot use SSC on LVDS */
11218 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 11614 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
11219 11615
@@ -11287,9 +11683,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
11287 11683
11288 intel_reset_dpio(dev); 11684 intel_reset_dpio(dev);
11289 11685
11290 mutex_lock(&dev->struct_mutex);
11291 intel_enable_gt_powersave(dev); 11686 intel_enable_gt_powersave(dev);
11292 mutex_unlock(&dev->struct_mutex);
11293} 11687}
11294 11688
11295void intel_modeset_suspend_hw(struct drm_device *dev) 11689void intel_modeset_suspend_hw(struct drm_device *dev)
@@ -11333,6 +11727,15 @@ void intel_modeset_init(struct drm_device *dev)
11333 dev->mode_config.max_width = 8192; 11727 dev->mode_config.max_width = 8192;
11334 dev->mode_config.max_height = 8192; 11728 dev->mode_config.max_height = 8192;
11335 } 11729 }
11730
11731 if (IS_GEN2(dev)) {
11732 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
11733 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
11734 } else {
11735 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
11736 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
11737 }
11738
11336 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 11739 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
11337 11740
11338 DRM_DEBUG_KMS("%d display pipe%s available.\n", 11741 DRM_DEBUG_KMS("%d display pipe%s available.\n",
@@ -11362,12 +11765,11 @@ void intel_modeset_init(struct drm_device *dev)
11362 /* Just in case the BIOS is doing something questionable. */ 11765 /* Just in case the BIOS is doing something questionable. */
11363 intel_disable_fbc(dev); 11766 intel_disable_fbc(dev);
11364 11767
11365 mutex_lock(&dev->mode_config.mutex); 11768 drm_modeset_lock_all(dev);
11366 intel_modeset_setup_hw_state(dev, false); 11769 intel_modeset_setup_hw_state(dev, false);
11367 mutex_unlock(&dev->mode_config.mutex); 11770 drm_modeset_unlock_all(dev);
11368 11771
11369 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11772 for_each_intel_crtc(dev, crtc) {
11370 base.head) {
11371 if (!crtc->active) 11773 if (!crtc->active)
11372 continue; 11774 continue;
11373 11775
@@ -11395,6 +11797,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
11395 struct intel_connector *connector; 11797 struct intel_connector *connector;
11396 struct drm_connector *crt = NULL; 11798 struct drm_connector *crt = NULL;
11397 struct intel_load_detect_pipe load_detect_temp; 11799 struct intel_load_detect_pipe load_detect_temp;
11800 struct drm_modeset_acquire_ctx ctx;
11398 11801
11399 /* We can't just switch on the pipe A, we need to set things up with a 11802 /* We can't just switch on the pipe A, we need to set things up with a
11400 * proper mode and output configuration. As a gross hack, enable pipe A 11803 * proper mode and output configuration. As a gross hack, enable pipe A
@@ -11411,8 +11814,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
11411 if (!crt) 11814 if (!crt)
11412 return; 11815 return;
11413 11816
11414 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) 11817 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
11415 intel_release_load_detect_pipe(crt, &load_detect_temp); 11818 intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
11416 11819
11417 11820
11418} 11821}
@@ -11447,6 +11850,12 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11447 reg = PIPECONF(crtc->config.cpu_transcoder); 11850 reg = PIPECONF(crtc->config.cpu_transcoder);
11448 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 11851 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
11449 11852
11853 /* restore vblank interrupts to correct state */
11854 if (crtc->active)
11855 drm_vblank_on(dev, crtc->pipe);
11856 else
11857 drm_vblank_off(dev, crtc->pipe);
11858
11450 /* We need to sanitize the plane -> pipe mapping first because this will 11859 /* We need to sanitize the plane -> pipe mapping first because this will
11451 * disable the crtc (and hence change the state) if it is wrong. Note 11860 * disable the crtc (and hence change the state) if it is wrong. Note
11452 * that gen4+ has a fixed plane -> pipe mapping. */ 11861 * that gen4+ has a fixed plane -> pipe mapping. */
@@ -11525,16 +11934,25 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11525 encoder->base.crtc = NULL; 11934 encoder->base.crtc = NULL;
11526 } 11935 }
11527 } 11936 }
11528 if (crtc->active) { 11937
11938 if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
11529 /* 11939 /*
11530 * We start out with underrun reporting disabled to avoid races. 11940 * We start out with underrun reporting disabled to avoid races.
11531 * For correct bookkeeping mark this on active crtcs. 11941 * For correct bookkeeping mark this on active crtcs.
11532 * 11942 *
11943 * Also on gmch platforms we don't have any hardware bits to
11944 * disable the underrun reporting, which means we need to start
11945 * out with underrun reporting disabled also on inactive pipes,
11946 * since otherwise we'll complain about the garbage we read when
11947 * e.g. coming up after runtime pm.
11948 *
11533 * No protection against concurrent access is required - at 11949 * No protection against concurrent access is required - at
11534 * worst a fifo underrun happens which also sets this to false. 11950 * worst a fifo underrun happens which also sets this to false.
11535 */ 11951 */
11536 crtc->cpu_fifo_underrun_disabled = true; 11952 crtc->cpu_fifo_underrun_disabled = true;
11537 crtc->pch_fifo_underrun_disabled = true; 11953 crtc->pch_fifo_underrun_disabled = true;
11954
11955 update_scanline_offset(crtc);
11538 } 11956 }
11539} 11957}
11540 11958
@@ -11552,7 +11970,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11552 if (encoder->connectors_active && !has_active_crtc) { 11970 if (encoder->connectors_active && !has_active_crtc) {
11553 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 11971 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
11554 encoder->base.base.id, 11972 encoder->base.base.id,
11555 drm_get_encoder_name(&encoder->base)); 11973 encoder->base.name);
11556 11974
11557 /* Connector is active, but has no active pipe. This is 11975 /* Connector is active, but has no active pipe. This is
11558 * fallout from our resume register restoring. Disable 11976 * fallout from our resume register restoring. Disable
@@ -11560,7 +11978,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11560 if (encoder->base.crtc) { 11978 if (encoder->base.crtc) {
11561 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 11979 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
11562 encoder->base.base.id, 11980 encoder->base.base.id,
11563 drm_get_encoder_name(&encoder->base)); 11981 encoder->base.name);
11564 encoder->disable(encoder); 11982 encoder->disable(encoder);
11565 } 11983 }
11566 encoder->base.crtc = NULL; 11984 encoder->base.crtc = NULL;
@@ -11611,6 +12029,16 @@ void i915_redisable_vga(struct drm_device *dev)
11611 i915_redisable_vga_power_on(dev); 12029 i915_redisable_vga_power_on(dev);
11612} 12030}
11613 12031
12032static bool primary_get_hw_state(struct intel_crtc *crtc)
12033{
12034 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12035
12036 if (!crtc->active)
12037 return false;
12038
12039 return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12040}
12041
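The new primary_get_hw_state() helper reads DISPLAY_PLANE_ENABLE back from DSPCNTR instead of assuming the primary plane is on whenever the pipe is: firmware can hand over an active pipe whose primary plane is disabled, and the state readout below now records that in primary_enabled.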
11614static void intel_modeset_readout_hw_state(struct drm_device *dev) 12042static void intel_modeset_readout_hw_state(struct drm_device *dev)
11615{ 12043{
11616 struct drm_i915_private *dev_priv = dev->dev_private; 12044 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11620,8 +12048,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11620 struct intel_connector *connector; 12048 struct intel_connector *connector;
11621 int i; 12049 int i;
11622 12050
11623 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 12051 for_each_intel_crtc(dev, crtc) {
11624 base.head) {
11625 memset(&crtc->config, 0, sizeof(crtc->config)); 12052 memset(&crtc->config, 0, sizeof(crtc->config));
11626 12053
11627 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; 12054 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
@@ -11630,7 +12057,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11630 &crtc->config); 12057 &crtc->config);
11631 12058
11632 crtc->base.enabled = crtc->active; 12059 crtc->base.enabled = crtc->active;
11633 crtc->primary_enabled = crtc->active; 12060 crtc->primary_enabled = primary_get_hw_state(crtc);
11634 12061
11635 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 12062 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
11636 crtc->base.base.id, 12063 crtc->base.base.id,
@@ -11646,8 +12073,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11646 12073
11647 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); 12074 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
11648 pll->active = 0; 12075 pll->active = 0;
11649 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 12076 for_each_intel_crtc(dev, crtc) {
11650 base.head) {
11651 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 12077 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
11652 pll->active++; 12078 pll->active++;
11653 } 12079 }
@@ -11672,7 +12098,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11672 encoder->connectors_active = false; 12098 encoder->connectors_active = false;
11673 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 12099 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
11674 encoder->base.base.id, 12100 encoder->base.base.id,
11675 drm_get_encoder_name(&encoder->base), 12101 encoder->base.name,
11676 encoder->base.crtc ? "enabled" : "disabled", 12102 encoder->base.crtc ? "enabled" : "disabled",
11677 pipe_name(pipe)); 12103 pipe_name(pipe));
11678 } 12104 }
@@ -11689,7 +12115,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11689 } 12115 }
11690 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 12116 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
11691 connector->base.base.id, 12117 connector->base.base.id,
11692 drm_get_connector_name(&connector->base), 12118 connector->base.name,
11693 connector->base.encoder ? "enabled" : "disabled"); 12119 connector->base.encoder ? "enabled" : "disabled");
11694 } 12120 }
11695} 12121}
@@ -11712,8 +12138,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11712 * Note that this could go away if we move to using crtc_config 12138 * Note that this could go away if we move to using crtc_config
11713 * checking everywhere. 12139 * checking everywhere.
11714 */ 12140 */
11715 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 12141 for_each_intel_crtc(dev, crtc) {
11716 base.head) {
11717 if (crtc->active && i915.fastboot) { 12142 if (crtc->active && i915.fastboot) {
11718 intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); 12143 intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
11719 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 12144 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@ -11789,7 +12214,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
11789 * for this. 12214 * for this.
11790 */ 12215 */
11791 mutex_lock(&dev->struct_mutex); 12216 mutex_lock(&dev->struct_mutex);
11792 list_for_each_entry(c, &dev->mode_config.crtc_list, head) { 12217 for_each_crtc(dev, c) {
11793 if (!c->primary->fb) 12218 if (!c->primary->fb)
11794 continue; 12219 continue;
11795 12220
@@ -11835,7 +12260,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
11835 12260
11836 intel_unregister_dsm_handler(); 12261 intel_unregister_dsm_handler();
11837 12262
11838 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 12263 for_each_crtc(dev, crtc) {
11839 /* Skip inactive CRTCs */ 12264 /* Skip inactive CRTCs */
11840 if (!crtc->primary->fb) 12265 if (!crtc->primary->fb)
11841 continue; 12266 continue;
@@ -11933,6 +12358,7 @@ struct intel_display_error_state {
11933 struct intel_pipe_error_state { 12358 struct intel_pipe_error_state {
11934 bool power_domain_on; 12359 bool power_domain_on;
11935 u32 source; 12360 u32 source;
12361 u32 stat;
11936 } pipe[I915_MAX_PIPES]; 12362 } pipe[I915_MAX_PIPES];
11937 12363
11938 struct intel_plane_error_state { 12364 struct intel_plane_error_state {
@@ -11990,15 +12416,9 @@ intel_display_capture_error_state(struct drm_device *dev)
11990 if (!error->pipe[i].power_domain_on) 12416 if (!error->pipe[i].power_domain_on)
11991 continue; 12417 continue;
11992 12418
11993 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 12419 error->cursor[i].control = I915_READ(CURCNTR(i));
11994 error->cursor[i].control = I915_READ(CURCNTR(i)); 12420 error->cursor[i].position = I915_READ(CURPOS(i));
11995 error->cursor[i].position = I915_READ(CURPOS(i)); 12421 error->cursor[i].base = I915_READ(CURBASE(i));
11996 error->cursor[i].base = I915_READ(CURBASE(i));
11997 } else {
11998 error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
11999 error->cursor[i].position = I915_READ(CURPOS_IVB(i));
12000 error->cursor[i].base = I915_READ(CURBASE_IVB(i));
12001 }
12002 12422
12003 error->plane[i].control = I915_READ(DSPCNTR(i)); 12423 error->plane[i].control = I915_READ(DSPCNTR(i));
12004 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 12424 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
@@ -12014,6 +12434,9 @@ intel_display_capture_error_state(struct drm_device *dev)
12014 } 12434 }
12015 12435
12016 error->pipe[i].source = I915_READ(PIPESRC(i)); 12436 error->pipe[i].source = I915_READ(PIPESRC(i));
12437
12438 if (!HAS_PCH_SPLIT(dev))
12439 error->pipe[i].stat = I915_READ(PIPESTAT(i));
12017 } 12440 }
12018 12441
12019 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 12442 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
@@ -12064,6 +12487,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
12064 err_printf(m, " Power: %s\n", 12487 err_printf(m, " Power: %s\n",
12065 error->pipe[i].power_domain_on ? "on" : "off"); 12488 error->pipe[i].power_domain_on ? "on" : "off");
12066 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 12489 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
12490 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
12067 12491
12068 err_printf(m, "Plane [%d]:\n", i); 12492 err_printf(m, "Plane [%d]:\n", i);
12069 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 12493 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2a00cb828d20..52fda950fd2a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -64,6 +64,24 @@ static const struct dp_link_dpll vlv_dpll[] = {
64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65}; 65};
66 66
67/*
68 * CHV supports eDP 1.4 that have more link rates.
69 * Below only provides the fixed rate but exclude variable rate.
70 */
71static const struct dp_link_dpll chv_dpll[] = {
72 /*
 73 * CHV requires programming fractional division for m2.
 74 * m2 is stored in fixed-point format using the formula below:
75 * (m2_int << 22) | m2_fraction
76 */
77 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
78 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
79 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
80 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
81 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
82 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
83};
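Worked example of the fixed-point encoding above (plain arithmetic, not part of the diff): for DP_LINK_BW_1_62, m2_int = 32 and m2_fraction = 1677722, so (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a, i.e. an effective m2 of 32 + 1677722/2^22, about 32.4. The 2.7 and 5.4 GHz entries use m2_fraction = 0, giving (27 << 22) = 0x6c00000.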
84
67/** 85/**
68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 86 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
69 * @intel_dp: DP struct 87 * @intel_dp: DP struct
@@ -330,8 +348,12 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
330{ 348{
331 struct drm_device *dev = intel_dp_to_dev(intel_dp); 349 struct drm_device *dev = intel_dp_to_dev(intel_dp);
332 struct drm_i915_private *dev_priv = dev->dev_private; 350 struct drm_i915_private *dev_priv = dev->dev_private;
351 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
352 struct intel_encoder *intel_encoder = &intel_dig_port->base;
353 enum intel_display_power_domain power_domain;
333 354
334 return !dev_priv->pm.suspended && 355 power_domain = intel_display_port_power_domain(intel_encoder);
356 return intel_display_power_enabled(dev_priv, power_domain) &&
335 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 357 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
336} 358}
337 359
@@ -697,9 +719,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
697 DRM_DEBUG_KMS("registering %s bus for %s\n", name, 719 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
698 connector->base.kdev->kobj.name); 720 connector->base.kdev->kobj.name);
699 721
700 ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux); 722 ret = drm_dp_aux_register(&intel_dp->aux);
701 if (ret < 0) { 723 if (ret < 0) {
702 DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n", 724 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
703 name, ret); 725 name, ret);
704 return; 726 return;
705 } 727 }
@@ -709,7 +731,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
709 intel_dp->aux.ddc.dev.kobj.name); 731 intel_dp->aux.ddc.dev.kobj.name);
710 if (ret < 0) { 732 if (ret < 0) {
711 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); 733 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
712 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); 734 drm_dp_aux_unregister(&intel_dp->aux);
713 } 735 }
714} 736}
715 737
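drm_dp_aux_register() is the renamed drm_dp_aux_register_i2c_bus(); it still creates the backing i2c-over-AUX adapter. A minimal sketch of what a driver feeds it (hypothetical transfer callback name, assuming the struct drm_dp_aux fields of this kernel's drm_dp_helper):

        static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
                                       struct drm_dp_aux_msg *msg)
        {
                /* perform the AUX transaction described by msg and
                 * return the number of bytes transferred */
                return msg->size;
        }

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = my_aux_transfer;
        ret = drm_dp_aux_register(&intel_dp->aux);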
@@ -739,6 +761,9 @@ intel_dp_set_clock(struct intel_encoder *encoder,
739 } else if (HAS_PCH_SPLIT(dev)) { 761 } else if (HAS_PCH_SPLIT(dev)) {
740 divisor = pch_dpll; 762 divisor = pch_dpll;
741 count = ARRAY_SIZE(pch_dpll); 763 count = ARRAY_SIZE(pch_dpll);
764 } else if (IS_CHERRYVIEW(dev)) {
765 divisor = chv_dpll;
766 count = ARRAY_SIZE(chv_dpll);
742 } else if (IS_VALLEYVIEW(dev)) { 767 } else if (IS_VALLEYVIEW(dev)) {
743 divisor = vlv_dpll; 768 divisor = vlv_dpll;
744 count = ARRAY_SIZE(vlv_dpll); 769 count = ARRAY_SIZE(vlv_dpll);
@@ -755,6 +780,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
755 } 780 }
756} 781}
757 782
783static void
784intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
785{
786 struct drm_device *dev = crtc->base.dev;
787 struct drm_i915_private *dev_priv = dev->dev_private;
788 enum transcoder transcoder = crtc->config.cpu_transcoder;
789
790 I915_WRITE(PIPE_DATA_M2(transcoder),
791 TU_SIZE(m_n->tu) | m_n->gmch_m);
792 I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
793 I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
794 I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
795}
796
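intel_dp_set_m2_n2() programs the pipe's second M/N register set; the hardware can then flip between dp_m_n and dp_m2_n2 (the latter computed from the panel's downclock mode in intel_dp_compute_config() below) without retraining the link, since link M/N only encodes the pixel-clock to link-clock ratio.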
758bool 797bool
759intel_dp_compute_config(struct intel_encoder *encoder, 798intel_dp_compute_config(struct intel_encoder *encoder,
760 struct intel_crtc_config *pipe_config) 799 struct intel_crtc_config *pipe_config)
@@ -780,6 +819,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
780 pipe_config->has_pch_encoder = true; 819 pipe_config->has_pch_encoder = true;
781 820
782 pipe_config->has_dp_encoder = true; 821 pipe_config->has_dp_encoder = true;
822 pipe_config->has_audio = intel_dp->has_audio;
783 823
784 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 824 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
785 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 825 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -880,6 +920,14 @@ found:
880 pipe_config->port_clock, 920 pipe_config->port_clock,
881 &pipe_config->dp_m_n); 921 &pipe_config->dp_m_n);
882 922
923 if (intel_connector->panel.downclock_mode != NULL &&
924 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
925 intel_link_compute_m_n(bpp, lane_count,
926 intel_connector->panel.downclock_mode->clock,
927 pipe_config->port_clock,
928 &pipe_config->dp_m2_n2);
929 }
930
883 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 931 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
884 932
885 return true; 933 return true;
@@ -915,7 +963,7 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
915 udelay(500); 963 udelay(500);
916} 964}
917 965
918static void intel_dp_mode_set(struct intel_encoder *encoder) 966static void intel_dp_prepare(struct intel_encoder *encoder)
919{ 967{
920 struct drm_device *dev = encoder->base.dev; 968 struct drm_device *dev = encoder->base.dev;
921 struct drm_i915_private *dev_priv = dev->dev_private; 969 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -950,7 +998,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
950 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 998 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
951 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 999 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
952 1000
953 if (intel_dp->has_audio) { 1001 if (crtc->config.has_audio) {
954 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 1002 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
955 pipe_name(crtc->pipe)); 1003 pipe_name(crtc->pipe));
956 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 1004 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -983,14 +1031,15 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
983 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1031 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
984 intel_dp->DP |= DP_ENHANCED_FRAMING; 1032 intel_dp->DP |= DP_ENHANCED_FRAMING;
985 1033
986 if (crtc->pipe == 1) 1034 if (!IS_CHERRYVIEW(dev)) {
987 intel_dp->DP |= DP_PIPEB_SELECT; 1035 if (crtc->pipe == 1)
1036 intel_dp->DP |= DP_PIPEB_SELECT;
1037 } else {
1038 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1039 }
988 } else { 1040 } else {
989 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1041 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
990 } 1042 }
991
992 if (port == PORT_A && !IS_VALLEYVIEW(dev))
993 ironlake_set_pll_cpu_edp(intel_dp);
994} 1043}
995 1044
996#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1045#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1082,7 +1131,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1082static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) 1131static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1083{ 1132{
1084 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1133 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1134 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1135 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1085 struct drm_i915_private *dev_priv = dev->dev_private; 1136 struct drm_i915_private *dev_priv = dev->dev_private;
1137 enum intel_display_power_domain power_domain;
1086 u32 pp; 1138 u32 pp;
1087 u32 pp_stat_reg, pp_ctrl_reg; 1139 u32 pp_stat_reg, pp_ctrl_reg;
1088 bool need_to_disable = !intel_dp->want_panel_vdd; 1140 bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -1095,7 +1147,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1095 if (edp_have_panel_vdd(intel_dp)) 1147 if (edp_have_panel_vdd(intel_dp))
1096 return need_to_disable; 1148 return need_to_disable;
1097 1149
1098 intel_runtime_pm_get(dev_priv); 1150 power_domain = intel_display_port_power_domain(intel_encoder);
1151 intel_display_power_get(dev_priv, power_domain);
1099 1152
1100 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1153 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1101 1154
@@ -1139,9 +1192,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1139 u32 pp; 1192 u32 pp;
1140 u32 pp_stat_reg, pp_ctrl_reg; 1193 u32 pp_stat_reg, pp_ctrl_reg;
1141 1194
1142 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1195 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1143 1196
1144 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { 1197 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1198 struct intel_digital_port *intel_dig_port =
1199 dp_to_dig_port(intel_dp);
1200 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1201 enum intel_display_power_domain power_domain;
1202
1145 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1203 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1146 1204
1147 pp = ironlake_get_pp_control(intel_dp); 1205 pp = ironlake_get_pp_control(intel_dp);
@@ -1160,7 +1218,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1160 if ((pp & POWER_TARGET_ON) == 0) 1218 if ((pp & POWER_TARGET_ON) == 0)
1161 intel_dp->last_power_cycle = jiffies; 1219 intel_dp->last_power_cycle = jiffies;
1162 1220
1163 intel_runtime_pm_put(dev_priv); 1221 power_domain = intel_display_port_power_domain(intel_encoder);
1222 intel_display_power_put(dev_priv, power_domain);
1164 } 1223 }
1165} 1224}
1166 1225
@@ -1170,9 +1229,9 @@ static void edp_panel_vdd_work(struct work_struct *__work)
1170 struct intel_dp, panel_vdd_work); 1229 struct intel_dp, panel_vdd_work);
1171 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1230 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1172 1231
1173 mutex_lock(&dev->mode_config.mutex); 1232 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1174 edp_panel_vdd_off_sync(intel_dp); 1233 edp_panel_vdd_off_sync(intel_dp);
1175 mutex_unlock(&dev->mode_config.mutex); 1234 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1176} 1235}
1177 1236
1178static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1237static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -1244,8 +1303,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1244 1303
1245void intel_edp_panel_off(struct intel_dp *intel_dp) 1304void intel_edp_panel_off(struct intel_dp *intel_dp)
1246{ 1305{
1306 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1307 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1247 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1308 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1248 struct drm_i915_private *dev_priv = dev->dev_private; 1309 struct drm_i915_private *dev_priv = dev->dev_private;
1310 enum intel_display_power_domain power_domain;
1249 u32 pp; 1311 u32 pp;
1250 u32 pp_ctrl_reg; 1312 u32 pp_ctrl_reg;
1251 1313
@@ -1275,7 +1337,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1275 wait_panel_off(intel_dp); 1337 wait_panel_off(intel_dp);
1276 1338
1277 /* We got a reference when we enabled the VDD. */ 1339 /* We got a reference when we enabled the VDD. */
1278 intel_runtime_pm_put(dev_priv); 1340 power_domain = intel_display_port_power_domain(intel_encoder);
1341 intel_display_power_put(dev_priv, power_domain);
1279} 1342}
1280 1343
1281void intel_edp_backlight_on(struct intel_dp *intel_dp) 1344void intel_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1432,6 +1495,8 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1432 1495
1433 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1496 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1434 *pipe = PORT_TO_PIPE_CPT(tmp); 1497 *pipe = PORT_TO_PIPE_CPT(tmp);
1498 } else if (IS_CHERRYVIEW(dev)) {
1499 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
1435 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { 1500 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1436 *pipe = PORT_TO_PIPE(tmp); 1501 *pipe = PORT_TO_PIPE(tmp);
1437 } else { 1502 } else {
@@ -1479,8 +1544,11 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1479 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1544 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1480 int dotclock; 1545 int dotclock;
1481 1546
1547 tmp = I915_READ(intel_dp->output_reg);
1548 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
1549 pipe_config->has_audio = true;
1550
1482 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 1551 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1483 tmp = I915_READ(intel_dp->output_reg);
1484 if (tmp & DP_SYNC_HS_HIGH) 1552 if (tmp & DP_SYNC_HS_HIGH)
1485 flags |= DRM_MODE_FLAG_PHSYNC; 1553 flags |= DRM_MODE_FLAG_PHSYNC;
1486 else 1554 else
@@ -1816,17 +1884,59 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1816 intel_dp_link_down(intel_dp); 1884 intel_dp_link_down(intel_dp);
1817} 1885}
1818 1886
1819static void intel_post_disable_dp(struct intel_encoder *encoder) 1887static void g4x_post_disable_dp(struct intel_encoder *encoder)
1820{ 1888{
1821 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1889 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1822 enum port port = dp_to_dig_port(intel_dp)->port; 1890 enum port port = dp_to_dig_port(intel_dp)->port;
1891
1892 if (port != PORT_A)
1893 return;
1894
1895 intel_dp_link_down(intel_dp);
1896 ironlake_edp_pll_off(intel_dp);
1897}
1898
1899static void vlv_post_disable_dp(struct intel_encoder *encoder)
1900{
1901 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1902
1903 intel_dp_link_down(intel_dp);
1904}
1905
1906static void chv_post_disable_dp(struct intel_encoder *encoder)
1907{
1908 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1909 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1823 struct drm_device *dev = encoder->base.dev; 1910 struct drm_device *dev = encoder->base.dev;
1911 struct drm_i915_private *dev_priv = dev->dev_private;
1912 struct intel_crtc *intel_crtc =
1913 to_intel_crtc(encoder->base.crtc);
1914 enum dpio_channel ch = vlv_dport_to_channel(dport);
1915 enum pipe pipe = intel_crtc->pipe;
1916 u32 val;
1824 1917
1825 if (port == PORT_A || IS_VALLEYVIEW(dev)) { 1918 intel_dp_link_down(intel_dp);
1826 intel_dp_link_down(intel_dp); 1919
1827 if (!IS_VALLEYVIEW(dev)) 1920 mutex_lock(&dev_priv->dpio_lock);
1828 ironlake_edp_pll_off(intel_dp); 1921
1829 } 1922 /* Propagate soft reset to data lane reset */
1923 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1924 val |= CHV_PCS_REQ_SOFTRESET_EN;
1925 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1926
1927 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1928 val |= CHV_PCS_REQ_SOFTRESET_EN;
1929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1930
1931 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1932 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1933 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1934
1935 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1936 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1937 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1938
1939 mutex_unlock(&dev_priv->dpio_lock);
1830} 1940}
1831 1941
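The four PCS accesses above share one read-modify-write pattern; a hypothetical helper (not in this diff) makes it explicit:

        /* convenience wrapper; caller must hold dpio_lock */
        static void chv_dpio_rmw(struct drm_i915_private *dev_priv,
                                 enum pipe pipe, u32 reg, u32 clear, u32 set)
        {
                u32 val = vlv_dpio_read(dev_priv, pipe, reg);

                val &= ~clear;
                val |= set;
                vlv_dpio_write(dev_priv, pipe, reg, val);
        }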
1832static void intel_enable_dp(struct intel_encoder *encoder) 1942static void intel_enable_dp(struct intel_encoder *encoder)
@@ -1868,8 +1978,13 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1868 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1978 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1869 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1979 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1870 1980
1871 if (dport->port == PORT_A) 1981 intel_dp_prepare(encoder);
1982
1983 /* Only ilk+ has port A */
1984 if (dport->port == PORT_A) {
1985 ironlake_set_pll_cpu_edp(intel_dp);
1872 ironlake_edp_pll_on(intel_dp); 1986 ironlake_edp_pll_on(intel_dp);
1987 }
1873} 1988}
1874 1989
1875static void vlv_pre_enable_dp(struct intel_encoder *encoder) 1990static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -1921,6 +2036,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
1921 enum dpio_channel port = vlv_dport_to_channel(dport); 2036 enum dpio_channel port = vlv_dport_to_channel(dport);
1922 int pipe = intel_crtc->pipe; 2037 int pipe = intel_crtc->pipe;
1923 2038
2039 intel_dp_prepare(encoder);
2040
1924 /* Program Tx lane resets to default */ 2041 /* Program Tx lane resets to default */
1925 mutex_lock(&dev_priv->dpio_lock); 2042 mutex_lock(&dev_priv->dpio_lock);
1926 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 2043 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
@@ -1939,6 +2056,69 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
1939 mutex_unlock(&dev_priv->dpio_lock); 2056 mutex_unlock(&dev_priv->dpio_lock);
1940} 2057}
1941 2058
2059static void chv_pre_enable_dp(struct intel_encoder *encoder)
2060{
2061 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2062 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2063 struct drm_device *dev = encoder->base.dev;
2064 struct drm_i915_private *dev_priv = dev->dev_private;
2065 struct edp_power_seq power_seq;
2066 struct intel_crtc *intel_crtc =
2067 to_intel_crtc(encoder->base.crtc);
2068 enum dpio_channel ch = vlv_dport_to_channel(dport);
2069 int pipe = intel_crtc->pipe;
2070 int data, i;
2071 u32 val;
2072
2073 mutex_lock(&dev_priv->dpio_lock);
2074
 2075 /* Deassert soft data lane reset */
2076 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2077 val |= CHV_PCS_REQ_SOFTRESET_EN;
2078 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2079
2080 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2081 val |= CHV_PCS_REQ_SOFTRESET_EN;
2082 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2083
2084 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2085 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2086 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2087
2088 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2089 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2090 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2091
 2092 /* Program Tx lane latency optimal setting */
2093 for (i = 0; i < 4; i++) {
2094 /* Set the latency optimal bit */
2095 data = (i == 1) ? 0x0 : 0x6;
2096 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2097 data << DPIO_FRC_LATENCY_SHFIT);
2098
2099 /* Set the upar bit */
2100 data = (i == 1) ? 0x0 : 0x1;
2101 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2102 data << DPIO_UPAR_SHIFT);
2103 }
2104
2105 /* Data lane stagger programming */
2106 /* FIXME: Fix up value only after power analysis */
2107
2108 mutex_unlock(&dev_priv->dpio_lock);
2109
2110 if (is_edp(intel_dp)) {
2111 /* init power sequencer on this pipe and port */
2112 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2113 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2114 &power_seq);
2115 }
2116
2117 intel_enable_dp(encoder);
2118
2119 vlv_wait_port_ready(dev_priv, dport);
2120}
2121
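The CHV enable path mirrors the teardown above in reverse: deassert the per-lane soft resets, program the per-lane latency and upar bits, initialize the panel power sequencer for eDP, and only then enable the port and wait for it to come ready.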
1942/* 2122/*
1943 * Native read with retry for link status and receiver capability reads for 2123 * Native read with retry for link status and receiver capability reads for
1944 * cases where the sink may still be asleep. 2124 * cases where the sink may still be asleep.
@@ -2163,6 +2343,166 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2163 return 0; 2343 return 0;
2164} 2344}
2165 2345
2346static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2347{
2348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2349 struct drm_i915_private *dev_priv = dev->dev_private;
2350 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2351 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2352 u32 deemph_reg_value, margin_reg_value, val;
2353 uint8_t train_set = intel_dp->train_set[0];
2354 enum dpio_channel ch = vlv_dport_to_channel(dport);
2355 enum pipe pipe = intel_crtc->pipe;
2356 int i;
2357
2358 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2359 case DP_TRAIN_PRE_EMPHASIS_0:
2360 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2361 case DP_TRAIN_VOLTAGE_SWING_400:
2362 deemph_reg_value = 128;
2363 margin_reg_value = 52;
2364 break;
2365 case DP_TRAIN_VOLTAGE_SWING_600:
2366 deemph_reg_value = 128;
2367 margin_reg_value = 77;
2368 break;
2369 case DP_TRAIN_VOLTAGE_SWING_800:
2370 deemph_reg_value = 128;
2371 margin_reg_value = 102;
2372 break;
2373 case DP_TRAIN_VOLTAGE_SWING_1200:
2374 deemph_reg_value = 128;
2375 margin_reg_value = 154;
2376 /* FIXME extra to set for 1200 */
2377 break;
2378 default:
2379 return 0;
2380 }
2381 break;
2382 case DP_TRAIN_PRE_EMPHASIS_3_5:
2383 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2384 case DP_TRAIN_VOLTAGE_SWING_400:
2385 deemph_reg_value = 85;
2386 margin_reg_value = 78;
2387 break;
2388 case DP_TRAIN_VOLTAGE_SWING_600:
2389 deemph_reg_value = 85;
2390 margin_reg_value = 116;
2391 break;
2392 case DP_TRAIN_VOLTAGE_SWING_800:
2393 deemph_reg_value = 85;
2394 margin_reg_value = 154;
2395 break;
2396 default:
2397 return 0;
2398 }
2399 break;
2400 case DP_TRAIN_PRE_EMPHASIS_6:
2401 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2402 case DP_TRAIN_VOLTAGE_SWING_400:
2403 deemph_reg_value = 64;
2404 margin_reg_value = 104;
2405 break;
2406 case DP_TRAIN_VOLTAGE_SWING_600:
2407 deemph_reg_value = 64;
2408 margin_reg_value = 154;
2409 break;
2410 default:
2411 return 0;
2412 }
2413 break;
2414 case DP_TRAIN_PRE_EMPHASIS_9_5:
2415 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2416 case DP_TRAIN_VOLTAGE_SWING_400:
2417 deemph_reg_value = 43;
2418 margin_reg_value = 154;
2419 break;
2420 default:
2421 return 0;
2422 }
2423 break;
2424 default:
2425 return 0;
2426 }
2427
2428 mutex_lock(&dev_priv->dpio_lock);
2429
2430 /* Clear calc init */
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2432 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2434
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2436 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2438
2439 /* Program swing deemph */
2440 for (i = 0; i < 4; i++) {
2441 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2442 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2443 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2444 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2445 }
2446
2447 /* Program swing margin */
2448 for (i = 0; i < 4; i++) {
2449 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2450 val &= ~DPIO_SWING_MARGIN_MASK;
2451 val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
2452 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2453 }
2454
2455 /* Disable unique transition scale */
2456 for (i = 0; i < 4; i++) {
2457 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2458 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2459 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2460 }
2461
2462 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2463 == DP_TRAIN_PRE_EMPHASIS_0) &&
2464 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2465 == DP_TRAIN_VOLTAGE_SWING_1200)) {
2466
2467 /*
 2468 * The document says to set bit 27 for ch0 and bit 26
 2469 * for ch1, but that might be a typo in the doc.
 2470 * For now, for this unique transition scale selection, set bit
 2471 * 27 for both ch0 and ch1.
2472 */
2473 for (i = 0; i < 4; i++) {
2474 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2475 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
2476 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2477 }
2478
2479 for (i = 0; i < 4; i++) {
2480 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2481 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2482 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2483 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2484 }
2485 }
2486
2487 /* Start swing calculation */
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2489 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2490 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2491
2492 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2493 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2495
2496 /* LRC Bypass */
2497 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
2498 val |= DPIO_LRC_BYPASS;
2499 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
2500
2501 mutex_unlock(&dev_priv->dpio_lock);
2502
2503 return 0;
2504}
2505
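The nested switch only admits (pre-emphasis, voltage swing) pairs the PHY supports: each additional 3.5 dB of pre-emphasis drops the highest swing level, down to a single 400 mV entry at 9.5 dB, and unsupported combinations return 0 before any DPIO register is touched.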
2166static void 2506static void
2167intel_get_adjust_train(struct intel_dp *intel_dp, 2507intel_get_adjust_train(struct intel_dp *intel_dp,
2168 const uint8_t link_status[DP_LINK_STATUS_SIZE]) 2508 const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -2377,6 +2717,9 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2377 } else if (IS_HASWELL(dev)) { 2717 } else if (IS_HASWELL(dev)) {
2378 signal_levels = intel_hsw_signal_levels(train_set); 2718 signal_levels = intel_hsw_signal_levels(train_set);
2379 mask = DDI_BUF_EMP_MASK; 2719 mask = DDI_BUF_EMP_MASK;
2720 } else if (IS_CHERRYVIEW(dev)) {
2721 signal_levels = intel_chv_signal_levels(intel_dp);
2722 mask = 0;
2380 } else if (IS_VALLEYVIEW(dev)) { 2723 } else if (IS_VALLEYVIEW(dev)) {
2381 signal_levels = intel_vlv_signal_levels(intel_dp); 2724 signal_levels = intel_vlv_signal_levels(intel_dp);
2382 mask = 0; 2725 mask = 0;
@@ -2743,22 +3086,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2743 to_intel_crtc(intel_dig_port->base.base.crtc); 3086 to_intel_crtc(intel_dig_port->base.base.crtc);
2744 uint32_t DP = intel_dp->DP; 3087 uint32_t DP = intel_dp->DP;
2745 3088
2746 /* 3089 if (WARN_ON(HAS_DDI(dev)))
2747 * DDI code has a strict mode set sequence and we should try to respect
2748 * it, otherwise we might hang the machine in many different ways. So we
2749 * really should be disabling the port only on a complete crtc_disable
2750 * sequence. This function is just called under two conditions on DDI
2751 * code:
2752 * - Link train failed while doing crtc_enable, and on this case we
2753 * really should respect the mode set sequence and wait for a
2754 * crtc_disable.
2755 * - Someone turned the monitor off and intel_dp_check_link_status
2756 * called us. We don't need to disable the whole port on this case, so
2757 * when someone turns the monitor on again,
2758 * intel_ddi_prepare_link_retrain will take care of redoing the link
2759 * train.
2760 */
2761 if (HAS_DDI(dev))
2762 return; 3090 return;
2763 3091
2764 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 3092 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -2775,9 +3103,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2775 } 3103 }
2776 POSTING_READ(intel_dp->output_reg); 3104 POSTING_READ(intel_dp->output_reg);
2777 3105
2778 /* We don't really know why we're doing this */
2779 intel_wait_for_vblank(dev, intel_crtc->pipe);
2780
2781 if (HAS_PCH_IBX(dev) && 3106 if (HAS_PCH_IBX(dev) &&
2782 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 3107 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
2783 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 3108 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
@@ -2948,6 +3273,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2948 u8 sink_irq_vector; 3273 u8 sink_irq_vector;
2949 u8 link_status[DP_LINK_STATUS_SIZE]; 3274 u8 link_status[DP_LINK_STATUS_SIZE];
2950 3275
3276 /* FIXME: This access isn't protected by any locks. */
2951 if (!intel_encoder->connectors_active) 3277 if (!intel_encoder->connectors_active)
2952 return; 3278 return;
2953 3279
@@ -2980,7 +3306,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2980 3306
2981 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 3307 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2982 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 3308 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2983 drm_get_encoder_name(&intel_encoder->base)); 3309 intel_encoder->base.name);
2984 intel_dp_start_link_train(intel_dp); 3310 intel_dp_start_link_train(intel_dp);
2985 intel_dp_complete_link_train(intel_dp); 3311 intel_dp_complete_link_train(intel_dp);
2986 intel_dp_stop_link_train(intel_dp); 3312 intel_dp_stop_link_train(intel_dp);
@@ -3166,7 +3492,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3166 intel_display_power_get(dev_priv, power_domain); 3492 intel_display_power_get(dev_priv, power_domain);
3167 3493
3168 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 3494 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3169 connector->base.id, drm_get_connector_name(connector)); 3495 connector->base.id, connector->name);
3170 3496
3171 intel_dp->has_audio = false; 3497 intel_dp->has_audio = false;
3172 3498
@@ -3374,13 +3700,13 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3374 struct intel_dp *intel_dp = &intel_dig_port->dp; 3700 struct intel_dp *intel_dp = &intel_dig_port->dp;
3375 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3701 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3376 3702
3377 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); 3703 drm_dp_aux_unregister(&intel_dp->aux);
3378 drm_encoder_cleanup(encoder); 3704 drm_encoder_cleanup(encoder);
3379 if (is_edp(intel_dp)) { 3705 if (is_edp(intel_dp)) {
3380 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3706 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3381 mutex_lock(&dev->mode_config.mutex); 3707 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3382 edp_panel_vdd_off_sync(intel_dp); 3708 edp_panel_vdd_off_sync(intel_dp);
3383 mutex_unlock(&dev->mode_config.mutex); 3709 drm_modeset_unlock(&dev->mode_config.connection_mutex);
3384 } 3710 }
3385 kfree(intel_dig_port); 3711 kfree(intel_dig_port);
3386} 3712}
@@ -3651,6 +3977,130 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3651 I915_READ(pp_div_reg)); 3977 I915_READ(pp_div_reg));
3652} 3978}
3653 3979
3980void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
3981{
3982 struct drm_i915_private *dev_priv = dev->dev_private;
3983 struct intel_encoder *encoder;
3984 struct intel_dp *intel_dp = NULL;
3985 struct intel_crtc_config *config = NULL;
3986 struct intel_crtc *intel_crtc = NULL;
3987 struct intel_connector *intel_connector = dev_priv->drrs.connector;
3988 u32 reg, val;
3989 enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
3990
3991 if (refresh_rate <= 0) {
3992 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
3993 return;
3994 }
3995
3996 if (intel_connector == NULL) {
3997 DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
3998 return;
3999 }
4000
4001 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4002 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4003 return;
4004 }
4005
4006 encoder = intel_attached_encoder(&intel_connector->base);
4007 intel_dp = enc_to_intel_dp(&encoder->base);
4008 intel_crtc = encoder->new_crtc;
4009
4010 if (!intel_crtc) {
4011 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4012 return;
4013 }
4014
4015 config = &intel_crtc->config;
4016
4017 if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
4018 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4019 return;
4020 }
4021
4022 if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
4023 index = DRRS_LOW_RR;
4024
4025 if (index == intel_dp->drrs_state.refresh_rate_type) {
4026 DRM_DEBUG_KMS(
4027 "DRRS requested for previously set RR...ignoring\n");
4028 return;
4029 }
4030
4031 if (!intel_crtc->active) {
4032 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4033 return;
4034 }
4035
4036 if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
4037 reg = PIPECONF(intel_crtc->config.cpu_transcoder);
4038 val = I915_READ(reg);
4039 if (index > DRRS_HIGH_RR) {
4040 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4041 intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
4042 } else {
4043 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4044 }
4045 I915_WRITE(reg, val);
4046 }
4047
4048 /*
 4049 * mutex taken to ensure that there is no race between different
 4050 * drrs calls trying to update the refresh rate. This scenario may occur
 4051 * in the future once idleness-detection-based DRRS in the kernel and
 4052 * calls from user space to set a different RR can both be made.
4053 */
4054
4055 mutex_lock(&intel_dp->drrs_state.mutex);
4056
4057 intel_dp->drrs_state.refresh_rate_type = index;
4058
4059 mutex_unlock(&intel_dp->drrs_state.mutex);
4060
4061 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4062}
4063
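A hypothetical caller switching an eDP panel to its low refresh rate, assuming DRRS was set up by intel_dp_drrs_init() below:

        struct intel_connector *conn = dev_priv->drrs.connector;

        if (conn && conn->panel.downclock_mode)
                intel_dp_set_drrs_state(dev,
                                        conn->panel.downclock_mode->vrefresh);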
4064static struct drm_display_mode *
4065intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4066 struct intel_connector *intel_connector,
4067 struct drm_display_mode *fixed_mode)
4068{
4069 struct drm_connector *connector = &intel_connector->base;
4070 struct intel_dp *intel_dp = &intel_dig_port->dp;
4071 struct drm_device *dev = intel_dig_port->base.base.dev;
4072 struct drm_i915_private *dev_priv = dev->dev_private;
4073 struct drm_display_mode *downclock_mode = NULL;
4074
4075 if (INTEL_INFO(dev)->gen <= 6) {
4076 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
4077 return NULL;
4078 }
4079
4080 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4081 DRM_INFO("VBT doesn't support DRRS\n");
4082 return NULL;
4083 }
4084
4085 downclock_mode = intel_find_panel_downclock
4086 (dev, fixed_mode, connector);
4087
4088 if (!downclock_mode) {
4089 DRM_INFO("DRRS not supported\n");
4090 return NULL;
4091 }
4092
4093 dev_priv->drrs.connector = intel_connector;
4094
4095 mutex_init(&intel_dp->drrs_state.mutex);
4096
4097 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4098
4099 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4100 DRM_INFO("seamless DRRS supported for eDP panel.\n");
4101 return downclock_mode;
4102}
4103
3654static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4104static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3655 struct intel_connector *intel_connector, 4105 struct intel_connector *intel_connector,
3656 struct edp_power_seq *power_seq) 4106 struct edp_power_seq *power_seq)
@@ -3661,10 +4111,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3661 struct drm_device *dev = intel_encoder->base.dev; 4111 struct drm_device *dev = intel_encoder->base.dev;
3662 struct drm_i915_private *dev_priv = dev->dev_private; 4112 struct drm_i915_private *dev_priv = dev->dev_private;
3663 struct drm_display_mode *fixed_mode = NULL; 4113 struct drm_display_mode *fixed_mode = NULL;
4114 struct drm_display_mode *downclock_mode = NULL;
3664 bool has_dpcd; 4115 bool has_dpcd;
3665 struct drm_display_mode *scan; 4116 struct drm_display_mode *scan;
3666 struct edid *edid; 4117 struct edid *edid;
3667 4118
4119 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4120
3668 if (!is_edp(intel_dp)) 4121 if (!is_edp(intel_dp))
3669 return true; 4122 return true;
3670 4123
@@ -3715,6 +4168,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3715 list_for_each_entry(scan, &connector->probed_modes, head) { 4168 list_for_each_entry(scan, &connector->probed_modes, head) {
3716 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 4169 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3717 fixed_mode = drm_mode_duplicate(dev, scan); 4170 fixed_mode = drm_mode_duplicate(dev, scan);
4171 downclock_mode = intel_dp_drrs_init(
4172 intel_dig_port,
4173 intel_connector, fixed_mode);
3718 break; 4174 break;
3719 } 4175 }
3720 } 4176 }
@@ -3728,7 +4184,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3728 } 4184 }
3729 mutex_unlock(&dev->mode_config.mutex); 4185 mutex_unlock(&dev->mode_config.mutex);
3730 4186
3731 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 4187 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
3732 intel_panel_setup_backlight(connector); 4188 intel_panel_setup_backlight(connector);
3733 4189
3734 return true; 4190 return true;
@@ -3826,12 +4282,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3826 intel_dp->psr_setup_done = false; 4282 intel_dp->psr_setup_done = false;
3827 4283
3828 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { 4284 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
3829 drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); 4285 drm_dp_aux_unregister(&intel_dp->aux);
3830 if (is_edp(intel_dp)) { 4286 if (is_edp(intel_dp)) {
3831 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4287 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3832 mutex_lock(&dev->mode_config.mutex); 4288 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3833 edp_panel_vdd_off_sync(intel_dp); 4289 edp_panel_vdd_off_sync(intel_dp);
3834 mutex_unlock(&dev->mode_config.mutex); 4290 drm_modeset_unlock(&dev->mode_config.connection_mutex);
3835 } 4291 }
3836 drm_sysfs_connector_remove(connector); 4292 drm_sysfs_connector_remove(connector);
3837 drm_connector_cleanup(connector); 4293 drm_connector_cleanup(connector);
@@ -3877,25 +4333,36 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3877 DRM_MODE_ENCODER_TMDS); 4333 DRM_MODE_ENCODER_TMDS);
3878 4334
3879 intel_encoder->compute_config = intel_dp_compute_config; 4335 intel_encoder->compute_config = intel_dp_compute_config;
3880 intel_encoder->mode_set = intel_dp_mode_set;
3881 intel_encoder->disable = intel_disable_dp; 4336 intel_encoder->disable = intel_disable_dp;
3882 intel_encoder->post_disable = intel_post_disable_dp;
3883 intel_encoder->get_hw_state = intel_dp_get_hw_state; 4337 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3884 intel_encoder->get_config = intel_dp_get_config; 4338 intel_encoder->get_config = intel_dp_get_config;
3885 if (IS_VALLEYVIEW(dev)) { 4339 if (IS_CHERRYVIEW(dev)) {
4340 intel_encoder->pre_enable = chv_pre_enable_dp;
4341 intel_encoder->enable = vlv_enable_dp;
4342 intel_encoder->post_disable = chv_post_disable_dp;
4343 } else if (IS_VALLEYVIEW(dev)) {
3886 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 4344 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3887 intel_encoder->pre_enable = vlv_pre_enable_dp; 4345 intel_encoder->pre_enable = vlv_pre_enable_dp;
3888 intel_encoder->enable = vlv_enable_dp; 4346 intel_encoder->enable = vlv_enable_dp;
4347 intel_encoder->post_disable = vlv_post_disable_dp;
3889 } else { 4348 } else {
3890 intel_encoder->pre_enable = g4x_pre_enable_dp; 4349 intel_encoder->pre_enable = g4x_pre_enable_dp;
3891 intel_encoder->enable = g4x_enable_dp; 4350 intel_encoder->enable = g4x_enable_dp;
4351 intel_encoder->post_disable = g4x_post_disable_dp;
3892 } 4352 }
3893 4353
3894 intel_dig_port->port = port; 4354 intel_dig_port->port = port;
3895 intel_dig_port->dp.output_reg = output_reg; 4355 intel_dig_port->dp.output_reg = output_reg;
3896 4356
3897 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4357 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3898 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 4358 if (IS_CHERRYVIEW(dev)) {
4359 if (port == PORT_D)
4360 intel_encoder->crtc_mask = 1 << 2;
4361 else
4362 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
4363 } else {
4364 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4365 }
3899 intel_encoder->cloneable = 0; 4366 intel_encoder->cloneable = 0;
3900 intel_encoder->hot_plug = intel_dp_hot_plug; 4367 intel_encoder->hot_plug = intel_dp_hot_plug;
3901 4368
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 328b1a70264b..bda0ae3d80cc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -81,8 +81,8 @@
81/* Maximum cursor sizes */ 81/* Maximum cursor sizes */
82#define GEN2_CURSOR_WIDTH 64 82#define GEN2_CURSOR_WIDTH 64
83#define GEN2_CURSOR_HEIGHT 64 83#define GEN2_CURSOR_HEIGHT 64
84#define CURSOR_WIDTH 256 84#define MAX_CURSOR_WIDTH 256
85#define CURSOR_HEIGHT 256 85#define MAX_CURSOR_HEIGHT 256
86 86
87#define INTEL_I2C_BUS_DVO 1 87#define INTEL_I2C_BUS_DVO 1
88#define INTEL_I2C_BUS_SDVO 2 88#define INTEL_I2C_BUS_SDVO 2
@@ -106,8 +106,8 @@
106#define INTEL_DVO_CHIP_TMDS 2 106#define INTEL_DVO_CHIP_TMDS 2
107#define INTEL_DVO_CHIP_TVOUT 4 107#define INTEL_DVO_CHIP_TVOUT 4
108 108
109#define INTEL_DSI_COMMAND_MODE 0 109#define INTEL_DSI_VIDEO_MODE 0
110#define INTEL_DSI_VIDEO_MODE 1 110#define INTEL_DSI_COMMAND_MODE 1
111 111
112struct intel_framebuffer { 112struct intel_framebuffer {
113 struct drm_framebuffer base; 113 struct drm_framebuffer base;
@@ -273,6 +273,13 @@ struct intel_crtc_config {
273 * accordingly. */ 273 * accordingly. */
274 bool has_dp_encoder; 274 bool has_dp_encoder;
275 275
276 /* Whether we should send NULL infoframes. Required for audio. */
277 bool has_hdmi_sink;
278
279 /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
280 * has_dp_encoder is set. */
281 bool has_audio;
282
276 /* 283 /*
277 * Enable dithering, used when the selected pipe bpp doesn't match the 284 * Enable dithering, used when the selected pipe bpp doesn't match the
278 * plane bpp. 285 * plane bpp.
@@ -306,6 +313,9 @@ struct intel_crtc_config {
306 int pipe_bpp; 313 int pipe_bpp;
307 struct intel_link_m_n dp_m_n; 314 struct intel_link_m_n dp_m_n;
308 315
316 /* m2_n2 for eDP downclock */
317 struct intel_link_m_n dp_m2_n2;
318
309 /* 319 /*
 310 * Frequency the dpll for the port should run at. Differs from the 320
311 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also 321 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -343,6 +353,9 @@ struct intel_pipe_wm {
343 struct intel_wm_level wm[5]; 353 struct intel_wm_level wm[5];
344 uint32_t linetime; 354 uint32_t linetime;
345 bool fbc_wm_enabled; 355 bool fbc_wm_enabled;
356 bool pipe_enabled;
357 bool sprites_enabled;
358 bool sprites_scaled;
346}; 359};
347 360
348struct intel_crtc { 361struct intel_crtc {
@@ -357,7 +370,6 @@ struct intel_crtc {
357 */ 370 */
358 bool active; 371 bool active;
359 unsigned long enabled_power_domains; 372 unsigned long enabled_power_domains;
360 bool eld_vld;
361 bool primary_enabled; /* is the primary plane (partially) visible? */ 373 bool primary_enabled; /* is the primary plane (partially) visible? */
362 bool lowfreq_avail; 374 bool lowfreq_avail;
363 struct intel_overlay *overlay; 375 struct intel_overlay *overlay;
@@ -374,8 +386,8 @@ struct intel_crtc {
374 uint32_t cursor_addr; 386 uint32_t cursor_addr;
375 int16_t cursor_x, cursor_y; 387 int16_t cursor_x, cursor_y;
376 int16_t cursor_width, cursor_height; 388 int16_t cursor_width, cursor_height;
377 int16_t max_cursor_width, max_cursor_height; 389 uint32_t cursor_cntl;
378 bool cursor_visible; 390 uint32_t cursor_base;
379 391
380 struct intel_plane_config plane_config; 392 struct intel_plane_config plane_config;
381 struct intel_crtc_config config; 393 struct intel_crtc_config config;
@@ -396,6 +408,10 @@ struct intel_crtc {
396 /* watermarks currently being used */ 408 /* watermarks currently being used */
397 struct intel_pipe_wm active; 409 struct intel_pipe_wm active;
398 } wm; 410 } wm;
411
412 wait_queue_head_t vbl_wait;
413
414 int scanline_offset;
399}; 415};
400 416
401struct intel_plane_wm_parameters { 417struct intel_plane_wm_parameters {
@@ -479,11 +495,23 @@ struct intel_hdmi {
479 enum hdmi_infoframe_type type, 495 enum hdmi_infoframe_type type,
480 const void *frame, ssize_t len); 496 const void *frame, ssize_t len);
481 void (*set_infoframes)(struct drm_encoder *encoder, 497 void (*set_infoframes)(struct drm_encoder *encoder,
498 bool enable,
482 struct drm_display_mode *adjusted_mode); 499 struct drm_display_mode *adjusted_mode);
483}; 500};
484 501
485#define DP_MAX_DOWNSTREAM_PORTS 0x10 502#define DP_MAX_DOWNSTREAM_PORTS 0x10
486 503
504/**
 505 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 506 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 507 * parsing for the same resolution.
508 */
509enum edp_drrs_refresh_rate_type {
510 DRRS_HIGH_RR,
511 DRRS_LOW_RR,
512 DRRS_MAX_RR, /* RR count */
513};
514
487struct intel_dp { 515struct intel_dp {
488 uint32_t output_reg; 516 uint32_t output_reg;
489 uint32_t aux_ch_ctl_reg; 517 uint32_t aux_ch_ctl_reg;
@@ -522,6 +550,12 @@ struct intel_dp {
522 bool has_aux_irq, 550 bool has_aux_irq,
523 int send_bytes, 551 int send_bytes,
524 uint32_t aux_clock_divider); 552 uint32_t aux_clock_divider);
553 struct {
554 enum drrs_support_type type;
555 enum edp_drrs_refresh_rate_type refresh_rate_type;
556 struct mutex mutex;
557 } drrs_state;
558
525}; 559};
526 560
527struct intel_digital_port { 561struct intel_digital_port {
@@ -537,6 +571,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
537{ 571{
538 switch (dport->port) { 572 switch (dport->port) {
539 case PORT_B: 573 case PORT_B:
574 case PORT_D:
540 return DPIO_CH0; 575 return DPIO_CH0;
541 case PORT_C: 576 case PORT_C:
542 return DPIO_CH1; 577 return DPIO_CH1;
@@ -545,6 +580,20 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
545 } 580 }
546} 581}
547 582
583static inline int
584vlv_pipe_to_channel(enum pipe pipe)
585{
586 switch (pipe) {
587 case PIPE_A:
588 case PIPE_C:
589 return DPIO_CH0;
590 case PIPE_B:
591 return DPIO_CH1;
592 default:
593 BUG();
594 }
595}
596
548static inline struct drm_crtc * 597static inline struct drm_crtc *
549intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 598intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
550{ 599{
@@ -569,6 +618,8 @@ struct intel_unpin_work {
569#define INTEL_FLIP_INACTIVE 0 618#define INTEL_FLIP_INACTIVE 0
570#define INTEL_FLIP_PENDING 1 619#define INTEL_FLIP_PENDING 1
571#define INTEL_FLIP_COMPLETE 2 620#define INTEL_FLIP_COMPLETE 2
621 u32 flip_count;
622 u32 gtt_offset;
572 bool enable_stall_check; 623 bool enable_stall_check;
573}; 624};
574 625
@@ -620,8 +671,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
620/* i915_irq.c */ 671/* i915_irq.c */
621bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 672bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
622 enum pipe pipe, bool enable); 673 enum pipe pipe, bool enable);
623bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
624 enum pipe pipe, bool enable);
625bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 674bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
626 enum transcoder pch_transcoder, 675 enum transcoder pch_transcoder,
627 bool enable); 676 bool enable);
@@ -629,8 +678,12 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
629void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 678void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
630void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 679void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
631void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 680void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
632void hsw_runtime_pm_disable_interrupts(struct drm_device *dev); 681void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
633void hsw_runtime_pm_restore_interrupts(struct drm_device *dev); 682void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
683void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
684void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
685int intel_get_crtc_scanline(struct intel_crtc *crtc);
686void i9xx_check_fifo_underruns(struct drm_device *dev);
634 687
635 688
636/* intel_crt.c */ 689/* intel_crt.c */
@@ -666,9 +719,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
666const char *intel_output_name(int output); 719const char *intel_output_name(int output);
667bool intel_has_pending_fb_unpin(struct drm_device *dev); 720bool intel_has_pending_fb_unpin(struct drm_device *dev);
668int intel_pch_rawclk(struct drm_device *dev); 721int intel_pch_rawclk(struct drm_device *dev);
722int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
669void intel_mark_busy(struct drm_device *dev); 723void intel_mark_busy(struct drm_device *dev);
670void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 724void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
671 struct intel_ring_buffer *ring); 725 struct intel_engine_cs *ring);
672void intel_mark_idle(struct drm_device *dev); 726void intel_mark_idle(struct drm_device *dev);
673void intel_crtc_restore_mode(struct drm_crtc *crtc); 727void intel_crtc_restore_mode(struct drm_crtc *crtc);
674void intel_crtc_update_dpms(struct drm_crtc *crtc); 728void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -695,12 +749,14 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
695 struct intel_digital_port *dport); 749 struct intel_digital_port *dport);
696bool intel_get_load_detect_pipe(struct drm_connector *connector, 750bool intel_get_load_detect_pipe(struct drm_connector *connector,
697 struct drm_display_mode *mode, 751 struct drm_display_mode *mode,
698 struct intel_load_detect_pipe *old); 752 struct intel_load_detect_pipe *old,
753 struct drm_modeset_acquire_ctx *ctx);
699void intel_release_load_detect_pipe(struct drm_connector *connector, 754void intel_release_load_detect_pipe(struct drm_connector *connector,
700 struct intel_load_detect_pipe *old); 755 struct intel_load_detect_pipe *old,
756 struct drm_modeset_acquire_ctx *ctx);
701int intel_pin_and_fence_fb_obj(struct drm_device *dev, 757int intel_pin_and_fence_fb_obj(struct drm_device *dev,
702 struct drm_i915_gem_object *obj, 758 struct drm_i915_gem_object *obj,
703 struct intel_ring_buffer *pipelined); 759 struct intel_engine_cs *pipelined);
704void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); 760void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
705struct drm_framebuffer * 761struct drm_framebuffer *
706__intel_framebuffer_create(struct drm_device *dev, 762__intel_framebuffer_create(struct drm_device *dev,
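
Both load-detect helpers now take a drm_modeset_acquire_ctx, part of the object-locking rework called out in the merge summary. A sketch of the expected pairing; the ctx setup is the caller's job and is not shown in this hunk:

	struct intel_load_detect_pipe tmp;

	if (intel_get_load_detect_pipe(connector, mode, &tmp, ctx)) {
		/* probe the forced pipe, then tear it down with the same ctx */
		intel_release_load_detect_pipe(connector, &tmp, ctx);
	}
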
@@ -751,6 +807,8 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv);
751void intel_mode_from_pipe_config(struct drm_display_mode *mode, 807void intel_mode_from_pipe_config(struct drm_display_mode *mode,
752 struct intel_crtc_config *pipe_config); 808 struct intel_crtc_config *pipe_config);
753int intel_format_to_fourcc(int format); 809int intel_format_to_fourcc(int format);
810void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
811
754 812
755/* intel_dp.c */ 813/* intel_dp.c */
756void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 814void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -774,7 +832,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
774void intel_edp_psr_enable(struct intel_dp *intel_dp); 832void intel_edp_psr_enable(struct intel_dp *intel_dp);
775void intel_edp_psr_disable(struct intel_dp *intel_dp); 833void intel_edp_psr_disable(struct intel_dp *intel_dp);
776void intel_edp_psr_update(struct drm_device *dev); 834void intel_edp_psr_update(struct drm_device *dev);
777 835void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
778 836
779/* intel_dsi.c */ 837/* intel_dsi.c */
780bool intel_dsi_init(struct drm_device *dev); 838bool intel_dsi_init(struct drm_device *dev);
@@ -876,6 +934,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
876/* intel_pm.c */ 934/* intel_pm.c */
877void intel_init_clock_gating(struct drm_device *dev); 935void intel_init_clock_gating(struct drm_device *dev);
878void intel_suspend_hw(struct drm_device *dev); 936void intel_suspend_hw(struct drm_device *dev);
937int ilk_wm_max_level(const struct drm_device *dev);
879void intel_update_watermarks(struct drm_crtc *crtc); 938void intel_update_watermarks(struct drm_crtc *crtc);
880void intel_update_sprite_watermarks(struct drm_plane *plane, 939void intel_update_sprite_watermarks(struct drm_plane *plane,
881 struct drm_crtc *crtc, 940 struct drm_crtc *crtc,
@@ -902,6 +961,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
902void intel_cleanup_gt_powersave(struct drm_device *dev); 961void intel_cleanup_gt_powersave(struct drm_device *dev);
903void intel_enable_gt_powersave(struct drm_device *dev); 962void intel_enable_gt_powersave(struct drm_device *dev);
904void intel_disable_gt_powersave(struct drm_device *dev); 963void intel_disable_gt_powersave(struct drm_device *dev);
964void intel_reset_gt_powersave(struct drm_device *dev);
905void ironlake_teardown_rc6(struct drm_device *dev); 965void ironlake_teardown_rc6(struct drm_device *dev);
906void gen6_update_ring_freq(struct drm_device *dev); 966void gen6_update_ring_freq(struct drm_device *dev);
907void gen6_rps_idle(struct drm_i915_private *dev_priv); 967void gen6_rps_idle(struct drm_i915_private *dev_priv);
@@ -909,11 +969,13 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv);
909void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 969void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
910void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 970void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
911void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 971void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
972void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
912void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 973void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
913void intel_init_runtime_pm(struct drm_i915_private *dev_priv); 974void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
914void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); 975void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
915void ilk_wm_get_hw_state(struct drm_device *dev); 976void ilk_wm_get_hw_state(struct drm_device *dev);
916 977void __vlv_set_power_well(struct drm_i915_private *dev_priv,
978 enum punit_power_well power_well_id, bool enable);
917 979
918/* intel_sdvo.c */ 980/* intel_sdvo.c */
919bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); 981bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 33656647f8bc..02f99d768d49 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -35,6 +35,11 @@
35 35
36/* the sub-encoders aka panel drivers */ 36/* the sub-encoders aka panel drivers */
37static const struct intel_dsi_device intel_dsi_devices[] = { 37static const struct intel_dsi_device intel_dsi_devices[] = {
38 {
39 .panel_id = MIPI_DSI_GENERIC_PANEL_ID,
40 .name = "vbt-generic-dsi-vid-mode-display",
41 .dev_ops = &vbt_generic_dsi_display_ops,
42 },
38}; 43};
39 44
40static void band_gap_reset(struct drm_i915_private *dev_priv) 45static void band_gap_reset(struct drm_i915_private *dev_priv)
@@ -59,12 +64,12 @@ static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
59 64
60static inline bool is_vid_mode(struct intel_dsi *intel_dsi) 65static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
61{ 66{
62 return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE; 67 return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
63} 68}
64 69
65static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) 70static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
66{ 71{
67 return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE; 72 return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
68} 73}
69 74
70static void intel_dsi_hot_plug(struct intel_encoder *encoder) 75static void intel_dsi_hot_plug(struct intel_encoder *encoder)
@@ -94,13 +99,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
94 return true; 99 return true;
95} 100}
96 101
97static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
98{
99 DRM_DEBUG_KMS("\n");
100
101 vlv_enable_dsi_pll(encoder);
102}
103
104static void intel_dsi_device_ready(struct intel_encoder *encoder) 102static void intel_dsi_device_ready(struct intel_encoder *encoder)
105{ 103{
106 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 104 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -110,6 +108,15 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
110 108
111 DRM_DEBUG_KMS("\n"); 109 DRM_DEBUG_KMS("\n");
112 110
111 mutex_lock(&dev_priv->dpio_lock);
 112 /* program rcomp for compliance; reduce from 50 ohms to 45 ohms,
 113 * needed every time after power gate */
114 vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
115 mutex_unlock(&dev_priv->dpio_lock);
116
 117 /* bandgap reset is needed every time after a power gate */
118 band_gap_reset(dev_priv);
119
113 val = I915_READ(MIPI_PORT_CTRL(pipe)); 120 val = I915_READ(MIPI_PORT_CTRL(pipe));
114 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); 121 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
115 usleep_range(1000, 1500); 122 usleep_range(1000, 1500);
@@ -122,21 +129,6 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
122 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); 129 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
123 usleep_range(2000, 2500); 130 usleep_range(2000, 2500);
124} 131}
125static void intel_dsi_pre_enable(struct intel_encoder *encoder)
126{
127 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
128
129 DRM_DEBUG_KMS("\n");
130
131 if (intel_dsi->dev.dev_ops->panel_reset)
132 intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
133
134 /* put device in ready state */
135 intel_dsi_device_ready(encoder);
136
137 if (intel_dsi->dev.dev_ops->send_otp_cmds)
138 intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
139}
140 132
141static void intel_dsi_enable(struct intel_encoder *encoder) 133static void intel_dsi_enable(struct intel_encoder *encoder)
142{ 134{
@@ -153,18 +145,78 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
153 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4); 145 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
154 else { 146 else {
155 msleep(20); /* XXX */ 147 msleep(20); /* XXX */
156 dpi_send_cmd(intel_dsi, TURN_ON); 148 dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
157 msleep(100); 149 msleep(100);
158 150
151 if (intel_dsi->dev.dev_ops->enable)
152 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
153
159 /* assert ip_tg_enable signal */ 154 /* assert ip_tg_enable signal */
160 temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK; 155 temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
161 temp = temp | intel_dsi->port_bits; 156 temp = temp | intel_dsi->port_bits;
162 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE); 157 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
163 POSTING_READ(MIPI_PORT_CTRL(pipe)); 158 POSTING_READ(MIPI_PORT_CTRL(pipe));
164 } 159 }
160}
161
162static void intel_dsi_pre_enable(struct intel_encoder *encoder)
163{
164 struct drm_device *dev = encoder->base.dev;
165 struct drm_i915_private *dev_priv = dev->dev_private;
166 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
167 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
168 enum pipe pipe = intel_crtc->pipe;
169 u32 tmp;
170
171 DRM_DEBUG_KMS("\n");
172
 173 /* Disable DPOunit clock gating; it can stall the pipe,
174 * and we need DPLL REFA always enabled */
175 tmp = I915_READ(DPLL(pipe));
176 tmp |= DPLL_REFA_CLK_ENABLE_VLV;
177 I915_WRITE(DPLL(pipe), tmp);
178
179 tmp = I915_READ(DSPCLK_GATE_D);
180 tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
181 I915_WRITE(DSPCLK_GATE_D, tmp);
182
183 /* put device in ready state */
184 intel_dsi_device_ready(encoder);
185
186 msleep(intel_dsi->panel_on_delay);
187
188 if (intel_dsi->dev.dev_ops->panel_reset)
189 intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
190
191 if (intel_dsi->dev.dev_ops->send_otp_cmds)
192 intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
165 193
 166 if (intel_dsi->dev.dev_ops->enable) 194 /* Enable port in pre-enable phase itself because, per the hw team
 167 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); 195 * recommendation, port should be enabled before plane & pipe */
196 intel_dsi_enable(encoder);
197}
198
199static void intel_dsi_enable_nop(struct intel_encoder *encoder)
200{
201 DRM_DEBUG_KMS("\n");
202
 203 /* for DSI, port enable has to be done before pipe
 204 * and plane enable, so port enable is done in the
 205 * pre_enable phase itself, unlike other encoders
206 */
207}
208
209static void intel_dsi_pre_disable(struct intel_encoder *encoder)
210{
211 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
212
213 DRM_DEBUG_KMS("\n");
214
215 if (is_vid_mode(intel_dsi)) {
216 /* Send Shutdown command to the panel in LP mode */
217 dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
218 msleep(10);
219 }
168} 220}
169 221
170static void intel_dsi_disable(struct intel_encoder *encoder) 222static void intel_dsi_disable(struct intel_encoder *encoder)
@@ -179,9 +231,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
179 DRM_DEBUG_KMS("\n"); 231 DRM_DEBUG_KMS("\n");
180 232
181 if (is_vid_mode(intel_dsi)) { 233 if (is_vid_mode(intel_dsi)) {
182 dpi_send_cmd(intel_dsi, SHUTDOWN);
183 msleep(10);
184
185 /* de-assert ip_tg_enable signal */ 234 /* de-assert ip_tg_enable signal */
186 temp = I915_READ(MIPI_PORT_CTRL(pipe)); 235 temp = I915_READ(MIPI_PORT_CTRL(pipe));
187 I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE); 236 I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
@@ -190,6 +239,23 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
190 msleep(2); 239 msleep(2);
191 } 240 }
192 241
242 /* Panel commands can be sent when clock is in LP11 */
243 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
244
245 temp = I915_READ(MIPI_CTRL(pipe));
246 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
247 I915_WRITE(MIPI_CTRL(pipe), temp |
248 intel_dsi->escape_clk_div <<
249 ESCAPE_CLOCK_DIVIDER_SHIFT);
250
251 I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
252
253 temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
254 temp &= ~VID_MODE_FORMAT_MASK;
255 I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
256
257 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
258
 193 /* if disable packets are sent before the shutdown packet, then a 259 /* if disable packets are sent before the shutdown packet, then a
 194 * turn-on packet error is observed in some later enable sequence */ 260 * turn-on packet error is observed in some later enable sequence */
195 if (intel_dsi->dev.dev_ops->disable) 261 if (intel_dsi->dev.dev_ops->disable)
@@ -227,16 +293,28 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
227 293
228 vlv_disable_dsi_pll(encoder); 294 vlv_disable_dsi_pll(encoder);
229} 295}
296
230static void intel_dsi_post_disable(struct intel_encoder *encoder) 297static void intel_dsi_post_disable(struct intel_encoder *encoder)
231{ 298{
299 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
232 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 300 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
301 u32 val;
233 302
234 DRM_DEBUG_KMS("\n"); 303 DRM_DEBUG_KMS("\n");
235 304
305 intel_dsi_disable(encoder);
306
236 intel_dsi_clear_device_ready(encoder); 307 intel_dsi_clear_device_ready(encoder);
237 308
309 val = I915_READ(DSPCLK_GATE_D);
310 val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
311 I915_WRITE(DSPCLK_GATE_D, val);
312
238 if (intel_dsi->dev.dev_ops->disable_panel_power) 313 if (intel_dsi->dev.dev_ops->disable_panel_power)
239 intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev); 314 intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
315
316 msleep(intel_dsi->panel_off_delay);
317 msleep(intel_dsi->panel_pwr_cycle_delay);
240} 318}
241 319
242static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, 320static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -364,7 +442,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
364 I915_WRITE(MIPI_VBP_COUNT(pipe), vbp); 442 I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
365} 443}
366 444
367static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) 445static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
368{ 446{
369 struct drm_encoder *encoder = &intel_encoder->base; 447 struct drm_encoder *encoder = &intel_encoder->base;
370 struct drm_device *dev = encoder->dev; 448 struct drm_device *dev = encoder->dev;
@@ -379,9 +457,6 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
379 457
380 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); 458 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
381 459
382 /* XXX: Location of the call */
383 band_gap_reset(dev_priv);
384
385 /* escape clock divider, 20MHz, shared for A and C. device ready must be 460 /* escape clock divider, 20MHz, shared for A and C. device ready must be
386 * off when doing this! txclkesc? */ 461 * off when doing this! txclkesc? */
387 tmp = I915_READ(MIPI_CTRL(0)); 462 tmp = I915_READ(MIPI_CTRL(0));
@@ -452,10 +527,20 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
452 /* dphy stuff */ 527 /* dphy stuff */
453 528
454 /* in terms of low power clock */ 529 /* in terms of low power clock */
455 I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100)); 530 I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
531
532 val = 0;
533 if (intel_dsi->eotp_pkt == 0)
534 val |= EOT_DISABLE;
535
536 if (intel_dsi->clock_stop)
537 val |= CLOCKSTOP;
456 538
457 /* recovery disables */ 539 /* recovery disables */
458 I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable); 540 I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
541
542 /* in terms of low power clock */
543 I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
459 544
460 /* in terms of txbyteclkhs. actual high to low switch + 545 /* in terms of txbyteclkhs. actual high to low switch +
461 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK. 546 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
@@ -484,9 +569,23 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
484 intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); 569 intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
485 570
486 if (is_vid_mode(intel_dsi)) 571 if (is_vid_mode(intel_dsi))
 572 /* Some panels might have a resolution that is not a multiple of
 573 * 64, like 1366 x 768. Enable RANDOM resolution support for such
574 * panels by default */
487 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), 575 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
488 intel_dsi->video_frmt_cfg_bits | 576 intel_dsi->video_frmt_cfg_bits |
489 intel_dsi->video_mode_format); 577 intel_dsi->video_mode_format |
578 IP_TG_CONFIG |
579 RANDOM_DPI_DISPLAY_RESOLUTION);
580}
581
582static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
583{
584 DRM_DEBUG_KMS("\n");
585
586 intel_dsi_prepare(encoder);
587
588 vlv_enable_dsi_pll(encoder);
490} 589}
491 590
492static enum drm_connector_status 591static enum drm_connector_status
@@ -566,11 +665,16 @@ bool intel_dsi_init(struct drm_device *dev)
566 struct intel_connector *intel_connector; 665 struct intel_connector *intel_connector;
567 struct drm_connector *connector; 666 struct drm_connector *connector;
568 struct drm_display_mode *fixed_mode = NULL; 667 struct drm_display_mode *fixed_mode = NULL;
668 struct drm_i915_private *dev_priv = dev->dev_private;
569 const struct intel_dsi_device *dsi; 669 const struct intel_dsi_device *dsi;
570 unsigned int i; 670 unsigned int i;
571 671
572 DRM_DEBUG_KMS("\n"); 672 DRM_DEBUG_KMS("\n");
573 673
674 /* There is no detection method for MIPI so rely on VBT */
675 if (!dev_priv->vbt.has_mipi)
676 return false;
677
574 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); 678 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
575 if (!intel_dsi) 679 if (!intel_dsi)
576 return false; 680 return false;
@@ -585,6 +689,13 @@ bool intel_dsi_init(struct drm_device *dev)
585 encoder = &intel_encoder->base; 689 encoder = &intel_encoder->base;
586 intel_dsi->attached_connector = intel_connector; 690 intel_dsi->attached_connector = intel_connector;
587 691
692 if (IS_VALLEYVIEW(dev)) {
693 dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
694 } else {
 695 DRM_ERROR("Unsupported MIPI device to reg base");
696 return false;
697 }
698
588 connector = &intel_connector->base; 699 connector = &intel_connector->base;
589 700
590 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); 701 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -594,9 +705,8 @@ bool intel_dsi_init(struct drm_device *dev)
594 intel_encoder->compute_config = intel_dsi_compute_config; 705 intel_encoder->compute_config = intel_dsi_compute_config;
595 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable; 706 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
596 intel_encoder->pre_enable = intel_dsi_pre_enable; 707 intel_encoder->pre_enable = intel_dsi_pre_enable;
597 intel_encoder->enable = intel_dsi_enable; 708 intel_encoder->enable = intel_dsi_enable_nop;
598 intel_encoder->mode_set = intel_dsi_mode_set; 709 intel_encoder->disable = intel_dsi_pre_disable;
599 intel_encoder->disable = intel_dsi_disable;
600 intel_encoder->post_disable = intel_dsi_post_disable; 710 intel_encoder->post_disable = intel_dsi_post_disable;
601 intel_encoder->get_hw_state = intel_dsi_get_hw_state; 711 intel_encoder->get_hw_state = intel_dsi_get_hw_state;
602 intel_encoder->get_config = intel_dsi_get_config; 712 intel_encoder->get_config = intel_dsi_get_config;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index b4a27cec882f..31db33d3e5cc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -31,7 +31,6 @@
31struct intel_dsi_device { 31struct intel_dsi_device {
32 unsigned int panel_id; 32 unsigned int panel_id;
33 const char *name; 33 const char *name;
34 int type;
35 const struct intel_dsi_dev_ops *dev_ops; 34 const struct intel_dsi_dev_ops *dev_ops;
36 void *dev_priv; 35 void *dev_priv;
37}; 36};
@@ -85,6 +84,9 @@ struct intel_dsi {
85 /* virtual channel */ 84 /* virtual channel */
86 int channel; 85 int channel;
87 86
87 /* Video mode or command mode */
88 u16 operation_mode;
89
88 /* number of DSI lanes */ 90 /* number of DSI lanes */
89 unsigned int lane_count; 91 unsigned int lane_count;
90 92
@@ -95,8 +97,10 @@ struct intel_dsi {
95 u32 video_mode_format; 97 u32 video_mode_format;
96 98
97 /* eot for MIPI_EOT_DISABLE register */ 99 /* eot for MIPI_EOT_DISABLE register */
98 u32 eot_disable; 100 u8 eotp_pkt;
101 u8 clock_stop;
99 102
103 u8 escape_clk_div;
100 u32 port_bits; 104 u32 port_bits;
101 u32 bw_timer; 105 u32 bw_timer;
102 u32 dphy_reg; 106 u32 dphy_reg;
@@ -110,6 +114,15 @@ struct intel_dsi {
110 u16 hs_to_lp_count; 114 u16 hs_to_lp_count;
111 u16 clk_lp_to_hs_count; 115 u16 clk_lp_to_hs_count;
112 u16 clk_hs_to_lp_count; 116 u16 clk_hs_to_lp_count;
117
118 u16 init_count;
119
120 /* all delays in ms */
121 u16 backlight_off_delay;
122 u16 backlight_on_delay;
123 u16 panel_on_delay;
124 u16 panel_off_delay;
125 u16 panel_pwr_cycle_delay;
113}; 126};
114 127
115static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) 128static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
@@ -120,4 +133,6 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
120extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); 133extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
121extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); 134extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
122 135
136extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
137
123#endif /* _INTEL_DSI_H */ 138#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 7c40f981d2c7..3eeb21b9fddf 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -389,7 +389,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
389 * 389 *
390 * XXX: commands with data in MIPI_DPI_DATA? 390 * XXX: commands with data in MIPI_DPI_DATA?
391 */ 391 */
392int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd) 392int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
393{ 393{
394 struct drm_encoder *encoder = &intel_dsi->base.base; 394 struct drm_encoder *encoder = &intel_dsi->base.base;
395 struct drm_device *dev = encoder->dev; 395 struct drm_device *dev = encoder->dev;
@@ -399,7 +399,7 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
399 u32 mask; 399 u32 mask;
400 400
401 /* XXX: pipe, hs */ 401 /* XXX: pipe, hs */
402 if (intel_dsi->hs) 402 if (hs)
403 cmd &= ~DPI_LP_MODE; 403 cmd &= ~DPI_LP_MODE;
404 else 404 else
405 cmd |= DPI_LP_MODE; 405 cmd |= DPI_LP_MODE;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 54c8a234a2e0..9a18cbfa5460 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,6 +33,9 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "intel_dsi.h" 34#include "intel_dsi.h"
35 35
36#define DPI_LP_MODE_EN false
37#define DPI_HS_MODE_EN true
38
36void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable); 39void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
37 40
38int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel, 41int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
@@ -47,7 +50,7 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
47int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, 50int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
48 u8 *reqdata, int reqlen, u8 *buf, int buflen); 51 u8 *reqdata, int reqlen, u8 *buf, int buflen);
49 52
50int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd); 53int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
51 54
52/* XXX: questionable write helpers */ 55/* XXX: questionable write helpers */
53static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi, 56static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
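
With the extra bool, callers choose the transfer mode per command instead of relying on the intel_dsi->hs state, and the two defines above keep the call sites readable. Both of the following appear in this series:

	dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);	/* enable path */
	dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);	/* pre-disable path */
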
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
new file mode 100644
index 000000000000..21a0d348cedc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -0,0 +1,589 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Shobhit Kumar <shobhit.kumar@intel.com>
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include <drm/drm_edid.h>
30#include <drm/i915_drm.h>
31#include <linux/slab.h>
32#include <video/mipi_display.h>
33#include <asm/intel-mid.h>
34#include <video/mipi_display.h>
35#include "i915_drv.h"
36#include "intel_drv.h"
37#include "intel_dsi.h"
38#include "intel_dsi_cmd.h"
39
40#define MIPI_TRANSFER_MODE_SHIFT 0
41#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
42#define MIPI_PORT_SHIFT 3
43
44#define PREPARE_CNT_MAX 0x3F
45#define EXIT_ZERO_CNT_MAX 0x3F
46#define CLK_ZERO_CNT_MAX 0xFF
47#define TRAIL_CNT_MAX 0x1F
48
49#define NS_KHZ_RATIO 1000000
50
51#define GPI0_NC_0_HV_DDI0_HPD 0x4130
52#define GPIO_NC_0_HV_DDI0_PAD 0x4138
53#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
54#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
55#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
56#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
57#define GPIO_NC_3_PANEL0_VDDEN 0x4140
58#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
59#define GPIO_NC_4_PANEL0_BLKEN 0x4150
60#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
61#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
62#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
63#define GPIO_NC_6_PCONF0 0x4180
64#define GPIO_NC_6_PAD 0x4188
65#define GPIO_NC_7_PCONF0 0x4190
66#define GPIO_NC_7_PAD 0x4198
67#define GPIO_NC_8_PCONF0 0x4170
68#define GPIO_NC_8_PAD 0x4178
69#define GPIO_NC_9_PCONF0 0x4100
70#define GPIO_NC_9_PAD 0x4108
71#define GPIO_NC_10_PCONF0 0x40E0
72#define GPIO_NC_10_PAD 0x40E8
73#define GPIO_NC_11_PCONF0 0x40F0
74#define GPIO_NC_11_PAD 0x40F8
75
76struct gpio_table {
77 u16 function_reg;
78 u16 pad_reg;
79 u8 init;
80};
81
82static struct gpio_table gtable[] = {
83 { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
84 { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
85 { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
86 { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
87 { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
88 { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
89 { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
90 { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
91 { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
92 { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
93 { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
94 { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
95};
96
97static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
98{
99 u8 type, byte, mode, vc, port;
100 u16 len;
101
102 byte = *data++;
103 mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
104 vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
105 port = (byte >> MIPI_PORT_SHIFT) & 0x3;
106
107 /* LP or HS mode */
108 intel_dsi->hs = mode;
109
110 /* get packet type and increment the pointer */
111 type = *data++;
112
113 len = *((u16 *) data);
114 data += 2;
115
116 switch (type) {
117 case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
118 dsi_vc_generic_write_0(intel_dsi, vc);
119 break;
120 case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
121 dsi_vc_generic_write_1(intel_dsi, vc, *data);
122 break;
123 case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
124 dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
125 break;
126 case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
127 case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
128 case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
129 DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
130 break;
131 case MIPI_DSI_GENERIC_LONG_WRITE:
132 dsi_vc_generic_write(intel_dsi, vc, data, len);
133 break;
134 case MIPI_DSI_DCS_SHORT_WRITE:
135 dsi_vc_dcs_write_0(intel_dsi, vc, *data);
136 break;
137 case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
138 dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
139 break;
140 case MIPI_DSI_DCS_READ:
141 DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
142 break;
143 case MIPI_DSI_DCS_LONG_WRITE:
144 dsi_vc_dcs_write(intel_dsi, vc, data, len);
145 break;
146 };
147
148 data += len;
149
150 return data;
151}
152
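
Per the parser above, a send-packet element is one flags byte (bit 0 LP/HS mode, bits 1-2 virtual channel, bits 3-4 port), one packet-type byte, a little-endian u16 payload length, then the payload. A hypothetical element, with byte values chosen for illustration rather than taken from any real VBT:

	static const u8 example_elem[] = {
		0x00,				/* LP mode, VC 0, port 0 */
		MIPI_DSI_DCS_SHORT_WRITE,	/* packet type */
		0x01, 0x00,			/* payload length = 1 */
		0x29,				/* DCS set_display_on */
	};
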
153static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
154{
155 u32 delay = *((u32 *) data);
156
157 usleep_range(delay, delay + 10);
158 data += 4;
159
160 return data;
161}
162
163static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
164{
165 u8 gpio, action;
166 u16 function, pad;
167 u32 val;
168 struct drm_device *dev = intel_dsi->base.base.dev;
169 struct drm_i915_private *dev_priv = dev->dev_private;
170
171 gpio = *data++;
172
173 /* pull up/down */
174 action = *data++;
175
176 function = gtable[gpio].function_reg;
177 pad = gtable[gpio].pad_reg;
178
179 mutex_lock(&dev_priv->dpio_lock);
180 if (!gtable[gpio].init) {
181 /* program the function */
182 /* FIXME: remove constant below */
183 vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
184 gtable[gpio].init = 1;
185 }
186
187 val = 0x4 | action;
188
189 /* pull up/down */
190 vlv_gpio_nc_write(dev_priv, pad, val);
191 mutex_unlock(&dev_priv->dpio_lock);
192
193 return data;
194}
195
196typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
197static const fn_mipi_elem_exec exec_elem[] = {
198 NULL, /* reserved */
199 mipi_exec_send_packet,
200 mipi_exec_delay,
201 mipi_exec_gpio,
202 NULL, /* status read; later */
203};
204
205/*
206 * MIPI Sequence from VBT #53 parsing logic
 207 * We have already separated each sequence during bios parsing
 208 * The following is a generic execution function for any sequence
209 */
210
211static const char * const seq_name[] = {
212 "UNDEFINED",
213 "MIPI_SEQ_ASSERT_RESET",
214 "MIPI_SEQ_INIT_OTP",
215 "MIPI_SEQ_DISPLAY_ON",
216 "MIPI_SEQ_DISPLAY_OFF",
217 "MIPI_SEQ_DEASSERT_RESET"
218};
219
220static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
221{
222 u8 *data = sequence;
223 fn_mipi_elem_exec mipi_elem_exec;
224 int index;
225
226 if (!sequence)
227 return;
228
229 DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
230
231 /* go to the first element of the sequence */
232 data++;
233
234 /* parse each byte till we reach end of sequence byte - 0x00 */
235 while (1) {
236 index = *data;
237 mipi_elem_exec = exec_elem[index];
238 if (!mipi_elem_exec) {
239 DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
240 return;
241 }
242
243 /* goto element payload */
244 data++;
245
 246 /* execute the element-specific routines */
247 data = mipi_elem_exec(intel_dsi, data);
248
249 /*
250 * After processing the element, data should point to
 251 * the next element or the end of the sequence;
 252 * check whether we have reached the end of the sequence
253 */
254 if (*data == 0x00)
255 break;
256 }
257}
258
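
A complete sequence as consumed by generic_exec_sequence() is then just a sequence-id byte (indexing seq_name[]), a run of elements each introduced by an element id (1 = send packet, 2 = delay, 3 = gpio), and a 0x00 terminator. Continuing the hypothetical element from the sketch above:

	static const u8 example_seq[] = {
		0x03,		/* MIPI_SEQ_DISPLAY_ON */
		0x01,		/* element id: send packet */
		0x00, MIPI_DSI_DCS_SHORT_WRITE, 0x01, 0x00, 0x29,
		0x00,		/* end of sequence */
	};
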
259static bool generic_init(struct intel_dsi_device *dsi)
260{
261 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
262 struct drm_device *dev = intel_dsi->base.base.dev;
263 struct drm_i915_private *dev_priv = dev->dev_private;
264 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
265 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
266 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
267 u32 bits_per_pixel = 24;
268 u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
269 u32 ui_num, ui_den;
270 u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
271 u32 ths_prepare_ns, tclk_trail_ns;
272 u32 tclk_prepare_clkzero, ths_prepare_hszero;
273 u32 lp_to_hs_switch, hs_to_lp_switch;
274
275 DRM_DEBUG_KMS("\n");
276
277 intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
278 intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
279 intel_dsi->lane_count = mipi_config->lane_cnt + 1;
280 intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
281
282 if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
283 bits_per_pixel = 18;
284 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
285 bits_per_pixel = 16;
286
287 bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
288
289 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
290 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
291 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
292 intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
293 intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
294 intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
295 intel_dsi->init_count = mipi_config->master_init_timer;
296 intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
297 intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
298
299 switch (intel_dsi->escape_clk_div) {
300 case 0:
301 tlpx_ns = 50;
302 break;
303 case 1:
304 tlpx_ns = 100;
305 break;
306
307 case 2:
308 tlpx_ns = 200;
309 break;
310 default:
311 tlpx_ns = 50;
312 break;
313 }
314
315 switch (intel_dsi->lane_count) {
316 case 1:
317 case 2:
318 extra_byte_count = 2;
319 break;
320 case 3:
321 extra_byte_count = 4;
322 break;
323 case 4:
324 default:
325 extra_byte_count = 3;
326 break;
327 }
328
329 /*
 330 * ui(s) = 1/f [f in Hz]
 331 * ui(ns) = 10^9 / (f*10^6) [f in MHz] -> 10^3/f(MHz)
332 */
333
334 /* in Kbps */
335 ui_num = NS_KHZ_RATIO;
336 ui_den = bitrate;
337
338 tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
339 ths_prepare_hszero = mipi_config->ths_prepare_hszero;
340
341 /*
342 * B060
343 * LP byte clock = TLPX/ (8UI)
344 */
345 intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
346
347 /* count values in UI = (ns value) * (bitrate / (2 * 10^6))
348 *
349 * Since txddrclkhs_i is 2xUI, all the count values programmed in
350 * DPHY param register are divided by 2
351 *
352 * prepare count
353 */
354 ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
355 prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
356
357 /* exit zero count */
358 exit_zero_cnt = DIV_ROUND_UP(
359 (ths_prepare_hszero - ths_prepare_ns) * ui_den,
360 ui_num * 2
361 );
362
363 /*
 364 * Exit zero is the unified value of ths_zero and ths_exit
365 * minimum value for ths_exit = 110ns
366 * min (exit_zero_cnt * 2) = 110/UI
367 * exit_zero_cnt = 55/UI
368 */
369 if (exit_zero_cnt < (55 * ui_den / ui_num))
370 if ((55 * ui_den) % ui_num)
371 exit_zero_cnt += 1;
372
373 /* clk zero count */
374 clk_zero_cnt = DIV_ROUND_UP(
375 (tclk_prepare_clkzero - ths_prepare_ns)
376 * ui_den, 2 * ui_num);
377
378 /* trail count */
379 tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
380 trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
381
382 if (prepare_cnt > PREPARE_CNT_MAX ||
383 exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
384 clk_zero_cnt > CLK_ZERO_CNT_MAX ||
385 trail_cnt > TRAIL_CNT_MAX)
386 DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
387
388 if (prepare_cnt > PREPARE_CNT_MAX)
389 prepare_cnt = PREPARE_CNT_MAX;
390
391 if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
392 exit_zero_cnt = EXIT_ZERO_CNT_MAX;
393
394 if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
395 clk_zero_cnt = CLK_ZERO_CNT_MAX;
396
397 if (trail_cnt > TRAIL_CNT_MAX)
398 trail_cnt = TRAIL_CNT_MAX;
399
400 /* B080 */
401 intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
402 clk_zero_cnt << 8 | prepare_cnt;
403
404 /*
405 * LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
406 * + 10UI + Extra Byte Count
407 *
408 * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
409 * Extra Byte Count is calculated according to number of lanes.
410 * High Low Switch Count is the Max of LP to HS and
411 * HS to LP switch count
412 *
413 */
414 tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
415
416 /* B044 */
417 /* FIXME:
 418 * The comment above does not match the code */
419 lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
420 exit_zero_cnt * 2 + 10, 8);
421
422 hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
423
424 intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
425 intel_dsi->hs_to_lp_count += extra_byte_count;
426
427 /* B088 */
428 /* LP -> HS for clock lanes
429 * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
430 * extra byte count
 431 * 2TLPX + 1TLPX + 1 TLPX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
432 * 2(in UI) + extra byte count
433 * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
434 * 8 + extra byte count
435 */
436 intel_dsi->clk_lp_to_hs_count =
437 DIV_ROUND_UP(
438 4 * tlpx_ui + prepare_cnt * 2 +
439 clk_zero_cnt * 2,
440 8);
441
442 intel_dsi->clk_lp_to_hs_count += extra_byte_count;
443
444 /* HS->LP for Clock Lanes
445 * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
446 * Extra byte count
447 * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
 448 * In byteclks = (2*TLPX(in UI) + trail_count*2 + 8)(in UI)/8 +
449 * Extra byte count
450 */
451 intel_dsi->clk_hs_to_lp_count =
452 DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
453 8);
454 intel_dsi->clk_hs_to_lp_count += extra_byte_count;
455
456 DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
457 DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
458 "disabled" : "enabled");
459 DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
460 DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
461 DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
462 DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
463 DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
464 DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
465 DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
466 DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
467 DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
468 DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
469 DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
470 DRM_DEBUG_KMS("BTA %s\n",
471 intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
472 "disabled" : "enabled");
473
 474 /* delays in VBT are in units of 100us, so convert them
 475 * to ms here:
 476 * Delay (100us) * 100 / 1000 = Delay / 10 (ms) */
477 intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
478 intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
479 intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
480 intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
481 intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
482
483 return true;
484}
485
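
A worked pass through the timing math in generic_init(), with illustrative numbers rather than any shipping panel's VBT:

	/*
	 * mode->clock = 148500 kHz, bits_per_pixel = 24, lane_count = 4:
	 *   bitrate     = 148500 * 24 / 4 = 891000 kbps
	 *   1 UI        = NS_KHZ_RATIO / bitrate ~= 1.12 ns
	 * escape_clk_div = 0 gives tlpx_ns = 50, so:
	 *   lp_byte_clk = DIV_ROUND_UP(50 * 891000, 8 * 1000000) = 6
	 */
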
486static int generic_mode_valid(struct intel_dsi_device *dsi,
487 struct drm_display_mode *mode)
488{
489 return MODE_OK;
490}
491
492static bool generic_mode_fixup(struct intel_dsi_device *dsi,
493 const struct drm_display_mode *mode,
494 struct drm_display_mode *adjusted_mode) {
495 return true;
496}
497
498static void generic_panel_reset(struct intel_dsi_device *dsi)
499{
500 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
501 struct drm_device *dev = intel_dsi->base.base.dev;
502 struct drm_i915_private *dev_priv = dev->dev_private;
503
504 char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
505
506 generic_exec_sequence(intel_dsi, sequence);
507}
508
509static void generic_disable_panel_power(struct intel_dsi_device *dsi)
510{
511 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
512 struct drm_device *dev = intel_dsi->base.base.dev;
513 struct drm_i915_private *dev_priv = dev->dev_private;
514
515 char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
516
517 generic_exec_sequence(intel_dsi, sequence);
518}
519
520static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
521{
522 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
523 struct drm_device *dev = intel_dsi->base.base.dev;
524 struct drm_i915_private *dev_priv = dev->dev_private;
525
526 char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
527
528 generic_exec_sequence(intel_dsi, sequence);
529}
530
531static void generic_enable(struct intel_dsi_device *dsi)
532{
533 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
534 struct drm_device *dev = intel_dsi->base.base.dev;
535 struct drm_i915_private *dev_priv = dev->dev_private;
536
537 char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
538
539 generic_exec_sequence(intel_dsi, sequence);
540}
541
542static void generic_disable(struct intel_dsi_device *dsi)
543{
544 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
545 struct drm_device *dev = intel_dsi->base.base.dev;
546 struct drm_i915_private *dev_priv = dev->dev_private;
547
548 char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
549
550 generic_exec_sequence(intel_dsi, sequence);
551}
552
553static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
554{
555 return connector_status_connected;
556}
557
558static bool generic_get_hw_state(struct intel_dsi_device *dev)
559{
560 return true;
561}
562
563static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
564{
565 struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
566 struct drm_device *dev = intel_dsi->base.base.dev;
567 struct drm_i915_private *dev_priv = dev->dev_private;
568
569 dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
570 return dev_priv->vbt.lfp_lvds_vbt_mode;
571}
572
573static void generic_destroy(struct intel_dsi_device *dsi) { }
574
575/* Callbacks. We might not need them all. */
576struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
577 .init = generic_init,
578 .mode_valid = generic_mode_valid,
579 .mode_fixup = generic_mode_fixup,
580 .panel_reset = generic_panel_reset,
581 .disable_panel_power = generic_disable_panel_power,
582 .send_otp_cmds = generic_send_otp_cmds,
583 .enable = generic_enable,
584 .disable = generic_disable,
585 .detect = generic_detect,
586 .get_hw_state = generic_get_hw_state,
587 .get_modes = generic_get_modes,
588 .destroy = generic_destroy,
589};
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fe3feedfe03..a3631c0a5c28 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -285,7 +285,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
285 return true; 285 return true;
286} 286}
287 287
288static void intel_dvo_mode_set(struct intel_encoder *encoder) 288static void intel_dvo_pre_enable(struct intel_encoder *encoder)
289{ 289{
290 struct drm_device *dev = encoder->base.dev; 290 struct drm_device *dev = encoder->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -343,7 +343,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
343{ 343{
344 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 344 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
345 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 345 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
346 connector->base.id, drm_get_connector_name(connector)); 346 connector->base.id, connector->name);
347 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); 347 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
348} 348}
349 349
@@ -475,7 +475,7 @@ void intel_dvo_init(struct drm_device *dev)
475 intel_encoder->get_hw_state = intel_dvo_get_hw_state; 475 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
476 intel_encoder->get_config = intel_dvo_get_config; 476 intel_encoder->get_config = intel_dvo_get_config;
477 intel_encoder->compute_config = intel_dvo_compute_config; 477 intel_encoder->compute_config = intel_dvo_compute_config;
478 intel_encoder->mode_set = intel_dvo_mode_set; 478 intel_encoder->pre_enable = intel_dvo_pre_enable;
479 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 479 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
480 intel_connector->unregister = intel_connector_unregister; 480 intel_connector->unregister = intel_connector_unregister;
481 481
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f73ba5e6b7a8..088fe9378a4c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -343,15 +343,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
343 num_connectors_detected++; 343 num_connectors_detected++;
344 344
345 if (!enabled[i]) { 345 if (!enabled[i]) {
346 DRM_DEBUG_KMS("connector %d not enabled, skipping\n", 346 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
347 connector->base.id); 347 connector->name);
348 continue; 348 continue;
349 } 349 }
350 350
351 encoder = connector->encoder; 351 encoder = connector->encoder;
352 if (!encoder || WARN_ON(!encoder->crtc)) { 352 if (!encoder || WARN_ON(!encoder->crtc)) {
353 DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n", 353 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
354 connector->base.id); 354 connector->name);
355 enabled[i] = false; 355 enabled[i] = false;
356 continue; 356 continue;
357 } 357 }
@@ -373,16 +373,16 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
373 } 373 }
374 } 374 }
375 375
376 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", 376 DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
377 fb_conn->connector->base.id); 377 connector->name);
378 378
379 /* go for command line mode first */ 379 /* go for command line mode first */
380 modes[i] = drm_pick_cmdline_mode(fb_conn, width, height); 380 modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
381 381
382 /* try for preferred next */ 382 /* try for preferred next */
383 if (!modes[i]) { 383 if (!modes[i]) {
384 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 384 DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
385 fb_conn->connector->base.id); 385 connector->name);
386 modes[i] = drm_has_preferred_mode(fb_conn, width, 386 modes[i] = drm_has_preferred_mode(fb_conn, width,
387 height); 387 height);
388 } 388 }
@@ -390,7 +390,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
390 /* No preferred mode marked by the EDID? Are there any modes? */ 390 /* No preferred mode marked by the EDID? Are there any modes? */
391 if (!modes[i] && !list_empty(&connector->modes)) { 391 if (!modes[i] && !list_empty(&connector->modes)) {
392 DRM_DEBUG_KMS("using first mode listed on connector %s\n", 392 DRM_DEBUG_KMS("using first mode listed on connector %s\n",
393 drm_get_connector_name(connector)); 393 connector->name);
394 modes[i] = list_first_entry(&connector->modes, 394 modes[i] = list_first_entry(&connector->modes,
395 struct drm_display_mode, 395 struct drm_display_mode,
396 head); 396 head);
@@ -409,16 +409,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
409 * since the fb helper layer wants a pointer to 409 * since the fb helper layer wants a pointer to
410 * something we own. 410 * something we own.
411 */ 411 */
412 DRM_DEBUG_KMS("looking for current mode on connector %s\n",
413 connector->name);
412 intel_mode_from_pipe_config(&encoder->crtc->hwmode, 414 intel_mode_from_pipe_config(&encoder->crtc->hwmode,
413 &to_intel_crtc(encoder->crtc)->config); 415 &to_intel_crtc(encoder->crtc)->config);
414 modes[i] = &encoder->crtc->hwmode; 416 modes[i] = &encoder->crtc->hwmode;
415 } 417 }
416 crtcs[i] = new_crtc; 418 crtcs[i] = new_crtc;
417 419
418 DRM_DEBUG_KMS("connector %s on crtc %d: %s\n", 420 DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
419 drm_get_connector_name(connector), 421 connector->name,
422 pipe_name(to_intel_crtc(encoder->crtc)->pipe),
420 encoder->crtc->base.id, 423 encoder->crtc->base.id,
421 modes[i]->name); 424 modes[i]->hdisplay, modes[i]->vdisplay,
425 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
422 426
423 fallback = false; 427 fallback = false;
424 } 428 }
@@ -497,7 +501,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
497 return false; 501 return false;
498 502
499 /* Find the largest fb */ 503 /* Find the largest fb */
500 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 504 for_each_crtc(dev, crtc) {
501 intel_crtc = to_intel_crtc(crtc); 505 intel_crtc = to_intel_crtc(crtc);
502 506
503 if (!intel_crtc->active || !crtc->primary->fb) { 507 if (!intel_crtc->active || !crtc->primary->fb) {
@@ -521,7 +525,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
521 } 525 }
522 526
523 /* Now make sure all the pipes will fit into it */ 527 /* Now make sure all the pipes will fit into it */
524 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 528 for_each_crtc(dev, crtc) {
525 unsigned int cur_size; 529 unsigned int cur_size;
526 530
527 intel_crtc = to_intel_crtc(crtc); 531 intel_crtc = to_intel_crtc(crtc);
@@ -586,7 +590,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
586 drm_framebuffer_reference(&ifbdev->fb->base); 590 drm_framebuffer_reference(&ifbdev->fb->base);
587 591
588 /* Final pass to check if any active pipes don't have fbs */ 592 /* Final pass to check if any active pipes don't have fbs */
589 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 593 for_each_crtc(dev, crtc) {
590 intel_crtc = to_intel_crtc(crtc); 594 intel_crtc = to_intel_crtc(crtc);
591 595
592 if (!intel_crtc->active) 596 if (!intel_crtc->active)
@@ -692,11 +696,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
692 if (!dev_priv->fbdev) 696 if (!dev_priv->fbdev)
693 return; 697 return;
694 698
695 drm_modeset_lock_all(dev); 699 ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
696
697 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
698 if (ret) 700 if (ret)
699 DRM_DEBUG("failed to restore crtc mode\n"); 701 DRM_DEBUG("failed to restore crtc mode\n");
700
701 drm_modeset_unlock_all(dev);
702} 702}
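
drm_fb_helper_restore_fbdev_mode_unlocked() takes the modeset locks itself, which is why the drm_modeset_lock_all()/drm_modeset_unlock_all() bracket disappears above; the surviving call site reduces to:

	ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
	if (ret)
		DRM_DEBUG("failed to restore crtc mode\n");
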
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 157267aa3561..eee2bbec2958 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -418,6 +418,7 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
418} 418}
419 419
420static void g4x_set_infoframes(struct drm_encoder *encoder, 420static void g4x_set_infoframes(struct drm_encoder *encoder,
421 bool enable,
421 struct drm_display_mode *adjusted_mode) 422 struct drm_display_mode *adjusted_mode)
422{ 423{
423 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 424 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -440,7 +441,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
440 * either. */ 441 * either. */
441 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; 442 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
442 443
443 if (!intel_hdmi->has_hdmi_sink) { 444 if (!enable) {
444 if (!(val & VIDEO_DIP_ENABLE)) 445 if (!(val & VIDEO_DIP_ENABLE))
445 return; 446 return;
446 val &= ~VIDEO_DIP_ENABLE; 447 val &= ~VIDEO_DIP_ENABLE;
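
Each set_infoframes implementation now receives the sink state from its caller instead of peeking at intel_hdmi->has_hdmi_sink itself (the same !enable substitution repeats in the ibx, cpt and vlv variants below). A hypothetical call site, matching the vfunc signature from the intel_drv.h hunk earlier:

	intel_hdmi->set_infoframes(&encoder->base,
				   intel_hdmi->has_hdmi_sink,
				   adjusted_mode);
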
@@ -471,6 +472,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
471} 472}
472 473
473static void ibx_set_infoframes(struct drm_encoder *encoder, 474static void ibx_set_infoframes(struct drm_encoder *encoder,
475 bool enable,
474 struct drm_display_mode *adjusted_mode) 476 struct drm_display_mode *adjusted_mode)
475{ 477{
476 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 478 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -486,7 +488,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
486 /* See the big comment in g4x_set_infoframes() */ 488 /* See the big comment in g4x_set_infoframes() */
487 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; 489 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
488 490
489 if (!intel_hdmi->has_hdmi_sink) { 491 if (!enable) {
490 if (!(val & VIDEO_DIP_ENABLE)) 492 if (!(val & VIDEO_DIP_ENABLE))
491 return; 493 return;
492 val &= ~VIDEO_DIP_ENABLE; 494 val &= ~VIDEO_DIP_ENABLE;
@@ -518,6 +520,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
518} 520}
519 521
520static void cpt_set_infoframes(struct drm_encoder *encoder, 522static void cpt_set_infoframes(struct drm_encoder *encoder,
523 bool enable,
521 struct drm_display_mode *adjusted_mode) 524 struct drm_display_mode *adjusted_mode)
522{ 525{
523 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 526 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -531,7 +534,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
531 /* See the big comment in g4x_set_infoframes() */ 534 /* See the big comment in g4x_set_infoframes() */
532 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; 535 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
533 536
534 if (!intel_hdmi->has_hdmi_sink) { 537 if (!enable) {
535 if (!(val & VIDEO_DIP_ENABLE)) 538 if (!(val & VIDEO_DIP_ENABLE))
536 return; 539 return;
537 val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); 540 val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
@@ -554,20 +557,23 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
554} 557}
555 558
556static void vlv_set_infoframes(struct drm_encoder *encoder, 559static void vlv_set_infoframes(struct drm_encoder *encoder,
560 bool enable,
557 struct drm_display_mode *adjusted_mode) 561 struct drm_display_mode *adjusted_mode)
558{ 562{
559 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 563 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
564 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
560 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 565 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
561 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 566 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
562 u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 567 u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
563 u32 val = I915_READ(reg); 568 u32 val = I915_READ(reg);
569 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
564 570
565 assert_hdmi_port_disabled(intel_hdmi); 571 assert_hdmi_port_disabled(intel_hdmi);
566 572
567 /* See the big comment in g4x_set_infoframes() */ 573 /* See the big comment in g4x_set_infoframes() */
568 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; 574 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
569 575
570 if (!intel_hdmi->has_hdmi_sink) { 576 if (!enable) {
571 if (!(val & VIDEO_DIP_ENABLE)) 577 if (!(val & VIDEO_DIP_ENABLE))
572 return; 578 return;
573 val &= ~VIDEO_DIP_ENABLE; 579 val &= ~VIDEO_DIP_ENABLE;
@@ -576,9 +582,19 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
576 return; 582 return;
577 } 583 }
578 584
585 if (port != (val & VIDEO_DIP_PORT_MASK)) {
586 if (val & VIDEO_DIP_ENABLE) {
587 val &= ~VIDEO_DIP_ENABLE;
588 I915_WRITE(reg, val);
589 POSTING_READ(reg);
590 }
591 val &= ~VIDEO_DIP_PORT_MASK;
592 val |= port;
593 }
594
579 val |= VIDEO_DIP_ENABLE; 595 val |= VIDEO_DIP_ENABLE;
580 val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | 596 val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
581 VIDEO_DIP_ENABLE_GCP); 597 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
582 598
583 I915_WRITE(reg, val); 599 I915_WRITE(reg, val);
584 POSTING_READ(reg); 600 POSTING_READ(reg);
@@ -589,6 +605,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
589} 605}
590 606
591static void hsw_set_infoframes(struct drm_encoder *encoder, 607static void hsw_set_infoframes(struct drm_encoder *encoder,
608 bool enable,
592 struct drm_display_mode *adjusted_mode) 609 struct drm_display_mode *adjusted_mode)
593{ 610{
594 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 611 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -599,7 +616,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
599 616
600 assert_hdmi_port_disabled(intel_hdmi); 617 assert_hdmi_port_disabled(intel_hdmi);
601 618
602 if (!intel_hdmi->has_hdmi_sink) { 619 if (!enable) {
603 I915_WRITE(reg, 0); 620 I915_WRITE(reg, 0);
604 POSTING_READ(reg); 621 POSTING_READ(reg);
605 return; 622 return;
@@ -616,7 +633,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
616 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); 633 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
617} 634}
618 635
619static void intel_hdmi_mode_set(struct intel_encoder *encoder) 636static void intel_hdmi_prepare(struct intel_encoder *encoder)
620{ 637{
621 struct drm_device *dev = encoder->base.dev; 638 struct drm_device *dev = encoder->base.dev;
622 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -638,27 +655,26 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
638 else 655 else
639 hdmi_val |= SDVO_COLOR_FORMAT_8bpc; 656 hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
640 657
641 /* Required on CPT */ 658 if (crtc->config.has_hdmi_sink)
642 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
643 hdmi_val |= HDMI_MODE_SELECT_HDMI; 659 hdmi_val |= HDMI_MODE_SELECT_HDMI;
644 660
645 if (intel_hdmi->has_audio) { 661 if (crtc->config.has_audio) {
662 WARN_ON(!crtc->config.has_hdmi_sink);
646 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", 663 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
647 pipe_name(crtc->pipe)); 664 pipe_name(crtc->pipe));
648 hdmi_val |= SDVO_AUDIO_ENABLE; 665 hdmi_val |= SDVO_AUDIO_ENABLE;
649 hdmi_val |= HDMI_MODE_SELECT_HDMI;
650 intel_write_eld(&encoder->base, adjusted_mode); 666 intel_write_eld(&encoder->base, adjusted_mode);
651 } 667 }
652 668
653 if (HAS_PCH_CPT(dev)) 669 if (HAS_PCH_CPT(dev))
654 hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe); 670 hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
671 else if (IS_CHERRYVIEW(dev))
672 hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
655 else 673 else
656 hdmi_val |= SDVO_PIPE_SEL(crtc->pipe); 674 hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
657 675
658 I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); 676 I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
659 POSTING_READ(intel_hdmi->hdmi_reg); 677 POSTING_READ(intel_hdmi->hdmi_reg);
660
661 intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
662} 678}
663 679
664static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, 680static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -681,6 +697,8 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
681 697
682 if (HAS_PCH_CPT(dev)) 698 if (HAS_PCH_CPT(dev))
683 *pipe = PORT_TO_PIPE_CPT(tmp); 699 *pipe = PORT_TO_PIPE_CPT(tmp);
700 else if (IS_CHERRYVIEW(dev))
701 *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
684 else 702 else
685 *pipe = PORT_TO_PIPE(tmp); 703 *pipe = PORT_TO_PIPE(tmp);
686 704
@@ -707,6 +725,12 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
707 else 725 else
708 flags |= DRM_MODE_FLAG_NVSYNC; 726 flags |= DRM_MODE_FLAG_NVSYNC;
709 727
728 if (tmp & HDMI_MODE_SELECT_HDMI)
729 pipe_config->has_hdmi_sink = true;
730
731 if (tmp & HDMI_MODE_SELECT_HDMI)
732 pipe_config->has_audio = true;
733
710 pipe_config->adjusted_mode.flags |= flags; 734 pipe_config->adjusted_mode.flags |= flags;
711 735
712 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) 736 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -729,7 +753,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
729 u32 temp; 753 u32 temp;
730 u32 enable_bits = SDVO_ENABLE; 754 u32 enable_bits = SDVO_ENABLE;
731 755
732 if (intel_hdmi->has_audio) 756 if (intel_crtc->config.has_audio)
733 enable_bits |= SDVO_AUDIO_ENABLE; 757 enable_bits |= SDVO_AUDIO_ENABLE;
734 758
735 temp = I915_READ(intel_hdmi->hdmi_reg); 759 temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -883,9 +907,11 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
883 int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); 907 int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
884 int desired_bpp; 908 int desired_bpp;
885 909
910 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
911
886 if (intel_hdmi->color_range_auto) { 912 if (intel_hdmi->color_range_auto) {
887 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 913 /* See CEA-861-E - 5.1 Default Encoding Parameters */
888 if (intel_hdmi->has_hdmi_sink && 914 if (pipe_config->has_hdmi_sink &&
889 drm_match_cea_mode(adjusted_mode) > 1) 915 drm_match_cea_mode(adjusted_mode) > 1)
890 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; 916 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
891 else 917 else
@@ -898,13 +924,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
898 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) 924 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
899 pipe_config->has_pch_encoder = true; 925 pipe_config->has_pch_encoder = true;
900 926
927 if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
928 pipe_config->has_audio = true;
929
901 /* 930 /*
902 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 931 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
903 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi 932 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
904 * outputs. We also need to check that the higher clock still fits 933 * outputs. We also need to check that the higher clock still fits
905 * within limits. 934 * within limits.
906 */ 935 */
907 if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && 936 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
908 clock_12bpc <= portclock_limit && 937 clock_12bpc <= portclock_limit &&
909 hdmi_12bpc_possible(encoder->new_crtc)) { 938 hdmi_12bpc_possible(encoder->new_crtc)) {
910 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 939 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
@@ -944,7 +973,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
944 enum drm_connector_status status = connector_status_disconnected; 973 enum drm_connector_status status = connector_status_disconnected;
945 974
946 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 975 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
947 connector->base.id, drm_get_connector_name(connector)); 976 connector->base.id, connector->name);
948 977
949 power_domain = intel_display_port_power_domain(intel_encoder); 978 power_domain = intel_display_port_power_domain(intel_encoder);
950 intel_display_power_get(dev_priv, power_domain); 979 intel_display_power_get(dev_priv, power_domain);
@@ -1104,20 +1133,34 @@ done:
1104 return 0; 1133 return 0;
1105} 1134}
1106 1135
1136static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1137{
1138 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1139 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1140 struct drm_display_mode *adjusted_mode =
1141 &intel_crtc->config.adjusted_mode;
1142
1143 intel_hdmi_prepare(encoder);
1144
1145 intel_hdmi->set_infoframes(&encoder->base,
1146 intel_crtc->config.has_hdmi_sink,
1147 adjusted_mode);
1148}
1149
1107static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) 1150static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1108{ 1151{
1109 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1152 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1153 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1110 struct drm_device *dev = encoder->base.dev; 1154 struct drm_device *dev = encoder->base.dev;
1111 struct drm_i915_private *dev_priv = dev->dev_private; 1155 struct drm_i915_private *dev_priv = dev->dev_private;
1112 struct intel_crtc *intel_crtc = 1156 struct intel_crtc *intel_crtc =
1113 to_intel_crtc(encoder->base.crtc); 1157 to_intel_crtc(encoder->base.crtc);
1158 struct drm_display_mode *adjusted_mode =
1159 &intel_crtc->config.adjusted_mode;
1114 enum dpio_channel port = vlv_dport_to_channel(dport); 1160 enum dpio_channel port = vlv_dport_to_channel(dport);
1115 int pipe = intel_crtc->pipe; 1161 int pipe = intel_crtc->pipe;
1116 u32 val; 1162 u32 val;
1117 1163
1118 if (!IS_VALLEYVIEW(dev))
1119 return;
1120
1121 /* Enable clock channels for this port */ 1164 /* Enable clock channels for this port */
1122 mutex_lock(&dev_priv->dpio_lock); 1165 mutex_lock(&dev_priv->dpio_lock);
1123 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); 1166 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1144,6 +1187,10 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1144 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); 1187 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1145 mutex_unlock(&dev_priv->dpio_lock); 1188 mutex_unlock(&dev_priv->dpio_lock);
1146 1189
1190 intel_hdmi->set_infoframes(&encoder->base,
1191 intel_crtc->config.has_hdmi_sink,
1192 adjusted_mode);
1193
1147 intel_enable_hdmi(encoder); 1194 intel_enable_hdmi(encoder);
1148 1195
1149 vlv_wait_port_ready(dev_priv, dport); 1196 vlv_wait_port_ready(dev_priv, dport);
@@ -1159,8 +1206,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1159 enum dpio_channel port = vlv_dport_to_channel(dport); 1206 enum dpio_channel port = vlv_dport_to_channel(dport);
1160 int pipe = intel_crtc->pipe; 1207 int pipe = intel_crtc->pipe;
1161 1208
1162 if (!IS_VALLEYVIEW(dev)) 1209 intel_hdmi_prepare(encoder);
1163 return;
1164 1210
1165 /* Program Tx lane resets to default */ 1211 /* Program Tx lane resets to default */
1166 mutex_lock(&dev_priv->dpio_lock); 1212 mutex_lock(&dev_priv->dpio_lock);
@@ -1199,6 +1245,152 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1199 mutex_unlock(&dev_priv->dpio_lock); 1245 mutex_unlock(&dev_priv->dpio_lock);
1200} 1246}
1201 1247
1248static void chv_hdmi_post_disable(struct intel_encoder *encoder)
1249{
1250 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1251 struct drm_device *dev = encoder->base.dev;
1252 struct drm_i915_private *dev_priv = dev->dev_private;
1253 struct intel_crtc *intel_crtc =
1254 to_intel_crtc(encoder->base.crtc);
1255 enum dpio_channel ch = vlv_dport_to_channel(dport);
1256 enum pipe pipe = intel_crtc->pipe;
1257 u32 val;
1258
1259 mutex_lock(&dev_priv->dpio_lock);
1260
1261 /* Propagate soft reset to data lane reset */
1262 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1263 val |= CHV_PCS_REQ_SOFTRESET_EN;
1264 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1265
1266 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1267 val |= CHV_PCS_REQ_SOFTRESET_EN;
1268 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1269
1270 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1271 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1272 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1273
1274 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1275 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1276 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1277
1278 mutex_unlock(&dev_priv->dpio_lock);
1279}
1280
1281static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1282{
1283 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1284 struct drm_device *dev = encoder->base.dev;
1285 struct drm_i915_private *dev_priv = dev->dev_private;
1286 struct intel_crtc *intel_crtc =
1287 to_intel_crtc(encoder->base.crtc);
1288 enum dpio_channel ch = vlv_dport_to_channel(dport);
1289 int pipe = intel_crtc->pipe;
1290 int data, i;
1291 u32 val;
1292
1293 mutex_lock(&dev_priv->dpio_lock);
1294
1295 /* Deassert soft data lane reset*/
1296 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1297 val |= CHV_PCS_REQ_SOFTRESET_EN;
1298 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1299
1300 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1301 val |= CHV_PCS_REQ_SOFTRESET_EN;
1302 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1303
1304 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1305 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1306 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1307
1308 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1309 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1310 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1311
1312 /* Program Tx latency optimal setting */
1313 for (i = 0; i < 4; i++) {
1314 /* Set the latency optimal bit */
1315 data = (i == 1) ? 0x0 : 0x6;
1316 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
1317 data << DPIO_FRC_LATENCY_SHFIT);
1318
1319 /* Set the upar bit */
1320 data = (i == 1) ? 0x0 : 0x1;
1321 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1322 data << DPIO_UPAR_SHIFT);
1323 }
1324
1325 /* Data lane stagger programming */
1326 /* FIXME: Fix up value only after power analysis */
1327
1328 /* Clear calc init */
1329 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1330 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1331 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
1332
1333 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
1334 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1335 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
1336
1337 /* FIXME: Program the support xxx V-dB */
1338 /* Use 800mV-0dB */
1339 for (i = 0; i < 4; i++) {
1340 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
1341 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
1342 val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
1343 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
1344 }
1345
1346 for (i = 0; i < 4; i++) {
1347 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1348 val &= ~DPIO_SWING_MARGIN_MASK;
1349 val |= 102 << DPIO_SWING_MARGIN_SHIFT;
1350 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
1351 }
1352
1353 /* Disable unique transition scale */
1354 for (i = 0; i < 4; i++) {
1355 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
1356 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
1357 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
1358 }
1359
1360 /* Additional steps for 1200mV-0dB */
1361#if 0
1362 val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
1363 if (ch)
1364 val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
1365 else
1366 val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
1367 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
1368
1369 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
1370 vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
1371 (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
1372#endif
1373 /* Start swing calculation */
1374 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1375 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
1376 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
1377
1378 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
1379 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
1380 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
1381
1382 /* LRC Bypass */
1383 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1384 val |= DPIO_LRC_BYPASS;
1385 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
1386
1387 mutex_unlock(&dev_priv->dpio_lock);
1388
1389 intel_enable_hdmi(encoder);
1390
1391 vlv_wait_port_ready(dev_priv, dport);
1392}
1393
1202static void intel_hdmi_destroy(struct drm_connector *connector) 1394static void intel_hdmi_destroy(struct drm_connector *connector)
1203{ 1395{
1204 drm_connector_cleanup(connector); 1396 drm_connector_cleanup(connector);
@@ -1259,7 +1451,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1259 intel_encoder->hpd_pin = HPD_PORT_C; 1451 intel_encoder->hpd_pin = HPD_PORT_C;
1260 break; 1452 break;
1261 case PORT_D: 1453 case PORT_D:
1262 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1454 if (IS_CHERRYVIEW(dev))
1455 intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
1456 else
1457 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
1263 intel_encoder->hpd_pin = HPD_PORT_D; 1458 intel_encoder->hpd_pin = HPD_PORT_D;
1264 break; 1459 break;
1265 case PORT_A: 1460 case PORT_A:
@@ -1329,21 +1524,32 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1329 DRM_MODE_ENCODER_TMDS); 1524 DRM_MODE_ENCODER_TMDS);
1330 1525
1331 intel_encoder->compute_config = intel_hdmi_compute_config; 1526 intel_encoder->compute_config = intel_hdmi_compute_config;
1332 intel_encoder->mode_set = intel_hdmi_mode_set;
1333 intel_encoder->disable = intel_disable_hdmi; 1527 intel_encoder->disable = intel_disable_hdmi;
1334 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1528 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1335 intel_encoder->get_config = intel_hdmi_get_config; 1529 intel_encoder->get_config = intel_hdmi_get_config;
1336 if (IS_VALLEYVIEW(dev)) { 1530 if (IS_CHERRYVIEW(dev)) {
1531 intel_encoder->pre_enable = chv_hdmi_pre_enable;
1532 intel_encoder->enable = vlv_enable_hdmi;
1533 intel_encoder->post_disable = chv_hdmi_post_disable;
1534 } else if (IS_VALLEYVIEW(dev)) {
1337 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; 1535 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
1338 intel_encoder->pre_enable = vlv_hdmi_pre_enable; 1536 intel_encoder->pre_enable = vlv_hdmi_pre_enable;
1339 intel_encoder->enable = vlv_enable_hdmi; 1537 intel_encoder->enable = vlv_enable_hdmi;
1340 intel_encoder->post_disable = vlv_hdmi_post_disable; 1538 intel_encoder->post_disable = vlv_hdmi_post_disable;
1341 } else { 1539 } else {
1540 intel_encoder->pre_enable = intel_hdmi_pre_enable;
1342 intel_encoder->enable = intel_enable_hdmi; 1541 intel_encoder->enable = intel_enable_hdmi;
1343 } 1542 }
1344 1543
1345 intel_encoder->type = INTEL_OUTPUT_HDMI; 1544 intel_encoder->type = INTEL_OUTPUT_HDMI;
1346 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 1545 if (IS_CHERRYVIEW(dev)) {
1546 if (port == PORT_D)
1547 intel_encoder->crtc_mask = 1 << 2;
1548 else
1549 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1550 } else {
1551 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1552 }
1347 intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; 1553 intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
1348 /* 1554 /*
1349 * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems 1555 * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
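
Two details in the intel_hdmi.c changes are worth flagging. First, infoframe programming moves out of the old ->mode_set hook: intel_hdmi_prepare() now only programs the port register, and the ->pre_enable hooks pass crtc->config.has_hdmi_sink into ->set_infoframes(), so the DVI-vs-HDMI and audio decisions are made once at compute time and tracked in the pipe config. Second, the state-readout hunk derives both has_hdmi_sink and has_audio from HDMI_MODE_SELECT_HDMI; reading the audio state from SDVO_AUDIO_ENABLE would seem the more natural choice (later kernels switched to it), roughly:

	/* Hedged sketch of the expected audio readout; the hunk above
	 * tests HDMI_MODE_SELECT_HDMI for both fields. */
	if (tmp & HDMI_MODE_SELECT_HDMI)
		pipe_config->has_hdmi_sink = true;

	if (tmp & SDVO_AUDIO_ENABLE)
		pipe_config->has_audio = true;
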
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1ecf916474a..23126023aeba 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,13 +111,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
111 111
112 pipe_config->adjusted_mode.flags |= flags; 112 pipe_config->adjusted_mode.flags |= flags;
113 113
114 /* gen2/3 store dither state in pfit control, needs to match */
115 if (INTEL_INFO(dev)->gen < 4) {
116 tmp = I915_READ(PFIT_CONTROL);
117
118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
119 }
120
121 dotclock = pipe_config->port_clock; 114 dotclock = pipe_config->port_clock;
122 115
123 if (HAS_PCH_SPLIT(dev_priv->dev)) 116 if (HAS_PCH_SPLIT(dev_priv->dev))
@@ -126,10 +119,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
126 pipe_config->adjusted_mode.crtc_clock = dotclock; 119 pipe_config->adjusted_mode.crtc_clock = dotclock;
127} 120}
128 121
129/* The LVDS pin pair needs to be on before the DPLLs are enabled.
130 * This is an exception to the general rule that mode_set doesn't turn
131 * things on.
132 */
133static void intel_pre_enable_lvds(struct intel_encoder *encoder) 122static void intel_pre_enable_lvds(struct intel_encoder *encoder)
134{ 123{
135 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 124 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -331,15 +320,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
331 return true; 320 return true;
332} 321}
333 322
334static void intel_lvds_mode_set(struct intel_encoder *encoder)
335{
336 /*
337 * We don't do anything here, the LVDS port is fully set up in the pre
338 * enable hook - the ordering constraints for enabling the lvds port vs.
339 * enabling the display pll are too strict.
340 */
341}
342
343/** 323/**
344 * Detect the LVDS connection. 324 * Detect the LVDS connection.
345 * 325 *
@@ -354,7 +334,7 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
354 enum drm_connector_status status; 334 enum drm_connector_status status;
355 335
356 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 336 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
357 connector->base.id, drm_get_connector_name(connector)); 337 connector->base.id, connector->name);
358 338
359 status = intel_panel_detect(dev); 339 status = intel_panel_detect(dev);
360 if (status != connector_status_unknown) 340 if (status != connector_status_unknown)
@@ -953,7 +933,6 @@ void intel_lvds_init(struct drm_device *dev)
953 intel_encoder->enable = intel_enable_lvds; 933 intel_encoder->enable = intel_enable_lvds;
954 intel_encoder->pre_enable = intel_pre_enable_lvds; 934 intel_encoder->pre_enable = intel_pre_enable_lvds;
955 intel_encoder->compute_config = intel_lvds_compute_config; 935 intel_encoder->compute_config = intel_lvds_compute_config;
956 intel_encoder->mode_set = intel_lvds_mode_set;
957 intel_encoder->disable = intel_disable_lvds; 936 intel_encoder->disable = intel_disable_lvds;
958 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 937 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
959 intel_encoder->get_config = intel_lvds_get_config; 938 intel_encoder->get_config = intel_lvds_get_config;
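
In intel_lvds.c the empty intel_lvds_mode_set() stub and the gen2/3 dither readout both disappear. The dither decision now lives on the compute side, in intel_gmch_panel_fitting() (see the intel_panel.c hunks below), which keeps the computed pipe config and the hardware state readout in agreement. The relocated rule, shown here as a short excerpt of the compute-time logic rather than any new behaviour:

	/* Pre-965 hardware stores dither state in PFIT_CONTROL, so decide
	 * it while building the config for 18bpp panels instead of
	 * patching it in during readout. */
	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
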
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index acde2945eb8a..2e2c71fcc9ed 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -410,7 +410,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
410 if (bclp > 255) 410 if (bclp > 255)
411 return ASLC_BACKLIGHT_FAILED; 411 return ASLC_BACKLIGHT_FAILED;
412 412
413 mutex_lock(&dev->mode_config.mutex); 413 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
414 414
415 /* 415 /*
416 * Update backlight on all connectors that support backlight (usually 416 * Update backlight on all connectors that support backlight (usually
@@ -421,7 +421,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
421 intel_panel_set_backlight(intel_connector, bclp, 255); 421 intel_panel_set_backlight(intel_connector, bclp, 255);
422 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 422 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
423 423
424 mutex_unlock(&dev->mode_config.mutex); 424 drm_modeset_unlock(&dev->mode_config.connection_mutex);
425 425
426 426
427 return 0; 427 return 0;
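
asle_set_backlight() is an early adopter of the fine-grained modeset locking introduced in this merge: rather than the device-wide mode_config.mutex it now takes only connection_mutex, which guards the connector list and backlight state it actually touches. A minimal usage sketch of the pattern, assuming the same connector walk as the function body (a NULL acquire context simply blocks until the lock is available):

	/* Sketch of a single-lock critical section with drm_modeset_lock(). */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	list_for_each_entry(intel_connector,
			    &dev->mode_config.connector_list, base.head)
		intel_panel_set_backlight(intel_connector, bclp, 255);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
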
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 129db0c7d835..daa118978eec 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
213{ 213{
214 struct drm_device *dev = overlay->dev; 214 struct drm_device *dev = overlay->dev;
215 struct drm_i915_private *dev_priv = dev->dev_private; 215 struct drm_i915_private *dev_priv = dev->dev_private;
216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 216 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
217 int ret; 217 int ret;
218 218
219 BUG_ON(overlay->last_flip_req); 219 BUG_ON(overlay->last_flip_req);
@@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
236{ 236{
237 struct drm_device *dev = overlay->dev; 237 struct drm_device *dev = overlay->dev;
238 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 239 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
240 int ret; 240 int ret;
241 241
242 BUG_ON(overlay->active); 242 BUG_ON(overlay->active);
@@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
263{ 263{
264 struct drm_device *dev = overlay->dev; 264 struct drm_device *dev = overlay->dev;
265 struct drm_i915_private *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = dev->dev_private;
266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 266 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
267 u32 flip_addr = overlay->flip_addr; 267 u32 flip_addr = overlay->flip_addr;
268 u32 tmp; 268 u32 tmp;
269 int ret; 269 int ret;
@@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
320{ 320{
321 struct drm_device *dev = overlay->dev; 321 struct drm_device *dev = overlay->dev;
322 struct drm_i915_private *dev_priv = dev->dev_private; 322 struct drm_i915_private *dev_priv = dev->dev_private;
323 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 323 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
324 u32 flip_addr = overlay->flip_addr; 324 u32 flip_addr = overlay->flip_addr;
325 int ret; 325 int ret;
326 326
@@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
363{ 363{
364 struct drm_device *dev = overlay->dev; 364 struct drm_device *dev = overlay->dev;
365 struct drm_i915_private *dev_priv = dev->dev_private; 365 struct drm_i915_private *dev_priv = dev->dev_private;
366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 366 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
367 int ret; 367 int ret;
368 368
369 if (overlay->last_flip_req == 0) 369 if (overlay->last_flip_req == 0)
@@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
389{ 389{
390 struct drm_device *dev = overlay->dev; 390 struct drm_device *dev = overlay->dev;
391 struct drm_i915_private *dev_priv = dev->dev_private; 391 struct drm_i915_private *dev_priv = dev->dev_private;
392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 392 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
393 int ret; 393 int ret;
394 394
395 /* Only wait if there is actually an old frame to release to 395 /* Only wait if there is actually an old frame to release to
@@ -688,7 +688,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
688 u32 swidth, swidthsw, sheight, ostride; 688 u32 swidth, swidthsw, sheight, ostride;
689 689
690 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 690 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
691 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 691 BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
692 BUG_ON(!overlay); 692 BUG_ON(!overlay);
693 693
694 ret = intel_overlay_release_old_vid(overlay); 694 ret = intel_overlay_release_old_vid(overlay);
@@ -793,7 +793,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
793 int ret; 793 int ret;
794 794
795 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 795 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
796 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 796 BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
797 797
798 ret = intel_overlay_recover_from_interrupt(overlay); 798 ret = intel_overlay_recover_from_interrupt(overlay);
799 if (ret != 0) 799 if (ret != 0)
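
The intel_overlay.c hunks are mechanical: struct intel_ring_buffer becomes struct intel_engine_cs as part of the execlist refactoring, and the locking assertions follow the new locking scheme, swapping mutex_is_locked() on mode_config.mutex for its drm_modeset_lock equivalent:

	/* The pair of assertions the overlay paths now rely on. */
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
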
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb8cfb7e0974..5e6c888b4928 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -42,6 +42,59 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
42 drm_mode_set_crtcinfo(adjusted_mode, 0); 42 drm_mode_set_crtcinfo(adjusted_mode, 0);
43} 43}
44 44
45/**
46 * intel_find_panel_downclock - find the reduced downclock for LVDS/eDP in EDID
47 * @dev: drm device
48 * @fixed_mode: panel native mode
49 * @connector: LVDS/eDP connector
50 *
51 * Look for a reduced downclock mode for LVDS/eDP in the EDID.
52 * Returns the downclock mode if one is found, otherwise NULL.
53 */
54struct drm_display_mode *
55intel_find_panel_downclock(struct drm_device *dev,
56 struct drm_display_mode *fixed_mode,
57 struct drm_connector *connector)
58{
59 struct drm_display_mode *scan, *tmp_mode;
60 int temp_downclock;
61
62 temp_downclock = fixed_mode->clock;
63 tmp_mode = NULL;
64
65 list_for_each_entry(scan, &connector->probed_modes, head) {
66 /*
67 * If a probed mode has the same timings as the fixed panel
68 * mode but a lower refresh rate, a reduced downclock has
69 * been found. In that case we can program different FPx0/1
70 * dividers to dynamically select between the low and the
71 * high frequency.
72 */
73 if (scan->hdisplay == fixed_mode->hdisplay &&
74 scan->hsync_start == fixed_mode->hsync_start &&
75 scan->hsync_end == fixed_mode->hsync_end &&
76 scan->htotal == fixed_mode->htotal &&
77 scan->vdisplay == fixed_mode->vdisplay &&
78 scan->vsync_start == fixed_mode->vsync_start &&
79 scan->vsync_end == fixed_mode->vsync_end &&
80 scan->vtotal == fixed_mode->vtotal) {
81 if (scan->clock < temp_downclock) {
82 /*
83 * The downclock is already found. But we
84 * expect to find the lower downclock.
85 */
86 temp_downclock = scan->clock;
87 tmp_mode = scan;
88 }
89 }
90 }
91
92 if (temp_downclock < fixed_mode->clock)
93 return drm_mode_duplicate(dev, tmp_mode);
94 else
95 return NULL;
96}
97
45/* adjusted_mode has been preset to be the panel's fixed mode */ 98/* adjusted_mode has been preset to be the panel's fixed mode */
46void 99void
47intel_pch_panel_fitting(struct intel_crtc *intel_crtc, 100intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
@@ -308,21 +361,43 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
308 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
309 PFIT_FILTER_FUZZY); 362 PFIT_FILTER_FUZZY);
310 363
364 /* Make sure pre-965 set dither correctly for 18bpp panels. */
365 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
366 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
367
311out: 368out:
312 if ((pfit_control & PFIT_ENABLE) == 0) { 369 if ((pfit_control & PFIT_ENABLE) == 0) {
313 pfit_control = 0; 370 pfit_control = 0;
314 pfit_pgm_ratios = 0; 371 pfit_pgm_ratios = 0;
315 } 372 }
316 373
317 /* Make sure pre-965 set dither correctly for 18bpp panels. */
318 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
319 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
320
321 pipe_config->gmch_pfit.control = pfit_control; 374 pipe_config->gmch_pfit.control = pfit_control;
322 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; 375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
323 pipe_config->gmch_pfit.lvds_border_bits = border; 376 pipe_config->gmch_pfit.lvds_border_bits = border;
324} 377}
325 378
379enum drm_connector_status
380intel_panel_detect(struct drm_device *dev)
381{
382 struct drm_i915_private *dev_priv = dev->dev_private;
383
384 /* Assume that the BIOS does not lie through the OpRegion... */
385 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
386 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
387 connector_status_connected :
388 connector_status_disconnected;
389 }
390
391 switch (i915.panel_ignore_lid) {
392 case -2:
393 return connector_status_connected;
394 case -1:
395 return connector_status_disconnected;
396 default:
397 return connector_status_unknown;
398 }
399}
400
326static u32 intel_panel_compute_brightness(struct intel_connector *connector, 401static u32 intel_panel_compute_brightness(struct intel_connector *connector,
327 u32 val) 402 u32 val)
328{ 403{
@@ -795,40 +870,18 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
795 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 870 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
796} 871}
797 872
798enum drm_connector_status
799intel_panel_detect(struct drm_device *dev)
800{
801 struct drm_i915_private *dev_priv = dev->dev_private;
802
803 /* Assume that the BIOS does not lie through the OpRegion... */
804 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
805 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
806 connector_status_connected :
807 connector_status_disconnected;
808 }
809
810 switch (i915.panel_ignore_lid) {
811 case -2:
812 return connector_status_connected;
813 case -1:
814 return connector_status_disconnected;
815 default:
816 return connector_status_unknown;
817 }
818}
819
820#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 873#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
821static int intel_backlight_device_update_status(struct backlight_device *bd) 874static int intel_backlight_device_update_status(struct backlight_device *bd)
822{ 875{
823 struct intel_connector *connector = bl_get_data(bd); 876 struct intel_connector *connector = bl_get_data(bd);
824 struct drm_device *dev = connector->base.dev; 877 struct drm_device *dev = connector->base.dev;
825 878
826 mutex_lock(&dev->mode_config.mutex); 879 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
827 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", 880 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
828 bd->props.brightness, bd->props.max_brightness); 881 bd->props.brightness, bd->props.max_brightness);
829 intel_panel_set_backlight(connector, bd->props.brightness, 882 intel_panel_set_backlight(connector, bd->props.brightness,
830 bd->props.max_brightness); 883 bd->props.max_brightness);
831 mutex_unlock(&dev->mode_config.mutex); 884 drm_modeset_unlock(&dev->mode_config.connection_mutex);
832 return 0; 885 return 0;
833} 886}
834 887
@@ -840,9 +893,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
840 int ret; 893 int ret;
841 894
842 intel_runtime_pm_get(dev_priv); 895 intel_runtime_pm_get(dev_priv);
843 mutex_lock(&dev->mode_config.mutex); 896 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
844 ret = intel_panel_get_backlight(connector); 897 ret = intel_panel_get_backlight(connector);
845 mutex_unlock(&dev->mode_config.mutex); 898 drm_modeset_unlock(&dev->mode_config.connection_mutex);
846 intel_runtime_pm_put(dev_priv); 899 intel_runtime_pm_put(dev_priv);
847 900
848 return ret; 901 return ret;
@@ -1077,7 +1130,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1077 1130
1078 if (ret) { 1131 if (ret) {
1079 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", 1132 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
1080 drm_get_connector_name(connector)); 1133 connector->name);
1081 return ret; 1134 return ret;
1082 } 1135 }
1083 1136
@@ -1103,59 +1156,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
1103 intel_backlight_device_unregister(intel_connector); 1156 intel_backlight_device_unregister(intel_connector);
1104} 1157}
1105 1158
1106/**
1107 * intel_find_panel_downclock - find the reduced downclock for LVDS/eDP in EDID
1108 * @dev: drm device
1109 * @fixed_mode: panel native mode
1110 * @connector: LVDS/eDP connector
1111 *
1112 * Look for a reduced downclock mode for LVDS/eDP in the EDID.
1113 * Returns the downclock mode if one is found, otherwise NULL.
1114 */
1115struct drm_display_mode *
1116intel_find_panel_downclock(struct drm_device *dev,
1117 struct drm_display_mode *fixed_mode,
1118 struct drm_connector *connector)
1119{
1120 struct drm_display_mode *scan, *tmp_mode;
1121 int temp_downclock;
1122
1123 temp_downclock = fixed_mode->clock;
1124 tmp_mode = NULL;
1125
1126 list_for_each_entry(scan, &connector->probed_modes, head) {
1127 /*
1128 * If a probed mode has the same timings as the fixed panel
1129 * mode but a lower refresh rate, a reduced downclock has
1130 * been found. In that case we can program different FPx0/1
1131 * dividers to dynamically select between the low and the
1132 * high frequency.
1133 */
1134 if (scan->hdisplay == fixed_mode->hdisplay &&
1135 scan->hsync_start == fixed_mode->hsync_start &&
1136 scan->hsync_end == fixed_mode->hsync_end &&
1137 scan->htotal == fixed_mode->htotal &&
1138 scan->vdisplay == fixed_mode->vdisplay &&
1139 scan->vsync_start == fixed_mode->vsync_start &&
1140 scan->vsync_end == fixed_mode->vsync_end &&
1141 scan->vtotal == fixed_mode->vtotal) {
1142 if (scan->clock < temp_downclock) {
1143 /*
1144 * The downclock is already found. But we
1145 * expect to find the lower downclock.
1146 */
1147 temp_downclock = scan->clock;
1148 tmp_mode = scan;
1149 }
1150 }
1151 }
1152
1153 if (temp_downclock < fixed_mode->clock)
1154 return drm_mode_duplicate(dev, tmp_mode);
1155 else
1156 return NULL;
1157}
1158
1159/* Set up chip specific backlight functions */ 1159/* Set up chip specific backlight functions */
1160void intel_panel_init_backlight_funcs(struct drm_device *dev) 1160void intel_panel_init_backlight_funcs(struct drm_device *dev)
1161{ 1161{
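
The intel_panel.c hunks above are pure moves: intel_find_panel_downclock() and intel_panel_detect() relocate within the file, and the backlight paths pick up connection_mutex as elsewhere. The downclock matcher itself is simple enough to exercise on its own; a standalone illustration of the rule (hypothetical mini types, not the kernel structs: same timings as the fixed mode, lowest clock wins, NULL when nothing slower exists):

	#include <stddef.h>

	struct mode { int clock, hdisplay, hsync_start, hsync_end, htotal,
		      vdisplay, vsync_start, vsync_end, vtotal; };

	static int same_timings(const struct mode *a, const struct mode *b)
	{
		return a->hdisplay == b->hdisplay && a->hsync_start == b->hsync_start &&
		       a->hsync_end == b->hsync_end && a->htotal == b->htotal &&
		       a->vdisplay == b->vdisplay && a->vsync_start == b->vsync_start &&
		       a->vsync_end == b->vsync_end && a->vtotal == b->vtotal;
	}

	static const struct mode *find_downclock(const struct mode *fixed,
						 const struct mode *probed, size_t n)
	{
		const struct mode *best = NULL;
		int best_clock = fixed->clock;

		for (size_t i = 0; i < n; i++) {
			/* Same timings but a lower clock: candidate downclock. */
			if (same_timings(&probed[i], fixed) &&
			    probed[i].clock < best_clock) {
				best_clock = probed[i].clock;
				best = &probed[i];
			}
		}
		return best;	/* caller duplicates it, cf. drm_mode_duplicate() */
	}
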
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d93dcf683e8c..d1e53abec1b5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -487,7 +487,7 @@ void intel_update_fbc(struct drm_device *dev)
487 * - new fb is too large to fit in compressed buffer 487 * - new fb is too large to fit in compressed buffer
488 * - going to an unsupported config (interlace, pixel multiply, etc.) 488 * - going to an unsupported config (interlace, pixel multiply, etc.)
489 */ 489 */
490 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 490 for_each_crtc(dev, tmp_crtc) {
491 if (intel_crtc_active(tmp_crtc) && 491 if (intel_crtc_active(tmp_crtc) &&
492 to_intel_crtc(tmp_crtc)->primary_enabled) { 492 to_intel_crtc(tmp_crtc)->primary_enabled) {
493 if (crtc) { 493 if (crtc) {
@@ -1010,7 +1010,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1010{ 1010{
1011 struct drm_crtc *crtc, *enabled = NULL; 1011 struct drm_crtc *crtc, *enabled = NULL;
1012 1012
1013 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1013 for_each_crtc(dev, crtc) {
1014 if (intel_crtc_active(crtc)) { 1014 if (intel_crtc_active(crtc)) {
1015 if (enabled) 1015 if (enabled)
1016 return NULL; 1016 return NULL;
@@ -1831,6 +1831,40 @@ static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1831 return 512; 1831 return 512;
1832} 1832}
1833 1833
1834static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1835 int level, bool is_sprite)
1836{
1837 if (INTEL_INFO(dev)->gen >= 8)
1838 /* BDW primary/sprite plane watermarks */
1839 return level == 0 ? 255 : 2047;
1840 else if (INTEL_INFO(dev)->gen >= 7)
1841 /* IVB/HSW primary/sprite plane watermarks */
1842 return level == 0 ? 127 : 1023;
1843 else if (!is_sprite)
1844 /* ILK/SNB primary plane watermarks */
1845 return level == 0 ? 127 : 511;
1846 else
1847 /* ILK/SNB sprite plane watermarks */
1848 return level == 0 ? 63 : 255;
1849}
1850
1851static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1852 int level)
1853{
1854 if (INTEL_INFO(dev)->gen >= 7)
1855 return level == 0 ? 63 : 255;
1856 else
1857 return level == 0 ? 31 : 63;
1858}
1859
1860static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1861{
1862 if (INTEL_INFO(dev)->gen >= 8)
1863 return 31;
1864 else
1865 return 15;
1866}
1867
1834/* Calculate the maximum primary/sprite plane watermark */ 1868/* Calculate the maximum primary/sprite plane watermark */
1835static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 1869static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1836 int level, 1870 int level,
@@ -1839,7 +1873,6 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1839 bool is_sprite) 1873 bool is_sprite)
1840{ 1874{
1841 unsigned int fifo_size = ilk_display_fifo_size(dev); 1875 unsigned int fifo_size = ilk_display_fifo_size(dev);
1842 unsigned int max;
1843 1876
1844 /* if sprites aren't enabled, sprites get nothing */ 1877 /* if sprites aren't enabled, sprites get nothing */
1845 if (is_sprite && !config->sprites_enabled) 1878 if (is_sprite && !config->sprites_enabled)
@@ -1870,19 +1903,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1870 } 1903 }
1871 1904
1872 /* clamp to max that the registers can hold */ 1905 /* clamp to max that the registers can hold */
1873 if (INTEL_INFO(dev)->gen >= 8) 1906 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
1874 max = level == 0 ? 255 : 2047;
1875 else if (INTEL_INFO(dev)->gen >= 7)
1876 /* IVB/HSW primary/sprite plane watermarks */
1877 max = level == 0 ? 127 : 1023;
1878 else if (!is_sprite)
1879 /* ILK/SNB primary plane watermarks */
1880 max = level == 0 ? 127 : 511;
1881 else
1882 /* ILK/SNB sprite plane watermarks */
1883 max = level == 0 ? 63 : 255;
1884
1885 return min(fifo_size, max);
1886} 1907}
1887 1908
1888/* Calculate the maximum cursor plane watermark */ 1909/* Calculate the maximum cursor plane watermark */
@@ -1895,20 +1916,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1895 return 64; 1916 return 64;
1896 1917
1897 /* otherwise just report max that registers can hold */ 1918 /* otherwise just report max that registers can hold */
1898 if (INTEL_INFO(dev)->gen >= 7) 1919 return ilk_cursor_wm_reg_max(dev, level);
1899 return level == 0 ? 63 : 255;
1900 else
1901 return level == 0 ? 31 : 63;
1902}
1903
1904/* Calculate the maximum FBC watermark */
1905static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
1906{
1907 /* max that registers can hold */
1908 if (INTEL_INFO(dev)->gen >= 8)
1909 return 31;
1910 else
1911 return 15;
1912} 1920}
1913 1921
1914static void ilk_compute_wm_maximums(const struct drm_device *dev, 1922static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1920,7 +1928,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
1920 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1928 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1921 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1929 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1922 max->cur = ilk_cursor_wm_max(dev, level, config); 1930 max->cur = ilk_cursor_wm_max(dev, level, config);
1923 max->fbc = ilk_fbc_wm_max(dev); 1931 max->fbc = ilk_fbc_wm_reg_max(dev);
1932}
1933
1934static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1935 int level,
1936 struct ilk_wm_maximums *max)
1937{
1938 max->pri = ilk_plane_wm_reg_max(dev, level, false);
1939 max->spr = ilk_plane_wm_reg_max(dev, level, true);
1940 max->cur = ilk_cursor_wm_reg_max(dev, level);
1941 max->fbc = ilk_fbc_wm_reg_max(dev);
1924} 1942}
1925 1943
1926static bool ilk_validate_wm_level(int level, 1944static bool ilk_validate_wm_level(int level,
@@ -2059,7 +2077,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2059 wm[3] *= 2; 2077 wm[3] *= 2;
2060} 2078}
2061 2079
2062static int ilk_wm_max_level(const struct drm_device *dev) 2080int ilk_wm_max_level(const struct drm_device *dev)
2063{ 2081{
2064 /* how many WM levels are we expecting */ 2082 /* how many WM levels are we expecting */
2065 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2083 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2155,38 +2173,52 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
2155} 2173}
2156 2174
2157static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2175static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2158 struct ilk_pipe_wm_parameters *p, 2176 struct ilk_pipe_wm_parameters *p)
2159 struct intel_wm_config *config)
2160{ 2177{
2161 struct drm_device *dev = crtc->dev; 2178 struct drm_device *dev = crtc->dev;
2162 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2179 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2163 enum pipe pipe = intel_crtc->pipe; 2180 enum pipe pipe = intel_crtc->pipe;
2164 struct drm_plane *plane; 2181 struct drm_plane *plane;
2165 2182
2166 p->active = intel_crtc_active(crtc); 2183 if (!intel_crtc_active(crtc))
2167 if (p->active) { 2184 return;
2168 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2169 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2170 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2171 p->cur.bytes_per_pixel = 4;
2172 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2173 p->cur.horiz_pixels = intel_crtc->cursor_width;
2174 /* TODO: for now, assume primary and cursor planes are always enabled. */
2175 p->pri.enabled = true;
2176 p->cur.enabled = true;
2177 }
2178 2185
2179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 2186 p->active = true;
2180 config->num_pipes_active += intel_crtc_active(crtc); 2187 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2188 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2189 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2190 p->cur.bytes_per_pixel = 4;
2191 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2192 p->cur.horiz_pixels = intel_crtc->cursor_width;
2193 /* TODO: for now, assume primary and cursor planes are always enabled. */
2194 p->pri.enabled = true;
2195 p->cur.enabled = true;
2181 2196
2182 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 2197 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2183 struct intel_plane *intel_plane = to_intel_plane(plane); 2198 struct intel_plane *intel_plane = to_intel_plane(plane);
2184 2199
2185 if (intel_plane->pipe == pipe) 2200 if (intel_plane->pipe == pipe) {
2186 p->spr = intel_plane->wm; 2201 p->spr = intel_plane->wm;
2202 break;
2203 }
2204 }
2205}
2206
2207static void ilk_compute_wm_config(struct drm_device *dev,
2208 struct intel_wm_config *config)
2209{
2210 struct intel_crtc *intel_crtc;
2187 2211
2188 config->sprites_enabled |= intel_plane->wm.enabled; 2212 /* Compute the currently _active_ config */
2189 config->sprites_scaled |= intel_plane->wm.scaled; 2213 for_each_intel_crtc(dev, intel_crtc) {
2214 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2215
2216 if (!wm->pipe_enabled)
2217 continue;
2218
2219 config->sprites_enabled |= wm->sprites_enabled;
2220 config->sprites_scaled |= wm->sprites_scaled;
2221 config->num_pipes_active++;
2190 } 2222 }
2191} 2223}
2192 2224
@@ -2206,8 +2238,9 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2206 }; 2238 };
2207 struct ilk_wm_maximums max; 2239 struct ilk_wm_maximums max;
2208 2240
2209 /* LP0 watermarks always use 1/2 DDB partitioning */ 2241 pipe_wm->pipe_enabled = params->active;
2210 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2242 pipe_wm->sprites_enabled = params->spr.enabled;
2243 pipe_wm->sprites_scaled = params->spr.scaled;
2211 2244
2212 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2245 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2213 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2246 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
@@ -2217,15 +2250,37 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2217 if (params->spr.scaled) 2250 if (params->spr.scaled)
2218 max_level = 0; 2251 max_level = 0;
2219 2252
2220 for (level = 0; level <= max_level; level++) 2253 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2221 ilk_compute_wm_level(dev_priv, level, params,
2222 &pipe_wm->wm[level]);
2223 2254
2224 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2255 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2225 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2256 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2226 2257
2258 /* LP0 watermarks always use 1/2 DDB partitioning */
2259 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2260
2227 /* At least LP0 must be valid */ 2261 /* At least LP0 must be valid */
2228 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); 2262 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2263 return false;
2264
2265 ilk_compute_wm_reg_maximums(dev, 1, &max);
2266
2267 for (level = 1; level <= max_level; level++) {
2268 struct intel_wm_level wm = {};
2269
2270 ilk_compute_wm_level(dev_priv, level, params, &wm);
2271
2272 /*
2273 * Disable any watermark level that exceeds the
2274 * register maximums since such watermarks are
2275 * always invalid.
2276 */
2277 if (!ilk_validate_wm_level(level, &max, &wm))
2278 break;
2279
2280 pipe_wm->wm[level] = wm;
2281 }
2282
2283 return true;
2229} 2284}
2230 2285
2231/* 2286/*
@@ -2237,20 +2292,28 @@ static void ilk_merge_wm_level(struct drm_device *dev,
2237{ 2292{
2238 const struct intel_crtc *intel_crtc; 2293 const struct intel_crtc *intel_crtc;
2239 2294
2240 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { 2295 ret_wm->enable = true;
2241 const struct intel_wm_level *wm = 2296
2242 &intel_crtc->wm.active.wm[level]; 2297 for_each_intel_crtc(dev, intel_crtc) {
2298 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2299 const struct intel_wm_level *wm = &active->wm[level];
2300
2301 if (!active->pipe_enabled)
2302 continue;
2243 2303
2304 /*
2305 * The watermark values may have been used in the past,
2306 * so we must maintain them in the registers for some
2307 * time even if the level is now disabled.
2308 */
2244 if (!wm->enable) 2309 if (!wm->enable)
2245 return; 2310 ret_wm->enable = false;
2246 2311
2247 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 2312 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2248 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 2313 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2249 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 2314 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2250 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 2315 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2251 } 2316 }
2252
2253 ret_wm->enable = true;
2254} 2317}
2255 2318
2256/* 2319/*
@@ -2262,6 +2325,7 @@ static void ilk_wm_merge(struct drm_device *dev,
2262 struct intel_pipe_wm *merged) 2325 struct intel_pipe_wm *merged)
2263{ 2326{
2264 int level, max_level = ilk_wm_max_level(dev); 2327 int level, max_level = ilk_wm_max_level(dev);
2328 int last_enabled_level = max_level;
2265 2329
2266 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 2330 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2267 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && 2331 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
@@ -2277,15 +2341,19 @@ static void ilk_wm_merge(struct drm_device *dev,
2277 2341
2278 ilk_merge_wm_level(dev, level, wm); 2342 ilk_merge_wm_level(dev, level, wm);
2279 2343
2280 if (!ilk_validate_wm_level(level, max, wm)) 2344 if (level > last_enabled_level)
2281 break; 2345 wm->enable = false;
2346 else if (!ilk_validate_wm_level(level, max, wm))
2347 /* make sure all following levels get disabled */
2348 last_enabled_level = level - 1;
2282 2349
2283 /* 2350 /*
2284 * The spec says it is preferred to disable 2351 * The spec says it is preferred to disable
2285 * FBC WMs instead of disabling a WM level. 2352 * FBC WMs instead of disabling a WM level.
2286 */ 2353 */
2287 if (wm->fbc_val > max->fbc) { 2354 if (wm->fbc_val > max->fbc) {
2288 merged->fbc_wm_enabled = false; 2355 if (wm->enable)
2356 merged->fbc_wm_enabled = false;
2289 wm->fbc_val = 0; 2357 wm->fbc_val = 0;
2290 } 2358 }
2291 } 2359 }
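
The ilk_wm_merge() rework above changes policy rather than arithmetic: instead of breaking out of the loop at the first invalid level (leaving whatever happened to be in the higher-level registers), every level is still merged and populated, and levels at or above the first invalid one are force-disabled. A standalone illustration of that last_enabled_level policy (hypothetical mini types, validation reduced to a single bound check):

	struct wm_level { int enable; unsigned int pri, max_pri; };

	static void merge_levels(struct wm_level *wm, int max_level)
	{
		int last_enabled_level = max_level;

		for (int level = 1; level <= max_level; level++) {
			if (level > last_enabled_level) {
				/* Force-disable all levels above the first bad one,
				 * but keep their values for register programming. */
				wm[level].enable = 0;
			} else if (wm[level].pri > wm[level].max_pri) {
				/* First invalid level: disable it and everything
				 * that follows. */
				wm[level].enable = 0;
				last_enabled_level = level - 1;
			}
		}
	}
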
@@ -2340,14 +2408,19 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 		level = ilk_wm_lp_to_level(wm_lp, merged);
 
 		r = &merged->wm[level];
-		if (!r->enable)
-			break;
 
-		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+		/*
+		 * Maintain the watermark values even if the level is
+		 * disabled. Doing otherwise could cause underruns.
+		 */
+		results->wm_lp[wm_lp - 1] =
 			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
 			(r->pri_val << WM1_LP_SR_SHIFT) |
 			r->cur_val;
 
+		if (r->enable)
+			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+
 		if (INTEL_INFO(dev)->gen >= 8)
 			results->wm_lp[wm_lp - 1] |=
 				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
@@ -2355,6 +2428,10 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 			results->wm_lp[wm_lp - 1] |=
 				r->fbc_val << WM1_LP_FBC_SHIFT;
 
+		/*
+		 * Always set WM1S_LP_EN when spr_val != 0, even if the
+		 * level is disabled. Doing otherwise could cause underruns.
+		 */
 		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
 			WARN_ON(wm_lp != 1);
 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
@@ -2363,7 +2440,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 	}
 
 	/* LP0 register values */
-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, intel_crtc) {
 		enum pipe pipe = intel_crtc->pipe;
 		const struct intel_wm_level *r =
 			&intel_crtc->wm.active.wm[0];
@@ -2598,7 +2675,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct intel_wm_config config = {};
 
-	ilk_compute_wm_parameters(crtc, &params, &config);
+	ilk_compute_wm_parameters(crtc, &params);
 
 	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
@@ -2607,6 +2684,8 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 
 	intel_crtc->wm.active = pipe_wm;
 
+	ilk_compute_wm_config(dev, &config);
+
 	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
 	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
@@ -2673,7 +2752,9 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
-	if (intel_crtc_active(crtc)) {
+	active->pipe_enabled = intel_crtc_active(crtc);
+
+	if (active->pipe_enabled) {
 		u32 tmp = hw->wm_pipe[pipe];
 
 		/*
@@ -2706,7 +2787,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	for_each_crtc(dev, crtc)
 		ilk_pipe_wm_get_hw_state(crtc);
 
 	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -2714,8 +2795,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
 
 	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	if (INTEL_INFO(dev)->gen >= 7) {
+		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	}
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
@@ -3071,6 +3154,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED;
 
+	if (IS_GEN8(dev_priv->dev))
+		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
 	return ~mask;
 }
 
@@ -3091,7 +3177,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	if (val != dev_priv->rps.cur_freq) {
 		gen6_set_rps_thresholds(dev_priv, val);
 
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			I915_WRITE(GEN6_RPNSWREQ,
 				   HSW_FREQUENCY(val));
 		else
@@ -3134,16 +3220,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	/* Mask turbo interrupt so that they will not come in between */
 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 
-	/* Bring up the Gfx clock */
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
-		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
-		   VLV_GFX_CLK_FORCE_ON_BIT);
-
-	if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
-		       I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
-		DRM_ERROR("GFX_CLK_ON request timed out\n");
-		return;
-	}
+	vlv_force_gfx_clock(dev_priv, true);
 
 	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
@@ -3154,10 +3231,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 		       & GENFREQSTATUS) == 0, 5))
 		DRM_ERROR("timed out waiting for Punit\n");
 
-	/* Release the Gfx clock */
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
-		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
-		   ~VLV_GFX_CLK_FORCE_ON_BIT);
+	vlv_force_gfx_clock(dev_priv, false);
 
 	I915_WRITE(GEN6_PMINTRMSK,
 		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
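Both removed register sequences are folded into a vlv_force_gfx_clock() helper that lives outside this excerpt. Judging purely from the code deleted above, its body is presumably close to the following sketch (the return type and exact error handling are assumptions; the real helper elsewhere in the patch may differ):

    /* Presumed shape of vlv_force_gfx_clock(), reconstructed from the
     * deleted lines above. */
    static int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
    {
    	u32 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);

    	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
    	if (force_on)
    		val |= VLV_GFX_CLK_FORCE_ON_BIT;
    	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

    	if (!force_on)
    		return 0;

    	/* wait for the clock to actually report as running */
    	if (wait_for(I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
    		     VLV_GFX_CLK_STATUS_BIT, 5)) {
    		DRM_ERROR("GFX_CLK_ON request timed out\n");
    		return -ETIMEDOUT;
    	}
    	return 0;
    }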
@@ -3215,6 +3289,26 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
+static void gen8_disable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+				   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+}
+
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3241,7 +3335,10 @@ static void gen6_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
-	gen6_disable_rps_interrupts(dev);
+	if (IS_BROADWELL(dev))
+		gen8_disable_rps_interrupts(dev);
+	else
+		gen6_disable_rps_interrupts(dev);
 }
 
 static void valleyview_disable_rps(struct drm_device *dev)
@@ -3255,21 +3352,44 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 {
+	if (IS_VALLEYVIEW(dev)) {
+		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+			mode = GEN6_RC_CTL_RC6_ENABLE;
+		else
+			mode = 0;
+	}
 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
 		 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
 		 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
 		 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 }
 
-int intel_enable_rc6(const struct drm_device *dev)
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
 	/* No RC6 before Ironlake */
 	if (INTEL_INFO(dev)->gen < 5)
 		return 0;
 
+	/* RC6 is only on Ironlake mobile not on desktop */
+	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+		return 0;
+
 	/* Respect the kernel parameter if it is set */
-	if (i915.enable_rc6 >= 0)
-		return i915.enable_rc6;
+	if (enable_rc6 >= 0) {
+		int mask;
+
+		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+			       INTEL_RC6pp_ENABLE;
+		else
+			mask = INTEL_RC6_ENABLE;
+
+		if ((enable_rc6 & mask) != enable_rc6)
+			DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+				 enable_rc6 & mask, enable_rc6, mask);
+
+		return enable_rc6 & mask;
+	}
 
 	/* Disable RC6 on Ironlake */
 	if (INTEL_INFO(dev)->gen == 5)
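Concretely, sanitize_rc6_option() clamps the user's i915.enable_rc6 bitmask to what the platform supports: on SNB/IVB a request for RC6|RC6p|RC6pp passes through, everywhere else only plain RC6 survives. A small standalone illustration (the flag values are assumed to mirror the driver's INTEL_RC6*_ENABLE definitions of 1, 2 and 4):

    #include <stdio.h>

    #define RC6_ENABLE   (1 << 0) /* assumed to match INTEL_RC6_ENABLE */
    #define RC6p_ENABLE  (1 << 1)
    #define RC6pp_ENABLE (1 << 2)

    static int sanitize_rc6(int requested, int valid_mask)
    {
    	if ((requested & valid_mask) != requested)
    		printf("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
    		       requested & valid_mask, requested, valid_mask);
    	return requested & valid_mask;
    }

    int main(void)
    {
    	/* SNB/IVB: deep states allowed, request passes through */
    	sanitize_rc6(RC6_ENABLE | RC6p_ENABLE,
    		     RC6_ENABLE | RC6p_ENABLE | RC6pp_ENABLE);
    	/* other platforms: clamped to plain RC6, adjustment is logged */
    	sanitize_rc6(RC6_ENABLE | RC6p_ENABLE, RC6_ENABLE);
    	return 0;
    }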
@@ -3281,6 +3401,22 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return INTEL_RC6_ENABLE;
 }
 
+int intel_enable_rc6(const struct drm_device *dev)
+{
+	return i915.enable_rc6;
+}
+
+static void gen8_enable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	WARN_ON(dev_priv->rps.pm_iir);
+	bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3292,10 +3428,31 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
+{
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+	/* XXX: only BYT has a special efficient freq */
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+}
+
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0, rp_state_cap;
 	int unused;
 
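parse_rp_state_cap() simply unpacks three 8-bit frequency fields, each in 50 MHz units, out of the RP_STATE_CAP register. Decoded standalone (the sample register value is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t rp_state_cap = 0x0c0e16; /* made-up sample value */

    	unsigned int rp0 = (rp_state_cap >>  0) & 0xff; /* max sustained */
    	unsigned int rp1 = (rp_state_cap >>  8) & 0xff; /* "efficient" freq */
    	unsigned int rpn = (rp_state_cap >> 16) & 0xff; /* minimum */

    	/* prints: RP0 1100 MHz, RP1 700 MHz, RPn 600 MHz */
    	printf("RP0 %u MHz, RP1 %u MHz, RPn %u MHz\n",
    	       rp0 * 50, rp1 * 50, rpn * 50);
    	return 0;
    }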
@@ -3310,6 +3467,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* 2b: Program RC6 thresholds.*/
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3329,8 +3487,10 @@ static void gen8_enable_rps(struct drm_device *dev)
 		rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
-	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
-	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+	I915_WRITE(GEN6_RPNSWREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
@@ -3346,11 +3506,15 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
+	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
+	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
 	/* 5: Enable RPS */
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
@@ -3359,7 +3523,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
 
-	gen6_enable_rps_interrupts(dev);
+	gen8_enable_rps_interrupts(dev);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -3367,7 +3531,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	u32 rp_state_cap;
 	u32 gt_perf_status;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
@@ -3396,23 +3560,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* All of these values are in units of 50MHz */
-	dev_priv->rps.cur_freq = 0;
-	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
-	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
-	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
-	/* XXX: only BYT has a special efficient freq */
-	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	/* hw_max = RP0 until we check for overclocking */
-	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3494,7 +3642,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-void gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
@@ -3564,6 +3712,18 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	}
 }
 
+void gen6_update_ring_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	__gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
 	u32 val, rp0;
3569 u32 val, rp0; 3729 u32 val, rp0;
@@ -3658,10 +3818,49 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
3658 dev_priv->vlv_pctx = NULL; 3818 dev_priv->vlv_pctx = NULL;
3659} 3819}
3660 3820
3821static void valleyview_init_gt_powersave(struct drm_device *dev)
3822{
3823 struct drm_i915_private *dev_priv = dev->dev_private;
3824
3825 valleyview_setup_pctx(dev);
3826
3827 mutex_lock(&dev_priv->rps.hw_lock);
3828
3829 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3830 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3831 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3832 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3833 dev_priv->rps.max_freq);
3834
3835 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3836 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3837 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3838 dev_priv->rps.efficient_freq);
3839
3840 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3841 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3842 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3843 dev_priv->rps.min_freq);
3844
3845 /* Preserve min/max settings in case of re-init */
3846 if (dev_priv->rps.max_freq_softlimit == 0)
3847 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3848
3849 if (dev_priv->rps.min_freq_softlimit == 0)
3850 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3851
3852 mutex_unlock(&dev_priv->rps.hw_lock);
3853}
3854
3855static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
3856{
3857 valleyview_cleanup_pctx(dev);
3858}
3859
3661static void valleyview_enable_rps(struct drm_device *dev) 3860static void valleyview_enable_rps(struct drm_device *dev)
3662{ 3861{
3663 struct drm_i915_private *dev_priv = dev->dev_private; 3862 struct drm_i915_private *dev_priv = dev->dev_private;
3664 struct intel_ring_buffer *ring; 3863 struct intel_engine_cs *ring;
3665 u32 gtfifodbg, val, rc6_mode = 0; 3864 u32 gtfifodbg, val, rc6_mode = 0;
3666 int i; 3865 int i;
3667 3866
@@ -3724,29 +3923,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
 			 dev_priv->rps.cur_freq);
 
-	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
-	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-			 dev_priv->rps.max_freq);
-
-	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-			 dev_priv->rps.efficient_freq);
-
-	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-			 dev_priv->rps.min_freq);
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
 			 dev_priv->rps.efficient_freq);
@@ -3815,7 +3991,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	bool was_interruptible;
 	int ret;
 
@@ -3873,7 +4049,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 
-	intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
+	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4327,7 +4503,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	bool ret = false;
 	int i;
 
@@ -4487,14 +4663,16 @@ static void intel_init_emon(struct drm_device *dev)
 
 void intel_init_gt_powersave(struct drm_device *dev)
 {
+	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
 	if (IS_VALLEYVIEW(dev))
-		valleyview_setup_pctx(dev);
+		valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
 	if (IS_VALLEYVIEW(dev))
-		valleyview_cleanup_pctx(dev);
+		valleyview_cleanup_gt_powersave(dev);
 }
 
 void intel_disable_gt_powersave(struct drm_device *dev)
@@ -4507,8 +4685,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+		if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
+			intel_runtime_pm_put(dev_priv);
+
 		cancel_work_sync(&dev_priv->rps.work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
@@ -4533,13 +4713,15 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 		valleyview_enable_rps(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
-		gen6_update_ring_freq(dev);
+		__gen6_update_ring_freq(dev);
 	} else {
 		gen6_enable_rps(dev);
-		gen6_update_ring_freq(dev);
+		__gen6_update_ring_freq(dev);
 	}
 	dev_priv->rps.enabled = true;
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 void intel_enable_gt_powersave(struct drm_device *dev)
@@ -4547,20 +4729,38 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (IS_IRONLAKE_M(dev)) {
+		mutex_lock(&dev->struct_mutex);
 		ironlake_enable_drps(dev);
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		mutex_unlock(&dev->struct_mutex);
+	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
 		 * to make resume and init faster.
+		 *
+		 * We depend on the HW RC6 power context save/restore
+		 * mechanism when entering D3 through runtime PM suspend. So
+		 * disable RPM until RPS/RC6 is properly setup. We can only
+		 * get here via the driver load/system resume/runtime resume
+		 * paths, so the _noresume version is enough (and in case of
+		 * runtime resume it's necessary).
 		 */
-		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
-				      round_jiffies_up_relative(HZ));
+		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+					  round_jiffies_up_relative(HZ)))
+			intel_runtime_pm_get_noresume(dev_priv);
 	}
 }
 
+void intel_reset_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->rps.enabled = false;
+	intel_enable_gt_powersave(dev);
+}
+
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
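The runtime-PM reference handling introduced around delayed_resume_work is easy to misread: a reference is taken only when the work is actually queued, and it is dropped either by the work item itself (intel_gen6_powersave_work() above) or by whoever successfully cancels the pending work. Reduced to the pattern (kernel context; the function names here are illustrative, and both schedule_delayed_work() and cancel_delayed_work_sync() report whether the work was newly queued / still pending, which identifies the owner of the reference):

    static void powersave_enable(struct drm_i915_private *dev_priv)
    {
    	/* ref taken only if the work was not already pending */
    	if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
    				  round_jiffies_up_relative(HZ)))
    		intel_runtime_pm_get_noresume(dev_priv);
    }

    static void powersave_disable(struct drm_i915_private *dev_priv)
    {
    	/* cancelled work will never run, so drop its reference here;
    	 * if it did run, the work item dropped its own ref already */
    	if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
    		intel_runtime_pm_put(dev_priv);
    }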
@@ -4666,6 +4866,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ilk */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 
 	ibx_init_clock_gating(dev);
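The WaDisable_RenderCache_OperationalFlush writes added throughout this file rely on i915's masked registers: the upper 16 bits of the written value act as a per-bit write enable for the lower 16 bits, so independent workarounds can set or clear individual bits of CACHE_MODE_0 without a read-modify-write. From memory of i915_reg.h (the exact definitions may differ slightly), the helpers are roughly:

    #define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)	((a) << 16)

    /* e.g. assuming RC_OP_FLUSH_ENABLE == (1 << 0):
     * _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE) == 0x00010000,
     * i.e. "allow writing bit 0, and write it as 0" -- the hardware
     * leaves all other bits of the register untouched. */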
@@ -4741,6 +4944,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN6_GT_MODE,
 		   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/*
 	 * BSpec recommends 8x4 when MSAA is used,
 	 * however in practice 16x4 seems fastest.
@@ -4909,6 +5115,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
+	/* WaDisableDopClockGating:bdw May not be needed for production */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
 	/* WaSwitchSolVfFArbitrationPriority:bdw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -4980,6 +5190,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_FF_THREAD_MODE,
 		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 
+	/* WaDisable_RenderCache_OperationalFlush:hsw */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* enable HiZ Raw Stall Optimization */
 	I915_WRITE(CACHE_MODE_0_GEN7,
 		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
@@ -5032,6 +5245,9 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ivb */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -5126,6 +5342,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
+	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
+			 dev_priv->vlv_cdclk_freq);
+
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5143,6 +5363,9 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 			_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
 					   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:vlv */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* WaForceL3Serialization:vlv */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -5165,8 +5388,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN6_UCGCTL2,
 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-	/* WaDisableL3Bank2xClockGate:vlv */
-	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+	/* WaDisableL3Bank2xClockGate:vlv
+	 * Disabling L3 clock gating- MMIO 940c[25] = 1
+	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
@@ -5191,6 +5417,59 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+	/* WaDisablePartialInstShootdown:chv */
+	I915_WRITE(GEN8_ROW_CHICKEN,
+		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+	/* WaDisableThreadStallDopClockGating:chv */
+	I915_WRITE(GEN8_ROW_CHICKEN,
+		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+	/* WaVSRefCountFullforceMissDisable:chv */
+	/* WaDSRefCountFullforceMissDisable:chv */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) &
+		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+	/* WaDisableSemaphoreAndSyncFlipWait:chv */
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+	/* WaDisableCSUnitClockGating:chv */
+	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSDEUnitClockGating:chv */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+	I915_WRITE(HALF_SLICE_CHICKEN3,
+		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+
+	/* WaDisableGunitClockGating:chv (pre-production hw) */
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
+		   GINT_DIS);
+
+	/* WaDisableFfDopClockGating:chv (pre-production hw) */
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+
+	/* WaDisableDopClockGating:chv (pre-production hw) */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+}
+
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5212,6 +5491,9 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:g4x */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 }
 
@@ -5226,6 +5508,9 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 	I915_WRITE16(DEUC, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
@@ -5240,6 +5525,9 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(RENCLK_GATE_D2, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void gen3_init_clock_gating(struct drm_device *dev)
@@ -5256,6 +5544,12 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 	/* IIR "flip pending" means done if this bit is set */
 	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+	/* interrupts should cause a wake up from C3 */
+	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+
+	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 }
 
 static void i85x_init_clock_gating(struct drm_device *dev)
@@ -5263,6 +5557,10 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+
+	/* interrupts should cause a wake up from C3 */
+	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
 }
 
 static void i830_init_clock_gating(struct drm_device *dev)
@@ -5314,10 +5612,25 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
 				enum intel_display_power_domain domain)
 {
 	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	bool is_enabled;
+	int i;
+
+	if (dev_priv->pm.suspended)
+		return false;
 
 	power_domains = &dev_priv->power_domains;
+	is_enabled = true;
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		if (power_well->always_on)
+			continue;
 
-	return power_domains->domain_use_count[domain];
+		if (!power_well->count) {
+			is_enabled = false;
+			break;
+		}
+	}
+	return is_enabled;
 }
 
 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
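After this change the software-state query walks the individual power wells instead of trusting the per-domain refcount: a domain counts as enabled only if every non-always-on well feeding it currently holds a reference. The same rule, lifted out of the driver structures into a standalone sketch (simplified types, not the real i915_power_well):

    #include <stdbool.h>
    #include <stddef.h>

    struct power_well {
    	bool always_on;
    	int count;             /* enable refcount */
    	unsigned long domains; /* bitmask of domains this well feeds */
    };

    static bool domain_enabled_sw(const struct power_well *wells, size_t n,
    			      int domain)
    {
    	for (size_t i = 0; i < n; i++) {
    		if (!(wells[i].domains & (1UL << domain)) || wells[i].always_on)
    			continue;
    		if (wells[i].count == 0)
    			return false; /* one unpowered well disables the domain */
    	}
    	return true;
    }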
@@ -5392,33 +5705,6 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
-{
-	assert_spin_locked(&dev->vbl_lock);
-
-	dev->vblank[pipe].last = 0;
-}
-
-static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	enum pipe pipe;
-	unsigned long irqflags;
-
-	/*
-	 * After this, the registers on the pipes that are part of the power
-	 * well will become zero, so we have to adjust our counters according to
-	 * that.
-	 *
-	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
-	 */
-	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	for_each_pipe(pipe)
-		if (pipe != PIPE_A)
-			reset_vblank_counter(dev, pipe);
-	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			       struct i915_power_well *power_well, bool enable)
 {
@@ -5447,8 +5733,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
-
-			hsw_power_well_post_disable(dev_priv);
 		}
 	}
 }
@@ -5489,13 +5773,34 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-			       struct i915_power_well *power_well, bool enable)
+void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+			  enum punit_power_well power_well_id, bool enable)
 {
-	enum punit_power_well power_well_id = power_well->data;
+	struct drm_device *dev = dev_priv->dev;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
+	enum pipe pipe;
+
+	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+		if (enable) {
+			/*
+			 * Enable the CRI clock source so we can get at the
+			 * display and the reference clock for VGA
+			 * hotplug / manual detection.
+			 */
+			I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+				   DPLL_REFA_CLK_ENABLE_VLV |
+				   DPLL_INTEGRATED_CRI_CLK_VLV);
+			udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+		} else {
+			for_each_pipe(pipe)
+				assert_pll_disabled(dev_priv, pipe);
+			/* Assert common reset */
+			I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
+				   ~DPIO_CMNRST);
+		}
+	}
 
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5523,6 +5828,28 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 
 out:
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *    b. The other bits such as sfr settings / modesel may all
+	 *       be set to 0.
+	 *
+	 * This should only be done on init and resume from S3 with
+	 * both PLLs disabled, or we risk losing DPIO and PLL
+	 * synchronization.
+	 */
+	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
+		I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
+{
+	enum punit_power_well power_well_id = power_well->data;
+
+	__vlv_set_power_well(dev_priv, power_well_id, enable);
 }
 
 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5591,11 +5918,13 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/*
-	 * During driver initialization we need to defer enabling hotplug
-	 * processing until fbdev is set up.
+	 * During driver initialization/resume we can avoid restoring the
+	 * part of the HW/SW state that will be inited anyway explicitly.
 	 */
-	if (dev_priv->enable_hotplug_processing)
-		intel_hpd_init(dev_priv->dev);
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	intel_hpd_init(dev_priv->dev);
 
 	i915_redisable_vga_power_on(dev_priv->dev);
 }
@@ -5603,23 +5932,12 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	struct drm_device *dev = dev_priv->dev;
-	enum pipe pipe;
-
 	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	for_each_pipe(pipe)
-		__intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
-
 	valleyview_disable_display_irqs(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	spin_lock_irq(&dev->vbl_lock);
-	for_each_pipe(pipe)
-		reset_vblank_counter(dev, pipe);
-	spin_unlock_irq(&dev->vbl_lock);
-
 	vlv_set_power_well(dev_priv, power_well, false);
 }
 
@@ -5867,12 +6185,6 @@ static struct i915_power_well vlv_power_wells[] = {
 		.ops = &vlv_display_power_well_ops,
 	},
 	{
-		.name = "dpio-common",
-		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-		.ops = &vlv_dpio_power_well_ops,
-	},
-	{
 		.name = "dpio-tx-b-01",
 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
@@ -5908,6 +6220,12 @@ static struct i915_power_well vlv_power_wells[] = {
 		.ops = &vlv_dpio_power_well_ops,
 		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
 	},
+	{
+		.name = "dpio-common",
+		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+		.ops = &vlv_dpio_power_well_ops,
+	},
 };
 
 #define set_power_wells(power_domains, __power_wells) ({ \
@@ -5959,9 +6277,13 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+	power_domains->initializing = true;
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
 	intel_power_domains_resume(dev_priv);
+	power_domains->initializing = false;
 }
 
 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
@@ -5986,6 +6308,18 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
 }
 
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+	pm_runtime_get_noresume(device);
+}
+
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -6008,6 +6342,15 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
 
 	pm_runtime_set_active(device);
 
+	/*
+	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
+	 * requirement.
+	 */
+	if (!intel_enable_rc6(dev)) {
+		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+		return;
+	}
+
 	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
 	pm_runtime_mark_last_busy(device);
 	pm_runtime_use_autosuspend(device);
@@ -6023,6 +6366,9 @@ void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
 	if (!HAS_RUNTIME_PM(dev))
 		return;
 
+	if (!intel_enable_rc6(dev))
+		return;
+
 	/* Make sure we're not suspended first. */
 	pm_runtime_get_sync(device);
 	pm_runtime_disable(device);
@@ -6087,6 +6433,10 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
 		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->display.update_wm = valleyview_update_wm;
+		dev_priv->display.init_clock_gating =
+			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.update_wm = valleyview_update_wm;
 		dev_priv->display.init_clock_gating =
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
new file mode 100644
index 000000000000..a5e783a9928a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 reloc_items;
+	const u32 *batch;
+	const u32 batch_items;
+};
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+
+#define RO_RENDERSTATE(_g) \
+	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+		.reloc = gen ## _g ## _null_state_relocs, \
+		.reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
+		.batch = gen ## _g ## _null_state_batch, \
+		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+	}
+
+#endif /* INTEL_RENDERSTATE_H */
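Expanded by hand, RO_RENDERSTATE(6) from the macro above produces the definition below, which ties each generation's rodata tables to the extern declarations; sizeof()/4 converts the array's byte size into a u32 element count:

    const struct intel_renderstate_rodata gen6_null_state = {
    	.reloc = gen6_null_state_relocs,
    	.reloc_items = sizeof(gen6_null_state_relocs)/4,
    	.batch = gen6_null_state_batch,
    	.batch_items = sizeof(gen6_null_state_batch)/4,
    };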
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
new file mode 100644
index 000000000000..740538ad0977
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -0,0 +1,289 @@
1#include "intel_renderstate.h"
2
3static const u32 gen6_null_state_relocs[] = {
4 0x00000020,
5 0x00000024,
6 0x0000002c,
7 0x000001e0,
8 0x000001e4,
9};
10
11static const u32 gen6_null_state_batch[] = {
12 0x69040000,
13 0x790d0001,
14 0x00000000,
15 0x00000000,
16 0x78180000,
17 0x00000001,
18 0x61010008,
19 0x00000000,
20 0x00000001, /* reloc */
21 0x00000001, /* reloc */
22 0x00000000,
23 0x00000001, /* reloc */
24 0x00000000,
25 0x00000001,
26 0x00000000,
27 0x00000001,
28 0x61020000,
29 0x00000000,
30 0x78050001,
31 0x00000018,
32 0x00000000,
33 0x780d1002,
34 0x00000000,
35 0x00000000,
36 0x00000420,
37 0x78150003,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x78100004,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x78160003,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x78110005,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x78120002,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x78170003,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x79050005,
70 0xe0040000,
71 0x00000000,
72 0x00000000,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x79100000,
77 0x00000000,
78 0x79000002,
79 0xffffffff,
80 0x00000000,
81 0x00000000,
82 0x780e0002,
83 0x00000441,
84 0x00000401,
85 0x00000401,
86 0x78021002,
87 0x00000000,
88 0x00000000,
89 0x00000400,
90 0x78130012,
91 0x00400810,
92 0x00000000,
93 0x20000000,
94 0x04000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x78140007,
111 0x00000280,
112 0x08080000,
113 0x00000000,
114 0x00060000,
115 0x4e080002,
116 0x00100400,
117 0x00000000,
118 0x00000000,
119 0x78090005,
120 0x02000000,
121 0x22220000,
122 0x02f60000,
123 0x11330000,
124 0x02850004,
125 0x11220000,
126 0x78011002,
127 0x00000000,
128 0x00000000,
129 0x00000200,
130 0x78080003,
131 0x00002000,
132 0x00000448, /* reloc */
133 0x00000448, /* reloc */
134 0x00000000,
135 0x05000000, /* cmds end */
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000220, /* state start */
141 0x00000240,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x0060005a,
173 0x204077be,
174 0x000000c0,
175 0x008d0040,
176 0x0060005a,
177 0x206077be,
178 0x000000c0,
179 0x008d0080,
180 0x0060005a,
181 0x208077be,
182 0x000000d0,
183 0x008d0040,
184 0x0060005a,
185 0x20a077be,
186 0x000000d0,
187 0x008d0080,
188 0x00000201,
189 0x20080061,
190 0x00000000,
191 0x00000000,
192 0x00600001,
193 0x20200022,
194 0x008d0000,
195 0x00000000,
196 0x02800031,
197 0x21c01cc9,
198 0x00000020,
199 0x0a8a0001,
200 0x00600001,
201 0x204003be,
202 0x008d01c0,
203 0x00000000,
204 0x00600001,
205 0x206003be,
206 0x008d01e0,
207 0x00000000,
208 0x00600001,
209 0x208003be,
210 0x008d0200,
211 0x00000000,
212 0x00600001,
213 0x20a003be,
214 0x008d0220,
215 0x00000000,
216 0x00600001,
217 0x20c003be,
218 0x008d0240,
219 0x00000000,
220 0x00600001,
221 0x20e003be,
222 0x008d0260,
223 0x00000000,
224 0x00600001,
225 0x210003be,
226 0x008d0280,
227 0x00000000,
228 0x00600001,
229 0x212003be,
230 0x008d02a0,
231 0x00000000,
232 0x05800031,
233 0x24001cc8,
234 0x00000040,
235 0x90019000,
236 0x0000007e,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x0000007e,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x0000007e,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x0000007e,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x0000007e,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x0000007e,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x0000007e,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x0000007e,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x30000000,
269 0x00000124,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0xf99a130c,
277 0x799a130c,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x80000031,
285 0x00000003,
286 0x00000000, /* state end */
287};
288
289RO_RENDERSTATE(6);
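
A hedged sketch of how the reloc table above relates to the batch: each entry appears to be a byte offset into the batch (0x20/4 = dword 8, the first slot marked reloc), whose stored value gets the render-state buffer's base address added once the buffer is placed in the GGTT. The helper below is hypothetical, not the driver's actual code:

#include <stdint.h>

/* Hypothetical: patch each reloc slot by adding the state buffer's base
 * address to the offset value already stored in the batch. */
static void apply_null_state_relocs(uint32_t *batch, uint64_t base,
				    const uint32_t *relocs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint32_t dw = relocs[i] / 4;	/* byte offset -> dword index */

		batch[dw] = (uint32_t)(base + batch[dw]); /* low 32 bits */
	}
}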
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
new file mode 100644
index 000000000000..6fa7ff2a1298
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -0,0 +1,253 @@
1#include "intel_renderstate.h"
2
3static const u32 gen7_null_state_relocs[] = {
4 0x0000000c,
5 0x00000010,
6 0x00000018,
7 0x000001ec,
8};
9
10static const u32 gen7_null_state_batch[] = {
11 0x69040000,
12 0x61010008,
13 0x00000000,
14 0x00000001, /* reloc */
15 0x00000001, /* reloc */
16 0x00000000,
17 0x00000001, /* reloc */
18 0x00000000,
19 0x00000001,
20 0x00000000,
21 0x00000001,
22 0x790d0002,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x78180000,
27 0x00000001,
28 0x79160000,
29 0x00000008,
30 0x78300000,
31 0x02010040,
32 0x78310000,
33 0x04000000,
34 0x78320000,
35 0x04000000,
36 0x78330000,
37 0x02000000,
38 0x78100004,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x781b0005,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x781c0002,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x781d0004,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x78110005,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x78120002,
69 0x00000000,
70 0x00000000,
71 0x00000000,
72 0x78210000,
73 0x00000000,
74 0x78130005,
75 0x00000000,
76 0x20000000,
77 0x04000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x78140001,
82 0x20000800,
83 0x00000000,
84 0x781e0001,
85 0x00000000,
86 0x00000000,
87 0x78050005,
88 0xe0040000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x78040001,
95 0x00000000,
96 0x00000000,
97 0x78240000,
98 0x00000240,
99 0x78230000,
100 0x00000260,
101 0x782f0000,
102 0x00000280,
103 0x781f000c,
104 0x00400810,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x78200006,
118 0x000002c0,
119 0x08080000,
120 0x00000000,
121 0x28000402,
122 0x00060000,
123 0x00000000,
124 0x00000000,
125 0x78090005,
126 0x02000000,
127 0x22220000,
128 0x02f60000,
129 0x11230000,
130 0x02f60004,
131 0x11230000,
132 0x78080003,
133 0x00006008,
134 0x00000340, /* reloc */
135 0xffffffff,
136 0x00000000,
137 0x782a0000,
138 0x00000360,
139 0x79000002,
140 0xffffffff,
141 0x00000000,
142 0x00000000,
143 0x7b000005,
144 0x0000000f,
145 0x00000003,
146 0x00000000,
147 0x00000001,
148 0x00000000,
149 0x00000000,
150 0x05000000, /* cmds end */
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000031, /* state start */
156 0x00000003,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0xf99a130c,
164 0x799a130c,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000492,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x0080005a,
188 0x2e2077bd,
189 0x000000c0,
190 0x008d0040,
191 0x0080005a,
192 0x2e6077bd,
193 0x000000d0,
194 0x008d0040,
195 0x02800031,
196 0x21801fa9,
197 0x008d0e20,
198 0x08840001,
199 0x00800001,
200 0x2e2003bd,
201 0x008d0180,
202 0x00000000,
203 0x00800001,
204 0x2e6003bd,
205 0x008d01c0,
206 0x00000000,
207 0x00800001,
208 0x2ea003bd,
209 0x008d0200,
210 0x00000000,
211 0x00800001,
212 0x2ee003bd,
213 0x008d0240,
214 0x00000000,
215 0x05800031,
216 0x20001fa8,
217 0x008d0e20,
218 0x90031000,
219 0x00000000,
220 0x00000000,
221 0x00000000,
222 0x00000000,
223 0x00000000,
224 0x00000000,
225 0x00000000,
226 0x00000000,
227 0x00000380,
228 0x000003a0,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000, /* state end */
251};
252
253RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
new file mode 100644
index 000000000000..5c875615d42a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -0,0 +1,479 @@
1#include "intel_renderstate.h"
2
3static const u32 gen8_null_state_relocs[] = {
4 0x00000048,
5 0x00000050,
6 0x00000060,
7 0x000003ec,
8};
9
10static const u32 gen8_null_state_batch[] = {
11 0x69040000,
12 0x61020001,
13 0x00000000,
14 0x00000000,
15 0x79120000,
16 0x00000000,
17 0x79130000,
18 0x00000000,
19 0x79140000,
20 0x00000000,
21 0x79150000,
22 0x00000000,
23 0x79160000,
24 0x00000000,
25 0x6101000e,
26 0x00000001,
27 0x00000000,
28 0x00000001,
29 0x00000001, /* reloc */
30 0x00000000,
31 0x00000001, /* reloc */
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000001, /* reloc */
36 0x00000000,
37 0xfffff001,
38 0x00001001,
39 0xfffff001,
40 0x00001001,
41 0x78230000,
42 0x000006e0,
43 0x78210000,
44 0x00000700,
45 0x78300000,
46 0x08010040,
47 0x78330000,
48 0x08000000,
49 0x78310000,
50 0x08000000,
51 0x78320000,
52 0x08000000,
53 0x78240000,
54 0x00000641,
55 0x780e0000,
56 0x00000601,
57 0x780d0000,
58 0x00000000,
59 0x78180000,
60 0x00000001,
61 0x78520003,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x78190009,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x00000000,
71 0x00000000,
72 0x00000000,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x781b0007,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x78270000,
87 0x00000000,
88 0x782c0000,
89 0x00000000,
90 0x781c0002,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x78160009,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x78110008,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x78290000,
116 0x00000000,
117 0x782e0000,
118 0x00000000,
119 0x781a0009,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x781d0007,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x78280000,
140 0x00000000,
141 0x782d0000,
142 0x00000000,
143 0x78260000,
144 0x00000000,
145 0x782b0000,
146 0x00000000,
147 0x78150009,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x78100007,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x781e0003,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x78120002,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x781f0002,
177 0x30400820,
178 0x00000000,
179 0x00000000,
180 0x78510009,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x78500003,
192 0x00210000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x78130002,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x782a0000,
201 0x00000480,
202 0x782f0000,
203 0x00000540,
204 0x78140000,
205 0x00000800,
206 0x78170009,
207 0x00000000,
208 0x00000000,
209 0x00000000,
210 0x00000000,
211 0x00000000,
212 0x00000000,
213 0x00000000,
214 0x00000000,
215 0x00000000,
216 0x00000000,
217 0x7820000a,
218 0x00000580,
219 0x00000000,
220 0x08080000,
221 0x00000000,
222 0x00000000,
223 0x1f000002,
224 0x00060000,
225 0x00000000,
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x784d0000,
230 0x40000000,
231 0x784f0000,
232 0x80000100,
233 0x780f0000,
234 0x00000740,
235 0x78050006,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x78070003,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x78060003,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x78040001,
254 0x00000000,
255 0x00000001,
256 0x79000002,
257 0xffffffff,
258 0x00000000,
259 0x00000000,
260 0x78080003,
261 0x00006000,
262 0x000005e0, /* reloc */
263 0x00000000,
264 0x00000000,
265 0x78090005,
266 0x02000000,
267 0x22220000,
268 0x02f60000,
269 0x11230000,
270 0x02850004,
271 0x11230000,
272 0x784b0000,
273 0x0000000f,
274 0x78490001,
275 0x00000000,
276 0x00000000,
277 0x7b000005,
278 0x00000000,
279 0x00000003,
280 0x00000000,
281 0x00000001,
282 0x00000000,
283 0x00000000,
284 0x05000000, /* cmds end */
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x000004c0, /* state start */
300 0x00000500,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000092,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x0060005a,
364 0x21403ae8,
365 0x3a0000c0,
366 0x008d0040,
367 0x0060005a,
368 0x21603ae8,
369 0x3a0000c0,
370 0x008d0080,
371 0x0060005a,
372 0x21803ae8,
373 0x3a0000d0,
374 0x008d0040,
375 0x0060005a,
376 0x21a03ae8,
377 0x3a0000d0,
378 0x008d0080,
379 0x02800031,
380 0x2e0022e8,
381 0x0e000140,
382 0x08840001,
383 0x05800031,
384 0x200022e0,
385 0x0e000e00,
386 0x90031000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x06200000,
413 0x00000002,
414 0x06200000,
415 0x00000002,
416 0x06200000,
417 0x00000002,
418 0x06200000,
419 0x00000002,
420 0x06200000,
421 0x00000002,
422 0x06200000,
423 0x00000002,
424 0x06200000,
425 0x00000002,
426 0x06200000,
427 0x00000002,
428 0x06200000,
429 0x00000002,
430 0x06200000,
431 0x00000002,
432 0x06200000,
433 0x00000002,
434 0x06200000,
435 0x00000002,
436 0x06200000,
437 0x00000002,
438 0x06200000,
439 0x00000002,
440 0x06200000,
441 0x00000002,
442 0x06200000,
443 0x00000002,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0xf99a130c,
452 0x799a130c,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x3f800000,
469 0x00000000,
470 0x3f800000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000, /* state end */
477};
478
479RO_RENDERSTATE(8);
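
One human-readable detail in the gen8 table: the 0x3f800000 dwords shortly before the state end are the IEEE-754 bit pattern of 1.0f, a common default scale value. A quick standalone check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int bits = 0x3f800000;
	float f;

	memcpy(&f, &bits, sizeof(f));	/* reinterpret the dword as a float */
	printf("%f\n", f);		/* prints 1.000000 */
	return 0;
}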
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 79fb4cc2137c..279488addf3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,26 +33,44 @@
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36static inline int ring_space(struct intel_ring_buffer *ring) 36/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
37 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 38 * to give some indication of the magic values used in the various
39 * workarounds!
40 */
41#define CACHELINE_BYTES 64
42
43static inline int __ring_space(int head, int tail, int size)
37{ 44{
38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); 45 int space = head - (tail + I915_RING_FREE_SPACE);
39 if (space < 0) 46 if (space < 0)
40 space += ring->size; 47 space += size;
41 return space; 48 return space;
42} 49}
43 50
44void __intel_ring_advance(struct intel_ring_buffer *ring) 51static inline int ring_space(struct intel_engine_cs *ring)
52{
53 struct intel_ringbuffer *ringbuf = ring->buffer;
54 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
55}
56
57static bool intel_ring_stopped(struct intel_engine_cs *ring)
45{ 58{
46 struct drm_i915_private *dev_priv = ring->dev->dev_private; 59 struct drm_i915_private *dev_priv = ring->dev->dev_private;
60 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
61}
47 62
48 ring->tail &= ring->size - 1; 63void __intel_ring_advance(struct intel_engine_cs *ring)
49 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) 64{
65 struct intel_ringbuffer *ringbuf = ring->buffer;
66 ringbuf->tail &= ringbuf->size - 1;
67 if (intel_ring_stopped(ring))
50 return; 68 return;
51 ring->write_tail(ring, ring->tail); 69 ring->write_tail(ring, ringbuf->tail);
52} 70}
53 71
54static int 72static int
55gen2_render_ring_flush(struct intel_ring_buffer *ring, 73gen2_render_ring_flush(struct intel_engine_cs *ring,
56 u32 invalidate_domains, 74 u32 invalidate_domains,
57 u32 flush_domains) 75 u32 flush_domains)
58{ 76{
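
The hunk above factors the free-space arithmetic out into __ring_space() so later hunks can apply it to request tails as well, and the new CACHELINE_BYTES replaces the bare 128s further down (numerically identical: 2 * 64). A standalone model of the computation:

/* Standalone model of __ring_space(): free bytes in a circular buffer,
 * always keeping `reserved` bytes (I915_RING_FREE_SPACE in the driver)
 * between tail and head. */
static inline int ring_space_model(int head, int tail, int size, int reserved)
{
	int space = head - (tail + reserved);

	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}

For example, size = 131072, head = 0, tail = 4096 and reserved = 0 gives 126976.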
@@ -78,7 +96,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring,
78} 96}
79 97
80static int 98static int
81gen4_render_ring_flush(struct intel_ring_buffer *ring, 99gen4_render_ring_flush(struct intel_engine_cs *ring,
82 u32 invalidate_domains, 100 u32 invalidate_domains,
83 u32 flush_domains) 101 u32 flush_domains)
84{ 102{
@@ -173,9 +191,9 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
173 * really our business. That leaves only stall at scoreboard. 191 * really our business. That leaves only stall at scoreboard.
174 */ 192 */
175static int 193static int
176intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 194intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
177{ 195{
178 u32 scratch_addr = ring->scratch.gtt_offset + 128; 196 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
179 int ret; 197 int ret;
180 198
181 199
@@ -208,11 +226,11 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
208} 226}
209 227
210static int 228static int
211gen6_render_ring_flush(struct intel_ring_buffer *ring, 229gen6_render_ring_flush(struct intel_engine_cs *ring,
212 u32 invalidate_domains, u32 flush_domains) 230 u32 invalidate_domains, u32 flush_domains)
213{ 231{
214 u32 flags = 0; 232 u32 flags = 0;
215 u32 scratch_addr = ring->scratch.gtt_offset + 128; 233 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
216 int ret; 234 int ret;
217 235
218 /* Force SNB workarounds for PIPE_CONTROL flushes */ 236 /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -260,7 +278,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
260} 278}
261 279
262static int 280static int
263gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) 281gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
264{ 282{
265 int ret; 283 int ret;
266 284
@@ -278,7 +296,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
278 return 0; 296 return 0;
279} 297}
280 298
281static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) 299static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
282{ 300{
283 int ret; 301 int ret;
284 302
@@ -302,11 +320,11 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
302} 320}
303 321
304static int 322static int
305gen7_render_ring_flush(struct intel_ring_buffer *ring, 323gen7_render_ring_flush(struct intel_engine_cs *ring,
306 u32 invalidate_domains, u32 flush_domains) 324 u32 invalidate_domains, u32 flush_domains)
307{ 325{
308 u32 flags = 0; 326 u32 flags = 0;
309 u32 scratch_addr = ring->scratch.gtt_offset + 128; 327 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
310 int ret; 328 int ret;
311 329
312 /* 330 /*
@@ -363,11 +381,11 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
363} 381}
364 382
365static int 383static int
366gen8_render_ring_flush(struct intel_ring_buffer *ring, 384gen8_render_ring_flush(struct intel_engine_cs *ring,
367 u32 invalidate_domains, u32 flush_domains) 385 u32 invalidate_domains, u32 flush_domains)
368{ 386{
369 u32 flags = 0; 387 u32 flags = 0;
370 u32 scratch_addr = ring->scratch.gtt_offset + 128; 388 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
371 int ret; 389 int ret;
372 390
373 flags |= PIPE_CONTROL_CS_STALL; 391 flags |= PIPE_CONTROL_CS_STALL;
@@ -403,14 +421,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
403 421
404} 422}
405 423
406static void ring_write_tail(struct intel_ring_buffer *ring, 424static void ring_write_tail(struct intel_engine_cs *ring,
407 u32 value) 425 u32 value)
408{ 426{
409 struct drm_i915_private *dev_priv = ring->dev->dev_private; 427 struct drm_i915_private *dev_priv = ring->dev->dev_private;
410 I915_WRITE_TAIL(ring, value); 428 I915_WRITE_TAIL(ring, value);
411} 429}
412 430
413u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) 431u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
414{ 432{
415 struct drm_i915_private *dev_priv = ring->dev->dev_private; 433 struct drm_i915_private *dev_priv = ring->dev->dev_private;
416 u64 acthd; 434 u64 acthd;
@@ -426,7 +444,7 @@ u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
426 return acthd; 444 return acthd;
427} 445}
428 446
429static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) 447static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
430{ 448{
431 struct drm_i915_private *dev_priv = ring->dev->dev_private; 449 struct drm_i915_private *dev_priv = ring->dev->dev_private;
432 u32 addr; 450 u32 addr;
@@ -437,7 +455,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
437 I915_WRITE(HWS_PGA, addr); 455 I915_WRITE(HWS_PGA, addr);
438} 456}
439 457
440static bool stop_ring(struct intel_ring_buffer *ring) 458static bool stop_ring(struct intel_engine_cs *ring)
441{ 459{
442 struct drm_i915_private *dev_priv = to_i915(ring->dev); 460 struct drm_i915_private *dev_priv = to_i915(ring->dev);
443 461
@@ -461,11 +479,12 @@ static bool stop_ring(struct intel_ring_buffer *ring)
461 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; 479 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
462} 480}
463 481
464static int init_ring_common(struct intel_ring_buffer *ring) 482static int init_ring_common(struct intel_engine_cs *ring)
465{ 483{
466 struct drm_device *dev = ring->dev; 484 struct drm_device *dev = ring->dev;
467 struct drm_i915_private *dev_priv = dev->dev_private; 485 struct drm_i915_private *dev_priv = dev->dev_private;
468 struct drm_i915_gem_object *obj = ring->obj; 486 struct intel_ringbuffer *ringbuf = ring->buffer;
487 struct drm_i915_gem_object *obj = ringbuf->obj;
469 int ret = 0; 488 int ret = 0;
470 489
471 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 490 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -504,7 +523,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
504 * register values. */ 523 * register values. */
505 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 524 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
506 I915_WRITE_CTL(ring, 525 I915_WRITE_CTL(ring,
507 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 526 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
508 | RING_VALID); 527 | RING_VALID);
509 528
510 /* If the head is still not zero, the ring is dead */ 529 /* If the head is still not zero, the ring is dead */
@@ -512,12 +531,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
512 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && 531 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
513 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 532 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
514 DRM_ERROR("%s initialization failed " 533 DRM_ERROR("%s initialization failed "
515 "ctl %08x head %08x tail %08x start %08x\n", 534 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
516 ring->name, 535 ring->name,
517 I915_READ_CTL(ring), 536 I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
518 I915_READ_HEAD(ring), 537 I915_READ_HEAD(ring), I915_READ_TAIL(ring),
519 I915_READ_TAIL(ring), 538 I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
520 I915_READ_START(ring));
521 ret = -EIO; 539 ret = -EIO;
522 goto out; 540 goto out;
523 } 541 }
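
Worth noting in passing: the widened DRM_ERROR above makes a dead-ring report self-diagnosing — it now also prints whether CTL carries RING_VALID and the GGTT offset that START was expected to hold.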
@@ -525,10 +543,10 @@ static int init_ring_common(struct intel_ring_buffer *ring)
525 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 543 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
526 i915_kernel_lost_context(ring->dev); 544 i915_kernel_lost_context(ring->dev);
527 else { 545 else {
528 ring->head = I915_READ_HEAD(ring); 546 ringbuf->head = I915_READ_HEAD(ring);
529 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 547 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
530 ring->space = ring_space(ring); 548 ringbuf->space = ring_space(ring);
531 ring->last_retired_head = -1; 549 ringbuf->last_retired_head = -1;
532 } 550 }
533 551
534 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 552 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -540,7 +558,7 @@ out:
540} 558}
541 559
542static int 560static int
543init_pipe_control(struct intel_ring_buffer *ring) 561init_pipe_control(struct intel_engine_cs *ring)
544{ 562{
545 int ret; 563 int ret;
546 564
@@ -581,7 +599,7 @@ err:
581 return ret; 599 return ret;
582} 600}
583 601
584static int init_render_ring(struct intel_ring_buffer *ring) 602static int init_render_ring(struct intel_engine_cs *ring)
585{ 603{
586 struct drm_device *dev = ring->dev; 604 struct drm_device *dev = ring->dev;
587 struct drm_i915_private *dev_priv = dev->dev_private; 605 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -595,19 +613,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
595 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 613 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
596 * programmed to '1' on all products. 614 * programmed to '1' on all products.
597 * 615 *
598 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw 616 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
599 */ 617 */
600 if (INTEL_INFO(dev)->gen >= 6) 618 if (INTEL_INFO(dev)->gen >= 6)
601 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 619 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
602 620
603 /* Required for the hardware to program scanline values for waiting */ 621 /* Required for the hardware to program scanline values for waiting */
622 /* WaEnableFlushTlbInvalidationMode:snb */
604 if (INTEL_INFO(dev)->gen == 6) 623 if (INTEL_INFO(dev)->gen == 6)
605 I915_WRITE(GFX_MODE, 624 I915_WRITE(GFX_MODE,
606 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); 625 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
607 626
627 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
608 if (IS_GEN7(dev)) 628 if (IS_GEN7(dev))
609 I915_WRITE(GFX_MODE_GEN7, 629 I915_WRITE(GFX_MODE_GEN7,
610 _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 630 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
611 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 631 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
612 632
613 if (INTEL_INFO(dev)->gen >= 5) { 633 if (INTEL_INFO(dev)->gen >= 5) {
@@ -624,13 +644,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
624 */ 644 */
625 I915_WRITE(CACHE_MODE_0, 645 I915_WRITE(CACHE_MODE_0,
626 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 646 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
627
628 /* This is not explicitly set for GEN6, so read the register.
629 * see intel_ring_mi_set_context() for why we care.
630 * TODO: consider explicitly setting the bit for GEN5
631 */
632 ring->itlb_before_ctx_switch =
633 !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
634 } 647 }
635 648
636 if (INTEL_INFO(dev)->gen >= 6) 649 if (INTEL_INFO(dev)->gen >= 6)
@@ -642,7 +655,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
642 return ret; 655 return ret;
643} 656}
644 657
645static void render_ring_cleanup(struct intel_ring_buffer *ring) 658static void render_ring_cleanup(struct intel_engine_cs *ring)
646{ 659{
647 struct drm_device *dev = ring->dev; 660 struct drm_device *dev = ring->dev;
648 661
@@ -658,20 +671,46 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
658 ring->scratch.obj = NULL; 671 ring->scratch.obj = NULL;
659} 672}
660 673
661static void 674static int gen6_signal(struct intel_engine_cs *signaller,
662update_mboxes(struct intel_ring_buffer *ring, 675 unsigned int num_dwords)
663 u32 mmio_offset)
664{ 676{
665/* NB: In order to be able to do semaphore MBOX updates for varying number 677 struct drm_device *dev = signaller->dev;
666 * of rings, it's easiest if we round up each individual update to a 678 struct drm_i915_private *dev_priv = dev->dev_private;
667 * multiple of 2 (since ring updates must always be a multiple of 2) 679 struct intel_engine_cs *useless;
668 * even though the actual update only requires 3 dwords. 680 int i, ret;
669 */ 681
682 /* NB: In order to be able to do semaphore MBOX updates for varying
683 * number of rings, it's easiest if we round up each individual update
684 * to a multiple of 2 (since ring updates must always be a multiple of
685 * 2) even though the actual update only requires 3 dwords.
686 */
670#define MBOX_UPDATE_DWORDS 4 687#define MBOX_UPDATE_DWORDS 4
671 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 688 if (i915_semaphore_is_enabled(dev))
672 intel_ring_emit(ring, mmio_offset); 689 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
673 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 690 else
674 intel_ring_emit(ring, MI_NOOP); 691 return intel_ring_begin(signaller, num_dwords);
692
693 ret = intel_ring_begin(signaller, num_dwords);
694 if (ret)
695 return ret;
696#undef MBOX_UPDATE_DWORDS
697
698 for_each_ring(useless, dev_priv, i) {
699 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
700 if (mbox_reg != GEN6_NOSYNC) {
701 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
702 intel_ring_emit(signaller, mbox_reg);
703 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
704 intel_ring_emit(signaller, MI_NOOP);
705 } else {
706 intel_ring_emit(signaller, MI_NOOP);
707 intel_ring_emit(signaller, MI_NOOP);
708 intel_ring_emit(signaller, MI_NOOP);
709 intel_ring_emit(signaller, MI_NOOP);
710 }
711 }
712
713 return 0;
675} 714}
676 715
677/** 716/**
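Dword accounting for the new split, assuming I915_NUM_RINGS reaches 5 (RCS, VCS, BCS, VECS, VCS2) elsewhere in this series: gen6_add_request() asks the signal hook for 4 dwords, and gen6_signal() grows the reservation to 4 + (5 - 1) * MBOX_UPDATE_DWORDS = 20. Peers whose mailbox is GEN6_NOSYNC still consume their four slots as MI_NOOPs, so the reservation does not depend on which rings actually get signalled.
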
@@ -684,29 +723,14 @@ update_mboxes(struct intel_ring_buffer *ring,
684 * This acts like a signal in the canonical semaphore. 723 * This acts like a signal in the canonical semaphore.
685 */ 724 */
686static int 725static int
687gen6_add_request(struct intel_ring_buffer *ring) 726gen6_add_request(struct intel_engine_cs *ring)
688{ 727{
689 struct drm_device *dev = ring->dev; 728 int ret;
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct intel_ring_buffer *useless;
692 int i, ret, num_dwords = 4;
693
694 if (i915_semaphore_is_enabled(dev))
695 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
696#undef MBOX_UPDATE_DWORDS
697 729
698 ret = intel_ring_begin(ring, num_dwords); 730 ret = ring->semaphore.signal(ring, 4);
699 if (ret) 731 if (ret)
700 return ret; 732 return ret;
701 733
702 if (i915_semaphore_is_enabled(dev)) {
703 for_each_ring(useless, dev_priv, i) {
704 u32 mbox_reg = ring->signal_mbox[i];
705 if (mbox_reg != GEN6_NOSYNC)
706 update_mboxes(ring, mbox_reg);
707 }
708 }
709
710 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 734 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
711 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 735 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
712 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 736 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
@@ -731,14 +755,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
731 * @seqno - seqno which the waiter will block on 755 * @seqno - seqno which the waiter will block on
732 */ 756 */
733static int 757static int
734gen6_ring_sync(struct intel_ring_buffer *waiter, 758gen6_ring_sync(struct intel_engine_cs *waiter,
735 struct intel_ring_buffer *signaller, 759 struct intel_engine_cs *signaller,
736 u32 seqno) 760 u32 seqno)
737{ 761{
738 int ret;
739 u32 dw1 = MI_SEMAPHORE_MBOX | 762 u32 dw1 = MI_SEMAPHORE_MBOX |
740 MI_SEMAPHORE_COMPARE | 763 MI_SEMAPHORE_COMPARE |
741 MI_SEMAPHORE_REGISTER; 764 MI_SEMAPHORE_REGISTER;
765 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
766 int ret;
742 767
743 /* Throughout all of the GEM code, seqno passed implies our current 768 /* Throughout all of the GEM code, seqno passed implies our current
744 * seqno is >= the last seqno executed. However for hardware the 769 * seqno is >= the last seqno executed. However for hardware the
@@ -746,8 +771,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
746 */ 771 */
747 seqno -= 1; 772 seqno -= 1;
748 773
749 WARN_ON(signaller->semaphore_register[waiter->id] == 774 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
750 MI_SEMAPHORE_SYNC_INVALID);
751 775
752 ret = intel_ring_begin(waiter, 4); 776 ret = intel_ring_begin(waiter, 4);
753 if (ret) 777 if (ret)
@@ -755,9 +779,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
755 779
756 /* If seqno wrap happened, omit the wait with no-ops */ 780 /* If seqno wrap happened, omit the wait with no-ops */
757 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 781 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
758 intel_ring_emit(waiter, 782 intel_ring_emit(waiter, dw1 | wait_mbox);
759 dw1 |
760 signaller->semaphore_register[waiter->id]);
761 intel_ring_emit(waiter, seqno); 783 intel_ring_emit(waiter, seqno);
762 intel_ring_emit(waiter, 0); 784 intel_ring_emit(waiter, 0);
763 intel_ring_emit(waiter, MI_NOOP); 785 intel_ring_emit(waiter, MI_NOOP);
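
Modelled standalone, the wait emitted above is a fixed four-dword packet — emit() stands in for intel_ring_emit(), and the caller has already done seqno -= 1, converting GEM's greater-or-equal convention into the hardware's strictly-greater compare:

#include <stdint.h>

/* Sketch of the MI_SEMAPHORE_MBOX wait; MI_NOOP is the all-zero dword. */
static void emit_semaphore_wait_model(void (*emit)(uint32_t),
				      uint32_t dw1, uint32_t wait_mbox,
				      uint32_t seqno_minus_one)
{
	emit(dw1 | wait_mbox);	/* MBOX | COMPARE | REGISTER | mailbox */
	emit(seqno_minus_one);	/* value the mailbox must exceed */
	emit(0);
	emit(0);		/* MI_NOOP padding to an even length */
}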
@@ -782,9 +804,9 @@ do { \
782} while (0) 804} while (0)
783 805
784static int 806static int
785pc_render_add_request(struct intel_ring_buffer *ring) 807pc_render_add_request(struct intel_engine_cs *ring)
786{ 808{
787 u32 scratch_addr = ring->scratch.gtt_offset + 128; 809 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
788 int ret; 810 int ret;
789 811
790 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 812 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -806,15 +828,15 @@ pc_render_add_request(struct intel_ring_buffer *ring)
806 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 828 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
807 intel_ring_emit(ring, 0); 829 intel_ring_emit(ring, 0);
808 PIPE_CONTROL_FLUSH(ring, scratch_addr); 830 PIPE_CONTROL_FLUSH(ring, scratch_addr);
809 scratch_addr += 128; /* write to separate cachelines */ 831 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
810 PIPE_CONTROL_FLUSH(ring, scratch_addr); 832 PIPE_CONTROL_FLUSH(ring, scratch_addr);
811 scratch_addr += 128; 833 scratch_addr += 2 * CACHELINE_BYTES;
812 PIPE_CONTROL_FLUSH(ring, scratch_addr); 834 PIPE_CONTROL_FLUSH(ring, scratch_addr);
813 scratch_addr += 128; 835 scratch_addr += 2 * CACHELINE_BYTES;
814 PIPE_CONTROL_FLUSH(ring, scratch_addr); 836 PIPE_CONTROL_FLUSH(ring, scratch_addr);
815 scratch_addr += 128; 837 scratch_addr += 2 * CACHELINE_BYTES;
816 PIPE_CONTROL_FLUSH(ring, scratch_addr); 838 PIPE_CONTROL_FLUSH(ring, scratch_addr);
817 scratch_addr += 128; 839 scratch_addr += 2 * CACHELINE_BYTES;
818 PIPE_CONTROL_FLUSH(ring, scratch_addr); 840 PIPE_CONTROL_FLUSH(ring, scratch_addr);
819 841
820 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 842 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
@@ -830,7 +852,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
830} 852}
831 853
832static u32 854static u32
833gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 855gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
834{ 856{
835 /* Workaround to force correct ordering between irq and seqno writes on 857 /* Workaround to force correct ordering between irq and seqno writes on
836 * ivb (and maybe also on snb) by reading from a CS register (like 858 * ivb (and maybe also on snb) by reading from a CS register (like
@@ -844,31 +866,31 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
844} 866}
845 867
846static u32 868static u32
847ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 869ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
848{ 870{
849 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 871 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
850} 872}
851 873
852static void 874static void
853ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 875ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
854{ 876{
855 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 877 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
856} 878}
857 879
858static u32 880static u32
859pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 881pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
860{ 882{
861 return ring->scratch.cpu_page[0]; 883 return ring->scratch.cpu_page[0];
862} 884}
863 885
864static void 886static void
865pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 887pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
866{ 888{
867 ring->scratch.cpu_page[0] = seqno; 889 ring->scratch.cpu_page[0] = seqno;
868} 890}
869 891
870static bool 892static bool
871gen5_ring_get_irq(struct intel_ring_buffer *ring) 893gen5_ring_get_irq(struct intel_engine_cs *ring)
872{ 894{
873 struct drm_device *dev = ring->dev; 895 struct drm_device *dev = ring->dev;
874 struct drm_i915_private *dev_priv = dev->dev_private; 896 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -886,7 +908,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
886} 908}
887 909
888static void 910static void
889gen5_ring_put_irq(struct intel_ring_buffer *ring) 911gen5_ring_put_irq(struct intel_engine_cs *ring)
890{ 912{
891 struct drm_device *dev = ring->dev; 913 struct drm_device *dev = ring->dev;
892 struct drm_i915_private *dev_priv = dev->dev_private; 914 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -899,7 +921,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
899} 921}
900 922
901static bool 923static bool
902i9xx_ring_get_irq(struct intel_ring_buffer *ring) 924i9xx_ring_get_irq(struct intel_engine_cs *ring)
903{ 925{
904 struct drm_device *dev = ring->dev; 926 struct drm_device *dev = ring->dev;
905 struct drm_i915_private *dev_priv = dev->dev_private; 927 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -920,7 +942,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
920} 942}
921 943
922static void 944static void
923i9xx_ring_put_irq(struct intel_ring_buffer *ring) 945i9xx_ring_put_irq(struct intel_engine_cs *ring)
924{ 946{
925 struct drm_device *dev = ring->dev; 947 struct drm_device *dev = ring->dev;
926 struct drm_i915_private *dev_priv = dev->dev_private; 948 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -936,7 +958,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
936} 958}
937 959
938static bool 960static bool
939i8xx_ring_get_irq(struct intel_ring_buffer *ring) 961i8xx_ring_get_irq(struct intel_engine_cs *ring)
940{ 962{
941 struct drm_device *dev = ring->dev; 963 struct drm_device *dev = ring->dev;
942 struct drm_i915_private *dev_priv = dev->dev_private; 964 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -957,7 +979,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
957} 979}
958 980
959static void 981static void
960i8xx_ring_put_irq(struct intel_ring_buffer *ring) 982i8xx_ring_put_irq(struct intel_engine_cs *ring)
961{ 983{
962 struct drm_device *dev = ring->dev; 984 struct drm_device *dev = ring->dev;
963 struct drm_i915_private *dev_priv = dev->dev_private; 985 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -972,7 +994,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
972 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 994 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
973} 995}
974 996
975void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 997void intel_ring_setup_status_page(struct intel_engine_cs *ring)
976{ 998{
977 struct drm_device *dev = ring->dev; 999 struct drm_device *dev = ring->dev;
978 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1000 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -989,6 +1011,11 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
989 case BCS: 1011 case BCS:
990 mmio = BLT_HWS_PGA_GEN7; 1012 mmio = BLT_HWS_PGA_GEN7;
991 break; 1013 break;
1014 /*
 1015	 * VCS2 doesn't actually exist on Gen7. The case is only here
 1016	 * to silence gcc's switch-check warning.
1017 */
1018 case VCS2:
992 case VCS: 1019 case VCS:
993 mmio = BSD_HWS_PGA_GEN7; 1020 mmio = BSD_HWS_PGA_GEN7;
994 break; 1021 break;
@@ -1030,7 +1057,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
1030} 1057}
1031 1058
1032static int 1059static int
1033bsd_ring_flush(struct intel_ring_buffer *ring, 1060bsd_ring_flush(struct intel_engine_cs *ring,
1034 u32 invalidate_domains, 1061 u32 invalidate_domains,
1035 u32 flush_domains) 1062 u32 flush_domains)
1036{ 1063{
@@ -1047,7 +1074,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
1047} 1074}
1048 1075
1049static int 1076static int
1050i9xx_add_request(struct intel_ring_buffer *ring) 1077i9xx_add_request(struct intel_engine_cs *ring)
1051{ 1078{
1052 int ret; 1079 int ret;
1053 1080
@@ -1065,7 +1092,7 @@ i9xx_add_request(struct intel_ring_buffer *ring)
1065} 1092}
1066 1093
1067static bool 1094static bool
1068gen6_ring_get_irq(struct intel_ring_buffer *ring) 1095gen6_ring_get_irq(struct intel_engine_cs *ring)
1069{ 1096{
1070 struct drm_device *dev = ring->dev; 1097 struct drm_device *dev = ring->dev;
1071 struct drm_i915_private *dev_priv = dev->dev_private; 1098 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1090,7 +1117,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
1090} 1117}
1091 1118
1092static void 1119static void
1093gen6_ring_put_irq(struct intel_ring_buffer *ring) 1120gen6_ring_put_irq(struct intel_engine_cs *ring)
1094{ 1121{
1095 struct drm_device *dev = ring->dev; 1122 struct drm_device *dev = ring->dev;
1096 struct drm_i915_private *dev_priv = dev->dev_private; 1123 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1108,7 +1135,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1108} 1135}
1109 1136
1110static bool 1137static bool
1111hsw_vebox_get_irq(struct intel_ring_buffer *ring) 1138hsw_vebox_get_irq(struct intel_engine_cs *ring)
1112{ 1139{
1113 struct drm_device *dev = ring->dev; 1140 struct drm_device *dev = ring->dev;
1114 struct drm_i915_private *dev_priv = dev->dev_private; 1141 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1128,7 +1155,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1128} 1155}
1129 1156
1130static void 1157static void
1131hsw_vebox_put_irq(struct intel_ring_buffer *ring) 1158hsw_vebox_put_irq(struct intel_engine_cs *ring)
1132{ 1159{
1133 struct drm_device *dev = ring->dev; 1160 struct drm_device *dev = ring->dev;
1134 struct drm_i915_private *dev_priv = dev->dev_private; 1161 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1146,7 +1173,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1146} 1173}
1147 1174
1148static bool 1175static bool
1149gen8_ring_get_irq(struct intel_ring_buffer *ring) 1176gen8_ring_get_irq(struct intel_engine_cs *ring)
1150{ 1177{
1151 struct drm_device *dev = ring->dev; 1178 struct drm_device *dev = ring->dev;
1152 struct drm_i915_private *dev_priv = dev->dev_private; 1179 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1199,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring)
1172} 1199}
1173 1200
1174static void 1201static void
1175gen8_ring_put_irq(struct intel_ring_buffer *ring) 1202gen8_ring_put_irq(struct intel_engine_cs *ring)
1176{ 1203{
1177 struct drm_device *dev = ring->dev; 1204 struct drm_device *dev = ring->dev;
1178 struct drm_i915_private *dev_priv = dev->dev_private; 1205 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1192,8 +1219,8 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring)
1192} 1219}
1193 1220
1194static int 1221static int
1195i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1222i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1196 u32 offset, u32 length, 1223 u64 offset, u32 length,
1197 unsigned flags) 1224 unsigned flags)
1198{ 1225{
1199 int ret; 1226 int ret;
@@ -1215,8 +1242,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1215/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 1242
1216#define I830_BATCH_LIMIT (256*1024) 1243#define I830_BATCH_LIMIT (256*1024)
1217static int 1244static int
1218i830_dispatch_execbuffer(struct intel_ring_buffer *ring, 1245i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1219 u32 offset, u32 len, 1246 u64 offset, u32 len,
1220 unsigned flags) 1247 unsigned flags)
1221{ 1248{
1222 int ret; 1249 int ret;
@@ -1266,8 +1293,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1266} 1293}
1267 1294
1268static int 1295static int
1269i915_dispatch_execbuffer(struct intel_ring_buffer *ring, 1296i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1270 u32 offset, u32 len, 1297 u64 offset, u32 len,
1271 unsigned flags) 1298 unsigned flags)
1272{ 1299{
1273 int ret; 1300 int ret;
@@ -1283,7 +1310,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1283 return 0; 1310 return 0;
1284} 1311}
1285 1312
1286static void cleanup_status_page(struct intel_ring_buffer *ring) 1313static void cleanup_status_page(struct intel_engine_cs *ring)
1287{ 1314{
1288 struct drm_i915_gem_object *obj; 1315 struct drm_i915_gem_object *obj;
1289 1316
@@ -1297,50 +1324,44 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
1297 ring->status_page.obj = NULL; 1324 ring->status_page.obj = NULL;
1298} 1325}
1299 1326
1300static int init_status_page(struct intel_ring_buffer *ring) 1327static int init_status_page(struct intel_engine_cs *ring)
1301{ 1328{
1302 struct drm_device *dev = ring->dev;
1303 struct drm_i915_gem_object *obj; 1329 struct drm_i915_gem_object *obj;
1304 int ret;
1305 1330
1306 obj = i915_gem_alloc_object(dev, 4096); 1331 if ((obj = ring->status_page.obj) == NULL) {
1307 if (obj == NULL) { 1332 int ret;
1308 DRM_ERROR("Failed to allocate status page\n");
1309 ret = -ENOMEM;
1310 goto err;
1311 }
1312 1333
1313 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1334 obj = i915_gem_alloc_object(ring->dev, 4096);
1314 if (ret) 1335 if (obj == NULL) {
1315 goto err_unref; 1336 DRM_ERROR("Failed to allocate status page\n");
1337 return -ENOMEM;
1338 }
1316 1339
1317 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1340 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1318 if (ret) 1341 if (ret)
1319 goto err_unref; 1342 goto err_unref;
1343
1344 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
1345 if (ret) {
1346err_unref:
1347 drm_gem_object_unreference(&obj->base);
1348 return ret;
1349 }
1350
1351 ring->status_page.obj = obj;
1352 }
1320 1353
1321 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1354 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1322 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1355 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1323 if (ring->status_page.page_addr == NULL) {
1324 ret = -ENOMEM;
1325 goto err_unpin;
1326 }
1327 ring->status_page.obj = obj;
1328 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1356 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1329 1357
1330 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1358 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1331 ring->name, ring->status_page.gfx_addr); 1359 ring->name, ring->status_page.gfx_addr);
1332 1360
1333 return 0; 1361 return 0;
1334
1335err_unpin:
1336 i915_gem_object_ggtt_unpin(obj);
1337err_unref:
1338 drm_gem_object_unreference(&obj->base);
1339err:
1340 return ret;
1341} 1362}
1342 1363
1343static int init_phys_status_page(struct intel_ring_buffer *ring) 1364static int init_phys_status_page(struct intel_engine_cs *ring)
1344{ 1365{
1345 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1366 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1346 1367
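
init_status_page() now keeps its GEM object across re-initialisation and allocates it only on first use; a standalone sketch of that lazy-init shape (names hypothetical):

#include <errno.h>
#include <stdlib.h>

struct status_obj { void *cpu_page; };	/* stand-in for the GEM object */

/* Allocate once, keep the object across ring restarts, remap/clear on
 * every call — the shape the hunk above gives init_status_page(). */
static int lazy_init_status_page(struct status_obj **slot)
{
	if (*slot == NULL) {
		*slot = calloc(1, sizeof(**slot));
		if (*slot == NULL)
			return -ENOMEM;
	}
	/* the driver redoes kmap() + memset() here unconditionally */
	return 0;
}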
@@ -1357,44 +1378,24 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
1357 return 0; 1378 return 0;
1358} 1379}
1359 1380
1360static int intel_init_ring_buffer(struct drm_device *dev, 1381static int allocate_ring_buffer(struct intel_engine_cs *ring)
1361 struct intel_ring_buffer *ring)
1362{ 1382{
1383 struct drm_device *dev = ring->dev;
1384 struct drm_i915_private *dev_priv = to_i915(dev);
1385 struct intel_ringbuffer *ringbuf = ring->buffer;
1363 struct drm_i915_gem_object *obj; 1386 struct drm_i915_gem_object *obj;
1364 struct drm_i915_private *dev_priv = dev->dev_private;
1365 int ret; 1387 int ret;
1366 1388
1367 ring->dev = dev; 1389 if (intel_ring_initialized(ring))
1368 INIT_LIST_HEAD(&ring->active_list); 1390 return 0;
1369 INIT_LIST_HEAD(&ring->request_list);
1370 ring->size = 32 * PAGE_SIZE;
1371 memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1372
1373 init_waitqueue_head(&ring->irq_queue);
1374
1375 if (I915_NEED_GFX_HWS(dev)) {
1376 ret = init_status_page(ring);
1377 if (ret)
1378 return ret;
1379 } else {
1380 BUG_ON(ring->id != RCS);
1381 ret = init_phys_status_page(ring);
1382 if (ret)
1383 return ret;
1384 }
1385 1391
1386 obj = NULL; 1392 obj = NULL;
1387 if (!HAS_LLC(dev)) 1393 if (!HAS_LLC(dev))
1388 obj = i915_gem_object_create_stolen(dev, ring->size); 1394 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1389 if (obj == NULL) 1395 if (obj == NULL)
1390 obj = i915_gem_alloc_object(dev, ring->size); 1396 obj = i915_gem_alloc_object(dev, ringbuf->size);
1391 if (obj == NULL) { 1397 if (obj == NULL)
1392 DRM_ERROR("Failed to allocate ringbuffer\n"); 1398 return -ENOMEM;
1393 ret = -ENOMEM;
1394 goto err_hws;
1395 }
1396
1397 ring->obj = obj;
1398 1399
1399 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1400 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1400 if (ret) 1401 if (ret)
@@ -1404,65 +1405,102 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1404 if (ret) 1405 if (ret)
1405 goto err_unpin; 1406 goto err_unpin;
1406 1407
1407 ring->virtual_start = 1408 ringbuf->virtual_start =
1408 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), 1409 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1409 ring->size); 1410 ringbuf->size);
1410 if (ring->virtual_start == NULL) { 1411 if (ringbuf->virtual_start == NULL) {
1411 DRM_ERROR("Failed to map ringbuffer.\n");
1412 ret = -EINVAL; 1412 ret = -EINVAL;
1413 goto err_unpin; 1413 goto err_unpin;
1414 } 1414 }
1415 1415
1416 ret = ring->init(ring); 1416 ringbuf->obj = obj;
1417 if (ret) 1417 return 0;
1418 goto err_unmap; 1418
1419err_unpin:
1420 i915_gem_object_ggtt_unpin(obj);
1421err_unref:
1422 drm_gem_object_unreference(&obj->base);
1423 return ret;
1424}
1425
1426static int intel_init_ring_buffer(struct drm_device *dev,
1427 struct intel_engine_cs *ring)
1428{
1429 struct intel_ringbuffer *ringbuf = ring->buffer;
1430 int ret;
1431
1432 if (ringbuf == NULL) {
1433 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1434 if (!ringbuf)
1435 return -ENOMEM;
1436 ring->buffer = ringbuf;
1437 }
1438
1439 ring->dev = dev;
1440 INIT_LIST_HEAD(&ring->active_list);
1441 INIT_LIST_HEAD(&ring->request_list);
1442 ringbuf->size = 32 * PAGE_SIZE;
1443 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1444
1445 init_waitqueue_head(&ring->irq_queue);
1446
1447 if (I915_NEED_GFX_HWS(dev)) {
1448 ret = init_status_page(ring);
1449 if (ret)
1450 goto error;
1451 } else {
1452 BUG_ON(ring->id != RCS);
1453 ret = init_phys_status_page(ring);
1454 if (ret)
1455 goto error;
1456 }
1457
1458 ret = allocate_ring_buffer(ring);
1459 if (ret) {
1460 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1461 goto error;
1462 }
1419 1463
1420 /* Workaround an erratum on the i830 which causes a hang if 1464 /* Workaround an erratum on the i830 which causes a hang if
1421 * the TAIL pointer points to within the last 2 cachelines 1465 * the TAIL pointer points to within the last 2 cachelines
1422 * of the buffer. 1466 * of the buffer.
1423 */ 1467 */
1424 ring->effective_size = ring->size; 1468 ringbuf->effective_size = ringbuf->size;
1425 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1469 if (IS_I830(dev) || IS_845G(dev))
1426 ring->effective_size -= 128; 1470 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1427 1471
1428 i915_cmd_parser_init_ring(ring); 1472 ret = i915_cmd_parser_init_ring(ring);
1473 if (ret)
1474 goto error;
1475
1476 ret = ring->init(ring);
1477 if (ret)
1478 goto error;
1429 1479
1430 return 0; 1480 return 0;
1431 1481
1432err_unmap: 1482error:
1433 iounmap(ring->virtual_start); 1483 kfree(ringbuf);
1434err_unpin: 1484 ring->buffer = NULL;
1435 i915_gem_object_ggtt_unpin(obj);
1436err_unref:
1437 drm_gem_object_unreference(&obj->base);
1438 ring->obj = NULL;
1439err_hws:
1440 cleanup_status_page(ring);
1441 return ret; 1485 return ret;
1442} 1486}
1443 1487
1444void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) 1488void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1445{ 1489{
1446 struct drm_i915_private *dev_priv; 1490 struct drm_i915_private *dev_priv = to_i915(ring->dev);
1447 int ret; 1491 struct intel_ringbuffer *ringbuf = ring->buffer;
1448 1492
1449 if (ring->obj == NULL) 1493 if (!intel_ring_initialized(ring))
1450 return; 1494 return;
1451 1495
1452 /* Disable the ring buffer. The ring must be idle at this point */ 1496 intel_stop_ring_buffer(ring);
1453 dev_priv = ring->dev->dev_private; 1497 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1454 ret = intel_ring_idle(ring);
1455 if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1456 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1457 ring->name, ret);
1458
1459 I915_WRITE_CTL(ring, 0);
1460 1498
1461 iounmap(ring->virtual_start); 1499 iounmap(ringbuf->virtual_start);
1462 1500
1463 i915_gem_object_ggtt_unpin(ring->obj); 1501 i915_gem_object_ggtt_unpin(ringbuf->obj);
1464 drm_gem_object_unreference(&ring->obj->base); 1502 drm_gem_object_unreference(&ringbuf->obj->base);
1465 ring->obj = NULL; 1503 ringbuf->obj = NULL;
1466 ring->preallocated_lazy_request = NULL; 1504 ring->preallocated_lazy_request = NULL;
1467 ring->outstanding_lazy_seqno = 0; 1505 ring->outstanding_lazy_seqno = 0;
1468 1506
@@ -1470,44 +1508,34 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1470 ring->cleanup(ring); 1508 ring->cleanup(ring);
1471 1509
1472 cleanup_status_page(ring); 1510 cleanup_status_page(ring);
1511
1512 i915_cmd_parser_fini_ring(ring);
1513
1514 kfree(ringbuf);
1515 ring->buffer = NULL;
1473} 1516}
1474 1517
1475static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1518static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1476{ 1519{
1520 struct intel_ringbuffer *ringbuf = ring->buffer;
1477 struct drm_i915_gem_request *request; 1521 struct drm_i915_gem_request *request;
1478 u32 seqno = 0, tail; 1522 u32 seqno = 0;
1479 int ret; 1523 int ret;
1480 1524
1481 if (ring->last_retired_head != -1) { 1525 if (ringbuf->last_retired_head != -1) {
1482 ring->head = ring->last_retired_head; 1526 ringbuf->head = ringbuf->last_retired_head;
1483 ring->last_retired_head = -1; 1527 ringbuf->last_retired_head = -1;
1484 1528
1485 ring->space = ring_space(ring); 1529 ringbuf->space = ring_space(ring);
1486 if (ring->space >= n) 1530 if (ringbuf->space >= n)
1487 return 0; 1531 return 0;
1488 } 1532 }
1489 1533
1490 list_for_each_entry(request, &ring->request_list, list) { 1534 list_for_each_entry(request, &ring->request_list, list) {
1491 int space; 1535 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
1492
1493 if (request->tail == -1)
1494 continue;
1495
1496 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1497 if (space < 0)
1498 space += ring->size;
1499 if (space >= n) {
1500 seqno = request->seqno; 1536 seqno = request->seqno;
1501 tail = request->tail;
1502 break; 1537 break;
1503 } 1538 }
1504
1505 /* Consume this request in case we need more space than
1506 * is available and so need to prevent a race between
1507 * updating last_retired_head and direct reads of
1508 * I915_RING_HEAD. It also provides a nice sanity check.
1509 */
1510 request->tail = -1;
1511 } 1539 }
1512 1540
1513 if (seqno == 0) 1541 if (seqno == 0)
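
With request tails now always valid, the scan above reduces to reusing the same free-space arithmetic against each request's tail. A hypothetical standalone version on top of ring_space_model() from the earlier sketch:

#include <stdint.h>

struct req_model { int tail; uint32_t seqno; };	/* stand-in for the request */

/* Find the oldest request whose retirement frees at least n bytes; the
 * caller then waits on its seqno and retires. reserved plays the role of
 * I915_RING_FREE_SPACE. */
static uint32_t request_to_wait_for(const struct req_model *reqs, int count,
				    int tail, int size, int reserved, int n)
{
	int i;

	for (i = 0; i < count; i++)
		if (ring_space_model(reqs[i].tail, tail, size, reserved) >= n)
			return reqs[i].seqno;
	return 0;	/* nothing suitable, as in the hunk above */
}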
@@ -1517,18 +1545,19 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1517 if (ret) 1545 if (ret)
1518 return ret; 1546 return ret;
1519 1547
1520 ring->head = tail; 1548 i915_gem_retire_requests_ring(ring);
1521 ring->space = ring_space(ring); 1549 ringbuf->head = ringbuf->last_retired_head;
1522 if (WARN_ON(ring->space < n)) 1550 ringbuf->last_retired_head = -1;
1523 return -ENOSPC;
1524 1551
1552 ringbuf->space = ring_space(ring);
1525 return 0; 1553 return 0;
1526} 1554}
1527 1555
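intel_ring_wait_request() now delegates the circular-buffer arithmetic to a __ring_space() helper factored out earlier in this series and not visible in this hunk. A minimal sketch of what it computes, assuming the usual i915 reserve margin (illustrative, not the verbatim upstream helper):

    static inline int __ring_space(int head, int tail, int size)
    {
            /* Keep I915_RING_FREE_SPACE bytes in reserve so a completely
             * full ring is never confused with a completely empty one. */
            int space = head - (tail + I915_RING_FREE_SPACE);
            if (space < 0)
                    space += size;
            return space;
    }

This replaces the open-coded space computation deleted on the left, so the request-scan loop above shrinks to a single comparison.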
1528static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) 1556static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1529{ 1557{
1530 struct drm_device *dev = ring->dev; 1558 struct drm_device *dev = ring->dev;
1531 struct drm_i915_private *dev_priv = dev->dev_private; 1559 struct drm_i915_private *dev_priv = dev->dev_private;
1560 struct intel_ringbuffer *ringbuf = ring->buffer;
1532 unsigned long end; 1561 unsigned long end;
1533 int ret; 1562 int ret;
1534 1563
@@ -1539,7 +1568,6 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1539 /* force the tail write in case we have been skipping them */ 1568 /* force the tail write in case we have been skipping them */
1540 __intel_ring_advance(ring); 1569 __intel_ring_advance(ring);
1541 1570
1542 trace_i915_ring_wait_begin(ring);
1543 /* With GEM the hangcheck timer should kick us out of the loop, 1571 /* With GEM the hangcheck timer should kick us out of the loop,
1544 * leaving it early runs the risk of corrupting GEM state (due 1572 * leaving it early runs the risk of corrupting GEM state (due
1545 * to running on almost untested codepaths). But on resume 1573 * to running on almost untested codepaths). But on resume
@@ -1547,12 +1575,13 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1547 * case by choosing an insanely large timeout. */ 1575 * case by choosing an insanely large timeout. */
1548 end = jiffies + 60 * HZ; 1576 end = jiffies + 60 * HZ;
1549 1577
1578 trace_i915_ring_wait_begin(ring);
1550 do { 1579 do {
1551 ring->head = I915_READ_HEAD(ring); 1580 ringbuf->head = I915_READ_HEAD(ring);
1552 ring->space = ring_space(ring); 1581 ringbuf->space = ring_space(ring);
1553 if (ring->space >= n) { 1582 if (ringbuf->space >= n) {
1554 trace_i915_ring_wait_end(ring); 1583 ret = 0;
1555 return 0; 1584 break;
1556 } 1585 }
1557 1586
1558 if (!drm_core_check_feature(dev, DRIVER_MODESET) && 1587 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
@@ -1564,38 +1593,49 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1564 1593
1565 msleep(1); 1594 msleep(1);
1566 1595
1596 if (dev_priv->mm.interruptible && signal_pending(current)) {
1597 ret = -ERESTARTSYS;
1598 break;
1599 }
1600
1567 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1601 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1568 dev_priv->mm.interruptible); 1602 dev_priv->mm.interruptible);
1569 if (ret) 1603 if (ret)
1570 return ret; 1604 break;
1571 } while (!time_after(jiffies, end)); 1605
1606 if (time_after(jiffies, end)) {
1607 ret = -EBUSY;
1608 break;
1609 }
1610 } while (1);
1572 trace_i915_ring_wait_end(ring); 1611 trace_i915_ring_wait_end(ring);
1573 return -EBUSY; 1612 return ret;
1574} 1613}
1575 1614
1576static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) 1615static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1577{ 1616{
1578 uint32_t __iomem *virt; 1617 uint32_t __iomem *virt;
1579 int rem = ring->size - ring->tail; 1618 struct intel_ringbuffer *ringbuf = ring->buffer;
1619 int rem = ringbuf->size - ringbuf->tail;
1580 1620
1581 if (ring->space < rem) { 1621 if (ringbuf->space < rem) {
1582 int ret = ring_wait_for_space(ring, rem); 1622 int ret = ring_wait_for_space(ring, rem);
1583 if (ret) 1623 if (ret)
1584 return ret; 1624 return ret;
1585 } 1625 }
1586 1626
1587 virt = ring->virtual_start + ring->tail; 1627 virt = ringbuf->virtual_start + ringbuf->tail;
1588 rem /= 4; 1628 rem /= 4;
1589 while (rem--) 1629 while (rem--)
1590 iowrite32(MI_NOOP, virt++); 1630 iowrite32(MI_NOOP, virt++);
1591 1631
1592 ring->tail = 0; 1632 ringbuf->tail = 0;
1593 ring->space = ring_space(ring); 1633 ringbuf->space = ring_space(ring);
1594 1634
1595 return 0; 1635 return 0;
1596} 1636}
1597 1637
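A quick worked example of the wrap: with a 4096-byte ring and the tail at 4080, rem starts at 16 bytes; after the wait (if needed) the loop emits 16/4 = 4 MI_NOOP dwords, the tail restarts at 0, and the free space is recomputed. Padding out the end like this guarantees no command ever straddles the wrap point.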
1598int intel_ring_idle(struct intel_ring_buffer *ring) 1638int intel_ring_idle(struct intel_engine_cs *ring)
1599{ 1639{
1600 u32 seqno; 1640 u32 seqno;
1601 int ret; 1641 int ret;
@@ -1619,7 +1659,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1619} 1659}
1620 1660
1621static int 1661static int
1622intel_ring_alloc_seqno(struct intel_ring_buffer *ring) 1662intel_ring_alloc_seqno(struct intel_engine_cs *ring)
1623{ 1663{
1624 if (ring->outstanding_lazy_seqno) 1664 if (ring->outstanding_lazy_seqno)
1625 return 0; 1665 return 0;
@@ -1637,18 +1677,19 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1637 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); 1677 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1638} 1678}
1639 1679
1640static int __intel_ring_prepare(struct intel_ring_buffer *ring, 1680static int __intel_ring_prepare(struct intel_engine_cs *ring,
1641 int bytes) 1681 int bytes)
1642{ 1682{
1683 struct intel_ringbuffer *ringbuf = ring->buffer;
1643 int ret; 1684 int ret;
1644 1685
1645 if (unlikely(ring->tail + bytes > ring->effective_size)) { 1686 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
1646 ret = intel_wrap_ring_buffer(ring); 1687 ret = intel_wrap_ring_buffer(ring);
1647 if (unlikely(ret)) 1688 if (unlikely(ret))
1648 return ret; 1689 return ret;
1649 } 1690 }
1650 1691
1651 if (unlikely(ring->space < bytes)) { 1692 if (unlikely(ringbuf->space < bytes)) {
1652 ret = ring_wait_for_space(ring, bytes); 1693 ret = ring_wait_for_space(ring, bytes);
1653 if (unlikely(ret)) 1694 if (unlikely(ret))
1654 return ret; 1695 return ret;
@@ -1657,7 +1698,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1657 return 0; 1698 return 0;
1658} 1699}
1659 1700
1660int intel_ring_begin(struct intel_ring_buffer *ring, 1701int intel_ring_begin(struct intel_engine_cs *ring,
1661 int num_dwords) 1702 int num_dwords)
1662{ 1703{
1663 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1704 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1677,19 +1718,20 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1677 if (ret) 1718 if (ret)
1678 return ret; 1719 return ret;
1679 1720
1680 ring->space -= num_dwords * sizeof(uint32_t); 1721 ring->buffer->space -= num_dwords * sizeof(uint32_t);
1681 return 0; 1722 return 0;
1682} 1723}
1683 1724
1684/* Align the ring tail to a cacheline boundary */ 1725/* Align the ring tail to a cacheline boundary */
1685int intel_ring_cacheline_align(struct intel_ring_buffer *ring) 1726int intel_ring_cacheline_align(struct intel_engine_cs *ring)
1686{ 1727{
1687 int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); 1728 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1688 int ret; 1729 int ret;
1689 1730
1690 if (num_dwords == 0) 1731 if (num_dwords == 0)
1691 return 0; 1732 return 0;
1692 1733
1734 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1693 ret = intel_ring_begin(ring, num_dwords); 1735 ret = intel_ring_begin(ring, num_dwords);
1694 if (ret) 1736 if (ret)
1695 return ret; 1737 return ret;
@@ -1702,7 +1744,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
1702 return 0; 1744 return 0;
1703} 1745}
1704 1746
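The reworked alignment math also fixes a subtle off-by-a-cacheline: the old (64 - (tail & 63)) / 4 expression evaluated to 16 for an already-aligned tail, so a full cacheline of NOOPs was emitted when none was needed. The new code measures how far into the line the tail sits and early-outs when that is 0; e.g. with CACHELINE_BYTES = 64 and tail = 0x48, (0x48 & 63) / 4 = 2 dwords are consumed, so 64/4 - 2 = 14 MI_NOOPs pad to the boundary.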
1705void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1747void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1706{ 1748{
1707 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1749 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1708 1750
@@ -1719,7 +1761,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1719 ring->hangcheck.seqno = seqno; 1761 ring->hangcheck.seqno = seqno;
1720} 1762}
1721 1763
1722static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1764static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
1723 u32 value) 1765 u32 value)
1724{ 1766{
1725 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1767 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1752,7 +1794,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1752 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1794 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1753} 1795}
1754 1796
1755static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, 1797static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
1756 u32 invalidate, u32 flush) 1798 u32 invalidate, u32 flush)
1757{ 1799{
1758 uint32_t cmd; 1800 uint32_t cmd;
@@ -1788,8 +1830,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1788} 1830}
1789 1831
1790static int 1832static int
1791gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1833gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1792 u32 offset, u32 len, 1834 u64 offset, u32 len,
1793 unsigned flags) 1835 unsigned flags)
1794{ 1836{
1795 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1837 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1803,8 +1845,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1803 1845
1804 /* FIXME(BDW): Address space and security selectors. */ 1846 /* FIXME(BDW): Address space and security selectors. */
1805 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); 1847 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1806 intel_ring_emit(ring, offset); 1848 intel_ring_emit(ring, lower_32_bits(offset));
1807 intel_ring_emit(ring, 0); 1849 intel_ring_emit(ring, upper_32_bits(offset));
1808 intel_ring_emit(ring, MI_NOOP); 1850 intel_ring_emit(ring, MI_NOOP);
1809 intel_ring_advance(ring); 1851 intel_ring_advance(ring);
1810 1852
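Now that offset is a u64, the gen8 path can emit a full above-4GiB batch address as two dwords. lower_32_bits()/upper_32_bits() are the stock helpers from include/linux/kernel.h, roughly:

    #define lower_32_bits(n) ((u32)(n))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) /* split shift avoids a
                                                           shift-count warning on
                                                           32-bit builds */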
@@ -1812,8 +1854,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1812} 1854}
1813 1855
1814static int 1856static int
1815hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1857hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1816 u32 offset, u32 len, 1858 u64 offset, u32 len,
1817 unsigned flags) 1859 unsigned flags)
1818{ 1860{
1819 int ret; 1861 int ret;
@@ -1833,8 +1875,8 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1833} 1875}
1834 1876
1835static int 1877static int
1836gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1878gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1837 u32 offset, u32 len, 1879 u64 offset, u32 len,
1838 unsigned flags) 1880 unsigned flags)
1839{ 1881{
1840 int ret; 1882 int ret;
@@ -1855,7 +1897,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1855 1897
1856/* Blitter support (SandyBridge+) */ 1898/* Blitter support (SandyBridge+) */
1857 1899
1858static int gen6_ring_flush(struct intel_ring_buffer *ring, 1900static int gen6_ring_flush(struct intel_engine_cs *ring,
1859 u32 invalidate, u32 flush) 1901 u32 invalidate, u32 flush)
1860{ 1902{
1861 struct drm_device *dev = ring->dev; 1903 struct drm_device *dev = ring->dev;
@@ -1898,7 +1940,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1898int intel_init_render_ring_buffer(struct drm_device *dev) 1940int intel_init_render_ring_buffer(struct drm_device *dev)
1899{ 1941{
1900 struct drm_i915_private *dev_priv = dev->dev_private; 1942 struct drm_i915_private *dev_priv = dev->dev_private;
1901 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1943 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1902 1944
1903 ring->name = "render ring"; 1945 ring->name = "render ring";
1904 ring->id = RCS; 1946 ring->id = RCS;
@@ -1920,15 +1962,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1920 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1962 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1921 ring->get_seqno = gen6_ring_get_seqno; 1963 ring->get_seqno = gen6_ring_get_seqno;
1922 ring->set_seqno = ring_set_seqno; 1964 ring->set_seqno = ring_set_seqno;
1923 ring->sync_to = gen6_ring_sync; 1965 ring->semaphore.sync_to = gen6_ring_sync;
1924 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID; 1966 ring->semaphore.signal = gen6_signal;
1925 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV; 1967 /*
1926 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB; 1968 * The current semaphore is only applied on pre-gen8 platform.
1927 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE; 1969 * And there is no VCS2 ring on the pre-gen8 platform. So the
1928 ring->signal_mbox[RCS] = GEN6_NOSYNC; 1970 * semaphore between RCS and VCS2 is initialized as INVALID.
1929 ring->signal_mbox[VCS] = GEN6_VRSYNC; 1971 * Gen8 will initialize the sema between VCS2 and RCS later.
1930 ring->signal_mbox[BCS] = GEN6_BRSYNC; 1972 */
1931 ring->signal_mbox[VECS] = GEN6_VERSYNC; 1973 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1974 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
1975 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
1976 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
1977 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
1978 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
1979 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
1980 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
1981 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
1982 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
1932 } else if (IS_GEN5(dev)) { 1983 } else if (IS_GEN5(dev)) {
1933 ring->add_request = pc_render_add_request; 1984 ring->add_request = pc_render_add_request;
1934 ring->flush = gen4_render_ring_flush; 1985 ring->flush = gen4_render_ring_flush;
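The flat semaphore_register[]/signal_mbox[] arrays become a semaphore.mbox struct with explicit wait/signal halves plus a VCS2 slot. The indexing convention is unchanged; on the render ring, for instance:

    /* register RCS polls when asked to wait on the video ring */
    u32 wait_mbox = dev_priv->ring[RCS].semaphore.mbox.wait[VCS];   /* MI_SEMAPHORE_SYNC_RV */
    /* mailbox RCS writes so the video ring can wait on RCS */
    u32 sig_mbox = dev_priv->ring[RCS].semaphore.mbox.signal[VCS];  /* GEN6_VRSYNC */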
@@ -1999,16 +2050,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1999int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) 2050int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2000{ 2051{
2001 struct drm_i915_private *dev_priv = dev->dev_private; 2052 struct drm_i915_private *dev_priv = dev->dev_private;
2002 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 2053 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2054 struct intel_ringbuffer *ringbuf = ring->buffer;
2003 int ret; 2055 int ret;
2004 2056
2057 if (ringbuf == NULL) {
2058 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2059 if (!ringbuf)
2060 return -ENOMEM;
2061 ring->buffer = ringbuf;
2062 }
2063
2005 ring->name = "render ring"; 2064 ring->name = "render ring";
2006 ring->id = RCS; 2065 ring->id = RCS;
2007 ring->mmio_base = RENDER_RING_BASE; 2066 ring->mmio_base = RENDER_RING_BASE;
2008 2067
2009 if (INTEL_INFO(dev)->gen >= 6) { 2068 if (INTEL_INFO(dev)->gen >= 6) {
2010 /* non-kms not supported on gen6+ */ 2069 /* non-kms not supported on gen6+ */
2011 return -ENODEV; 2070 ret = -ENODEV;
2071 goto err_ringbuf;
2012 } 2072 }
2013 2073
2014 /* Note: gem is not supported on gen5/ilk without kms (the corresponding 2074 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
@@ -2043,31 +2103,39 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2043 INIT_LIST_HEAD(&ring->active_list); 2103 INIT_LIST_HEAD(&ring->active_list);
2044 INIT_LIST_HEAD(&ring->request_list); 2104 INIT_LIST_HEAD(&ring->request_list);
2045 2105
2046 ring->size = size; 2106 ringbuf->size = size;
2047 ring->effective_size = ring->size; 2107 ringbuf->effective_size = ringbuf->size;
2048 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 2108 if (IS_I830(ring->dev) || IS_845G(ring->dev))
2049 ring->effective_size -= 128; 2109 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2050 2110
2051 ring->virtual_start = ioremap_wc(start, size); 2111 ringbuf->virtual_start = ioremap_wc(start, size);
2052 if (ring->virtual_start == NULL) { 2112 if (ringbuf->virtual_start == NULL) {
2053 DRM_ERROR("can not ioremap virtual address for" 2113 DRM_ERROR("can not ioremap virtual address for"
2054 " ring buffer\n"); 2114 " ring buffer\n");
2055 return -ENOMEM; 2115 ret = -ENOMEM;
2116 goto err_ringbuf;
2056 } 2117 }
2057 2118
2058 if (!I915_NEED_GFX_HWS(dev)) { 2119 if (!I915_NEED_GFX_HWS(dev)) {
2059 ret = init_phys_status_page(ring); 2120 ret = init_phys_status_page(ring);
2060 if (ret) 2121 if (ret)
2061 return ret; 2122 goto err_vstart;
2062 } 2123 }
2063 2124
2064 return 0; 2125 return 0;
2126
2127err_vstart:
2128 iounmap(ringbuf->virtual_start);
2129err_ringbuf:
2130 kfree(ringbuf);
2131 ring->buffer = NULL;
2132 return ret;
2065} 2133}
2066 2134
2067int intel_init_bsd_ring_buffer(struct drm_device *dev) 2135int intel_init_bsd_ring_buffer(struct drm_device *dev)
2068{ 2136{
2069 struct drm_i915_private *dev_priv = dev->dev_private; 2137 struct drm_i915_private *dev_priv = dev->dev_private;
2070 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 2138 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2071 2139
2072 ring->name = "bsd ring"; 2140 ring->name = "bsd ring";
2073 ring->id = VCS; 2141 ring->id = VCS;
@@ -2096,15 +2164,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2096 ring->dispatch_execbuffer = 2164 ring->dispatch_execbuffer =
2097 gen6_ring_dispatch_execbuffer; 2165 gen6_ring_dispatch_execbuffer;
2098 } 2166 }
2099 ring->sync_to = gen6_ring_sync; 2167 ring->semaphore.sync_to = gen6_ring_sync;
2100 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; 2168 ring->semaphore.signal = gen6_signal;
2101 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; 2169 /*
2102 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB; 2170 * The current semaphore is only applied on pre-gen8 platform.
2103 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE; 2171 * And there is no VCS2 ring on the pre-gen8 platform. So the
2104 ring->signal_mbox[RCS] = GEN6_RVSYNC; 2172 * semaphore between VCS and VCS2 is initialized as INVALID.
2105 ring->signal_mbox[VCS] = GEN6_NOSYNC; 2173 * Gen8 will initialize the sema between VCS2 and VCS later.
2106 ring->signal_mbox[BCS] = GEN6_BVSYNC; 2174 */
2107 ring->signal_mbox[VECS] = GEN6_VEVSYNC; 2175 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2176 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2177 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2178 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2179 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2180 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2181 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2182 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2183 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2184 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2108 } else { 2185 } else {
2109 ring->mmio_base = BSD_RING_BASE; 2186 ring->mmio_base = BSD_RING_BASE;
2110 ring->flush = bsd_ring_flush; 2187 ring->flush = bsd_ring_flush;
@@ -2127,10 +2204,63 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2127 return intel_init_ring_buffer(dev, ring); 2204 return intel_init_ring_buffer(dev, ring);
2128} 2205}
2129 2206
2207/**
2208 * Initialize the second BSD ring (VCS2).
2209 * Note that this ring only exists on Broadwell GT3.
2210 */
2211int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2212{
2213 struct drm_i915_private *dev_priv = dev->dev_private;
2214 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2215
2216	if (INTEL_INFO(dev)->gen != 8) {
2217 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2218 return -EINVAL;
2219 }
2220
2221	ring->name = "bsd2 ring";
2222 ring->id = VCS2;
2223
2224 ring->write_tail = ring_write_tail;
2225 ring->mmio_base = GEN8_BSD2_RING_BASE;
2226 ring->flush = gen6_bsd_ring_flush;
2227 ring->add_request = gen6_add_request;
2228 ring->get_seqno = gen6_ring_get_seqno;
2229 ring->set_seqno = ring_set_seqno;
2230 ring->irq_enable_mask =
2231 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2232 ring->irq_get = gen8_ring_get_irq;
2233 ring->irq_put = gen8_ring_put_irq;
2234 ring->dispatch_execbuffer =
2235 gen8_ring_dispatch_execbuffer;
2236 ring->semaphore.sync_to = gen6_ring_sync;
2237 ring->semaphore.signal = gen6_signal;
2238 /*
2239 * The current semaphore is only applied on pre-gen8 platforms, and
2240 * there is no bsd2 ring on those platforms. So the semaphore
2241 * registers between VCS2 and the other rings are initialized as invalid.
2242 * Gen8 will initialize the semaphores between VCS2 and the other rings later.
2243 */
2244 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2245 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2246 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2247 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2248 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2249 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2250 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2251 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2252 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2253 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2254
2255 ring->init = init_ring_common;
2256
2257 return intel_init_ring_buffer(dev, ring);
2258}
2259
2130int intel_init_blt_ring_buffer(struct drm_device *dev) 2260int intel_init_blt_ring_buffer(struct drm_device *dev)
2131{ 2261{
2132 struct drm_i915_private *dev_priv = dev->dev_private; 2262 struct drm_i915_private *dev_priv = dev->dev_private;
2133 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 2263 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2134 2264
2135 ring->name = "blitter ring"; 2265 ring->name = "blitter ring";
2136 ring->id = BCS; 2266 ring->id = BCS;
@@ -2153,15 +2283,24 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2153 ring->irq_put = gen6_ring_put_irq; 2283 ring->irq_put = gen6_ring_put_irq;
2154 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2284 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2155 } 2285 }
2156 ring->sync_to = gen6_ring_sync; 2286 ring->semaphore.sync_to = gen6_ring_sync;
2157 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; 2287 ring->semaphore.signal = gen6_signal;
2158 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; 2288 /*
2159 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID; 2289 * The current semaphore is only applied on pre-gen8 platform. And
2160 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE; 2290 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
2161 ring->signal_mbox[RCS] = GEN6_RBSYNC; 2291 * between BCS and VCS2 is initialized as INVALID.
2162 ring->signal_mbox[VCS] = GEN6_VBSYNC; 2292 * Gen8 will initialize the sema between BCS and VCS2 later.
2163 ring->signal_mbox[BCS] = GEN6_NOSYNC; 2293 */
2164 ring->signal_mbox[VECS] = GEN6_VEBSYNC; 2294 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2295 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2296 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2297 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2298 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2299 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2300 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2301 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2302 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2303 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2165 ring->init = init_ring_common; 2304 ring->init = init_ring_common;
2166 2305
2167 return intel_init_ring_buffer(dev, ring); 2306 return intel_init_ring_buffer(dev, ring);
@@ -2170,7 +2309,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2170int intel_init_vebox_ring_buffer(struct drm_device *dev) 2309int intel_init_vebox_ring_buffer(struct drm_device *dev)
2171{ 2310{
2172 struct drm_i915_private *dev_priv = dev->dev_private; 2311 struct drm_i915_private *dev_priv = dev->dev_private;
2173 struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; 2312 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2174 2313
2175 ring->name = "video enhancement ring"; 2314 ring->name = "video enhancement ring";
2176 ring->id = VECS; 2315 ring->id = VECS;
@@ -2194,22 +2333,25 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
2194 ring->irq_put = hsw_vebox_put_irq; 2333 ring->irq_put = hsw_vebox_put_irq;
2195 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2334 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2196 } 2335 }
2197 ring->sync_to = gen6_ring_sync; 2336 ring->semaphore.sync_to = gen6_ring_sync;
2198 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; 2337 ring->semaphore.signal = gen6_signal;
2199 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; 2338 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2200 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB; 2339 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2201 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID; 2340 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2202 ring->signal_mbox[RCS] = GEN6_RVESYNC; 2341 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2203 ring->signal_mbox[VCS] = GEN6_VVESYNC; 2342 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2204 ring->signal_mbox[BCS] = GEN6_BVESYNC; 2343 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2205 ring->signal_mbox[VECS] = GEN6_NOSYNC; 2344 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2345 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2346 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2347 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2206 ring->init = init_ring_common; 2348 ring->init = init_ring_common;
2207 2349
2208 return intel_init_ring_buffer(dev, ring); 2350 return intel_init_ring_buffer(dev, ring);
2209} 2351}
2210 2352
2211int 2353int
2212intel_ring_flush_all_caches(struct intel_ring_buffer *ring) 2354intel_ring_flush_all_caches(struct intel_engine_cs *ring)
2213{ 2355{
2214 int ret; 2356 int ret;
2215 2357
@@ -2227,7 +2369,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
2227} 2369}
2228 2370
2229int 2371int
2230intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) 2372intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2231{ 2373{
2232 uint32_t flush_domains; 2374 uint32_t flush_domains;
2233 int ret; 2375 int ret;
@@ -2245,3 +2387,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
2245 ring->gpu_caches_dirty = false; 2387 ring->gpu_caches_dirty = false;
2246 return 0; 2388 return 0;
2247} 2389}
2390
2391void
2392intel_stop_ring_buffer(struct intel_engine_cs *ring)
2393{
2394 int ret;
2395
2396 if (!intel_ring_initialized(ring))
2397 return;
2398
2399 ret = intel_ring_idle(ring);
2400 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2401 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2402 ring->name, ret);
2403
2404 stop_ring(ring);
2405}
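stop_ring() is introduced earlier in this series and is not shown in this hunk; in outline (a sketch under that assumption, not the verbatim helper) it asks the engine to stop and waits for MODE_IDLE before the control registers are cleared:

    static bool stop_ring(struct intel_engine_cs *ring)
    {
            struct drm_i915_private *dev_priv = to_i915(ring->dev);

            if (!IS_GEN2(ring->dev)) {
                    I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
                    if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
                            DRM_ERROR("%s: timed out trying to stop ring\n",
                                      ring->name);
            }

            I915_WRITE_CTL(ring, 0);
            I915_WRITE_HEAD(ring, 0);
            ring->write_tail(ring, 0);

            return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
    }

This is what the WARN_ON(... MODE_IDLE ...) in intel_cleanup_ring_buffer() above is cross-checking.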
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2b91c4b4d34b..910c83cf7d44 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,10 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4#include <linux/hashtable.h>
5
6#define I915_CMD_HASH_ORDER 9
7
4/* 8/*
5 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" 9 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
6 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" 10 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -54,76 +58,93 @@ struct intel_ring_hangcheck {
54 bool deadlock; 58 bool deadlock;
55}; 59};
56 60
57struct intel_ring_buffer { 61struct intel_ringbuffer {
62 struct drm_i915_gem_object *obj;
63 void __iomem *virtual_start;
64
65 u32 head;
66 u32 tail;
67 int space;
68 int size;
69 int effective_size;
70
71 /** We track the position of the requests in the ring buffer, and
72 * when each is retired we increment last_retired_head as the GPU
73 * must have finished processing the request and so we know we
74 * can advance the ringbuffer up to that position.
75 *
76 * last_retired_head is set to -1 after the value is consumed so
77 * we can detect new retirements.
78 */
79 u32 last_retired_head;
80};
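With the hardware-buffer state split into its own struct, the engine reaches ring geometry through its buffer pointer; the access pattern used throughout the .c changes above is:

    struct intel_engine_cs *ring = &dev_priv->ring[RCS];
    struct intel_ringbuffer *ringbuf = ring->buffer;

    if (ringbuf->space < bytes)
            ret = ring_wait_for_space(ring, bytes); /* waiting stays with the engine */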
81
82struct intel_engine_cs {
58 const char *name; 83 const char *name;
59 enum intel_ring_id { 84 enum intel_ring_id {
60 RCS = 0x0, 85 RCS = 0x0,
61 VCS, 86 VCS,
62 BCS, 87 BCS,
63 VECS, 88 VECS,
89 VCS2
64 } id; 90 } id;
65#define I915_NUM_RINGS 4 91#define I915_NUM_RINGS 5
92#define LAST_USER_RING (VECS + 1)
66 u32 mmio_base; 93 u32 mmio_base;
67 void __iomem *virtual_start;
68 struct drm_device *dev; 94 struct drm_device *dev;
69 struct drm_i915_gem_object *obj; 95 struct intel_ringbuffer *buffer;
70 96
71 u32 head;
72 u32 tail;
73 int space;
74 int size;
75 int effective_size;
76 struct intel_hw_status_page status_page; 97 struct intel_hw_status_page status_page;
77 98
78 /** We track the position of the requests in the ring buffer, and
79 * when each is retired we increment last_retired_head as the GPU
80 * must have finished processing the request and so we know we
81 * can advance the ringbuffer up to that position.
82 *
83 * last_retired_head is set to -1 after the value is consumed so
84 * we can detect new retirements.
85 */
86 u32 last_retired_head;
87
88 unsigned irq_refcount; /* protected by dev_priv->irq_lock */ 99 unsigned irq_refcount; /* protected by dev_priv->irq_lock */
89 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 100 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
90 u32 trace_irq_seqno; 101 u32 trace_irq_seqno;
91 u32 sync_seqno[I915_NUM_RINGS-1]; 102 bool __must_check (*irq_get)(struct intel_engine_cs *ring);
92 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 103 void (*irq_put)(struct intel_engine_cs *ring);
93 void (*irq_put)(struct intel_ring_buffer *ring);
94 104
95 int (*init)(struct intel_ring_buffer *ring); 105 int (*init)(struct intel_engine_cs *ring);
96 106
97 void (*write_tail)(struct intel_ring_buffer *ring, 107 void (*write_tail)(struct intel_engine_cs *ring,
98 u32 value); 108 u32 value);
99 int __must_check (*flush)(struct intel_ring_buffer *ring, 109 int __must_check (*flush)(struct intel_engine_cs *ring,
100 u32 invalidate_domains, 110 u32 invalidate_domains,
101 u32 flush_domains); 111 u32 flush_domains);
102 int (*add_request)(struct intel_ring_buffer *ring); 112 int (*add_request)(struct intel_engine_cs *ring);
103 /* Some chipsets are not quite as coherent as advertised and need 113 /* Some chipsets are not quite as coherent as advertised and need
104 * an expensive kick to force a true read of the up-to-date seqno. 114 * an expensive kick to force a true read of the up-to-date seqno.
105 * However, the up-to-date seqno is not always required and the last 115 * However, the up-to-date seqno is not always required and the last
106 * seen value is good enough. Note that the seqno will always be 116 * seen value is good enough. Note that the seqno will always be
107 * monotonic, even if not coherent. 117 * monotonic, even if not coherent.
108 */ 118 */
109 u32 (*get_seqno)(struct intel_ring_buffer *ring, 119 u32 (*get_seqno)(struct intel_engine_cs *ring,
110 bool lazy_coherency); 120 bool lazy_coherency);
111 void (*set_seqno)(struct intel_ring_buffer *ring, 121 void (*set_seqno)(struct intel_engine_cs *ring,
112 u32 seqno); 122 u32 seqno);
113 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 123 int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
114 u32 offset, u32 length, 124 u64 offset, u32 length,
115 unsigned flags); 125 unsigned flags);
116#define I915_DISPATCH_SECURE 0x1 126#define I915_DISPATCH_SECURE 0x1
117#define I915_DISPATCH_PINNED 0x2 127#define I915_DISPATCH_PINNED 0x2
118 void (*cleanup)(struct intel_ring_buffer *ring); 128 void (*cleanup)(struct intel_engine_cs *ring);
119 int (*sync_to)(struct intel_ring_buffer *ring,
120 struct intel_ring_buffer *to,
121 u32 seqno);
122 129
123 /* our mbox written by others */ 130 struct {
124 u32 semaphore_register[I915_NUM_RINGS]; 131 u32 sync_seqno[I915_NUM_RINGS-1];
125 /* mboxes this ring signals to */ 132
126 u32 signal_mbox[I915_NUM_RINGS]; 133 struct {
134 /* our mbox written by others */
135 u32 wait[I915_NUM_RINGS];
136 /* mboxes this ring signals to */
137 u32 signal[I915_NUM_RINGS];
138 } mbox;
139
140 /* AKA wait() */
141 int (*sync_to)(struct intel_engine_cs *ring,
142 struct intel_engine_cs *to,
143 u32 seqno);
144 int (*signal)(struct intel_engine_cs *signaller,
145 /* num_dwords needed by caller */
146 unsigned int num_dwords);
147 } semaphore;
127 148
128 /** 149 /**
129 * List of objects currently involved in rendering from the 150 * List of objects currently involved in rendering from the
@@ -153,12 +174,8 @@ struct intel_ring_buffer {
153 174
154 wait_queue_head_t irq_queue; 175 wait_queue_head_t irq_queue;
155 176
156 /** 177 struct intel_context *default_context;
157 * Do an explicit TLB flush before MI_SET_CONTEXT 178 struct intel_context *last_context;
158 */
159 bool itlb_before_ctx_switch;
160 struct i915_hw_context *default_context;
161 struct i915_hw_context *last_context;
162 179
163 struct intel_ring_hangcheck hangcheck; 180 struct intel_ring_hangcheck hangcheck;
164 181
@@ -168,12 +185,13 @@ struct intel_ring_buffer {
168 volatile u32 *cpu_page; 185 volatile u32 *cpu_page;
169 } scratch; 186 } scratch;
170 187
188 bool needs_cmd_parser;
189
171 /* 190 /*
172 * Tables of commands the command parser needs to know about 191 * Table of commands the command parser needs to know about
173 * for this ring. 192 * for this ring.
174 */ 193 */
175 const struct drm_i915_cmd_table *cmd_tables; 194 DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
176 int cmd_table_count;
177 195
178 /* 196 /*
179 * Table of registers allowed in commands that read/write registers. 197 * Table of registers allowed in commands that read/write registers.
@@ -202,20 +220,20 @@ struct intel_ring_buffer {
202}; 220};
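The per-ring command tables give way to a fixed-size hashtable (2^I915_CMD_HASH_ORDER = 512 buckets) built on <linux/hashtable.h>. A sketch of the lookup pattern this enables; cmd_node, cmd_header_key() and matches() are illustrative names, not the parser's actual helpers:

    struct cmd_node {
            const struct drm_i915_cmd_descriptor *desc;
            struct hlist_node node;
    };

    hash_init(ring->cmd_hash);
    hash_add(ring->cmd_hash, &cn->node, cmd_header_key(cn->desc->cmd.value));

    hash_for_each_possible(ring->cmd_hash, cn, node, cmd_header_key(header))
            if (matches(cn->desc, header))
                    return cn->desc;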
203 221
204static inline bool 222static inline bool
205intel_ring_initialized(struct intel_ring_buffer *ring) 223intel_ring_initialized(struct intel_engine_cs *ring)
206{ 224{
207 return ring->obj != NULL; 225 return ring->buffer && ring->buffer->obj;
208} 226}
209 227
210static inline unsigned 228static inline unsigned
211intel_ring_flag(struct intel_ring_buffer *ring) 229intel_ring_flag(struct intel_engine_cs *ring)
212{ 230{
213 return 1 << ring->id; 231 return 1 << ring->id;
214} 232}
215 233
216static inline u32 234static inline u32
217intel_ring_sync_index(struct intel_ring_buffer *ring, 235intel_ring_sync_index(struct intel_engine_cs *ring,
218 struct intel_ring_buffer *other) 236 struct intel_engine_cs *other)
219{ 237{
220 int idx; 238 int idx;
221 239
@@ -233,7 +251,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring,
233} 251}
234 252
235static inline u32 253static inline u32
236intel_read_status_page(struct intel_ring_buffer *ring, 254intel_read_status_page(struct intel_engine_cs *ring,
237 int reg) 255 int reg)
238{ 256{
239 /* Ensure that the compiler doesn't optimize away the load. */ 257 /* Ensure that the compiler doesn't optimize away the load. */
@@ -242,7 +260,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
242} 260}
243 261
244static inline void 262static inline void
245intel_write_status_page(struct intel_ring_buffer *ring, 263intel_write_status_page(struct intel_engine_cs *ring,
246 int reg, u32 value) 264 int reg, u32 value)
247{ 265{
248 ring->status_page.page_addr[reg] = value; 266 ring->status_page.page_addr[reg] = value;
@@ -267,47 +285,51 @@ intel_write_status_page(struct intel_ring_buffer *ring,
267#define I915_GEM_HWS_SCRATCH_INDEX 0x30 285#define I915_GEM_HWS_SCRATCH_INDEX 0x30
268#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 286#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
269 287
270void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 288void intel_stop_ring_buffer(struct intel_engine_cs *ring);
289void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
271 290
272int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 291int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
273int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); 292int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
274static inline void intel_ring_emit(struct intel_ring_buffer *ring, 293static inline void intel_ring_emit(struct intel_engine_cs *ring,
275 u32 data) 294 u32 data)
276{ 295{
277 iowrite32(data, ring->virtual_start + ring->tail); 296 struct intel_ringbuffer *ringbuf = ring->buffer;
278 ring->tail += 4; 297 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
298 ringbuf->tail += 4;
279} 299}
280static inline void intel_ring_advance(struct intel_ring_buffer *ring) 300static inline void intel_ring_advance(struct intel_engine_cs *ring)
281{ 301{
282 ring->tail &= ring->size - 1; 302 struct intel_ringbuffer *ringbuf = ring->buffer;
303 ringbuf->tail &= ringbuf->size - 1;
283} 304}
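The emit helpers now dereference ring->buffer, but the calling convention is untouched. The canonical sequence, exactly as in the gen8 dispatch hunk above:

    ret = intel_ring_begin(ring, 4);  /* reserve 4 dwords, waiting/wrapping as needed */
    if (ret)
            return ret;
    intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8));
    intel_ring_emit(ring, lower_32_bits(offset));
    intel_ring_emit(ring, upper_32_bits(offset));
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);         /* mask the tail back into the ring */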
284void __intel_ring_advance(struct intel_ring_buffer *ring); 305void __intel_ring_advance(struct intel_engine_cs *ring);
285 306
286int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 307int __must_check intel_ring_idle(struct intel_engine_cs *ring);
287void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); 308void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
288int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 309int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
289int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 310int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
290 311
291int intel_init_render_ring_buffer(struct drm_device *dev); 312int intel_init_render_ring_buffer(struct drm_device *dev);
292int intel_init_bsd_ring_buffer(struct drm_device *dev); 313int intel_init_bsd_ring_buffer(struct drm_device *dev);
314int intel_init_bsd2_ring_buffer(struct drm_device *dev);
293int intel_init_blt_ring_buffer(struct drm_device *dev); 315int intel_init_blt_ring_buffer(struct drm_device *dev);
294int intel_init_vebox_ring_buffer(struct drm_device *dev); 316int intel_init_vebox_ring_buffer(struct drm_device *dev);
295 317
296u64 intel_ring_get_active_head(struct intel_ring_buffer *ring); 318u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
297void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 319void intel_ring_setup_status_page(struct intel_engine_cs *ring);
298 320
299static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) 321static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
300{ 322{
301 return ring->tail; 323 return ring->buffer->tail;
302} 324}
303 325
304static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) 326static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
305{ 327{
306 BUG_ON(ring->outstanding_lazy_seqno == 0); 328 BUG_ON(ring->outstanding_lazy_seqno == 0);
307 return ring->outstanding_lazy_seqno; 329 return ring->outstanding_lazy_seqno;
308} 330}
309 331
310static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 332static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
311{ 333{
312 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) 334 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
313 ring->trace_irq_seqno = seqno; 335 ring->trace_irq_seqno = seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 46be00d66df3..6a4d5bc17697 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1153,20 +1153,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1153 pipe_config->pixel_multiplier = 1153 pipe_config->pixel_multiplier =
1154 intel_sdvo_get_pixel_multiplier(adjusted_mode); 1154 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1155 1155
1156 pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
1157
1156 if (intel_sdvo->color_range_auto) { 1158 if (intel_sdvo->color_range_auto) {
1157 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1159 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1158 /* FIXME: This bit is only valid when using TMDS encoding and 8 1160 /* FIXME: This bit is only valid when using TMDS encoding and 8
1159 * bit per color mode. */ 1161 * bit per color mode. */
1160 if (intel_sdvo->has_hdmi_monitor && 1162 if (pipe_config->has_hdmi_sink &&
1161 drm_match_cea_mode(adjusted_mode) > 1) 1163 drm_match_cea_mode(adjusted_mode) > 1)
1162 intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235; 1164 pipe_config->limited_color_range = true;
1163 else 1165 } else {
1164 intel_sdvo->color_range = 0; 1166 if (pipe_config->has_hdmi_sink &&
1167 intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
1168 pipe_config->limited_color_range = true;
1165 } 1169 }
1166 1170
1167 if (intel_sdvo->color_range)
1168 pipe_config->limited_color_range = true;
1169
1170 /* Clock computation needs to happen after pixel multiplier. */ 1171 /* Clock computation needs to happen after pixel multiplier. */
1171 if (intel_sdvo->is_tv) 1172 if (intel_sdvo->is_tv)
1172 i9xx_adjust_sdvo_tv_clock(pipe_config); 1173 i9xx_adjust_sdvo_tv_clock(pipe_config);
@@ -1174,7 +1175,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1174 return true; 1175 return true;
1175} 1176}
1176 1177
1177static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) 1178static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
1178{ 1179{
1179 struct drm_device *dev = intel_encoder->base.dev; 1180 struct drm_device *dev = intel_encoder->base.dev;
1180 struct drm_i915_private *dev_priv = dev->dev_private; 1181 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1223,7 +1224,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1223 if (!intel_sdvo_set_target_input(intel_sdvo)) 1224 if (!intel_sdvo_set_target_input(intel_sdvo))
1224 return; 1225 return;
1225 1226
1226 if (intel_sdvo->has_hdmi_monitor) { 1227 if (crtc->config.has_hdmi_sink) {
1227 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 1228 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
1228 intel_sdvo_set_colorimetry(intel_sdvo, 1229 intel_sdvo_set_colorimetry(intel_sdvo,
1229 SDVO_COLORIMETRY_RGB256); 1230 SDVO_COLORIMETRY_RGB256);
@@ -1258,8 +1259,8 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1258 /* The real mode polarity is set by the SDVO commands, using 1259 /* The real mode polarity is set by the SDVO commands, using
1259 * struct intel_sdvo_dtd. */ 1260 * struct intel_sdvo_dtd. */
1260 sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; 1261 sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
1261 if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi) 1262 if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
1262 sdvox |= intel_sdvo->color_range; 1263 sdvox |= HDMI_COLOR_RANGE_16_235;
1263 if (INTEL_INFO(dev)->gen < 5) 1264 if (INTEL_INFO(dev)->gen < 5)
1264 sdvox |= SDVO_BORDER_ENABLE; 1265 sdvox |= SDVO_BORDER_ENABLE;
1265 } else { 1266 } else {
@@ -1349,6 +1350,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1349 u8 val; 1350 u8 val;
1350 bool ret; 1351 bool ret;
1351 1352
1353 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1354
1352 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); 1355 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
1353 if (!ret) { 1356 if (!ret) {
1354 /* Some sdvo encoders are not spec compliant and don't 1357 /* Some sdvo encoders are not spec compliant and don't
@@ -1377,7 +1380,6 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1377 * other platfroms. 1380 * other platfroms.
1378 */ 1381 */
1379 if (IS_I915G(dev) || IS_I915GM(dev)) { 1382 if (IS_I915G(dev) || IS_I915GM(dev)) {
1380 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1381 pipe_config->pixel_multiplier = 1383 pipe_config->pixel_multiplier =
1382 ((sdvox & SDVO_PORT_MULTIPLY_MASK) 1384 ((sdvox & SDVO_PORT_MULTIPLY_MASK)
1383 >> SDVO_PORT_MULTIPLY_SHIFT) + 1; 1385 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1406,6 +1408,15 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1406 } 1408 }
1407 } 1409 }
1408 1410
1411 if (sdvox & HDMI_COLOR_RANGE_16_235)
1412 pipe_config->limited_color_range = true;
1413
1414 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
1415 &val, 1)) {
1416 if (val == SDVO_ENCODE_HDMI)
1417 pipe_config->has_hdmi_sink = true;
1418 }
1419
1409 WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, 1420 WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
1410 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", 1421 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
1411 pipe_config->pixel_multiplier, encoder_pixel_multiplier); 1422 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1732,7 +1743,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1732 enum drm_connector_status ret; 1743 enum drm_connector_status ret;
1733 1744
1734 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1745 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1735 connector->base.id, drm_get_connector_name(connector)); 1746 connector->base.id, connector->name);
1736 1747
1737 if (!intel_sdvo_get_value(intel_sdvo, 1748 if (!intel_sdvo_get_value(intel_sdvo,
1738 SDVO_CMD_GET_ATTACHED_DISPLAYS, 1749 SDVO_CMD_GET_ATTACHED_DISPLAYS,
@@ -1794,7 +1805,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1794 struct edid *edid; 1805 struct edid *edid;
1795 1806
1796 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1807 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1797 connector->base.id, drm_get_connector_name(connector)); 1808 connector->base.id, connector->name);
1798 1809
1799 /* set the bus switch and get the modes */ 1810 /* set the bus switch and get the modes */
1800 edid = intel_sdvo_get_edid(connector); 1811 edid = intel_sdvo_get_edid(connector);
@@ -1892,7 +1903,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1892 int i; 1903 int i;
1893 1904
1894 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1905 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1895 connector->base.id, drm_get_connector_name(connector)); 1906 connector->base.id, connector->name);
1896 1907
1897 /* Read the list of supported input resolutions for the selected TV 1908 /* Read the list of supported input resolutions for the selected TV
1898 * format. 1909 * format.
@@ -1929,7 +1940,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1929 struct drm_display_mode *newmode; 1940 struct drm_display_mode *newmode;
1930 1941
1931 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1942 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1932 connector->base.id, drm_get_connector_name(connector)); 1943 connector->base.id, connector->name);
1933 1944
1934 /* 1945 /*
1935 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1946 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2999,7 +3010,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2999 3010
3000 intel_encoder->compute_config = intel_sdvo_compute_config; 3011 intel_encoder->compute_config = intel_sdvo_compute_config;
3001 intel_encoder->disable = intel_disable_sdvo; 3012 intel_encoder->disable = intel_disable_sdvo;
3002 intel_encoder->mode_set = intel_sdvo_mode_set; 3013 intel_encoder->pre_enable = intel_sdvo_pre_enable;
3003 intel_encoder->enable = intel_enable_sdvo; 3014 intel_encoder->enable = intel_enable_sdvo;
3004 intel_encoder->get_hw_state = intel_sdvo_get_hw_state; 3015 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
3005 intel_encoder->get_config = intel_sdvo_get_config; 3016 intel_encoder->get_config = intel_sdvo_get_config;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0954f132726e..01d841ea3140 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -29,12 +29,21 @@
29 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and 29 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
30 * VLV_VLV2_PUNIT_HAS_0.8.docx 30 * VLV_VLV2_PUNIT_HAS_0.8.docx
31 */ 31 */
32
33/* Standard MMIO read, non-posted */
34#define SB_MRD_NP 0x00
35/* Standard MMIO write, non-posted */
36#define SB_MWR_NP 0x01
37/* Private register read, double-word addressing, non-posted */
38#define SB_CRRDDA_NP 0x06
39/* Private register write, double-word addressing, non-posted */
40#define SB_CRWRDA_NP 0x07
41
32static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, 42static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
33 u32 port, u32 opcode, u32 addr, u32 *val) 43 u32 port, u32 opcode, u32 addr, u32 *val)
34{ 44{
35 u32 cmd, be = 0xf, bar = 0; 45 u32 cmd, be = 0xf, bar = 0;
36 bool is_read = (opcode == PUNIT_OPCODE_REG_READ || 46 bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
37 opcode == DPIO_OPCODE_REG_READ);
38 47
39 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) | 48 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
40 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) | 49 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
@@ -74,7 +83,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
74 83
75 mutex_lock(&dev_priv->dpio_lock); 84 mutex_lock(&dev_priv->dpio_lock);
76 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, 85 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
77 PUNIT_OPCODE_REG_READ, addr, &val); 86 SB_CRRDDA_NP, addr, &val);
78 mutex_unlock(&dev_priv->dpio_lock); 87 mutex_unlock(&dev_priv->dpio_lock);
79 88
80 return val; 89 return val;
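Concretely, a punit read through vlv_punit_read() now assembles its IOSF command word as:

    cmd = (PCI_DEVFN(2, 0)  << IOSF_DEVFN_SHIFT) |
          (SB_CRRDDA_NP     << IOSF_OPCODE_SHIFT) |
          (IOSF_PORT_PUNIT  << IOSF_PORT_SHIFT) |
          (0xf              << IOSF_BYTE_ENABLES_SHIFT);

and vlv_sideband_rw() classifies it as a read because the opcode is one of SB_MRD_NP/SB_CRRDDA_NP, rather than by the old per-unit opcode defines.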
@@ -86,7 +95,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
86 95
87 mutex_lock(&dev_priv->dpio_lock); 96 mutex_lock(&dev_priv->dpio_lock);
88 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, 97 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
89 PUNIT_OPCODE_REG_WRITE, addr, &val); 98 SB_CRWRDA_NP, addr, &val);
90 mutex_unlock(&dev_priv->dpio_lock); 99 mutex_unlock(&dev_priv->dpio_lock);
91} 100}
92 101
@@ -95,7 +104,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
95 u32 val = 0; 104 u32 val = 0;
96 105
97 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, 106 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
98 PUNIT_OPCODE_REG_READ, reg, &val); 107 SB_CRRDDA_NP, reg, &val);
99 108
100 return val; 109 return val;
101} 110}
@@ -103,7 +112,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
103void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 112void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
104{ 113{
105 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, 114 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
106 PUNIT_OPCODE_REG_WRITE, reg, &val); 115 SB_CRWRDA_NP, reg, &val);
107} 116}
108 117
109u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) 118u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
@@ -114,7 +123,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
114 123
115 mutex_lock(&dev_priv->dpio_lock); 124 mutex_lock(&dev_priv->dpio_lock);
116 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC, 125 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
117 PUNIT_OPCODE_REG_READ, addr, &val); 126 SB_CRRDDA_NP, addr, &val);
118 mutex_unlock(&dev_priv->dpio_lock); 127 mutex_unlock(&dev_priv->dpio_lock);
119 128
120 return val; 129 return val;
@@ -124,56 +133,56 @@ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
124{ 133{
125 u32 val = 0; 134 u32 val = 0;
126 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, 135 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
127 PUNIT_OPCODE_REG_READ, reg, &val); 136 SB_CRRDDA_NP, reg, &val);
128 return val; 137 return val;
129} 138}
130 139
131void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 140void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
132{ 141{
133 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, 142 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
134 PUNIT_OPCODE_REG_WRITE, reg, &val); 143 SB_CRWRDA_NP, reg, &val);
135} 144}
136 145
137u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg) 146u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
138{ 147{
139 u32 val = 0; 148 u32 val = 0;
140 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, 149 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
141 PUNIT_OPCODE_REG_READ, reg, &val); 150 SB_CRRDDA_NP, reg, &val);
142 return val; 151 return val;
143} 152}
144 153
145void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 154void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
146{ 155{
147 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, 156 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
148 PUNIT_OPCODE_REG_WRITE, reg, &val); 157 SB_CRWRDA_NP, reg, &val);
149} 158}
150 159
151u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg) 160u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
152{ 161{
153 u32 val = 0; 162 u32 val = 0;
154 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, 163 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
155 PUNIT_OPCODE_REG_READ, reg, &val); 164 SB_CRRDDA_NP, reg, &val);
156 return val; 165 return val;
157} 166}
158 167
159void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 168void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
160{ 169{
161 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, 170 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
162 PUNIT_OPCODE_REG_WRITE, reg, &val); 171 SB_CRWRDA_NP, reg, &val);
163} 172}
164 173
165u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) 174u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
166{ 175{
167 u32 val = 0; 176 u32 val = 0;
168 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, 177 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
169 PUNIT_OPCODE_REG_READ, reg, &val); 178 SB_CRRDDA_NP, reg, &val);
170 return val; 179 return val;
171} 180}
172 181
173void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 182void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
174{ 183{
175 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, 184 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
176 PUNIT_OPCODE_REG_WRITE, reg, &val); 185 SB_CRWRDA_NP, reg, &val);
177} 186}
178 187
179u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) 188u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
@@ -181,14 +190,22 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
181 u32 val = 0; 190 u32 val = 0;
182 191
183 vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)), 192 vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
184 DPIO_OPCODE_REG_READ, reg, &val); 193 SB_MRD_NP, reg, &val);
194
195 /*
196 * FIXME: There might be some registers where all 1's is a valid value,
197 * so ideally we should check the register offset instead...
198 */
199 WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
200 pipe_name(pipe), reg, val);
201
185 return val; 202 return val;
186} 203}
187 204
188void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val) 205void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
189{ 206{
190 vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)), 207 vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
191 DPIO_OPCODE_REG_WRITE, reg, &val); 208 SB_MWR_NP, reg, &val);
192} 209}
193 210
194/* SBI access */ 211/* SBI access */
@@ -253,13 +270,13 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
253u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg) 270u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
254{ 271{
255 u32 val = 0; 272 u32 val = 0;
256 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, 273 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
257 DPIO_OPCODE_REG_READ, reg, &val); 274 reg, &val);
258 return val; 275 return val;
259} 276}
260 277
261void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) 278void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
262{ 279{
263 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, 280 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
264 DPIO_OPCODE_REG_WRITE, reg, &val); 281 reg, &val);
265} 282}
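
The switch from the PUNIT_/DPIO_OPCODE_* names to the SB_* opcode names (SB_CRRDDA_NP, SB_CRWRDA_NP, SB_MRD_NP, SB_MWR_NP) is mechanical, but the new WARN in vlv_dpio_read() is worth noting: a sideband read of a powered-down unit floats to all ones, so 0xffffffff almost always means the DPIO power well is off rather than a real register value. A minimal stand-alone sketch of the same sanity check (the rd callback is a placeholder, not a real i915 API):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the all-ones sanity check added to vlv_dpio_read(): flag
 * 0xffffffff as a likely read from a powered-down unit. As the FIXME in
 * the patch notes, some registers may legitimately read as all ones. */
static uint32_t checked_sideband_read(uint32_t (*rd)(uint32_t), uint32_t reg)
{
	uint32_t val = rd(reg);

	if (val == 0xffffffff)
		fprintf(stderr, "suspect read: reg 0x%x == 0x%x (power well down?)\n",
			reg, val);
	return val;
}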
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 336ae6c602f2..1b66ddcdfb33 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,106 @@
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
41{
42 /* paranoia */
43 if (!mode->crtc_htotal)
44 return 1;
45
46 return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
47}
48
49static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
50{
51 struct drm_device *dev = crtc->base.dev;
52 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
53 enum pipe pipe = crtc->pipe;
54 long timeout = msecs_to_jiffies_timeout(1);
55 int scanline, min, max, vblank_start;
56 DEFINE_WAIT(wait);
57
58 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
59
60 vblank_start = mode->crtc_vblank_start;
61 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
62 vblank_start = DIV_ROUND_UP(vblank_start, 2);
63
64 /* FIXME needs to be calibrated sensibly */
65 min = vblank_start - usecs_to_scanlines(mode, 100);
66 max = vblank_start - 1;
67
68 if (min <= 0 || max <= 0)
69 return false;
70
71 if (WARN_ON(drm_vblank_get(dev, pipe)))
72 return false;
73
74 local_irq_disable();
75
76 trace_i915_pipe_update_start(crtc, min, max);
77
78 for (;;) {
79 /*
80 * prepare_to_wait() has a memory barrier, which guarantees
81 * other CPUs can see the task state update by the time we
82 * read the scanline.
83 */
84 prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
85
86 scanline = intel_get_crtc_scanline(crtc);
87 if (scanline < min || scanline > max)
88 break;
89
90 if (timeout <= 0) {
91 DRM_ERROR("Potential atomic update failure on pipe %c\n",
92 pipe_name(crtc->pipe));
93 break;
94 }
95
96 local_irq_enable();
97
98 timeout = schedule_timeout(timeout);
99
100 local_irq_disable();
101 }
102
103 finish_wait(&crtc->vbl_wait, &wait);
104
105 drm_vblank_put(dev, pipe);
106
107 *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
108
109 trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
110
111 return true;
112}
113
114static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
115{
116 struct drm_device *dev = crtc->base.dev;
117 enum pipe pipe = crtc->pipe;
118 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
119
120 trace_i915_pipe_update_end(crtc, end_vbl_count);
121
122 local_irq_enable();
123
124 if (start_vbl_count != end_vbl_count)
125 DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
126 pipe_name(pipe), start_vbl_count, end_vbl_count);
127}
128
129static void intel_update_primary_plane(struct intel_crtc *crtc)
130{
131 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
132 int reg = DSPCNTR(crtc->plane);
133
134 if (crtc->primary_enabled)
135 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
136 else
137 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
138}
139
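
The conversion in usecs_to_scanlines() follows from crtc_clock being in kHz: a microsecond budget maps to ceil(usecs * crtc_clock / (1000 * crtc_htotal)) scanlines. A worked example with assumed 1920x1080@60 CEA-861 timings (values not from this patch):

/* Worked example for usecs_to_scanlines() above, assuming 1920x1080@60
 * CEA-861 timings: crtc_clock = 148500 (kHz), crtc_htotal = 2200 pixels.
 * One line takes 2200 / 148500000 Hz ~= 14.8 us, so the 100 us window
 * used for "min" spans ceil(100 * 148500 / (1000 * 2200)) = 7 lines. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int scanlines_for_100us(void)
{
	int usecs = 100, crtc_clock = 148500, crtc_htotal = 2200;

	return DIV_ROUND_UP(usecs * crtc_clock, 1000 * crtc_htotal); /* == 7 */
}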
40static void 140static void
41vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, 141vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
42 struct drm_framebuffer *fb, 142 struct drm_framebuffer *fb,
@@ -48,11 +148,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
48 struct drm_device *dev = dplane->dev; 148 struct drm_device *dev = dplane->dev;
49 struct drm_i915_private *dev_priv = dev->dev_private; 149 struct drm_i915_private *dev_priv = dev->dev_private;
50 struct intel_plane *intel_plane = to_intel_plane(dplane); 150 struct intel_plane *intel_plane = to_intel_plane(dplane);
151 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
51 int pipe = intel_plane->pipe; 152 int pipe = intel_plane->pipe;
52 int plane = intel_plane->plane; 153 int plane = intel_plane->plane;
53 u32 sprctl; 154 u32 sprctl;
54 unsigned long sprsurf_offset, linear_offset; 155 unsigned long sprsurf_offset, linear_offset;
55 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 156 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
157 u32 start_vbl_count;
158 bool atomic_update;
56 159
57 sprctl = I915_READ(SPCNTR(pipe, plane)); 160 sprctl = I915_READ(SPCNTR(pipe, plane));
58 161
@@ -131,6 +234,10 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
131 fb->pitches[0]); 234 fb->pitches[0]);
132 linear_offset -= sprsurf_offset; 235 linear_offset -= sprsurf_offset;
133 236
237 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
238
239 intel_update_primary_plane(intel_crtc);
240
134 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 241 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
135 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 242 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
136 243
@@ -143,7 +250,11 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
143 I915_WRITE(SPCNTR(pipe, plane), sprctl); 250 I915_WRITE(SPCNTR(pipe, plane), sprctl);
144 I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + 251 I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
145 sprsurf_offset); 252 sprsurf_offset);
146 POSTING_READ(SPSURF(pipe, plane)); 253
254 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
255
256 if (atomic_update)
257 intel_pipe_update_end(intel_crtc, start_vbl_count);
147} 258}
148 259
149static void 260static void
@@ -152,14 +263,25 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
152 struct drm_device *dev = dplane->dev; 263 struct drm_device *dev = dplane->dev;
153 struct drm_i915_private *dev_priv = dev->dev_private; 264 struct drm_i915_private *dev_priv = dev->dev_private;
154 struct intel_plane *intel_plane = to_intel_plane(dplane); 265 struct intel_plane *intel_plane = to_intel_plane(dplane);
266 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
155 int pipe = intel_plane->pipe; 267 int pipe = intel_plane->pipe;
156 int plane = intel_plane->plane; 268 int plane = intel_plane->plane;
269 u32 start_vbl_count;
270 bool atomic_update;
271
272 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
273
274 intel_update_primary_plane(intel_crtc);
157 275
158 I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & 276 I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
159 ~SP_ENABLE); 277 ~SP_ENABLE);
160 /* Activate double buffered register update */ 278 /* Activate double buffered register update */
161 I915_WRITE(SPSURF(pipe, plane), 0); 279 I915_WRITE(SPSURF(pipe, plane), 0);
162 POSTING_READ(SPSURF(pipe, plane)); 280
281 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
282
283 if (atomic_update)
284 intel_pipe_update_end(intel_crtc, start_vbl_count);
163 285
164 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); 286 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
165} 287}
@@ -226,10 +348,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
226 struct drm_device *dev = plane->dev; 348 struct drm_device *dev = plane->dev;
227 struct drm_i915_private *dev_priv = dev->dev_private; 349 struct drm_i915_private *dev_priv = dev->dev_private;
228 struct intel_plane *intel_plane = to_intel_plane(plane); 350 struct intel_plane *intel_plane = to_intel_plane(plane);
351 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
229 int pipe = intel_plane->pipe; 352 int pipe = intel_plane->pipe;
230 u32 sprctl, sprscale = 0; 353 u32 sprctl, sprscale = 0;
231 unsigned long sprsurf_offset, linear_offset; 354 unsigned long sprsurf_offset, linear_offset;
232 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 355 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
356 u32 start_vbl_count;
357 bool atomic_update;
233 358
234 sprctl = I915_READ(SPRCTL(pipe)); 359 sprctl = I915_READ(SPRCTL(pipe));
235 360
@@ -299,6 +424,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
299 pixel_size, fb->pitches[0]); 424 pixel_size, fb->pitches[0]);
300 linear_offset -= sprsurf_offset; 425 linear_offset -= sprsurf_offset;
301 426
427 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
428
429 intel_update_primary_plane(intel_crtc);
430
302 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 431 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
303 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 432 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
304 433
@@ -317,7 +446,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
317 I915_WRITE(SPRCTL(pipe), sprctl); 446 I915_WRITE(SPRCTL(pipe), sprctl);
318 I915_WRITE(SPRSURF(pipe), 447 I915_WRITE(SPRSURF(pipe),
319 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); 448 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
320 POSTING_READ(SPRSURF(pipe)); 449
450 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
451
452 if (atomic_update)
453 intel_pipe_update_end(intel_crtc, start_vbl_count);
321} 454}
322 455
323static void 456static void
@@ -326,7 +459,14 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
326 struct drm_device *dev = plane->dev; 459 struct drm_device *dev = plane->dev;
327 struct drm_i915_private *dev_priv = dev->dev_private; 460 struct drm_i915_private *dev_priv = dev->dev_private;
328 struct intel_plane *intel_plane = to_intel_plane(plane); 461 struct intel_plane *intel_plane = to_intel_plane(plane);
462 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
329 int pipe = intel_plane->pipe; 463 int pipe = intel_plane->pipe;
464 u32 start_vbl_count;
465 bool atomic_update;
466
467 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
468
469 intel_update_primary_plane(intel_crtc);
330 470
331 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 471 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
332 /* Can't leave the scaler enabled... */ 472 /* Can't leave the scaler enabled... */
@@ -334,7 +474,11 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
334 I915_WRITE(SPRSCALE(pipe), 0); 474 I915_WRITE(SPRSCALE(pipe), 0);
335 /* Activate double buffered register update */ 475 /* Activate double buffered register update */
336 I915_WRITE(SPRSURF(pipe), 0); 476 I915_WRITE(SPRSURF(pipe), 0);
337 POSTING_READ(SPRSURF(pipe)); 477
478 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
479
480 if (atomic_update)
481 intel_pipe_update_end(intel_crtc, start_vbl_count);
338 482
339 /* 483 /*
340 * Avoid underruns when disabling the sprite. 484 * Avoid underruns when disabling the sprite.
@@ -410,10 +554,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
410 struct drm_device *dev = plane->dev; 554 struct drm_device *dev = plane->dev;
411 struct drm_i915_private *dev_priv = dev->dev_private; 555 struct drm_i915_private *dev_priv = dev->dev_private;
412 struct intel_plane *intel_plane = to_intel_plane(plane); 556 struct intel_plane *intel_plane = to_intel_plane(plane);
557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
413 int pipe = intel_plane->pipe; 558 int pipe = intel_plane->pipe;
414 unsigned long dvssurf_offset, linear_offset; 559 unsigned long dvssurf_offset, linear_offset;
415 u32 dvscntr, dvsscale; 560 u32 dvscntr, dvsscale;
416 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 561 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
562 u32 start_vbl_count;
563 bool atomic_update;
417 564
418 dvscntr = I915_READ(DVSCNTR(pipe)); 565 dvscntr = I915_READ(DVSCNTR(pipe));
419 566
@@ -478,6 +625,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
478 pixel_size, fb->pitches[0]); 625 pixel_size, fb->pitches[0]);
479 linear_offset -= dvssurf_offset; 626 linear_offset -= dvssurf_offset;
480 627
628 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
629
630 intel_update_primary_plane(intel_crtc);
631
481 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 632 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
482 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 633 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
483 634
@@ -491,7 +642,11 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
491 I915_WRITE(DVSCNTR(pipe), dvscntr); 642 I915_WRITE(DVSCNTR(pipe), dvscntr);
492 I915_WRITE(DVSSURF(pipe), 643 I915_WRITE(DVSSURF(pipe),
493 i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); 644 i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
494 POSTING_READ(DVSSURF(pipe)); 645
646 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
647
648 if (atomic_update)
649 intel_pipe_update_end(intel_crtc, start_vbl_count);
495} 650}
496 651
497static void 652static void
@@ -500,14 +655,25 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
500 struct drm_device *dev = plane->dev; 655 struct drm_device *dev = plane->dev;
501 struct drm_i915_private *dev_priv = dev->dev_private; 656 struct drm_i915_private *dev_priv = dev->dev_private;
502 struct intel_plane *intel_plane = to_intel_plane(plane); 657 struct intel_plane *intel_plane = to_intel_plane(plane);
658 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
503 int pipe = intel_plane->pipe; 659 int pipe = intel_plane->pipe;
660 u32 start_vbl_count;
661 bool atomic_update;
662
663 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
664
665 intel_update_primary_plane(intel_crtc);
504 666
505 I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE); 667 I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
506 /* Disable the scaler */ 668 /* Disable the scaler */
507 I915_WRITE(DVSSCALE(pipe), 0); 669 I915_WRITE(DVSSCALE(pipe), 0);
508 /* Flush double buffered register updates */ 670 /* Flush double buffered register updates */
509 I915_WRITE(DVSSURF(pipe), 0); 671 I915_WRITE(DVSSURF(pipe), 0);
510 POSTING_READ(DVSSURF(pipe)); 672
673 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
674
675 if (atomic_update)
676 intel_pipe_update_end(intel_crtc, start_vbl_count);
511 677
512 /* 678 /*
513 * Avoid underruns when disabling the sprite. 679 * Avoid underruns when disabling the sprite.
@@ -519,20 +685,10 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
519} 685}
520 686
521static void 687static void
522intel_enable_primary(struct drm_crtc *crtc) 688intel_post_enable_primary(struct drm_crtc *crtc)
523{ 689{
524 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
525 struct drm_i915_private *dev_priv = dev->dev_private;
526 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 691 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
527 int reg = DSPCNTR(intel_crtc->plane);
528
529 if (intel_crtc->primary_enabled)
530 return;
531
532 intel_crtc->primary_enabled = true;
533
534 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
535 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
536 692
537 /* 693 /*
538 * FIXME IPS should be fine as long as one plane is 694 * FIXME IPS should be fine as long as one plane is
@@ -540,10 +696,7 @@ intel_enable_primary(struct drm_crtc *crtc)
540 * when going from primary only to sprite only and vice 696 * when going from primary only to sprite only and vice
541 * versa. 697 * versa.
542 */ 698 */
543 if (intel_crtc->config.ips_enabled) { 699 hsw_enable_ips(intel_crtc);
544 intel_wait_for_vblank(dev, intel_crtc->pipe);
545 hsw_enable_ips(intel_crtc);
546 }
547 700
548 mutex_lock(&dev->struct_mutex); 701 mutex_lock(&dev->struct_mutex);
549 intel_update_fbc(dev); 702 intel_update_fbc(dev);
@@ -551,17 +704,11 @@ intel_enable_primary(struct drm_crtc *crtc)
551} 704}
552 705
553static void 706static void
554intel_disable_primary(struct drm_crtc *crtc) 707intel_pre_disable_primary(struct drm_crtc *crtc)
555{ 708{
556 struct drm_device *dev = crtc->dev; 709 struct drm_device *dev = crtc->dev;
557 struct drm_i915_private *dev_priv = dev->dev_private; 710 struct drm_i915_private *dev_priv = dev->dev_private;
558 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 711 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
559 int reg = DSPCNTR(intel_crtc->plane);
560
561 if (!intel_crtc->primary_enabled)
562 return;
563
564 intel_crtc->primary_enabled = false;
565 712
566 mutex_lock(&dev->struct_mutex); 713 mutex_lock(&dev->struct_mutex);
567 if (dev_priv->fbc.plane == intel_crtc->plane) 714 if (dev_priv->fbc.plane == intel_crtc->plane)
@@ -575,9 +722,6 @@ intel_disable_primary(struct drm_crtc *crtc)
575 * versa. 722 * versa.
576 */ 723 */
577 hsw_disable_ips(intel_crtc); 724 hsw_disable_ips(intel_crtc);
578
579 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
580 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
581} 725}
582 726
583static int 727static int
@@ -671,7 +815,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
671 struct drm_i915_gem_object *obj = intel_fb->obj; 815 struct drm_i915_gem_object *obj = intel_fb->obj;
672 struct drm_i915_gem_object *old_obj = intel_plane->obj; 816 struct drm_i915_gem_object *old_obj = intel_plane->obj;
673 int ret; 817 int ret;
674 bool disable_primary = false; 818 bool primary_enabled;
675 bool visible; 819 bool visible;
676 int hscale, vscale; 820 int hscale, vscale;
677 int max_scale, min_scale; 821 int max_scale, min_scale;
@@ -842,8 +986,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
842 * If the sprite is completely covering the primary plane, 986 * If the sprite is completely covering the primary plane,
843 * we can disable the primary and save power. 987 * we can disable the primary and save power.
844 */ 988 */
845 disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane); 989 primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
846 WARN_ON(disable_primary && !visible && intel_crtc->active); 990 WARN_ON(!primary_enabled && !visible && intel_crtc->active);
847 991
848 mutex_lock(&dev->struct_mutex); 992 mutex_lock(&dev->struct_mutex);
849 993
@@ -870,12 +1014,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
870 intel_plane->obj = obj; 1014 intel_plane->obj = obj;
871 1015
872 if (intel_crtc->active) { 1016 if (intel_crtc->active) {
873 /* 1017 bool primary_was_enabled = intel_crtc->primary_enabled;
874 * Be sure to re-enable the primary before the sprite is no longer 1018
875 * covering it fully. 1019 intel_crtc->primary_enabled = primary_enabled;
876 */ 1020
877 if (!disable_primary) 1021 if (primary_was_enabled != primary_enabled)
878 intel_enable_primary(crtc); 1022 intel_crtc_wait_for_pending_flips(crtc);
1023
1024 if (primary_was_enabled && !primary_enabled)
1025 intel_pre_disable_primary(crtc);
879 1026
880 if (visible) 1027 if (visible)
881 intel_plane->update_plane(plane, crtc, fb, obj, 1028 intel_plane->update_plane(plane, crtc, fb, obj,
@@ -884,8 +1031,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
884 else 1031 else
885 intel_plane->disable_plane(plane, crtc); 1032 intel_plane->disable_plane(plane, crtc);
886 1033
887 if (disable_primary) 1034 if (!primary_was_enabled && primary_enabled)
888 intel_disable_primary(crtc); 1035 intel_post_enable_primary(crtc);
889 } 1036 }
890 1037
891 /* Unpin old obj after new one is active to avoid ugliness */ 1038 /* Unpin old obj after new one is active to avoid ugliness */
@@ -923,8 +1070,14 @@ intel_disable_plane(struct drm_plane *plane)
923 intel_crtc = to_intel_crtc(plane->crtc); 1070 intel_crtc = to_intel_crtc(plane->crtc);
924 1071
925 if (intel_crtc->active) { 1072 if (intel_crtc->active) {
926 intel_enable_primary(plane->crtc); 1073 bool primary_was_enabled = intel_crtc->primary_enabled;
1074
1075 intel_crtc->primary_enabled = true;
1076
927 intel_plane->disable_plane(plane, plane->crtc); 1077 intel_plane->disable_plane(plane, plane->crtc);
1078
1079 if (!primary_was_enabled && intel_crtc->primary_enabled)
1080 intel_post_enable_primary(plane->crtc);
928 } 1081 }
929 1082
930 if (intel_plane->obj) { 1083 if (intel_plane->obj) {
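
With intel_enable_primary()/intel_disable_primary() gone, the plane update path records the desired state in intel_crtc->primary_enabled up front and derives the pre-disable and post-enable hooks from the state transition. The edge-detection pattern, reduced to a stand-alone sketch (hook callbacks are placeholders):

#include <stdbool.h>

/* Sketch of the enable/disable edge detection intel_update_plane() now
 * uses: compare the previous state with the newly computed one and run
 * the pre-disable hook only on a 1->0 transition, the post-enable hook
 * only on 0->1. */
struct crtc_state { bool primary_enabled; };

static void apply_primary_state(struct crtc_state *c, bool want_enabled,
				void (*pre_disable)(void), void (*post_enable)(void))
{
	bool was_enabled = c->primary_enabled;

	c->primary_enabled = want_enabled;

	if (was_enabled && !want_enabled)
		pre_disable();	/* e.g. tear down FBC/IPS before the plane goes */
	/* ... update or disable the sprite plane here ... */
	if (!was_enabled && want_enabled)
		post_enable();	/* re-enable IPS/FBC once the plane is back */
}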
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index bafe92e317d5..67c6c9a2eb1c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -934,7 +934,86 @@ intel_tv_compute_config(struct intel_encoder *encoder,
934 return true; 934 return true;
935} 935}
936 936
937static void intel_tv_mode_set(struct intel_encoder *encoder) 937static void
938set_tv_mode_timings(struct drm_i915_private *dev_priv,
939 const struct tv_mode *tv_mode,
940 bool burst_ena)
941{
942 u32 hctl1, hctl2, hctl3;
943 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
944
945 hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
946 (tv_mode->htotal << TV_HTOTAL_SHIFT);
947
948 hctl2 = (tv_mode->hburst_start << 16) |
949 (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
950
951 if (burst_ena)
952 hctl2 |= TV_BURST_ENA;
953
954 hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
955 (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
956
957 vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
958 (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
959 (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
960
961 vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
962 (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
963 (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
964
965 vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
966 (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
967 (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
968
969 if (tv_mode->veq_ena)
970 vctl3 |= TV_EQUAL_ENA;
971
972 vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
973 (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
974
975 vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
976 (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
977
978 vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
979 (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
980
981 vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
982 (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
983
984 I915_WRITE(TV_H_CTL_1, hctl1);
985 I915_WRITE(TV_H_CTL_2, hctl2);
986 I915_WRITE(TV_H_CTL_3, hctl3);
987 I915_WRITE(TV_V_CTL_1, vctl1);
988 I915_WRITE(TV_V_CTL_2, vctl2);
989 I915_WRITE(TV_V_CTL_3, vctl3);
990 I915_WRITE(TV_V_CTL_4, vctl4);
991 I915_WRITE(TV_V_CTL_5, vctl5);
992 I915_WRITE(TV_V_CTL_6, vctl6);
993 I915_WRITE(TV_V_CTL_7, vctl7);
994}
995
996static void set_color_conversion(struct drm_i915_private *dev_priv,
997 const struct color_conversion *color_conversion)
998{
999 if (!color_conversion)
1000 return;
1001
1002 I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
1003 color_conversion->gy);
1004 I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
1005 color_conversion->ay);
1006 I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
1007 color_conversion->gu);
1008 I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
1009 color_conversion->au);
1010 I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
1011 color_conversion->gv);
1012 I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
1013 color_conversion->av);
1014}
1015
1016static void intel_tv_pre_enable(struct intel_encoder *encoder)
938{ 1017{
939 struct drm_device *dev = encoder->base.dev; 1018 struct drm_device *dev = encoder->base.dev;
940 struct drm_i915_private *dev_priv = dev->dev_private; 1019 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -942,14 +1021,13 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
942 struct intel_tv *intel_tv = enc_to_tv(encoder); 1021 struct intel_tv *intel_tv = enc_to_tv(encoder);
943 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 1022 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
944 u32 tv_ctl; 1023 u32 tv_ctl;
945 u32 hctl1, hctl2, hctl3;
946 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
947 u32 scctl1, scctl2, scctl3; 1024 u32 scctl1, scctl2, scctl3;
948 int i, j; 1025 int i, j;
949 const struct video_levels *video_levels; 1026 const struct video_levels *video_levels;
950 const struct color_conversion *color_conversion; 1027 const struct color_conversion *color_conversion;
951 bool burst_ena; 1028 bool burst_ena;
952 int pipe = intel_crtc->pipe; 1029 int xpos = 0x0, ypos = 0x0;
1030 unsigned int xsize, ysize;
953 1031
954 if (!tv_mode) 1032 if (!tv_mode)
955 return; /* can't happen (mode_prepare prevents this) */ 1033 return; /* can't happen (mode_prepare prevents this) */
@@ -982,44 +1060,6 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
982 burst_ena = tv_mode->burst_ena; 1060 burst_ena = tv_mode->burst_ena;
983 break; 1061 break;
984 } 1062 }
985 hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
986 (tv_mode->htotal << TV_HTOTAL_SHIFT);
987
988 hctl2 = (tv_mode->hburst_start << 16) |
989 (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
990
991 if (burst_ena)
992 hctl2 |= TV_BURST_ENA;
993
994 hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
995 (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
996
997 vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
998 (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
999 (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
1000
1001 vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
1002 (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
1003 (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
1004
1005 vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
1006 (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
1007 (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
1008
1009 if (tv_mode->veq_ena)
1010 vctl3 |= TV_EQUAL_ENA;
1011
1012 vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
1013 (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
1014
1015 vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
1016 (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
1017
1018 vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
1019 (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
1020
1021 vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
1022 (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
1023 1063
1024 if (intel_crtc->pipe == 1) 1064 if (intel_crtc->pipe == 1)
1025 tv_ctl |= TV_ENC_PIPEB_SELECT; 1065 tv_ctl |= TV_ENC_PIPEB_SELECT;
@@ -1051,37 +1091,16 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1051 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; 1091 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
1052 1092
1053 /* Enable two fixes for the chips that need them. */ 1093 /* Enable two fixes for the chips that need them. */
1054 if (dev->pdev->device < 0x2772) 1094 if (IS_I915GM(dev))
1055 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; 1095 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
1056 1096
1057 I915_WRITE(TV_H_CTL_1, hctl1); 1097 set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
1058 I915_WRITE(TV_H_CTL_2, hctl2); 1098
1059 I915_WRITE(TV_H_CTL_3, hctl3);
1060 I915_WRITE(TV_V_CTL_1, vctl1);
1061 I915_WRITE(TV_V_CTL_2, vctl2);
1062 I915_WRITE(TV_V_CTL_3, vctl3);
1063 I915_WRITE(TV_V_CTL_4, vctl4);
1064 I915_WRITE(TV_V_CTL_5, vctl5);
1065 I915_WRITE(TV_V_CTL_6, vctl6);
1066 I915_WRITE(TV_V_CTL_7, vctl7);
1067 I915_WRITE(TV_SC_CTL_1, scctl1); 1099 I915_WRITE(TV_SC_CTL_1, scctl1);
1068 I915_WRITE(TV_SC_CTL_2, scctl2); 1100 I915_WRITE(TV_SC_CTL_2, scctl2);
1069 I915_WRITE(TV_SC_CTL_3, scctl3); 1101 I915_WRITE(TV_SC_CTL_3, scctl3);
1070 1102
1071 if (color_conversion) { 1103 set_color_conversion(dev_priv, color_conversion);
1072 I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
1073 color_conversion->gy);
1074 I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
1075 color_conversion->ay);
1076 I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
1077 color_conversion->gu);
1078 I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
1079 color_conversion->au);
1080 I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
1081 color_conversion->gv);
1082 I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
1083 color_conversion->av);
1084 }
1085 1104
1086 if (INTEL_INFO(dev)->gen >= 4) 1105 if (INTEL_INFO(dev)->gen >= 4)
1087 I915_WRITE(TV_CLR_KNOBS, 0x00404000); 1106 I915_WRITE(TV_CLR_KNOBS, 0x00404000);
@@ -1092,46 +1111,25 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1092 I915_WRITE(TV_CLR_LEVEL, 1111 I915_WRITE(TV_CLR_LEVEL,
1093 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | 1112 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
1094 (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); 1113 (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
1095 { 1114
1096 int pipeconf_reg = PIPECONF(pipe); 1115 assert_pipe_disabled(dev_priv, intel_crtc->pipe);
1097 int dspcntr_reg = DSPCNTR(intel_crtc->plane); 1116
1098 int pipeconf = I915_READ(pipeconf_reg); 1117 /* Filter ctl must be set before TV_WIN_SIZE */
1099 int dspcntr = I915_READ(dspcntr_reg); 1118 I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
1100 int xpos = 0x0, ypos = 0x0; 1119 xsize = tv_mode->hblank_start - tv_mode->hblank_end;
1101 unsigned int xsize, ysize; 1120 if (tv_mode->progressive)
1102 /* Pipe must be off here */ 1121 ysize = tv_mode->nbr_end + 1;
1103 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1122 else
1104 intel_flush_primary_plane(dev_priv, intel_crtc->plane); 1123 ysize = 2*tv_mode->nbr_end + 1;
1105 1124
1106 /* Wait for vblank for the disable to take effect */ 1125 xpos += intel_tv->margin[TV_MARGIN_LEFT];
1107 if (IS_GEN2(dev)) 1126 ypos += intel_tv->margin[TV_MARGIN_TOP];
1108 intel_wait_for_vblank(dev, intel_crtc->pipe); 1127 xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
1109 1128 intel_tv->margin[TV_MARGIN_RIGHT]);
1110 I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); 1129 ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
1111 /* Wait for vblank for the disable to take effect. */ 1130 intel_tv->margin[TV_MARGIN_BOTTOM]);
1112 intel_wait_for_pipe_off(dev, intel_crtc->pipe); 1131 I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
1113 1132 I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
1114 /* Filter ctl must be set before TV_WIN_SIZE */
1115 I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
1116 xsize = tv_mode->hblank_start - tv_mode->hblank_end;
1117 if (tv_mode->progressive)
1118 ysize = tv_mode->nbr_end + 1;
1119 else
1120 ysize = 2*tv_mode->nbr_end + 1;
1121
1122 xpos += intel_tv->margin[TV_MARGIN_LEFT];
1123 ypos += intel_tv->margin[TV_MARGIN_TOP];
1124 xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
1125 intel_tv->margin[TV_MARGIN_RIGHT]);
1126 ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
1127 intel_tv->margin[TV_MARGIN_BOTTOM]);
1128 I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
1129 I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
1130
1131 I915_WRITE(pipeconf_reg, pipeconf);
1132 I915_WRITE(dspcntr_reg, dspcntr);
1133 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
1134 }
1135 1133
1136 j = 0; 1134 j = 0;
1137 for (i = 0; i < 60; i++) 1135 for (i = 0; i < 60; i++)
@@ -1316,17 +1314,18 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1316 int type; 1314 int type;
1317 1315
1318 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 1316 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
1319 connector->base.id, drm_get_connector_name(connector), 1317 connector->base.id, connector->name,
1320 force); 1318 force);
1321 1319
1322 mode = reported_modes[0]; 1320 mode = reported_modes[0];
1323 1321
1324 if (force) { 1322 if (force) {
1325 struct intel_load_detect_pipe tmp; 1323 struct intel_load_detect_pipe tmp;
1324 struct drm_modeset_acquire_ctx ctx;
1326 1325
1327 if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { 1326 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
1328 type = intel_tv_detect_type(intel_tv, connector); 1327 type = intel_tv_detect_type(intel_tv, connector);
1329 intel_release_load_detect_pipe(connector, &tmp); 1328 intel_release_load_detect_pipe(connector, &tmp, &ctx);
1330 } else 1329 } else
1331 return connector_status_unknown; 1330 return connector_status_unknown;
1332 } else 1331 } else
@@ -1634,7 +1633,7 @@ intel_tv_init(struct drm_device *dev)
1634 1633
1635 intel_encoder->compute_config = intel_tv_compute_config; 1634 intel_encoder->compute_config = intel_tv_compute_config;
1636 intel_encoder->get_config = intel_tv_get_config; 1635 intel_encoder->get_config = intel_tv_get_config;
1637 intel_encoder->mode_set = intel_tv_mode_set; 1636 intel_encoder->pre_enable = intel_tv_pre_enable;
1638 intel_encoder->enable = intel_enable_tv; 1637 intel_encoder->enable = intel_enable_tv;
1639 intel_encoder->disable = intel_disable_tv; 1638 intel_encoder->disable = intel_disable_tv;
1640 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1639 intel_encoder->get_hw_state = intel_tv_get_hw_state;
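
Moving the hook from ->mode_set to ->pre_enable lets the driver simply assert the pipe is off instead of saving, disabling, and restoring PIPECONF/DSPCNTR around the TV_WIN_* writes. The window geometry itself is plain arithmetic: width is the non-blanked span, height derives from nbr_end (doubled plus one for interlaced modes), and the user margins are then subtracted. A sketch with made-up timings, not values from the tv_mode tables:

/* Geometry sketch for the TV_WIN_POS/TV_WIN_SIZE computation above, with
 * made-up timings: hblank_start = 848, hblank_end = 128, nbr_end = 239,
 * interlaced, 8-pixel margins on every side. */
static void tv_window_size(int hblank_start, int hblank_end, int nbr_end,
			   int progressive, int margin,
			   unsigned int *xsize, unsigned int *ysize)
{
	*xsize = hblank_start - hblank_end;			/* 848 - 128 = 720 */
	*ysize = progressive ? nbr_end + 1 : 2 * nbr_end + 1;	/* 2*239 + 1 = 479 */

	*xsize -= 2 * margin;	/* left + right margin: 720 -> 704 */
	*ysize -= 2 * margin;	/* top + bottom margin: 479 -> 463 */
}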
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d0c75779d3f6..79cba593df0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -255,8 +255,7 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
255 255
256} 256}
257 257
258void vlv_force_wake_get(struct drm_i915_private *dev_priv, 258static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
259 int fw_engine)
260{ 259{
261 unsigned long irqflags; 260 unsigned long irqflags;
262 261
@@ -275,8 +274,7 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
275 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 274 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
276} 275}
277 276
278void vlv_force_wake_put(struct drm_i915_private *dev_priv, 277static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
279 int fw_engine)
280{ 278{
281 unsigned long irqflags; 279 unsigned long irqflags;
282 280
@@ -374,7 +372,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
374 if (HAS_FPGA_DBG_UNCLAIMED(dev)) 372 if (HAS_FPGA_DBG_UNCLAIMED(dev))
375 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 373 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
376 374
377 if (IS_HASWELL(dev) && 375 if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
378 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { 376 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
379 /* The docs do not explain exactly how the calculation can be 377 /* The docs do not explain exactly how the calculation can be
380 * made. It is somewhat guessable, but for now, it's always 378 * made. It is somewhat guessable, but for now, it's always
@@ -395,26 +393,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
395 393
396void intel_uncore_sanitize(struct drm_device *dev) 394void intel_uncore_sanitize(struct drm_device *dev)
397{ 395{
398 struct drm_i915_private *dev_priv = dev->dev_private;
399 u32 reg_val;
400
401 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 396 /* BIOS often leaves RC6 enabled, but disable it for hw init */
402 intel_disable_gt_powersave(dev); 397 intel_disable_gt_powersave(dev);
403
404 /* Turn off power gate, require especially for the BIOS less system */
405 if (IS_VALLEYVIEW(dev)) {
406
407 mutex_lock(&dev_priv->rps.hw_lock);
408 reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
409
410 if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
411 PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
412 PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
413 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
414
415 mutex_unlock(&dev_priv->rps.hw_lock);
416
417 }
418} 398}
419 399
420/* 400/*
@@ -488,6 +468,17 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
488#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 468#define NEEDS_FORCE_WAKE(dev_priv, reg) \
489 ((reg) < 0x40000 && (reg) != FORCEWAKE) 469 ((reg) < 0x40000 && (reg) != FORCEWAKE)
490 470
471#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
472 (((reg) >= 0x2000 && (reg) < 0x4000) ||\
473 ((reg) >= 0x5000 && (reg) < 0x8000) ||\
474 ((reg) >= 0xB000 && (reg) < 0x12000) ||\
475 ((reg) >= 0x2E000 && (reg) < 0x30000))
476
477#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
478 (((reg) >= 0x12000 && (reg) < 0x14000) ||\
479 ((reg) >= 0x22000 && (reg) < 0x24000) ||\
480 ((reg) >= 0x30000 && (reg) < 0x40000))
481
491static void 482static void
492ilk_dummy_write(struct drm_i915_private *dev_priv) 483ilk_dummy_write(struct drm_i915_private *dev_priv)
493{ 484{
@@ -854,12 +845,15 @@ void intel_uncore_fini(struct drm_device *dev)
854 intel_uncore_forcewake_reset(dev, false); 845 intel_uncore_forcewake_reset(dev, false);
855} 846}
856 847
848#define GEN_RANGE(l, h) GENMASK(h, l)
849
857static const struct register_whitelist { 850static const struct register_whitelist {
858 uint64_t offset; 851 uint64_t offset;
859 uint32_t size; 852 uint32_t size;
860 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 853 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
854 uint32_t gen_bitmask;
861} whitelist[] = { 855} whitelist[] = {
862 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 }, 856 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
863}; 857};
864 858
865int i915_reg_read_ioctl(struct drm_device *dev, 859int i915_reg_read_ioctl(struct drm_device *dev,
@@ -911,7 +905,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
911 struct drm_i915_private *dev_priv = dev->dev_private; 905 struct drm_i915_private *dev_priv = dev->dev_private;
912 struct drm_i915_reset_stats *args = data; 906 struct drm_i915_reset_stats *args = data;
913 struct i915_ctx_hang_stats *hs; 907 struct i915_ctx_hang_stats *hs;
914 struct i915_hw_context *ctx; 908 struct intel_context *ctx;
915 int ret; 909 int ret;
916 910
917 if (args->flags || args->pad) 911 if (args->flags || args->pad)
@@ -955,6 +949,9 @@ static int i965_do_reset(struct drm_device *dev)
955{ 949{
956 int ret; 950 int ret;
957 951
952 /* FIXME: i965g/gm need a display save/restore for gpu reset. */
953 return -ENODEV;
954
958 /* 955 /*
959 * Set the domains we want to reset (GRDOM/bits 2 and 3) as 956 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
960 * well as the reset bit (GR/bit 0). Setting the GR bit 957 * well as the reset bit (GR/bit 0). Setting the GR bit
@@ -966,7 +963,6 @@ static int i965_do_reset(struct drm_device *dev)
966 if (ret) 963 if (ret)
967 return ret; 964 return ret;
968 965
969 /* We can't reset render&media without also resetting display ... */
970 pci_write_config_byte(dev->pdev, I965_GDRST, 966 pci_write_config_byte(dev->pdev, I965_GDRST,
971 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 967 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
972 968
@@ -979,26 +975,58 @@ static int i965_do_reset(struct drm_device *dev)
979 return 0; 975 return 0;
980} 976}
981 977
978static int g4x_do_reset(struct drm_device *dev)
979{
980 struct drm_i915_private *dev_priv = dev->dev_private;
981 int ret;
982
983 pci_write_config_byte(dev->pdev, I965_GDRST,
984 GRDOM_RENDER | GRDOM_RESET_ENABLE);
985 ret = wait_for(i965_reset_complete(dev), 500);
986 if (ret)
987 return ret;
988
989 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
990 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
991 POSTING_READ(VDECCLK_GATE_D);
992
993 pci_write_config_byte(dev->pdev, I965_GDRST,
994 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
995 ret = wait_for(i965_reset_complete(dev), 500);
996 if (ret)
997 return ret;
998
999 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1000 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1001 POSTING_READ(VDECCLK_GATE_D);
1002
1003 pci_write_config_byte(dev->pdev, I965_GDRST, 0);
1004
1005 return 0;
1006}
1007
982static int ironlake_do_reset(struct drm_device *dev) 1008static int ironlake_do_reset(struct drm_device *dev)
983{ 1009{
984 struct drm_i915_private *dev_priv = dev->dev_private; 1010 struct drm_i915_private *dev_priv = dev->dev_private;
985 u32 gdrst;
986 int ret; 1011 int ret;
987 1012
988 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
989 gdrst &= ~GRDOM_MASK;
990 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1013 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
991 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); 1014 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
992 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 1015 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
1016 ILK_GRDOM_RESET_ENABLE) == 0, 500);
993 if (ret) 1017 if (ret)
994 return ret; 1018 return ret;
995 1019
996 /* We can't reset render&media without also resetting display ... */
997 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
998 gdrst &= ~GRDOM_MASK;
999 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1020 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
1000 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1021 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1001 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 1022 ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
1023 ILK_GRDOM_RESET_ENABLE) == 0, 500);
1024 if (ret)
1025 return ret;
1026
1027 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
1028
1029 return 0;
1002} 1030}
1003 1031
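
Two fixes hide in the ironlake_do_reset() rewrite: the old code waited for bit 0 to become set (`& 0x1`), i.e. for its own reset-enable write to stick, while the new code waits for ILK_GRDOM_RESET_ENABLE to clear, which is how the hardware signals completion; and the GDSR register is zeroed afterwards instead of being left with stale domain bits. The poll-until-clear idiom without the i915 wait_for() machinery, as a sketch (the MMIO callbacks are placeholders):

#include <stdint.h>

/* Poll-until-clear sketch matching the new ironlake_do_reset() flow:
 * write domain | reset_enable, then wait for reset_enable to drop. */
static int reset_domain(uint32_t (*rd)(void), void (*wr)(uint32_t),
			void (*sleep_ms)(int),
			uint32_t domain, uint32_t reset_enable)
{
	int timeout_ms = 500;

	wr(domain | reset_enable);
	while (rd() & reset_enable) {
		if (timeout_ms-- <= 0)
			return -110;	/* -ETIMEDOUT */
		sleep_ms(1);
	}
	return 0;
}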
1004static int gen6_do_reset(struct drm_device *dev) 1032static int gen6_do_reset(struct drm_device *dev)
@@ -1029,7 +1057,11 @@ int intel_gpu_reset(struct drm_device *dev)
1029 case 7: 1057 case 7:
1030 case 6: return gen6_do_reset(dev); 1058 case 6: return gen6_do_reset(dev);
1031 case 5: return ironlake_do_reset(dev); 1059 case 5: return ironlake_do_reset(dev);
1032 case 4: return i965_do_reset(dev); 1060 case 4:
1061 if (IS_G4X(dev))
1062 return g4x_do_reset(dev);
1063 else
1064 return i965_do_reset(dev);
1033 default: return -ENODEV; 1065 default: return -ENODEV;
1034 } 1066 }
1035} 1067}
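
GEN_RANGE(l, h) is just GENMASK(h, l) with the arguments in reading order, so GEN_RANGE(4, 8) expands to bits 4..8 = 0x1F0, the literal it replaces; the reg-read ioctl then tests the running generation as a single bit (gen 4 is 0x10, matching the comment). A self-contained sketch of the mask and the membership test:

#include <stdint.h>

/* GENMASK(h, l) sets bits l..h; GEN_RANGE(4, 8) == GENMASK(8, 4) == 0x1F0,
 * matching the old literal. Gen n is tested as bit n, hence "0x10 for 4". */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define GEN_RANGE(l, h)	GENMASK(h, l)

static int gen_supported(uint32_t gen_bitmask, int gen)
{
	return !!(gen_bitmask & (1u << gen));
}
/* gen_supported(GEN_RANGE(4, 8), 6) -> 1; gen_supported(GEN_RANGE(4, 8), 3) -> 0 */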
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 86b4bb804852..729bfd56b55f 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -214,7 +214,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
214 if (nr < DRM_COMMAND_BASE) 214 if (nr < DRM_COMMAND_BASE)
215 return drm_compat_ioctl(filp, cmd, arg); 215 return drm_compat_ioctl(filp, cmd, arg);
216 216
217 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) 217 if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
218 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; 218 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
219 219
220 if (fn != NULL) 220 if (fn != NULL)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 314685b7f41f..792f924496fc 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1020,7 +1020,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
1020 1020
1021 switch (param->param) { 1021 switch (param->param) {
1022 case MGA_PARAM_IRQ_NR: 1022 case MGA_PARAM_IRQ_NR:
1023 value = drm_dev_to_irq(dev); 1023 value = dev->pdev->irq;
1024 break; 1024 break;
1025 case MGA_PARAM_CARD_TYPE: 1025 case MGA_PARAM_CARD_TYPE:
1026 value = dev_priv->chipset; 1026 value = dev_priv->chipset;
@@ -1099,4 +1099,4 @@ const struct drm_ioctl_desc mga_ioctls[] = {
1099 DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1099 DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1100}; 1100};
1101 1101
1102int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1102int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 26868e5c55b0..f6b283b8375e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -322,17 +322,13 @@ static void mgag200_bo_unref(struct mgag200_bo **bo)
322 322
323 tbo = &((*bo)->bo); 323 tbo = &((*bo)->bo);
324 ttm_bo_unref(&tbo); 324 ttm_bo_unref(&tbo);
325 if (tbo == NULL) 325 *bo = NULL;
326 *bo = NULL;
327
328} 326}
329 327
330void mgag200_gem_free_object(struct drm_gem_object *obj) 328void mgag200_gem_free_object(struct drm_gem_object *obj)
331{ 329{
332 struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj); 330 struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
333 331
334 if (!mgag200_bo)
335 return;
336 mgag200_bo_unref(&mgag200_bo); 332 mgag200_bo_unref(&mgag200_bo);
337} 333}
338 334
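
The mgag200 cleanup relies on ttm_bo_unref() unconditionally NULLing the pointer handed to it, which made the old `if (tbo == NULL)` always true, so the dead test collapses to a plain assignment; the free callback is never handed a NULL object either, so its early return goes too. A simplified illustration of why the test was dead (stand-in types, not the TTM API):

#include <stddef.h>

/* Stand-in illustration: when the inner unref helper NULLs its argument
 * unconditionally (as ttm_bo_unref() does), a follow-up NULL test on the
 * local copy is always true, so the wrapper can just assign. */
struct obj { int refs; };

static void inner_unref(struct obj **p)
{
	(*p)->refs--;		/* real code frees on the last reference */
	*p = NULL;		/* always cleared, regardless of refcount */
}

static void outer_unref(struct obj **wrapper)
{
	struct obj *tmp = *wrapper;

	inner_unref(&tmp);	/* tmp is guaranteed NULL afterwards */
	*wrapper = NULL;	/* so no need for "if (tmp == NULL)" */
}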
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b6984971ce0c..f12388967856 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@ config DRM_MSM
3 tristate "MSM DRM" 3 tristate "MSM DRM"
4 depends on DRM 4 depends on DRM
5 depends on MSM_IOMMU 5 depends on MSM_IOMMU
6 depends on ARCH_MSM8960 || (ARM && COMPILE_TEST) 6 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select SHMEM 8 select SHMEM
9 select TMPFS 9 select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5e1e6b0cd8ac..93ca49c8df44 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -34,6 +34,8 @@ msm-y := \
34 msm_gem_submit.o \ 34 msm_gem_submit.o \
35 msm_gpu.o \ 35 msm_gpu.o \
36 msm_iommu.o \ 36 msm_iommu.o \
37 msm_perf.o \
38 msm_rd.o \
37 msm_ringbuffer.o 39 msm_ringbuffer.o
38 40
39msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o 41msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index f20fbde5dc49..942e09d898a8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -207,11 +207,11 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
207 /* Turn on performance counters: */ 207 /* Turn on performance counters: */
208 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); 208 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
209 209
210 /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS 210 /* Enable the perfcntrs that we use.. */
211 * we will use this to augment our hang detection: 211 for (i = 0; i < gpu->num_perfcntrs; i++) {
212 */ 212 const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
213 gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT, 213 gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
214 SP_FS_FULL_ALU_INSTRUCTIONS); 214 }
215 215
216 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK); 216 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
217 217
@@ -465,6 +465,13 @@ static const struct adreno_gpu_funcs funcs = {
465 }, 465 },
466}; 466};
467 467
468static const struct msm_gpu_perfcntr perfcntrs[] = {
469 { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
470 SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
471 { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
472 SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
473};
474
468struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) 475struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
469{ 476{
470 struct a3xx_gpu *a3xx_gpu = NULL; 477 struct a3xx_gpu *a3xx_gpu = NULL;
@@ -504,6 +511,9 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
504 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", 511 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
505 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); 512 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
506 513
514 gpu->perfcntrs = perfcntrs;
515 gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
516
507 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev); 517 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
508 if (ret) 518 if (ret)
509 goto fail; 519 goto fail;
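
Rather than hard-coding SP counter 7, a3xx_hw_init() now walks a small table, so exposing another counter (as the new ALUACTIVE entry does) is a one-line change, and the msm_perf debugfs code added by this series can reuse the same descriptions. The table-driven pattern, sketched with placeholder types and a placeholder register writer:

#include <stdint.h>

/* Sketch of the table-driven counter setup a3xx_hw_init() switches to:
 * each entry pairs a select register with the event value to program
 * into it. Field names mirror the diff; reg_write is a placeholder. */
struct perfcntr {
	uint32_t select_reg;	/* which mux register to program */
	uint32_t select_val;	/* which event to count */
	const char *name;	/* label exposed via debugfs */
};

static void enable_perfcntrs(void (*reg_write)(uint32_t reg, uint32_t val),
			     const struct perfcntr *cntrs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		reg_write(cntrs[i].select_reg, cntrs[i].select_val);
}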
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 7dedfdd12075..e56a6196867c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -247,36 +247,49 @@ void hdmi_connector_irq(struct drm_connector *connector)
247 } 247 }
248} 248}
249 249
250static enum drm_connector_status detect_reg(struct hdmi *hdmi)
251{
252 uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
253 return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
254 connector_status_connected : connector_status_disconnected;
255}
256
257static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
258{
259 const struct hdmi_platform_config *config = hdmi->config;
260 return gpio_get_value(config->hpd_gpio) ?
261 connector_status_connected :
262 connector_status_disconnected;
263}
264
250static enum drm_connector_status hdmi_connector_detect( 265static enum drm_connector_status hdmi_connector_detect(
251 struct drm_connector *connector, bool force) 266 struct drm_connector *connector, bool force)
252{ 267{
253 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); 268 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
254 struct hdmi *hdmi = hdmi_connector->hdmi; 269 struct hdmi *hdmi = hdmi_connector->hdmi;
255 const struct hdmi_platform_config *config = hdmi->config; 270 enum drm_connector_status stat_gpio, stat_reg;
256 uint32_t hpd_int_status;
257 int retry = 20; 271 int retry = 20;
258 272
259 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); 273 do {
274 stat_gpio = detect_gpio(hdmi);
275 stat_reg = detect_reg(hdmi);
260 276
261 /* sense seems to in some cases be momentarily de-asserted, don't 277 if (stat_gpio == stat_reg)
262 * let that trick us into thinking the monitor is gone:
263 */
264 while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
265 /* hdmi debounce logic seems to get stuck sometimes,
266 * read directly the gpio to get a second opinion:
267 */
268 if (gpio_get_value(config->hpd_gpio)) {
269 DBG("gpio tells us we are connected!");
270 hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
271 break; 278 break;
272 } 279
273 mdelay(10); 280 mdelay(10);
274 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); 281 } while (--retry);
275 DBG("status=%08x", hpd_int_status); 282
283 /* the status we get from reading gpio seems to be more reliable,
284 * so trust that one the most if we didn't manage to get hdmi and
285 * gpio status to agree:
286 */
287 if (stat_gpio != stat_reg) {
288 DBG("HDMI_HPD_INT_STATUS tells us: %d", stat_reg);
289 DBG("hpd gpio tells us: %d", stat_gpio);
276 } 290 }
277 291
278 return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ? 292 return stat_gpio;
279 connector_status_connected : connector_status_disconnected;
280} 293}
281 294
282static void hdmi_connector_destroy(struct drm_connector *connector) 295static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -389,7 +402,8 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
389 DRM_MODE_CONNECTOR_HDMIA); 402 DRM_MODE_CONNECTOR_HDMIA);
390 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); 403 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
391 404
392 connector->polled = DRM_CONNECTOR_POLL_HPD; 405 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
406 DRM_CONNECTOR_POLL_DISCONNECT;
393 407
394 connector->interlace_allowed = 1; 408 connector->interlace_allowed = 1;
395 connector->doublescan_allowed = 0; 409 connector->doublescan_allowed = 0;
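
The rewritten detect path replaces the one-sided "gpio says connected" override with a symmetric consensus loop: sample both the HPD status register and the gpio until they agree (up to 20 tries, 10 ms apart), and fall back to the gpio reading, which the comment notes has been the more reliable source, if they never do. As a stand-alone sketch (the callbacks are placeholders):

/* Sketch of the consensus polling in hdmi_connector_detect(): sample two
 * independent status sources until they agree or the retries run out,
 * then prefer the more trusted one. */
static int detect_with_consensus(int (*read_trusted)(void),
				 int (*read_other)(void),
				 void (*delay_ms)(int))
{
	int retry = 20;
	int a, b;

	do {
		a = read_trusted();
		b = read_other();
		if (a == b)
			break;
		delay_ms(10);
	} while (--retry);

	return a;	/* trusted source wins on disagreement */
}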
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index ef9957dbac94..74cebb51e8c2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -217,8 +217,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
217{ 217{
218 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 218 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
219 219
220 mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
221
222 drm_crtc_cleanup(crtc); 220 drm_crtc_cleanup(crtc);
223 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work); 221 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
224 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); 222 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6ea10bdb6e8f..ebe2e60f3ab1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -195,8 +195,6 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195{ 195{
196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197 197
198 mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
199
200 drm_crtc_cleanup(crtc); 198 drm_crtc_cleanup(crtc);
201 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work); 199 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
202 200
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ee8446c1b5f6..42caf7fcb0b9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -280,12 +280,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
280 goto fail; 280 goto fail;
281 } 281 }
282 282
283 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") || 283 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
284 get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") || 284 if (ret)
285 get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") || 285 goto fail;
286 get_clk(pdev, &mdp5_kms->core_clk, "core_clk") || 286 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
287 get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") || 287 if (ret)
288 get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk"); 288 goto fail;
289 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
290 if (ret)
291 goto fail;
292 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
293 if (ret)
294 goto fail;
295 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
296 if (ret)
297 goto fail;
298 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
289 if (ret) 299 if (ret)
290 goto fail; 300 goto fail;
291 301
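
The get_clk() rewrite looks like churn but fixes a real bug: `ret = a() || b() || ...` evaluates to 0 or 1, so any negative errno from clk_get() (including -EPROBE_DEFER, which the probe path must propagate) was flattened to 1 before reaching the caller. Checking each call keeps the first real error code, as this userspace demonstration shows:

#include <stdio.h>

static int step(int rc) { return rc; }

int main(void)
{
	/* With ||, C yields only 0 or 1: the -517 (-EPROBE_DEFER-like)
	 * code is reduced to 1 before the caller ever sees it. */
	int chained = step(0) || step(-517) || step(0);

	/* Checking each call preserves the first real error code. */
	int ret = step(0);
	if (!ret) ret = step(-517);
	if (!ret) ret = step(0);

	printf("chained=%d preserved=%d\n", chained, ret); /* chained=1 preserved=-517 */
	return 0;
}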
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 47f7bbb9c15a..f3daec4412ad 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -85,8 +85,11 @@ static int mdp5_plane_disable(struct drm_plane *plane)
85static void mdp5_plane_destroy(struct drm_plane *plane) 85static void mdp5_plane_destroy(struct drm_plane *plane)
86{ 86{
87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
88 struct msm_drm_private *priv = plane->dev->dev_private;
89
90 if (priv->kms)
91 mdp5_plane_disable(plane);
88 92
89 mdp5_plane_disable(plane);
90 drm_plane_cleanup(plane); 93 drm_plane_cleanup(plane);
91 94
92 kfree(mdp5_plane); 95 kfree(mdp5_plane);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f9de156b9e65..0d2562fb681e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -220,7 +220,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
220 * is bogus, but non-null if allocation succeeded: 220 * is bogus, but non-null if allocation succeeded:
221 */ 221 */
222 p = dma_alloc_attrs(dev->dev, size, 222 p = dma_alloc_attrs(dev->dev, size,
223 &priv->vram.paddr, 0, &attrs); 223 &priv->vram.paddr, GFP_KERNEL, &attrs);
224 if (!p) { 224 if (!p) {
225 dev_err(dev->dev, "failed to allocate VRAM\n"); 225 dev_err(dev->dev, "failed to allocate VRAM\n");
226 priv->vram.paddr = 0; 226 priv->vram.paddr = 0;
@@ -288,7 +288,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
288 } 288 }
289 289
290 pm_runtime_get_sync(dev->dev); 290 pm_runtime_get_sync(dev->dev);
291 ret = drm_irq_install(dev); 291 ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
292 pm_runtime_put_sync(dev->dev); 292 pm_runtime_put_sync(dev->dev);
293 if (ret < 0) { 293 if (ret < 0) {
294 dev_err(dev->dev, "failed to install IRQ handler\n"); 294 dev_err(dev->dev, "failed to install IRQ handler\n");
@@ -299,6 +299,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
299 priv->fbdev = msm_fbdev_init(dev); 299 priv->fbdev = msm_fbdev_init(dev);
300#endif 300#endif
301 301
302 ret = msm_debugfs_late_init(dev);
303 if (ret)
304 goto fail;
305
302 drm_kms_helper_poll_init(dev); 306 drm_kms_helper_poll_init(dev);
303 307
304 return 0; 308 return 0;
@@ -382,11 +386,8 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
382static void msm_lastclose(struct drm_device *dev) 386static void msm_lastclose(struct drm_device *dev)
383{ 387{
384 struct msm_drm_private *priv = dev->dev_private; 388 struct msm_drm_private *priv = dev->dev_private;
385 if (priv->fbdev) { 389 if (priv->fbdev)
386 drm_modeset_lock_all(dev); 390 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
387 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
388 drm_modeset_unlock_all(dev);
389 }
390} 391}
391 392
392static irqreturn_t msm_irq(int irq, void *arg) 393static irqreturn_t msm_irq(int irq, void *arg)
@@ -531,6 +532,41 @@ static struct drm_info_list msm_debugfs_list[] = {
531 { "fb", show_locked, 0, msm_fb_show }, 532 { "fb", show_locked, 0, msm_fb_show },
532}; 533};
533 534
535static int late_init_minor(struct drm_minor *minor)
536{
537 int ret;
538
539 if (!minor)
540 return 0;
541
542 ret = msm_rd_debugfs_init(minor);
543 if (ret) {
544 dev_err(minor->dev->dev, "could not install rd debugfs\n");
545 return ret;
546 }
547
548 ret = msm_perf_debugfs_init(minor);
549 if (ret) {
550 dev_err(minor->dev->dev, "could not install perf debugfs\n");
551 return ret;
552 }
553
554 return 0;
555}
556
557int msm_debugfs_late_init(struct drm_device *dev)
558{
559 int ret;
560 ret = late_init_minor(dev->primary);
561 if (ret)
562 return ret;
563 ret = late_init_minor(dev->render);
564 if (ret)
565 return ret;
566 ret = late_init_minor(dev->control);
567 return ret;
568}
569
534static int msm_debugfs_init(struct drm_minor *minor) 570static int msm_debugfs_init(struct drm_minor *minor)
535{ 571{
536 struct drm_device *dev = minor->dev; 572 struct drm_device *dev = minor->dev;
@@ -545,13 +581,17 @@ static int msm_debugfs_init(struct drm_minor *minor)
 		return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 static void msm_debugfs_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(msm_debugfs_list,
 			ARRAY_SIZE(msm_debugfs_list), minor);
+	if (!minor->dev->dev_private)
+		return;
+	msm_rd_debugfs_cleanup(minor);
+	msm_perf_debugfs_cleanup(minor);
 }
 #endif
 
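The new msm_debugfs_late_init() above fans out over dev->primary, dev->render and dev->control, skipping any minor that is NULL, and registers the "rd" and "perf" files added later in this series. A quick way to confirm the per-card files actually appeared is a minimal userspace sketch -- the debugfs mount point (/sys/kernel/debug) and DRM minor number (0) are assumptions, not anything this patch guarantees:

/* check for the two debugfs files wired up by the late-init hook */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *files[] = {
		"/sys/kernel/debug/dri/0/rd",	/* assumed mount + minor */
		"/sys/kernel/debug/dri/0/perf",
	};
	struct stat st;
	unsigned i;

	for (i = 0; i < 2; i++)
		printf("%s: %s\n", files[i],
		       stat(files[i], &st) ? "missing" : "present");
	return 0;
}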
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d10ee0b5aac..8a2c5fd0893e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,7 @@
 #include <asm/sizes.h>
 
 
-#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
 /* stubs we need for compile-test: */
 static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 {
@@ -55,6 +55,9 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
55struct msm_kms; 55struct msm_kms;
56struct msm_gpu; 56struct msm_gpu;
57struct msm_mmu; 57struct msm_mmu;
58struct msm_rd_state;
59struct msm_perf_state;
60struct msm_gem_submit;
58 61
59#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ 62#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
60 63
@@ -82,6 +85,9 @@ struct msm_drm_private {
82 uint32_t next_fence, completed_fence; 85 uint32_t next_fence, completed_fence;
83 wait_queue_head_t fence_event; 86 wait_queue_head_t fence_event;
84 87
88 struct msm_rd_state *rd;
89 struct msm_perf_state *perf;
90
85 /* list of GEM objects: */ 91 /* list of GEM objects: */
86 struct list_head inactive_list; 92 struct list_head inactive_list;
87 93
@@ -204,6 +210,15 @@ void __exit hdmi_unregister(void);
204void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); 210void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
205void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); 211void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
206void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); 212void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
213int msm_debugfs_late_init(struct drm_device *dev);
214int msm_rd_debugfs_init(struct drm_minor *minor);
215void msm_rd_debugfs_cleanup(struct drm_minor *minor);
216void msm_rd_dump_submit(struct msm_gem_submit *submit);
217int msm_perf_debugfs_init(struct drm_minor *minor);
218void msm_perf_debugfs_cleanup(struct drm_minor *minor);
219#else
220static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
221static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
207#endif 222#endif
208 223
209void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, 224void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 3246bb46c4f2..bfb052688f8e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -90,6 +90,7 @@ struct msm_gem_submit {
90 uint32_t type; 90 uint32_t type;
91 uint32_t size; /* in dwords */ 91 uint32_t size; /* in dwords */
92 uint32_t iova; 92 uint32_t iova;
93 uint32_t idx; /* cmdstream buffer idx in bos[] */
93 } cmd[MAX_CMDS]; 94 } cmd[MAX_CMDS];
94 struct { 95 struct {
95 uint32_t flags; 96 uint32_t flags;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1f1f4cffdaed..cd0554f68316 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -402,6 +402,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
402 submit->cmd[i].type = submit_cmd.type; 402 submit->cmd[i].type = submit_cmd.type;
403 submit->cmd[i].size = submit_cmd.size / 4; 403 submit->cmd[i].size = submit_cmd.size / 4;
404 submit->cmd[i].iova = iova + submit_cmd.submit_offset; 404 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
405 submit->cmd[i].idx = submit_cmd.submit_idx;
405 406
406 if (submit->valid) 407 if (submit->valid)
407 continue; 408 continue;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3e667ca1f2b9..c6322197db8c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -320,6 +320,101 @@ static void hangcheck_handler(unsigned long data)
320} 320}
321 321
322/* 322/*
323 * Performance Counters:
324 */
325
326/* called under perf_lock */
327static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
328{
329 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
330 int i, n = min(ncntrs, gpu->num_perfcntrs);
331
332 /* read current values: */
333 for (i = 0; i < gpu->num_perfcntrs; i++)
334 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
335
336 /* update cntrs: */
337 for (i = 0; i < n; i++)
338 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
339
340 /* save current values: */
341 for (i = 0; i < gpu->num_perfcntrs; i++)
342 gpu->last_cntrs[i] = current_cntrs[i];
343
344 return n;
345}
346
347static void update_sw_cntrs(struct msm_gpu *gpu)
348{
349 ktime_t time;
350 uint32_t elapsed;
351 unsigned long flags;
352
353 spin_lock_irqsave(&gpu->perf_lock, flags);
354 if (!gpu->perfcntr_active)
355 goto out;
356
357 time = ktime_get();
358 elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
359
360 gpu->totaltime += elapsed;
361 if (gpu->last_sample.active)
362 gpu->activetime += elapsed;
363
364 gpu->last_sample.active = msm_gpu_active(gpu);
365 gpu->last_sample.time = time;
366
367out:
368 spin_unlock_irqrestore(&gpu->perf_lock, flags);
369}
370
371void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
372{
373 unsigned long flags;
374
375 spin_lock_irqsave(&gpu->perf_lock, flags);
376 /* we could dynamically enable/disable perfcntr registers too.. */
377 gpu->last_sample.active = msm_gpu_active(gpu);
378 gpu->last_sample.time = ktime_get();
379 gpu->activetime = gpu->totaltime = 0;
380 gpu->perfcntr_active = true;
381 update_hw_cntrs(gpu, 0, NULL);
382 spin_unlock_irqrestore(&gpu->perf_lock, flags);
383}
384
385void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
386{
387 gpu->perfcntr_active = false;
388}
389
390/* returns -errno or # of cntrs sampled */
391int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
392 uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
393{
394 unsigned long flags;
395 int ret;
396
397 spin_lock_irqsave(&gpu->perf_lock, flags);
398
399 if (!gpu->perfcntr_active) {
400 ret = -EINVAL;
401 goto out;
402 }
403
404 *activetime = gpu->activetime;
405 *totaltime = gpu->totaltime;
406
407 gpu->activetime = gpu->totaltime = 0;
408
409 ret = update_hw_cntrs(gpu, ncntrs, cntrs);
410
411out:
412 spin_unlock_irqrestore(&gpu->perf_lock, flags);
413
414 return ret;
415}
416
417/*
323 * Cmdstream submission/retirement: 418 * Cmdstream submission/retirement:
324 */ 419 */
325 420
@@ -361,6 +456,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
361{ 456{
362 struct msm_drm_private *priv = gpu->dev->dev_private; 457 struct msm_drm_private *priv = gpu->dev->dev_private;
363 queue_work(priv->wq, &gpu->retire_work); 458 queue_work(priv->wq, &gpu->retire_work);
459 update_sw_cntrs(gpu);
364} 460}
365 461
366/* add bo's to gpu's ring, and kick gpu: */ 462/* add bo's to gpu's ring, and kick gpu: */
@@ -377,6 +473,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
377 473
378 inactive_cancel(gpu); 474 inactive_cancel(gpu);
379 475
476 msm_rd_dump_submit(submit);
477
478 gpu->submitted_fence = submit->fence;
479
480 update_sw_cntrs(gpu);
481
380 ret = gpu->funcs->submit(gpu, submit, ctx); 482 ret = gpu->funcs->submit(gpu, submit, ctx);
381 priv->lastctx = ctx; 483 priv->lastctx = ctx;
382 484
@@ -429,6 +531,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
429 struct iommu_domain *iommu; 531 struct iommu_domain *iommu;
430 int i, ret; 532 int i, ret;
431 533
534 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
535 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
536
432 gpu->dev = drm; 537 gpu->dev = drm;
433 gpu->funcs = funcs; 538 gpu->funcs = funcs;
434 gpu->name = name; 539 gpu->name = name;
@@ -444,6 +549,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
444 setup_timer(&gpu->hangcheck_timer, hangcheck_handler, 549 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
445 (unsigned long)gpu); 550 (unsigned long)gpu);
446 551
552 spin_lock_init(&gpu->perf_lock);
553
447 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks)); 554 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
448 555
449 /* Map registers: */ 556 /* Map registers: */
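A note on the sampling math above: the hardware counters read in update_hw_cntrs() are free-running 32-bit values, so each sample reports current - last in unsigned arithmetic, which stays correct even across a counter wrap. A runnable userspace model of just that calculation:

/* model of the update_hw_cntrs() delta: unsigned 32-bit subtraction
 * gives the right per-period count even when the counter wraps
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t last_cntr;

static uint32_t sample(uint32_t current)
{
	uint32_t delta = current - last_cntr;	/* mod 2^32, wrap-safe */

	last_cntr = current;
	return delta;
}

int main(void)
{
	last_cntr = 0xfffffff0u;	/* pretend the last sample sat just below the wrap */
	printf("delta = %u\n", (unsigned)sample(0x10));	/* 32, not ~4 billion */
	printf("delta = %u\n", (unsigned)sample(0x30));	/* 32 again */
	return 0;
}

The software counters follow the same last-sample scheme one level up: update_sw_cntrs() adds the elapsed time to totaltime unconditionally and to activetime only while the GPU was busy, which is exactly the ratio the perf debugfs file later turns into %BUSY.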
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index fad27008922f..9b579b792840 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -25,6 +25,7 @@
25#include "msm_ringbuffer.h" 25#include "msm_ringbuffer.h"
26 26
27struct msm_gem_submit; 27struct msm_gem_submit;
28struct msm_gpu_perfcntr;
28 29
29/* So far, with hardware that I've seen to date, we can have: 30/* So far, with hardware that I've seen to date, we can have:
30 * + zero, one, or two z180 2d cores 31 * + zero, one, or two z180 2d cores
@@ -64,6 +65,18 @@ struct msm_gpu {
64 struct drm_device *dev; 65 struct drm_device *dev;
65 const struct msm_gpu_funcs *funcs; 66 const struct msm_gpu_funcs *funcs;
66 67
68 /* performance counters (hw & sw): */
69 spinlock_t perf_lock;
70 bool perfcntr_active;
71 struct {
72 bool active;
73 ktime_t time;
74 } last_sample;
75 uint32_t totaltime, activetime; /* sw counters */
76 uint32_t last_cntrs[5]; /* hw counters */
77 const struct msm_gpu_perfcntr *perfcntrs;
78 uint32_t num_perfcntrs;
79
67 struct msm_ringbuffer *rb; 80 struct msm_ringbuffer *rb;
68 uint32_t rb_iova; 81 uint32_t rb_iova;
69 82
@@ -113,6 +126,19 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
113 return gpu->submitted_fence > gpu->funcs->last_fence(gpu); 126 return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
114} 127}
115 128
129/* Perf-Counters:
130 * The select_reg and select_val are just there for the benefit of the child
131 * class that actually enables the perf counter.. but msm_gpu base class
132 * will handle sampling/displaying the counters.
133 */
134
135struct msm_gpu_perfcntr {
136 uint32_t select_reg;
137 uint32_t sample_reg;
138 uint32_t select_val;
139 const char *name;
140};
141
116static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) 142static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
117{ 143{
118 msm_writel(data, gpu->mmio + (reg << 2)); 144 msm_writel(data, gpu->mmio + (reg << 2));
@@ -126,6 +152,11 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
126int msm_gpu_pm_suspend(struct msm_gpu *gpu); 152int msm_gpu_pm_suspend(struct msm_gpu *gpu);
127int msm_gpu_pm_resume(struct msm_gpu *gpu); 153int msm_gpu_pm_resume(struct msm_gpu *gpu);
128 154
155void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
156void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
157int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
158 uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
159
129void msm_gpu_retire(struct msm_gpu *gpu); 160void msm_gpu_retire(struct msm_gpu *gpu);
130int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 161int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
131 struct msm_file_private *ctx); 162 struct msm_file_private *ctx);
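Per the Perf-Counters comment above, a GPU-specific child class only supplies the counter table (it writes select_val to select_reg itself), while the msm_gpu base class reads sample_reg in update_hw_cntrs(); msm_gpu_init() also clamps num_perfcntrs to the five last_cntrs slots. A sketch of what such a table looks like -- the register offsets, select values and counter names below are invented placeholders for illustration, not taken from any real adreno generation:

#include <stdio.h>
#include <stdint.h>

struct msm_gpu_perfcntr {
	uint32_t select_reg;	/* written with select_val by the child class */
	uint32_t sample_reg;	/* read by the base class when sampling */
	uint32_t select_val;
	const char *name;
};

/* hypothetical table: offsets and select values are made up */
static const struct msm_gpu_perfcntr perfcntrs[] = {
	{ 0x0444, 0x0460, 1, "CP_ALWAYS_COUNT" },
	{ 0x0445, 0x0464, 3, "CP_BUSY_CYCLES" },
};

int main(void)
{
	unsigned i, n = sizeof(perfcntrs) / sizeof(perfcntrs[0]);

	for (i = 0; i < n; i++)
		printf("%s: select 0x%04x <- %u, sample 0x%04x\n",
		       perfcntrs[i].name,
		       (unsigned)perfcntrs[i].select_reg,
		       (unsigned)perfcntrs[i].select_val,
		       (unsigned)perfcntrs[i].sample_reg);
	return 0;
}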
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
new file mode 100644
index 000000000000..830857c47c86
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -0,0 +1,275 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18/* For profiling, userspace can:
19 *
20 *   tail -f /sys/kernel/debug/dri/<minor>/perf
21 *
22 * This will enable performance counters/profiling to track the busy time
23 * and any gpu specific performance counters that are supported.
24 */
25
26#ifdef CONFIG_DEBUG_FS
27
28#include <linux/debugfs.h>
29
30#include "msm_drv.h"
31#include "msm_gpu.h"
32
33struct msm_perf_state {
34 struct drm_device *dev;
35
36 bool open;
37 int cnt;
38 struct mutex read_lock;
39
40 char buf[256];
41 int buftot, bufpos;
42
43 unsigned long next_jiffies;
44
45 struct dentry *ent;
46 struct drm_info_node *node;
47};
48
49#define SAMPLE_TIME (HZ/4)
50
51/* wait for next sample time: */
52static int wait_sample(struct msm_perf_state *perf)
53{
54 unsigned long start_jiffies = jiffies;
55
56 if (time_after(perf->next_jiffies, start_jiffies)) {
57 unsigned long remaining_jiffies =
58 perf->next_jiffies - start_jiffies;
59 int ret = schedule_timeout_interruptible(remaining_jiffies);
60 if (ret > 0) {
61 /* interrupted */
62 return -ERESTARTSYS;
63 }
64 }
65 perf->next_jiffies += SAMPLE_TIME;
66 return 0;
67}
68
69static int refill_buf(struct msm_perf_state *perf)
70{
71 struct msm_drm_private *priv = perf->dev->dev_private;
72 struct msm_gpu *gpu = priv->gpu;
73 char *ptr = perf->buf;
74 int rem = sizeof(perf->buf);
75 int i, n;
76
77 if ((perf->cnt++ % 32) == 0) {
78 /* Header line: */
79 n = snprintf(ptr, rem, "%%BUSY");
80 ptr += n;
81 rem -= n;
82
83 for (i = 0; i < gpu->num_perfcntrs; i++) {
84 const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
85 n = snprintf(ptr, rem, "\t%s", perfcntr->name);
86 ptr += n;
87 rem -= n;
88 }
89 } else {
90 /* Sample line: */
91 uint32_t activetime = 0, totaltime = 0;
92 uint32_t cntrs[5];
93 uint32_t val;
94 int ret;
95
96 /* sleep until next sample time: */
97 ret = wait_sample(perf);
98 if (ret)
99 return ret;
100
101 ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
102 ARRAY_SIZE(cntrs), cntrs);
103 if (ret < 0)
104 return ret;
105
106 val = totaltime ? 1000 * activetime / totaltime : 0;
107 n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
108 ptr += n;
109 rem -= n;
110
111 for (i = 0; i < ret; i++) {
112 /* cycle counters (I think).. convert to MHz.. */
113 val = cntrs[i] / 10000;
114 n = snprintf(ptr, rem, "\t%5d.%02d",
115 val / 100, val % 100);
116 ptr += n;
117 rem -= n;
118 }
119 }
120
121 n = snprintf(ptr, rem, "\n");
122 ptr += n;
123 rem -= n;
124
125 perf->bufpos = 0;
126 perf->buftot = ptr - perf->buf;
127
128 return 0;
129}
130
131static ssize_t perf_read(struct file *file, char __user *buf,
132 size_t sz, loff_t *ppos)
133{
134 struct msm_perf_state *perf = file->private_data;
135 int n = 0, ret;
136
137 mutex_lock(&perf->read_lock);
138
139 if (perf->bufpos >= perf->buftot) {
140 ret = refill_buf(perf);
141 if (ret)
142 goto out;
143 }
144
145 n = min((int)sz, perf->buftot - perf->bufpos);
146 ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
147 if (ret)
148 goto out;
149
150 perf->bufpos += n;
151 *ppos += n;
152
153out:
154 mutex_unlock(&perf->read_lock);
155 if (ret)
156 return ret;
157 return n;
158}
159
160static int perf_open(struct inode *inode, struct file *file)
161{
162 struct msm_perf_state *perf = inode->i_private;
163 struct drm_device *dev = perf->dev;
164 struct msm_drm_private *priv = dev->dev_private;
165 struct msm_gpu *gpu = priv->gpu;
166 int ret = 0;
167
168 mutex_lock(&dev->struct_mutex);
169
170 if (perf->open || !gpu) {
171 ret = -EBUSY;
172 goto out;
173 }
174
175 file->private_data = perf;
176 perf->open = true;
177 perf->cnt = 0;
178 perf->buftot = 0;
179 perf->bufpos = 0;
180 msm_gpu_perfcntr_start(gpu);
181 perf->next_jiffies = jiffies + SAMPLE_TIME;
182
183out:
184 mutex_unlock(&dev->struct_mutex);
185 return ret;
186}
187
188static int perf_release(struct inode *inode, struct file *file)
189{
190 struct msm_perf_state *perf = inode->i_private;
191 struct msm_drm_private *priv = perf->dev->dev_private;
192 msm_gpu_perfcntr_stop(priv->gpu);
193 perf->open = false;
194 return 0;
195}
196
197
198static const struct file_operations perf_debugfs_fops = {
199 .owner = THIS_MODULE,
200 .open = perf_open,
201 .read = perf_read,
202 .llseek = no_llseek,
203 .release = perf_release,
204};
205
206int msm_perf_debugfs_init(struct drm_minor *minor)
207{
208 struct msm_drm_private *priv = minor->dev->dev_private;
209 struct msm_perf_state *perf;
210
211 /* only create on first minor: */
212 if (priv->perf)
213 return 0;
214
215 perf = kzalloc(sizeof(*perf), GFP_KERNEL);
216 if (!perf)
217 return -ENOMEM;
218
219 perf->dev = minor->dev;
220
221 mutex_init(&perf->read_lock);
222 priv->perf = perf;
223
224 perf->node = kzalloc(sizeof(*perf->node), GFP_KERNEL);
225 if (!perf->node)
226 goto fail;
227
228 perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
229 minor->debugfs_root, perf, &perf_debugfs_fops);
230 if (!perf->ent) {
231 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
232 minor->debugfs_root->d_name.name);
233 goto fail;
234 }
235
236 perf->node->minor = minor;
237 perf->node->dent = perf->ent;
238 perf->node->info_ent = NULL;
239
240 mutex_lock(&minor->debugfs_lock);
241 list_add(&perf->node->list, &minor->debugfs_list);
242 mutex_unlock(&minor->debugfs_lock);
243
244 return 0;
245
246fail:
247 msm_perf_debugfs_cleanup(minor);
248 return -1;
249}
250
251void msm_perf_debugfs_cleanup(struct drm_minor *minor)
252{
253 struct msm_drm_private *priv = minor->dev->dev_private;
254 struct msm_perf_state *perf = priv->perf;
255
256 if (!perf)
257 return;
258
259 priv->perf = NULL;
260
261 debugfs_remove(perf->ent);
262
263 if (perf->node) {
264 mutex_lock(&minor->debugfs_lock);
265 list_del(&perf->node->list);
266 mutex_unlock(&minor->debugfs_lock);
267 kfree(perf->node);
268 }
269
270 mutex_destroy(&perf->read_lock);
271
272 kfree(perf);
273}
274
275#endif
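Reading the file is what drives the sampling: each read blocks in wait_sample() for SAMPLE_TIME (HZ/4, roughly 250ms), a header row is re-emitted every 32 lines, and %BUSY is 1000 * activetime / totaltime printed to one decimal place. A minimal reader equivalent to the tail -f usage in the header comment -- assuming debugfs at /sys/kernel/debug, DRM minor 0 and root privileges; the driver allows a single opener, so a second open fails with -EBUSY:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/perf";	/* assumed minor */
	char buf[256];
	ssize_t n;
	int fd = open(path, O_RDONLY);	/* opening starts the counters */

	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* each read returns one sampled line, paced by the driver */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}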
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
new file mode 100644
index 000000000000..9a78c48817c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -0,0 +1,337 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18/* For debugging crashes, userspace can:
19 *
20 * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
21 *
22 * To log the cmdstream in a format that is understood by freedreno/cffdump
23 * utility. By comparing the last successfully completed fence #, to the
24 * cmdstream for the next fence, you can narrow down which process and submit
25 * caused the gpu crash/lockup.
26 *
27 * This bypasses drm_debugfs_create_files() mainly because we need to use
28 * our own fops for a bit more control. In particular, we don't want to
29 * do anything if userspace doesn't have the debugfs file open.
30 */
31
32#ifdef CONFIG_DEBUG_FS
33
34#include <linux/kfifo.h>
35#include <linux/debugfs.h>
36#include <linux/circ_buf.h>
37#include <linux/wait.h>
38
39#include "msm_drv.h"
40#include "msm_gpu.h"
41#include "msm_gem.h"
42
43enum rd_sect_type {
44 RD_NONE,
45 RD_TEST, /* ascii text */
46 RD_CMD, /* ascii text */
47 RD_GPUADDR, /* u32 gpuaddr, u32 size */
48 RD_CONTEXT, /* raw dump */
49 RD_CMDSTREAM, /* raw dump */
50 RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
51 RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
52 RD_FLUSH, /* empty, clear previous params */
53 RD_PROGRAM, /* shader program, raw dump */
54 RD_VERT_SHADER,
55 RD_FRAG_SHADER,
56 RD_BUFFER_CONTENTS,
57 RD_GPU_ID,
58};
59
60#define BUF_SZ 512 /* should be power of 2 */
61
62/* space used: */
63#define circ_count(circ) \
64 (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
65#define circ_count_to_end(circ) \
66 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
67/* space available: */
68#define circ_space(circ) \
69 (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
70#define circ_space_to_end(circ) \
71 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
72
73struct msm_rd_state {
74 struct drm_device *dev;
75
76 bool open;
77
78 struct dentry *ent;
79 struct drm_info_node *node;
80
81 /* current submit to read out: */
82 struct msm_gem_submit *submit;
83
84 /* fifo access is synchronized on the producer side by
85 * struct_mutex held by submit code (otherwise we could
86 * end up w/ cmds logged in different order than they
87 * were executed). And read_lock synchronizes the reads
88 */
89 struct mutex read_lock;
90
91 wait_queue_head_t fifo_event;
92 struct circ_buf fifo;
93
94 char buf[BUF_SZ];
95};
96
97static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
98{
99 struct circ_buf *fifo = &rd->fifo;
100 const char *ptr = buf;
101
102 while (sz > 0) {
103 char *fptr = &fifo->buf[fifo->head];
104 int n;
105
106 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
107
108 n = min(sz, circ_space_to_end(&rd->fifo));
109 memcpy(fptr, ptr, n);
110
111 fifo->head = (fifo->head + n) & (BUF_SZ - 1);
112 sz -= n;
113 ptr += n;
114
115 wake_up_all(&rd->fifo_event);
116 }
117}
118
119static void rd_write_section(struct msm_rd_state *rd,
120 enum rd_sect_type type, const void *buf, int sz)
121{
122 rd_write(rd, &type, 4);
123 rd_write(rd, &sz, 4);
124 rd_write(rd, buf, sz);
125}
126
127static ssize_t rd_read(struct file *file, char __user *buf,
128 size_t sz, loff_t *ppos)
129{
130 struct msm_rd_state *rd = file->private_data;
131 struct circ_buf *fifo = &rd->fifo;
132 const char *fptr = &fifo->buf[fifo->tail];
133 int n = 0, ret = 0;
134
135 mutex_lock(&rd->read_lock);
136
137 ret = wait_event_interruptible(rd->fifo_event,
138 circ_count(&rd->fifo) > 0);
139 if (ret)
140 goto out;
141
142 n = min_t(int, sz, circ_count_to_end(&rd->fifo));
143 ret = copy_to_user(buf, fptr, n);
144 if (ret)
145 goto out;
146
147 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
148 *ppos += n;
149
150 wake_up_all(&rd->fifo_event);
151
152out:
153 mutex_unlock(&rd->read_lock);
154 if (ret)
155 return ret;
156 return n;
157}
158
159static int rd_open(struct inode *inode, struct file *file)
160{
161 struct msm_rd_state *rd = inode->i_private;
162 struct drm_device *dev = rd->dev;
163 struct msm_drm_private *priv = dev->dev_private;
164 struct msm_gpu *gpu = priv->gpu;
165 uint64_t val;
166 uint32_t gpu_id;
167 int ret = 0;
168
169 mutex_lock(&dev->struct_mutex);
170
171 if (rd->open || !gpu) {
172 ret = -EBUSY;
173 goto out;
174 }
175
176 file->private_data = rd;
177 rd->open = true;
178
179 /* the parsing tools need to know gpu-id to know which
180 * register database to load.
181 */
182 gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
183 gpu_id = val;
184
185 rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
186
187out:
188 mutex_unlock(&dev->struct_mutex);
189 return ret;
190}
191
192static int rd_release(struct inode *inode, struct file *file)
193{
194 struct msm_rd_state *rd = inode->i_private;
195 rd->open = false;
196 return 0;
197}
198
199
200static const struct file_operations rd_debugfs_fops = {
201 .owner = THIS_MODULE,
202 .open = rd_open,
203 .read = rd_read,
204 .llseek = no_llseek,
205 .release = rd_release,
206};
207
208int msm_rd_debugfs_init(struct drm_minor *minor)
209{
210 struct msm_drm_private *priv = minor->dev->dev_private;
211 struct msm_rd_state *rd;
212
213 /* only create on first minor: */
214 if (priv->rd)
215 return 0;
216
217 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
218 if (!rd)
219 return -ENOMEM;
220
221 rd->dev = minor->dev;
222 rd->fifo.buf = rd->buf;
223
224 mutex_init(&rd->read_lock);
225 priv->rd = rd;
226
227 init_waitqueue_head(&rd->fifo_event);
228
229 rd->node = kzalloc(sizeof(*rd->node), GFP_KERNEL);
230 if (!rd->node)
231 goto fail;
232
233 rd->ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
234 minor->debugfs_root, rd, &rd_debugfs_fops);
235 if (!rd->ent) {
236 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
237 minor->debugfs_root->d_name.name);
238 goto fail;
239 }
240
241 rd->node->minor = minor;
242 rd->node->dent = rd->ent;
243 rd->node->info_ent = NULL;
244
245 mutex_lock(&minor->debugfs_lock);
246 list_add(&rd->node->list, &minor->debugfs_list);
247 mutex_unlock(&minor->debugfs_lock);
248
249 return 0;
250
251fail:
252 msm_rd_debugfs_cleanup(minor);
253 return -1;
254}
255
256void msm_rd_debugfs_cleanup(struct drm_minor *minor)
257{
258 struct msm_drm_private *priv = minor->dev->dev_private;
259 struct msm_rd_state *rd = priv->rd;
260
261 if (!rd)
262 return;
263
264 priv->rd = NULL;
265
266 debugfs_remove(rd->ent);
267
268 if (rd->node) {
269 mutex_lock(&minor->debugfs_lock);
270 list_del(&rd->node->list);
271 mutex_unlock(&minor->debugfs_lock);
272 kfree(rd->node);
273 }
274
275 mutex_destroy(&rd->read_lock);
276
277 kfree(rd);
278}
279
280/* called under struct_mutex */
281void msm_rd_dump_submit(struct msm_gem_submit *submit)
282{
283 struct drm_device *dev = submit->dev;
284 struct msm_drm_private *priv = dev->dev_private;
285 struct msm_rd_state *rd = priv->rd;
286 char msg[128];
287 int i, n;
288
289 if (!rd->open)
290 return;
291
292 /* writing into fifo is serialized by caller, and
293 * rd->read_lock is used to serialize the reads
294 */
295 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
296
297 n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
298 TASK_COMM_LEN, current->comm, task_pid_nr(current),
299 submit->fence);
300
301 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
302
303 /* could be nice to have an option (module-param?) to snapshot
304 * all the bo's associated with the submit. Handy to see vtx
305 * buffers, etc. For now just the cmdstream bo's is enough.
306 */
307
308 for (i = 0; i < submit->nr_cmds; i++) {
309 uint32_t idx = submit->cmd[i].idx;
310 uint32_t iova = submit->cmd[i].iova;
311 uint32_t szd = submit->cmd[i].size; /* in dwords */
312 struct msm_gem_object *obj = submit->bos[idx].obj;
313 const char *buf = msm_gem_vaddr_locked(&obj->base);
314
315 buf += iova - submit->bos[idx].iova;
316
317 rd_write_section(rd, RD_GPUADDR,
318 (uint32_t[2]){ iova, szd * 4 }, 8);
319 rd_write_section(rd, RD_BUFFER_CONTENTS,
320 buf, szd * 4);
321
322 switch (submit->cmd[i].type) {
323 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
324 /* ignore IB-targets, we've logged the buffer, the
325 * parser tool will follow the IB based on the logged
326 * buffer/gpuaddr, so nothing more to do.
327 */
328 break;
329 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
330 case MSM_SUBMIT_CMD_BUF:
331 rd_write_section(rd, RD_CMDSTREAM_ADDR,
332 (uint32_t[2]){ iova, szd }, 8);
333 break;
334 }
335 }
336}
337#endif
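Everything rd_write_section() emits is a flat type/size/payload record: a u32 from rd_sect_type, a u32 payload byte count, then the payload, all in the host's native endianness (msm_rd_dump_submit() pads the RD_CMD string to a 4-byte boundary). The real consumer is the freedreno cffdump tool mentioned above; a sketch of a standalone parser for a captured logfile.rd that decodes just the RD_GPU_ID section and skips the rest:

#include <stdio.h>
#include <stdint.h>

/* value follows the enum rd_sect_type above */
enum { RD_GPU_ID = 13 };

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "logfile.rd", "rb");
	uint32_t hdr[2];	/* type, size */

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fread(hdr, sizeof(uint32_t), 2, f) == 2) {
		if (hdr[0] == RD_GPU_ID && hdr[1] >= 4) {
			uint32_t id;

			if (fread(&id, sizeof(id), 1, f) != 1)
				break;
			printf("gpu id: %u\n", (unsigned)id);
			fseek(f, (long)hdr[1] - 4, SEEK_CUR);
			continue;
		}
		printf("section type=%u size=%u\n",
		       (unsigned)hdr[0], (unsigned)hdr[1]);
		fseek(f, (long)hdr[1], SEEK_CUR);	/* skip payload */
	}
	fclose(f);
	return 0;
}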
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index b7d216264775..2b6156d0e4b5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -102,6 +102,7 @@ nouveau-y += core/subdev/fb/nvaa.o
102nouveau-y += core/subdev/fb/nvaf.o 102nouveau-y += core/subdev/fb/nvaf.o
103nouveau-y += core/subdev/fb/nvc0.o 103nouveau-y += core/subdev/fb/nvc0.o
104nouveau-y += core/subdev/fb/nve0.o 104nouveau-y += core/subdev/fb/nve0.o
105nouveau-y += core/subdev/fb/gk20a.o
105nouveau-y += core/subdev/fb/gm107.o 106nouveau-y += core/subdev/fb/gm107.o
106nouveau-y += core/subdev/fb/ramnv04.o 107nouveau-y += core/subdev/fb/ramnv04.o
107nouveau-y += core/subdev/fb/ramnv10.o 108nouveau-y += core/subdev/fb/ramnv10.o
@@ -117,25 +118,32 @@ nouveau-y += core/subdev/fb/ramnva3.o
117nouveau-y += core/subdev/fb/ramnvaa.o 118nouveau-y += core/subdev/fb/ramnvaa.o
118nouveau-y += core/subdev/fb/ramnvc0.o 119nouveau-y += core/subdev/fb/ramnvc0.o
119nouveau-y += core/subdev/fb/ramnve0.o 120nouveau-y += core/subdev/fb/ramnve0.o
121nouveau-y += core/subdev/fb/ramgk20a.o
120nouveau-y += core/subdev/fb/ramgm107.o 122nouveau-y += core/subdev/fb/ramgm107.o
121nouveau-y += core/subdev/fb/sddr3.o 123nouveau-y += core/subdev/fb/sddr3.o
122nouveau-y += core/subdev/fb/gddr5.o 124nouveau-y += core/subdev/fb/gddr5.o
123nouveau-y += core/subdev/gpio/base.o 125nouveau-y += core/subdev/gpio/base.o
124nouveau-y += core/subdev/gpio/nv10.o 126nouveau-y += core/subdev/gpio/nv10.o
125nouveau-y += core/subdev/gpio/nv50.o 127nouveau-y += core/subdev/gpio/nv50.o
128nouveau-y += core/subdev/gpio/nv92.o
126nouveau-y += core/subdev/gpio/nvd0.o 129nouveau-y += core/subdev/gpio/nvd0.o
127nouveau-y += core/subdev/gpio/nve0.o 130nouveau-y += core/subdev/gpio/nve0.o
128nouveau-y += core/subdev/i2c/base.o 131nouveau-y += core/subdev/i2c/base.o
129nouveau-y += core/subdev/i2c/anx9805.o 132nouveau-y += core/subdev/i2c/anx9805.o
130nouveau-y += core/subdev/i2c/aux.o 133nouveau-y += core/subdev/i2c/aux.o
131nouveau-y += core/subdev/i2c/bit.o 134nouveau-y += core/subdev/i2c/bit.o
135nouveau-y += core/subdev/i2c/pad.o
136nouveau-y += core/subdev/i2c/padnv04.o
137nouveau-y += core/subdev/i2c/padnv94.o
132nouveau-y += core/subdev/i2c/nv04.o 138nouveau-y += core/subdev/i2c/nv04.o
133nouveau-y += core/subdev/i2c/nv4e.o 139nouveau-y += core/subdev/i2c/nv4e.o
134nouveau-y += core/subdev/i2c/nv50.o 140nouveau-y += core/subdev/i2c/nv50.o
135nouveau-y += core/subdev/i2c/nv94.o 141nouveau-y += core/subdev/i2c/nv94.o
136nouveau-y += core/subdev/i2c/nvd0.o 142nouveau-y += core/subdev/i2c/nvd0.o
143nouveau-y += core/subdev/i2c/nve0.o
137nouveau-y += core/subdev/ibus/nvc0.o 144nouveau-y += core/subdev/ibus/nvc0.o
138nouveau-y += core/subdev/ibus/nve0.o 145nouveau-y += core/subdev/ibus/nve0.o
146nouveau-y += core/subdev/ibus/gk20a.o
139nouveau-y += core/subdev/instmem/base.o 147nouveau-y += core/subdev/instmem/base.o
140nouveau-y += core/subdev/instmem/nv04.o 148nouveau-y += core/subdev/instmem/nv04.o
141nouveau-y += core/subdev/instmem/nv40.o 149nouveau-y += core/subdev/instmem/nv40.o
@@ -214,6 +222,9 @@ nouveau-y += core/engine/device/nvc0.o
214nouveau-y += core/engine/device/nve0.o 222nouveau-y += core/engine/device/nve0.o
215nouveau-y += core/engine/device/gm100.o 223nouveau-y += core/engine/device/gm100.o
216nouveau-y += core/engine/disp/base.o 224nouveau-y += core/engine/disp/base.o
225nouveau-y += core/engine/disp/conn.o
226nouveau-y += core/engine/disp/outp.o
227nouveau-y += core/engine/disp/outpdp.o
217nouveau-y += core/engine/disp/nv04.o 228nouveau-y += core/engine/disp/nv04.o
218nouveau-y += core/engine/disp/nv50.o 229nouveau-y += core/engine/disp/nv50.o
219nouveau-y += core/engine/disp/nv84.o 230nouveau-y += core/engine/disp/nv84.o
@@ -245,6 +256,7 @@ nouveau-y += core/engine/fifo/nv50.o
245nouveau-y += core/engine/fifo/nv84.o 256nouveau-y += core/engine/fifo/nv84.o
246nouveau-y += core/engine/fifo/nvc0.o 257nouveau-y += core/engine/fifo/nvc0.o
247nouveau-y += core/engine/fifo/nve0.o 258nouveau-y += core/engine/fifo/nve0.o
259nouveau-y += core/engine/fifo/gk20a.o
248nouveau-y += core/engine/fifo/nv108.o 260nouveau-y += core/engine/fifo/nv108.o
249nouveau-y += core/engine/graph/ctxnv40.o 261nouveau-y += core/engine/graph/ctxnv40.o
250nouveau-y += core/engine/graph/ctxnv50.o 262nouveau-y += core/engine/graph/ctxnv50.o
@@ -255,6 +267,7 @@ nouveau-y += core/engine/graph/ctxnvc8.o
255nouveau-y += core/engine/graph/ctxnvd7.o 267nouveau-y += core/engine/graph/ctxnvd7.o
256nouveau-y += core/engine/graph/ctxnvd9.o 268nouveau-y += core/engine/graph/ctxnvd9.o
257nouveau-y += core/engine/graph/ctxnve4.o 269nouveau-y += core/engine/graph/ctxnve4.o
270nouveau-y += core/engine/graph/ctxgk20a.o
258nouveau-y += core/engine/graph/ctxnvf0.o 271nouveau-y += core/engine/graph/ctxnvf0.o
259nouveau-y += core/engine/graph/ctxnv108.o 272nouveau-y += core/engine/graph/ctxnv108.o
260nouveau-y += core/engine/graph/ctxgm107.o 273nouveau-y += core/engine/graph/ctxgm107.o
@@ -275,6 +288,7 @@ nouveau-y += core/engine/graph/nvc8.o
275nouveau-y += core/engine/graph/nvd7.o 288nouveau-y += core/engine/graph/nvd7.o
276nouveau-y += core/engine/graph/nvd9.o 289nouveau-y += core/engine/graph/nvd9.o
277nouveau-y += core/engine/graph/nve4.o 290nouveau-y += core/engine/graph/nve4.o
291nouveau-y += core/engine/graph/gk20a.o
278nouveau-y += core/engine/graph/nvf0.o 292nouveau-y += core/engine/graph/nvf0.o
279nouveau-y += core/engine/graph/nv108.o 293nouveau-y += core/engine/graph/nv108.o
280nouveau-y += core/engine/graph/gm107.o 294nouveau-y += core/engine/graph/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 3f3c76581a9e..ae81d3b5d8b7 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -28,14 +28,20 @@ nouveau_event_put(struct nouveau_eventh *handler)
 {
 	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-	if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
-		spin_lock_irqsave(&event->refs_lock, flags);
-		if (!--event->index[handler->index].refs) {
-			if (event->disable)
-				event->disable(event, handler->index);
-		}
-		spin_unlock_irqrestore(&event->refs_lock, flags);
-	}
+	u32 m, t;
+
+	if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
+		return;
+
+	spin_lock_irqsave(&event->refs_lock, flags);
+	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+		if (!--event->refs[handler->index * event->types_nr + t]) {
+			if (event->disable)
+				event->disable(event, 1 << t, handler->index);
+		}
+
+	}
+	spin_unlock_irqrestore(&event->refs_lock, flags);
 }
 
 void
@@ -43,14 +49,20 @@ nouveau_event_get(struct nouveau_eventh *handler)
 {
 	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-	if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
-		spin_lock_irqsave(&event->refs_lock, flags);
-		if (!event->index[handler->index].refs++) {
-			if (event->enable)
-				event->enable(event, handler->index);
-		}
-		spin_unlock_irqrestore(&event->refs_lock, flags);
-	}
+	u32 m, t;
+
+	if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
+		return;
+
+	spin_lock_irqsave(&event->refs_lock, flags);
+	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+		if (!event->refs[handler->index * event->types_nr + t]++) {
+			if (event->enable)
+				event->enable(event, 1 << t, handler->index);
+		}
+
+	}
+	spin_unlock_irqrestore(&event->refs_lock, flags);
 }
 
 static void
@@ -65,38 +77,47 @@ nouveau_event_fini(struct nouveau_eventh *handler)
 }
 
 static int
-nouveau_event_init(struct nouveau_event *event, int index,
-		   int (*func)(void *, int), void *priv,
+nouveau_event_init(struct nouveau_event *event, u32 types, int index,
+		   int (*func)(void *, u32, int), void *priv,
 		   struct nouveau_eventh *handler)
 {
 	unsigned long flags;
 
+	if (types & ~((1 << event->types_nr) - 1))
+		return -EINVAL;
 	if (index >= event->index_nr)
 		return -EINVAL;
 
 	handler->event = event;
 	handler->flags = 0;
+	handler->types = types;
 	handler->index = index;
 	handler->func = func;
 	handler->priv = priv;
 
 	spin_lock_irqsave(&event->list_lock, flags);
-	list_add_tail(&handler->head, &event->index[index].list);
+	list_add_tail(&handler->head, &event->list[index]);
 	spin_unlock_irqrestore(&event->list_lock, flags);
 	return 0;
 }
 
 int
-nouveau_event_new(struct nouveau_event *event, int index,
-		  int (*func)(void *, int), void *priv,
+nouveau_event_new(struct nouveau_event *event, u32 types, int index,
+		  int (*func)(void *, u32, int), void *priv,
 		  struct nouveau_eventh **phandler)
 {
 	struct nouveau_eventh *handler;
 	int ret = -ENOMEM;
 
+	if (event->check) {
+		ret = event->check(event, types, index);
+		if (ret)
+			return ret;
+	}
+
 	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
 	if (handler) {
-		ret = nouveau_event_init(event, index, func, priv, handler);
+		ret = nouveau_event_init(event, types, index, func, priv, handler);
 		if (ret)
 			kfree(handler);
 	}
@@ -116,7 +137,7 @@ nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
 }
 
 void
-nouveau_event_trigger(struct nouveau_event *event, int index)
+nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
 {
 	struct nouveau_eventh *handler;
 	unsigned long flags;
@@ -125,10 +146,15 @@ nouveau_event_trigger(struct nouveau_event *event, int index)
 		return;
 
 	spin_lock_irqsave(&event->list_lock, flags);
-	list_for_each_entry(handler, &event->index[index].list, head) {
-		if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
-		    handler->func(handler->priv, index) == NVKM_EVENT_DROP)
-			nouveau_event_put(handler);
+	list_for_each_entry(handler, &event->list[index], head) {
+		if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
+			continue;
+		if (!(handler->types & types))
+			continue;
+		if (handler->func(handler->priv, handler->types & types, index)
+		    != NVKM_EVENT_DROP)
+			continue;
+		nouveau_event_put(handler);
 	}
 	spin_unlock_irqrestore(&event->list_lock, flags);
 }
@@ -144,20 +170,27 @@ nouveau_event_destroy(struct nouveau_event **pevent)
 }
 
 int
-nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
 {
 	struct nouveau_event *event;
 	int i;
 
-	event = *pevent = kzalloc(sizeof(*event) + index_nr *
-				  sizeof(event->index[0]), GFP_KERNEL);
+	event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
+				  sizeof(event->refs[0]), GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
+	event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
+	if (!event->list) {
+		kfree(event);
+		return -ENOMEM;
+	}
+
 	spin_lock_init(&event->list_lock);
 	spin_lock_init(&event->refs_lock);
 	for (i = 0; i < index_nr; i++)
-		INIT_LIST_HEAD(&event->index[i].list);
+		INIT_LIST_HEAD(&event->list[i]);
+	event->types_nr = types_nr;
 	event->index_nr = index_nr;
 	return 0;
 }
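The reworked refcounting keeps one counter per (index, type) pair and walks a handler's type bitmask one set bit at a time -- t = __ffs(m), then m &= ~(1 << t) -- so hardware delivery is enabled on each type's 0->1 transition and disabled again on 1->0. A userspace model of that bookkeeping, with __builtin_ctz() standing in for the kernel's __ffs():

#include <stdio.h>
#include <stdint.h>

#define TYPES_NR 4	/* like event->types_nr */
#define INDEX_NR 2	/* like event->index_nr */

static int refs[INDEX_NR * TYPES_NR];	/* event->refs[] layout from the patch */

static void event_get(uint32_t types, int index)
{
	uint32_t m = types;

	while (m) {
		unsigned t = __builtin_ctz(m);	/* lowest set bit, like __ffs() */

		if (!refs[index * TYPES_NR + t]++)
			printf("enable  type %u on index %d\n", t, index);
		m &= ~(1u << t);
	}
}

static void event_put(uint32_t types, int index)
{
	uint32_t m = types;

	while (m) {
		unsigned t = __builtin_ctz(m);

		if (!--refs[index * TYPES_NR + t])
			printf("disable type %u on index %d\n", t, index);
		m &= ~(1u << t);
	}
}

int main(void)
{
	event_get(0x5, 0);	/* types 0 and 2: both enabled */
	event_get(0x4, 0);	/* type 2 again: refcount only, no re-enable */
	event_put(0x5, 0);	/* type 0 drops to zero, type 2 still held */
	event_put(0x4, 0);	/* now type 2 is disabled too */
	return 0;
}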
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 7f48e288215f..124538555904 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -156,7 +156,7 @@ nouveau_object_ctor(struct nouveau_object *parent,
 	}
 
 	if (ret == 0) {
-		nv_debug(object, "created\n");
+		nv_trace(object, "created\n");
 		atomic_set(&object->refcount, 1);
 	}
 
@@ -166,7 +166,7 @@ nouveau_object_ctor(struct nouveau_object *parent,
 static void
 nouveau_object_dtor(struct nouveau_object *object)
 {
-	nv_debug(object, "destroying\n");
+	nv_trace(object, "destroying\n");
 	nv_ofuncs(object)->dtor(object);
 }
 
@@ -337,7 +337,7 @@ nouveau_object_inc(struct nouveau_object *object)
 		goto fail_self;
 	}
 
-	nv_debug(object, "initialised\n");
+	nv_trace(object, "initialised\n");
 	return 0;
 
 fail_self:
@@ -375,7 +375,7 @@ nouveau_object_decf(struct nouveau_object *object)
 	if (object->parent)
 		nouveau_object_dec(object->parent, false);
 
-	nv_debug(object, "stopped\n");
+	nv_trace(object, "stopped\n");
 	return 0;
 }
 
@@ -411,7 +411,7 @@ nouveau_object_decs(struct nouveau_object *object)
 		}
 	}
 
-	nv_debug(object, "suspended\n");
+	nv_trace(object, "suspended\n");
 	return 0;
 
 fail_parent:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index d258c21c4a22..a520029e25d9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -60,8 +60,8 @@ gm100_identify(struct nouveau_device *device)
 	case 0x117:
 		device->cname = "GM107";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
 #if 0
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 0a51ff4e9e00..40b29d0214cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -47,7 +47,7 @@ nv04_identify(struct nouveau_device *device)
 	case 0x04:
 		device->cname = "NV04";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -65,7 +65,7 @@ nv04_identify(struct nouveau_device *device)
 	case 0x05:
 		device->cname = "NV05";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index e008de8b51b0..5f7c25ff523d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -48,8 +48,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x10:
 		device->cname = "NV10";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -65,8 +65,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x15:
 		device->cname = "NV15";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -84,8 +84,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x16:
 		device->cname = "NV16";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -103,8 +103,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x1a:
 		device->cname = "nForce";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -122,8 +122,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x11:
 		device->cname = "NV11";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -141,8 +141,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x17:
 		device->cname = "NV17";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -160,8 +160,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x1f:
 		device->cname = "nForce2";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -179,8 +179,8 @@ nv10_identify(struct nouveau_device *device)
 	case 0x18:
 		device->cname = "NV18";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 7b629a3aed05..75fed11bba0a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -49,8 +49,8 @@ nv20_identify(struct nouveau_device *device)
 	case 0x20:
 		device->cname = "NV20";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -68,8 +68,8 @@ nv20_identify(struct nouveau_device *device)
 	case 0x25:
 		device->cname = "NV25";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -87,8 +87,8 @@ nv20_identify(struct nouveau_device *device)
 	case 0x28:
 		device->cname = "NV28";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -106,8 +106,8 @@ nv20_identify(struct nouveau_device *device)
 	case 0x2a:
 		device->cname = "NV2A";
 		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 7dfddd5a1908..36919d7db7cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -49,8 +49,8 @@ nv30_identify(struct nouveau_device *device)
 	case 0x30:
 		device->cname = "NV30";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
@@ -68,8 +68,8 @@ nv30_identify(struct nouveau_device *device)
 	case 0x35:
 		device->cname = "NV35";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
@@ -87,8 +87,8 @@ nv30_identify(struct nouveau_device *device)
 	case 0x31:
 		device->cname = "NV31";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
@@ -107,8 +107,8 @@ nv30_identify(struct nouveau_device *device)
 	case 0x36:
 		device->cname = "NV36";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
@@ -127,8 +127,8 @@ nv30_identify(struct nouveau_device *device)
 	case 0x34:
 		device->cname = "NV34";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 7c1ce6cf4f1f..1130a62be2c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -53,8 +53,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x40:
 		device->cname = "NV40";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -76,8 +76,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x41:
 		device->cname = "NV41";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -99,8 +99,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x42:
 		device->cname = "NV42";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -122,8 +122,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x43:
 		device->cname = "NV43";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -145,8 +145,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x45:
 		device->cname = "NV45";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -168,8 +168,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x47:
 		device->cname = "G70";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -191,8 +191,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x49:
 		device->cname = "G71";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -214,8 +214,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x4b:
 		device->cname = "G73";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -237,8 +237,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x44:
 		device->cname = "NV44";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -260,8 +260,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x46:
 		device->cname = "G72";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -283,8 +283,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x4a:
 		device->cname = "NV44A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -306,8 +306,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x4c:
 		device->cname = "C61";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -329,8 +329,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x4e:
 		device->cname = "C51";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv4e_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv4e_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -352,8 +352,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x63:
 		device->cname = "C73";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -375,8 +375,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x67:
 		device->cname = "C67";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
@@ -398,8 +398,8 @@ nv40_identify(struct nouveau_device *device)
 	case 0x68:
 		device->cname = "C68";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 66499fa0f758..ef0b0bde1a91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -60,8 +60,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x50:
 		device->cname = "G80";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -85,8 +85,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x84:
 		device->cname = "G84";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -113,8 +113,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x86:
 		device->cname = "G86";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -141,8 +141,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x92:
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -169,8 +169,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x94:
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -197,8 +197,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x96:
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -225,8 +225,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0x98:
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -253,8 +253,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa0:
 		device->cname = "G200";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -281,8 +281,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xaa:
 		device->cname = "MCP77/MCP78";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -309,8 +309,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xac:
 		device->cname = "MCP79/MCP7A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -337,8 +337,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa3:
 		device->cname = "GT215";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -367,8 +367,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa5:
 		device->cname = "GT216";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -396,8 +396,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xa8:
 		device->cname = "GT218";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -425,8 +425,8 @@ nv50_identify(struct nouveau_device *device)
 	case 0xaf:
 		device->cname = "MCP89";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
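Note that from G92 onward the GPIO entry also switches from nv50_gpio_oclass to a new nv92_gpio_oclass; the split appears tied to how these chips report GPIO interrupts, which the reworked hot-plug code later in this pull relies on. The consumer side of that interface, as it appears in conn.c further down (handler and priv stand in for the caller's callback and context):

/* subscribe to toggles of one GPIO line; the handler then fires on each
 * hot-plug interrupt for that line until the event is released */
ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED, func.line,
			handler, priv, &event);
if (ret == 0)
	nouveau_event_get(event);	/* arm the notifier */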
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 2075b3027052..f199957995fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -60,8 +60,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc0:
 		device->cname = "GF100";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -92,8 +92,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc4:
 		device->cname = "GF104";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -124,8 +124,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc3:
 		device->cname = "GF106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -155,8 +155,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xce:
 		device->cname = "GF114";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -187,8 +187,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xcf:
 		device->cname = "GF116";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -219,8 +219,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc1:
 		device->cname = "GF108";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -250,8 +250,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xc8:
 		device->cname = "GF110";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv92_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -282,8 +282,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xd9:
 		device->cname = "GF119";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -313,8 +313,8 @@ nvc0_identify(struct nouveau_device *device)
 	case 0xd7:
 		device->cname = "GF117";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 9784cbf8a9d2..2d1e97d4264f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -60,8 +60,8 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe4:
 		device->cname = "GK104";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -93,8 +93,8 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe7:
 		device->cname = "GK107";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -126,8 +126,8 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe6:
 		device->cname = "GK106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -156,11 +156,61 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
+	case 0xea:
+		device->cname = "GK20A";
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  gk20a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk20a_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk20a_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] =  gk20a_graph_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+		break;
 	case 0xf0:
 		device->cname = "GK110";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] =  gf100_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] =  nvf0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+		break;
+	case 0xf1:
+		device->cname = "GK110B";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -184,18 +234,16 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
-#if 0
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
-#endif
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
 		break;
 	case 0x108:
 		device->cname = "GK208";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
@@ -219,11 +267,9 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
-#if 0
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
-#endif
 		break;
 	default:
 		nv_fatal(device, "unknown Kepler chipset\n");
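Beyond the oclass conversion, this file gains identify cases for GK20A (0xea, the Tegra K1 SoC GPU, which registers no VBIOS, GPIO, I2C, clock, thermal, MXM or devinit subdevs) and GK110B (0xf1, which reuses GK110's layout), and drops the #if 0 guards around the BSP/VP/PPP video engines on GK110 and GK208, enabling them. A minimal sketch of the dispatch pattern these tables implement, assuming the surrounding function shape; the default arm's return value is not visible in the hunk and is assumed here:

/* the chipset id selects a table of subdev/engine classes; entries a
 * chip lacks are simply never assigned, and unknown ids fail hard */
switch (device->chipset) {
case 0xea:			/* GK20A: no board-level subdevs to register */
	device->cname = "GK20A";
	/* ... SoC-appropriate subset of oclass assignments ... */
	break;
case 0xf1:			/* GK110B: same layout as GK110 */
	device->cname = "GK110B";
	/* ... */
	break;
default:
	nv_fatal(device, "unknown Kepler chipset\n");
	return -EINVAL;		/* assumed error code */
}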
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 7a5cae42834f..c41f656abe64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,13 +22,87 @@
  * Authors: Ben Skeggs
  */
 
-#include <engine/disp.h>
+#include "priv.h"
+#include "outp.h"
+#include "conn.h"
+
+static int
+nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index)
+{
+	struct nouveau_disp *disp = event->priv;
+	struct nvkm_output *outp;
+	list_for_each_entry(outp, &disp->outp, head) {
+		if (outp->conn->index == index) {
+			if (outp->conn->hpd.event)
+				return 0;
+			break;
+		}
+	}
+	return -ENOSYS;
+}
+
+int
+_nouveau_disp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_disp *disp = (void *)object;
+	struct nvkm_output *outp;
+	int ret;
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
+		if (ret && suspend)
+			goto fail_outp;
+	}
+
+	return nouveau_engine_fini(&disp->base, suspend);
+
+fail_outp:
+	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+		nv_ofuncs(outp)->init(nv_object(outp));
+	}
+
+	return ret;
+}
+
+int
+_nouveau_disp_init(struct nouveau_object *object)
+{
+	struct nouveau_disp *disp = (void *)object;
+	struct nvkm_output *outp;
+	int ret;
+
+	ret = nouveau_engine_init(&disp->base);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		ret = nv_ofuncs(outp)->init(nv_object(outp));
+		if (ret)
+			goto fail_outp;
+	}
+
+	return ret;
+
+fail_outp:
+	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+		nv_ofuncs(outp)->fini(nv_object(outp), false);
+	}
+
+	return ret;
+}
 
 void
 _nouveau_disp_dtor(struct nouveau_object *object)
 {
 	struct nouveau_disp *disp = (void *)object;
+	struct nvkm_output *outp, *outt;
+
 	nouveau_event_destroy(&disp->vblank);
+
+	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+		nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+	}
+
 	nouveau_engine_destroy(&disp->base);
 }
 
@@ -39,8 +113,15 @@ nouveau_disp_create_(struct nouveau_object *parent,
 		     const char *intname, const char *extname,
 		     int length, void **pobject)
 {
+	struct nouveau_disp_impl *impl = (void *)oclass;
+	struct nouveau_bios *bios = nouveau_bios(parent);
 	struct nouveau_disp *disp;
-	int ret;
+	struct nouveau_oclass **sclass;
+	struct nouveau_object *object;
+	struct dcb_output dcbE;
+	u8  hpd = 0, ver, hdr;
+	u32 data;
+	int ret, i;
 
 	ret = nouveau_engine_create_(parent, engine, oclass, true,
 				     intname, extname, length, pobject);
@@ -48,5 +129,42 @@ nouveau_disp_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	return nouveau_event_create(heads, &disp->vblank);
+	INIT_LIST_HEAD(&disp->outp);
+
+	/* create output objects for each display path in the vbios */
+	i = -1;
+	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (dcbE.type == DCB_OUTPUT_UNUSED)
+			continue;
+		if (dcbE.type == DCB_OUTPUT_EOL)
+			break;
+		data = dcbE.location << 4 | dcbE.type;
+
+		oclass = nvkm_output_oclass;
+		sclass = impl->outp;
+		while (sclass && sclass[0]) {
+			if (sclass[0]->handle == data) {
+				oclass = sclass[0];
+				break;
+			}
+			sclass++;
+		}
+
+		nouveau_object_ctor(*pobject, *pobject, oclass,
+				    &dcbE, i, &object);
+		hpd = max(hpd, (u8)(dcbE.connector + 1));
+	}
+
+	ret = nouveau_event_create(3, hpd, &disp->hpd);
+	if (ret)
+		return ret;
+
+	disp->hpd->priv = disp;
+	disp->hpd->check = nouveau_disp_hpd_check;
+
+	ret = nouveau_event_create(1, heads, &disp->vblank);
+	if (ret)
+		return ret;
+
+	return 0;
 }
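The new _nouveau_disp_init()/_nouveau_disp_fini() above use the standard kernel rollback idiom: if an entry partway down the list fails, list_for_each_entry_continue_reverse() resumes from the failed cursor and walks backwards, undoing only the entries already processed. A self-contained illustration of the idiom, with a hypothetical item type standing in for nvkm_output:

#include <linux/list.h>

struct item {
	struct list_head head;
	int (*init)(struct item *);
	void (*fini)(struct item *);
};

static int
init_all(struct list_head *list)
{
	struct item *it;
	int ret;

	list_for_each_entry(it, list, head) {
		ret = it->init(it);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	/* skips the entry that failed; unwinds the rest in reverse order */
	list_for_each_entry_continue_reverse(it, list, head)
		it->fini(it);
	return ret;
}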
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
new file mode 100644
index 000000000000..4ffbc70ecf5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27#include "conn.h"
28#include "outp.h"
29
30static void
31nvkm_connector_hpd_work(struct work_struct *w)
32{
33 struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work);
34 struct nouveau_disp *disp = nouveau_disp(conn);
35 struct nouveau_gpio *gpio = nouveau_gpio(conn);
36 u32 send = NVKM_HPD_UNPLUG;
37 if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index))
38 send = NVKM_HPD_PLUG;
39 nouveau_event_trigger(disp->hpd, send, conn->index);
40 nouveau_event_get(conn->hpd.event);
41}
42
43static int
44nvkm_connector_hpd(void *data, u32 type, int index)
45{
46 struct nvkm_connector *conn = data;
47 DBG("HPD: %d\n", type);
48 schedule_work(&conn->hpd.work);
49 return NVKM_EVENT_DROP;
50}
51
52int
53_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
54{
55 struct nvkm_connector *conn = (void *)object;
56 if (conn->hpd.event)
57 nouveau_event_put(conn->hpd.event);
58 return nouveau_object_fini(&conn->base, suspend);
59}
60
61int
62_nvkm_connector_init(struct nouveau_object *object)
63{
64 struct nvkm_connector *conn = (void *)object;
65 int ret = nouveau_object_init(&conn->base);
66 if (ret == 0) {
67 if (conn->hpd.event)
68 nouveau_event_get(conn->hpd.event);
69 }
70 return ret;
71}
72
73void
74_nvkm_connector_dtor(struct nouveau_object *object)
75{
76 struct nvkm_connector *conn = (void *)object;
77 nouveau_event_ref(NULL, &conn->hpd.event);
78 nouveau_object_destroy(&conn->base);
79}
80
81int
82nvkm_connector_create_(struct nouveau_object *parent,
83 struct nouveau_object *engine,
84 struct nouveau_oclass *oclass,
85 struct nvbios_connE *info, int index,
86 int length, void **pobject)
87{
88 static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
89 struct nouveau_gpio *gpio = nouveau_gpio(parent);
90 struct nouveau_disp *disp = (void *)engine;
91 struct nvkm_connector *conn;
92 struct nvkm_output *outp;
93 struct dcb_gpio_func func;
94 int ret;
95
96 list_for_each_entry(outp, &disp->outp, head) {
97 if (outp->conn && outp->conn->index == index) {
98 atomic_inc(&nv_object(outp->conn)->refcount);
99 *pobject = outp->conn;
100 return 1;
101 }
102 }
103
104 ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
105 conn = *pobject;
106 if (ret)
107 return ret;
108
109 conn->info = *info;
110 conn->index = index;
111
112 DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
113 info->type, info->location, info->hpd, info->dp,
114 info->di, info->sr, info->lcdid);
115
116 if ((info->hpd = ffs(info->hpd))) {
117 if (--info->hpd >= ARRAY_SIZE(hpd)) {
118 ERR("hpd %02x unknown\n", info->hpd);
119 goto done;
120 }
121 info->hpd = hpd[info->hpd];
122
123 ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
124 if (ret) {
125 ERR("func %02x lookup failed, %d\n", info->hpd, ret);
126 goto done;
127 }
128
129 ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED,
130 func.line, nvkm_connector_hpd,
131 conn, &conn->hpd.event);
132 if (ret) {
133 ERR("func %02x failed, %d\n", info->hpd, ret);
134 } else {
135 DBG("func %02x (HPD)\n", info->hpd);
136 }
137 }
138
139done:
140 INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
141 return 0;
142}
143
144int
145_nvkm_connector_ctor(struct nouveau_object *parent,
146 struct nouveau_object *engine,
147 struct nouveau_oclass *oclass, void *info, u32 index,
148 struct nouveau_object **pobject)
149{
150 struct nvkm_connector *conn;
151 int ret;
152
153 ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
154 *pobject = nv_object(conn);
155 if (ret)
156 return ret;
157
158 return 0;
159}
160
161struct nouveau_oclass *
162nvkm_connector_oclass = &(struct nvkm_connector_impl) {
163 .base = {
164 .handle = 0,
165 .ofuncs = &(struct nouveau_ofuncs) {
166 .ctor = _nvkm_connector_ctor,
167 .dtor = _nvkm_connector_dtor,
168 .init = _nvkm_connector_init,
169 .fini = _nvkm_connector_fini,
170 },
171 },
172}.base;
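
conn.c above establishes the hotplug flow: the GPIO-toggle handler nvkm_connector_hpd() runs in event context, so it only schedules a work item and returns NVKM_EVENT_DROP; the work function then samples the pin to choose NVKM_HPD_PLUG or NVKM_HPD_UNPLUG, broadcasts it through disp->hpd, and re-arms the GPIO event. A compact userspace sketch of the same defer-then-sample pattern (all names invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum { HPD_UNPLUG, HPD_PLUG };

    static bool gpio_level;   /* stand-in for gpio->get() on the HPD pin */
    static bool work_pending; /* stand-in for schedule_work() */

    /* interrupt context: defer, don't touch hardware (the driver above
     * schedule_work()s and returns NVKM_EVENT_DROP) */
    static void hpd_irq(void)
    {
    	work_pending = true;
    }

    /* process context: sample the pin, broadcast plug/unplug, re-arm */
    static void hpd_work(void)
    {
    	int send = gpio_level ? HPD_PLUG : HPD_UNPLUG;
    	printf("hpd: %s\n", send == HPD_PLUG ? "plug" : "unplug");
    	/* re-enable the GPIO event here, as nouveau_event_get() does */
    }

    int main(void)
    {
    	gpio_level = true;
    	hpd_irq();
    	if (work_pending) {
    		work_pending = false;
    		hpd_work();
    	}
    	return 0;
    }
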
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
new file mode 100644
index 000000000000..035ebeacbb1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -0,0 +1,59 @@
1#ifndef __NVKM_DISP_CONN_H__
2#define __NVKM_DISP_CONN_H__
3
4#include "priv.h"
5
6struct nvkm_connector {
7 struct nouveau_object base;
8 struct list_head head;
9
10 struct nvbios_connE info;
11 int index;
12
13 struct {
14 struct nouveau_eventh *event;
15 struct work_struct work;
16 } hpd;
17};
18
19#define nvkm_connector_create(p,e,c,b,i,d) \
20 nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
21#define nvkm_connector_destroy(d) ({ \
22 struct nvkm_connector *disp = (d); \
23 _nvkm_connector_dtor(nv_object(disp)); \
24})
25#define nvkm_connector_init(d) ({ \
26 struct nvkm_connector *disp = (d); \
27 _nvkm_connector_init(nv_object(disp)); \
28})
29#define nvkm_connector_fini(d,s) ({ \
30 struct nvkm_connector *disp = (d); \
31 _nvkm_connector_fini(nv_object(disp), (s)); \
32})
33
34int nvkm_connector_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, struct nvbios_connE *,
36 int, int, void **);
37
38int _nvkm_connector_ctor(struct nouveau_object *, struct nouveau_object *,
39 struct nouveau_oclass *, void *, u32,
40 struct nouveau_object **);
41void _nvkm_connector_dtor(struct nouveau_object *);
42int _nvkm_connector_init(struct nouveau_object *);
43int _nvkm_connector_fini(struct nouveau_object *, bool);
44
45struct nvkm_connector_impl {
46 struct nouveau_oclass base;
47};
48
49#ifndef MSG
50#define MSG(l,f,a...) do { \
51 struct nvkm_connector *_conn = (void *)conn; \
52 nv_##l(nv_object(conn)->engine, "%02x:%02x%02x: "f, _conn->index, \
53 _conn->info.location, _conn->info.type, ##a); \
54} while(0)
55#define DBG(f,a...) MSG(debug, f, ##a)
56#define ERR(f,a...) MSG(error, f, ##a)
57#endif
58
59#endif
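
conn.h closes with the per-object logging idiom used across this series: MSG() expands against whatever local variable named conn is in scope at the expansion site, so DBG()/ERR() messages are automatically prefixed with that connector's index, location and type. A minimal illustration of the convention (uses the gcc ##__VA_ARGS__ extension, like the original's ##a):

    #include <stdio.h>

    struct conn { int index, location, type; };

    /* like MSG() above: the macro silently uses the local 'conn'
     * variable of whatever function expands it (capture by convention),
     * so every message carries that connector's identity */
    #define MSG(f, ...) \
    	printf("%02x:%02x%02x: " f, conn->index, conn->location, \
    	       conn->type, ##__VA_ARGS__)

    static void probe(struct conn *conn)
    {
    	MSG("probing\n");
    }

    int main(void)
    {
    	struct conn c = { .index = 1, .location = 0, .type = 0x46 };
    	probe(&c);
    	return 0;
    }
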
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 3ca2d25b7f5e..39562d48101d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -30,42 +30,38 @@
30 30
31#include <engine/disp.h> 31#include <engine/disp.h>
32 32
33#include "dport.h" 33#include <core/class.h>
34 34
35#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \ 35#include "dport.h"
36 dp->outp->hasht, dp->outp->hashm, ##args) 36#include "outpdp.h"
37#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
38 dp->outp->hasht, dp->outp->hashm, ##args)
39 37
40/****************************************************************************** 38/******************************************************************************
41 * link training 39 * link training
42 *****************************************************************************/ 40 *****************************************************************************/
43struct dp_state { 41struct dp_state {
44 const struct nouveau_dp_func *func; 42 struct nvkm_output_dp *outp;
45 struct nouveau_disp *disp;
46 struct dcb_output *outp;
47 struct nvbios_dpout info;
48 u8 version;
49 struct nouveau_i2c_port *aux;
50 int head;
51 u8 dpcd[4];
52 int link_nr; 43 int link_nr;
53 u32 link_bw; 44 u32 link_bw;
54 u8 stat[6]; 45 u8 stat[6];
55 u8 conf[4]; 46 u8 conf[4];
47 bool pc2;
48 u8 pc2stat;
49 u8 pc2conf[2];
56}; 50};
57 51
58static int 52static int
59dp_set_link_config(struct dp_state *dp) 53dp_set_link_config(struct dp_state *dp)
60{ 54{
61 struct nouveau_disp *disp = dp->disp; 55 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
56 struct nvkm_output_dp *outp = dp->outp;
57 struct nouveau_disp *disp = nouveau_disp(outp);
62 struct nouveau_bios *bios = nouveau_bios(disp); 58 struct nouveau_bios *bios = nouveau_bios(disp);
63 struct nvbios_init init = { 59 struct nvbios_init init = {
64 .subdev = nv_subdev(dp->disp), 60 .subdev = nv_subdev(disp),
65 .bios = bios, 61 .bios = bios,
66 .offset = 0x0000, 62 .offset = 0x0000,
67 .outp = dp->outp, 63 .outp = &outp->base.info,
68 .crtc = dp->head, 64 .crtc = -1,
69 .execute = 1, 65 .execute = 1,
70 }; 66 };
71 u32 lnkcmp; 67 u32 lnkcmp;
@@ -75,8 +71,8 @@ dp_set_link_config(struct dp_state *dp)
75 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 71 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
76 72
77 /* set desired link configuration on the source */ 73 /* set desired link configuration on the source */
78 if ((lnkcmp = dp->info.lnkcmp)) { 74 if ((lnkcmp = dp->outp->info.lnkcmp)) {
79 if (dp->version < 0x30) { 75 if (outp->version < 0x30) {
80 while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp)) 76 while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
81 lnkcmp += 4; 77 lnkcmp += 4;
82 init.offset = nv_ro16(bios, lnkcmp + 2); 78 init.offset = nv_ro16(bios, lnkcmp + 2);
@@ -89,73 +85,112 @@ dp_set_link_config(struct dp_state *dp)
89 nvbios_exec(&init); 85 nvbios_exec(&init);
90 } 86 }
91 87
92 ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head, 88 ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
93 dp->link_nr, dp->link_bw / 27000, 89 outp->dpcd[DPCD_RC02] &
94 dp->dpcd[DPCD_RC02] & 90 DPCD_RC02_ENHANCED_FRAME_CAP);
95 DPCD_RC02_ENHANCED_FRAME_CAP);
96 if (ret) { 91 if (ret) {
97 ERR("lnk_ctl failed with %d\n", ret); 92 if (ret < 0)
93 ERR("lnk_ctl failed with %d\n", ret);
98 return ret; 94 return ret;
99 } 95 }
100 96
97 impl->lnk_pwr(outp, dp->link_nr);
98
101 /* set desired link configuration on the sink */ 99 /* set desired link configuration on the sink */
102 sink[0] = dp->link_bw / 27000; 100 sink[0] = dp->link_bw / 27000;
103 sink[1] = dp->link_nr; 101 sink[1] = dp->link_nr;
104 if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) 102 if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
105 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN; 103 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
106 104
107 return nv_wraux(dp->aux, DPCD_LC00, sink, 2); 105 return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
108} 106}
109 107
110static void 108static void
111dp_set_training_pattern(struct dp_state *dp, u8 pattern) 109dp_set_training_pattern(struct dp_state *dp, u8 pattern)
112{ 110{
111 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
112 struct nvkm_output_dp *outp = dp->outp;
113 u8 sink_tp; 113 u8 sink_tp;
114 114
115 DBG("training pattern %d\n", pattern); 115 DBG("training pattern %d\n", pattern);
116 dp->func->pattern(dp->disp, dp->outp, dp->head, pattern); 116 impl->pattern(outp, pattern);
117 117
118 nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1); 118 nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
119 sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET; 119 sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
120 sink_tp |= pattern; 120 sink_tp |= pattern;
121 nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1); 121 nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
122} 122}
123 123
124static int 124static int
125dp_link_train_commit(struct dp_state *dp) 125dp_link_train_commit(struct dp_state *dp, bool pc)
126{ 126{
127 int i; 127 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
128 struct nvkm_output_dp *outp = dp->outp;
129 int ret, i;
128 130
129 for (i = 0; i < dp->link_nr; i++) { 131 for (i = 0; i < dp->link_nr; i++) {
130 u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; 132 u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
133 u8 lpc2 = (dp->pc2stat >> (i * 2)) & 0x3;
131 u8 lpre = (lane & 0x0c) >> 2; 134 u8 lpre = (lane & 0x0c) >> 2;
132 u8 lvsw = (lane & 0x03) >> 0; 135 u8 lvsw = (lane & 0x03) >> 0;
136 u8 hivs = 3 - lpre;
137 u8 hipe = 3;
138 u8 hipc = 3;
139
140 if (lpc2 >= hipc)
141 lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
142 if (lpre >= hipe) {
 143 lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes: the <<3 below lands this on MAX_PRE_EMPHASIS_REACHED */
144 lvsw = hivs = 3 - (lpre & 3);
145 } else
146 if (lvsw >= hivs) {
147 lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
148 }
133 149
134 dp->conf[i] = (lpre << 3) | lvsw; 150 dp->conf[i] = (lpre << 3) | lvsw;
135 if (lvsw == 3) 151 dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
136 dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED; 152
137 if (lpre == 3) 153 DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
138 dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED; 154 impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
155 }
156
157 ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
158 if (ret)
159 return ret;
139 160
140 DBG("config lane %d %02x\n", i, dp->conf[i]); 161 if (pc) {
141 dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre); 162 ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
163 if (ret)
164 return ret;
142 } 165 }
143 166
144 return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4); 167 return 0;
145} 168}
146 169
147static int 170static int
148dp_link_train_update(struct dp_state *dp, u32 delay) 171dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
149{ 172{
173 struct nvkm_output_dp *outp = dp->outp;
150 int ret; 174 int ret;
151 175
152 udelay(delay); 176 if (outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
177 mdelay(outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
178 else
179 udelay(delay);
153 180
154 ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6); 181 ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
155 if (ret) 182 if (ret)
156 return ret; 183 return ret;
157 184
158 DBG("status %6ph\n", dp->stat); 185 if (pc) {
186 ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
187 if (ret)
188 dp->pc2stat = 0x00;
189 DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
190 } else {
191 DBG("status %6ph\n", dp->stat);
192 }
193
159 return 0; 194 return 0;
160} 195}
161 196
@@ -169,8 +204,8 @@ dp_link_train_cr(struct dp_state *dp)
169 dp_set_training_pattern(dp, 1); 204 dp_set_training_pattern(dp, 1);
170 205
171 do { 206 do {
172 if (dp_link_train_commit(dp) || 207 if (dp_link_train_commit(dp, false) ||
173 dp_link_train_update(dp, 100)) 208 dp_link_train_update(dp, false, 100))
174 break; 209 break;
175 210
176 cr_done = true; 211 cr_done = true;
@@ -196,13 +231,17 @@ dp_link_train_cr(struct dp_state *dp)
196static int 231static int
197dp_link_train_eq(struct dp_state *dp) 232dp_link_train_eq(struct dp_state *dp)
198{ 233{
234 struct nvkm_output_dp *outp = dp->outp;
199 bool eq_done = false, cr_done = true; 235 bool eq_done = false, cr_done = true;
200 int tries = 0, i; 236 int tries = 0, i;
201 237
202 dp_set_training_pattern(dp, 2); 238 if (outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED)
239 dp_set_training_pattern(dp, 3);
240 else
241 dp_set_training_pattern(dp, 2);
203 242
204 do { 243 do {
205 if (dp_link_train_update(dp, 400)) 244 if (dp_link_train_update(dp, dp->pc2, 400))
206 break; 245 break;
207 246
208 eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE); 247 eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -215,7 +254,7 @@ dp_link_train_eq(struct dp_state *dp)
215 eq_done = false; 254 eq_done = false;
216 } 255 }
217 256
218 if (dp_link_train_commit(dp)) 257 if (dp_link_train_commit(dp, dp->pc2))
219 break; 258 break;
220 } while (!eq_done && cr_done && ++tries <= 5); 259 } while (!eq_done && cr_done && ++tries <= 5);
221 260
@@ -225,121 +264,109 @@ dp_link_train_eq(struct dp_state *dp)
225static void 264static void
226dp_link_train_init(struct dp_state *dp, bool spread) 265dp_link_train_init(struct dp_state *dp, bool spread)
227{ 266{
267 struct nvkm_output_dp *outp = dp->outp;
268 struct nouveau_disp *disp = nouveau_disp(outp);
269 struct nouveau_bios *bios = nouveau_bios(disp);
228 struct nvbios_init init = { 270 struct nvbios_init init = {
229 .subdev = nv_subdev(dp->disp), 271 .subdev = nv_subdev(disp),
230 .bios = nouveau_bios(dp->disp), 272 .bios = bios,
231 .outp = dp->outp, 273 .outp = &outp->base.info,
232 .crtc = dp->head, 274 .crtc = -1,
233 .execute = 1, 275 .execute = 1,
234 }; 276 };
235 277
236 /* set desired spread */ 278 /* set desired spread */
237 if (spread) 279 if (spread)
238 init.offset = dp->info.script[2]; 280 init.offset = outp->info.script[2];
239 else 281 else
240 init.offset = dp->info.script[3]; 282 init.offset = outp->info.script[3];
241 nvbios_exec(&init); 283 nvbios_exec(&init);
242 284
243 /* pre-train script */ 285 /* pre-train script */
244 init.offset = dp->info.script[0]; 286 init.offset = outp->info.script[0];
245 nvbios_exec(&init); 287 nvbios_exec(&init);
246} 288}
247 289
248static void 290static void
249dp_link_train_fini(struct dp_state *dp) 291dp_link_train_fini(struct dp_state *dp)
250{ 292{
293 struct nvkm_output_dp *outp = dp->outp;
294 struct nouveau_disp *disp = nouveau_disp(outp);
295 struct nouveau_bios *bios = nouveau_bios(disp);
251 struct nvbios_init init = { 296 struct nvbios_init init = {
252 .subdev = nv_subdev(dp->disp), 297 .subdev = nv_subdev(disp),
253 .bios = nouveau_bios(dp->disp), 298 .bios = bios,
254 .outp = dp->outp, 299 .outp = &outp->base.info,
255 .crtc = dp->head, 300 .crtc = -1,
256 .execute = 1, 301 .execute = 1,
257 }; 302 };
258 303
259 /* post-train script */ 304 /* post-train script */
260 init.offset = dp->info.script[1], 305 init.offset = outp->info.script[1],
261 nvbios_exec(&init); 306 nvbios_exec(&init);
262} 307}
263 308
264int 309static const struct dp_rates {
265nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func, 310 u32 rate;
266 struct dcb_output *outp, int head, u32 datarate) 311 u8 bw;
312 u8 nr;
313} nouveau_dp_rates[] = {
314 { 2160000, 0x14, 4 },
315 { 1080000, 0x0a, 4 },
316 { 1080000, 0x14, 2 },
317 { 648000, 0x06, 4 },
318 { 540000, 0x0a, 2 },
319 { 540000, 0x14, 1 },
320 { 324000, 0x06, 2 },
321 { 270000, 0x0a, 1 },
322 { 162000, 0x06, 1 },
323 {}
324};
325
326void
327nouveau_dp_train(struct work_struct *w)
267{ 328{
268 struct nouveau_bios *bios = nouveau_bios(disp); 329 struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
269 struct nouveau_i2c *i2c = nouveau_i2c(disp); 330 struct nouveau_disp *disp = nouveau_disp(outp);
331 const struct dp_rates *cfg = nouveau_dp_rates;
270 struct dp_state _dp = { 332 struct dp_state _dp = {
271 .disp = disp,
272 .func = func,
273 .outp = outp, 333 .outp = outp,
274 .head = head,
275 }, *dp = &_dp; 334 }, *dp = &_dp;
276 const u32 bw_list[] = { 540000, 270000, 162000, 0 }; 335 u32 datarate = 0;
277 const u32 *link_bw = bw_list;
278 u8 hdr, cnt, len;
279 u32 data;
280 int ret; 336 int ret;
281 337
282 /* find the bios displayport data relevant to this output */
283 data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
284 &hdr, &cnt, &len, &dp->info);
285 if (!data) {
286 ERR("bios data not found\n");
287 return -EINVAL;
288 }
289
290 /* acquire the aux channel and fetch some info about the display */
291 if (outp->location)
292 dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
293 else
294 dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
295 if (!dp->aux) {
296 ERR("no aux channel?!\n");
297 return -ENODEV;
298 }
299
300 ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
301 if (ret) {
302 /* it's possible the display has been unplugged before we
303 * get here. we still need to execute the full set of
304 * vbios scripts, and program the OR at a high enough
305 * frequency to satisfy the target mode. failure to do
306 * so results at best in an UPDATE hanging, and at worst
307 * with PDISP running away to join the circus.
308 */
309 dp->dpcd[1] = link_bw[0] / 27000;
310 dp->dpcd[2] = 4;
311 dp->dpcd[3] = 0x00;
312 ERR("failed to read DPCD\n");
313 }
314
315 /* bring capabilities within encoder limits */ 338 /* bring capabilities within encoder limits */
316 if ((dp->dpcd[2] & 0x1f) > dp->outp->dpconf.link_nr) { 339 if (nv_mclass(disp) < NVD0_DISP_CLASS)
317 dp->dpcd[2] &= ~0x1f; 340 outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
318 dp->dpcd[2] |= dp->outp->dpconf.link_nr; 341 if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
342 outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
343 outp->dpcd[2] |= outp->base.info.dpconf.link_nr;
344 }
345 if (outp->dpcd[1] > outp->base.info.dpconf.link_bw)
346 outp->dpcd[1] = outp->base.info.dpconf.link_bw;
347 dp->pc2 = outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED;
348
349 /* restrict link config to the lowest required rate, if requested */
350 if (datarate) {
351 datarate = (datarate / 8) * 10; /* 8B/10B coding overhead */
352 while (cfg[1].rate >= datarate)
353 cfg++;
319 } 354 }
320 if (dp->dpcd[1] > dp->outp->dpconf.link_bw) 355 cfg--;
321 dp->dpcd[1] = dp->outp->dpconf.link_bw;
322 356
323 /* adjust required bandwidth for 8B/10B coding overhead */ 357 /* disable link interrupt handling during link training */
324 datarate = (datarate / 8) * 10; 358 nouveau_event_put(outp->irq);
325 359
326 /* enable down-spreading and execute pre-train script from vbios */ 360 /* enable down-spreading and execute pre-train script from vbios */
327 dp_link_train_init(dp, dp->dpcd[3] & 0x01); 361 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
328 362
329 /* start off at highest link rate supported by encoder and display */ 363 while (ret = -EIO, (++cfg)->rate) {
330 while (*link_bw > (dp->dpcd[1] * 27000)) 364 /* select next configuration supported by encoder and sink */
331 link_bw++; 365 while (cfg->nr > (outp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT) ||
332 366 cfg->bw > (outp->dpcd[DPCD_RC01_MAX_LINK_RATE]))
333 while ((ret = -EIO) && link_bw[0]) { 367 cfg++;
334 /* find minimum required lane count at this link rate */ 368 dp->link_bw = cfg->bw * 27000;
335 dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT; 369 dp->link_nr = cfg->nr;
336 while ((dp->link_nr >> 1) * link_bw[0] > datarate)
337 dp->link_nr >>= 1;
338
339 /* drop link rate to minimum with this lane count */
340 while ((link_bw[1] * dp->link_nr) > datarate)
341 link_bw++;
342 dp->link_bw = link_bw[0];
343 370
344 /* program selected link configuration */ 371 /* program selected link configuration */
345 ret = dp_set_link_config(dp); 372 ret = dp_set_link_config(dp);
@@ -356,17 +383,18 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
356 */ 383 */
357 break; 384 break;
358 } 385 }
359
360 /* retry at lower rate */
361 link_bw++;
362 } 386 }
363 387
364 /* finish link training */ 388 /* finish link training and execute post-train script from vbios */
365 dp_set_training_pattern(dp, 0); 389 dp_set_training_pattern(dp, 0);
366 if (ret < 0) 390 if (ret < 0)
367 ERR("link training failed\n"); 391 ERR("link training failed\n");
368 392
369 /* execute post-train script from vbios */
370 dp_link_train_fini(dp); 393 dp_link_train_fini(dp);
371 return (ret < 0) ? false : true; 394
395 /* signal completion and enable link interrupt handling */
396 DBG("training complete\n");
397 atomic_set(&outp->lt.done, 1);
398 wake_up(&outp->lt.wait);
399 nouveau_event_get(outp->irq);
372} 400}
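
The rewritten trainer in dport.c drops the old nested rate/lane search for a static table of link configurations ordered fastest to slowest: it first skips forward to the slowest entry that still carries the requested payload (after inflating it by the 8b/10b coding overhead, datarate / 8 * 10), then keeps advancing past entries the encoder or sink cannot support. A standalone sketch of that selection arithmetic, with a trimmed table and made-up caps:

    #include <stdint.h>
    #include <stdio.h>

    /* rate = total KB/s, bw = per-lane rate / 27000, nr = lane count */
    struct dp_rate { uint32_t rate; uint8_t bw; uint8_t nr; };

    static const struct dp_rate rates[] = {
    	{ 1080000, 0x0a, 4 }, { 540000, 0x0a, 2 },
    	{ 270000, 0x0a, 1 }, { 162000, 0x06, 1 }, { 0 }
    };

    int main(void)
    {
    	uint32_t datarate = 300000;        /* payload KB/s, example value */
    	uint8_t max_nr = 2, max_bw = 0x0a; /* sink/encoder caps, example */
    	const struct dp_rate *cfg = rates;

    	datarate = (datarate / 8) * 10;    /* 8b/10b symbol overhead */
    	while (cfg[1].rate >= datarate)    /* skip to slowest sufficient */
    		cfg++;
    	while (cfg->rate && (cfg->nr > max_nr || cfg->bw > max_bw))
    		cfg++;                     /* honour link capabilities */
    	if (cfg->rate)
    		printf("train %d lanes at %u KB/s\n",
    		       cfg->nr, cfg->bw * 27000u);
    	return 0;
    }

With the example numbers, 375000 KB/s of symbols fits in two lanes at 270000 KB/s each, so that entry is selected.
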
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
index 0e1bbd18ff6c..5628d2d5ec71 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -2,19 +2,18 @@
2#define __NVKM_DISP_DPORT_H__ 2#define __NVKM_DISP_DPORT_H__
3 3
4/* DPCD Receiver Capabilities */ 4/* DPCD Receiver Capabilities */
5#define DPCD_RC00 0x00000 5#define DPCD_RC00_DPCD_REV 0x00000
6#define DPCD_RC00_DPCD_REV 0xff 6#define DPCD_RC01_MAX_LINK_RATE 0x00001
7#define DPCD_RC01 0x00001
8#define DPCD_RC01_MAX_LINK_RATE 0xff
9#define DPCD_RC02 0x00002 7#define DPCD_RC02 0x00002
10#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80 8#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
9#define DPCD_RC02_TPS3_SUPPORTED 0x40
11#define DPCD_RC02_MAX_LANE_COUNT 0x1f 10#define DPCD_RC02_MAX_LANE_COUNT 0x1f
12#define DPCD_RC03 0x00003 11#define DPCD_RC03 0x00003
13#define DPCD_RC03_MAX_DOWNSPREAD 0x01 12#define DPCD_RC03_MAX_DOWNSPREAD 0x01
13#define DPCD_RC0E_AUX_RD_INTERVAL 0x0000e
14 14
15/* DPCD Link Configuration */ 15/* DPCD Link Configuration */
16#define DPCD_LC00 0x00100 16#define DPCD_LC00_LINK_BW_SET 0x00100
17#define DPCD_LC00_LINK_BW_SET 0xff
18#define DPCD_LC01 0x00101 17#define DPCD_LC01 0x00101
19#define DPCD_LC01_ENHANCED_FRAME_EN 0x80 18#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
20#define DPCD_LC01_LANE_COUNT_SET 0x1f 19#define DPCD_LC01_LANE_COUNT_SET 0x1f
@@ -25,6 +24,16 @@
25#define DPCD_LC03_PRE_EMPHASIS_SET 0x18 24#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
26#define DPCD_LC03_MAX_SWING_REACHED 0x04 25#define DPCD_LC03_MAX_SWING_REACHED 0x04
27#define DPCD_LC03_VOLTAGE_SWING_SET 0x03 26#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
27#define DPCD_LC0F 0x0010f
28#define DPCD_LC0F_LANE1_MAX_POST_CURSOR2_REACHED 0x40
29#define DPCD_LC0F_LANE1_POST_CURSOR2_SET 0x30
30#define DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED 0x04
31#define DPCD_LC0F_LANE0_POST_CURSOR2_SET 0x03
32#define DPCD_LC10 0x00110
33#define DPCD_LC10_LANE3_MAX_POST_CURSOR2_REACHED 0x40
34#define DPCD_LC10_LANE3_POST_CURSOR2_SET 0x30
35#define DPCD_LC10_LANE2_MAX_POST_CURSOR2_REACHED 0x04
36#define DPCD_LC10_LANE2_POST_CURSOR2_SET 0x03
28 37
29/* DPCD Link/Sink Status */ 38/* DPCD Link/Sink Status */
30#define DPCD_LS02 0x00202 39#define DPCD_LS02 0x00202
@@ -55,24 +64,12 @@
55#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30 64#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
56#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c 65#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
57#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03 66#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
67#define DPCD_LS0C 0x0020c
68#define DPCD_LS0C_LANE3_POST_CURSOR2 0xc0
69#define DPCD_LS0C_LANE2_POST_CURSOR2 0x30
70#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
71#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
58 72
59struct nouveau_disp; 73void nouveau_dp_train(struct work_struct *);
60struct dcb_output;
61
62struct nouveau_dp_func {
63 int (*pattern)(struct nouveau_disp *, struct dcb_output *,
64 int head, int pattern);
65 int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
66 int link_nr, int link_bw, bool enh_frame);
67 int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
68 int lane, int swing, int preem);
69};
70
71extern const struct nouveau_dp_func nv94_sor_dp_func;
72extern const struct nouveau_dp_func nvd0_sor_dp_func;
73extern const struct nouveau_dp_func nv50_pior_dp_func;
74
75int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
76 struct dcb_output *, int, u32);
77 74
78#endif 75#endif
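
The register map above is what the trainer polls: nv_rdaux() pulls six bytes starting at DPCD_LS02, giving per-lane status two lanes to a byte, the interlane-align flag, and the per-lane adjust requests that dp_link_train_commit() unpacks with stat[4 + (i >> 1)] >> ((i & 1) * 4). A small decoding sketch with invented status bytes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* six status bytes as read from DPCD 0x202..0x207: lane status
    	 * (two lanes per byte), align status, sink status, and the
    	 * per-lane adjust requests in the last two bytes */
    	uint8_t stat[6] = { 0x77, 0x77, 0x01, 0x00, 0x26, 0x15 };
    	int nr = 4;

    	for (int i = 0; i < nr; i++) {
    		/* adjust requests live in stat[4..5], one nibble per lane */
    		uint8_t lane = (stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
    		uint8_t vsw  = (lane & 0x03) >> 0; /* requested swing */
    		uint8_t pre  = (lane & 0x0c) >> 2; /* requested pre-emphasis */
    		printf("lane %d: vswing %d pre-emphasis %d\n", i, vsw, pre);
    	}
    	return 0;
    }
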
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index cf6f59677b74..9fc7447fec90 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -81,7 +81,6 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 priv->sor.power = nv50_sor_power; 81 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = nvd0_hda_eld; 82 priv->sor.hda_eld = nvd0_hda_eld;
83 priv->sor.hdmi = nvd0_hdmi_ctrl; 83 priv->sor.hdmi = nvd0_hdmi_ctrl;
84 priv->sor.dp = &nvd0_sor_dp_func;
85 return 0; 84 return 0;
86} 85}
87 86
@@ -94,6 +93,7 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
94 .init = _nouveau_disp_init, 93 .init = _nouveau_disp_init,
95 .fini = _nouveau_disp_fini, 94 .fini = _nouveau_disp_fini,
96 }, 95 },
96 .base.outp = nvd0_disp_outp_sclass,
97 .mthd.core = &nve0_disp_mast_mthd_chan, 97 .mthd.core = &nve0_disp_mast_mthd_chan,
98 .mthd.base = &nvd0_disp_sync_mthd_chan, 98 .mthd.base = &nvd0_disp_sync_mthd_chan,
99 .mthd.ovly = &nve0_disp_ovly_mthd_chan, 99 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 6c89af792889..a32666ed0c47 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -51,6 +51,14 @@ nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
51 args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff; 51 args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
52 args->hblanke = args->htotal - 1; 52 args->hblanke = args->htotal - 1;
53 53
 54 /*
 55 * If the output is VGA rather than digital, vtotal/htotal is invalid,
 56 * so we have to give up and trigger the timestamping fallback in the
 57 * drm core.
 58 */
59 if (!args->vtotal || !args->htotal)
60 return -ENOTSUPP;
61
54 args->time[0] = ktime_to_ns(ktime_get()); 62 args->time[0] = ktime_to_ns(ktime_get());
55 line = nv_rd32(priv, 0x600868 + (head * 0x2000)); 63 line = nv_rd32(priv, 0x600868 + (head * 0x2000));
56 args->time[1] = ktime_to_ns(ktime_get()); 64 args->time[1] = ktime_to_ns(ktime_get());
@@ -78,13 +86,13 @@ nv04_disp_sclass[] = {
78 ******************************************************************************/ 86 ******************************************************************************/
79 87
80static void 88static void
81nv04_disp_vblank_enable(struct nouveau_event *event, int head) 89nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head)
82{ 90{
83 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001); 91 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
84} 92}
85 93
86static void 94static void
87nv04_disp_vblank_disable(struct nouveau_event *event, int head) 95nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head)
88{ 96{
89 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000); 97 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
90} 98}
@@ -98,12 +106,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
98 u32 pvideo; 106 u32 pvideo;
99 107
100 if (crtc0 & 0x00000001) { 108 if (crtc0 & 0x00000001) {
101 nouveau_event_trigger(priv->base.vblank, 0); 109 nouveau_event_trigger(priv->base.vblank, 1, 0);
102 nv_wr32(priv, 0x600100, 0x00000001); 110 nv_wr32(priv, 0x600100, 0x00000001);
103 } 111 }
104 112
105 if (crtc1 & 0x00000001) { 113 if (crtc1 & 0x00000001) {
106 nouveau_event_trigger(priv->base.vblank, 1); 114 nouveau_event_trigger(priv->base.vblank, 1, 1);
107 nv_wr32(priv, 0x602100, 0x00000001); 115 nv_wr32(priv, 0x602100, 0x00000001);
108 } 116 }
109 117
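
The scanoutpos guard added above degrades gracefully: when the analog path leaves vtotal/htotal reading back as zero, returning -ENOTSUPP tells the DRM core to fall back to timestamp-based vblank estimation instead of trusting garbage timings. A trivial sketch of the contract (ENOTSUPP is kernel-internal, so it is defined by hand here):

    #include <stdio.h>

    #define ENOTSUPP 524 /* kernel-internal errno; not in userspace headers */

    /* mirrors the guard above: zero timings mean the precise query is
     * unsupported, so the caller should estimate from timestamps */
    static int scanoutpos(unsigned int vtotal, unsigned int htotal)
    {
    	if (!vtotal || !htotal)
    		return -ENOTSUPP;
    	printf("precise scanout position available\n");
    	return 0;
    }

    int main(void)
    {
    	if (scanoutpos(0, 800) == -ENOTSUPP)
    		printf("falling back to timestamp estimation\n");
    	return 0;
    }
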
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 9a0cab9c3adb..1e85f36c705f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -829,13 +829,13 @@ nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
829} 829}
830 830
831static void 831static void
832nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) 832nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
833{ 833{
834 nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); 834 nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
835} 835}
836 836
837static void 837static void
838nv50_disp_base_vblank_disable(struct nouveau_event *event, int head) 838nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
839{ 839{
840 nv_mask(event->priv, 0x61002c, (4 << head), 0); 840 nv_mask(event->priv, 0x61002c, (4 << head), 0);
841} 841}
@@ -1114,19 +1114,20 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1114 nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000); 1114 nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
1115} 1115}
1116 1116
1117static u16 1117static struct nvkm_output *
1118exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, 1118exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
1119 struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, 1119 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
1120 struct nvbios_outp *info) 1120 struct nvbios_outp *info)
1121{ 1121{
1122 struct nouveau_bios *bios = nouveau_bios(priv); 1122 struct nouveau_bios *bios = nouveau_bios(priv);
1123 u16 mask, type, data; 1123 struct nvkm_output *outp;
1124 u16 mask, type;
1124 1125
1125 if (outp < 4) { 1126 if (or < 4) {
1126 type = DCB_OUTPUT_ANALOG; 1127 type = DCB_OUTPUT_ANALOG;
1127 mask = 0; 1128 mask = 0;
1128 } else 1129 } else
1129 if (outp < 8) { 1130 if (or < 8) {
1130 switch (ctrl & 0x00000f00) { 1131 switch (ctrl & 0x00000f00) {
1131 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; 1132 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
1132 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; 1133 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -1136,45 +1137,48 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
1136 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 1137 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
1137 default: 1138 default:
1138 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 1139 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
1139 return 0x0000; 1140 return NULL;
1140 } 1141 }
1141 outp -= 4; 1142 or -= 4;
1142 } else { 1143 } else {
1143 outp = outp - 8; 1144 or = or - 8;
1144 type = 0x0010; 1145 type = 0x0010;
1145 mask = 0; 1146 mask = 0;
1146 switch (ctrl & 0x00000f00) { 1147 switch (ctrl & 0x00000f00) {
1147 case 0x00000000: type |= priv->pior.type[outp]; break; 1148 case 0x00000000: type |= priv->pior.type[or]; break;
1148 default: 1149 default:
1149 nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl); 1150 nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
1150 return 0x0000; 1151 return NULL;
1151 } 1152 }
1152 } 1153 }
1153 1154
1154 mask = 0x00c0 & (mask << 6); 1155 mask = 0x00c0 & (mask << 6);
1155 mask |= 0x0001 << outp; 1156 mask |= 0x0001 << or;
1156 mask |= 0x0100 << head; 1157 mask |= 0x0100 << head;
1157 1158
1158 data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); 1159 list_for_each_entry(outp, &priv->base.outp, head) {
1159 if (!data) 1160 if ((outp->info.hasht & 0xff) == type &&
1160 return 0x0000; 1161 (outp->info.hashm & mask) == mask) {
1161 1162 *data = nvbios_outp_match(bios, outp->info.hasht,
1162 /* off-chip encoders require matching the exact encoder type */ 1163 outp->info.hashm,
1163 if (dcb->location != 0) 1164 ver, hdr, cnt, len, info);
1164 type |= dcb->extdev << 8; 1165 if (!*data)
1166 return NULL;
1167 return outp;
1168 }
1169 }
1165 1170
1166 return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); 1171 return NULL;
1167} 1172}
1168 1173
1169static bool 1174static struct nvkm_output *
1170exec_script(struct nv50_disp_priv *priv, int head, int id) 1175exec_script(struct nv50_disp_priv *priv, int head, int id)
1171{ 1176{
1172 struct nouveau_bios *bios = nouveau_bios(priv); 1177 struct nouveau_bios *bios = nouveau_bios(priv);
1178 struct nvkm_output *outp;
1173 struct nvbios_outp info; 1179 struct nvbios_outp info;
1174 struct dcb_output dcb;
1175 u8 ver, hdr, cnt, len; 1180 u8 ver, hdr, cnt, len;
1176 u16 data; 1181 u32 data, ctrl = 0;
1177 u32 ctrl = 0x00000000;
1178 u32 reg; 1182 u32 reg;
1179 int i; 1183 int i;
1180 1184
@@ -1204,36 +1208,35 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
1204 } 1208 }
1205 1209
1206 if (!(ctrl & (1 << head))) 1210 if (!(ctrl & (1 << head)))
1207 return false; 1211 return NULL;
1208 i--; 1212 i--;
1209 1213
1210 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); 1214 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
1211 if (data) { 1215 if (outp) {
1212 struct nvbios_init init = { 1216 struct nvbios_init init = {
1213 .subdev = nv_subdev(priv), 1217 .subdev = nv_subdev(priv),
1214 .bios = bios, 1218 .bios = bios,
1215 .offset = info.script[id], 1219 .offset = info.script[id],
1216 .outp = &dcb, 1220 .outp = &outp->info,
1217 .crtc = head, 1221 .crtc = head,
1218 .execute = 1, 1222 .execute = 1,
1219 }; 1223 };
1220 1224
1221 return nvbios_exec(&init) == 0; 1225 nvbios_exec(&init);
1222 } 1226 }
1223 1227
1224 return false; 1228 return outp;
1225} 1229}
1226 1230
1227static u32 1231static struct nvkm_output *
1228exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, 1232exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1229 struct dcb_output *outp)
1230{ 1233{
1231 struct nouveau_bios *bios = nouveau_bios(priv); 1234 struct nouveau_bios *bios = nouveau_bios(priv);
1235 struct nvkm_output *outp;
1232 struct nvbios_outp info1; 1236 struct nvbios_outp info1;
1233 struct nvbios_ocfg info2; 1237 struct nvbios_ocfg info2;
1234 u8 ver, hdr, cnt, len; 1238 u8 ver, hdr, cnt, len;
1235 u32 ctrl = 0x00000000; 1239 u32 data, ctrl = 0;
1236 u32 data, conf = ~0;
1237 u32 reg; 1240 u32 reg;
1238 int i; 1241 int i;
1239 1242
@@ -1263,37 +1266,37 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
1263 } 1266 }
1264 1267
1265 if (!(ctrl & (1 << head))) 1268 if (!(ctrl & (1 << head)))
1266 return conf; 1269 return NULL;
1267 i--; 1270 i--;
1268 1271
1269 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); 1272 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
1270 if (!data) 1273 if (!data)
1271 return conf; 1274 return NULL;
1272 1275
1273 if (outp->location == 0) { 1276 if (outp->info.location == 0) {
1274 switch (outp->type) { 1277 switch (outp->info.type) {
1275 case DCB_OUTPUT_TMDS: 1278 case DCB_OUTPUT_TMDS:
1276 conf = (ctrl & 0x00000f00) >> 8; 1279 *conf = (ctrl & 0x00000f00) >> 8;
1277 if (pclk >= 165000) 1280 if (pclk >= 165000)
1278 conf |= 0x0100; 1281 *conf |= 0x0100;
1279 break; 1282 break;
1280 case DCB_OUTPUT_LVDS: 1283 case DCB_OUTPUT_LVDS:
1281 conf = priv->sor.lvdsconf; 1284 *conf = priv->sor.lvdsconf;
1282 break; 1285 break;
1283 case DCB_OUTPUT_DP: 1286 case DCB_OUTPUT_DP:
1284 conf = (ctrl & 0x00000f00) >> 8; 1287 *conf = (ctrl & 0x00000f00) >> 8;
1285 break; 1288 break;
1286 case DCB_OUTPUT_ANALOG: 1289 case DCB_OUTPUT_ANALOG:
1287 default: 1290 default:
1288 conf = 0x00ff; 1291 *conf = 0x00ff;
1289 break; 1292 break;
1290 } 1293 }
1291 } else { 1294 } else {
1292 conf = (ctrl & 0x00000f00) >> 8; 1295 *conf = (ctrl & 0x00000f00) >> 8;
1293 pclk = pclk / 2; 1296 pclk = pclk / 2;
1294 } 1297 }
1295 1298
1296 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); 1299 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
1297 if (data && id < 0xff) { 1300 if (data && id < 0xff) {
1298 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); 1301 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
1299 if (data) { 1302 if (data) {
@@ -1301,7 +1304,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
1301 .subdev = nv_subdev(priv), 1304 .subdev = nv_subdev(priv),
1302 .bios = bios, 1305 .bios = bios,
1303 .offset = data, 1306 .offset = data,
1304 .outp = outp, 1307 .outp = &outp->info,
1305 .crtc = head, 1308 .crtc = head,
1306 .execute = 1, 1309 .execute = 1,
1307 }; 1310 };
@@ -1310,7 +1313,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
1310 } 1313 }
1311 } 1314 }
1312 1315
1313 return conf; 1316 return outp;
1314} 1317}
1315 1318
1316static void 1319static void
@@ -1322,7 +1325,35 @@ nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
1322static void 1325static void
1323nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head) 1326nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
1324{ 1327{
1325 exec_script(priv, head, 2); 1328 struct nvkm_output *outp = exec_script(priv, head, 2);
1329
 1330 /* the binary driver does this outside of the supervisor handling
 1331 * (after the third supervisor from a detach). we (currently?)
 1332 * allow both detach/attach to happen in the same set of
 1333 * supervisor interrupts, so it would make sense to execute this
 1334 * (full power down?) script after all the detach phases of the
 1335 * supervisor handling. as with re-training from the second
 1336 * supervisor when needed, nvidia doesn't do this, so who knows
 1337 * if it's entirely safe, but it does appear to work.
 1338 *
 1339 * without this script being run, on some configurations I've
 1340 * seen, switching from DP to TMDS on a DP connector may result
 1341 * in a blank screen (SOR_PWR off/on can restore it).
 1342 */
1343 if (outp && outp->info.type == DCB_OUTPUT_DP) {
1344 struct nvkm_output_dp *outpdp = (void *)outp;
1345 struct nvbios_init init = {
1346 .subdev = nv_subdev(priv),
1347 .bios = nouveau_bios(priv),
1348 .outp = &outp->info,
1349 .crtc = head,
1350 .offset = outpdp->info.script[4],
1351 .execute = 1,
1352 };
1353
1354 nvbios_exec(&init);
1355 atomic_set(&outpdp->lt.done, 0);
1356 }
1326} 1357}
1327 1358
1328static void 1359static void
@@ -1444,56 +1475,83 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
1444static void 1475static void
1445nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) 1476nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1446{ 1477{
1447 struct dcb_output outp; 1478 struct nvkm_output *outp;
1448 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; 1479 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1449 u32 hval, hreg = 0x614200 + (head * 0x800); 1480 u32 hval, hreg = 0x614200 + (head * 0x800);
1450 u32 oval, oreg; 1481 u32 oval, oreg;
1451 u32 mask; 1482 u32 mask, conf;
1452 u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
1453 if (conf != ~0) {
1454 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
1455 u32 soff = (ffs(outp.or) - 1) * 0x08;
1456 u32 ctrl = nv_rd32(priv, 0x610794 + soff);
1457 u32 datarate;
1458
1459 switch ((ctrl & 0x000f0000) >> 16) {
1460 case 6: datarate = pclk * 30 / 8; break;
1461 case 5: datarate = pclk * 24 / 8; break;
1462 case 2:
1463 default:
1464 datarate = pclk * 18 / 8;
1465 break;
1466 }
1467 1483
1468 nouveau_dp_train(&priv->base, priv->sor.dp, 1484 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1469 &outp, head, datarate); 1485 if (!outp)
1470 } 1486 return;
1487
1488 /* we allow both encoder attach and detach operations to occur
 1489 * within a single supervisor (i.e. modeset) sequence. the
1490 * encoder detach scripts quite often switch off power to the
1491 * lanes, which requires the link to be re-trained.
1492 *
1493 * this is not generally an issue as the sink "must" (heh)
1494 * signal an irq when it's lost sync so the driver can
1495 * re-train.
1496 *
1497 * however, on some boards, if one does not configure at least
1498 * the gpu side of the link *before* attaching, then various
1499 * things can go horribly wrong (PDISP disappearing from mmio,
1500 * third supervisor never happens, etc).
1501 *
 1502 * the solution is simply to retrain here, if necessary. last
 1503 * I checked, the binary driver userspace does not appear to
1504 * trigger this situation (it forces an UPDATE between steps).
1505 */
1506 if (outp->info.type == DCB_OUTPUT_DP) {
1507 u32 soff = (ffs(outp->info.or) - 1) * 0x08;
1508 u32 ctrl, datarate;
1471 1509
1472 exec_clkcmp(priv, head, 0, pclk, &outp); 1510 if (outp->info.location == 0) {
1473 1511 ctrl = nv_rd32(priv, 0x610794 + soff);
1474 if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) { 1512 soff = 1;
1475 oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
1476 oval = 0x00000000;
1477 hval = 0x00000000;
1478 mask = 0xffffffff;
1479 } else
1480 if (!outp.location) {
1481 if (outp.type == DCB_OUTPUT_DP)
1482 nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
1483 oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
1484 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1485 hval = 0x00000000;
1486 mask = 0x00000707;
1487 } else { 1513 } else {
1488 oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800; 1514 ctrl = nv_rd32(priv, 0x610b80 + soff);
1489 oval = 0x00000001; 1515 soff = 2;
1490 hval = 0x00000001;
1491 mask = 0x00000707;
1492 } 1516 }
1493 1517
1494 nv_mask(priv, hreg, 0x0000000f, hval); 1518 switch ((ctrl & 0x000f0000) >> 16) {
1495 nv_mask(priv, oreg, mask, oval); 1519 case 6: datarate = pclk * 30 / 8; break;
1520 case 5: datarate = pclk * 24 / 8; break;
1521 case 2:
1522 default:
1523 datarate = pclk * 18 / 8;
1524 break;
1525 }
1526
1527 if (nvkm_output_dp_train(outp, datarate / soff, true))
1528 ERR("link not trained before attach\n");
1496 } 1529 }
1530
1531 exec_clkcmp(priv, head, 0, pclk, &conf);
1532
1533 if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
1534 oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
1535 oval = 0x00000000;
1536 hval = 0x00000000;
1537 mask = 0xffffffff;
1538 } else
1539 if (!outp->info.location) {
1540 if (outp->info.type == DCB_OUTPUT_DP)
1541 nv50_disp_intr_unk20_2_dp(priv, &outp->info, pclk);
1542 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
1543 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1544 hval = 0x00000000;
1545 mask = 0x00000707;
1546 } else {
1547 oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
1548 oval = 0x00000001;
1549 hval = 0x00000001;
1550 mask = 0x00000707;
1551 }
1552
1553 nv_mask(priv, hreg, 0x0000000f, hval);
1554 nv_mask(priv, oreg, mask, oval);
1497} 1555}
1498 1556
1499/* If programming a TMDS output on a SOR that can also be configured for 1557/* If programming a TMDS output on a SOR that can also be configured for
@@ -1521,30 +1579,16 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
1521static void 1579static void
1522nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head) 1580nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
1523{ 1581{
1524 struct dcb_output outp; 1582 struct nvkm_output *outp;
1525 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; 1583 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1526 if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) { 1584 u32 conf;
1527 if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
1528 nv50_disp_intr_unk40_0_tmds(priv, &outp);
1529 else
1530 if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
1531 u32 soff = (ffs(outp.or) - 1) * 0x08;
1532 u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
1533 u32 datarate;
1534
1535 switch ((ctrl & 0x000f0000) >> 16) {
1536 case 6: datarate = pclk * 30 / 8; break;
1537 case 5: datarate = pclk * 24 / 8; break;
1538 case 2:
1539 default:
1540 datarate = pclk * 18 / 8;
1541 break;
1542 }
1543 1585
1544 nouveau_dp_train(&priv->base, priv->pior.dp, 1586 outp = exec_clkcmp(priv, head, 1, pclk, &conf);
1545 &outp, head, datarate); 1587 if (!outp)
1546 } 1588 return;
1547 } 1589
1590 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
1591 nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
1548} 1592}
1549 1593
1550void 1594void
@@ -1610,13 +1654,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
1610 } 1654 }
1611 1655
1612 if (intr1 & 0x00000004) { 1656 if (intr1 & 0x00000004) {
1613 nouveau_event_trigger(priv->base.vblank, 0); 1657 nouveau_event_trigger(priv->base.vblank, 1, 0);
1614 nv_wr32(priv, 0x610024, 0x00000004); 1658 nv_wr32(priv, 0x610024, 0x00000004);
1615 intr1 &= ~0x00000004; 1659 intr1 &= ~0x00000004;
1616 } 1660 }
1617 1661
1618 if (intr1 & 0x00000008) { 1662 if (intr1 & 0x00000008) {
1619 nouveau_event_trigger(priv->base.vblank, 1); 1663 nouveau_event_trigger(priv->base.vblank, 1, 1);
1620 nv_wr32(priv, 0x610024, 0x00000008); 1664 nv_wr32(priv, 0x610024, 0x00000008);
1621 intr1 &= ~0x00000008; 1665 intr1 &= ~0x00000008;
1622 } 1666 }
@@ -1656,11 +1700,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1656 priv->dac.sense = nv50_dac_sense; 1700 priv->dac.sense = nv50_dac_sense;
1657 priv->sor.power = nv50_sor_power; 1701 priv->sor.power = nv50_sor_power;
1658 priv->pior.power = nv50_pior_power; 1702 priv->pior.power = nv50_pior_power;
1659 priv->pior.dp = &nv50_pior_dp_func;
1660 return 0; 1703 return 0;
1661} 1704}
1662 1705
1663struct nouveau_oclass * 1706struct nouveau_oclass *
1707nv50_disp_outp_sclass[] = {
1708 &nv50_pior_dp_impl.base.base,
1709 NULL
1710};
1711
1712struct nouveau_oclass *
1664nv50_disp_oclass = &(struct nv50_disp_impl) { 1713nv50_disp_oclass = &(struct nv50_disp_impl) {
1665 .base.base.handle = NV_ENGINE(DISP, 0x50), 1714 .base.base.handle = NV_ENGINE(DISP, 0x50),
1666 .base.base.ofuncs = &(struct nouveau_ofuncs) { 1715 .base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -1669,6 +1718,7 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
1669 .init = _nouveau_disp_init, 1718 .init = _nouveau_disp_init,
1670 .fini = _nouveau_disp_fini, 1719 .fini = _nouveau_disp_fini,
1671 }, 1720 },
1721 .base.outp = nv50_disp_outp_sclass,
1672 .mthd.core = &nv50_disp_mast_mthd_chan, 1722 .mthd.core = &nv50_disp_mast_mthd_chan,
1673 .mthd.base = &nv50_disp_sync_mthd_chan, 1723 .mthd.base = &nv50_disp_sync_mthd_chan,
1674 .mthd.ovly = &nv50_disp_ovly_mthd_chan, 1724 .mthd.ovly = &nv50_disp_ovly_mthd_chan,
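
The exec_lookup() rework above stops re-parsing the DCB table on every supervisor interrupt: it rebuilds the type/mask hash the outputs were registered under (output type in hasht's low byte; OR bit, link bits 6-7 and head bits 8 and up in hashm) and walks the disp's live output list for a match. A toy version of the matching, with invented hash values:

    #include <stdint.h>
    #include <stdio.h>

    struct outp { uint16_t hasht, hashm; const char *name; };

    int main(void)
    {
    	/* candidate outputs, hashed roughly as the DCB does it; the
    	 * concrete type codes and hash values here are made up */
    	struct outp outps[] = {
    		{ 0x0002, 0x0150, "TMDS on SOR0 link A, head 0" },
    		{ 0x0006, 0x02a0, "DP on SOR1 link B, head 1" },
    	};
    	int head = 1, or = 5, type = 0x0006, link = 2;
    	uint16_t mask = (0x00c0 & (link << 6)) | (0x0001 << or)
    		      | (0x0100 << head);

    	for (unsigned int i = 0; i < 2; i++) {
    		if ((outps[i].hasht & 0xff) == type &&
    		    (outps[i].hashm & mask) == mask)
    			printf("matched: %s\n", outps[i].name);
    	}
    	return 0;
    }
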
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 48d59db47f0d..1a886472b6f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -11,6 +11,8 @@
11 11
12#include "dport.h" 12#include "dport.h"
13#include "priv.h" 13#include "priv.h"
14#include "outp.h"
15#include "outpdp.h"
14 16
15struct nv50_disp_impl { 17struct nv50_disp_impl {
16 struct nouveau_disp_impl base; 18 struct nouveau_disp_impl base;
@@ -43,13 +45,11 @@ struct nv50_disp_priv {
43 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); 45 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
44 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); 46 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
45 u32 lvdsconf; 47 u32 lvdsconf;
46 const struct nouveau_dp_func *dp;
47 } sor; 48 } sor;
48 struct { 49 struct {
49 int nr; 50 int nr;
50 int (*power)(struct nv50_disp_priv *, int ext, u32 data); 51 int (*power)(struct nv50_disp_priv *, int ext, u32 data);
51 u8 type[3]; 52 u8 type[3];
52 const struct nouveau_dp_func *dp;
53 } pior; 53 } pior;
54}; 54};
55 55
@@ -199,4 +199,14 @@ void nvd0_disp_intr(struct nouveau_subdev *);
199extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan; 199extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
200extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan; 200extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
201 201
202extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
203extern struct nouveau_oclass *nv50_disp_outp_sclass[];
204
205extern struct nvkm_output_dp_impl nv94_sor_dp_impl;
206int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
207extern struct nouveau_oclass *nv94_disp_outp_sclass[];
208
209extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
210extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
211
202#endif 212#endif
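
The externs added above point at impl structures that, throughout this series, are defined with the C99 compound-literal idiom seen earlier in conn.c (&(struct nvkm_connector_impl){ ... }.base): an anonymous object with static storage duration, exported only as a pointer to its embedded base member. A minimal sketch of the idiom:

    #include <stdio.h>

    struct base { int handle; const char *name; };
    struct impl { struct base base; int extra; };

    /* file-scope compound literal: a static anonymous 'struct impl',
     * visible to the outside world only through its base pointer */
    static struct base *myclass = &(struct impl) {
    	.base = { .handle = 0x50, .name = "demo" },
    	.extra = 42,
    }.base;

    int main(void)
    {
    	printf("%s: %02x\n", myclass->name, myclass->handle);
    	return 0;
    }
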
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 98c5b19bc2b0..1cc62e434683 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -264,7 +264,6 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
264 priv->sor.power = nv50_sor_power; 264 priv->sor.power = nv50_sor_power;
265 priv->sor.hdmi = nv84_hdmi_ctrl; 265 priv->sor.hdmi = nv84_hdmi_ctrl;
266 priv->pior.power = nv50_pior_power; 266 priv->pior.power = nv50_pior_power;
267 priv->pior.dp = &nv50_pior_dp_func;
268 return 0; 267 return 0;
269} 268}
270 269
@@ -277,6 +276,7 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
277 .init = _nouveau_disp_init, 276 .init = _nouveau_disp_init,
278 .fini = _nouveau_disp_fini, 277 .fini = _nouveau_disp_fini,
279 }, 278 },
279 .base.outp = nv50_disp_outp_sclass,
280 .mthd.core = &nv84_disp_mast_mthd_chan, 280 .mthd.core = &nv84_disp_mast_mthd_chan,
281 .mthd.base = &nv84_disp_sync_mthd_chan, 281 .mthd.base = &nv84_disp_sync_mthd_chan,
282 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 282 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index 6844061c7e04..4f718a9f5aef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -77,6 +77,7 @@ nv94_disp_base_omthds[] = {
77 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, 77 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
78 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, 78 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
79 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, 79 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
80 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
80 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, 81 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
81 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, 82 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
82 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, 83 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
@@ -122,13 +123,18 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
122 priv->dac.sense = nv50_dac_sense; 123 priv->dac.sense = nv50_dac_sense;
123 priv->sor.power = nv50_sor_power; 124 priv->sor.power = nv50_sor_power;
124 priv->sor.hdmi = nv84_hdmi_ctrl; 125 priv->sor.hdmi = nv84_hdmi_ctrl;
125 priv->sor.dp = &nv94_sor_dp_func;
126 priv->pior.power = nv50_pior_power; 126 priv->pior.power = nv50_pior_power;
127 priv->pior.dp = &nv50_pior_dp_func;
128 return 0; 127 return 0;
129} 128}
130 129
131struct nouveau_oclass * 130struct nouveau_oclass *
131nv94_disp_outp_sclass[] = {
132 &nv50_pior_dp_impl.base.base,
133 &nv94_sor_dp_impl.base.base,
134 NULL
135};
136
137struct nouveau_oclass *
132nv94_disp_oclass = &(struct nv50_disp_impl) { 138nv94_disp_oclass = &(struct nv50_disp_impl) {
133 .base.base.handle = NV_ENGINE(DISP, 0x88), 139 .base.base.handle = NV_ENGINE(DISP, 0x88),
134 .base.base.ofuncs = &(struct nouveau_ofuncs) { 140 .base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -137,6 +143,7 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
137 .init = _nouveau_disp_init, 143 .init = _nouveau_disp_init,
138 .fini = _nouveau_disp_fini, 144 .fini = _nouveau_disp_fini,
139 }, 145 },
146 .base.outp = nv94_disp_outp_sclass,
140 .mthd.core = &nv94_disp_mast_mthd_chan, 147 .mthd.core = &nv94_disp_mast_mthd_chan,
141 .mthd.base = &nv84_disp_sync_mthd_chan, 148 .mthd.base = &nv84_disp_sync_mthd_chan,
142 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 149 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 88c96241c02a..6237a9a36f70 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -126,7 +126,6 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
126 priv->sor.power = nv50_sor_power; 126 priv->sor.power = nv50_sor_power;
127 priv->sor.hdmi = nv84_hdmi_ctrl; 127 priv->sor.hdmi = nv84_hdmi_ctrl;
128 priv->pior.power = nv50_pior_power; 128 priv->pior.power = nv50_pior_power;
129 priv->pior.dp = &nv50_pior_dp_func;
130 return 0; 129 return 0;
131} 130}
132 131
@@ -139,6 +138,7 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
139 .init = _nouveau_disp_init, 138 .init = _nouveau_disp_init,
140 .fini = _nouveau_disp_fini, 139 .fini = _nouveau_disp_fini,
141 }, 140 },
141 .base.outp = nv50_disp_outp_sclass,
142 .mthd.core = &nv84_disp_mast_mthd_chan, 142 .mthd.core = &nv84_disp_mast_mthd_chan,
143 .mthd.base = &nv84_disp_sync_mthd_chan, 143 .mthd.base = &nv84_disp_sync_mthd_chan,
144 .mthd.ovly = &nva0_disp_ovly_mthd_chan, 144 .mthd.ovly = &nva0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 46cb2ce0e82a..019124d4782b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -50,6 +50,7 @@ nva3_disp_base_omthds[] = {
50 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, 50 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
51 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, 51 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
52 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, 52 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
53 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
53 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, 54 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, 55 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
55 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, 56 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
@@ -96,9 +97,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
96 priv->sor.power = nv50_sor_power; 97 priv->sor.power = nv50_sor_power;
97 priv->sor.hda_eld = nva3_hda_eld; 98 priv->sor.hda_eld = nva3_hda_eld;
98 priv->sor.hdmi = nva3_hdmi_ctrl; 99 priv->sor.hdmi = nva3_hdmi_ctrl;
99 priv->sor.dp = &nv94_sor_dp_func;
100 priv->pior.power = nv50_pior_power; 100 priv->pior.power = nv50_pior_power;
101 priv->pior.dp = &nv50_pior_dp_func;
102 return 0; 101 return 0;
103} 102}
104 103
@@ -111,6 +110,7 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
111 .init = _nouveau_disp_init, 110 .init = _nouveau_disp_init,
112 .fini = _nouveau_disp_fini, 111 .fini = _nouveau_disp_fini,
113 }, 112 },
113 .base.outp = nv94_disp_outp_sclass,
114 .mthd.core = &nv94_disp_mast_mthd_chan, 114 .mthd.core = &nv94_disp_mast_mthd_chan,
115 .mthd.base = &nv84_disp_sync_mthd_chan, 115 .mthd.base = &nv84_disp_sync_mthd_chan,
116 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 116 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 876de9ac3793..48aa38a87e3f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -748,13 +748,13 @@ nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
748} 748}
749 749
750static void 750static void
751nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head) 751nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
752{ 752{
753 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); 753 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
754} 754}
755 755
756static void 756static void
757nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head) 757nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
758{ 758{
759 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); 759 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
760} 760}
@@ -887,6 +887,7 @@ nvd0_disp_base_omthds[] = {
 	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
 	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
 	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_PWR)      , nv50_sor_mthd },
 	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
 	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
 	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
@@ -915,19 +916,20 @@ nvd0_disp_sclass[] = {
  * Display engine implementation
  ******************************************************************************/
 
-static u16
-exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
-	    struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+static struct nvkm_output *
+exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	    struct nvbios_outp *info)
 {
 	struct nouveau_bios *bios = nouveau_bios(priv);
-	u16 mask, type, data;
+	struct nvkm_output *outp;
+	u16 mask, type;
 
-	if (outp < 4) {
+	if (or < 4) {
 		type = DCB_OUTPUT_ANALOG;
 		mask = 0;
 	} else {
-		outp -= 4;
+		or -= 4;
 		switch (ctrl & 0x00000f00) {
 		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
 		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -939,101 +941,106 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
 			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
 			return 0x0000;
 		}
-		dcb->sorconf.link = mask;
 	}
 
 	mask  = 0x00c0 & (mask << 6);
-	mask |= 0x0001 << outp;
+	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;
 
-	data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
-	if (!data)
-		return 0x0000;
+	list_for_each_entry(outp, &priv->base.outp, head) {
+		if ((outp->info.hasht & 0xff) == type &&
+		    (outp->info.hashm & mask) == mask) {
+			*data = nvbios_outp_match(bios, outp->info.hasht,
+						  outp->info.hashm,
+						  ver, hdr, cnt, len, info);
+			if (!*data)
+				return NULL;
+			return outp;
+		}
+	}
 
-	return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+	return NULL;
 }
 
-static bool
+static struct nvkm_output *
 exec_script(struct nv50_disp_priv *priv, int head, int id)
 {
 	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvkm_output *outp;
 	struct nvbios_outp info;
-	struct dcb_output dcb;
 	u8  ver, hdr, cnt, len;
-	u32 ctrl = 0x00000000;
-	u16 data;
-	int outp;
+	u32 data, ctrl = 0;
+	int or;
 
-	for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
-		ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
 		if (ctrl & (1 << head))
 			break;
 	}
 
-	if (outp == 8)
-		return false;
+	if (or == 8)
+		return NULL;
 
-	data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
-	if (data) {
+	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
 		struct nvbios_init init = {
 			.subdev = nv_subdev(priv),
 			.bios = bios,
 			.offset = info.script[id],
-			.outp = &dcb,
+			.outp = &outp->info,
 			.crtc = head,
 			.execute = 1,
 		};
 
-		return nvbios_exec(&init) == 0;
+		nvbios_exec(&init);
 	}
 
-	return false;
+	return outp;
 }
 
-static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
-	    u32 pclk, struct dcb_output *dcb)
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 {
 	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvkm_output *outp;
 	struct nvbios_outp info1;
 	struct nvbios_ocfg info2;
 	u8  ver, hdr, cnt, len;
-	u32 ctrl = 0x00000000;
-	u32 data, conf = ~0;
-	int outp;
+	u32 data, ctrl = 0;
+	int or;
 
-	for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
-		ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
 		if (ctrl & (1 << head))
 			break;
 	}
 
-	if (outp == 8)
-		return conf;
+	if (or == 8)
+		return NULL;
 
-	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
-	if (data == 0x0000)
-		return conf;
+	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	if (!outp)
+		return NULL;
 
-	switch (dcb->type) {
+	switch (outp->info.type) {
 	case DCB_OUTPUT_TMDS:
-		conf = (ctrl & 0x00000f00) >> 8;
+		*conf = (ctrl & 0x00000f00) >> 8;
 		if (pclk >= 165000)
-			conf |= 0x0100;
+			*conf |= 0x0100;
 		break;
 	case DCB_OUTPUT_LVDS:
-		conf = priv->sor.lvdsconf;
+		*conf = priv->sor.lvdsconf;
 		break;
 	case DCB_OUTPUT_DP:
-		conf = (ctrl & 0x00000f00) >> 8;
+		*conf = (ctrl & 0x00000f00) >> 8;
 		break;
 	case DCB_OUTPUT_ANALOG:
 	default:
-		conf = 0x00ff;
+		*conf = 0x00ff;
 		break;
 	}
 
-	data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
 	if (data && id < 0xff) {
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
@@ -1041,7 +1048,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
 				.subdev = nv_subdev(priv),
 				.bios = bios,
 				.offset = data,
-				.outp = dcb,
+				.outp = &outp->info,
 				.crtc = head,
 				.execute = 1,
 			};
@@ -1050,7 +1057,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
 		}
 	}
 
-	return conf;
+	return outp;
 }
 
 static void
@@ -1062,7 +1069,23 @@ nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
 static void
 nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
 {
-	exec_script(priv, head, 2);
+	struct nvkm_output *outp = exec_script(priv, head, 2);
+
+	/* see note in nv50_disp_intr_unk20_0() */
+	if (outp && outp->info.type == DCB_OUTPUT_DP) {
+		struct nvkm_output_dp *outpdp = (void *)outp;
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = nouveau_bios(priv),
+			.outp = &outp->info,
+			.crtc = head,
+			.offset = outpdp->info.script[4],
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+		atomic_set(&outpdp->lt.done, 0);
+	}
 }
 
 static void
@@ -1124,49 +1147,52 @@ nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
 static void
 nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
 {
-	struct dcb_output outp;
+	struct nvkm_output *outp;
 	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
-	if (conf != ~0) {
-		u32 addr, data;
-
-		if (outp.type == DCB_OUTPUT_DP) {
-			u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
-			switch ((sync & 0x000003c0) >> 6) {
-			case 6: pclk = pclk * 30 / 8; break;
-			case 5: pclk = pclk * 24 / 8; break;
-			case 2:
-			default:
-				pclk = pclk * 18 / 8;
-				break;
-			}
-
-			nouveau_dp_train(&priv->base, priv->sor.dp,
-					 &outp, head, pclk);
-		}
-
-		exec_clkcmp(priv, head, 0, pclk, &outp);
+	u32 conf, addr, data;
+
+	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+	if (!outp)
+		return;
+
+	/* see note in nv50_disp_intr_unk20_2() */
+	if (outp->info.type == DCB_OUTPUT_DP) {
+		u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
+		switch ((sync & 0x000003c0) >> 6) {
+		case 6: pclk = pclk * 30 / 8; break;
+		case 5: pclk = pclk * 24 / 8; break;
+		case 2:
+		default:
+			pclk = pclk * 18 / 8;
+			break;
+		}
+
+		if (nvkm_output_dp_train(outp, pclk, true))
+			ERR("link not trained before attach\n");
+	}
+
+	exec_clkcmp(priv, head, 0, pclk, &conf);
 
-		if (outp.type == DCB_OUTPUT_ANALOG) {
-			addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
-			data = 0x00000000;
-		} else {
-			if (outp.type == DCB_OUTPUT_DP)
-				nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
-			addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
-			data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
-		}
-
-		nv_mask(priv, addr, 0x00000707, data);
+	if (outp->info.type == DCB_OUTPUT_ANALOG) {
+		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+		data = 0x00000000;
+	} else {
+		if (outp->info.type == DCB_OUTPUT_DP)
+			nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
 	}
+
+	nv_mask(priv, addr, 0x00000707, data);
 }
 
 static void
 nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
 {
-	struct dcb_output outp;
 	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	exec_clkcmp(priv, head, 1, pclk, &outp);
+	u32 conf;
+
+	exec_clkcmp(priv, head, 1, pclk, &conf);
 }
 
 void
@@ -1240,7 +1266,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
 		 chid, (mthd & 0x0000ffc), data, mthd, unkn);
 
 	if (chid == 0) {
-		switch (mthd) {
+		switch (mthd & 0xffc) {
 		case 0x0080:
 			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
 					    impl->mthd.core);
@@ -1250,7 +1276,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
 		}
 	} else
 	if (chid <= 4) {
-		switch (mthd) {
+		switch (mthd & 0xffc) {
 		case 0x0080:
 			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
 					    impl->mthd.base);
@@ -1260,7 +1286,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
 		}
 	} else
 	if (chid <= 8) {
-		switch (mthd) {
+		switch (mthd & 0xffc) {
 		case 0x0080:
 			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
 					    impl->mthd.ovly);
@@ -1317,7 +1343,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
 		if (mask & intr) {
 			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
 			if (stat & 0x00000001)
-				nouveau_event_trigger(priv->base.vblank, i);
+				nouveau_event_trigger(priv->base.vblank, 1, i);
 			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
 			nv_rd32(priv, 0x6100c0 + (i * 0x800));
 		}
@@ -1352,11 +1378,16 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->sor.power = nv50_sor_power;
 	priv->sor.hda_eld = nvd0_hda_eld;
 	priv->sor.hdmi = nvd0_hdmi_ctrl;
-	priv->sor.dp = &nvd0_sor_dp_func;
 	return 0;
 }
 
 struct nouveau_oclass *
+nvd0_disp_outp_sclass[] = {
+	&nvd0_sor_dp_impl.base.base,
+	NULL
+};
+
+struct nouveau_oclass *
 nvd0_disp_oclass = &(struct nv50_disp_impl) {
 	.base.base.handle = NV_ENGINE(DISP, 0x90),
 	.base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -1365,6 +1396,7 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
+	.base.outp = nvd0_disp_outp_sclass,
 	.mthd.core = &nvd0_disp_mast_mthd_chan,
 	.mthd.base = &nvd0_disp_sync_mthd_chan,
 	.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 44e0b8f34c1a..11328e3f5df1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -246,7 +246,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->sor.power = nv50_sor_power;
 	priv->sor.hda_eld = nvd0_hda_eld;
 	priv->sor.hdmi = nvd0_hdmi_ctrl;
-	priv->sor.dp = &nvd0_sor_dp_func;
 	return 0;
 }
 
@@ -259,6 +258,7 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
+	.base.outp = nvd0_disp_outp_sclass,
 	.mthd.core = &nve0_disp_mast_mthd_chan,
 	.mthd.base = &nvd0_disp_sync_mthd_chan,
 	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 482585d375fa..104388081d73 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -81,7 +81,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->sor.power = nv50_sor_power;
 	priv->sor.hda_eld = nvd0_hda_eld;
 	priv->sor.hdmi = nvd0_hdmi_ctrl;
-	priv->sor.dp = &nvd0_sor_dp_func;
 	return 0;
 }
 
@@ -94,6 +93,7 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
 		.init = _nouveau_disp_init,
 		.fini = _nouveau_disp_fini,
 	},
+	.base.outp = nvd0_disp_outp_sclass,
 	.mthd.core = &nve0_disp_mast_mthd_chan,
 	.mthd.base = &nvd0_disp_sync_mthd_chan,
 	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
new file mode 100644
index 000000000000..ad9ba7ccec7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+
+#include "outp.h"
+
+int
+_nvkm_output_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvkm_output *outp = (void *)object;
+	nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
+	return nouveau_object_fini(&outp->base, suspend);
+}
+
+int
+_nvkm_output_init(struct nouveau_object *object)
+{
+	struct nvkm_output *outp = (void *)object;
+	int ret = nouveau_object_init(&outp->base);
+	if (ret == 0)
+		nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
+	return 0;
+}
+
+void
+_nvkm_output_dtor(struct nouveau_object *object)
+{
+	struct nvkm_output *outp = (void *)object;
+	list_del(&outp->head);
+	nouveau_object_ref(NULL, (void *)&outp->conn);
+	nouveau_object_destroy(&outp->base);
+}
+
+int
+nvkm_output_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass,
+		    struct dcb_output *dcbE, int index,
+		    int length, void **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(engine);
+	struct nouveau_i2c *i2c = nouveau_i2c(parent);
+	struct nouveau_disp *disp = (void *)engine;
+	struct nvbios_connE connE;
+	struct nvkm_output *outp;
+	u8  ver, hdr;
+	u32 data;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+	outp = *pobject;
+	if (ret)
+		return ret;
+
+	outp->info = *dcbE;
+	outp->index = index;
+
+	DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
+	    dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
+	    dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
+	    dcbE->bus, dcbE->heads);
+
+	outp->port = i2c->find(i2c, outp->info.i2c_index);
+	outp->edid = outp->port;
+
+	data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
+	if (!data) {
+		DBG("vbios connector data not found\n");
+		memset(&connE, 0x00, sizeof(connE));
+		connE.type = DCB_CONNECTOR_NONE;
+	}
+
+	ret = nouveau_object_ctor(parent, engine, nvkm_connector_oclass,
+				  &connE, outp->info.connector,
+				  (struct nouveau_object **)&outp->conn);
+	if (ret < 0) {
+		ERR("error %d creating connector, disabling\n", ret);
+		return ret;
+	}
+
+	list_add_tail(&outp->head, &disp->outp);
+	return 0;
+}
+
+int
+_nvkm_output_ctor(struct nouveau_object *parent,
+		  struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *dcbE, u32 index,
+		  struct nouveau_object **pobject)
+{
+	struct nvkm_output *outp;
+	int ret;
+
+	ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
+	*pobject = nv_object(outp);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass *
+nvkm_output_oclass = &(struct nvkm_output_impl) {
+	.base = {
+		.handle = 0,
+		.ofuncs = &(struct nouveau_ofuncs) {
+			.ctor = _nvkm_output_ctor,
+			.dtor = _nvkm_output_dtor,
+			.init = _nvkm_output_init,
+			.fini = _nvkm_output_fini,
+		},
+	},
+}.base;
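
The hasht/hashm test that exec_lookup() and nv50_sor_mthd() now run against this output list is compact but easy to misread: the low byte of the DCB hash-type selects the output type, while the hash-mask carries one bit each for the head, link and OR the caller requires, and all of them must be present. A toy standalone model of just that matching rule follows; the struct, field widths and sample values are invented for illustration and are not the driver's types.

    /* Toy model of the hasht/hashm match in exec_lookup()/nv50_sor_mthd().
     * The mask layout (head/link/or bits) follows the diff above; the
     * sample output and its hash values are invented. */
    #include <stdio.h>

    struct output { unsigned short hasht, hashm; };

    static int
    matches(const struct output *o, unsigned char type, unsigned short mask)
    {
            /* low byte of hasht is the output type; every bit the caller
             * asks for in mask must be set in hashm */
            return (o->hasht & 0xff) == type && (o->hashm & mask) == mask;
    }

    int
    main(void)
    {
            /* head 0, link 1, or 2, built the way nv50_sor_mthd() does */
            unsigned short mask = (0x0100 << 0) | (0x0040 << 1) | (0x0001 << 2);
            struct output sor = { .hasht = 0x0006, .hashm = 0x0184 };

            printf("%d\n", matches(&sor, 0x06, mask)); /* prints 1 */
            return 0;
    }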
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
new file mode 100644
index 000000000000..bc76fbf85710
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
@@ -0,0 +1,59 @@
+#ifndef __NVKM_DISP_OUTP_H__
+#define __NVKM_DISP_OUTP_H__
+
+#include "priv.h"
+
+struct nvkm_output {
+	struct nouveau_object base;
+	struct list_head head;
+
+	struct dcb_output info;
+	int index;
+
+	struct nouveau_i2c_port *port;
+	struct nouveau_i2c_port *edid;
+
+	struct nvkm_connector *conn;
+};
+
+#define nvkm_output_create(p,e,c,b,i,d) \
+	nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_output_destroy(d) ({ \
+	struct nvkm_output *_outp = (d); \
+	_nvkm_output_dtor(nv_object(_outp)); \
+})
+#define nvkm_output_init(d) ({ \
+	struct nvkm_output *_outp = (d); \
+	_nvkm_output_init(nv_object(_outp)); \
+})
+#define nvkm_output_fini(d,s) ({ \
+	struct nvkm_output *_outp = (d); \
+	_nvkm_output_fini(nv_object(_outp), (s)); \
+})
+
+int nvkm_output_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, struct dcb_output *,
+			int, int, void **);
+
+int  _nvkm_output_ctor(struct nouveau_object *, struct nouveau_object *,
+		       struct nouveau_oclass *, void *, u32,
+		       struct nouveau_object **);
+void _nvkm_output_dtor(struct nouveau_object *);
+int  _nvkm_output_init(struct nouveau_object *);
+int  _nvkm_output_fini(struct nouveau_object *, bool);
+
+struct nvkm_output_impl {
+	struct nouveau_oclass base;
+};
+
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+	struct nvkm_output *_outp = (void *)outp; \
+	nv_##l(nv_object(outp)->engine, "%02x:%04x:%04x: "f, _outp->index, \
+	       _outp->info.hasht, _outp->info.hashm, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
new file mode 100644
index 000000000000..52c299c3d300
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+
+#include "outpdp.h"
+#include "conn.h"
+#include "dport.h"
+
+int
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+{
+	struct nvkm_output_dp *outp = (void *)base;
+	bool retrain = true;
+	u8 link[2], stat[3];
+	u32 rate;
+	int ret, i;
+
+	/* check that the link is trained at a high enough rate */
+	ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+	if (ret) {
+		DBG("failed to read link config, assuming no sink\n");
+		goto done;
+	}
+
+	rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+	if (rate < ((datarate / 8) * 10)) {
+		DBG("link not trained at sufficient rate\n");
+		goto done;
+	}
+
+	/* check that link is still trained */
+	ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+	if (ret) {
+		DBG("failed to read link status, assuming no sink\n");
+		goto done;
+	}
+
+	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
+		for (i = 0; i < (link[1] & DPCD_LC01_LANE_COUNT_SET); i++) {
+			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
+			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
+			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
+				DBG("lane %d not equalised\n", lane);
+				goto done;
+			}
+		}
+		retrain = false;
+	} else {
+		DBG("no inter-lane alignment\n");
+	}
+
+done:
+	if (retrain || !atomic_read(&outp->lt.done)) {
+		/* no sink, but still need to configure source */
+		if (outp->dpcd[DPCD_RC00_DPCD_REV] == 0x00) {
+			outp->dpcd[DPCD_RC01_MAX_LINK_RATE] =
+				outp->base.info.dpconf.link_bw;
+			outp->dpcd[DPCD_RC02] =
+				outp->base.info.dpconf.link_nr;
+		}
+		atomic_set(&outp->lt.done, 0);
+		schedule_work(&outp->lt.work);
+	} else {
+		nouveau_event_get(outp->irq);
+	}
+
+	if (wait) {
+		if (!wait_event_timeout(outp->lt.wait,
+					atomic_read(&outp->lt.done),
+					msecs_to_jiffies(2000)))
+			ret = -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
+static void
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+{
+	struct nouveau_i2c_port *port = outp->base.edid;
+	if (present) {
+		if (!outp->present) {
+			nouveau_i2c(port)->acquire_pad(port, 0);
+			DBG("aux power -> always\n");
+			outp->present = true;
+		}
+		nvkm_output_dp_train(&outp->base, 0, true);
+	} else {
+		if (outp->present) {
+			nouveau_i2c(port)->release_pad(port);
+			DBG("aux power -> demand\n");
+			outp->present = false;
+		}
+		atomic_set(&outp->lt.done, 0);
+	}
+}
+
+static void
+nvkm_output_dp_detect(struct nvkm_output_dp *outp)
+{
+	struct nouveau_i2c_port *port = outp->base.edid;
+	int ret = nouveau_i2c(port)->acquire_pad(port, 0);
+	if (ret == 0) {
+		ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
+			       outp->dpcd, sizeof(outp->dpcd));
+		nvkm_output_dp_enable(outp, ret == 0);
+		nouveau_i2c(port)->release_pad(port);
+	}
+}
+
+static void
+nvkm_output_dp_service_work(struct work_struct *work)
+{
+	struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work);
+	struct nouveau_disp *disp = nouveau_disp(outp);
+	int type = atomic_xchg(&outp->pending, 0);
+	u32 send = 0;
+
+	if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) {
+		nvkm_output_dp_detect(outp);
+		if (type & NVKM_I2C_UNPLUG)
+			send |= NVKM_HPD_UNPLUG;
+		if (type & NVKM_I2C_PLUG)
+			send |= NVKM_HPD_PLUG;
+		nouveau_event_get(outp->base.conn->hpd.event);
+	}
+
+	if (type & NVKM_I2C_IRQ) {
+		nvkm_output_dp_train(&outp->base, 0, true);
+		send |= NVKM_HPD_IRQ;
+	}
+
+	nouveau_event_trigger(disp->hpd, send, outp->base.info.connector);
+}
+
+static int
+nvkm_output_dp_service(void *data, u32 type, int index)
+{
+	struct nvkm_output_dp *outp = data;
+	DBG("HPD: %d\n", type);
+	atomic_or(type, &outp->pending);
+	schedule_work(&outp->work);
+	return NVKM_EVENT_DROP;
+}
+
+int
+_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvkm_output_dp *outp = (void *)object;
+	nouveau_event_put(outp->irq);
+	nvkm_output_dp_enable(outp, false);
+	return nvkm_output_fini(&outp->base, suspend);
+}
+
+int
+_nvkm_output_dp_init(struct nouveau_object *object)
+{
+	struct nvkm_output_dp *outp = (void *)object;
+	nvkm_output_dp_detect(outp);
+	return nvkm_output_init(&outp->base);
+}
+
+void
+_nvkm_output_dp_dtor(struct nouveau_object *object)
+{
+	struct nvkm_output_dp *outp = (void *)object;
+	nouveau_event_ref(NULL, &outp->irq);
+	nvkm_output_destroy(&outp->base);
+}
+
+int
+nvkm_output_dp_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass,
+		       struct dcb_output *info, int index,
+		       int length, void **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_i2c *i2c = nouveau_i2c(parent);
+	struct nvkm_output_dp *outp;
+	u8  hdr, cnt, len;
+	u32 data;
+	int ret;
+
+	ret = nvkm_output_create_(parent, engine, oclass, info, index,
+				  length, pobject);
+	outp = *pobject;
+	if (ret)
+		return ret;
+
+	nouveau_event_ref(NULL, &outp->base.conn->hpd.event);
+
+	/* access to the aux channel is not optional... */
+	if (!outp->base.edid) {
+		ERR("aux channel not found\n");
+		return -ENODEV;
+	}
+
+	/* nor is the bios data for this output... */
+	data = nvbios_dpout_match(bios, outp->base.info.hasht,
+				  outp->base.info.hashm, &outp->version,
+				  &hdr, &cnt, &len, &outp->info);
+	if (!data) {
+		ERR("no bios dp data\n");
+		return -ENODEV;
+	}
+
+	DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+
+	/* link training */
+	INIT_WORK(&outp->lt.work, nouveau_dp_train);
+	init_waitqueue_head(&outp->lt.wait);
+	atomic_set(&outp->lt.done, 0);
+
+	/* link maintenance */
+	ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index,
+				nvkm_output_dp_service, outp, &outp->irq);
+	if (ret) {
+		ERR("error monitoring aux irq event: %d\n", ret);
+		return ret;
+	}
+
+	INIT_WORK(&outp->work, nvkm_output_dp_service_work);
+
+	/* hotplug detect, replaces gpio-based mechanism with aux events */
+	ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
+				outp->base.edid->index,
+				nvkm_output_dp_service, outp,
+				&outp->base.conn->hpd.event);
+	if (ret) {
+		ERR("error monitoring aux hpd events: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+_nvkm_output_dp_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *info, u32 index,
+		     struct nouveau_object **pobject)
+{
+	struct nvkm_output_dp *outp;
+	int ret;
+
+	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
+	*pobject = nv_object(outp);
+	if (ret)
+		return ret;
+
+	return 0;
+}
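
One step in nvkm_output_dp_train() above is worth unpacking: the trained link capacity is the DPCD bandwidth code times 27,000 (the driver's unit for 0.27 GHz) times the lane count, and because DisplayPort uses 8b/10b coding, a payload of datarate needs at least datarate / 8 * 10 of raw capacity. A minimal standalone sketch of that arithmetic, with illustrative names that are not part of the driver:

    /* Standalone sketch of the bandwidth check in nvkm_output_dp_train().
     * Names are illustrative; only the arithmetic mirrors the driver. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool
    link_rate_sufficient(unsigned bw_code, unsigned lanes, unsigned datarate)
    {
            /* DPCD bandwidth codes: 0x06 = 1.62 GHz, 0x0a = 2.7 GHz */
            unsigned rate = bw_code * 27000 * lanes; /* trained capacity */
            /* 8b/10b: every 8 payload bits cost 10 symbol bits */
            return rate >= (datarate / 8) * 10;
    }

    int
    main(void)
    {
            /* two lanes at 2.7 GHz against a mode needing 400000 units:
             * 540000 >= 500000, so this prints 1 */
            printf("%d\n", link_rate_sufficient(0x0a, 2, 400000));
            return 0;
    }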
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
new file mode 100644
index 000000000000..ff33ba12cb67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
@@ -0,0 +1,65 @@
+#ifndef __NVKM_DISP_OUTP_DP_H__
+#define __NVKM_DISP_OUTP_DP_H__
+
+#include <subdev/bios.h>
+#include <subdev/bios/dp.h>
+
+#include "outp.h"
+
+struct nvkm_output_dp {
+	struct nvkm_output base;
+
+	struct nvbios_dpout info;
+	u8 version;
+
+	struct nouveau_eventh *irq;
+	struct nouveau_eventh *hpd;
+	struct work_struct work;
+	atomic_t pending;
+	bool present;
+	u8 dpcd[16];
+
+	struct {
+		struct work_struct work;
+		wait_queue_head_t wait;
+		atomic_t done;
+	} lt;
+};
+
+#define nvkm_output_dp_create(p,e,c,b,i,d) \
+	nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_output_dp_destroy(d) ({ \
+	struct nvkm_output_dp *_outp = (d); \
+	_nvkm_output_dp_dtor(nv_object(_outp)); \
+})
+#define nvkm_output_dp_init(d) ({ \
+	struct nvkm_output_dp *_outp = (d); \
+	_nvkm_output_dp_init(nv_object(_outp)); \
+})
+#define nvkm_output_dp_fini(d,s) ({ \
+	struct nvkm_output_dp *_outp = (d); \
+	_nvkm_output_dp_fini(nv_object(_outp), (s)); \
+})
+
+int nvkm_output_dp_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, struct dcb_output *,
+			   int, int, void **);
+
+int  _nvkm_output_dp_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
+void _nvkm_output_dp_dtor(struct nouveau_object *);
+int  _nvkm_output_dp_init(struct nouveau_object *);
+int  _nvkm_output_dp_fini(struct nouveau_object *, bool);
+
+struct nvkm_output_dp_impl {
+	struct nvkm_output_impl base;
+	int (*pattern)(struct nvkm_output_dp *, int);
+	int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
+	int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
+	int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+};
+
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
index 2c8ce351b52d..fe0f256f11bf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -33,68 +33,107 @@
 #include "nv50.h"
 
 /******************************************************************************
- * DisplayPort
+ * TMDS
  *****************************************************************************/
-static struct nouveau_i2c_port *
-nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+
+static int
+nv50_pior_tmds_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *info, u32 index,
+		    struct nouveau_object **pobject)
 {
-	struct nouveau_i2c *i2c = nouveau_i2c(disp);
-	return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+	struct nouveau_i2c *i2c = nouveau_i2c(parent);
+	struct nvkm_output *outp;
+	int ret;
+
+	ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
+	*pobject = nv_object(outp);
+	if (ret)
+		return ret;
+
+	outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
+	return 0;
 }
 
+struct nvkm_output_impl
+nv50_pior_tmds_impl = {
+	.base.handle = DCB_OUTPUT_TMDS | 0x0100,
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_pior_tmds_ctor,
+		.dtor = _nvkm_output_dtor,
+		.init = _nvkm_output_init,
+		.fini = _nvkm_output_fini,
+	},
+};
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+
 static int
-nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
-		     int head, int pattern)
+nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nouveau_i2c_port *port;
-	int ret = -EINVAL;
-
-	port = nv50_pior_dp_find(disp, outp);
-	if (port) {
-		if (port->func->pattern)
-			ret = port->func->pattern(port, pattern);
-		else
-			ret = 0;
-	}
-
-	return ret;
+	struct nouveau_i2c_port *port = outp->base.edid;
+	if (port && port->func->pattern)
+		return port->func->pattern(port, pattern);
+	return port ? 0 : -ENODEV;
 }
 
 static int
-nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		     int head, int lane_nr, int link_bw, bool enh)
+nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nouveau_i2c_port *port;
-	int ret = -EINVAL;
-
-	port = nv50_pior_dp_find(disp, outp);
+	return 0;
+}
+
+static int
+nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+{
+	struct nouveau_i2c_port *port = outp->base.edid;
 	if (port && port->func->lnk_ctl)
-		ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
-
-	return ret;
+		return port->func->lnk_ctl(port, nr, bw, ef);
+	return port ? 0 : -ENODEV;
+}
+
+static int
+nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+{
+	struct nouveau_i2c_port *port = outp->base.edid;
+	if (port && port->func->drv_ctl)
+		return port->func->drv_ctl(port, ln, vs, pe);
+	return port ? 0 : -ENODEV;
 }
 
 static int
-nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		     int head, int lane, int vsw, int pre)
+nv50_pior_dp_ctor(struct nouveau_object *parent,
+		  struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *info, u32 index,
+		  struct nouveau_object **pobject)
 {
-	struct nouveau_i2c_port *port;
-	int ret = -EINVAL;
-
-	port = nv50_pior_dp_find(disp, outp);
-	if (port) {
-		if (port->func->drv_ctl)
-			ret = port->func->drv_ctl(port, lane, vsw, pre);
-		else
-			ret = 0;
-	}
+	struct nouveau_i2c *i2c = nouveau_i2c(parent);
+	struct nvkm_output_dp *outp;
+	int ret;
 
-	return ret;
+	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
+	*pobject = nv_object(outp);
+	if (ret)
+		return ret;
+
+	outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
+					 outp->base.info.extdev));
+	return 0;
 }
 
-const struct nouveau_dp_func
-nv50_pior_dp_func = {
+struct nvkm_output_dp_impl
+nv50_pior_dp_impl = {
+	.base.base.handle = DCB_OUTPUT_DP | 0x0010,
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_pior_dp_ctor,
+		.dtor = _nvkm_output_dp_dtor,
+		.init = _nvkm_output_dp_init,
+		.fini = _nvkm_output_dp_fini,
+	},
 	.pattern = nv50_pior_dp_pattern,
+	.lnk_pwr = nv50_pior_dp_lnk_pwr,
 	.lnk_ctl = nv50_pior_dp_lnk_ctl,
 	.drv_ctl = nv50_pior_dp_drv_ctl,
 };
@@ -102,6 +141,7 @@ nv50_pior_dp_func = {
 /******************************************************************************
  * General PIOR handling
  *****************************************************************************/
+
 int
 nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
 {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index cc3c7a4ca747..26e9a42569c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -1,10 +1,42 @@
 #ifndef __NVKM_DISP_PRIV_H__
 #define __NVKM_DISP_PRIV_H__
 
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
+
 #include <engine/disp.h>
 
 struct nouveau_disp_impl {
 	struct nouveau_oclass base;
+	struct nouveau_oclass **outp;
+	struct nouveau_oclass **conn;
 };
 
+#define nouveau_disp_create(p,e,c,h,i,x,d) \
+	nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
+			     sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({ \
+	struct nouveau_disp *disp = (d); \
+	_nouveau_disp_dtor(nv_object(disp)); \
+})
+#define nouveau_disp_init(d) ({ \
+	struct nouveau_disp *disp = (d); \
+	_nouveau_disp_init(nv_object(disp)); \
+})
+#define nouveau_disp_fini(d,s) ({ \
+	struct nouveau_disp *disp = (d); \
+	_nouveau_disp_fini(nv_object(disp), (s)); \
+})
+
+int  nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int heads,
+			  const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
+int  _nouveau_disp_init(struct nouveau_object *);
+int  _nouveau_disp_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass *nvkm_output_oclass;
+extern struct nouveau_oclass *nvkm_connector_oclass;
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 526b75242899..e1832778e8b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,8 +47,12 @@ int
 nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
+	const u8  type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
 	const u8  head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+	const u8  link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
 	const u8    or = (mthd & NV50_DISP_SOR_MTHD_OR);
+	const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+	struct nvkm_output *outp = NULL, *temp;
 	u32 data;
 	int ret = -EINVAL;
 
@@ -56,6 +60,13 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 		return -EINVAL;
 	data = *(u32 *)args;
 
+	list_for_each_entry(temp, &priv->base.outp, head) {
+		if ((temp->info.hasht & 0xff) == type &&
+		    (temp->info.hashm & mask) == mask) {
+			outp = temp;
+			break;
+		}
+	}
 
 	switch (mthd & ~0x3f) {
 	case NV50_DISP_SOR_PWR:
@@ -71,6 +82,23 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 		priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
 		ret = 0;
 		break;
+	case NV94_DISP_SOR_DP_PWR:
+		if (outp) {
+			struct nvkm_output_dp *outpdp = (void *)outp;
+			switch (data) {
+			case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+				((struct nvkm_output_dp_impl *)nv_oclass(outp))
+					->lnk_pwr(outpdp, 0);
+				atomic_set(&outpdp->lt.done, 0);
+				break;
+			case NV94_DISP_SOR_DP_PWR_STATE_ON:
+				nvkm_output_dp_train(&outpdp->base, 0, true);
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+		break;
 	default:
 		BUG_ON(1);
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index eea3ef59693d..05487cda84a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -29,19 +29,21 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/dp.h>
 #include <subdev/bios/init.h>
+#include <subdev/timer.h>
 
 #include "nv50.h"
+#include "outpdp.h"
 
 static inline u32
-nv94_sor_soff(struct dcb_output *outp)
+nv94_sor_soff(struct nvkm_output_dp *outp)
 {
-	return (ffs(outp->or) - 1) * 0x800;
+	return (ffs(outp->base.info.or) - 1) * 0x800;
 }
 
 static inline u32
-nv94_sor_loff(struct dcb_output *outp)
+nv94_sor_loff(struct nvkm_output_dp *outp)
 {
-	return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+	return nv94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
 static inline u32
@@ -55,77 +57,96 @@ nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
 }
 
 static int
-nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int pattern)
+nv94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)disp;
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
 	const u32 loff = nv94_sor_loff(outp);
 	nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
 	return 0;
 }
 
+int
+nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+	const u32 soff = nv94_sor_soff(outp);
+	const u32 loff = nv94_sor_loff(outp);
+	u32 mask = 0, i;
+
+	for (i = 0; i < nr; i++)
+		mask |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
+	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	return 0;
+}
+
 static int
-nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int link_nr, int link_bw, bool enh_frame)
+nv94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)disp;
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
 	const u32 soff = nv94_sor_soff(outp);
 	const u32 loff = nv94_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
 	u32 clksor = 0x00000000;
-	u32 lane = 0;
-	int i;
 
-	dpctrl |= ((1 << link_nr) - 1) << 16;
-	if (enh_frame)
+	dpctrl |= ((1 << nr) - 1) << 16;
+	if (ef)
 		dpctrl |= 0x00004000;
-	if (link_bw > 0x06)
+	if (bw > 0x06)
 		clksor |= 0x00040000;
 
-	for (i = 0; i < link_nr; i++)
-		lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
-
 	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
 	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
 	return 0;
 }
 
 static int
-nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int lane, int swing, int preem)
+nv94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 {
-	struct nouveau_bios *bios = nouveau_bios(disp);
-	struct nv50_disp_priv *priv = (void *)disp;
-	const u32 shift = nv94_sor_dp_lane_map(priv, lane);
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 shift = nv94_sor_dp_lane_map(priv, ln);
 	const u32 loff = nv94_sor_loff(outp);
 	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
 
-	addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+				  outp->base.info.hashm,
 				  &ver, &hdr, &cnt, &len, &info);
 	if (!addr)
 		return -ENODEV;
 
-	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+	addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
 				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
 
 	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
 	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
+	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
 	return 0;
 }
 
-const struct nouveau_dp_func
-nv94_sor_dp_func = {
+struct nvkm_output_dp_impl
+nv94_sor_dp_impl = {
+	.base.base.handle = DCB_OUTPUT_DP,
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nvkm_output_dp_ctor,
+		.dtor = _nvkm_output_dp_dtor,
+		.init = _nvkm_output_dp_init,
+		.fini = _nvkm_output_dp_fini,
+	},
 	.pattern = nv94_sor_dp_pattern,
+	.lnk_pwr = nv94_sor_dp_lnk_pwr,
 	.lnk_ctl = nv94_sor_dp_lnk_ctl,
 	.drv_ctl = nv94_sor_dp_drv_ctl,
 };
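
The mask arithmetic in nv94_sor_dp_lnk_pwr() above is the one subtle part of the new hook: each logical lane index is pushed through the board's lane map, and bit (map >> 3) of the register mask powers the corresponding physical lane. A hedged standalone sketch; lane_map() here is a stand-in for nv94_sor_dp_lane_map(), whose real mapping is chipset-specific:

    /* Illustrative sketch of the lane-enable mask in nv94_sor_dp_lnk_pwr().
     * lane_map() stands in for nv94_sor_dp_lane_map(); the mapping below is
     * an assumption, real hardware derives it per chipset. */
    #include <stdio.h>

    static unsigned
    lane_map(unsigned lane)
    {
            static const unsigned map[4] = { 16, 8, 0, 24 };
            return map[lane & 3];
    }

    int
    main(void)
    {
            unsigned nr = 2, mask = 0, i;

            for (i = 0; i < nr; i++)
                    mask |= 1 << (lane_map(i) >> 3); /* one bit per lane */
            printf("mask = 0x%x\n", mask); /* 0x6: physical lanes 1 and 2 */
            return 0;
    }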
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index d2df572f16a3..97f0e9cd3d40 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -29,19 +29,20 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/dp.h>
 #include <subdev/bios/init.h>
+#include <subdev/timer.h>
 
 #include "nv50.h"
 
 static inline u32
-nvd0_sor_soff(struct dcb_output *outp)
+nvd0_sor_soff(struct nvkm_output_dp *outp)
 {
-	return (ffs(outp->or) - 1) * 0x800;
+	return (ffs(outp->base.info.or) - 1) * 0x800;
 }
 
 static inline u32
-nvd0_sor_loff(struct dcb_output *outp)
+nvd0_sor_loff(struct nvkm_output_dp *outp)
 {
-	return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+	return nvd0_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
 static inline u32
@@ -52,77 +53,80 @@ nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
 }
 
 static int
-nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int pattern)
+nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)disp;
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
 	const u32 loff = nvd0_sor_loff(outp);
 	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
 	return 0;
 }
 
 static int
-nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int link_nr, int link_bw, bool enh_frame)
+nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)disp;
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
 	const u32 soff = nvd0_sor_soff(outp);
 	const u32 loff = nvd0_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
 	u32 clksor = 0x00000000;
-	u32 lane = 0;
-	int i;
 
-	clksor |= link_bw << 18;
-	dpctrl |= ((1 << link_nr) - 1) << 16;
-	if (enh_frame)
+	clksor |= bw << 18;
+	dpctrl |= ((1 << nr) - 1) << 16;
+	if (ef)
 		dpctrl |= 0x00004000;
 
-	for (i = 0; i < link_nr; i++)
-		lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
-
 	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
 	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
 	return 0;
 }
 
 static int
-nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
-		    int head, int lane, int swing, int preem)
+nvd0_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 {
-	struct nouveau_bios *bios = nouveau_bios(disp);
-	struct nv50_disp_priv *priv = (void *)disp;
-	const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 shift = nvd0_sor_dp_lane_map(priv, ln);
 	const u32 loff = nvd0_sor_loff(outp);
-	u32 addr, data[3];
+	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
 
-	addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+				  outp->base.info.hashm,
 				  &ver, &hdr, &cnt, &len, &info);
 	if (!addr)
 		return -ENODEV;
 
-	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+	addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
 				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
 
 	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
 	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
-	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
 	return 0;
 }
 
-const struct nouveau_dp_func
-nvd0_sor_dp_func = {
+struct nvkm_output_dp_impl
+nvd0_sor_dp_impl = {
+	.base.base.handle = DCB_OUTPUT_DP,
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nvkm_output_dp_ctor,
+		.dtor = _nvkm_output_dp_dtor,
+		.init = _nvkm_output_dp_init,
+		.fini = _nvkm_output_dp_fini,
+	},
 	.pattern = nvd0_sor_dp_pattern,
+	.lnk_pwr = nv94_sor_dp_lnk_pwr,
 	.lnk_ctl = nvd0_sor_dp_lnk_ctl,
 	.drv_ctl = nvd0_sor_dp_drv_ctl,
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 6f9041ced9a2..56ed3d73bf8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -91,7 +91,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 	if (!chan->user)
 		return -EFAULT;
 
-	nouveau_event_trigger(priv->cevent, 0);
+	nouveau_event_trigger(priv->cevent, 1, 0);
 
 	chan->size = size;
 	return 0;
@@ -194,11 +194,11 @@ nouveau_fifo_create_(struct nouveau_object *parent,
194 if (!priv->channel) 194 if (!priv->channel)
195 return -ENOMEM; 195 return -ENOMEM;
196 196
197 ret = nouveau_event_create(1, &priv->cevent); 197 ret = nouveau_event_create(1, 1, &priv->cevent);
198 if (ret) 198 if (ret)
199 return ret; 199 return ret;
200 200
201 ret = nouveau_event_create(1, &priv->uevent); 201 ret = nouveau_event_create(1, 1, &priv->uevent);
202 if (ret) 202 if (ret)
203 return ret; 203 return ret;
204 204
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c
new file mode 100644
index 000000000000..327456eae963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "nve0.h"
24
25struct nouveau_oclass *
26gk20a_fifo_oclass = &(struct nve0_fifo_impl) {
27 .base.handle = NV_ENGINE(FIFO, 0xea),
28 .base.ofuncs = &(struct nouveau_ofuncs) {
29 .ctor = nve0_fifo_ctor,
30 .dtor = nve0_fifo_dtor,
31 .init = nve0_fifo_init,
32 .fini = nve0_fifo_fini,
33 },
34 .channels = 128,
35}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 54f26cc801c7..c61b16a63884 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -539,7 +539,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
539 } 539 }
540 540
541 if (status & 0x40000000) { 541 if (status & 0x40000000) {
542 nouveau_event_trigger(priv->base.uevent, 0); 542 nouveau_event_trigger(priv->base.uevent, 1, 0);
543 nv_wr32(priv, 0x002100, 0x40000000); 543 nv_wr32(priv, 0x002100, 0x40000000);
544 status &= ~0x40000000; 544 status &= ~0x40000000;
545 } 545 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index fe0f41e65d9b..6e5ac16e5460 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -389,14 +389,14 @@ nv84_fifo_cclass = {
389 ******************************************************************************/ 389 ******************************************************************************/
390 390
391static void 391static void
392nv84_fifo_uevent_enable(struct nouveau_event *event, int index) 392nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
393{ 393{
394 struct nv84_fifo_priv *priv = event->priv; 394 struct nv84_fifo_priv *priv = event->priv;
395 nv_mask(priv, 0x002140, 0x40000000, 0x40000000); 395 nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
396} 396}
397 397
398static void 398static void
399nv84_fifo_uevent_disable(struct nouveau_event *event, int index) 399nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
400{ 400{
401 struct nv84_fifo_priv *priv = event->priv; 401 struct nv84_fifo_priv *priv = event->priv;
402 nv_mask(priv, 0x002140, 0x40000000, 0x00000000); 402 nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index fa1e719872b7..ae4a4dc5642a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -730,7 +730,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
730 for (unkn = 0; unkn < 8; unkn++) { 730 for (unkn = 0; unkn < 8; unkn++) {
731 u32 ints = (intr >> (unkn * 0x04)) & inte; 731 u32 ints = (intr >> (unkn * 0x04)) & inte;
732 if (ints & 0x1) { 732 if (ints & 0x1) {
733 nouveau_event_trigger(priv->base.uevent, 0); 733 nouveau_event_trigger(priv->base.uevent, 1, 0);
734 ints &= ~1; 734 ints &= ~1;
735 } 735 }
736 if (ints) { 736 if (ints) {
@@ -827,14 +827,14 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
827} 827}
828 828
829static void 829static void
830nvc0_fifo_uevent_enable(struct nouveau_event *event, int index) 830nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
831{ 831{
832 struct nvc0_fifo_priv *priv = event->priv; 832 struct nvc0_fifo_priv *priv = event->priv;
833 nv_mask(priv, 0x002140, 0x80000000, 0x80000000); 833 nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
834} 834}
835 835
836static void 836static void
837nvc0_fifo_uevent_disable(struct nouveau_event *event, int index) 837nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
838{ 838{
839 struct nvc0_fifo_priv *priv = event->priv; 839 struct nvc0_fifo_priv *priv = event->priv;
840 nv_mask(priv, 0x002140, 0x80000000, 0x00000000); 840 nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index a9a1a9c9f9f2..298063edb92d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -859,7 +859,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
859static void 859static void
860nve0_fifo_intr_engine(struct nve0_fifo_priv *priv) 860nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
861{ 861{
862 nouveau_event_trigger(priv->base.uevent, 0); 862 nouveau_event_trigger(priv->base.uevent, 1, 0);
863} 863}
864 864
865static void 865static void
@@ -952,14 +952,14 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
952} 952}
953 953
954static void 954static void
955nve0_fifo_uevent_enable(struct nouveau_event *event, int index) 955nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
956{ 956{
957 struct nve0_fifo_priv *priv = event->priv; 957 struct nve0_fifo_priv *priv = event->priv;
958 nv_mask(priv, 0x002140, 0x80000000, 0x80000000); 958 nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
959} 959}
960 960
961static void 961static void
962nve0_fifo_uevent_disable(struct nouveau_event *event, int index) 962nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
963{ 963{
964 struct nve0_fifo_priv *priv = event->priv; 964 struct nve0_fifo_priv *priv = event->priv;
965 nv_mask(priv, 0x002140, 0x80000000, 0x00000000); 965 nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
index 014344ebee66..e96b32bb1bbc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
@@ -8,6 +8,7 @@ int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
8 struct nouveau_object **); 8 struct nouveau_object **);
9void nve0_fifo_dtor(struct nouveau_object *); 9void nve0_fifo_dtor(struct nouveau_object *);
10int nve0_fifo_init(struct nouveau_object *); 10int nve0_fifo_init(struct nouveau_object *);
11int nve0_fifo_fini(struct nouveau_object *, bool);
11 12
12struct nve0_fifo_impl { 13struct nve0_fifo_impl {
13 struct nouveau_oclass base; 14 struct nouveau_oclass base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
new file mode 100644
index 000000000000..224ee0287ab7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "ctxnvc0.h"
24
25static const struct nvc0_graph_pack
26gk20a_grctx_pack_mthd[] = {
27 { nve4_grctx_init_a097_0, 0xa297 },
28 { nvc0_grctx_init_902d_0, 0x902d },
29 {}
30};
31
32struct nouveau_oclass *
33gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
34 .base.handle = NV_ENGCTX(GR, 0xea),
35 .base.ofuncs = &(struct nouveau_ofuncs) {
36 .ctor = nvc0_graph_context_ctor,
37 .dtor = nvc0_graph_context_dtor,
38 .init = _nouveau_graph_context_init,
39 .fini = _nouveau_graph_context_fini,
40 .rd32 = _nouveau_graph_context_rd32,
41 .wr32 = _nouveau_graph_context_wr32,
42 },
43 .main = nve4_grctx_generate_main,
44 .mods = nve4_grctx_generate_mods,
45 .unkn = nve4_grctx_generate_unkn,
46 .hub = nve4_grctx_pack_hub,
47 .gpc = nve4_grctx_pack_gpc,
48 .zcull = nvc0_grctx_pack_zcull,
49 .tpc = nve4_grctx_pack_tpc,
50 .ppc = nve4_grctx_pack_ppc,
51 .icmd = nve4_grctx_pack_icmd,
52 .mthd = gk20a_grctx_pack_mthd,
53}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index 48351b4d6d6b..8de4a4291548 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -545,10 +545,12 @@ nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
545 mmio_list(0x408010, 0x80000000, 0, 0); 545 mmio_list(0x408010, 0x80000000, 0, 0);
546 mmio_list(0x419004, 0x00000000, 8, 1); 546 mmio_list(0x419004, 0x00000000, 8, 1);
547 mmio_list(0x419008, 0x00000000, 0, 0); 547 mmio_list(0x419008, 0x00000000, 0, 0);
548 mmio_list(0x4064cc, 0x80000000, 0, 0);
548 mmio_list(0x408004, 0x00000000, 8, 0); 549 mmio_list(0x408004, 0x00000000, 8, 0);
549 mmio_list(0x408008, 0x80000030, 0, 0); 550 mmio_list(0x408008, 0x80000030, 0, 0);
550 mmio_list(0x418808, 0x00000000, 8, 0); 551 mmio_list(0x418808, 0x00000000, 8, 0);
551 mmio_list(0x41880c, 0x80000030, 0, 0); 552 mmio_list(0x41880c, 0x80000030, 0, 0);
553 mmio_list(0x4064c8, 0x00c20200, 0, 0);
552 mmio_list(0x418810, 0x80000000, 12, 2); 554 mmio_list(0x418810, 0x80000000, 12, 2);
553 mmio_list(0x419848, 0x10000000, 12, 2); 555 mmio_list(0x419848, 0x10000000, 12, 2);
554 556
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
index 9c815d1f99ef..8da8b627b9d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -69,7 +69,9 @@ extern struct nouveau_oclass *nvd7_grctx_oclass;
69extern struct nouveau_oclass *nvd9_grctx_oclass; 69extern struct nouveau_oclass *nvd9_grctx_oclass;
70 70
71extern struct nouveau_oclass *nve4_grctx_oclass; 71extern struct nouveau_oclass *nve4_grctx_oclass;
72extern struct nouveau_oclass *gk20a_grctx_oclass;
72void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); 73void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
74void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
73void nve4_grctx_generate_unkn(struct nvc0_graph_priv *); 75void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
74void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); 76void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
75 77
@@ -151,6 +153,13 @@ extern const struct nvc0_graph_init nve4_grctx_init_gpm_0[];
151 153
152extern const struct nvc0_graph_init nve4_grctx_init_pes_0[]; 154extern const struct nvc0_graph_init nve4_grctx_init_pes_0[];
153 155
156extern const struct nvc0_graph_pack nve4_grctx_pack_hub[];
157extern const struct nvc0_graph_pack nve4_grctx_pack_gpc[];
158extern const struct nvc0_graph_pack nve4_grctx_pack_tpc[];
159extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
160extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
161extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
162
154extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[]; 163extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
155 164
156extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[]; 165extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index 49a14b116a5f..c5b249238587 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -272,13 +272,13 @@ nve4_grctx_init_icmd_0[] = {
272 {} 272 {}
273}; 273};
274 274
275static const struct nvc0_graph_pack 275const struct nvc0_graph_pack
276nve4_grctx_pack_icmd[] = { 276nve4_grctx_pack_icmd[] = {
277 { nve4_grctx_init_icmd_0 }, 277 { nve4_grctx_init_icmd_0 },
278 {} 278 {}
279}; 279};
280 280
281static const struct nvc0_graph_init 281const struct nvc0_graph_init
282nve4_grctx_init_a097_0[] = { 282nve4_grctx_init_a097_0[] = {
283 { 0x000800, 8, 0x40, 0x00000000 }, 283 { 0x000800, 8, 0x40, 0x00000000 },
284 { 0x000804, 8, 0x40, 0x00000000 }, 284 { 0x000804, 8, 0x40, 0x00000000 },
@@ -697,7 +697,7 @@ nve4_grctx_init_be_0[] = {
697 {} 697 {}
698}; 698};
699 699
700static const struct nvc0_graph_pack 700const struct nvc0_graph_pack
701nve4_grctx_pack_hub[] = { 701nve4_grctx_pack_hub[] = {
702 { nvc0_grctx_init_main_0 }, 702 { nvc0_grctx_init_main_0 },
703 { nve4_grctx_init_fe_0 }, 703 { nve4_grctx_init_fe_0 },
@@ -737,7 +737,7 @@ nve4_grctx_init_gpm_0[] = {
737 {} 737 {}
738}; 738};
739 739
740static const struct nvc0_graph_pack 740const struct nvc0_graph_pack
741nve4_grctx_pack_gpc[] = { 741nve4_grctx_pack_gpc[] = {
742 { nvc0_grctx_init_gpc_unk_0 }, 742 { nvc0_grctx_init_gpc_unk_0 },
743 { nvd9_grctx_init_prop_0 }, 743 { nvd9_grctx_init_prop_0 },
@@ -802,7 +802,7 @@ nve4_grctx_init_sm_0[] = {
802 {} 802 {}
803}; 803};
804 804
805static const struct nvc0_graph_pack 805const struct nvc0_graph_pack
806nve4_grctx_pack_tpc[] = { 806nve4_grctx_pack_tpc[] = {
807 { nvd7_grctx_init_pe_0 }, 807 { nvd7_grctx_init_pe_0 },
808 { nve4_grctx_init_tex_0 }, 808 { nve4_grctx_init_tex_0 },
@@ -826,7 +826,7 @@ nve4_grctx_init_cbm_0[] = {
826 {} 826 {}
827}; 827};
828 828
829static const struct nvc0_graph_pack 829const struct nvc0_graph_pack
830nve4_grctx_pack_ppc[] = { 830nve4_grctx_pack_ppc[] = {
831 { nve4_grctx_init_pes_0 }, 831 { nve4_grctx_init_pes_0 },
832 { nve4_grctx_init_cbm_0 }, 832 { nve4_grctx_init_cbm_0 },
@@ -838,7 +838,7 @@ nve4_grctx_pack_ppc[] = {
838 * PGRAPH context implementation 838 * PGRAPH context implementation
839 ******************************************************************************/ 839 ******************************************************************************/
840 840
841static void 841void
842nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 842nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
843{ 843{
844 u32 magic[GPC_MAX][2]; 844 u32 magic[GPC_MAX][2];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index 0fab95e49f53..dec03f04114d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -842,7 +842,7 @@ nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
842 u16 magic3 = 0x0648; 842 u16 magic3 = 0x0648;
843 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; 843 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
844 magic[gpc][1] = 0x00000000 | (magic1 << 16); 844 magic[gpc][1] = 0x00000000 | (magic1 << 16);
845 offset += 0x0324 * (priv->tpc_nr[gpc] - 1);; 845 offset += 0x0324 * (priv->tpc_nr[gpc] - 1);
846 magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset; 846 magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset;
847 magic[gpc][3] = 0x00000000 | (magic3 << 16); 847 magic[gpc][3] = 0x00000000 | (magic3 << 16);
848 offset += 0x0324; 848 offset += 0x0324;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
new file mode 100644
index 000000000000..83048a56430d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
@@ -0,0 +1,47 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "nvc0.h"
24#include "ctxnvc0.h"
25
26static struct nouveau_oclass
27gk20a_graph_sclass[] = {
28 { 0x902d, &nouveau_object_ofuncs },
29 { 0xa040, &nouveau_object_ofuncs },
30 { 0xa297, &nouveau_object_ofuncs },
31 { 0xa0c0, &nouveau_object_ofuncs },
32 {}
33};
34
35struct nouveau_oclass *
36gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
37 .base.handle = NV_ENGINE(GR, 0xea),
38 .base.ofuncs = &(struct nouveau_ofuncs) {
39 .ctor = nvc0_graph_ctor,
40 .dtor = nvc0_graph_dtor,
41 .init = nve4_graph_init,
42 .fini = nve4_graph_fini,
43 },
44 .cclass = &gk20a_grctx_oclass,
45 .sclass = gk20a_graph_sclass,
46 .mmio = nve4_graph_pack_mmio,
47}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 2c7809e1a09b..1a2d56493cf6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -901,7 +901,7 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
901 nv_engine(priv)->sclass = nvaf_graph_sclass; 901 nv_engine(priv)->sclass = nvaf_graph_sclass;
902 break; 902 break;
903 903
904 }; 904 }
905 905
906 /* unfortunate hw bug workaround... */ 906 /* unfortunate hw bug workaround... */
907 if (nv_device(priv)->chipset != 0x50 && 907 if (nv_device(priv)->chipset != 0x50 &&
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index f3c7329da0a0..bf7bdb1f291e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -894,6 +894,10 @@ nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
894 nv_wr32(priv, fuc_base + 0x0188, i >> 6); 894 nv_wr32(priv, fuc_base + 0x0188, i >> 6);
895 nv_wr32(priv, fuc_base + 0x0184, code->data[i]); 895 nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
896 } 896 }
897
898 /* code must be padded to 0x40 words */
899 for (; i & 0x3f; i++)
900 nv_wr32(priv, fuc_base + 0x0184, 0);
897} 901}
898 902
899static void 903static void
@@ -1259,10 +1263,14 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1259 struct nvc0_graph_oclass *oclass = (void *)bclass; 1263 struct nvc0_graph_oclass *oclass = (void *)bclass;
1260 struct nouveau_device *device = nv_device(parent); 1264 struct nouveau_device *device = nv_device(parent);
1261 struct nvc0_graph_priv *priv; 1265 struct nvc0_graph_priv *priv;
1266 bool use_ext_fw, enable;
1262 int ret, i; 1267 int ret, i;
1263 1268
1264 ret = nouveau_graph_create(parent, engine, bclass, 1269 use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
1265 (oclass->fecs.ucode != NULL), &priv); 1270 oclass->fecs.ucode == NULL);
1271 enable = use_ext_fw || oclass->fecs.ucode != NULL;
1272
1273 ret = nouveau_graph_create(parent, engine, bclass, enable, &priv);
1266 *pobject = nv_object(priv); 1274 *pobject = nv_object(priv);
1267 if (ret) 1275 if (ret)
1268 return ret; 1276 return ret;
@@ -1272,7 +1280,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1272 1280
1273 priv->base.units = nvc0_graph_units; 1281 priv->base.units = nvc0_graph_units;
1274 1282
1275 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { 1283 if (use_ext_fw) {
1276 nv_info(priv, "using external firmware\n"); 1284 nv_info(priv, "using external firmware\n");
1277 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || 1285 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
1278 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) || 1286 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
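The first nvc0.c hunk above zero-pads firmware uploads because, per its new comment, code must be padded to 0x40 words. A self-contained model of the loop's arithmetic (names and the starting count are illustrative):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int i = 0x2f;	/* words already written for this segment */
    	unsigned int pad = 0;

    	for (; i & 0x3f; i++)	/* same termination test as the new loop */
    		pad++;		/* stands in for nv_wr32(..., 0x0184, 0) */

    	printf("%u zero words appended, total %#x\n", pad, i);
    	return 0;
    }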
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 90d44616c876..75203a99d902 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -116,6 +116,7 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
 		    struct nouveau_object **);
 void nvc0_graph_dtor(struct nouveau_object *);
 int  nvc0_graph_init(struct nouveau_object *);
+int  nve4_graph_fini(struct nouveau_object *, bool);
 int  nve4_graph_init(struct nouveau_object *);
 
 extern struct nouveau_oclass nvc0_graph_sclass[];
@@ -217,6 +218,7 @@ extern const struct nvc0_graph_init nve4_graph_init_main_0[];
 extern const struct nvc0_graph_init nve4_graph_init_tpccs_0[];
 extern const struct nvc0_graph_init nve4_graph_init_pe_0[];
 extern const struct nvc0_graph_init nve4_graph_init_be_0[];
+extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
 
 extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
 extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index f7c011217175..51e0c075ad34 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -151,7 +151,7 @@ nve4_graph_init_be_0[] = {
 	{}
 };
 
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
 nve4_graph_pack_mmio[] = {
 	{ nve4_graph_init_main_0 },
 	{ nvc0_graph_init_fe_0 },
@@ -189,7 +189,7 @@ nve4_graph_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
+int
 nve4_graph_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nvc0_graph_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index 5ce686ee729e..f3b4d9dbf23c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -124,7 +124,7 @@ nv50_software_sclass[] = {
  ******************************************************************************/
 
 static int
-nv50_software_vblsem_release(void *data, int head)
+nv50_software_vblsem_release(void *data, u32 type, int head)
 {
 	struct nv50_software_chan *chan = data;
 	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
@@ -183,7 +183,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
 		return -ENOMEM;
 
 	for (i = 0; i < chan->vblank.nr_event; i++) {
-		ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+		ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank,
 					chan, &chan->vblank.event[i]);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
index 2de370c21279..bb49a7a20857 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -19,7 +19,7 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
 
 struct nv50_software_cclass {
 	struct nouveau_oclass base;
-	int (*vblank)(void *, int);
+	int (*vblank)(void *, u32, int);
 };
 
 struct nv50_software_chan {
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index f9430c1bf3e5..135c20f38356 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -104,7 +104,7 @@ nvc0_software_sclass[] = {
  ******************************************************************************/
 
 static int
-nvc0_software_vblsem_release(void *data, int head)
+nvc0_software_vblsem_release(void *data, u32 type, int head)
 {
 	struct nv50_software_chan *chan = data;
 	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 9c0cd73462d9..e0c812bc884f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -295,6 +295,10 @@ struct nv04_display_scanoutpos {
 #define NV84_DISP_SOR_HDMI_PWR_REKEY                               0x0000007f
 #define NV50_DISP_SOR_LVDS_SCRIPT                                  0x00013000
 #define NV50_DISP_SOR_LVDS_SCRIPT_ID                               0x0000ffff
+#define NV94_DISP_SOR_DP_PWR                                       0x00016000
+#define NV94_DISP_SOR_DP_PWR_STATE                                 0x00000001
+#define NV94_DISP_SOR_DP_PWR_STATE_OFF                             0x00000000
+#define NV94_DISP_SOR_DP_PWR_STATE_ON                              0x00000001
 
 #define NV50_DISP_DAC_MTHD                                         0x00020000
 #define NV50_DISP_DAC_MTHD_TYPE                                    0x0000f000
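The new NV94_DISP_SOR_DP_PWR method carries its state in bit 0 of the data word, as the _STATE masks above spell out. A hedged sketch of selecting the data argument (the helper name is illustrative, not part of the driver):

    /* Illustrative helper only: choose the data word for the new method. */
    static inline unsigned int nv94_sor_dp_pwr_data(int on)
    {
    	return on ? NV94_DISP_SOR_DP_PWR_STATE_ON	/* 0x00000001 */
    		  : NV94_DISP_SOR_DP_PWR_STATE_OFF;	/* 0x00000000 */
    }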
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 5d539ebff3ed..ba3f1a76a815 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -12,32 +12,33 @@ struct nouveau_eventh {
 	struct nouveau_event *event;
 	struct list_head head;
 	unsigned long flags;
+	u32 types;
 	int index;
-	int (*func)(void *, int);
+	int (*func)(void *, u32, int);
 	void *priv;
 };
 
 struct nouveau_event {
-	spinlock_t list_lock;
-	spinlock_t refs_lock;
-
 	void *priv;
-	void (*enable)(struct nouveau_event *, int index);
-	void (*disable)(struct nouveau_event *, int index);
+	int  (*check)(struct nouveau_event *, u32 type, int index);
+	void (*enable)(struct nouveau_event *, int type, int index);
+	void (*disable)(struct nouveau_event *, int type, int index);
 
+	int types_nr;
 	int index_nr;
-	struct {
-		struct list_head list;
-		int refs;
-	} index[];
+
+	spinlock_t list_lock;
+	struct list_head *list;
+	spinlock_t refs_lock;
+	int refs[];
 };
 
-int  nouveau_event_create(int index_nr, struct nouveau_event **);
+int  nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **);
 void nouveau_event_destroy(struct nouveau_event **);
-void nouveau_event_trigger(struct nouveau_event *, int index);
+void nouveau_event_trigger(struct nouveau_event *, u32 types, int index);
 
-int  nouveau_event_new(struct nouveau_event *, int index,
-		       int (*func)(void *, int), void *,
+int  nouveau_event_new(struct nouveau_event *, u32 types, int index,
+		       int (*func)(void *, u32, int), void *,
 		       struct nouveau_eventh **);
 void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
 void nouveau_event_get(struct nouveau_eventh *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index fd0c68804de3..fde842896806 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -6,8 +6,19 @@
 #include <core/device.h>
 #include <core/event.h>
 
+enum nvkm_hpd_event {
+	NVKM_HPD_PLUG = 1,
+	NVKM_HPD_UNPLUG = 2,
+	NVKM_HPD_IRQ = 4,
+	NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ)
+};
+
 struct nouveau_disp {
 	struct nouveau_engine base;
+
+	struct list_head outp;
+	struct nouveau_event *hpd;
+
 	struct nouveau_event *vblank;
 };
 
@@ -17,25 +28,6 @@ nouveau_disp(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
 }
 
-#define nouveau_disp_create(p,e,c,h,i,x,d) \
-	nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
-			     sizeof(**d), (void **)d)
-#define nouveau_disp_destroy(d) ({ \
-	struct nouveau_disp *disp = (d); \
-	_nouveau_disp_dtor(nv_object(disp)); \
-})
-#define nouveau_disp_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_disp_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-int  nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
-			  struct nouveau_oclass *, int heads,
-			  const char *, const char *, int, void **);
-void _nouveau_disp_dtor(struct nouveau_object *);
-#define _nouveau_disp_init _nouveau_engine_init
-#define _nouveau_disp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass *nv04_disp_oclass;
 extern struct nouveau_oclass *nv50_disp_oclass;
 extern struct nouveau_oclass *nv84_disp_oclass;
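With the event.h rework above, handlers both request and receive a bitmask of event types, and the display engine now exposes a per-connector hotplug source as disp->hpd. A sketch of registering an HPD handler, using only the signatures and NVKM_HPD bits shown above; the handler body, names, and connector index are placeholders:

    static int
    my_hpd_func(void *priv, u32 types, int index)
    {
    	/* 'types' says which of PLUG/UNPLUG/IRQ fired on connector 'index';
    	 * the keep-vs-drop return convention lives in event.h and is not
    	 * shown in this hunk */
    	return 0;
    }

    static int
    my_register_hpd(struct nouveau_disp *disp, struct nouveau_eventh **ev)
    {
    	/* subscribe to all three HPD bits on connector index 0 */
    	return nouveau_event_new(disp->hpd, NVKM_HPD, 0, my_hpd_func,
    				 NULL, ev);
    }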
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 26b6b2bb1112..b639eb2c74ff 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass;
 extern struct nouveau_oclass *nv84_fifo_oclass;
 extern struct nouveau_oclass *nvc0_fifo_oclass;
 extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *gk20a_fifo_oclass;
 extern struct nouveau_oclass *nv108_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 871edfdf3d5b..8c1d4772da0c 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -68,6 +68,7 @@ extern struct nouveau_oclass *nvc8_graph_oclass;
 extern struct nouveau_oclass *nvd7_graph_oclass;
 extern struct nouveau_oclass *nvd9_graph_oclass;
 extern struct nouveau_oclass *nve4_graph_oclass;
+extern struct nouveau_oclass *gk20a_graph_oclass;
 extern struct nouveau_oclass *nvf0_graph_oclass;
 extern struct nouveau_oclass *nv108_graph_oclass;
 extern struct nouveau_oclass *gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
index a32feb3f3fb6..f3930c27cb7a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -22,7 +22,25 @@ enum dcb_connector_type {
 	DCB_CONNECTOR_NONE = 0xff
 };
 
-u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
+struct nvbios_connT {
+};
+
+u32 nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_connT *info);
+
+struct nvbios_connE {
+	u8 type;
+	u8 location;
+	u8 hpd;
+	u8 dp;
+	u8 di;
+	u8 sr;
+	u8 lcdid;
+};
+
+u32 nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr);
+u32 nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr,
+		  struct nvbios_connE *info);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 6e54218b55fc..728206e21777 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -17,9 +17,10 @@ u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
 		       struct nvbios_dpout *);
 
 struct nvbios_dpcfg {
-	u8 drv;
-	u8 pre;
-	u8 unk;
+	u8 pc;
+	u8 dc;
+	u8 pe;
+	u8 tx_pu;
 };
 
 u16
@@ -27,7 +28,7 @@ nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
 		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		   struct nvbios_dpcfg *);
 u16
-nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 pc, u8 vs, u8 pe,
 		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		   struct nvbios_dpcfg *);
 
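The dpcfg rename above tracks DP link-training terminology: the lookup now keys on post-cursor (pc) as well as voltage swing (vs) and pre-emphasis (pe), and the entry yields dc/pe/pc values (presumably drive current, pre-emphasis, and post-cursor, matching the registers they feed in drv_ctl) plus the shared tx_pu pull-up. A hedged sketch exercising the new argument order; the wrapper function and level values are illustrative:

    /* Sketch only: a lookup with the new nvbios_dpcfg_match() signature. */
    static int
    sample_dpcfg_lookup(struct nouveau_bios *bios, u16 outp)
    {
    	struct nvbios_dpcfg ocfg;
    	u8 ver, hdr, cnt, len;
    	u16 addr;

    	/* post-cursor 0, voltage swing 1, pre-emphasis 2 (arbitrary) */
    	addr = nvbios_dpcfg_match(bios, outp, 0, 1, 2,
    				  &ver, &hdr, &cnt, &len, &ocfg);
    	if (!addr)
    		return -EINVAL;
    	/* ocfg.dc, ocfg.pe, ocfg.pc and the shared ocfg.tx_pu are valid */
    	return 0;
    }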
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 8f4ced75444a..c01e29c9f89a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -77,6 +77,8 @@ struct nouveau_clock {
 	int tstate; /* thermal adjustment (max-) */
 	int dstate; /* display adjustment (min+) */
 
+	bool allow_reclock;
+
 	int  (*read)(struct nouveau_clock *, enum nv_clk_src);
 	int  (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
 	int  (*prog)(struct nouveau_clock *);
@@ -106,8 +108,8 @@ struct nouveau_clocks {
 	int mdiv;
 };
 
-#define nouveau_clock_create(p,e,o,i,d) \
-	nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nouveau_clock_create(p,e,o,i,r,d) \
+	nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d)
 #define nouveau_clock_destroy(p) ({ \
 	struct nouveau_clock *clk = (p); \
 	_nouveau_clock_dtor(nv_object(clk)); \
@@ -121,7 +123,7 @@ struct nouveau_clocks {
 
 int  nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
 			   struct nouveau_oclass *,
			   struct nouveau_clocks *, bool, int, void **);
 void _nouveau_clock_dtor(struct nouveau_object *);
 int  _nouveau_clock_init(struct nouveau_object *);
 #define _nouveau_clock_fini _nouveau_subdev_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 58c7ccdebb01..871e73914b24 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -105,6 +105,7 @@ extern struct nouveau_oclass *nvaa_fb_oclass;
 extern struct nouveau_oclass *nvaf_fb_oclass;
 extern struct nouveau_oclass *nvc0_fb_oclass;
 extern struct nouveau_oclass *nve0_fb_oclass;
+extern struct nouveau_oclass *gk20a_fb_oclass;
 extern struct nouveau_oclass *gm107_fb_oclass;
 
 #include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index c85b9f1579ad..612d82ab683d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -8,17 +8,18 @@
 #include <subdev/bios.h>
 #include <subdev/bios/gpio.h>
 
+enum nvkm_gpio_event {
+	NVKM_GPIO_HI = 1,
+	NVKM_GPIO_LO = 2,
+	NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO),
+};
+
 struct nouveau_gpio {
 	struct nouveau_subdev base;
 
 	struct nouveau_event *events;
 
-	/* hardware interfaces */
 	void (*reset)(struct nouveau_gpio *, u8 func);
-	int  (*drive)(struct nouveau_gpio *, int line, int dir, int out);
-	int  (*sense)(struct nouveau_gpio *, int line);
-
-	/* software interfaces */
 	int  (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
 		     struct dcb_gpio_func *);
 	int  (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
@@ -31,23 +32,10 @@ nouveau_gpio(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
 }
 
-#define nouveau_gpio_create(p,e,o,l,d) \
-	nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) ({ \
-	struct nouveau_gpio *gpio = (p); \
-	_nouveau_gpio_dtor(nv_object(gpio)); \
-})
-#define nouveau_gpio_fini(p,s) \
-	nouveau_subdev_fini(&(p)->base, (s))
-
-int  nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
-			  struct nouveau_oclass *, int, int, void **);
-void _nouveau_gpio_dtor(struct nouveau_object *);
-int  nouveau_gpio_init(struct nouveau_gpio *);
-
-extern struct nouveau_oclass nv10_gpio_oclass;
-extern struct nouveau_oclass nv50_gpio_oclass;
-extern struct nouveau_oclass nvd0_gpio_oclass;
-extern struct nouveau_oclass nve0_gpio_oclass;
+extern struct nouveau_oclass *nv10_gpio_oclass;
+extern struct nouveau_oclass *nv50_gpio_oclass;
+extern struct nouveau_oclass *nv92_gpio_oclass;
+extern struct nouveau_oclass *nvd0_gpio_oclass;
+extern struct nouveau_oclass *nve0_gpio_oclass;
 
 #endif
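The GPIO rework above folds line-change reporting into the same typed-event scheme: NVKM_GPIO_HI and NVKM_GPIO_LO are single bits, so a handler can subscribe to one edge or, via NVKM_GPIO_TOGGLED, to both. A standalone model of the bitmask test (enum values copied from the header; the rest is illustrative):

    #include <stdio.h>

    enum nvkm_gpio_event {
    	NVKM_GPIO_HI = 1,
    	NVKM_GPIO_LO = 2,
    	NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO),
    };

    int main(void)
    {
    	unsigned int requested = NVKM_GPIO_TOGGLED;	/* both edges */
    	unsigned int fired = NVKM_GPIO_LO;		/* line went low */

    	if (fired & requested)
    		printf("handler would run (types=%u)\n", fired);
    	return 0;
    }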
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7f50a858b16f..db1b39d08013 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -14,52 +14,41 @@
 #define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
 #define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
 
+enum nvkm_i2c_event {
+	NVKM_I2C_PLUG = 1,
+	NVKM_I2C_UNPLUG = 2,
+	NVKM_I2C_IRQ = 4,
+	NVKM_I2C_DONE = 8,
+	NVKM_I2C_ANY = (NVKM_I2C_PLUG |
+			NVKM_I2C_UNPLUG |
+			NVKM_I2C_IRQ |
+			NVKM_I2C_DONE),
+};
+
 struct nouveau_i2c_port {
 	struct nouveau_object base;
 	struct i2c_adapter adapter;
+	struct mutex mutex;
 
 	struct list_head head;
 	u8  index;
+	int aux;
 
 	const struct nouveau_i2c_func *func;
 };
 
 struct nouveau_i2c_func {
-	void (*acquire)(struct nouveau_i2c_port *);
-	void (*release)(struct nouveau_i2c_port *);
-
 	void (*drive_scl)(struct nouveau_i2c_port *, int);
 	void (*drive_sda)(struct nouveau_i2c_port *, int);
 	int  (*sense_scl)(struct nouveau_i2c_port *);
 	int  (*sense_sda)(struct nouveau_i2c_port *);
 
-	int  (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+	int  (*aux)(struct nouveau_i2c_port *, bool, u8, u32, u8 *, u8);
 	int  (*pattern)(struct nouveau_i2c_port *, int pattern);
 	int  (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
 	int  (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
 };
 
-#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
-	nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
-				 sizeof(**d), (void **)d)
-#define nouveau_i2c_port_destroy(p) ({ \
-	struct nouveau_i2c_port *port = (p); \
-	_nouveau_i2c_port_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_port_init(p) \
-	nouveau_object_init(&(p)->base)
-#define nouveau_i2c_port_fini(p,s) \
-	nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
-			     struct nouveau_oclass *, u8,
-			     const struct i2c_algorithm *,
-			     const struct nouveau_i2c_func *,
-			     int, void **);
-void _nouveau_i2c_port_dtor(struct nouveau_object *);
-#define _nouveau_i2c_port_init nouveau_object_init
-#define _nouveau_i2c_port_fini nouveau_object_fini
-
 struct nouveau_i2c_board_info {
 	struct i2c_board_info dev;
 	u8 udelay; /* set to 0 to use the standard delay */
@@ -67,13 +56,20 @@ struct nouveau_i2c_board_info {
 
 struct nouveau_i2c {
 	struct nouveau_subdev base;
+	struct nouveau_event *ntfy;
 
 	struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
 	struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
+	int  (*acquire_pad)(struct nouveau_i2c_port *, unsigned long timeout);
+	void (*release_pad)(struct nouveau_i2c_port *);
+	int  (*acquire)(struct nouveau_i2c_port *, unsigned long timeout);
+	void (*release)(struct nouveau_i2c_port *);
 	int  (*identify)(struct nouveau_i2c *, int index,
 			 const char *what, struct nouveau_i2c_board_info *,
 			 bool (*match)(struct nouveau_i2c_port *,
 				       struct i2c_board_info *, void *), void *);
+
+	wait_queue_head_t wait;
 	struct list_head ports;
 };
 
@@ -83,37 +79,12 @@ nouveau_i2c(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
 }
 
-#define nouveau_i2c_create(p,e,o,s,d) \
-	nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
-#define nouveau_i2c_destroy(p) ({ \
-	struct nouveau_i2c *i2c = (p); \
-	_nouveau_i2c_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_init(p) ({ \
-	struct nouveau_i2c *i2c = (p); \
-	_nouveau_i2c_init(nv_object(i2c)); \
-})
-#define nouveau_i2c_fini(p,s) ({ \
-	struct nouveau_i2c *i2c = (p); \
-	_nouveau_i2c_fini(nv_object(i2c), (s)); \
-})
-
-int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, struct nouveau_oclass *,
-			int, void **);
-void _nouveau_i2c_dtor(struct nouveau_object *);
-int  _nouveau_i2c_init(struct nouveau_object *);
-int  _nouveau_i2c_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_i2c_oclass;
-extern struct nouveau_oclass nv4e_i2c_oclass;
-extern struct nouveau_oclass nv50_i2c_oclass;
-extern struct nouveau_oclass nv94_i2c_oclass;
-extern struct nouveau_oclass nvd0_i2c_oclass;
-extern struct nouveau_oclass nouveau_anx9805_sclass[];
-
-extern const struct i2c_algorithm nouveau_i2c_bit_algo;
-extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+extern struct nouveau_oclass *nv04_i2c_oclass;
+extern struct nouveau_oclass *nv4e_i2c_oclass;
+extern struct nouveau_oclass *nv50_i2c_oclass;
+extern struct nouveau_oclass *nv94_i2c_oclass;
+extern struct nouveau_oclass *nvd0_i2c_oclass;
+extern struct nouveau_oclass *nve0_i2c_oclass;
 
 static inline int
 nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
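The i2c.h changes above move port locking out of nouveau_i2c_func and onto nouveau_i2c itself as acquire/release (plus pad-level acquire_pad/release_pad) ops, now taking a timeout. A sketch of the implied calling pattern, based only on the declarations above; the function name, timeout value, and its units are not specified by this hunk:

    static int
    sample_i2c_locked_xfer(struct nouveau_i2c *i2c,
    			   struct nouveau_i2c_port *port)
    {
    	int ret = i2c->acquire(port, 1000);	/* timeout illustrative */
    	if (ret)
    		return ret;

    	/* ... transfer on port->adapter while the port/pad is held ... */

    	i2c->release(port);
    	return 0;
    }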
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
index 88814f159d89..31df634c0fdc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -30,5 +30,6 @@ nouveau_ibus(void *obj)
30 30
31extern struct nouveau_oclass nvc0_ibus_oclass; 31extern struct nouveau_oclass nvc0_ibus_oclass;
32extern struct nouveau_oclass nve0_ibus_oclass; 32extern struct nouveau_oclass nve0_ibus_oclass;
33extern struct nouveau_oclass gk20a_ibus_oclass;
33 34
34#endif 35#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index bdf594116f3f..73b1ed20c8d5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -118,8 +118,10 @@ nouveau_bar_create_(struct nouveau_object *parent,
118 if (ret) 118 if (ret)
119 return ret; 119 return ret;
120 120
121 bar->iomem = ioremap(nv_device_resource_start(device, 3), 121 if (nv_device_resource_len(device, 3) != 0)
122 nv_device_resource_len(device, 3)); 122 bar->iomem = ioremap(nv_device_resource_start(device, 3),
123 nv_device_resource_len(device, 3));
124
123 return 0; 125 return 0;
124} 126}
125 127
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 3f30db62e656..ca8139b9ab27 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -30,14 +30,16 @@
30 30
31#include "priv.h" 31#include "priv.h"
32 32
33struct nvc0_bar_priv_vm {
34 struct nouveau_gpuobj *mem;
35 struct nouveau_gpuobj *pgd;
36 struct nouveau_vm *vm;
37};
38
33struct nvc0_bar_priv { 39struct nvc0_bar_priv {
34 struct nouveau_bar base; 40 struct nouveau_bar base;
35 spinlock_t lock; 41 spinlock_t lock;
36 struct { 42 struct nvc0_bar_priv_vm bar[2];
37 struct nouveau_gpuobj *mem;
38 struct nouveau_gpuobj *pgd;
39 struct nouveau_vm *vm;
40 } bar[2];
41}; 43};
42 44
43static int 45static int
@@ -79,87 +81,87 @@ nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
79} 81}
80 82
81static int 83static int
82nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 84nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
83 struct nouveau_oclass *oclass, void *data, u32 size, 85 int bar_nr)
84 struct nouveau_object **pobject)
85{ 86{
86 struct nouveau_device *device = nv_device(parent); 87 struct nouveau_device *device = nv_device(&priv->base);
87 struct nvc0_bar_priv *priv;
88 struct nouveau_gpuobj *mem;
89 struct nouveau_vm *vm; 88 struct nouveau_vm *vm;
89 resource_size_t bar_len;
90 int ret; 90 int ret;
91 91
92 ret = nouveau_bar_create(parent, engine, oclass, &priv);
93 *pobject = nv_object(priv);
94 if (ret)
95 return ret;
96
97 /* BAR3 */
98 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0, 92 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
99 &priv->bar[0].mem); 93 &bar_vm->mem);
100 mem = priv->bar[0].mem;
101 if (ret) 94 if (ret)
102 return ret; 95 return ret;
103 96
104 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0, 97 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
105 &priv->bar[0].pgd); 98 &bar_vm->pgd);
106 if (ret) 99 if (ret)
107 return ret; 100 return ret;
108 101
109 ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 3), 0, &vm); 102 bar_len = nv_device_resource_len(device, bar_nr);
103
104 ret = nouveau_vm_new(device, 0, bar_len, 0, &vm);
110 if (ret) 105 if (ret)
111 return ret; 106 return ret;
112 107
113 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); 108 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
114 109
115 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 110 /*
116 (nv_device_resource_len(device, 3) >> 12) * 8, 111 * Bootstrap page table lookup.
117 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 112 */
-					 &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
+	if (bar_nr == 3) {
+		ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+					 (bar_len >> 12) * 8, 0x1000,
+					 NVOBJ_FLAG_ZERO_ALLOC,
+					 &vm->pgt[0].obj[0]);
+		vm->pgt[0].refcount[0] = 1;
+		if (ret)
+			return ret;
+	}
 
-	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
+	ret = nouveau_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
 	nouveau_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
-	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
-	nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 3) - 1));
-	nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 3) - 1));
+	nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+	nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+	nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+	nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
 
-	/* BAR1 */
-	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
-				 &priv->bar[1].mem);
-	mem = priv->bar[1].mem;
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
-				 &priv->bar[1].pgd);
-	if (ret)
-		return ret;
-
-	ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 1), 0, &vm);
-	if (ret)
-		return ret;
-
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
-
-	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
-	nouveau_vm_ref(NULL, &vm, NULL);
-	if (ret)
-		return ret;
-
-	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
-	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
-	nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 1) - 1));
-	nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 1) - 1));
-
-	priv->base.alloc = nouveau_bar_alloc;
-	priv->base.kmap = nvc0_bar_kmap;
+	return 0;
+}
+
+static int
+nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_bar_priv *priv;
+	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+	int ret;
+
+	ret = nouveau_bar_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* BAR3 */
+	if (has_bar3) {
+		ret = nvc0_bar_init_vm(priv, &priv->bar[0], 3);
+		if (ret)
+			return ret;
+		priv->base.alloc = nouveau_bar_alloc;
+		priv->base.kmap = nvc0_bar_kmap;
+	}
+
+	/* BAR1 */
+	ret = nvc0_bar_init_vm(priv, &priv->bar[1], 1);
+	if (ret)
+		return ret;
+
 	priv->base.umap = nvc0_bar_umap;
 	priv->base.unmap = nvc0_bar_unmap;
 	priv->base.flush = nv84_bar_flush;
@@ -201,7 +203,9 @@ nvc0_bar_init(struct nouveau_object *object)
 	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
 
 	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
-	nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
+	if (priv->bar[0].mem)
+		nv_wr32(priv, 0x001714,
+			0xc0000000 | priv->bar[0].mem->addr >> 12);
 	return 0;
 }
 
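The net effect of the hunks above is that the duplicated BAR1/BAR3 bring-up is folded into a single nvc0_bar_init_vm() helper, with BAR3 becoming optional for chips (such as gk20a) that do not expose it. A minimal standalone sketch of that shape, using illustrative stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct bar_vm { int bar_nr; };
struct bar_priv { struct bar_vm bar[2]; };

/* one code path for both BARs, mirroring nvc0_bar_init_vm() */
static int init_vm(struct bar_vm *vm, int bar_nr)
{
	vm->bar_nr = bar_nr;
	return 0;
}

static int bar_ctor(struct bar_priv *priv, bool has_bar3)
{
	int ret;

	if (has_bar3) {			/* BAR3 may be absent */
		ret = init_vm(&priv->bar[0], 3);
		if (ret)
			return ret;
	}
	return init_vm(&priv->bar[1], 1);	/* BAR1 is always set up */
}

int main(void)
{
	struct bar_priv priv = { { { 0 }, { 0 } } };

	printf("%d\n", bar_ctor(&priv, false));	/* succeeds without BAR3 */
	return 0;
}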
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 222e8ebb669d..d45704a2c2df 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -183,10 +183,11 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
 		goto out;
 
 	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	if (bios->data) {
-		for (i = 0; i < bios->size; i += 4)
-			((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
-	}
+	if (!bios->data)
+		goto out;
+
+	for (i = 0; i < bios->size; i += 4)
+		((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
 
 	/* check the PCI record header */
 	pcir = nv_ro16(bios, 0x0018);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
index 5ac010efd959..2ede3bcd96a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -28,12 +28,12 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/conn.h>
 
-u16
-dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
-		u16 data = nv_ro16(bios, dcb + 0x14);
+		u32 data = nv_ro16(bios, dcb + 0x14);
 		if (data) {
 			*ver = nv_ro08(bios, data + 0);
 			*hdr = nv_ro08(bios, data + 1);
@@ -42,15 +42,59 @@ dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 			return data;
 		}
 	}
-	return 0x0000;
+	return 0x00000000;
 }
 
-u16
-dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+u32
+nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	      struct nvbios_connT *info)
+{
+	u32 data = nvbios_connTe(bios, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x30:
+	case 0x40:
+		return data;
+	default:
+		break;
+	}
+	return 0x00000000;
+}
+
+u32
+nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
 {
 	u8  hdr, cnt;
-	u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
+	u32 data = nvbios_connTe(bios, ver, &hdr, &cnt, len);
 	if (data && idx < cnt)
 		return data + hdr + (idx * *len);
-	return 0x0000;
+	return 0x00000000;
+}
+
+u32
+nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+	      struct nvbios_connE *info)
+{
+	u32 data = nvbios_connEe(bios, idx, ver, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x30:
+	case 0x40:
+		info->type = nv_ro08(bios, data + 0x00);
+		info->location = nv_ro08(bios, data + 0x01) & 0x0f;
+		info->hpd = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
+		info->dp = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+		if (*len < 4)
+			return data;
+		info->hpd |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
+		info->dp |= nv_ro08(bios, data + 0x02) & 0x0c;
+		info->di = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
+		info->hpd |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
+		info->sr = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
+		info->lcdid = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+		return data;
+	default:
+		break;
+	}
+	return 0x00000000;
 }
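Note how nvbios_connEp() stitches multi-bit fields together from bits scattered across several entry bytes; hpd, for example, picks up two bits from byte 0x01, two more from byte 0x02, and three from byte 0x03. A standalone sketch of that unpacking, with made-up byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical entry bytes at offsets 0x01, 0x02, 0x03 */
	uint8_t b1 = 0x30, b2 = 0x03, b3 = 0x07;
	uint8_t hpd;

	hpd  = (b1 & 0x30) >> 4;	/* bits 0-1 from byte 0x01 */
	hpd |= (b2 & 0x03) << 2;	/* bits 2-3 from byte 0x02 */
	hpd |= (b3 & 0x07) << 4;	/* bits 4-6 from byte 0x03 */
	printf("hpd = 0x%02x\n", hpd);	/* 0x7f */
	return 0;
}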
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 7628fe759220..f309dd657250 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -162,18 +162,20 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
 		   struct nvbios_dpcfg *info)
 {
 	u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
 	if (data) {
 		switch (*ver) {
 		case 0x21:
-			info->drv = nv_ro08(bios, data + 0x02);
-			info->pre = nv_ro08(bios, data + 0x03);
-			info->unk = nv_ro08(bios, data + 0x04);
+			info->dc = nv_ro08(bios, data + 0x02);
+			info->pe = nv_ro08(bios, data + 0x03);
+			info->tx_pu = nv_ro08(bios, data + 0x04);
 			break;
 		case 0x30:
 		case 0x40:
-			info->drv = nv_ro08(bios, data + 0x01);
-			info->pre = nv_ro08(bios, data + 0x02);
-			info->unk = nv_ro08(bios, data + 0x03);
+			info->pc = nv_ro08(bios, data + 0x00);
+			info->dc = nv_ro08(bios, data + 0x01);
+			info->pe = nv_ro08(bios, data + 0x02);
+			info->tx_pu = nv_ro08(bios, data + 0x03);
 			break;
 		default:
 			data = 0x0000;
@@ -184,7 +186,7 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
 }
 
 u16
-nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
 		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		   struct nvbios_dpcfg *info)
 {
@@ -193,16 +195,15 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
 
 	if (*ver >= 0x30) {
 		const u8 vsoff[] = { 0, 4, 7, 9 };
-		idx = (un * 10) + vsoff[vs] + pe;
+		idx = (pc * 10) + vsoff[vs] + pe;
 	} else {
-		while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
 						  ver, hdr, cnt, len))) {
 			if (nv_ro08(bios, data + 0x00) == vs &&
 			    nv_ro08(bios, data + 0x01) == pe)
 				break;
-			idx++;
 		}
 	}
 
-	return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+	return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
 }
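For table versions 3.0 and 4.0 the entry index is computed directly rather than searched: entries appear to be grouped in blocks of ten per pre-cursor level, with vsoff[] giving the offset of each voltage-swing group. A worked example of the same arithmetic, standalone:

#include <stdio.h>

int main(void)
{
	const unsigned char vsoff[] = { 0, 4, 7, 9 };
	unsigned pc = 1, vs = 2, pe = 1;	/* example training levels */
	unsigned idx = (pc * 10) + vsoff[vs] + pe;

	printf("entry index = %u\n", idx);	/* 10 + 7 + 1 = 18 */
	return 0;
}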
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index acaeaf79e3f0..626380f9e4c0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -98,15 +98,16 @@ static u8
 init_conn(struct nvbios_init *init)
 {
 	struct nouveau_bios *bios = init->bios;
-	u8  ver, len;
-	u16 conn;
+	struct nvbios_connE connE;
+	u8  ver, hdr;
+	u32 conn;
 
 	if (init_exec(init)) {
 		if (init->outp) {
 			conn = init->outp->connector;
-			conn = dcb_conn(bios, conn, &ver, &len);
+			conn = nvbios_connEp(bios, conn, &ver, &hdr, &connE);
 			if (conn)
-				return nv_ro08(bios, conn);
+				return connE.type;
 		}
 
 		error("script needs connector type\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index dd62baead39c..22351f594d2a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -346,8 +346,8 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
 	struct nouveau_pstate *pstate;
 	int i = 0;
 
-	/* YKW repellant */
-	return -ENOSYS;
+	if (!clk->allow_reclock)
+		return -ENOSYS;
 
 	if (req != -1 && req != -2) {
 		list_for_each_entry(pstate, &clk->states, head) {
@@ -456,6 +456,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
 		      struct nouveau_object *engine,
 		      struct nouveau_oclass *oclass,
 		      struct nouveau_clocks *clocks,
+		      bool allow_reclock,
 		      int length, void **object)
 {
 	struct nouveau_device *device = nv_device(parent);
@@ -478,6 +479,8 @@ nouveau_clock_create_(struct nouveau_object *parent,
 		ret = nouveau_pstate_new(clk, idx++);
 	} while (ret == 0);
 
+	clk->allow_reclock = allow_reclock;
+
 	mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
 	if (mode) {
 		if (!strncasecmpz(mode, "disabled", arglen)) {
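This replaces the unconditional -ENOSYS bail-out with an opt-in flag: each chipset constructor now states whether reclocking is believed safe, as the per-chipset hunks below show. A minimal sketch of the gate, with stand-in types:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct clk { bool allow_reclock; };

static int ustate_update(struct clk *clk)
{
	if (!clk->allow_reclock)
		return -ENOSYS;	/* chipset not opted in to reclocking */
	/* ...pstate selection would continue here... */
	return 0;
}

int main(void)
{
	struct clk nv40 = { .allow_reclock = true };	/* cf. nv40_clock_ctor */
	struct clk nv50 = { .allow_reclock = false };	/* cf. nv50_clock_ctor */

	printf("%d %d\n", ustate_update(&nv40), ustate_update(&nv50));
	return 0;
}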
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index b74db6cfc4e2..eb2d4425a49e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -82,7 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index db7346f79080..8a9e16839791 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -213,7 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv40_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index 250a6d96016b..8c132772ba9e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
507 int ret; 507 int ret;
508 508
509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains, 509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
510 &priv); 510 false, &priv);
511 *pobject = nv_object(priv); 511 *pobject = nv_object(priv);
512 if (ret) 512 if (ret)
513 return ret; 513 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 4f5a1373f002..9fb58354a80b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -302,7 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nva3_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 7a723b4f564d..6a65fc9e9663 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -421,7 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvaa_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index c3105720ed24..dbf8517f54da 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -437,7 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index d3c37c96f0e7..4ac1aa30ea11 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -473,7 +473,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nve0_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true,
+				   &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
new file mode 100644
index 000000000000..a16024a74771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvc0.h"
+
+struct gk20a_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct gk20a_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass *
+gk20a_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xea),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = gk20a_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &gk20a_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index da74c889aed4..82273f832e42 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -32,6 +32,7 @@ extern struct nouveau_oclass nva3_ram_oclass;
 extern struct nouveau_oclass nvaa_ram_oclass;
 extern struct nouveau_oclass nvc0_ram_oclass;
 extern struct nouveau_oclass nve0_ram_oclass;
+extern struct nouveau_oclass gk20a_ram_oclass;
 extern struct nouveau_oclass gm107_ram_oclass;
 
 int nouveau_sddr3_calc(struct nouveau_ram *ram);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c
new file mode 100644
index 000000000000..4d77d75e4673
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+struct gk20a_mem {
+	struct nouveau_mem base;
+	void *cpuaddr;
+	dma_addr_t handle;
+};
+#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
+
+static void
+gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+	struct device *dev = nv_device_base(nv_device(pfb));
+	struct gk20a_mem *mem = to_gk20a_mem(*pmem);
+
+	*pmem = NULL;
+	if (unlikely(mem == NULL))
+		return;
+
+	if (likely(mem->cpuaddr))
+		dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
+				  mem->cpuaddr, mem->handle);
+
+	kfree(mem->base.pages);
+	kfree(mem);
+}
+
+static int
+gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+	      u32 memtype, struct nouveau_mem **pmem)
+{
+	struct device *dev = nv_device_base(nv_device(pfb));
+	struct gk20a_mem *mem;
+	u32 type = memtype & 0xff;
+	u32 npages, order;
+	int i;
+
+	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
+		 align, ncmin);
+
+	npages = size >> PAGE_SHIFT;
+	if (npages == 0)
+		npages = 1;
+
+	if (align == 0)
+		align = PAGE_SIZE;
+	align >>= PAGE_SHIFT;
+
+	/* round alignment to the next power of 2, if needed */
+	order = fls(align);
+	if ((align & (align - 1)) == 0)
+		order--;
+	align = BIT(order);
+
+	/* ensure returned address is correctly aligned */
+	npages = max(align, npages);
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mem->base.size = npages;
+	mem->base.memtype = type;
+
+	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
+	if (!mem->base.pages) {
+		kfree(mem);
+		return -ENOMEM;
+	}
+
+	*pmem = &mem->base;
+
+	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
+					  &mem->handle, GFP_KERNEL);
+	if (!mem->cpuaddr) {
+		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
+		gk20a_ram_put(pfb, pmem);
+		return -ENOMEM;
+	}
+
+	align <<= PAGE_SHIFT;
+
+	/* alignment check */
+	if (unlikely(mem->handle & (align - 1)))
+		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
+			&mem->handle, align);
+
+	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
+		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);
+
+	for (i = 0; i < npages; i++)
+		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
+
+	mem->base.offset = (u64)mem->base.pages[0];
+
+	return 0;
+}
+
+static int
+gk20a_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 datasize,
+	       struct nouveau_object **pobject)
+{
+	struct nouveau_ram *ram;
+	int ret;
+
+	ret = nouveau_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+	ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = get_num_physpages() << PAGE_SHIFT;
+
+	ram->get = gk20a_ram_get;
+	ram->put = gk20a_ram_put;
+
+	return 0;
+}
+
+struct nouveau_oclass
+gk20a_ram_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = gk20a_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = _nouveau_ram_init,
+		.fini = _nouveau_ram_fini,
+	},
+};
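Since gk20a has no dedicated VRAM, its "stolen" system memory is handed out via dma_alloc_coherent(), and the requested alignment is first rounded up to a power of two with the fls()/BIT() dance above. The same rounding, reproduced as a standalone userspace snippet (a plain loop stands in for the kernel's fls()):

#include <stdio.h>

/* round a page-unit alignment up to the next power of two */
static unsigned round_align(unsigned align)
{
	unsigned order = 0, a = align;

	while (a >>= 1)			/* stand-in for fls(align) - 1 */
		order++;
	if (align & (align - 1))	/* not already a power of two? */
		order++;
	return 1u << order;		/* BIT(order) */
}

int main(void)
{
	printf("%u %u %u\n", round_align(1), round_align(3), round_align(8));
	/* prints: 1 4 8 */
	return 0;
}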
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index ef91b6e893af..e5d12c24cc43 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -211,7 +211,7 @@ nv50_ram_prog(struct nouveau_fb *pfb)
 	struct nv50_ram *ram = (void *)pfb->ram;
 	struct nv50_ramseq *hwsq = &ram->hwsq;
 
-	ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 6eb97f16fbda..8076fb195dd5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -309,7 +309,7 @@ nva3_ram_prog(struct nouveau_fb *pfb)
 	struct nouveau_device *device = nv_device(pfb);
 	struct nva3_ram *ram = (void *)pfb->ram;
 	struct nva3_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 8edc92224c84..5a6a5027f749 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -408,7 +408,7 @@ nvc0_ram_prog(struct nouveau_fb *pfb)
 	struct nouveau_device *device = nv_device(pfb);
 	struct nvc0_ram *ram = (void *)pfb->ram;
 	struct nvc0_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 16752192cf87..84c7efbc4f38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -1111,7 +1111,7 @@ nve0_ram_prog(struct nouveau_fb *pfb)
 	struct nouveau_device *device = nv_device(pfb);
 	struct nve0_ram *ram = (void *)pfb->ram;
 	struct nve0_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
 	return (ram->base.next == &ram->base.xition);
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index f572c2804c32..45e0202f3151 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -22,21 +22,24 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/gpio.h>
 #include <subdev/bios.h>
 #include <subdev/bios/gpio.h>
 
+#include "priv.h"
+
 static int
 nouveau_gpio_drive(struct nouveau_gpio *gpio,
 		   int idx, int line, int dir, int out)
 {
-	return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
+	const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+	return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
 }
 
 static int
 nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
 {
-	return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
+	const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+	return impl->sense ? impl->sense(gpio, line) : -ENODEV;
 }
 
 static int
@@ -102,6 +105,80 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
 	return ret;
 }
 
+static void
+nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+	const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+	impl->intr_mask(gpio, type, 1 << index, 0);
+}
+
+static void
+nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+	const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+	impl->intr_mask(gpio, type, 1 << index, 1 << index);
+}
+
+static void
+nouveau_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(subdev);
+	const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+	u32 hi, lo, e, i;
+
+	impl->intr_stat(gpio, &hi, &lo);
+
+	for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) {
+		if (hi & (1 << i))
+			e |= NVKM_GPIO_HI;
+		if (lo & (1 << i))
+			e |= NVKM_GPIO_LO;
+		nouveau_event_trigger(gpio->events, e, i);
+	}
+}
+
+int
+_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	const struct nouveau_gpio_impl *impl = (void *)object->oclass;
+	struct nouveau_gpio *gpio = nouveau_gpio(object);
+	u32 mask = (1 << impl->lines) - 1;
+
+	impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+	impl->intr_stat(gpio, &mask, &mask);
+
+	return nouveau_subdev_fini(&gpio->base, suspend);
+}
+
+static struct dmi_system_id gpio_reset_ids[] = {
+	{
+		.ident = "Apple Macbook 10,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+		}
+	},
+	{ }
+};
+
+int
+_nouveau_gpio_init(struct nouveau_object *object)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(object);
+	int ret;
+
+	ret = nouveau_subdev_init(&gpio->base);
+	if (ret)
+		return ret;
+
+	if (gpio->reset && dmi_check_system(gpio_reset_ids))
+		gpio->reset(gpio, DCB_GPIO_UNUSED);
+
+	return ret;
+}
+
 void
 _nouveau_gpio_dtor(struct nouveau_object *object)
 {
@@ -113,9 +190,10 @@ _nouveau_gpio_dtor(struct nouveau_object *object)
 int
 nouveau_gpio_create_(struct nouveau_object *parent,
 		     struct nouveau_object *engine,
-		     struct nouveau_oclass *oclass, int lines,
+		     struct nouveau_oclass *oclass,
 		     int length, void **pobject)
 {
+	const struct nouveau_gpio_impl *impl = (void *)oclass;
 	struct nouveau_gpio *gpio;
 	int ret;
 
@@ -125,34 +203,34 @@ nouveau_gpio_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	ret = nouveau_event_create(lines, &gpio->events);
-	if (ret)
-		return ret;
-
 	gpio->find = nouveau_gpio_find;
 	gpio->set = nouveau_gpio_set;
 	gpio->get = nouveau_gpio_get;
+	gpio->reset = impl->reset;
+
+	ret = nouveau_event_create(2, impl->lines, &gpio->events);
+	if (ret)
+		return ret;
+
+	gpio->events->priv = gpio;
+	gpio->events->enable = nouveau_gpio_intr_enable;
+	gpio->events->disable = nouveau_gpio_intr_disable;
+	nv_subdev(gpio)->intr = nouveau_gpio_intr;
 	return 0;
 }
 
-static struct dmi_system_id gpio_reset_ids[] = {
-	{
-		.ident = "Apple Macbook 10,1",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
-		}
-	},
-	{ }
-};
-
 int
-nouveau_gpio_init(struct nouveau_gpio *gpio)
+_nouveau_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
 {
-	int ret = nouveau_subdev_init(&gpio->base);
-	if (ret == 0 && gpio->reset) {
-		if (dmi_check_system(gpio_reset_ids))
-			gpio->reset(gpio, DCB_GPIO_UNUSED);
-	}
-	return ret;
+	struct nouveau_gpio *gpio;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, &gpio);
+	*pobject = nv_object(gpio);
+	if (ret)
+		return ret;
+
+	return 0;
 }
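Under the reworked API, per-line event enable/disable reduces to setting or clearing one bit through the chipset's intr_mask() hook, separately for rising (HI) and falling (LO) transitions. A standalone sketch of that bit manipulation against a fake enable register, mirroring the logic above:

#include <stdint.h>
#include <stdio.h>

#define NVKM_GPIO_HI 1
#define NVKM_GPIO_LO 2

static uint32_t inte;	/* stand-in for a hardware enable register */

static void intr_mask(uint32_t type, uint32_t mask, uint32_t data)
{
	if (type & NVKM_GPIO_LO)
		inte = (inte & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte = (inte & ~mask) | data;
}

static void intr_enable(uint32_t type, int index)
{
	intr_mask(type, 1u << index, 1u << index);
}

static void intr_disable(uint32_t type, int index)
{
	intr_mask(type, 1u << index, 0);
}

int main(void)
{
	intr_enable(NVKM_GPIO_HI | NVKM_GPIO_LO, 3);
	printf("0x%08x\n", (unsigned)inte);	/* 0x00080008: line 3, both edges */
	intr_disable(NVKM_GPIO_LO, 3);
	printf("0x%08x\n", (unsigned)inte);	/* 0x00000008: hi edge only */
	return 0;
}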
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 76d5d5465ddd..27ad23eaf185 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -26,10 +26,6 @@
 
 #include "priv.h"
 
-struct nv10_gpio_priv {
-	struct nouveau_gpio base;
-};
-
 static int
 nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
 {
@@ -83,95 +79,38 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
 }
 
 static void
-nv10_gpio_intr(struct nouveau_subdev *subdev)
-{
-	struct nv10_gpio_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x001104);
-	u32 hi = (intr & 0x0000ffff) >> 0;
-	u32 lo = (intr & 0xffff0000) >> 16;
-	int i;
-
-	for (i = 0; (hi | lo) && i < 32; i++) {
-		if ((hi | lo) & (1 << i))
-			nouveau_event_trigger(priv->base.events, i);
-	}
-
-	nv_wr32(priv, 0x001104, intr);
-}
-
-static void
-nv10_gpio_intr_enable(struct nouveau_event *event, int line)
-{
-	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
-	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
-}
-
-static void
-nv10_gpio_intr_disable(struct nouveau_event *event, int line)
-{
-	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
-	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
-}
-
-static int
-nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	       struct nouveau_oclass *oclass, void *data, u32 size,
-	       struct nouveau_object **pobject)
+nv10_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
 {
-	struct nv10_gpio_priv *priv;
-	int ret;
-
-	ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.drive = nv10_gpio_drive;
-	priv->base.sense = nv10_gpio_sense;
-	priv->base.events->priv = priv;
-	priv->base.events->enable = nv10_gpio_intr_enable;
-	priv->base.events->disable = nv10_gpio_intr_disable;
-	nv_subdev(priv)->intr = nv10_gpio_intr;
-	return 0;
+	u32 intr = nv_rd32(gpio, 0x001104);
+	u32 stat = nv_rd32(gpio, 0x001144) & intr;
+	*lo = (stat & 0xffff0000) >> 16;
+	*hi = (stat & 0x0000ffff);
+	nv_wr32(gpio, 0x001104, intr);
 }
 
 static void
-nv10_gpio_dtor(struct nouveau_object *object)
-{
-	struct nv10_gpio_priv *priv = (void *)object;
-	nouveau_gpio_destroy(&priv->base);
-}
-
-static int
-nv10_gpio_init(struct nouveau_object *object)
-{
-	struct nv10_gpio_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_gpio_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001144, 0x00000000);
-	nv_wr32(priv, 0x001104, 0xffffffff);
-	return 0;
-}
-
-static int
-nv10_gpio_fini(struct nouveau_object *object, bool suspend)
+nv10_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	struct nv10_gpio_priv *priv = (void *)object;
-	nv_wr32(priv, 0x001144, 0x00000000);
-	return nouveau_gpio_fini(&priv->base, suspend);
+	u32 inte = nv_rd32(gpio, 0x001144);
+	if (type & NVKM_GPIO_LO)
+		inte = (inte & ~(mask << 16)) | (data << 16);
+	if (type & NVKM_GPIO_HI)
+		inte = (inte & ~mask) | data;
+	nv_wr32(gpio, 0x001144, inte);
 }
 
-struct nouveau_oclass
-nv10_gpio_oclass = {
-	.handle = NV_SUBDEV(GPIO, 0x10),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv10_gpio_ctor,
-		.dtor = nv10_gpio_dtor,
-		.init = nv10_gpio_init,
-		.fini = nv10_gpio_fini,
+struct nouveau_oclass *
+nv10_gpio_oclass = &(struct nouveau_gpio_impl) {
+	.base.handle = NV_SUBDEV(GPIO, 0x10),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpio_ctor,
+		.dtor = _nouveau_gpio_dtor,
+		.init = _nouveau_gpio_init,
+		.fini = _nouveau_gpio_fini,
 	},
-};
+	.lines = 16,
+	.intr_stat = nv10_gpio_intr_stat,
+	.intr_mask = nv10_gpio_intr_mask,
+	.drive = nv10_gpio_drive,
+	.sense = nv10_gpio_sense,
+}.base;
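The intr_stat() contract (spelled out in priv.h further below) is worth noting: only lines that are currently enabled are reported to the caller, but every latched bit is acked. A small standalone model of nv10_gpio_intr_stat() behaving that way, with fake register values:

#include <stdint.h>
#include <stdio.h>

static uint32_t pending = 0x00050003;	/* fake interrupt latch (0x001104) */
static uint32_t enabled = 0x00010001;	/* fake enable register (0x001144) */

static void intr_stat(uint32_t *hi, uint32_t *lo)
{
	uint32_t intr = pending;
	uint32_t stat = enabled & intr;	/* report only unmasked lines */

	*lo = (stat & 0xffff0000) >> 16;
	*hi = (stat & 0x0000ffff);
	pending = 0;			/* ...but ack every latched bit */
}

int main(void)
{
	uint32_t hi, lo;

	intr_stat(&hi, &lo);
	printf("hi=0x%04x lo=0x%04x pending=0x%08x\n",
	       (unsigned)hi, (unsigned)lo, (unsigned)pending);
	return 0;
}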
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index 2ef774731629..1864fa98e6b1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -24,15 +24,10 @@
 
 #include "priv.h"
 
-struct nv50_gpio_priv {
-	struct nouveau_gpio base;
-};
-
-static void
+void
 nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 {
 	struct nouveau_bios *bios = nouveau_bios(gpio);
-	struct nv50_gpio_priv *priv = (void *)gpio;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
@@ -55,7 +50,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 
 		gpio->set(gpio, 0, func, line, defs);
 
-		nv_mask(priv, reg, 0x00010001 << lsh, val << lsh);
+		nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
 	}
 }
 
@@ -72,7 +67,7 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
 	return 0;
 }
 
-static int
+int
 nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
 {
 	u32 reg, shift;
@@ -84,7 +79,7 @@ nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
 	return 0;
 }
 
-static int
+int
 nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
 {
 	u32 reg, shift;
@@ -95,119 +90,40 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
 	return !!(nv_rd32(gpio, reg) & (4 << shift));
 }
 
-void
-nv50_gpio_intr(struct nouveau_subdev *subdev)
-{
-	struct nv50_gpio_priv *priv = (void *)subdev;
-	u32 intr0, intr1 = 0;
-	u32 hi, lo;
-	int i;
-
-	intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
-	if (nv_device(priv)->chipset > 0x92)
-		intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
-
-	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
-	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-
-	for (i = 0; (hi | lo) && i < 32; i++) {
-		if ((hi | lo) & (1 << i))
-			nouveau_event_trigger(priv->base.events, i);
-	}
-
-	nv_wr32(priv, 0xe054, intr0);
-	if (nv_device(priv)->chipset > 0x92)
-		nv_wr32(priv, 0xe074, intr1);
-}
-
-void
-nv50_gpio_intr_enable(struct nouveau_event *event, int line)
-{
-	const u32 addr = line < 16 ? 0xe050 : 0xe070;
-	const u32 mask = 0x00010001 << (line & 0xf);
-	nv_wr32(event->priv, addr + 0x04, mask);
-	nv_mask(event->priv, addr + 0x00, mask, mask);
-}
-
-void
-nv50_gpio_intr_disable(struct nouveau_event *event, int line)
-{
-	const u32 addr = line < 16 ? 0xe050 : 0xe070;
-	const u32 mask = 0x00010001 << (line & 0xf);
-	nv_wr32(event->priv, addr + 0x04, mask);
-	nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
-}
-
-static int
-nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	       struct nouveau_oclass *oclass, void *data, u32 size,
-	       struct nouveau_object **pobject)
-{
-	struct nv50_gpio_priv *priv;
-	int ret;
-
-	ret = nouveau_gpio_create(parent, engine, oclass,
-				  nv_device(parent)->chipset > 0x92 ? 32 : 16,
-				  &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.reset = nv50_gpio_reset;
-	priv->base.drive = nv50_gpio_drive;
-	priv->base.sense = nv50_gpio_sense;
-	priv->base.events->priv = priv;
-	priv->base.events->enable = nv50_gpio_intr_enable;
-	priv->base.events->disable = nv50_gpio_intr_disable;
-	nv_subdev(priv)->intr = nv50_gpio_intr;
-	return 0;
-}
-
-void
-nv50_gpio_dtor(struct nouveau_object *object)
-{
-	struct nv50_gpio_priv *priv = (void *)object;
-	nouveau_gpio_destroy(&priv->base);
-}
-
-int
-nv50_gpio_init(struct nouveau_object *object)
+static void
+nv50_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
 {
-	struct nv50_gpio_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_gpio_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* disable, and ack any pending gpio interrupts */
-	nv_wr32(priv, 0xe050, 0x00000000);
-	nv_wr32(priv, 0xe054, 0xffffffff);
-	if (nv_device(priv)->chipset > 0x92) {
-		nv_wr32(priv, 0xe070, 0x00000000);
-		nv_wr32(priv, 0xe074, 0xffffffff);
-	}
-
-	return 0;
+	u32 intr = nv_rd32(gpio, 0x00e054);
+	u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+	*lo = (stat & 0xffff0000) >> 16;
+	*hi = (stat & 0x0000ffff);
+	nv_wr32(gpio, 0x00e054, intr);
 }
 
-int
-nv50_gpio_fini(struct nouveau_object *object, bool suspend)
+static void
+nv50_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	struct nv50_gpio_priv *priv = (void *)object;
-	nv_wr32(priv, 0xe050, 0x00000000);
-	if (nv_device(priv)->chipset > 0x92)
-		nv_wr32(priv, 0xe070, 0x00000000);
-	return nouveau_gpio_fini(&priv->base, suspend);
+	u32 inte = nv_rd32(gpio, 0x00e050);
+	if (type & NVKM_GPIO_LO)
+		inte = (inte & ~(mask << 16)) | (data << 16);
+	if (type & NVKM_GPIO_HI)
+		inte = (inte & ~mask) | data;
+	nv_wr32(gpio, 0x00e050, inte);
 }
 
-struct nouveau_oclass
-nv50_gpio_oclass = {
-	.handle = NV_SUBDEV(GPIO, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_gpio_ctor,
-		.dtor = nv50_gpio_dtor,
-		.init = nv50_gpio_init,
-		.fini = nv50_gpio_fini,
+struct nouveau_oclass *
+nv50_gpio_oclass = &(struct nouveau_gpio_impl) {
+	.base.handle = NV_SUBDEV(GPIO, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpio_ctor,
+		.dtor = _nouveau_gpio_dtor,
		.init = _nouveau_gpio_init,
+		.fini = _nouveau_gpio_fini,
 	},
-};
+	.lines = 16,
+	.intr_stat = nv50_gpio_intr_stat,
+	.intr_mask = nv50_gpio_intr_mask,
+	.drive = nv50_gpio_drive,
+	.sense = nv50_gpio_sense,
+	.reset = nv50_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
new file mode 100644
index 000000000000..252083d376f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27void
28nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
29{
30 u32 intr0 = nv_rd32(gpio, 0x00e054);
31 u32 intr1 = nv_rd32(gpio, 0x00e074);
32 u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
33 u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
34 *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
35 *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
36 nv_wr32(gpio, 0x00e054, intr0);
37 nv_wr32(gpio, 0x00e074, intr1);
38}
39
40void
41nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
42{
43 u32 inte0 = nv_rd32(gpio, 0x00e050);
44 u32 inte1 = nv_rd32(gpio, 0x00e070);
45 if (type & NVKM_GPIO_LO)
46 inte0 = (inte0 & ~(mask << 16)) | (data << 16);
47 if (type & NVKM_GPIO_HI)
48 inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
49 mask >>= 16;
50 data >>= 16;
51 if (type & NVKM_GPIO_LO)
52 inte1 = (inte1 & ~(mask << 16)) | (data << 16);
53 if (type & NVKM_GPIO_HI)
54 inte1 = (inte1 & ~mask) | data;
55 nv_wr32(gpio, 0x00e050, inte0);
56 nv_wr32(gpio, 0x00e070, inte1);
57}
58
59struct nouveau_oclass *
60nv92_gpio_oclass = &(struct nouveau_gpio_impl) {
61 .base.handle = NV_SUBDEV(GPIO, 0x92),
62 .base.ofuncs = &(struct nouveau_ofuncs) {
63 .ctor = _nouveau_gpio_ctor,
64 .dtor = _nouveau_gpio_dtor,
65 .init = _nouveau_gpio_init,
66 .fini = _nouveau_gpio_fini,
67 },
68 .lines = 32,
69 .intr_stat = nv92_gpio_intr_stat,
70 .intr_mask = nv92_gpio_intr_mask,
71 .drive = nv50_gpio_drive,
72 .sense = nv50_gpio_sense,
73 .reset = nv50_gpio_reset,
74}.base;
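nv92+ parts have 32 lines, but the hardware keeps them in two 16-line register pairs (0xe050/0xe054 and 0xe070/0xe074), so intr_mask() above applies the low halfword of the caller's 32-bit mask to the first pair and shifts the rest down for the second. The split, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x00030001;		/* lines 0, 16 and 17 */
	uint32_t pair0 = mask & 0xffff;		/* lines 0-15  -> 0xe050/0xe054 */
	uint32_t pair1 = mask >> 16;		/* lines 16-31 -> 0xe070/0xe074 */

	printf("pair0 mask=0x%04x pair1 mask=0x%04x\n",
	       (unsigned)pair0, (unsigned)pair1);
	return 0;
}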
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 010431e3acec..a4682b0956ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -24,15 +24,10 @@
 
 #include "priv.h"
 
-struct nvd0_gpio_priv {
-	struct nouveau_gpio base;
-};
-
 void
 nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 {
 	struct nouveau_bios *bios = nouveau_bios(gpio);
-	struct nvd0_gpio_priv *priv = (void *)gpio;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
@@ -51,9 +46,9 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 
 		gpio->set(gpio, 0, func, line, defs);
 
-		nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
+		nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
 		if (unk1--)
-			nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
+			nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
 	}
 }
 
@@ -72,36 +67,19 @@ nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
 	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
 }
 
-static int
-nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	       struct nouveau_oclass *oclass, void *data, u32 size,
-	       struct nouveau_object **pobject)
-{
-	struct nvd0_gpio_priv *priv;
-	int ret;
-
-	ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.reset = nvd0_gpio_reset;
-	priv->base.drive = nvd0_gpio_drive;
-	priv->base.sense = nvd0_gpio_sense;
-	priv->base.events->priv = priv;
-	priv->base.events->enable = nv50_gpio_intr_enable;
-	priv->base.events->disable = nv50_gpio_intr_disable;
-	nv_subdev(priv)->intr = nv50_gpio_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvd0_gpio_oclass = {
-	.handle = NV_SUBDEV(GPIO, 0xd0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvd0_gpio_ctor,
-		.dtor = nv50_gpio_dtor,
-		.init = nv50_gpio_init,
-		.fini = nv50_gpio_fini,
+struct nouveau_oclass *
+nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
+	.base.handle = NV_SUBDEV(GPIO, 0xd0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpio_ctor,
+		.dtor = _nouveau_gpio_dtor,
+		.init = _nouveau_gpio_init,
+		.fini = _nouveau_gpio_fini,
 	},
-};
+	.lines = 32,
+	.intr_stat = nv92_gpio_intr_stat,
+	.intr_mask = nv92_gpio_intr_mask,
+	.drive = nvd0_gpio_drive,
+	.sense = nvd0_gpio_sense,
+	.reset = nvd0_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
index 16b8c5bf5efa..e1145b48c76c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -24,108 +24,51 @@
 
 #include "priv.h"
 
-struct nve0_gpio_priv {
-	struct nouveau_gpio base;
-};
-
-void
-nve0_gpio_intr(struct nouveau_subdev *subdev)
+static void
+nve0_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
 {
-	struct nve0_gpio_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
-	u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
-	u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
-	u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-	int i;
-
-	for (i = 0; (hi | lo) && i < 32; i++) {
-		if ((hi | lo) & (1 << i))
-			nouveau_event_trigger(priv->base.events, i);
-	}
-
-	nv_wr32(priv, 0xdc00, intr0);
-	nv_wr32(priv, 0xdc88, intr1);
+	u32 intr0 = nv_rd32(gpio, 0x00dc00);
+	u32 intr1 = nv_rd32(gpio, 0x00dc80);
+	u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
+	u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+	nv_wr32(gpio, 0x00dc00, intr0);
+	nv_wr32(gpio, 0x00dc80, intr1);
 }
 
 void
-nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+nve0_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
-	const u32 mask = 0x00010001 << (line & 0xf);
-	nv_wr32(event->priv, addr + 0x08, mask);
-	nv_mask(event->priv, addr + 0x00, mask, mask);
-}
-
-void
-nve0_gpio_intr_disable(struct nouveau_event *event, int line)
-{
-	const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
-	const u32 mask = 0x00010001 << (line & 0xf);
-	nv_wr32(event->priv, addr + 0x08, mask);
-	nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
-}
-
-int
-nve0_gpio_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nve0_gpio_priv *priv = (void *)object;
-	nv_wr32(priv, 0xdc08, 0x00000000);
-	nv_wr32(priv, 0xdc88, 0x00000000);
-	return nouveau_gpio_fini(&priv->base, suspend);
-}
-
-int
-nve0_gpio_init(struct nouveau_object *object)
-{
-	struct nve0_gpio_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_gpio_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0xdc00, 0xffffffff);
-	nv_wr32(priv, 0xdc80, 0xffffffff);
-	return 0;
-}
-
-void
-nve0_gpio_dtor(struct nouveau_object *object)
-{
-	struct nve0_gpio_priv *priv = (void *)object;
-	nouveau_gpio_destroy(&priv->base);
-}
-
-static int
-nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	       struct nouveau_oclass *oclass, void *data, u32 size,
-	       struct nouveau_object **pobject)
-{
-	struct nve0_gpio_priv *priv;
-	int ret;
-
-	ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.reset = nvd0_gpio_reset;
-	priv->base.drive = nvd0_gpio_drive;
-	priv->base.sense = nvd0_gpio_sense;
-	priv->base.events->priv = priv;
-	priv->base.events->enable = nve0_gpio_intr_enable;
-	priv->base.events->disable = nve0_gpio_intr_disable;
-	nv_subdev(priv)->intr = nve0_gpio_intr;
-	return 0;
+	u32 inte0 = nv_rd32(gpio, 0x00dc08);
+	u32 inte1 = nv_rd32(gpio, 0x00dc88);
+	if (type & NVKM_GPIO_LO)
+		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+	if (type & NVKM_GPIO_HI)
+		inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+	mask >>= 16;
+	data >>= 16;
+	if (type & NVKM_GPIO_LO)
+		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+	if (type & NVKM_GPIO_HI)
+		inte1 = (inte1 & ~mask) | data;
+	nv_wr32(gpio, 0x00dc08, inte0);
+	nv_wr32(gpio, 0x00dc88, inte1);
 }
 
-struct nouveau_oclass
-nve0_gpio_oclass = {
-	.handle = NV_SUBDEV(GPIO, 0xe0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nve0_gpio_ctor,
-		.dtor = nv50_gpio_dtor,
-		.init = nve0_gpio_init,
-		.fini = nve0_gpio_fini,
+struct nouveau_oclass *
+nve0_gpio_oclass = &(struct nouveau_gpio_impl) {
+	.base.handle = NV_SUBDEV(GPIO, 0xe0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpio_ctor,
+		.dtor = _nouveau_gpio_dtor,
+		.init = _nouveau_gpio_init,
+		.fini = _nouveau_gpio_fini,
 	},
-};
+	.lines = 32,
+	.intr_stat = nve0_gpio_intr_stat,
+	.intr_mask = nve0_gpio_intr_mask,
+	.drive = nvd0_gpio_drive,
+	.sense = nvd0_gpio_sense,
+	.reset = nvd0_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
index 2ee1c895c782..e1724dfc86ae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -3,15 +3,65 @@
 
 #include <subdev/gpio.h>
 
-void nv50_gpio_dtor(struct nouveau_object *);
-int  nv50_gpio_init(struct nouveau_object *);
-int  nv50_gpio_fini(struct nouveau_object *, bool);
-void nv50_gpio_intr(struct nouveau_subdev *);
-void nv50_gpio_intr_enable(struct nouveau_event *, int line);
-void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+#define nouveau_gpio_create(p,e,o,d) \
+	nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({ \
+	struct nouveau_gpio *gpio = (p); \
+	_nouveau_gpio_dtor(nv_object(gpio)); \
+})
+#define nouveau_gpio_init(p) ({ \
+	struct nouveau_gpio *gpio = (p); \
+	_nouveau_gpio_init(nv_object(gpio)); \
+})
+#define nouveau_gpio_fini(p,s) ({ \
+	struct nouveau_gpio *gpio = (p); \
+	_nouveau_gpio_fini(nv_object(gpio), (s)); \
+})
+
+int  nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int, void **);
+int  _nouveau_gpio_ctor(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, void *, u32,
+			struct nouveau_object **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int  _nouveau_gpio_init(struct nouveau_object *);
+int  _nouveau_gpio_fini(struct nouveau_object *, bool);
+
+struct nouveau_gpio_impl {
+	struct nouveau_oclass base;
+	int lines;
+
+	/* read and ack pending interrupts, returning only data
+	 * for lines that have not been masked off, while still
+	 * performing the ack for anything that was pending.
+	 */
+	void (*intr_stat)(struct nouveau_gpio *, u32 *, u32 *);
+
+	/* mask on/off interrupts for hi/lo transitions on a
+	 * given set of gpio lines
+	 */
+	void (*intr_mask)(struct nouveau_gpio *, u32, u32, u32);
+
+	/* configure gpio direction and output value */
+	int  (*drive)(struct nouveau_gpio *, int line, int dir, int out);
+
+	/* sense current state of given gpio line */
+	int  (*sense)(struct nouveau_gpio *, int line);
+
+	/*XXX*/
+	void (*reset)(struct nouveau_gpio *, u8);
+};
+
+void nv50_gpio_reset(struct nouveau_gpio *, u8);
+int  nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
+int  nv50_gpio_sense(struct nouveau_gpio *, int);
+
+void nv92_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
+void nv92_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
 
 void nvd0_gpio_reset(struct nouveau_gpio *, u8);
 int  nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
 int  nvd0_gpio_sense(struct nouveau_gpio *, int);
 
+
 #endif
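The larger pattern introduced here: the per-chipset "class" is no longer a bare nouveau_oclass but a nouveau_gpio_impl embedding one as its first member, and each chipset exports &impl.base, letting common code cast the oclass pointer back to the impl to reach the hooks. A compilable miniature of that C idiom (all names here are stand-ins):

#include <stdio.h>

struct base_class { int handle; };

struct impl {
	struct base_class base;		/* must be the first member */
	int lines;
	int (*sense)(int line);
};

static int my_sense(int line) { return line & 1; }

/* mirrors: struct nouveau_oclass *nv10_gpio_oclass = &(impl){ ... }.base; */
static struct base_class *my_oclass = &(struct impl) {
	.base.handle = 0x10,
	.lines = 16,
	.sense = my_sense,
}.base;

int main(void)
{
	/* generic code recovers the impl by casting the base pointer back */
	const struct impl *impl = (const struct impl *)my_oclass;

	printf("lines=%d sense(3)=%d\n", impl->lines, impl->sense(3));
	return 0;
}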
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
index 4b195ac4da66..2c2731a6cf91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <subdev/i2c.h> 25#include "port.h"
26 26
27struct anx9805_i2c_port { 27struct anx9805_i2c_port {
28 struct nouveau_i2c_port base; 28 struct nouveau_i2c_port base;
@@ -37,6 +37,8 @@ anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
37 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent; 37 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
38 u8 tmp, i; 38 u8 tmp, i;
39 39
40 DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
41
40 nv_wri2cr(mast, chan->addr, 0xa0, link_bw); 42 nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
41 nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00)); 43 nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
42 nv_wri2cr(mast, chan->addr, 0xa2, 0x01); 44 nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
@@ -60,21 +62,29 @@ anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
60} 62}
61 63
62static int 64static int
63anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size) 65anx9805_aux(struct nouveau_i2c_port *port, bool retry,
66 u8 type, u32 addr, u8 *data, u8 size)
64{ 67{
65 struct anx9805_i2c_port *chan = (void *)port; 68 struct anx9805_i2c_port *chan = (void *)port;
66 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent; 69 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
67 int i, ret = -ETIMEDOUT; 70 int i, ret = -ETIMEDOUT;
71 u8 buf[16] = {};
68 u8 tmp; 72 u8 tmp;
69 73
74 DBG("%02x %05x %d\n", type, addr, size);
75
70 tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04; 76 tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
71 nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04); 77 nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
72 nv_wri2cr(mast, chan->ctrl, 0x07, tmp); 78 nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
73 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); 79 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
74 80
75 nv_wri2cr(mast, chan->addr, 0xe4, 0x80); 81 nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
76 for (i = 0; !(type & 1) && i < size; i++) 82 if (!(type & 1)) {
77 nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]); 83 memcpy(buf, data, size);
84 DBG("%16ph", buf);
85 for (i = 0; i < size; i++)
86 nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
87 }
78 nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type); 88 nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
79 nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0); 89 nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
80 nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8); 90 nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
@@ -93,8 +103,13 @@ anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
93 goto done; 103 goto done;
94 } 104 }
95 105
96 for (i = 0; (type & 1) && i < size; i++) 106 if (type & 1) {
97 data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i); 107 for (i = 0; i < size; i++)
108 buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
109 DBG("%16ph", buf);
110 memcpy(data, buf, size);
111 }
112
98 ret = 0; 113 ret = 0;
99done: 114done:
100 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); 115 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
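
The new buf[16] bounce buffer above exists for the "%16ph" debug prints: %*ph
is the kernel printk extension that hex-dumps a small buffer, and dumping a
fixed 16 bytes from a zero-initialised local means the trace never reads past
a short caller buffer (a single AUX transaction is at most 16 bytes here).
The idiom in isolation, as a sketch:

	u8 buf[16] = {};                   /* zeroed: no stale bytes in the dump */
	memcpy(buf, data, size);           /* size <= 16 per AUX transaction */
	printk(KERN_DEBUG "%16ph\n", buf); /* hex-dumps exactly 16 bytes */
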
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index 5de074ad170b..02eb42be2e9e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -22,15 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/i2c.h> 25#include "priv.h"
26 26
27int 27int
28nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size) 28nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
29{ 29{
30 struct nouveau_i2c *i2c = nouveau_i2c(port);
30 if (port->func->aux) { 31 if (port->func->aux) {
31 if (port->func->acquire) 32 int ret = i2c->acquire(port, 0);
32 port->func->acquire(port); 33 if (ret == 0) {
33 return port->func->aux(port, 9, addr, data, size); 34 ret = port->func->aux(port, true, 9, addr, data, size);
35 i2c->release(port);
36 }
37 return ret;
34 } 38 }
35 return -ENODEV; 39 return -ENODEV;
36} 40}
@@ -38,10 +42,14 @@ nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
38int 42int
39nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size) 43nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
40{ 44{
45 struct nouveau_i2c *i2c = nouveau_i2c(port);
41 if (port->func->aux) { 46 if (port->func->aux) {
42 if (port->func->acquire) 47 int ret = i2c->acquire(port, 0);
43 port->func->acquire(port); 48 if (ret == 0) {
44 return port->func->aux(port, 8, addr, data, size); 49 ret = port->func->aux(port, true, 8, addr, data, size);
50 i2c->release(port);
51 }
52 return ret;
45 } 53 }
46 return -ENODEV; 54 return -ENODEV;
47} 55}
@@ -50,13 +58,16 @@ static int
50aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 58aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
51{ 59{
52 struct nouveau_i2c_port *port = adap->algo_data; 60 struct nouveau_i2c_port *port = adap->algo_data;
61 struct nouveau_i2c *i2c = nouveau_i2c(port);
53 struct i2c_msg *msg = msgs; 62 struct i2c_msg *msg = msgs;
54 int ret, mcnt = num; 63 int ret, mcnt = num;
55 64
56 if (!port->func->aux) 65 if (!port->func->aux)
57 return -ENODEV; 66 return -ENODEV;
58 if ( port->func->acquire) 67
59 port->func->acquire(port); 68 ret = i2c->acquire(port, 0);
69 if (ret)
70 return ret;
60 71
61 while (mcnt--) { 72 while (mcnt--) {
62 u8 remaining = msg->len; 73 u8 remaining = msg->len;
@@ -74,9 +85,11 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
74 if (mcnt || remaining > 16) 85 if (mcnt || remaining > 16)
75 cmd |= 4; /* MOT */ 86 cmd |= 4; /* MOT */
76 87
77 ret = port->func->aux(port, cmd, msg->addr, ptr, cnt); 88 ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
78 if (ret < 0) 89 if (ret < 0) {
90 i2c->release(port);
79 return ret; 91 return ret;
92 }
80 93
81 ptr += cnt; 94 ptr += cnt;
82 remaining -= cnt; 95 remaining -= cnt;
@@ -85,6 +98,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
85 msg++; 98 msg++;
86 } 99 }
87 100
101 i2c->release(port);
88 return num; 102 return num;
89} 103}
90 104
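
The pattern repeated across nv_rdaux(), nv_wraux() and aux_xfer() above is the
commit's new locking discipline: every AUX transaction brackets the hardware
access with i2c->acquire()/i2c->release(), which arbitrates the pad shared
between AUX and bit-banged I2C, and the release must happen on every exit
path, error or not. Reduced to its skeleton (port obtained elsewhere; a
timeout of 0 means wait indefinitely):

	int ret = i2c->acquire(port, 0);
	if (ret)
		return ret;

	ret = port->func->aux(port, true, type, addr, data, size);

	i2c->release(port);        /* pairs with acquire on all paths */
	return ret;
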
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 378e05b88e6f..09ba2cc851cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -23,13 +23,16 @@
23 */ 23 */
24 24
25#include <core/option.h> 25#include <core/option.h>
26#include <core/event.h>
26 27
27#include <subdev/bios.h> 28#include <subdev/bios.h>
28#include <subdev/bios/dcb.h> 29#include <subdev/bios/dcb.h>
29#include <subdev/bios/i2c.h> 30#include <subdev/bios/i2c.h>
30#include <subdev/i2c.h>
31#include <subdev/vga.h> 31#include <subdev/vga.h>
32 32
33#include "priv.h"
34#include "pad.h"
35
33/****************************************************************************** 36/******************************************************************************
34 * interface to linux i2c bit-banging algorithm 37 * interface to linux i2c bit-banging algorithm
35 *****************************************************************************/ 38 *****************************************************************************/
@@ -45,9 +48,15 @@ nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
45{ 48{
46 struct i2c_algo_bit_data *bit = adap->algo_data; 49 struct i2c_algo_bit_data *bit = adap->algo_data;
47 struct nouveau_i2c_port *port = bit->data; 50 struct nouveau_i2c_port *port = bit->data;
48 if (port->func->acquire) 51 return nouveau_i2c(port)->acquire(port, bit->timeout);
49 port->func->acquire(port); 52}
50 return 0; 53
54static void
55nouveau_i2c_post_xfer(struct i2c_adapter *adap)
56{
57 struct i2c_algo_bit_data *bit = adap->algo_data;
58 struct nouveau_i2c_port *port = bit->data;
59 return nouveau_i2c(port)->release(port);
51} 60}
52 61
53static void 62static void
@@ -82,6 +91,15 @@ nouveau_i2c_getsda(void *data)
82 * base i2c "port" class implementation 91 * base i2c "port" class implementation
83 *****************************************************************************/ 92 *****************************************************************************/
84 93
94int
95_nouveau_i2c_port_fini(struct nouveau_object *object, bool suspend)
96{
97 struct nouveau_i2c_port *port = (void *)object;
98 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
99 nv_ofuncs(pad)->fini(nv_object(pad), suspend);
100 return nouveau_object_fini(&port->base, suspend);
101}
102
85void 103void
86_nouveau_i2c_port_dtor(struct nouveau_object *object) 104_nouveau_i2c_port_dtor(struct nouveau_object *object)
87{ 105{
@@ -98,7 +116,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
98 const struct nouveau_i2c_func *func, 116 const struct nouveau_i2c_func *func,
99 int size, void **pobject) 117 int size, void **pobject)
100{ 118{
101 struct nouveau_device *device = nv_device(parent); 119 struct nouveau_device *device = nv_device(engine);
102 struct nouveau_i2c *i2c = (void *)engine; 120 struct nouveau_i2c *i2c = (void *)engine;
103 struct nouveau_i2c_port *port; 121 struct nouveau_i2c_port *port;
104 int ret; 122 int ret;
@@ -113,8 +131,9 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
113 port->adapter.owner = THIS_MODULE; 131 port->adapter.owner = THIS_MODULE;
114 port->adapter.dev.parent = nv_device_base(device); 132 port->adapter.dev.parent = nv_device_base(device);
115 port->index = index; 133 port->index = index;
134 port->aux = -1;
116 port->func = func; 135 port->func = func;
117 i2c_set_adapdata(&port->adapter, i2c); 136 mutex_init(&port->mutex);
118 137
119 if ( algo == &nouveau_i2c_bit_algo && 138 if ( algo == &nouveau_i2c_bit_algo &&
120 !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) { 139 !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
@@ -128,6 +147,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
128 bit->timeout = usecs_to_jiffies(2200); 147 bit->timeout = usecs_to_jiffies(2200);
129 bit->data = port; 148 bit->data = port;
130 bit->pre_xfer = nouveau_i2c_pre_xfer; 149 bit->pre_xfer = nouveau_i2c_pre_xfer;
150 bit->post_xfer = nouveau_i2c_post_xfer;
131 bit->setsda = nouveau_i2c_setsda; 151 bit->setsda = nouveau_i2c_setsda;
132 bit->setscl = nouveau_i2c_setscl; 152 bit->setscl = nouveau_i2c_setscl;
133 bit->getsda = nouveau_i2c_getsda; 153 bit->getsda = nouveau_i2c_getsda;
@@ -141,7 +161,6 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
141 ret = i2c_add_adapter(&port->adapter); 161 ret = i2c_add_adapter(&port->adapter);
142 } 162 }
143 163
144 /* drop port's i2c subdev refcount, i2c handles this itself */
145 if (ret == 0) 164 if (ret == 0)
146 list_add_tail(&port->head, &i2c->ports); 165 list_add_tail(&port->head, &i2c->ports);
147 return ret; 166 return ret;
@@ -193,6 +212,75 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
193 return NULL; 212 return NULL;
194} 213}
195 214
215static void
216nouveau_i2c_release_pad(struct nouveau_i2c_port *port)
217{
218 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
219 struct nouveau_i2c *i2c = nouveau_i2c(port);
220
221 if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
222 nv_ofuncs(pad)->fini(nv_object(pad), false);
223 wake_up_all(&i2c->wait);
224 }
225}
226
227static int
228nouveau_i2c_try_acquire_pad(struct nouveau_i2c_port *port)
229{
230 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
231
232 if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
233 struct nouveau_object *owner = (void *)pad->port;
234 do {
235 if (owner == (void *)port)
236 return 0;
237 owner = owner->parent;
 238			} while (owner);
239 nouveau_i2c_release_pad(port);
240 return -EBUSY;
241 }
242
243 pad->next = port;
244 nv_ofuncs(pad)->init(nv_object(pad));
245 return 0;
246}
247
248static int
249nouveau_i2c_acquire_pad(struct nouveau_i2c_port *port, unsigned long timeout)
250{
251 struct nouveau_i2c *i2c = nouveau_i2c(port);
252
253 if (timeout) {
254 if (wait_event_timeout(i2c->wait,
255 nouveau_i2c_try_acquire_pad(port) == 0,
256 timeout) == 0)
257 return -EBUSY;
258 } else {
259 wait_event(i2c->wait, nouveau_i2c_try_acquire_pad(port) == 0);
260 }
261
262 return 0;
263}
264
265static void
266nouveau_i2c_release(struct nouveau_i2c_port *port)
267__releases(pad->mutex)
268{
269 nouveau_i2c(port)->release_pad(port);
270 mutex_unlock(&port->mutex);
271}
272
273static int
274nouveau_i2c_acquire(struct nouveau_i2c_port *port, unsigned long timeout)
275__acquires(pad->mutex)
276{
277 int ret;
278 mutex_lock(&port->mutex);
279 if ((ret = nouveau_i2c(port)->acquire_pad(port, timeout)))
280 mutex_unlock(&port->mutex);
281 return ret;
282}
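
Putting the two layers together: nouveau_i2c_acquire() takes the per-port
mutex and then the shared pad. The pad side uses the object usecount as the
arbiter - the first taker routes the pad to itself via init(), and when the
pad is already owned, the requester is let through only if it appears in the
current owner's parent chain (e.g. the master bus beneath an external-encoder
channel); anyone else gets -EBUSY and sleeps on i2c->wait until release_pad()
wakes the queue. A hypothetical consumer, with a bounded wait:

	static int
	example_read_one(struct nouveau_i2c *i2c, struct nouveau_i2c_port *port)
	{
		u8 byte;
		int ret;

		/* give up after ~one transaction time instead of blocking */
		ret = i2c->acquire(port, usecs_to_jiffies(2200));
		if (ret)
			return ret;   /* -EBUSY: pad held by an unrelated port */

		ret = port->func->aux(port, true, 9, 0x0000, &byte, 1);

		i2c->release(port);
		return ret;
	}
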
283
196static int 284static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 285nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 286 struct nouveau_i2c_board_info *info,
@@ -237,11 +325,59 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
237 return -ENODEV; 325 return -ENODEV;
238} 326}
239 327
328static void
329nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
330{
331 struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
332 struct nouveau_i2c_port *port = i2c->find(i2c, index);
333 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
334 if (port && port->aux >= 0)
335 impl->aux_mask(i2c, type, 1 << port->aux, 0);
336}
337
338static void
339nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index)
340{
341 struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
342 struct nouveau_i2c_port *port = i2c->find(i2c, index);
343 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
344 if (port && port->aux >= 0)
345 impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
346}
347
348static void
349nouveau_i2c_intr(struct nouveau_subdev *subdev)
350{
351 struct nouveau_i2c_impl *impl = (void *)nv_oclass(subdev);
352 struct nouveau_i2c *i2c = nouveau_i2c(subdev);
353 struct nouveau_i2c_port *port;
354 u32 hi, lo, rq, tx, e;
355
356 if (impl->aux_stat) {
357 impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
358 if (hi || lo || rq || tx) {
359 list_for_each_entry(port, &i2c->ports, head) {
360 if (e = 0, port->aux < 0)
361 continue;
362
363 if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
364 if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
365 if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
366 if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
367
368 nouveau_event_trigger(i2c->ntfy, e, port->index);
369 }
370 }
371 }
372}
373
240int 374int
241_nouveau_i2c_fini(struct nouveau_object *object, bool suspend) 375_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
242{ 376{
377 struct nouveau_i2c_impl *impl = (void *)nv_oclass(object);
243 struct nouveau_i2c *i2c = (void *)object; 378 struct nouveau_i2c *i2c = (void *)object;
244 struct nouveau_i2c_port *port; 379 struct nouveau_i2c_port *port;
380 u32 mask;
245 int ret; 381 int ret;
246 382
247 list_for_each_entry(port, &i2c->ports, head) { 383 list_for_each_entry(port, &i2c->ports, head) {
@@ -250,6 +386,11 @@ _nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
250 goto fail; 386 goto fail;
251 } 387 }
252 388
389 if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
390 impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
391 impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
392 }
393
253 return nouveau_subdev_fini(&i2c->base, suspend); 394 return nouveau_subdev_fini(&i2c->base, suspend);
254fail: 395fail:
255 list_for_each_entry_continue_reverse(port, &i2c->ports, head) { 396 list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
@@ -290,6 +431,8 @@ _nouveau_i2c_dtor(struct nouveau_object *object)
290 struct nouveau_i2c *i2c = (void *)object; 431 struct nouveau_i2c *i2c = (void *)object;
291 struct nouveau_i2c_port *port, *temp; 432 struct nouveau_i2c_port *port, *temp;
292 433
434 nouveau_event_destroy(&i2c->ntfy);
435
293 list_for_each_entry_safe(port, temp, &i2c->ports, head) { 436 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
294 nouveau_object_ref(NULL, (struct nouveau_object **)&port); 437 nouveau_object_ref(NULL, (struct nouveau_object **)&port);
295 } 438 }
@@ -306,14 +449,14 @@ int
306nouveau_i2c_create_(struct nouveau_object *parent, 449nouveau_i2c_create_(struct nouveau_object *parent,
307 struct nouveau_object *engine, 450 struct nouveau_object *engine,
308 struct nouveau_oclass *oclass, 451 struct nouveau_oclass *oclass,
309 struct nouveau_oclass *sclass,
310 int length, void **pobject) 452 int length, void **pobject)
311{ 453{
454 const struct nouveau_i2c_impl *impl = (void *)oclass;
312 struct nouveau_bios *bios = nouveau_bios(parent); 455 struct nouveau_bios *bios = nouveau_bios(parent);
313 struct nouveau_i2c *i2c; 456 struct nouveau_i2c *i2c;
314 struct nouveau_object *object; 457 struct nouveau_object *object;
315 struct dcb_i2c_entry info; 458 struct dcb_i2c_entry info;
316 int ret, i, j, index = -1; 459 int ret, i, j, index = -1, pad;
317 struct dcb_output outp; 460 struct dcb_output outp;
318 u8 ver, hdr; 461 u8 ver, hdr;
319 u32 data; 462 u32 data;
@@ -324,24 +467,48 @@ nouveau_i2c_create_(struct nouveau_object *parent,
324 if (ret) 467 if (ret)
325 return ret; 468 return ret;
326 469
470 nv_subdev(i2c)->intr = nouveau_i2c_intr;
327 i2c->find = nouveau_i2c_find; 471 i2c->find = nouveau_i2c_find;
328 i2c->find_type = nouveau_i2c_find_type; 472 i2c->find_type = nouveau_i2c_find_type;
473 i2c->acquire_pad = nouveau_i2c_acquire_pad;
474 i2c->release_pad = nouveau_i2c_release_pad;
475 i2c->acquire = nouveau_i2c_acquire;
476 i2c->release = nouveau_i2c_release;
329 i2c->identify = nouveau_i2c_identify; 477 i2c->identify = nouveau_i2c_identify;
478 init_waitqueue_head(&i2c->wait);
330 INIT_LIST_HEAD(&i2c->ports); 479 INIT_LIST_HEAD(&i2c->ports);
331 480
332 while (!dcb_i2c_parse(bios, ++index, &info)) { 481 while (!dcb_i2c_parse(bios, ++index, &info)) {
333 if (info.type == DCB_I2C_UNUSED) 482 if (info.type == DCB_I2C_UNUSED)
334 continue; 483 continue;
335 484
336 oclass = sclass; 485 if (info.share != DCB_I2C_UNUSED) {
486 if (info.type == DCB_I2C_NVIO_AUX)
487 pad = info.drive;
488 else
489 pad = info.share;
490 oclass = impl->pad_s;
491 } else {
492 pad = 0x100 + info.drive;
493 oclass = impl->pad_x;
494 }
495
496 ret = nouveau_object_ctor(NULL, *pobject, oclass,
497 NULL, pad, &parent);
498 if (ret < 0)
499 continue;
500
501 oclass = impl->sclass;
337 do { 502 do {
338 ret = -EINVAL; 503 ret = -EINVAL;
339 if (oclass->handle == info.type) { 504 if (oclass->handle == info.type) {
340 ret = nouveau_object_ctor(*pobject, *pobject, 505 ret = nouveau_object_ctor(parent, *pobject,
341 oclass, &info, 506 oclass, &info,
342 index, &object); 507 index, &object);
343 } 508 }
344 } while (ret && (++oclass)->handle); 509 } while (ret && (++oclass)->handle);
510
511 nouveau_object_ref(NULL, &parent);
345 } 512 }
346 513
347 /* in addition to the busses specified in the i2c table, there 514 /* in addition to the busses specified in the i2c table, there
@@ -380,5 +547,28 @@ nouveau_i2c_create_(struct nouveau_object *parent,
380 } 547 }
381 } 548 }
382 549
550 ret = nouveau_event_create(4, index, &i2c->ntfy);
551 if (ret)
552 return ret;
553
554 i2c->ntfy->priv = i2c;
555 i2c->ntfy->enable = nouveau_i2c_intr_enable;
556 i2c->ntfy->disable = nouveau_i2c_intr_disable;
557 return 0;
558}
559
560int
561_nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
562 struct nouveau_oclass *oclass, void *data, u32 size,
563 struct nouveau_object **pobject)
564{
565 struct nouveau_i2c *i2c;
566 int ret;
567
568 ret = nouveau_i2c_create(parent, engine, oclass, &i2c);
569 *pobject = nv_object(i2c);
570 if (ret)
571 return ret;
572
383 return 0; 573 return 0;
384} 574}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index a6e72d3b06b5..813ffc96e864 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "subdev/i2c.h" 25#include "priv.h"
26 26
27#ifdef CONFIG_NOUVEAU_I2C_INTERNAL 27#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
28#define T_TIMEOUT 2200000 28#define T_TIMEOUT 2200000
@@ -187,8 +187,9 @@ i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
187 struct i2c_msg *msg = msgs; 187 struct i2c_msg *msg = msgs;
188 int ret = 0, mcnt = num; 188 int ret = 0, mcnt = num;
189 189
190 if (port->func->acquire) 190 ret = nouveau_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
191 port->func->acquire(port); 191 if (ret)
192 return ret;
192 193
193 while (!ret && mcnt--) { 194 while (!ret && mcnt--) {
194 u8 remaining = msg->len; 195 u8 remaining = msg->len;
@@ -210,6 +211,7 @@ i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
210 } 211 }
211 212
212 i2c_stop(port); 213 i2c_stop(port);
214 nouveau_i2c(port)->release(port);
213 return (ret < 0) ? ret : num; 215 return (ret < 0) ? ret : num;
214} 216}
215#else 217#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
index 860d5d2365da..b1725bdea967 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -22,9 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/i2c.h>
26#include <subdev/vga.h> 25#include <subdev/vga.h>
27 26
27#include "priv.h"
28
28struct nv04_i2c_priv { 29struct nv04_i2c_priv {
29 struct nouveau_i2c base; 30 struct nouveau_i2c base;
30}; 31};
@@ -115,29 +116,15 @@ nv04_i2c_sclass[] = {
115 {} 116 {}
116}; 117};
117 118
118static int 119struct nouveau_oclass *
119nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 120nv04_i2c_oclass = &(struct nouveau_i2c_impl) {
120 struct nouveau_oclass *oclass, void *data, u32 size, 121 .base.handle = NV_SUBDEV(I2C, 0x04),
121 struct nouveau_object **pobject) 122 .base.ofuncs = &(struct nouveau_ofuncs) {
122{ 123 .ctor = _nouveau_i2c_ctor,
123 struct nv04_i2c_priv *priv;
124 int ret;
125
126 ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
127 *pobject = nv_object(priv);
128 if (ret)
129 return ret;
130
131 return 0;
132}
133
134struct nouveau_oclass
135nv04_i2c_oclass = {
136 .handle = NV_SUBDEV(I2C, 0x04),
137 .ofuncs = &(struct nouveau_ofuncs) {
138 .ctor = nv04_i2c_ctor,
139 .dtor = _nouveau_i2c_dtor, 124 .dtor = _nouveau_i2c_dtor,
140 .init = _nouveau_i2c_init, 125 .init = _nouveau_i2c_init,
141 .fini = _nouveau_i2c_fini, 126 .fini = _nouveau_i2c_fini,
142 }, 127 },
143}; 128 .sclass = nv04_i2c_sclass,
129 .pad_x = &nv04_i2c_pad_oclass,
130}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
index 0c2655a03bb4..f16c87ce5ba1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -22,9 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/i2c.h>
26#include <subdev/vga.h> 25#include <subdev/vga.h>
27 26
27#include "priv.h"
28
28struct nv4e_i2c_priv { 29struct nv4e_i2c_priv {
29 struct nouveau_i2c base; 30 struct nouveau_i2c base;
30}; 31};
@@ -107,29 +108,15 @@ nv4e_i2c_sclass[] = {
107 {} 108 {}
108}; 109};
109 110
110static int 111struct nouveau_oclass *
111nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 112nv4e_i2c_oclass = &(struct nouveau_i2c_impl) {
112 struct nouveau_oclass *oclass, void *data, u32 size, 113 .base.handle = NV_SUBDEV(I2C, 0x4e),
113 struct nouveau_object **pobject) 114 .base.ofuncs = &(struct nouveau_ofuncs) {
114{ 115 .ctor = _nouveau_i2c_ctor,
115 struct nv4e_i2c_priv *priv;
116 int ret;
117
118 ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
119 *pobject = nv_object(priv);
120 if (ret)
121 return ret;
122
123 return 0;
124}
125
126struct nouveau_oclass
127nv4e_i2c_oclass = {
128 .handle = NV_SUBDEV(I2C, 0x4e),
129 .ofuncs = &(struct nouveau_ofuncs) {
130 .ctor = nv4e_i2c_ctor,
131 .dtor = _nouveau_i2c_dtor, 116 .dtor = _nouveau_i2c_dtor,
132 .init = _nouveau_i2c_init, 117 .init = _nouveau_i2c_init,
133 .fini = _nouveau_i2c_fini, 118 .fini = _nouveau_i2c_fini,
134 }, 119 },
135}; 120 .sclass = nv4e_i2c_sclass,
121 .pad_x = &nv04_i2c_pad_oclass,
122}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
index a8d67a287704..7b8756d4df08 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -121,29 +121,15 @@ nv50_i2c_sclass[] = {
121 {} 121 {}
122}; 122};
123 123
124static int 124struct nouveau_oclass *
125nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 125nv50_i2c_oclass = &(struct nouveau_i2c_impl) {
126 struct nouveau_oclass *oclass, void *data, u32 size, 126 .base.handle = NV_SUBDEV(I2C, 0x50),
127 struct nouveau_object **pobject) 127 .base.ofuncs = &(struct nouveau_ofuncs) {
128{ 128 .ctor = _nouveau_i2c_ctor,
129 struct nv50_i2c_priv *priv;
130 int ret;
131
132 ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
133 *pobject = nv_object(priv);
134 if (ret)
135 return ret;
136
137 return 0;
138}
139
140struct nouveau_oclass
141nv50_i2c_oclass = {
142 .handle = NV_SUBDEV(I2C, 0x50),
143 .ofuncs = &(struct nouveau_ofuncs) {
144 .ctor = nv50_i2c_ctor,
145 .dtor = _nouveau_i2c_dtor, 129 .dtor = _nouveau_i2c_dtor,
146 .init = _nouveau_i2c_init, 130 .init = _nouveau_i2c_init,
147 .fini = _nouveau_i2c_fini, 131 .fini = _nouveau_i2c_fini,
148 }, 132 },
149}; 133 .sclass = nv50_i2c_sclass,
134 .pad_x = &nv04_i2c_pad_oclass,
135}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
index 4e5ba48ebf5a..5d2a77421c74 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -1,7 +1,7 @@
1#ifndef __NV50_I2C_H__ 1#ifndef __NV50_I2C_H__
2#define __NV50_I2C_H__ 2#define __NV50_I2C_H__
3 3
4#include <subdev/i2c.h> 4#include "priv.h"
5 5
6struct nv50_i2c_priv { 6struct nv50_i2c_priv {
7 struct nouveau_i2c base; 7 struct nouveau_i2c base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index df6d3e4b68be..f59c3a255462 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -24,6 +24,36 @@
24 24
25#include "nv50.h" 25#include "nv50.h"
26 26
27void
28nv94_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
29{
30 u32 intr = nv_rd32(i2c, 0x00e06c);
31 u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
32 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
33 if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
34 if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
35 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
36 if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
37 }
38 nv_wr32(i2c, 0x00e06c, intr);
39}
40
41void
42nv94_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
43{
44 u32 temp = nv_rd32(i2c, 0x00e068), i;
45 for (i = 0; i < 8; i++) {
46 if (mask & (1 << i)) {
47 if (!(data & (1 << i))) {
48 temp &= ~(type << (i * 4));
49 continue;
50 }
51 temp |= type << (i * 4);
52 }
53 }
54 nv_wr32(i2c, 0x00e068, temp);
55}
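
nv94_aux_stat() above de-interleaves a register that packs four status bits
per AUX channel into consecutive nibbles: within nibble i, 1 << (i * 4) is
hotplug, 2 << (i * 4) unplug, 4 << (i * 4) sink IRQ and 8 << (i * 4)
transaction done; nv94_aux_mask() writes the same layout back as an enable
mask. A worked example of the unpack, assuming only the layout the code
itself describes:

	/* stat = 0x00000041: channel 0 nibble = 0x1 (plug),
	 *                    channel 1 nibble = 0x4 (irq) */
	u32 stat = 0x00000041, hi = 0, lo = 0, rq = 0, tx = 0, i;
	for (i = 0; i < 8; i++) {
		if (stat & (1 << (i * 4))) hi |= 1 << i;
		if (stat & (2 << (i * 4))) lo |= 1 << i;
		if (stat & (4 << (i * 4))) rq |= 1 << i;
		if (stat & (8 << (i * 4))) tx |= 1 << i;
	}
	/* result: hi == 0x01, rq == 0x02, lo == tx == 0 */
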
56
27#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args) 57#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
28#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args) 58#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
29 59
@@ -69,7 +99,8 @@ auxch_init(struct nouveau_i2c *aux, int ch)
69} 99}
70 100
71int 101int
72nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size) 102nv94_aux(struct nouveau_i2c_port *base, bool retry,
103 u8 type, u32 addr, u8 *data, u8 size)
73{ 104{
74 struct nouveau_i2c *aux = nouveau_i2c(base); 105 struct nouveau_i2c *aux = nouveau_i2c(base);
75 struct nv50_i2c_port *port = (void *)base; 106 struct nv50_i2c_port *port = (void *)base;
@@ -105,9 +136,8 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
105 ctrl |= size - 1; 136 ctrl |= size - 1;
106 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr); 137 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
107 138
108 /* retry transaction a number of times on failure... */ 139 /* (maybe) retry transaction a number of times on failure... */
109 ret = -EREMOTEIO; 140 for (retries = 0; !ret && retries < 32; retries++) {
110 for (retries = 0; retries < 32; retries++) {
111 /* reset, and delay a while if this is a retry */ 141 /* reset, and delay a while if this is a retry */
112 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl); 142 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
113 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl); 143 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
@@ -123,16 +153,21 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
123 udelay(1); 153 udelay(1);
124 if (!timeout--) { 154 if (!timeout--) {
125 AUX_ERR("tx req timeout 0x%08x\n", ctrl); 155 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
156 ret = -EIO;
126 goto out; 157 goto out;
127 } 158 }
128 } while (ctrl & 0x00010000); 159 } while (ctrl & 0x00010000);
160 ret = 1;
129 161
130 /* read status, and check if transaction completed ok */ 162 /* read status, and check if transaction completed ok */
131 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0); 163 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
132 if (!(stat & 0x000f0f00)) { 164 if ((stat & 0x000f0000) == 0x00080000 ||
133 ret = 0; 165 (stat & 0x000f0000) == 0x00020000)
134 break; 166 ret = retry ? 0 : 1;
135 } 167 if ((stat & 0x00000100))
168 ret = -ETIMEDOUT;
169 if ((stat & 0x00000e00))
170 ret = -EIO;
136 171
137 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat); 172 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
138 } 173 }
@@ -147,29 +182,11 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
147 182
148out: 183out:
149 auxch_fini(aux, ch); 184 auxch_fini(aux, ch);
150 return ret; 185 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
151}
152
153void
154nv94_i2c_acquire(struct nouveau_i2c_port *base)
155{
156 struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
157 struct nv50_i2c_port *port = (void *)base;
158 if (port->ctrl) {
159 nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
160 nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
161 }
162}
163
164void
165nv94_i2c_release(struct nouveau_i2c_port *base)
166{
167} 186}
168 187
169static const struct nouveau_i2c_func 188static const struct nouveau_i2c_func
170nv94_i2c_func = { 189nv94_i2c_func = {
171 .acquire = nv94_i2c_acquire,
172 .release = nv94_i2c_release,
173 .drive_scl = nv50_i2c_drive_scl, 190 .drive_scl = nv50_i2c_drive_scl,
174 .drive_sda = nv50_i2c_drive_sda, 191 .drive_sda = nv50_i2c_drive_sda,
175 .sense_scl = nv50_i2c_sense_scl, 192 .sense_scl = nv50_i2c_sense_scl,
@@ -206,8 +223,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
206 223
207static const struct nouveau_i2c_func 224static const struct nouveau_i2c_func
208nv94_aux_func = { 225nv94_aux_func = {
209 .acquire = nv94_i2c_acquire,
210 .release = nv94_i2c_release,
211 .aux = nv94_aux, 226 .aux = nv94_aux,
212}; 227};
213 228
@@ -227,6 +242,7 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
227 if (ret) 242 if (ret)
228 return ret; 243 return ret;
229 244
245 port->base.aux = info->drive;
230 port->addr = info->drive; 246 port->addr = info->drive;
231 if (info->share != DCB_I2C_UNUSED) { 247 if (info->share != DCB_I2C_UNUSED) {
232 port->ctrl = 0x00e500 + (info->drive * 0x50); 248 port->ctrl = 0x00e500 + (info->drive * 0x50);
@@ -257,29 +273,19 @@ nv94_i2c_sclass[] = {
257 {} 273 {}
258}; 274};
259 275
260static int 276struct nouveau_oclass *
261nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 277nv94_i2c_oclass = &(struct nouveau_i2c_impl) {
262 struct nouveau_oclass *oclass, void *data, u32 size, 278 .base.handle = NV_SUBDEV(I2C, 0x94),
263 struct nouveau_object **pobject) 279 .base.ofuncs = &(struct nouveau_ofuncs) {
264{ 280 .ctor = _nouveau_i2c_ctor,
265 struct nv50_i2c_priv *priv;
266 int ret;
267
268 ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
269 *pobject = nv_object(priv);
270 if (ret)
271 return ret;
272
273 return 0;
274}
275
276struct nouveau_oclass
277nv94_i2c_oclass = {
278 .handle = NV_SUBDEV(I2C, 0x94),
279 .ofuncs = &(struct nouveau_ofuncs) {
280 .ctor = nv94_i2c_ctor,
281 .dtor = _nouveau_i2c_dtor, 281 .dtor = _nouveau_i2c_dtor,
282 .init = _nouveau_i2c_init, 282 .init = _nouveau_i2c_init,
283 .fini = _nouveau_i2c_fini, 283 .fini = _nouveau_i2c_fini,
284 }, 284 },
285}; 285 .sclass = nv94_i2c_sclass,
286 .pad_x = &nv04_i2c_pad_oclass,
287 .pad_s = &nv94_i2c_pad_oclass,
288 .aux = 4,
289 .aux_stat = nv94_aux_stat,
290 .aux_mask = nv94_aux_mask,
291}.base;
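
Note the changed contract of nv94_aux() above: instead of a bare 0 or
-EREMOTEIO it now returns the raw AUX reply nibble ((stat & 0x000f0000) >> 16,
0 meaning ACK), and the new retry flag decides whether the 0x8/0x2 statuses -
which look like the hardware's defer/receive-retry conditions - are retried
internally (retry=true, up to 32 attempts) or surfaced to the caller. A
hedged sketch of a caller that wants to see non-ACK replies:

	int ret = port->func->aux(port, false, 9, addr, &byte, 1);
	if (ret < 0)
		return ret;      /* -EIO / -ETIMEDOUT: transfer failed */
	if (ret != 0)
		return -EAGAIN;  /* non-zero reply nibble: not an ACK */
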
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index 29967d30f97c..364ddb1c5f03 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -42,8 +42,6 @@ nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
42 42
43static const struct nouveau_i2c_func 43static const struct nouveau_i2c_func
44nvd0_i2c_func = { 44nvd0_i2c_func = {
45 .acquire = nv94_i2c_acquire,
46 .release = nv94_i2c_release,
47 .drive_scl = nv50_i2c_drive_scl, 45 .drive_scl = nv50_i2c_drive_scl,
48 .drive_sda = nv50_i2c_drive_sda, 46 .drive_sda = nv50_i2c_drive_sda,
49 .sense_scl = nvd0_i2c_sense_scl, 47 .sense_scl = nvd0_i2c_sense_scl,
@@ -75,7 +73,7 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
75 return 0; 73 return 0;
76} 74}
77 75
78static struct nouveau_oclass 76struct nouveau_oclass
79nvd0_i2c_sclass[] = { 77nvd0_i2c_sclass[] = {
80 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT), 78 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
81 .ofuncs = &(struct nouveau_ofuncs) { 79 .ofuncs = &(struct nouveau_ofuncs) {
@@ -96,29 +94,19 @@ nvd0_i2c_sclass[] = {
96 {} 94 {}
97}; 95};
98 96
99static int 97struct nouveau_oclass *
100nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 98nvd0_i2c_oclass = &(struct nouveau_i2c_impl) {
101 struct nouveau_oclass *oclass, void *data, u32 size, 99 .base.handle = NV_SUBDEV(I2C, 0xd0),
102 struct nouveau_object **pobject) 100 .base.ofuncs = &(struct nouveau_ofuncs) {
103{ 101 .ctor = _nouveau_i2c_ctor,
104 struct nv50_i2c_priv *priv;
105 int ret;
106
107 ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
108 *pobject = nv_object(priv);
109 if (ret)
110 return ret;
111
112 return 0;
113}
114
115struct nouveau_oclass
116nvd0_i2c_oclass = {
117 .handle = NV_SUBDEV(I2C, 0xd0),
118 .ofuncs = &(struct nouveau_ofuncs) {
119 .ctor = nvd0_i2c_ctor,
120 .dtor = _nouveau_i2c_dtor, 102 .dtor = _nouveau_i2c_dtor,
121 .init = _nouveau_i2c_init, 103 .init = _nouveau_i2c_init,
122 .fini = _nouveau_i2c_fini, 104 .fini = _nouveau_i2c_fini,
123 }, 105 },
124}; 106 .sclass = nvd0_i2c_sclass,
107 .pad_x = &nv04_i2c_pad_oclass,
108 .pad_s = &nv94_i2c_pad_oclass,
109 .aux = 4,
110 .aux_stat = nv94_aux_stat,
111 .aux_mask = nv94_aux_mask,
112}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
new file mode 100644
index 000000000000..cae77e1ad8dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27static void
28nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
29{
30 u32 intr = nv_rd32(i2c, 0x00dc60);
31 u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
32 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
33 if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
34 if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
35 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
36 if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
37 }
38 nv_wr32(i2c, 0x00dc60, intr);
39}
40
41static void
42nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
43{
44 u32 temp = nv_rd32(i2c, 0x00dc68), i;
45 for (i = 0; i < 8; i++) {
46 if (mask & (1 << i)) {
47 if (!(data & (1 << i))) {
48 temp &= ~(type << (i * 4));
49 continue;
50 }
51 temp |= type << (i * 4);
52 }
53 }
54 nv_wr32(i2c, 0x00dc68, temp);
55}
56
57struct nouveau_oclass *
58nve0_i2c_oclass = &(struct nouveau_i2c_impl) {
59 .base.handle = NV_SUBDEV(I2C, 0xe0),
60 .base.ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = _nouveau_i2c_ctor,
62 .dtor = _nouveau_i2c_dtor,
63 .init = _nouveau_i2c_init,
64 .fini = _nouveau_i2c_fini,
65 },
66 .sclass = nvd0_i2c_sclass,
67 .pad_x = &nv04_i2c_pad_oclass,
68 .pad_s = &nv94_i2c_pad_oclass,
69 .aux = 4,
70 .aux_stat = nve0_aux_stat,
71 .aux_mask = nve0_aux_mask,
72}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c
new file mode 100644
index 000000000000..e9e412477c12
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c
@@ -0,0 +1,84 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "pad.h"
26
27int
28_nvkm_i2c_pad_fini(struct nouveau_object *object, bool suspend)
29{
30 struct nvkm_i2c_pad *pad = (void *)object;
31 DBG("-> NULL\n");
32 pad->port = NULL;
33 return nouveau_object_fini(&pad->base, suspend);
34}
35
36int
37_nvkm_i2c_pad_init(struct nouveau_object *object)
38{
39 struct nvkm_i2c_pad *pad = (void *)object;
40 DBG("-> PORT:%02x\n", pad->next->index);
41 pad->port = pad->next;
42 return nouveau_object_init(&pad->base);
43}
44
45int
46nvkm_i2c_pad_create_(struct nouveau_object *parent,
47 struct nouveau_object *engine,
48 struct nouveau_oclass *oclass, int index,
49 int size, void **pobject)
50{
51 struct nouveau_i2c *i2c = (void *)engine;
52 struct nouveau_i2c_port *port;
53 struct nvkm_i2c_pad *pad;
54 int ret;
55
56 list_for_each_entry(port, &i2c->ports, head) {
57 pad = nvkm_i2c_pad(port);
58 if (pad->index == index) {
59 atomic_inc(&nv_object(pad)->refcount);
60 *pobject = pad;
61 return 1;
62 }
63 }
64
65 ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
66 pad = *pobject;
67 if (ret)
68 return ret;
69
70 pad->index = index;
71 return 0;
72}
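
nvkm_i2c_pad_create_() above is what makes pads shareable objects: before
constructing a new one it scans the registered ports, and if some port
already sits on a pad with the same index it takes an extra reference and
returns 1, which the creation loop in base.c treats as success and reuses as
the new port's parent. The index spaces match the MSG macro in pad.h: shared
pads use the raw DCB share/drive value ('S'), exclusive ones 0x100 + drive
('X'). Sketched usage, assuming a port was already registered beneath the
first pad:

	struct nvkm_i2c_pad *pad_a, *pad_b;
	nvkm_i2c_pad_create(parent, engine, oclass, 2, &pad_a); /* ret 0: new */
	nvkm_i2c_pad_create(parent, engine, oclass, 2, &pad_b); /* ret 1: reused */
	/* pad_b == pad_a, with the refcount covering both users */
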
73
74int
75_nvkm_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
76 struct nouveau_oclass *oclass, void *data, u32 index,
77 struct nouveau_object **pobject)
78{
79 struct nvkm_i2c_pad *pad;
80 int ret;
81 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
82 *pobject = nv_object(pad);
83 return ret;
84}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h
new file mode 100644
index 000000000000..452ac10c3004
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h
@@ -0,0 +1,58 @@
1#ifndef __NVKM_I2C_PAD_H__
2#define __NVKM_I2C_PAD_H__
3
4#include "priv.h"
5
6struct nvkm_i2c_pad {
7 struct nouveau_object base;
8 int index;
9 struct nouveau_i2c_port *port;
10 struct nouveau_i2c_port *next;
11};
12
13static inline struct nvkm_i2c_pad *
14nvkm_i2c_pad(struct nouveau_i2c_port *port)
15{
16 struct nouveau_object *pad = nv_object(port);
17 while (pad->parent)
18 pad = pad->parent;
19 return (void *)pad;
20}
21
22#define nvkm_i2c_pad_create(p,e,o,i,d) \
23 nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
24#define nvkm_i2c_pad_destroy(p) ({ \
25 struct nvkm_i2c_pad *_p = (p); \
26 _nvkm_i2c_pad_dtor(nv_object(_p)); \
27})
28#define nvkm_i2c_pad_init(p) ({ \
29 struct nvkm_i2c_pad *_p = (p); \
30 _nvkm_i2c_pad_init(nv_object(_p)); \
31})
32#define nvkm_i2c_pad_fini(p,s) ({ \
33 struct nvkm_i2c_pad *_p = (p); \
34 _nvkm_i2c_pad_fini(nv_object(_p), (s)); \
35})
36
37int nvkm_i2c_pad_create_(struct nouveau_object *, struct nouveau_object *,
38 struct nouveau_oclass *, int index, int, void **);
39
40int _nvkm_i2c_pad_ctor(struct nouveau_object *, struct nouveau_object *,
41 struct nouveau_oclass *, void *, u32,
42 struct nouveau_object **);
43#define _nvkm_i2c_pad_dtor nouveau_object_destroy
44int _nvkm_i2c_pad_init(struct nouveau_object *);
45int _nvkm_i2c_pad_fini(struct nouveau_object *, bool);
46
47#ifndef MSG
48#define MSG(l,f,a...) do { \
49 struct nvkm_i2c_pad *_pad = (void *)pad; \
50 nv_##l(nv_object(_pad)->engine, "PAD:%c:%02x: "f, \
51 _pad->index >= 0x100 ? 'X' : 'S', \
52 _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
 53} while (0)
54#define DBG(f,a...) MSG(debug, f, ##a)
55#define ERR(f,a...) MSG(error, f, ##a)
56#endif
57
58#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c
new file mode 100644
index 000000000000..2c4b61296dd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "pad.h"
26
27struct nouveau_oclass
28nv04_i2c_pad_oclass = {
29 .ofuncs = &(struct nouveau_ofuncs) {
30 .ctor = _nvkm_i2c_pad_ctor,
31 .dtor = _nvkm_i2c_pad_dtor,
32 .init = _nvkm_i2c_pad_init,
33 .fini = _nvkm_i2c_pad_fini,
34 },
35};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c
new file mode 100644
index 000000000000..0dc6753014f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "pad.h"
26
27struct nv94_i2c_pad {
28 struct nvkm_i2c_pad base;
29 int addr;
30};
31
32static int
33nv94_i2c_pad_fini(struct nouveau_object *object, bool suspend)
34{
35 struct nouveau_i2c *i2c = (void *)object->engine;
36 struct nv94_i2c_pad *pad = (void *)object;
37 nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
38 return nvkm_i2c_pad_fini(&pad->base, suspend);
39}
40
41static int
42nv94_i2c_pad_init(struct nouveau_object *object)
43{
44 struct nouveau_i2c *i2c = (void *)object->engine;
45 struct nv94_i2c_pad *pad = (void *)object;
46
47 switch (nv_oclass(pad->base.next)->handle) {
48 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
49 nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
50 break;
51 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
52 default:
53 nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
54 break;
55 }
56
57 nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
58 return nvkm_i2c_pad_init(&pad->base);
59}
60
61static int
62nv94_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 index,
64 struct nouveau_object **pobject)
65{
66 struct nv94_i2c_pad *pad;
67 int ret;
68
69 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
70 *pobject = nv_object(pad);
71 if (ret)
72 return ret;
73
 74	pad->addr = index * 0x50;
75 return 0;
76}
77
78struct nouveau_oclass
79nv94_i2c_pad_oclass = {
80 .ofuncs = &(struct nouveau_ofuncs) {
81 .ctor = nv94_i2c_pad_ctor,
82 .dtor = _nvkm_i2c_pad_dtor,
83 .init = nv94_i2c_pad_init,
84 .fini = nv94_i2c_pad_fini,
85 },
86};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h
new file mode 100644
index 000000000000..a8ff6e077af5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h
@@ -0,0 +1,15 @@
1#ifndef __NVKM_I2C_PORT_H__
2#define __NVKM_I2C_PORT_H__
3
4#include "priv.h"
5
6#ifndef MSG
7#define MSG(l,f,a...) do { \
8 struct nouveau_i2c_port *_port = (void *)port; \
9 nv_##l(nv_object(_port)->engine, "PORT:%02x: "f, _port->index, ##a); \
 10} while (0)
11#define DBG(f,a...) MSG(debug, f, ##a)
12#define ERR(f,a...) MSG(error, f, ##a)
13#endif
14
15#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
new file mode 100644
index 000000000000..780090b6425a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
@@ -0,0 +1,85 @@
1#ifndef __NVKM_I2C_H__
2#define __NVKM_I2C_H__
3
4#include <subdev/i2c.h>
5
6extern struct nouveau_oclass nv04_i2c_pad_oclass;
7extern struct nouveau_oclass nv94_i2c_pad_oclass;
8
9#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
10 nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
11 sizeof(**d), (void **)d)
12#define nouveau_i2c_port_destroy(p) ({ \
13 struct nouveau_i2c_port *port = (p); \
14 _nouveau_i2c_port_dtor(nv_object(i2c)); \
15})
16#define nouveau_i2c_port_init(p) \
17 nouveau_object_init(&(p)->base)
18#define nouveau_i2c_port_fini(p,s) \
19 nouveau_object_fini(&(p)->base, (s))
20
21int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
22 struct nouveau_oclass *, u8,
23 const struct i2c_algorithm *,
24 const struct nouveau_i2c_func *,
25 int, void **);
26void _nouveau_i2c_port_dtor(struct nouveau_object *);
27#define _nouveau_i2c_port_init nouveau_object_init
28int _nouveau_i2c_port_fini(struct nouveau_object *, bool);
29
30#define nouveau_i2c_create(p,e,o,d) \
31 nouveau_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
32#define nouveau_i2c_destroy(p) ({ \
33 struct nouveau_i2c *i2c = (p); \
34 _nouveau_i2c_dtor(nv_object(i2c)); \
35})
36#define nouveau_i2c_init(p) ({ \
37 struct nouveau_i2c *i2c = (p); \
38 _nouveau_i2c_init(nv_object(i2c)); \
39})
40#define nouveau_i2c_fini(p,s) ({ \
41 struct nouveau_i2c *i2c = (p); \
42 _nouveau_i2c_fini(nv_object(i2c), (s)); \
43})
44
45int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
46 struct nouveau_oclass *, int, void **);
47int _nouveau_i2c_ctor(struct nouveau_object *, struct nouveau_object *,
48 struct nouveau_oclass *, void *, u32,
49 struct nouveau_object **);
50void _nouveau_i2c_dtor(struct nouveau_object *);
51int _nouveau_i2c_init(struct nouveau_object *);
52int _nouveau_i2c_fini(struct nouveau_object *, bool);
53
54extern struct nouveau_oclass nouveau_anx9805_sclass[];
55extern struct nouveau_oclass nvd0_i2c_sclass[];
56
57extern const struct i2c_algorithm nouveau_i2c_bit_algo;
58extern const struct i2c_algorithm nouveau_i2c_aux_algo;
59
60struct nouveau_i2c_impl {
61 struct nouveau_oclass base;
62
63 /* supported i2c port classes */
64 struct nouveau_oclass *sclass;
65 struct nouveau_oclass *pad_x;
66 struct nouveau_oclass *pad_s;
67
68 /* number of native dp aux channels present */
69 int aux;
70
71 /* read and ack pending interrupts, returning only data
72 * for ports that have not been masked off, while still
73 * performing the ack for anything that was pending.
74 */
75 void (*aux_stat)(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
76
77 /* mask on/off interrupt types for a given set of auxch
78 */
79 void (*aux_mask)(struct nouveau_i2c *, u32, u32, u32);
80};
81
82void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
83void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
84
85#endif
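
The aux field in nouveau_i2c_impl above sizes the shutdown mask built by
_nouveau_i2c_fini(): with the four native AUX channels the nv94/nvd0/nve0
implementations declare, (1 << impl->aux) - 1 gives 0x0f, i.e. every channel,
for both the aux_mask() disable and the throwaway aux_stat() ack. The
arithmetic in isolation:

	int aux = 4;                  /* channels declared by the impl */
	u32 mask = (1 << aux) - 1;    /* 0x0f: one bit per channel */
	impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);      /* all off */
	impl->aux_stat(i2c, &mask, &mask, &mask, &mask); /* ack leftovers */
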
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c
new file mode 100644
index 000000000000..245f0ebaa6af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <subdev/ibus.h>
24#include <subdev/timer.h>
25
26struct gk20a_ibus_priv {
27 struct nouveau_ibus base;
28};
29
30static void
31gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
32{
33 nv_mask(priv, 0x137250, 0x3f, 0);
34
35 nv_mask(priv, 0x000200, 0x20, 0);
36 usleep_range(20, 30);
37 nv_mask(priv, 0x000200, 0x20, 0x20);
38
39 nv_wr32(priv, 0x12004c, 0x4);
40 nv_wr32(priv, 0x122204, 0x2);
41 nv_rd32(priv, 0x122204);
42}
43
44static void
45gk20a_ibus_intr(struct nouveau_subdev *subdev)
46{
47 struct gk20a_ibus_priv *priv = (void *)subdev;
48 u32 status0 = nv_rd32(priv, 0x120058);
49
50 if (status0 & 0x7) {
51 nv_debug(priv, "resetting priv ring\n");
52 gk20a_ibus_init_priv_ring(priv);
53 }
54
55 /* Acknowledge interrupt */
56 nv_mask(priv, 0x12004c, 0x2, 0x2);
57
58 if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
59 nv_warn(priv, "timeout waiting for ringmaster ack\n");
60}
61
62static int
63gk20a_ibus_init(struct nouveau_object *object)
64{
65 struct gk20a_ibus_priv *priv = (void *)object;
66 int ret;
67
68 ret = _nouveau_ibus_init(object);
69 if (ret)
70 return ret;
71
72 gk20a_ibus_init_priv_ring(priv);
73
74 return 0;
75}
76
77static int
78gk20a_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
79 struct nouveau_oclass *oclass, void *data, u32 size,
80 struct nouveau_object **pobject)
81{
82 struct gk20a_ibus_priv *priv;
83 int ret;
84
85 ret = nouveau_ibus_create(parent, engine, oclass, &priv);
86 *pobject = nv_object(priv);
87 if (ret)
88 return ret;
89
90 nv_subdev(priv)->intr = gk20a_ibus_intr;
91 return 0;
92}
93
94struct nouveau_oclass
95gk20a_ibus_oclass = {
96 .handle = NV_SUBDEV(IBUS, 0xea),
97 .ofuncs = &(struct nouveau_ofuncs) {
98 .ctor = gk20a_ibus_ctor,
99 .dtor = _nouveau_ibus_dtor,
100 .init = gk20a_ibus_init,
101 .fini = _nouveau_ibus_fini,
102 },
103};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index e8822a934c48..9ca93e2718f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -26,6 +26,7 @@
26 26
27const struct nouveau_mc_intr 27const struct nouveau_mc_intr
28nv50_mc_intr[] = { 28nv50_mc_intr[] = {
29 { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
29 { 0x00000001, NVDEV_ENGINE_MPEG }, 30 { 0x00000001, NVDEV_ENGINE_MPEG },
30 { 0x00000100, NVDEV_ENGINE_FIFO }, 31 { 0x00000100, NVDEV_ENGINE_FIFO },
31 { 0x00001000, NVDEV_ENGINE_GR }, 32 { 0x00001000, NVDEV_ENGINE_GR },
@@ -33,8 +34,8 @@ nv50_mc_intr[] = {
33 { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */ 34 { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
34 { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */ 35 { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
35 { 0x00100000, NVDEV_SUBDEV_TIMER }, 36 { 0x00100000, NVDEV_SUBDEV_TIMER },
36 { 0x00200000, NVDEV_SUBDEV_GPIO }, 37 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
37 { 0x04000000, NVDEV_ENGINE_DISP }, 38 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
38 { 0x10000000, NVDEV_SUBDEV_BUS }, 39 { 0x10000000, NVDEV_SUBDEV_BUS },
39 { 0x80000000, NVDEV_ENGINE_SW }, 40 { 0x80000000, NVDEV_ENGINE_SW },
40 { 0x0002d101, NVDEV_SUBDEV_FB }, 41 { 0x0002d101, NVDEV_SUBDEV_FB },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index f8a6f18e2d34..3c76d9038f38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -26,6 +26,7 @@
26 26
27static const struct nouveau_mc_intr 27static const struct nouveau_mc_intr
28nv98_mc_intr[] = { 28nv98_mc_intr[] = {
29 { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
29 { 0x00000001, NVDEV_ENGINE_PPP }, 30 { 0x00000001, NVDEV_ENGINE_PPP },
30 { 0x00000100, NVDEV_ENGINE_FIFO }, 31 { 0x00000100, NVDEV_ENGINE_FIFO },
31 { 0x00001000, NVDEV_ENGINE_GR }, 32 { 0x00001000, NVDEV_ENGINE_GR },
@@ -35,9 +36,9 @@ nv98_mc_intr[] = {
35 { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */ 36 { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
36 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ 37 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
37 { 0x00100000, NVDEV_SUBDEV_TIMER }, 38 { 0x00100000, NVDEV_SUBDEV_TIMER },
38 { 0x00200000, NVDEV_SUBDEV_GPIO }, 39 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
40 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
39 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ 41 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
40 { 0x04000000, NVDEV_ENGINE_DISP },
41 { 0x10000000, NVDEV_SUBDEV_BUS }, 42 { 0x10000000, NVDEV_SUBDEV_BUS },
42 { 0x80000000, NVDEV_ENGINE_SW }, 43 { 0x80000000, NVDEV_ENGINE_SW },
43 { 0x0042d101, NVDEV_SUBDEV_FB }, 44 { 0x0042d101, NVDEV_SUBDEV_FB },
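
Listing the same 0x00200000 bit twice (GPIO and I2C) in these tables works
because the mc interrupt walker visits every entry whose bits match the
status word, so a single PMGR interrupt fans out to both subdevs, each of
which filters further in its own handler. A simplified sketch of the dispatch
this relies on (the exact walker lives in mc/base.c and is assumed here, not
shown in this diff):

	while (map->stat) {
		if (intr & map->stat) {
			unit = nouveau_subdev(pmc, map->unit);
			if (unit)
				nv_subdev(unit)->intr(unit); /* GPIO, then I2C */
		}
		map++;
	}
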
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 34472d317097..f9c6a678b47d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -26,6 +26,7 @@
 
 const struct nouveau_mc_intr
 nvc0_mc_intr[] = {
+	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
 	{ 0x00000001, NVDEV_ENGINE_PPP },
 	{ 0x00000020, NVDEV_ENGINE_COPY0 },
 	{ 0x00000040, NVDEV_ENGINE_COPY1 },
@@ -37,10 +38,10 @@ nvc0_mc_intr[] = {
 	{ 0x00040000, NVDEV_SUBDEV_THERM },
 	{ 0x00020000, NVDEV_ENGINE_VP },
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVDEV_SUBDEV_I2C },	/* PMGR->I2C/AUX */
 	{ 0x01000000, NVDEV_SUBDEV_PWR },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
-	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x08000000, NVDEV_SUBDEV_FB },
 	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x40000000, NVDEV_SUBDEV_IBUS },
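Note on the three mc_intr reorderings above: nouveau services a pending interrupt by walking these tables in order and invoking the handler of every unit whose status bits are set, so listing DISP ahead of FIFO makes the display handler (which samples vblank state for pageflip timestamping) run first. A minimal sketch of that dispatch pattern, with service_unit() standing in for the real per-engine handler lookup:

    /* Hedged sketch of an ordered MC interrupt walk; entries earlier
     * in the table are serviced first, which is the point of putting
     * DISP ahead of FIFO in the hunks above. */
    struct mc_intr_map { u32 stat; int unit; };

    static void mc_intr_dispatch(const struct mc_intr_map *map, u32 stat)
    {
        for (; map->stat; map++) {
            if (stat & map->stat)
                service_unit(map->unit); /* hypothetical helper */
        }
    }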
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index 64f8b4702bf7..fcaabe8456e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -150,7 +150,7 @@ mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
 	 * common example is DP->eDP.
 	 */
 	conn = bios->data;
-	conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
+	conn += nvbios_connEe(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
 	type = conn[0];
 	switch (ctx.desc.conn_type) {
 	case 0x01: /* LVDS */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 7610fc5f8fa2..ca9ad9fd47be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -60,9 +60,9 @@ static struct nouveau_i2c_board_info
 nv_board_infos[] = {
 	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
 	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
-	{ { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
-	{ { I2C_BOARD_INFO("adt7473", 0x2d) }, 20 },
-	{ { I2C_BOARD_INFO("adt7473", 0x2c) }, 20 },
+	{ { I2C_BOARD_INFO("adt7473", 0x2e) }, 40 },
+	{ { I2C_BOARD_INFO("adt7473", 0x2d) }, 40 },
+	{ { I2C_BOARD_INFO("adt7473", 0x2c) }, 40 },
 	{ { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
 	{ { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
 	{ { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 3b2c4580098b..0478b2e3fb1d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -36,7 +36,7 @@ nva3_therm_fan_sense(struct nouveau_therm *therm)
 	u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
 	u32 ctrl = nv_rd32(therm, 0x00e720);
 	if (ctrl & 0x00000001)
-		return tach * 60;
+		return tach * 60 / 2;
 	return -ENODEV;
 }
 
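The new `/ 2` reflects that a typical PC fan tachometer emits two pulses per revolution; `tach` here is a pulse rate sampled over one second, so RPM = pulses/s * 60 / 2. Worked example, in the same spirit as the hunk above:

    /* Sketch: RPM from a 1 s tach pulse count, assuming the common
     * 2-pulses-per-revolution fan tachometer. */
    static inline u32 fan_rpm(u32 pulses_per_sec)
    {
        return pulses_per_sec * 60 / 2; /* 40 pulses/s -> 1200 RPM */
    }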
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 434b920f6bd4..a96dda48718e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -414,7 +414,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
 	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
-		 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		 nouveau_encoder_connector_get(nv_encoder)->base.name,
 		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index a2d669b4acf2..e57babb206d3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -477,7 +477,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
 	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
-		 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		 nouveau_encoder_connector_get(nv_encoder)->base.name,
 		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 2f1ed61f7c8c..4342fdaee707 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -115,7 +115,7 @@ nv04_display_create(struct drm_device *dev)
 				 &dev->mode_config.connector_list, head) {
 		if (!connector->encoder_ids[0]) {
 			NV_WARN(drm, "%s has no encoders, removing\n",
-				drm_get_connector_name(connector));
+				connector->name);
 			connector->funcs->destroy(connector);
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 244822df8ffc..8667620b703a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -171,7 +171,8 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
 	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
-		 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+		 nouveau_encoder_connector_get(nv_encoder)->base.name,
+		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
 static void nv04_tv_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index acef48f4a4ea..195bd8e86c6a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -612,8 +612,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
 	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
-		drm_get_connector_name(
-			&nouveau_encoder_connector_get(nv_encoder)->base),
+		nouveau_encoder_connector_get(nv_encoder)->base.name,
 		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d07ce028af51..1fa222e8f007 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -44,6 +44,7 @@
 
 #include <subdev/i2c.h>
 #include <subdev/gpio.h>
+#include <engine/disp.h>
 
 MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
 static int nouveau_tv_disable = 0;
@@ -75,7 +76,8 @@ find_encoder(struct drm_connector *connector, int type)
 			continue;
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
 
-		if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
+		if (type == DCB_OUTPUT_ANY ||
+		    (nv_encoder->dcb && nv_encoder->dcb->type == type))
 			return nv_encoder;
 	}
 
@@ -100,22 +102,24 @@ static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
-	nouveau_event_ref(NULL, &nv_connector->hpd_func);
+	nouveau_event_ref(NULL, &nv_connector->hpd);
 	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
+	if (nv_connector->aux.transfer)
+		drm_dp_aux_unregister(&nv_connector->aux);
 	kfree(connector);
 }
 
-static struct nouveau_i2c_port *
-nouveau_connector_ddc_detect(struct drm_connector *connector,
-			     struct nouveau_encoder **pnv_encoder)
+static struct nouveau_encoder *
+nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
-	struct nouveau_i2c_port *port = NULL;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_mode_object *obj;
 	int i, panel = -ENODEV;
 
 	/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -130,13 +134,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 		}
 	}
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		struct nouveau_encoder *nv_encoder;
-		struct drm_mode_object *obj;
-		int id;
-
-		id = connector->encoder_ids[i];
-		if (!id)
+	for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		int id = connector->encoder_ids[i];
+		if (id == 0)
 			break;
 
 		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
@@ -144,22 +144,24 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 			continue;
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
 
-		port = nv_encoder->i2c;
-		if (port && nv_probe_i2c(port, 0x50)) {
-			*pnv_encoder = nv_encoder;
-			break;
+		if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+			int ret = nouveau_dp_detect(nv_encoder);
+			if (ret == 0)
+				break;
+		} else
+		if (nv_encoder->i2c) {
+			if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+				break;
 		}
-
-		port = NULL;
 	}
 
 	/* eDP panel not detected, restore panel power GPIO to previous
 	 * state to avoid confusing the SOR for other output types.
 	 */
-	if (!port && panel == 0)
+	if (!nv_encoder && panel == 0)
 		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
 
-	return port;
+	return nv_encoder;
 }
 
 static struct nouveau_encoder *
@@ -258,25 +260,17 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	if (ret < 0 && ret != -EACCES)
 		return conn_status;
 
-	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
-	if (i2c) {
+	nv_encoder = nouveau_connector_ddc_detect(connector);
+	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
 		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
 			NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
-				 drm_get_connector_name(connector));
+				 connector->name);
 			goto detect_analog;
 		}
 
-		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
-		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
-			NV_ERROR(drm, "Detected %s, but failed init\n",
-				 drm_get_connector_name(connector));
-			conn_status = connector_status_disconnected;
-			goto out;
-		}
-
 		/* Override encoder type for DVI-I based on whether EDID
 		 * says the display is digital or analog, both use the
 		 * same i2c channel so the value returned from ddc_detect
@@ -437,7 +431,7 @@ nouveau_connector_force(struct drm_connector *connector)
 	nv_encoder = find_encoder(connector, type);
 	if (!nv_encoder) {
 		NV_ERROR(drm, "can't find encoder to force %s on!\n",
-			 drm_get_connector_name(connector));
+			 connector->name);
 		connector->status = connector_status_disconnected;
 		return;
 	}
@@ -912,33 +906,103 @@ nouveau_connector_funcs_lvds = {
 };
 
 static void
+nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
+{
+	struct nouveau_encoder *nv_encoder = NULL;
+
+	if (connector->encoder)
+		nv_encoder = nouveau_encoder(connector->encoder);
+	if (nv_encoder && nv_encoder->dcb &&
+	    nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+		if (mode == DRM_MODE_DPMS_ON) {
+			u8 data = DP_SET_POWER_D0;
+			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			usleep_range(1000, 2000);
+		} else {
+			u8 data = DP_SET_POWER_D3;
+			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+		}
+	}
+
+	drm_helper_connector_dpms(connector, mode);
+}
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs_dp = {
+	.dpms = nouveau_connector_dp_dpms,
+	.save = NULL,
+	.restore = NULL,
+	.detect = nouveau_connector_detect,
+	.destroy = nouveau_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = nouveau_connector_set_property,
+	.force = nouveau_connector_force
+};
+
+static void
 nouveau_connector_hotplug_work(struct work_struct *work)
 {
 	struct nouveau_connector *nv_connector =
-		container_of(work, struct nouveau_connector, hpd_work);
+		container_of(work, typeof(*nv_connector), work);
 	struct drm_connector *connector = &nv_connector->base;
-	struct drm_device *dev = connector->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
-	bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	const char *name = connector->name;
 
-	NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
-		 drm_get_connector_name(connector));
+	if (nv_connector->status & NVKM_HPD_IRQ) {
+	} else {
+		bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG);
 
-	if (plugged)
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-	else
-		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
 
-	drm_helper_hpd_irq_event(dev);
+		if (plugged)
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		else
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		drm_helper_hpd_irq_event(connector->dev);
+	}
+
+	nouveau_event_get(nv_connector->hpd);
 }
 
 static int
-nouveau_connector_hotplug(void *data, int index)
+nouveau_connector_hotplug(void *data, u32 type, int index)
 {
 	struct nouveau_connector *nv_connector = data;
-	schedule_work(&nv_connector->hpd_work);
-	return NVKM_EVENT_KEEP;
+	nv_connector->status = type;
+	schedule_work(&nv_connector->work);
+	return NVKM_EVENT_DROP;
+}
+
+static ssize_t
+nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct nouveau_connector *nv_connector =
+		container_of(aux, typeof(*nv_connector), aux);
+	struct nouveau_encoder *nv_encoder;
+	struct nouveau_i2c_port *port;
+	int ret;
+
+	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+	if (!nv_encoder || !(port = nv_encoder->i2c))
+		return -ENODEV;
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+	if (msg->size == 0)
+		return msg->size;
+
+	ret = nouveau_i2c(port)->acquire(port, 0);
+	if (ret)
+		return ret;
+
+	ret = port->func->aux(port, false, msg->request, msg->address,
+			      msg->buffer, msg->size);
+	nouveau_i2c(port)->release(port);
+	if (ret >= 0) {
+		msg->reply = ret;
+		return msg->size;
+	}
+
+	return ret;
 }
 
 static int
@@ -974,9 +1038,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_connector *nv_connector = NULL;
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
 	struct drm_connector *connector;
 	int type, ret = 0;
 	bool dummy;
@@ -992,33 +1056,15 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		return ERR_PTR(-ENOMEM);
 
 	connector = &nv_connector->base;
-	INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
 	nv_connector->index = index;
 
 	/* attempt to parse vbios connector type and hotplug gpio */
 	nv_connector->dcb = olddcb_conn(dev, index);
 	if (nv_connector->dcb) {
-		static const u8 hpd[16] = {
-			0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
-			0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-		};
-
 		u32 entry = ROM16(nv_connector->dcb[0]);
 		if (olddcb_conntab(dev)[3] >= 4)
 			entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
 
-		ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
-				 DCB_GPIO_UNUSED, &nv_connector->hpd);
-		if (ret)
-			nv_connector->hpd.func = DCB_GPIO_UNUSED;
-
-		if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_new(gpio->events, nv_connector->hpd.line,
-					  nouveau_connector_hotplug,
-					  nv_connector,
-					  &nv_connector->hpd_func);
-		}
-
 		nv_connector->type = nv_connector->dcb[0];
 		if (drm_conntype_from_dcb(nv_connector->type) ==
 		    DRM_MODE_CONNECTOR_Unknown) {
@@ -1040,7 +1086,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		}
 	} else {
 		nv_connector->type = DCB_CONNECTOR_NONE;
-		nv_connector->hpd.func = DCB_GPIO_UNUSED;
 	}
 
 	/* no vbios data, or an unknown dcb connector type - attempt to
@@ -1080,8 +1125,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		}
 	}
 
-	type = drm_conntype_from_dcb(nv_connector->type);
-	if (type == DRM_MODE_CONNECTOR_LVDS) {
+	switch ((type = drm_conntype_from_dcb(nv_connector->type))) {
+	case DRM_MODE_CONNECTOR_LVDS:
 		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
 		if (ret) {
 			NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
@@ -1090,8 +1135,23 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		}
 
 		funcs = &nouveau_connector_funcs_lvds;
-	} else {
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_eDP:
+		nv_connector->aux.dev = dev->dev;
+		nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+		ret = drm_dp_aux_register(&nv_connector->aux);
+		if (ret) {
+			NV_ERROR(drm, "failed to register aux channel\n");
+			kfree(nv_connector);
+			return ERR_PTR(ret);
+		}
+
+		funcs = &nouveau_connector_funcs_dp;
+		break;
+	default:
 		funcs = &nouveau_connector_funcs;
+		break;
 	}
 
 	/* defaults, will get overridden in detect() */
@@ -1166,10 +1226,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	}
 
-	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-	if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+	ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index,
+				nouveau_connector_hotplug,
+				nv_connector, &nv_connector->hpd);
+	if (ret)
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	else
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 
+	INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
+
 	drm_sysfs_connector_add(connector);
 	return connector;
 }
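With the aux channel registered above, the generic DRM DP helpers route DPCD transactions through nouveau_connector_aux_xfer(). A hedged usage sketch — drm_dp_dpcd_readb() is the real helper API, the wrapper function is illustrative only:

    /* Illustrative: read the sink's DPCD revision via a registered
     * drm_dp_aux channel. */
    static int example_dpcd_rev(struct drm_dp_aux *aux)
    {
        u8 rev;
        ssize_t ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);

        return ret < 0 ? ret : rev; /* e.g. 0x12 for DPCD 1.2 */
    }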
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 264a778f473b..8861b6c579ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,12 +28,12 @@
 #define __NOUVEAU_CONNECTOR_H__
 
 #include <drm/drm_edid.h>
+#include <drm/drm_dp_helper.h>
 #include "nouveau_crtc.h"
 
 #include <core/event.h>
 
 #include <subdev/bios.h>
-#include <subdev/bios/gpio.h>
 
 struct nouveau_i2c_port;
 
@@ -67,9 +67,11 @@ struct nouveau_connector {
 	u8 index;
 	u8 *dcb;
 
-	struct dcb_gpio_func hpd;
-	struct work_struct hpd_work;
-	struct nouveau_eventh *hpd_func;
+	struct nouveau_eventh *hpd;
+	u32 status;
+	struct work_struct work;
+
+	struct drm_dp_aux aux;
 
 	int dithering_mode;
 	int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index d1e5890784d7..a0534489d23f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -74,7 +74,7 @@ struct nouveau_crtc {
 
 static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
 {
-	return container_of(crtc, struct nouveau_crtc, base);
+	return crtc ? container_of(crtc, struct nouveau_crtc, base) : NULL;
}
 
 static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
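Making the upcast NULL-propagating lets callers write nouveau_crtc(nv_encoder->crtc) without guarding the argument first; the nv50_sor_disconnect() rework later in this pull relies on exactly that, testing the result once:

    /* Pattern enabled by the change above (mirrors the nv50 SOR
     * disconnect path further down). */
    struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

    if (nv_crtc) {
        /* tear down per-head state */
    }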
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index da764a4ed958..26b5647188ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -42,7 +42,7 @@
 #include <core/class.h>
 
 static int
-nouveau_display_vblank_handler(void *data, int head)
+nouveau_display_vblank_handler(void *data, u32 type, int head)
 {
 	struct nouveau_drm *drm = data;
 	drm_handle_vblank(drm->dev, head);
@@ -178,7 +178,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
 		return -ENOMEM;
 
 	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		ret = nouveau_event_new(pdisp->vblank, i,
+		ret = nouveau_event_new(pdisp->vblank, 1, i,
 					nouveau_display_vblank_handler,
 					drm, &disp->vblank[i]);
 		if (ret) {
@@ -393,7 +393,7 @@ nouveau_display_init(struct drm_device *dev)
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
+		if (conn->hpd) nouveau_event_get(conn->hpd);
 	}
 
 	return ret;
@@ -408,7 +408,7 @@ nouveau_display_fini(struct drm_device *dev)
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
+		if (conn->hpd) nouveau_event_put(conn->hpd);
 	}
 
 	drm_kms_helper_poll_disable(dev);
@@ -798,6 +798,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
+	int crtcid = -1;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -808,8 +809,13 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	}
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
-	if (s->event)
-		drm_send_vblank_event(dev, s->crtc, s->event);
+	if (s->event) {
+		/* Vblank timestamps/counts are only correct on >= NV-50 */
+		if (nv_device(drm->device)->card_type >= NV_50)
+			crtcid = s->crtc;
+
+		drm_send_vblank_event(dev, crtcid, s->event);
+	}
 
 	list_del(&s->head);
 	if (ps)
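The crtcid = -1 default matters because drm_send_vblank_event() treats a negative CRTC id as "no usable vblank counter" and stamps the event with the current time rather than a hardware-sampled vblank timestamp; per the comment, only NV50+ provides trustworthy per-head counts here. Condensed to its essentials:

    /* Sketch of the decision above: pass the real CRTC index only
     * when its vblank counter/timestamp can be trusted; -1 makes
     * the DRM core fall back to "now". */
    int crtcid = -1;

    if (nv_device(drm->device)->card_type >= NV_50)
        crtcid = s->crtc;
    drm_send_vblank_event(dev, crtcid, s->event);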
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 36fd22500569..5675ffc175ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -55,11 +55,10 @@ nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
 
 }
 
-bool
-nouveau_dp_detect(struct drm_encoder *encoder)
+int
+nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
 {
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = nv_encoder->base.base.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_i2c_port *auxch;
 	u8 *dpcd = nv_encoder->dp.dpcd;
@@ -67,11 +66,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 
 	auxch = nv_encoder->i2c;
 	if (!auxch)
-		return false;
+		return -ENODEV;
 
 	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
 	if (ret)
-		return false;
+		return ret;
 
 	nv_encoder->dp.link_bw = 27000 * dpcd[1];
 	nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
@@ -91,6 +90,5 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 		  nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
 	nouveau_dp_probe_oui(dev, auxch, dpcd);
-
-	return true;
+	return 0;
 }
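On the arithmetic above: DPCD register 0x01 (MAX_LINK_RATE) encodes the per-lane rate in units of 0.27 Gbps, so nouveau's `link_bw = 27000 * dpcd[1]` yields the link clock in kHz. As a worked check:

    /* DPCD MAX_LINK_RATE is in 0.27 Gbps steps; nouveau keeps the
     * equivalent link clock in kHz. */
    u32 link_bw_khz = 27000 * dpcd[1];
    /* 0x06 -> 162000 kHz (RBR), 0x0a -> 270000 kHz (HBR),
     * 0x14 -> 540000 kHz (HBR2) */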
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 24660c0f713d..5f0e37fc2849 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -46,6 +46,7 @@ struct nouveau_encoder {
 	/* different to drm_encoder.crtc, this reflects what's
 	 * actually programmed on the hw, not the proposed crtc */
 	struct drm_crtc *crtc;
+	u32 ctrl;
 
 	struct drm_display_mode mode;
 	int last_dpms;
@@ -84,9 +85,7 @@ get_slave_funcs(struct drm_encoder *enc)
 }
 
 /* nouveau_dp.c */
-bool nouveau_dp_detect(struct drm_encoder *);
-void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
-		     struct nouveau_object *);
+int nouveau_dp_detect(struct nouveau_encoder *);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 90074d620e31..ab5ea3b0d666 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -166,7 +166,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
 }
 
 static int
-nouveau_fence_wait_uevent_handler(void *data, int index)
+nouveau_fence_wait_uevent_handler(void *data, u32 type, int index)
 {
 	struct nouveau_fence_priv *priv = data;
 	wake_up_all(&priv->waiting);
@@ -183,7 +183,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
 	struct nouveau_eventh *handler;
 	int ret = 0;
 
-	ret = nouveau_event_new(pfifo->uevent, 0,
+	ret = nouveau_event_new(pfifo->uevent, 1, 0,
 				nouveau_fence_wait_uevent_handler,
 				priv, &handler);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index c1a7e5a73a26..462679a8fec5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -57,7 +57,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
 		return drm_compat_ioctl(filp, cmd, arg);
 
 #if 0
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
 		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
 #endif
 	if (fn != NULL)
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index fb84da3cb50d..4f4c3fec6916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -64,12 +64,13 @@ static bool
 nouveau_switcheroo_can_switch(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	bool can_switch;
 
-	spin_lock(&dev->count_lock);
-	can_switch = (dev->open_count == 0);
-	spin_unlock(&dev->count_lock);
-	return can_switch;
+	/*
+	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
+	 * locking inversion with the driver load path. And the access here is
+	 * completely racy anyway. So don't bother with locking for now.
+	 */
+	return dev->open_count == 0;
 }
 
 static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 58af547b0b93..afdf607df3e6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,4 +1,4 @@
- /*
+/*
  * Copyright 2011 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
@@ -957,7 +958,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
 
 	nv50_display_flip_stop(crtc);
 
-	push = evo_wait(mast, 2);
+	push = evo_wait(mast, 6);
 	if (push) {
 		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
 			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
@@ -1207,6 +1208,7 @@ static void
 nv50_crtc_disable(struct drm_crtc *crtc)
 {
 	struct nv50_head *head = nv50_head(crtc);
+	evo_sync(crtc->dev);
 	if (head->image)
 		nouveau_bo_unpin(head->image);
 	nouveau_bo_ref(NULL, &head->image);
@@ -1700,10 +1702,9 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 }
 
 static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder)
+nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
 	struct nv50_disp *disp = nv50_disp(encoder->dev);
 	const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
 
@@ -1722,7 +1723,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->dev;
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct drm_encoder *partner;
-	int or = nv_encoder->or;
+	u32 mthd;
 
 	nv_encoder->last_dpms = mode;
 
@@ -1740,7 +1741,17 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 		}
 	}
 
-	nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+	mthd  = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+	mthd |= nv_encoder->or;
+
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+		nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1);
+		mthd |= NV94_DISP_SOR_DP_PWR;
+	} else {
+		mthd |= NV50_DISP_SOR_PWR;
+	}
+
+	nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON));
 }
 
 static bool
@@ -1764,33 +1775,36 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder,
 }
 
 static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
 {
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nv50_mast *mast = nv50_mast(encoder->dev);
-	const int or = nv_encoder->or;
-	u32 *push;
-
-	if (nv_encoder->crtc) {
-		nv50_crtc_prepare(nv_encoder->crtc);
-
-		push = evo_wait(mast, 4);
-		if (push) {
-			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
-				evo_mthd(push, 0x0600 + (or * 0x40), 1);
-				evo_data(push, 0x00000000);
-			} else {
-				evo_mthd(push, 0x0200 + (or * 0x20), 1);
-				evo_data(push, 0x00000000);
-			}
-			evo_kick(push, mast);
+	struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
+	u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
+	if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
+			evo_data(push, (nv_encoder->ctrl = temp));
+		} else {
+			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
+			evo_data(push, (nv_encoder->ctrl = temp));
 		}
-
-		nv50_hdmi_disconnect(encoder);
+		evo_kick(push, mast);
 	}
+}
+
+static void
+nv50_sor_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
 
 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 	nv_encoder->crtc = NULL;
+
+	if (nv_crtc) {
+		nv50_crtc_prepare(&nv_crtc->base);
+		nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
+		nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
+	}
 }
 
 static void
@@ -1810,12 +1824,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	struct nvbios *bios = &drm->vbios;
-	u32 *push, lvds = 0;
+	u32 lvds = 0, mask, ctrl;
 	u8 owner = 1 << nv_crtc->index;
 	u8 proto = 0xf;
 	u8 depth = 0x0;
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	nv_encoder->crtc = encoder->crtc;
+
 	switch (nv_encoder->dcb->type) {
 	case DCB_OUTPUT_TMDS:
 		if (nv_encoder->dcb->sorconf.link & 1) {
@@ -1827,7 +1843,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 			proto = 0x2;
 		}
 
-		nv50_hdmi_mode_set(encoder, mode);
+		nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
 		break;
 	case DCB_OUTPUT_LVDS:
 		proto = 0x0;
@@ -1883,19 +1899,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 		break;
 	}
 
-	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+	nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
 
-	push = evo_wait(nv50_mast(dev), 8);
-	if (push) {
-		if (nv50_vers(mast) < NVD0_DISP_CLASS) {
-			u32 ctrl = (depth << 16) | (proto << 8) | owner;
-			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-				ctrl |= 0x00001000;
-			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-				ctrl |= 0x00002000;
-			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
-			evo_data(push, ctrl);
-		} else {
+	if (nv50_vers(mast) >= NVD0_DISP_CLASS) {
+		u32 *push = evo_wait(mast, 3);
+		if (push) {
 			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
 			u32 syncs = 0x00000001;
 
@@ -1910,14 +1918,21 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
 			evo_data(push, syncs | (depth << 6));
 			evo_data(push, magic);
-			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
-			evo_data(push, owner | (proto << 8));
+			evo_kick(push, mast);
 		}
 
-		evo_kick(push, mast);
+		ctrl = proto << 8;
+		mask = 0x00000f00;
+	} else {
+		ctrl = (depth << 16) | (proto << 8);
+		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+			ctrl |= 0x00001000;
+		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+			ctrl |= 0x00002000;
+		mask = 0x000f3f00;
 	}
 
-	nv_encoder->crtc = encoder->crtc;
+	nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
 }
 
 static void
@@ -2295,7 +2310,7 @@ nv50_display_create(struct drm_device *dev)
 			continue;
 
 		NV_WARN(drm, "%s has no encoders, removing\n",
-			drm_get_connector_name(connector));
+			connector->name);
 		connector->funcs->destroy(connector);
 	}
 
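nv50_sor_ctrl() above is a cached read-modify-write: the last SOR control word is kept in nv_encoder->ctrl, new owner/proto/depth bits are merged under a caller-supplied mask, and an EVO method is pushed only when the word actually changes — which is what lets a SOR be attached to and detached from multiple heads one owner bit at a time. The general shape, reduced to its essentials (names hypothetical):

    /* Sketch of the cached-RMW pattern used by nv50_sor_ctrl(). */
    static void cached_rmw(u32 *cache, u32 mask, u32 data,
                           void (*write_hw)(u32 val))
    {
        u32 next = (*cache & ~mask) | (data & mask);

        if (next != *cache) {
            *cache = next;
            write_hw(next); /* e.g. an evo_mthd()/evo_data() push */
        }
    }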
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index e3c47a8005ff..2d28dc337cfb 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -319,13 +319,13 @@ static void page_flip_worker(struct work_struct *work)
 	struct drm_display_mode *mode = &crtc->mode;
 	struct drm_gem_object *bo;
 
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			crtc->x << 16, crtc->y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16,
 			vblank_cb, crtc);
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 
 	bo = omap_framebuffer_bo(crtc->primary->fb, 0);
 	drm_gem_object_unreference_unlocked(bo);
@@ -465,7 +465,7 @@ static void apply_worker(struct work_struct *work)
 	 * the callbacks and list modification all serialized
 	 * with respect to modesetting ioctls from userspace.
 	 */
-	mutex_lock(&crtc->mutex);
+	drm_modeset_lock(&crtc->mutex, NULL);
 	dispc_runtime_get();
 
 	/*
@@ -510,7 +510,7 @@ static void apply_worker(struct work_struct *work)
 
 out:
 	dispc_runtime_put();
-	mutex_unlock(&crtc->mutex);
+	drm_modeset_unlock(&crtc->mutex);
 }
 
 int omap_crtc_apply(struct drm_crtc *crtc,
@@ -518,7 +518,7 @@ int omap_crtc_apply(struct drm_crtc *crtc,
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
-	WARN_ON(!mutex_is_locked(&crtc->mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	/* no need to queue it again if it is already queued: */
 	if (apply->queued)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index c8270e4b26f3..002b9721e85a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -588,9 +588,7 @@ static void dev_lastclose(struct drm_device *dev)
 		}
 	}
 
-	drm_modeset_lock_all(dev);
-	ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
-	drm_modeset_unlock_all(dev);
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
 	if (ret)
 		DBG("failed to restore crtc mode");
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8b019602ffe6..2a5cacdc344b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -346,6 +346,7 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
 
 	VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
 
+	/* FIXME: This is racy - no protection against modeset config changes. */
 	while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
 		/* only consider connectors that are part of a chain */
 		if (connector->encoder && connector->encoder->crtc) {
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 1f1f8371a199..db1601fdbe29 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -27,6 +27,7 @@
 #define MCS_ELVSS_ON		0xb1
 #define MCS_USER_SETTING	0xf0
 #define MCS_DISPCTL		0xf2
+#define MCS_POWER_CTRL		0xf4
 #define MCS_GTCON		0xf7
 #define MCS_PANEL_CONDITION	0xf8
 #define MCS_GAMMA_SET1		0xf9
@@ -182,6 +183,8 @@ static void ld9040_init(struct ld9040 *ctx)
 	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
 		0x02, 0x08, 0x08, 0x10, 0x10);
 	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
+	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
+		0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
 	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
 	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
 	ld9040_brightness_set(ctx);
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 35941d2412b8..06e57a26db7a 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -847,6 +847,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
 	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
 		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
 		ctx->error = -EINVAL;
+		return;
 	}
 
 	ctx->variant = &s6e8aa0_variants[i];
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 309f29e9234a..a25136132c31 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -262,6 +262,13 @@ static int panel_simple_remove(struct device *dev)
 	return 0;
 }
 
+static void panel_simple_shutdown(struct device *dev)
+{
+	struct panel_simple *panel = dev_get_drvdata(dev);
+
+	panel_simple_disable(&panel->base);
+}
+
 static const struct drm_display_mode auo_b101aw03_mode = {
 	.clock = 51450,
 	.hdisplay = 1024,
@@ -284,6 +291,28 @@ static const struct panel_desc auo_b101aw03 = {
 	},
 };
 
+static const struct drm_display_mode auo_b133xtn01_mode = {
+	.clock = 69500,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 48,
+	.hsync_end = 1366 + 48 + 32,
+	.htotal = 1366 + 48 + 32 + 20,
+	.vdisplay = 768,
+	.vsync_start = 768 + 3,
+	.vsync_end = 768 + 3 + 6,
+	.vtotal = 768 + 3 + 6 + 13,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133xtn01 = {
+	.modes = &auo_b133xtn01_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 293,
+		.height = 165,
+	},
+};
+
 static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
 	.clock = 72070,
 	.hdisplay = 1366,
@@ -328,6 +357,52 @@ static const struct panel_desc chunghwa_claa101wb01 = {
 	},
 };
 
+static const struct drm_display_mode edt_et057090dhu_mode = {
+	.clock = 25175,
+	.hdisplay = 640,
+	.hsync_start = 640 + 16,
+	.hsync_end = 640 + 16 + 30,
+	.htotal = 640 + 16 + 30 + 114,
+	.vdisplay = 480,
+	.vsync_start = 480 + 10,
+	.vsync_end = 480 + 10 + 3,
+	.vtotal = 480 + 10 + 3 + 32,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc edt_et057090dhu = {
+	.modes = &edt_et057090dhu_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 115,
+		.height = 86,
+	},
+};
+
+static const struct drm_display_mode edt_etm0700g0dh6_mode = {
+	.clock = 33260,
+	.hdisplay = 800,
+	.hsync_start = 800 + 40,
+	.hsync_end = 800 + 40 + 128,
+	.htotal = 800 + 40 + 128 + 88,
+	.vdisplay = 480,
+	.vsync_start = 480 + 10,
+	.vsync_end = 480 + 10 + 2,
+	.vtotal = 480 + 10 + 2 + 33,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc edt_etm0700g0dh6 = {
+	.modes = &edt_etm0700g0dh6_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 152,
+		.height = 91,
+	},
+};
+
 static const struct drm_display_mode lg_lp129qe_mode = {
 	.clock = 285250,
 	.hdisplay = 2560,
@@ -377,12 +452,24 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "auo,b101aw03",
 		.data = &auo_b101aw03,
 	}, {
+		.compatible = "auo,b133xtn01",
+		.data = &auo_b133xtn01,
+	}, {
 		.compatible = "chunghwa,claa101wa01a",
 		.data = &chunghwa_claa101wa01a
 	}, {
 		.compatible = "chunghwa,claa101wb01",
 		.data = &chunghwa_claa101wb01
 	}, {
+		.compatible = "edt,et057090dhu",
+		.data = &edt_et057090dhu,
+	}, {
+		.compatible = "edt,et070080dh6",
+		.data = &edt_etm0700g0dh6,
+	}, {
+		.compatible = "edt,etm0700g0dh6",
+		.data = &edt_etm0700g0dh6,
+	}, {
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
@@ -412,6 +499,11 @@ static int panel_simple_platform_remove(struct platform_device *pdev)
 	return panel_simple_remove(&pdev->dev);
 }
 
+static void panel_simple_platform_shutdown(struct platform_device *pdev)
+{
+	panel_simple_shutdown(&pdev->dev);
+}
+
 static struct platform_driver panel_simple_platform_driver = {
 	.driver = {
 		.name = "panel-simple",
@@ -420,6 +512,7 @@ static struct platform_driver panel_simple_platform_driver = {
 	},
 	.probe = panel_simple_platform_probe,
 	.remove = panel_simple_platform_remove,
+	.shutdown = panel_simple_platform_shutdown,
 };
 
 struct panel_desc_dsi {
@@ -561,6 +654,11 @@ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
 	return panel_simple_remove(&dsi->dev);
 }
 
+static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
+{
+	panel_simple_shutdown(&dsi->dev);
+}
+
 static struct mipi_dsi_driver panel_simple_dsi_driver = {
 	.driver = {
 		.name = "panel-simple-dsi",
@@ -569,6 +667,7 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
 	},
 	.probe = panel_simple_dsi_probe,
 	.remove = panel_simple_dsi_remove,
+	.shutdown = panel_simple_dsi_shutdown,
 };
 
 static int __init panel_simple_init(void)
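The new panel timings can be sanity-checked with pixel_clock ≈ htotal × vtotal × vrefresh: for auo_b133xtn01, (1366+48+32+20) × (768+3+6+13) × 60 = 1466 × 790 × 60 ≈ 69.49 MHz, matching .clock = 69500 (kHz); edt_et057090dhu works out to 800 × 525 × 60 = 25.2 MHz against 25175 kHz, the classic VGA dot clock. An illustrative helper (not part of the driver):

    /* Recover approximate vrefresh from a mode, for eyeballing new
     * panel entries like the ones added above. */
    static unsigned int vrefresh_approx(unsigned int clock_khz,
                                        unsigned int htotal,
                                        unsigned int vtotal)
    {
        /* 69500 kHz / (1466 * 790) ~= 60 Hz, rounded to nearest */
        return (clock_khz * 1000 + htotal * vtotal / 2) / (htotal * vtotal);
    }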
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 41bdd174657e..5d7ea2461852 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -574,6 +574,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 			  bo->surf.height, bo->surf.stride, bo->surf.format);
 		qxl_io_create_primary(qdev, base_offset, bo);
 		bo->is_primary = true;
+	}
+
+	if (bo->is_primary) {
+		DRM_DEBUG_KMS("setting surface_id to 0 for primary surface %d on crtc %d\n", bo->surface_id, qcrtc->index);
 		surf_id = 0;
 	} else {
 		surf_id = bo->surface_id;
@@ -841,7 +845,7 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
 	.save = qxl_conn_save,
 	.restore = qxl_conn_restore,
 	.detect = qxl_conn_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
+	.fill_modes = drm_helper_probe_single_connector_modes_nomerge,
 	.set_property = qxl_conn_set_property,
 	.destroy = qxl_conn_destroy,
 };
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index fee8748bdca5..6e936634d65c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -214,7 +214,6 @@ static struct pci_driver qxl_pci_driver = {
 static struct drm_driver qxl_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET |
 			   DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
-	.dev_priv_size = 0,
 	.load = qxl_driver_load,
 	.unload = qxl_driver_unload,
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 0bb86e6d41b4..b110883f8253 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -451,4 +451,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
 		      DRM_AUTH|DRM_UNLOCKED),
 };
 
-int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
+int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 28f84b4fce32..34d6a85e9023 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -87,7 +87,7 @@ int qxl_irq_init(struct qxl_device *qdev)
 	atomic_set(&qdev->irq_received_cursor, 0);
 	atomic_set(&qdev->irq_received_io_cmd, 0);
 	qdev->irq_received_error = 0;
-	ret = drm_irq_install(qdev->ddev);
+	ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed installing irq: %d\n", ret);
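drm_irq_install() now takes the interrupt line as an explicit second argument instead of deriving it from the underlying bus device; the same conversion recurs in the r128 and radeon hunks below, where drm_dev_to_irq(dev) call sites become dev->pdev->irq. A minimal sketch of the post-change call site for a PCI-backed driver:

    /* Sketch: installing the DRM IRQ handler with an explicit IRQ number.
     * For a PCI device the line comes from dev->pdev->irq. */
    static int my_irq_init(struct drm_device *dev)
    {
            int ret = drm_irq_install(dev, dev->pdev->irq);

            if (ret)
                    DRM_ERROR("Failed installing irq: %d\n", ret);
            return ret;
    }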
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d52c27527b9a..71a1baeac14e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -109,13 +109,11 @@ static const struct vm_operations_struct *ttm_vm_ops;
 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
-	struct qxl_device *qdev;
 	int r;
 
 	bo = (struct ttm_buffer_object *)vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	qdev = qxl_get_qdev(bo->bdev);
 	r = ttm_vm_ops->fault(vma, vmf);
 	return r;
 }
@@ -162,10 +160,6 @@ static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			     struct ttm_mem_type_manager *man)
 {
-	struct qxl_device *qdev;
-
-	qdev = qxl_get_qdev(bdev);
-
 	switch (type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index b0d0fd3e4376..663f38c63ba6 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -203,7 +203,7 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
 		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
 
 	if (fn != NULL)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index e806dacd452f..575e986f82a7 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1594,7 +1594,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
 
 	switch (param->param) {
 	case R128_PARAM_IRQ_NR:
-		value = drm_dev_to_irq(dev);
+		value = dev->pdev->irq;
 		break;
 	default:
 		return -EINVAL;
@@ -1641,4 +1641,4 @@ const struct drm_ioctl_desc r128_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
 };
 
-int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
+int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 09433534dc47..dbcbfe80aac0 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
-	radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+	radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
 	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
 	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e911898348f8..26c12a3fe430 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	u32 adjusted_clock = mode->clock;
 	int encoder_mode = atombios_get_encoder_mode(encoder);
 	u32 dp_clock = mode->clock;
+	u32 clock = mode->clock;
 	int bpc = radeon_crtc->bpc;
 	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 
@@ -632,6 +633,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
 	}
 
+	/* adjust pll for deep color modes */
+	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+		switch (bpc) {
+		case 8:
+		default:
+			break;
+		case 10:
+			clock = (clock * 5) / 4;
+			break;
+		case 12:
+			clock = (clock * 3) / 2;
+			break;
+		case 16:
+			clock = clock * 2;
+			break;
+		}
+	}
+
 	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
 	 * accordingly based on the encoder/transmitter to work around
 	 * special hw requirements.
@@ -653,7 +672,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		switch (crev) {
 		case 1:
 		case 2:
-			args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
 			args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 			args.v1.ucEncodeMode = encoder_mode;
 			if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
@@ -665,7 +684,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
 			break;
 		case 3:
-			args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+			args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
 			args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
 			args.v3.sInput.ucEncodeMode = encoder_mode;
 			args.v3.sInput.ucDispPllConfig = 0;
@@ -679,10 +698,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
 			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 				struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-				if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
-					/* deep color support */
-					args.v3.sInput.usPixelClock =
-						cpu_to_le16((mode->clock * bpc / 8) / 10);
 				if (dig->coherent_mode)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_COHERENT_MODE;
@@ -862,14 +877,21 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
 			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
 				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
-			switch (bpc) {
-			case 8:
-			default:
-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
-				break;
-			case 10:
-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
-				break;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+					break;
+				case 12:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+					break;
+				}
 			}
 			args.v5.ucTransmitterID = encoder_id;
 			args.v5.ucEncoderMode = encoder_mode;
@@ -884,20 +906,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
 			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
 				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
-			switch (bpc) {
-			case 8:
-			default:
-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
-				break;
-			case 10:
-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
-				break;
-			case 12:
-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
-				break;
-			case 16:
-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
-				break;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+					break;
+				case 12:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+					break;
+				case 16:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+					break;
+				}
 			}
 			args.v6.ucTransmitterID = encoder_id;
 			args.v6.ucEncoderMode = encoder_mode;
@@ -938,6 +962,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
 		struct radeon_connector_atom_dig *dig_connector =
 			radeon_connector->con_priv;
 		int dp_clock;
+
+		/* Assign mode clock for hdmi deep color max clock limit check */
+		radeon_connector->pixelclock_for_modeset = mode->clock;
 		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
 
 		switch (encoder_mode) {
@@ -1019,10 +1046,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	struct radeon_encoder *radeon_encoder =
 		to_radeon_encoder(radeon_crtc->encoder);
 	u32 pll_clock = mode->clock;
+	u32 clock = mode->clock;
 	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
 	struct radeon_pll *pll;
 	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
+	/* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
+	if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) &&
+	    (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
+	    (radeon_crtc->bpc > 8))
+		clock = radeon_crtc->adjusted_clock;
+
 	switch (radeon_crtc->pll_id) {
 	case ATOM_PPLL1:
 		pll = &rdev->clock.p1pll;
@@ -1057,7 +1091,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 					 radeon_crtc->crtc_id, &radeon_crtc->ss);
 
 	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
-				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
+				  encoder_mode, radeon_encoder->encoder_id, clock,
 				  ref_div, fb_div, frac_fb_div, post_div,
 				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
 
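The new deep-color handling in atombios_adjust_pll() scales the clock handed to the AtomBIOS tables instead of patching usPixelClock only in the v3 path as before: HDMI transports 10, 12, and 16 bpc video at 1.25x, 1.5x, and 2x the nominal pixel clock, because each pixel carries bpc/8 times as many bits as the 8 bpc baseline. Restated as a standalone helper (the function name is ours; the arithmetic is the patch's):

    /* Sketch: effective HDMI TMDS clock for deep color modes. */
    static u32 deep_color_clock(u32 mode_clock, int bpc)
    {
            switch (bpc) {
            case 10: return mode_clock * 5 / 4;	/* 30 bpp -> 1.25x */
            case 12: return mode_clock * 3 / 2;	/* 36 bpp -> 1.5x  */
            case 16: return mode_clock * 2;		/* 48 bpp -> 2x    */
            default: return mode_clock;		/* 8 bpc baseline  */
            }
    }

For example, a 148500 kHz 1080p60 mode driven at 12 bpc needs a 222750 kHz TMDS clock, which is the value the v1/v3 usPixelClock arguments and the DCE5/6 program-pll path now receive.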
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 54e4f52549af..c5b1f2da3954 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -95,9 +95,12 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
 	unsigned char *base;
 	int recv_bytes;
+	int r = 0;
 
 	memset(&args, 0, sizeof(args));
 
+	mutex_lock(&chan->mutex);
+
 	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
 	radeon_atom_copy_swap(base, send, send_bytes, true);
@@ -117,19 +120,22 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 	/* timeout */
 	if (args.v1.ucReplyStatus == 1) {
 		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
-		return -ETIMEDOUT;
+		r = -ETIMEDOUT;
+		goto done;
 	}
 
 	/* flags not zero */
 	if (args.v1.ucReplyStatus == 2) {
 		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
-		return -EBUSY;
+		r = -EBUSY;
+		goto done;
 	}
 
 	/* error */
 	if (args.v1.ucReplyStatus == 3) {
 		DRM_DEBUG_KMS("dp_aux_ch error\n");
-		return -EIO;
+		r = -EIO;
+		goto done;
 	}
 
 	recv_bytes = args.v1.ucDataOutLen;
@@ -139,7 +145,11 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 	if (recv && recv_size)
 		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
 
-	return recv_bytes;
+	r = recv_bytes;
+done:
+	mutex_unlock(&chan->mutex);
+
+	return r;
 }
 
 #define BARE_ADDRESS_SIZE 3
@@ -212,11 +222,12 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
 	radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
 	radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
 	radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
-	ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
+
+	ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
 	if (!ret)
 		radeon_connector->ddc_bus->has_aux = true;
 
-	WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+	WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
 }
 
 /***** general DP utility functions *****/
@@ -281,6 +292,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
 
 /***** radeon specific DP functions *****/
 
+static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+				       u8 dpcd[DP_DPCD_SIZE])
+{
+	int max_link_rate;
+
+	if (radeon_connector_is_dp12_capable(connector))
+		max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+	else
+		max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+
+	return max_link_rate;
+}
+
 /* First get the min lane# when low rate is used according to pixel clock
  * (prefer low rate), second check max lane# supported by DP panel,
  * if the max lane# < low rate lane# then use max lane# instead.
@@ -290,7 +314,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
 					int pix_clock)
 {
 	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
-	int max_link_rate = drm_dp_max_link_rate(dpcd);
+	int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
 	int max_lane_num = drm_dp_max_lane_count(dpcd);
 	int lane_num;
 	int max_dp_pix_clock;
@@ -328,7 +352,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
 			return 540000;
 	}
 
	return radeon_dp_get_max_link_rate(connector, dpcd);
 }
 
 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
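radeon_dp_get_max_link_rate() clamps whatever the sink advertises in its DPCD to what the ASIC's DP encoder can actually drive: 540000 (HBR2, 5.4 Gbps per lane, DP 1.2) only when radeon_connector_is_dp12_capable() says so, otherwise 270000 (HBR), in the kHz-style units returned by drm_dp_max_link_rate(). The clamp in isolation:

    /* Sketch: source-side clamp on the DPCD-advertised link rate. */
    static int clamped_link_rate(int sink_rate, bool dp12_capable)
    {
            int source_max = dp12_capable ? 540000 : 270000;	/* HBR2 : HBR */

            return min(sink_rate, source_max);
    }

Both callers converted here (lane-count and link-clock selection) previously trusted drm_dp_max_link_rate() alone, which could pick a rate the encoder cannot sustain.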
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e6eb5097597f..2b2908440644 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1884,8 +1884,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
 			else
 				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
-		} else
+		} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+		} else {
 			args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+		}
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index b5162c3b6111..9c570fb15b8c 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -43,15 +43,19 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
 	unsigned char *base;
 	u16 out = cpu_to_le16(0);
+	int r = 0;
 
 	memset(&args, 0, sizeof(args));
 
+	mutex_lock(&chan->mutex);
+
 	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
 	if (flags & HW_I2C_WRITE) {
 		if (num > ATOM_MAX_HW_I2C_WRITE) {
 			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
-			return -EINVAL;
+			r = -EINVAL;
+			goto done;
 		}
 		if (buf == NULL)
 			args.ucRegIndex = 0;
@@ -65,7 +69,8 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 	} else {
 		if (num > ATOM_MAX_HW_I2C_READ) {
 			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
-			return -EINVAL;
+			r = -EINVAL;
+			goto done;
 		}
 		args.ucRegIndex = 0;
 		args.lpI2CDataOut = 0;
@@ -82,13 +87,17 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 	/* error */
 	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
 		DRM_DEBUG_KMS("hw_i2c error\n");
-		return -EIO;
+		r = -EIO;
+		goto done;
 	}
 
 	if (!(flags & HW_I2C_WRITE))
 		radeon_atom_copy_swap(buf, base, num, false);
 
-	return 0;
+done:
+	mutex_unlock(&chan->mutex);
+
+	return r;
 }
 
 int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
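The aux and i2c transfer paths above now share one shape: take chan->mutex before touching the AtomBIOS scratch area, convert every early return into r = -E...; goto done;, and unlock exactly once on the way out, so concurrent callers can no longer interleave on the shared buffer. The skeleton with the hardware specifics stubbed out (bad_input() and do_hw_transfer() are hypothetical placeholders):

    /* Sketch: single-exit locking pattern used by both converted functions. */
    static int guarded_xfer(struct radeon_i2c_chan *chan)
    {
            int r = 0;

            mutex_lock(&chan->mutex);	/* scratch buffer is shared */

            if (bad_input(chan)) {		/* hypothetical validation */
                    r = -EINVAL;
                    goto done;		/* never return with the lock held */
            }

            r = do_hw_transfer(chan);	/* hypothetical hw call */
    done:
            mutex_unlock(&chan->mutex);
            return r;
    }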
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d2fd98968085..dcd4518a9b08 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -80,6 +80,7 @@ extern int sumo_rlc_init(struct radeon_device *rdev);
 extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern void si_rlc_reset(struct radeon_device *rdev);
 extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
@@ -3257,7 +3258,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	u32 mc_shared_chmap, mc_arb_ramcfg;
 	u32 hdp_host_path_cntl;
 	u32 tmp;
-	int i, j;
+	int i, j, k;
 
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
@@ -3446,6 +3447,15 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		     rdev->config.cik.max_sh_per_se,
 		     rdev->config.cik.max_backends_per_se);
 
+	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
+				rdev->config.cik.active_cus +=
+					hweight32(cik_get_cu_active_bitmap(rdev, i, j));
+			}
+		}
+	}
+
 	/* set HW defaults for 3D engine */
 	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
@@ -3698,7 +3708,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
 	return true;
@@ -3818,7 +3828,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 		radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 		radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 		radeon_ring_write(ring, next_rptr);
 	}
 
@@ -5396,6 +5406,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_MX_L1_TLB_CNTL,
 	       (0xA << 7) |
 	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
 	       ENABLE_ADVANCED_DRIVER_MODEL |
 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
@@ -5408,7 +5419,8 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
 	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
-	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	       BANK_SELECT(4) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
 	/* setup context0 */
 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -5444,6 +5456,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 4);
 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
 				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -7450,7 +7463,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[0]))
-				radeon_crtc_handle_flip(rdev, 0);
+				radeon_crtc_handle_vblank(rdev, 0);
 			rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D1 vblank\n");
 		}
@@ -7476,7 +7489,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[1]))
-				radeon_crtc_handle_flip(rdev, 1);
+				radeon_crtc_handle_vblank(rdev, 1);
 			rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D2 vblank\n");
 		}
@@ -7502,7 +7515,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[2]))
-				radeon_crtc_handle_flip(rdev, 2);
+				radeon_crtc_handle_vblank(rdev, 2);
 			rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D3 vblank\n");
 		}
@@ -7528,7 +7541,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[3]))
-				radeon_crtc_handle_flip(rdev, 3);
+				radeon_crtc_handle_vblank(rdev, 3);
 			rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D4 vblank\n");
 		}
@@ -7554,7 +7567,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[4]))
-				radeon_crtc_handle_flip(rdev, 4);
+				radeon_crtc_handle_vblank(rdev, 4);
 			rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D5 vblank\n");
 		}
@@ -7580,7 +7593,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[5]))
-				radeon_crtc_handle_flip(rdev, 5);
+				radeon_crtc_handle_vblank(rdev, 5);
 			rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D6 vblank\n");
 		}
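The new nested loop in cik_gpu_init() tallies rdev->config.cik.active_cus by summing hweight32() -- a 32-bit population count -- over the active-CU bitmap of every shader array on every shader engine. The counting idiom on its own:

    /* Sketch: total enabled units = popcount over per-array bitmaps. */
    static unsigned int count_active_units(const u32 *bitmaps, int n)
    {
            unsigned int total = 0;
            int i;

            for (i = 0; i < n; i++)
                    total += hweight32(bitmaps[i]);	/* set bits = active units */
            return total;
    }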
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 72e464c79a88..8e9d0f1d858e 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -141,7 +141,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 		next_rptr += 4;
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
 		radeon_ring_write(ring, 1); /* number of DWs to follow */
 		radeon_ring_write(ring, next_rptr);
 	}
@@ -151,7 +151,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
-	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	radeon_ring_write(ring, ib->length_dw);
 
 }
@@ -203,8 +203,8 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 
 	/* write the fence */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
-	radeon_ring_write(ring, addr & 0xffffffff);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, lower_32_bits(addr));
+	radeon_ring_write(ring, upper_32_bits(addr));
 	radeon_ring_write(ring, fence->seq);
 	/* generate an interrupt */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
@@ -233,7 +233,7 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 	radeon_ring_write(ring, addr & 0xfffffff8);
-	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(addr));
 
 	return true;
 }
@@ -551,10 +551,10 @@ int cik_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
 		radeon_ring_write(ring, cur_size_in_bytes);
 		radeon_ring_write(ring, 0); /* src/dst endian swap */
-		radeon_ring_write(ring, src_offset & 0xffffffff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-		radeon_ring_write(ring, dst_offset & 0xffffffff);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
 	}
@@ -605,7 +605,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
 	}
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
-	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
 	radeon_ring_write(ring, 1); /* number of DWs to follow */
 	radeon_ring_write(ring, 0xDEADBEEF);
 	radeon_ring_unlock_commit(rdev, ring);
@@ -660,7 +660,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
@@ -742,7 +742,26 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 
 	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-	if (flags & R600_PTE_SYSTEM) {
+	if (flags == R600_PTE_GART) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		while (count) {
+			unsigned bytes = count * 8;
+			if (bytes > 0x1FFFF8)
+				bytes = 0x1FFFF8;
+
+			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+			ib->ptr[ib->length_dw++] = bytes;
+			ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+			ib->ptr[ib->length_dw++] = lower_32_bits(src);
+			ib->ptr[ib->length_dw++] = upper_32_bits(src);
+			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+			pe += bytes;
+			src += bytes;
+			count -= bytes / 8;
+		}
+	} else if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
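The new R600_PTE_GART branch in cik_sdma_vm_set_page() stops writing PTEs one at a time and instead DMA-copies them out of the master GART table (rdev->gart.table_addr), since for GART-only mappings the entries already exist there. One SDMA copy packet moves at most 0x1FFFF8 bytes, and PTEs are 8 bytes each, so the loop retires bytes / 8 entries per packet. The chunking on its own (emit_copy() stands in for building the seven-dword packet shown in the hunk):

    /* Sketch: chunked PTE copy, 0x1FFFF8 bytes max per SDMA packet. */
    while (count) {
            unsigned int bytes = count * 8;	/* 8 bytes per PTE */

            if (bytes > 0x1FFFF8)
                    bytes = 0x1FFFF8;	/* hw copy-packet limit */

            emit_copy(ib, src, pe, bytes);	/* hypothetical emitter */
            pe    += bytes;
            src   += bytes;
            count -= bytes / 8;		/* back to PTE units */
    }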
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index dd7926394a8f..ae88660f34ea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,7 @@
 #define		READ_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 16)
 #define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 18)
 #define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)		(((x) & 0xF) << 24)
 #define VM_CONTEXT1_CNTL				0x1414
 #define VM_CONTEXT0_CNTL2				0x1430
 #define VM_CONTEXT1_CNTL2				0x1434
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index aa908c55a513..e48a14037b76 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1050,7 +1050,7 @@ static const struct cs_extent_def SECT_CONTEXT_defs[] =
 	{SECT_CONTEXT_def_5, 0x0000a29e, 5 },
 	{SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
 	{SECT_CONTEXT_def_7, 0x0000a2de, 290 },
-	{ 0, 0, 0 }
+	{ NULL, 0, 0 }
 };
 static const u32 SECT_CLEAR_def_1[] =
 {
@@ -1061,7 +1061,7 @@ static const u32 SECT_CLEAR_def_1[] =
 static const struct cs_extent_def SECT_CLEAR_defs[] =
 {
 	{SECT_CLEAR_def_1, 0x0000ffc0, 3 },
-	{ 0, 0, 0 }
+	{ NULL, 0, 0 }
 };
 static const u32 SECT_CTRLCONST_def_1[] =
 {
@@ -1071,11 +1071,11 @@ static const u32 SECT_CTRLCONST_def_1[] =
 static const struct cs_extent_def SECT_CTRLCONST_defs[] =
 {
 	{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
-	{ 0, 0, 0 }
+	{ NULL, 0, 0 }
 };
 static const struct cs_section_def cayman_cs_data[] = {
 	{ SECT_CONTEXT_defs, SECT_CONTEXT },
 	{ SECT_CLEAR_defs, SECT_CLEAR },
 	{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
-	{ 0, SECT_NONE }
+	{ NULL, SECT_NONE }
 };
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index c3982f9475fb..f55d06664e31 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -936,9 +936,9 @@ static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
 	{ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
 	{ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
 	{ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
-	{ 0, 0, 0 }
+	{ NULL, 0, 0 }
 };
 static const struct cs_section_def ci_cs_data[] = {
 	{ ci_SECT_CONTEXT_defs, SECT_CONTEXT },
-	{ 0, SECT_NONE }
+	{ NULL, SECT_NONE }
 };
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
index b994cb2a35a0..66e39cdb5cb0 100644
--- a/drivers/gpu/drm/radeon/clearstate_si.h
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -933,9 +933,9 @@ static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
 	{si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
 	{si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
 	{si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
-	{ 0, 0, 0 }
+	{ NULL, 0, 0 }
 };
 static const struct cs_section_def si_cs_data[] = {
 	{ si_SECT_CONTEXT_defs, SECT_CONTEXT },
-	{ 0, SECT_NONE }
+	{ NULL, SECT_NONE }
 };
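All three clearstate headers get the same sparse fix: the terminating table entry used a literal 0 in a pointer slot, and consumers detect the end of the table by a NULL section pointer, so spelling it NULL changes nothing at runtime. The usual walk over such a sentinel-terminated table looks roughly like this (field names are assumptions based on the struct usage visible here):

    /* Sketch: iterating a NULL-terminated cs_section_def-style table. */
    for (sect = cs_data; sect->section != NULL; sect++)
            handle_section(sect->section, sect->id);	/* hypothetical consumer */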
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
new file mode 100644
index 000000000000..51800e340a57
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2014 Rafał Miłecki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp;
+	u8 *sadb;
+	int sad_count;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+		return;
+	}
+
+	/* program the speaker allocation */
+	tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+	/* set HDMI mode */
+	tmp |= HDMI_CONNECTION;
+	if (sad_count)
+		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+	else
+		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+	WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+	kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	struct cea_sad *sads;
+	int i, sad_count;
+
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+		return;
+	}
+	BUG_ON(!sads);
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					stereo_freqs |= sad->freq;
+				else
+					break;
+			}
+		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+		WREG32(eld_reg_to_type[i][0], value);
+	}
+
+	kfree(sads);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
+	uint32_t offset;
+	ssize_t err;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	/* disable audio prior to setting up hw */
+	dig->afmt->pin = r600_audio_get_pin(rdev);
+	r600_audio_enable(rdev, dig->afmt->pin, false);
+
+	r600_audio_set_dto(encoder, mode->clock);
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND); /* send null packets when required */
+
+	WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	if (ASIC_IS_DCE32(rdev)) {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+		       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	} else {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+		       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	}
+
+	if (ASIC_IS_DCE32(rdev)) {
+		dce3_2_afmt_write_speaker_allocation(encoder);
+		dce3_2_afmt_write_sad_regs(encoder);
+	}
+
+	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+	       HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+	       HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND | /* send null packets when required */
+	       HDMI0_GC_SEND | /* send general control packets */
+	       HDMI0_GC_CONT); /* send general control packets every frame */
+
+	/* TODO: HDMI0_AUDIO_INFO_UPDATE */
+	WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+	       HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+	WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+	       HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+	r600_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
+	WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+	r600_hdmi_audio_workaround(encoder);
+
+	/* enable audio after setting up hw */
+	r600_audio_enable(rdev, dig->afmt->pin, true);
+}
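In dce3_2_afmt_write_sad_regs() above, each audio-descriptor register gets the short audio descriptor (SAD) with the highest channel count for its coding type, while the frequency masks of matching PCM SADs are additionally ORed into a stereo-frequencies field. The per-register reduction, lifted out of the loop (build_descriptor() is a hypothetical stand-in for the MAX_CHANNELS | DESCRIPTOR_BYTE_2 | SUPPORTED_FREQUENCIES packing):

    /* Sketch: pick the richest SAD per coding type, union PCM rates. */
    u32 value = 0;
    u8 stereo_freqs = 0;
    int max_channels = -1, j;

    for (j = 0; j < sad_count; j++) {
            struct cea_sad *sad = &sads[j];

            if (sad->format != wanted_format)
                    continue;
            if (sad->channels > max_channels) {	/* keep highest channel count */
                    value = build_descriptor(sad);
                    max_channels = sad->channels;
            }
            if (wanted_format == HDMI_AUDIO_CODING_TYPE_PCM)
                    stereo_freqs |= sad->freq;	/* union of PCM sample rates */
    }
    value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);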
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f7a51a3694f..e2f605224e8c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1301,36 +1301,6 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 }
 
 /**
- * radeon_irq_kms_pflip_irq_get - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (evergreen+).
- * Enables the pageflip irq (vblank irq).
- */
-void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
-	/* enable the pflip int */
-	radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * evergreen_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (evergreen+).
- * Disables the pageflip irq (vblank irq).
- */
-void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
-{
-	/* disable the pflip int */
-	radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
  * evergreen_page_flip - pageflip callback.
  *
  * @rdev: radeon_device pointer
@@ -1343,7 +1313,7 @@ void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
 * double buffered update to take place.
 * Returns the current update pending status.
 */
-u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -1375,9 +1345,23 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 	/* Unlock the lock, so double-buffering can take place inside vblank */
 	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * evergreen_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Returns the current update pending status.
+ */
+bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
 	/* Return current update_pending status: */
-	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+		  EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
 }
 
 /* get temperature in millidegrees */
@@ -3353,6 +3337,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 			disabled_rb_mask &= ~(1 << i);
 	}
 
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.evergreen.active_simds = hweight32(~tmp);
+
 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
@@ -4810,7 +4806,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[0]))
-				radeon_crtc_handle_flip(rdev, 0);
+				radeon_crtc_handle_vblank(rdev, 0);
 			rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D1 vblank\n");
 		}
@@ -4836,7 +4832,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[1]))
-				radeon_crtc_handle_flip(rdev, 1);
+				radeon_crtc_handle_vblank(rdev, 1);
 			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D2 vblank\n");
 		}
@@ -4862,7 +4858,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[2]))
-				radeon_crtc_handle_flip(rdev, 2);
+				radeon_crtc_handle_vblank(rdev, 2);
 			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 			DRM_DEBUG("IH: D3 vblank\n");
 		}
@@ -4888,7 +4884,7 @@ restart_ih:
 				wake_up(&rdev->irq.vblank_queue);
 			}
 			if (atomic_read(&rdev->irq.pflip[3]))
-				radeon_crtc_handle_flip(rdev, 3);
+				radeon_crtc_handle_vblank(rdev, 3);
4892 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 4888 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
4893 DRM_DEBUG("IH: D4 vblank\n"); 4889 DRM_DEBUG("IH: D4 vblank\n");
4894 } 4890 }
@@ -4914,7 +4910,7 @@ restart_ih:
4914 wake_up(&rdev->irq.vblank_queue); 4910 wake_up(&rdev->irq.vblank_queue);
4915 } 4911 }
4916 if (atomic_read(&rdev->irq.pflip[4])) 4912 if (atomic_read(&rdev->irq.pflip[4]))
4917 radeon_crtc_handle_flip(rdev, 4); 4913 radeon_crtc_handle_vblank(rdev, 4);
4918 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 4914 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
4919 DRM_DEBUG("IH: D5 vblank\n"); 4915 DRM_DEBUG("IH: D5 vblank\n");
4920 } 4916 }
@@ -4940,7 +4936,7 @@ restart_ih:
4940 wake_up(&rdev->irq.vblank_queue); 4936 wake_up(&rdev->irq.vblank_queue);
4941 } 4937 }
4942 if (atomic_read(&rdev->irq.pflip[5])) 4938 if (atomic_read(&rdev->irq.pflip[5]))
4943 radeon_crtc_handle_flip(rdev, 5); 4939 radeon_crtc_handle_vblank(rdev, 5);
4944 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 4940 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
4945 DRM_DEBUG("IH: D6 vblank\n"); 4941 DRM_DEBUG("IH: D6 vblank\n");
4946 } 4942 }
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 05b0c95813fd..1ec0e6e83f9f 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -293,10 +293,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
293 struct radeon_device *rdev = dev->dev_private; 293 struct radeon_device *rdev = dev->dev_private;
294 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 294 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
295 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 295 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
296 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
296 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 297 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
297 struct hdmi_avi_infoframe frame; 298 struct hdmi_avi_infoframe frame;
298 uint32_t offset; 299 uint32_t offset;
299 ssize_t err; 300 ssize_t err;
301 uint32_t val;
302 int bpc = 8;
300 303
301 if (!dig || !dig->afmt) 304 if (!dig || !dig->afmt)
302 return; 305 return;
@@ -306,6 +309,12 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
306 return; 309 return;
307 offset = dig->afmt->offset; 310 offset = dig->afmt->offset;
308 311
312 /* hdmi deep color mode general control packets setup, if bpc > 8 */
313 if (encoder->crtc) {
314 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
315 bpc = radeon_crtc->bpc;
316 }
317
309 /* disable audio prior to setting up hw */ 318 /* disable audio prior to setting up hw */
310 if (ASIC_IS_DCE6(rdev)) { 319 if (ASIC_IS_DCE6(rdev)) {
311 dig->afmt->pin = dce6_audio_get_pin(rdev); 320 dig->afmt->pin = dce6_audio_get_pin(rdev);
@@ -322,6 +331,35 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
322 331
323 WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000); 332 WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
324 333
334 val = RREG32(HDMI_CONTROL + offset);
335 val &= ~HDMI_DEEP_COLOR_ENABLE;
336 val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
337
338 switch (bpc) {
339 case 0:
340 case 6:
341 case 8:
342 case 16:
343 default:
344 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
345 connector->name, bpc);
346 break;
347 case 10:
348 val |= HDMI_DEEP_COLOR_ENABLE;
349 val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
350 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
351 connector->name);
352 break;
353 case 12:
354 val |= HDMI_DEEP_COLOR_ENABLE;
355 val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
356 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
357 connector->name);
358 break;
359 }
360
361 WREG32(HDMI_CONTROL + offset, val);
362
325 WREG32(HDMI_VBI_PACKET_CONTROL + offset, 363 WREG32(HDMI_VBI_PACKET_CONTROL + offset,
326 HDMI_NULL_SEND | /* send null packets when required */ 364 HDMI_NULL_SEND | /* send null packets when required */
327 HDMI_GC_SEND | /* send general control packets */ 365 HDMI_GC_SEND | /* send general control packets */
@@ -348,9 +386,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
348 386
349 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 387 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
350 388
351 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 389 if (bpc > 8)
352 HDMI_ACR_SOURCE | /* select SW CTS value */ 390 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
353 HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ 391 HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
392 else
393 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
394 HDMI_ACR_SOURCE | /* select SW CTS value */
395 HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
354 396
355 evergreen_hdmi_update_ACR(encoder, mode->clock); 397 evergreen_hdmi_update_ACR(encoder, mode->clock);
356 398
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f9c7963b3ee6..b066d6711b8d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -517,10 +517,11 @@
517# define HDMI_ERROR_ACK (1 << 8) 517# define HDMI_ERROR_ACK (1 << 8)
518# define HDMI_ERROR_MASK (1 << 9) 518# define HDMI_ERROR_MASK (1 << 9)
519# define HDMI_DEEP_COLOR_ENABLE (1 << 24) 519# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
520# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28) 520# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
521# define HDMI_24BIT_DEEP_COLOR 0 521# define HDMI_24BIT_DEEP_COLOR 0
522# define HDMI_30BIT_DEEP_COLOR 1 522# define HDMI_30BIT_DEEP_COLOR 1
523# define HDMI_36BIT_DEEP_COLOR 2 523# define HDMI_36BIT_DEEP_COLOR 2
524# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28)
524#define HDMI_STATUS 0x7034 525#define HDMI_STATUS 0x7034
525# define HDMI_ACTIVE_AVMUTE (1 << 0) 526# define HDMI_ACTIVE_AVMUTE (1 << 0)
526# define HDMI_AUDIO_PACKET_ERROR (1 << 16) 527# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d246e043421a..5a33ca681867 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1057,6 +1057,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1057 disabled_rb_mask &= ~(1 << i); 1057 disabled_rb_mask &= ~(1 << i);
1058 } 1058 }
1059 1059
1060 for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
1061 u32 simd_disable_bitmap;
1062
1063 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1064 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1065 simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
1066 simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
1067 tmp <<= 16;
1068 tmp |= simd_disable_bitmap;
1069 }
1070 rdev->config.cayman.active_simds = hweight32(~tmp);
1071
1060 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 1072 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1061 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 1073 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1062 1074
@@ -1228,12 +1240,14 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1228 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 1240 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
1229 /* Setup L2 cache */ 1241 /* Setup L2 cache */
1230 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 1242 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
1243 ENABLE_L2_FRAGMENT_PROCESSING |
1231 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 1244 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1232 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 1245 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
1233 EFFECTIVE_L2_QUEUE_SIZE(7) | 1246 EFFECTIVE_L2_QUEUE_SIZE(7) |
1234 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 1247 CONTEXT1_IDENTITY_ACCESS_MODE(1));
1235 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 1248 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
1236 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 1249 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
1250 BANK_SELECT(6) |
1237 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 1251 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1238 /* setup context0 */ 1252 /* setup context0 */
1239 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1253 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1266,6 +1280,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1266 (u32)(rdev->dummy_page.addr >> 12)); 1280 (u32)(rdev->dummy_page.addr >> 12));
1267 WREG32(VM_CONTEXT1_CNTL2, 4); 1281 WREG32(VM_CONTEXT1_CNTL2, 4);
1268 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 1282 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
1283 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
1269 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 1284 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
1270 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 1285 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
1271 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 1286 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -1343,7 +1358,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
1343 /* EVENT_WRITE_EOP - flush caches, send int */ 1358 /* EVENT_WRITE_EOP - flush caches, send int */
1344 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 1359 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1345 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 1360 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
1346 radeon_ring_write(ring, addr & 0xffffffff); 1361 radeon_ring_write(ring, lower_32_bits(addr));
1347 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 1362 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
1348 radeon_ring_write(ring, fence->seq); 1363 radeon_ring_write(ring, fence->seq);
1349 radeon_ring_write(ring, 0); 1364 radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index d996033c243e..2e12e4d69253 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -128,6 +128,7 @@
128#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) 128#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
129#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) 129#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
130#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) 130#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
131#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
131#define VM_CONTEXT1_CNTL 0x1414 132#define VM_CONTEXT1_CNTL 0x1414
132#define VM_CONTEXT0_CNTL2 0x1430 133#define VM_CONTEXT0_CNTL2 0x1430
133#define VM_CONTEXT1_CNTL2 0x1434 134#define VM_CONTEXT1_CNTL2 0x1434
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b6c32640df20..1544efcf1c3a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -142,36 +142,6 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
142} 142}
143 143
144/** 144/**
145 * r100_pre_page_flip - pre-pageflip callback.
146 *
147 * @rdev: radeon_device pointer
148 * @crtc: crtc to prepare for pageflip on
149 *
150 * Pre-pageflip callback (r1xx-r4xx).
151 * Enables the pageflip irq (vblank irq).
152 */
153void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
154{
155 /* enable the pflip int */
156 radeon_irq_kms_pflip_irq_get(rdev, crtc);
157}
158
159/**
160 * r100_post_page_flip - post-pageflip callback.
161 *
162 * @rdev: radeon_device pointer
163 * @crtc: crtc to cleanup pageflip on
164 *
165 * Post-pageflip callback (r1xx-r4xx).
166 * Disables the pageflip irq (vblank irq).
167 */
168void r100_post_page_flip(struct radeon_device *rdev, int crtc)
169{
170 /* disable the pflip int */
171 radeon_irq_kms_pflip_irq_put(rdev, crtc);
172}
173
174/**
175 * r100_page_flip - pageflip callback. 145 * r100_page_flip - pageflip callback.
176 * 146 *
177 * @rdev: radeon_device pointer 147 * @rdev: radeon_device pointer
@@ -182,9 +152,8 @@ void r100_post_page_flip(struct radeon_device *rdev, int crtc)
182 * During vblank we take the crtc lock and wait for the update_pending 152 * During vblank we take the crtc lock and wait for the update_pending
183 * bit to go high, when it does, we release the lock, and allow the 153 * bit to go high, when it does, we release the lock, and allow the
184 * double buffered update to take place. 154 * double buffered update to take place.
185 * Returns the current update pending status.
186 */ 155 */
187u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 156void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
188{ 157{
189 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 158 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
190 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 159 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -206,8 +175,24 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
206 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; 175 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
207 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 176 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
208 177
178}
179
180/**
181 * r100_page_flip_pending - check if page flip is still pending
182 *
183 * @rdev: radeon_device pointer
184 * @crtc_id: crtc to check
185 *
186 * Check if the last pageflip is still pending (r1xx-r4xx).
187 * Returns the current update pending status.
188 */
189bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
190{
191 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
192
209 /* Return current update_pending status: */ 193 /* Return current update_pending status: */
210 return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; 194 return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
195 RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
211} 196}
212 197
213/** 198/**
@@ -697,15 +682,11 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
697 WREG32(RADEON_AIC_HI_ADDR, 0); 682 WREG32(RADEON_AIC_HI_ADDR, 0);
698} 683}
699 684
700int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 685void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
686 uint64_t addr)
701{ 687{
702 u32 *gtt = rdev->gart.ptr; 688 u32 *gtt = rdev->gart.ptr;
703
704 if (i < 0 || i > rdev->gart.num_gpu_pages) {
705 return -EINVAL;
706 }
707 gtt[i] = cpu_to_le32(lower_32_bits(addr)); 689 gtt[i] = cpu_to_le32(lower_32_bits(addr));
708 return 0;
709} 690}
710 691
711void r100_pci_gart_fini(struct radeon_device *rdev) 692void r100_pci_gart_fini(struct radeon_device *rdev)
@@ -794,7 +775,7 @@ int r100_irq_process(struct radeon_device *rdev)
794 wake_up(&rdev->irq.vblank_queue); 775 wake_up(&rdev->irq.vblank_queue);
795 } 776 }
796 if (atomic_read(&rdev->irq.pflip[0])) 777 if (atomic_read(&rdev->irq.pflip[0]))
797 radeon_crtc_handle_flip(rdev, 0); 778 radeon_crtc_handle_vblank(rdev, 0);
798 } 779 }
799 if (status & RADEON_CRTC2_VBLANK_STAT) { 780 if (status & RADEON_CRTC2_VBLANK_STAT) {
800 if (rdev->irq.crtc_vblank_int[1]) { 781 if (rdev->irq.crtc_vblank_int[1]) {
@@ -803,7 +784,7 @@ int r100_irq_process(struct radeon_device *rdev)
803 wake_up(&rdev->irq.vblank_queue); 784 wake_up(&rdev->irq.vblank_queue);
804 } 785 }
805 if (atomic_read(&rdev->irq.pflip[1])) 786 if (atomic_read(&rdev->irq.pflip[1]))
806 radeon_crtc_handle_flip(rdev, 1); 787 radeon_crtc_handle_vblank(rdev, 1);
807 } 788 }
808 if (status & RADEON_FP_DETECT_STAT) { 789 if (status & RADEON_FP_DETECT_STAT) {
809 queue_hotplug = true; 790 queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 206caf9700b7..3c21d77a483d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -72,13 +72,11 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
72#define R300_PTE_WRITEABLE (1 << 2) 72#define R300_PTE_WRITEABLE (1 << 2)
73#define R300_PTE_READABLE (1 << 3) 73#define R300_PTE_READABLE (1 << 3)
74 74
75int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 75void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
76 uint64_t addr)
76{ 77{
77 void __iomem *ptr = rdev->gart.ptr; 78 void __iomem *ptr = rdev->gart.ptr;
78 79
79 if (i < 0 || i > rdev->gart.num_gpu_pages) {
80 return -EINVAL;
81 }
82 addr = (lower_32_bits(addr) >> 8) | 80 addr = (lower_32_bits(addr) >> 8) |
83 ((upper_32_bits(addr) & 0xff) << 24) | 81 ((upper_32_bits(addr) & 0xff) << 24) |
84 R300_PTE_WRITEABLE | R300_PTE_READABLE; 82 R300_PTE_WRITEABLE | R300_PTE_READABLE;
@@ -86,7 +84,6 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
86 * on powerpc without HW swappers, it'll get swapped on the way 84 * on powerpc without HW swappers, it'll get swapped on the way
87 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 85 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
88 writel(addr, ((void __iomem *)ptr) + (i * 4)); 86 writel(addr, ((void __iomem *)ptr) + (i * 4));
89 return 0;
90} 87}
91 88
92int rv370_pcie_gart_init(struct radeon_device *rdev) 89int rv370_pcie_gart_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bbc189fd3ddc..c66952d4b00c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1958,6 +1958,9 @@ static void r600_gpu_init(struct radeon_device *rdev)
1958 if (tmp < rdev->config.r600.max_simds) { 1958 if (tmp < rdev->config.r600.max_simds) {
1959 rdev->config.r600.max_simds = tmp; 1959 rdev->config.r600.max_simds = tmp;
1960 } 1960 }
1961 tmp = rdev->config.r600.max_simds -
1962 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1963 rdev->config.r600.active_simds = tmp;
1961 1964
1962 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 1965 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1963 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1966 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
@@ -2724,7 +2727,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2724 /* EVENT_WRITE_EOP - flush caches, send int */ 2727 /* EVENT_WRITE_EOP - flush caches, send int */
2725 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2728 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2726 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2729 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2727 radeon_ring_write(ring, addr & 0xffffffff); 2730 radeon_ring_write(ring, lower_32_bits(addr));
2728 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2731 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2729 radeon_ring_write(ring, fence->seq); 2732 radeon_ring_write(ring, fence->seq);
2730 radeon_ring_write(ring, 0); 2733 radeon_ring_write(ring, 0);
@@ -2763,7 +2766,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2763 sel |= PACKET3_SEM_WAIT_ON_SIGNAL; 2766 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2764 2767
2765 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2768 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2766 radeon_ring_write(ring, addr & 0xffffffff); 2769 radeon_ring_write(ring, lower_32_bits(addr));
2767 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2770 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2768 2771
2769 return true; 2772 return true;
@@ -2824,9 +2827,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2824 if (size_in_bytes == 0) 2827 if (size_in_bytes == 0)
2825 tmp |= PACKET3_CP_DMA_CP_SYNC; 2828 tmp |= PACKET3_CP_DMA_CP_SYNC;
2826 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); 2829 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2827 radeon_ring_write(ring, src_offset & 0xffffffff); 2830 radeon_ring_write(ring, lower_32_bits(src_offset));
2828 radeon_ring_write(ring, tmp); 2831 radeon_ring_write(ring, tmp);
2829 radeon_ring_write(ring, dst_offset & 0xffffffff); 2832 radeon_ring_write(ring, lower_32_bits(dst_offset));
2830 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 2833 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2831 radeon_ring_write(ring, cur_size_in_bytes); 2834 radeon_ring_write(ring, cur_size_in_bytes);
2832 src_offset += cur_size_in_bytes; 2835 src_offset += cur_size_in_bytes;
@@ -3876,7 +3879,7 @@ restart_ih:
3876 wake_up(&rdev->irq.vblank_queue); 3879 wake_up(&rdev->irq.vblank_queue);
3877 } 3880 }
3878 if (atomic_read(&rdev->irq.pflip[0])) 3881 if (atomic_read(&rdev->irq.pflip[0]))
3879 radeon_crtc_handle_flip(rdev, 0); 3882 radeon_crtc_handle_vblank(rdev, 0);
3880 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 3883 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3881 DRM_DEBUG("IH: D1 vblank\n"); 3884 DRM_DEBUG("IH: D1 vblank\n");
3882 } 3885 }
@@ -3902,7 +3905,7 @@ restart_ih:
3902 wake_up(&rdev->irq.vblank_queue); 3905 wake_up(&rdev->irq.vblank_queue);
3903 } 3906 }
3904 if (atomic_read(&rdev->irq.pflip[1])) 3907 if (atomic_read(&rdev->irq.pflip[1]))
3905 radeon_crtc_handle_flip(rdev, 1); 3908 radeon_crtc_handle_vblank(rdev, 1);
3906 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; 3909 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3907 DRM_DEBUG("IH: D2 vblank\n"); 3910 DRM_DEBUG("IH: D2 vblank\n");
3908 } 3911 }
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 85a2bb28aed2..26ef8ced6f89 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,7 +133,7 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
133/* 133/*
134 * update the N and CTS parameters for a given pixel clock rate 134 * update the N and CTS parameters for a given pixel clock rate
135 */ 135 */
136static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) 136void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
137{ 137{
138 struct drm_device *dev = encoder->dev; 138 struct drm_device *dev = encoder->dev;
139 struct radeon_device *rdev = dev->dev_private; 139 struct radeon_device *rdev = dev->dev_private;
@@ -142,21 +142,33 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
142 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 142 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
143 uint32_t offset = dig->afmt->offset; 143 uint32_t offset = dig->afmt->offset;
144 144
145 WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz)); 145 WREG32_P(HDMI0_ACR_32_0 + offset,
146 WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz); 146 HDMI0_ACR_CTS_32(acr.cts_32khz),
147 147 ~HDMI0_ACR_CTS_32_MASK);
148 WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz)); 148 WREG32_P(HDMI0_ACR_32_1 + offset,
149 WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz); 149 HDMI0_ACR_N_32(acr.n_32khz),
150 150 ~HDMI0_ACR_N_32_MASK);
151 WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz)); 151
152 WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz); 152 WREG32_P(HDMI0_ACR_44_0 + offset,
153 HDMI0_ACR_CTS_44(acr.cts_44_1khz),
154 ~HDMI0_ACR_CTS_44_MASK);
155 WREG32_P(HDMI0_ACR_44_1 + offset,
156 HDMI0_ACR_N_44(acr.n_44_1khz),
157 ~HDMI0_ACR_N_44_MASK);
158
159 WREG32_P(HDMI0_ACR_48_0 + offset,
160 HDMI0_ACR_CTS_48(acr.cts_48khz),
161 ~HDMI0_ACR_CTS_48_MASK);
162 WREG32_P(HDMI0_ACR_48_1 + offset,
163 HDMI0_ACR_N_48(acr.n_48khz),
164 ~HDMI0_ACR_N_48_MASK);
153} 165}
154 166
155/* 167/*
156 * build an HDMI Video Info Frame 168 * build an HDMI Video Info Frame
157 */ 169 */
158static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, 170void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
159 void *buffer, size_t size) 171 size_t size)
160{ 172{
161 struct drm_device *dev = encoder->dev; 173 struct drm_device *dev = encoder->dev;
162 struct radeon_device *rdev = dev->dev_private; 174 struct radeon_device *rdev = dev->dev_private;
@@ -231,7 +243,7 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
231/* 243/*
232 * write the audio workaround status to the hardware 244 * write the audio workaround status to the hardware
233 */ 245 */
234static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) 246void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
235{ 247{
236 struct drm_device *dev = encoder->dev; 248 struct drm_device *dev = encoder->dev;
237 struct radeon_device *rdev = dev->dev_private; 249 struct radeon_device *rdev = dev->dev_private;
@@ -250,7 +262,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
250 value, ~HDMI0_AUDIO_TEST_EN); 262 value, ~HDMI0_AUDIO_TEST_EN);
251} 263}
252 264
253static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) 265void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
254{ 266{
255 struct drm_device *dev = encoder->dev; 267 struct drm_device *dev = encoder->dev;
256 struct radeon_device *rdev = dev->dev_private; 268 struct radeon_device *rdev = dev->dev_private;
@@ -320,121 +332,6 @@ static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
320 } 332 }
321} 333}
322 334
323static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
324{
325 struct radeon_device *rdev = encoder->dev->dev_private;
326 struct drm_connector *connector;
327 struct radeon_connector *radeon_connector = NULL;
328 u32 tmp;
329 u8 *sadb;
330 int sad_count;
331
332 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
333 if (connector->encoder == encoder) {
334 radeon_connector = to_radeon_connector(connector);
335 break;
336 }
337 }
338
339 if (!radeon_connector) {
340 DRM_ERROR("Couldn't find encoder's connector\n");
341 return;
342 }
343
344 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
345 if (sad_count < 0) {
346 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
347 return;
348 }
349
350 /* program the speaker allocation */
351 tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
352 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
353 /* set HDMI mode */
354 tmp |= HDMI_CONNECTION;
355 if (sad_count)
356 tmp |= SPEAKER_ALLOCATION(sadb[0]);
357 else
358 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
359 WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
360
361 kfree(sadb);
362}
363
364static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
365{
366 struct radeon_device *rdev = encoder->dev->dev_private;
367 struct drm_connector *connector;
368 struct radeon_connector *radeon_connector = NULL;
369 struct cea_sad *sads;
370 int i, sad_count;
371
372 static const u16 eld_reg_to_type[][2] = {
373 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
374 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
375 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
376 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
377 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
378 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
379 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
380 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
381 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
382 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
383 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
384 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
385 };
386
387 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
388 if (connector->encoder == encoder) {
389 radeon_connector = to_radeon_connector(connector);
390 break;
391 }
392 }
393
394 if (!radeon_connector) {
395 DRM_ERROR("Couldn't find encoder's connector\n");
396 return;
397 }
398
399 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
400 if (sad_count < 0) {
401 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
402 return;
403 }
404 BUG_ON(!sads);
405
406 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
407 u32 value = 0;
408 u8 stereo_freqs = 0;
409 int max_channels = -1;
410 int j;
411
412 for (j = 0; j < sad_count; j++) {
413 struct cea_sad *sad = &sads[j];
414
415 if (sad->format == eld_reg_to_type[i][1]) {
416 if (sad->channels > max_channels) {
417 value = MAX_CHANNELS(sad->channels) |
418 DESCRIPTOR_BYTE_2(sad->byte2) |
419 SUPPORTED_FREQUENCIES(sad->freq);
420 max_channels = sad->channels;
421 }
422
423 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
424 stereo_freqs |= sad->freq;
425 else
426 break;
427 }
428 }
429
430 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
431
432 WREG32(eld_reg_to_type[i][0], value);
433 }
434
435 kfree(sads);
436}
437
438/* 335/*
439 * update the info frames with the data from the current display mode 336 * update the info frames with the data from the current display mode
440 */ 337 */
@@ -447,6 +344,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
447 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 344 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
448 struct hdmi_avi_infoframe frame; 345 struct hdmi_avi_infoframe frame;
449 uint32_t offset; 346 uint32_t offset;
347 uint32_t acr_ctl;
450 ssize_t err; 348 ssize_t err;
451 349
452 if (!dig || !dig->afmt) 350 if (!dig || !dig->afmt)
@@ -463,52 +361,44 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
463 361
464 r600_audio_set_dto(encoder, mode->clock); 362 r600_audio_set_dto(encoder, mode->clock);
465 363
466 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 364 WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
467 HDMI0_NULL_SEND); /* send null packets when required */ 365 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
468 366 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
469 WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); 367 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
470 368 HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
471 if (ASIC_IS_DCE32(rdev)) { 369 ~(HDMI0_AUDIO_SAMPLE_SEND |
472 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 370 HDMI0_AUDIO_DELAY_EN_MASK |
473 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 371 HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
474 HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ 371 HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
475 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, 373
476 AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ 374 /* DCE 3.0 uses a register that's normally for CRC_CONTROL */
477 AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 375 acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
478 } else { 376 HDMI0_ACR_PACKET_CONTROL;
479 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 377 WREG32_P(acr_ctl + offset,
480 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 378 HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
481 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 379 HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */
482 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ 380 ~(HDMI0_ACR_SOURCE |
483 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 381 HDMI0_ACR_AUTO_SEND));
484 } 382
485 383 WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
486 if (ASIC_IS_DCE32(rdev)) { 384 HDMI0_NULL_SEND | /* send null packets when required */
487 dce3_2_afmt_write_speaker_allocation(encoder); 385 HDMI0_GC_SEND | /* send general control packets */
488 dce3_2_afmt_write_sad_regs(encoder); 386 HDMI0_GC_CONT); /* send general control packets every frame */
489 } 387
490 388 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
491 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 389 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
492 HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ 390 HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
493 HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ 391 HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
494 392 HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
495 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 393
496 HDMI0_NULL_SEND | /* send null packets when required */ 394 WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
497 HDMI0_GC_SEND | /* send general control packets */ 395 HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
498 HDMI0_GC_CONT); /* send general control packets every frame */ 396 HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
499 397 ~(HDMI0_AVI_INFO_LINE_MASK |
500 /* TODO: HDMI0_AUDIO_INFO_UPDATE */ 398 HDMI0_AUDIO_INFO_LINE_MASK));
501 WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, 399
502 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 400 WREG32_AND(HDMI0_GC + offset,
503 HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ 401 ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */
504 HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
505 HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
506
507 WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
508 HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
509 HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
510
511 WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
512 402
513 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 403 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
514 if (err < 0) { 404 if (err < 0) {
@@ -523,22 +413,45 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
523 } 413 }
524 414
525 r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 415 r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
416
417 /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */
418
419 WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
420 ~(HDMI0_GENERIC0_SEND |
421 HDMI0_GENERIC0_CONT |
422 HDMI0_GENERIC0_UPDATE |
423 HDMI0_GENERIC1_SEND |
424 HDMI0_GENERIC1_CONT |
425 HDMI0_GENERIC0_LINE_MASK |
426 HDMI0_GENERIC1_LINE_MASK));
427
526 r600_hdmi_update_ACR(encoder, mode->clock); 428 r600_hdmi_update_ACR(encoder, mode->clock);
527 429
430 WREG32_P(HDMI0_60958_0 + offset,
431 HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
432 ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
433 HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
434
435 WREG32_P(HDMI0_60958_1 + offset,
436 HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
437 ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
438
528 /* it's unknown what these bits do exactly, but they're quite useful for debugging */ 439 /* it's unknown what these bits do exactly, but they're quite useful for debugging */
529 WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); 440 WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
530 WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); 441 WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
531 WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); 442 WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
532 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); 443 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
533 444
534 r600_hdmi_audio_workaround(encoder);
535
536 /* enable audio after setting up hw */ 445 /* enable audio after setting up hw */
537 r600_audio_enable(rdev, dig->afmt->pin, true); 446 r600_audio_enable(rdev, dig->afmt->pin, true);
538} 447}
539 448
540/* 449/**
541 * update settings with current parameters from audio engine 450 * r600_hdmi_update_audio_settings - Update audio infoframe
451 *
452 * @encoder: drm encoder
453 *
454 * Gets info about current audio stream and updates audio infoframe.
542 */ 455 */
543void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) 456void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
544{ 457{
@@ -550,7 +463,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
550 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; 463 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
551 struct hdmi_audio_infoframe frame; 464 struct hdmi_audio_infoframe frame;
552 uint32_t offset; 465 uint32_t offset;
553 uint32_t iec; 466 uint32_t value;
554 ssize_t err; 467 ssize_t err;
555 468
556 if (!dig->afmt || !dig->afmt->enabled) 469 if (!dig->afmt || !dig->afmt->enabled)
@@ -563,60 +476,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
563 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n", 476 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
564 (int)audio.status_bits, (int)audio.category_code); 477 (int)audio.status_bits, (int)audio.category_code);
565 478
566 iec = 0;
567 if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
568 iec |= 1 << 0;
569 if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
570 iec |= 1 << 1;
571 if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
572 iec |= 1 << 2;
573 if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
574 iec |= 1 << 3;
575
576 iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
577
578 switch (audio.rate) {
579 case 32000:
580 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
581 break;
582 case 44100:
583 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
584 break;
585 case 48000:
586 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
587 break;
588 case 88200:
589 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
590 break;
591 case 96000:
592 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
593 break;
594 case 176400:
595 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
596 break;
597 case 192000:
598 iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
599 break;
600 }
601
602 WREG32(HDMI0_60958_0 + offset, iec);
603
604 iec = 0;
605 switch (audio.bits_per_sample) {
606 case 16:
607 iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
608 break;
609 case 20:
610 iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
611 break;
612 case 24:
613 iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
614 break;
615 }
616 if (audio.status_bits & AUDIO_STATUS_V)
617 iec |= 0x5 << 16;
618 WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
619
620 err = hdmi_audio_infoframe_init(&frame); 479 err = hdmi_audio_infoframe_init(&frame);
621 if (err < 0) { 480 if (err < 0) {
622 DRM_ERROR("failed to setup audio infoframe\n"); 481 DRM_ERROR("failed to setup audio infoframe\n");
@@ -631,8 +490,22 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
631 return; 490 return;
632 } 491 }
633 492
493 value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
494 if (value & HDMI0_AUDIO_TEST_EN)
495 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
496 value & ~HDMI0_AUDIO_TEST_EN);
497
498 WREG32_OR(HDMI0_CONTROL + offset,
499 HDMI0_ERROR_ACK);
500
501 WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
502 ~HDMI0_AUDIO_INFO_SOURCE);
503
634 r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer)); 504 r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
635 r600_hdmi_audio_workaround(encoder); 505
506 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
507 HDMI0_AUDIO_INFO_CONT |
508 HDMI0_AUDIO_INFO_UPDATE);
636} 509}
637 510
638/* 511/*
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 37455f65107f..f94e7a9afe75 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1029,15 +1029,18 @@
1029#define HDMI0_AUDIO_PACKET_CONTROL 0x7408 1029#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
1030# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0) 1030# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
1031# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4) 1031# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
1032# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4)
1032# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8) 1033# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
1033# define HDMI0_AUDIO_TEST_EN (1 << 12) 1034# define HDMI0_AUDIO_TEST_EN (1 << 12)
1034# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) 1035# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
1036# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16)
1035# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24) 1037# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
1036# define HDMI0_60958_CS_UPDATE (1 << 26) 1038# define HDMI0_60958_CS_UPDATE (1 << 26)
1037# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28) 1039# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
1038# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29) 1040# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
1039#define HDMI0_AUDIO_CRC_CONTROL 0x740c 1041#define HDMI0_AUDIO_CRC_CONTROL 0x740c
1040# define HDMI0_AUDIO_CRC_EN (1 << 0) 1042# define HDMI0_AUDIO_CRC_EN (1 << 0)
1043#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c
1041#define HDMI0_VBI_PACKET_CONTROL 0x7410 1044#define HDMI0_VBI_PACKET_CONTROL 0x7410
1042# define HDMI0_NULL_SEND (1 << 0) 1045# define HDMI0_NULL_SEND (1 << 0)
1043# define HDMI0_GC_SEND (1 << 4) 1046# define HDMI0_GC_SEND (1 << 4)
@@ -1054,7 +1057,9 @@
1054# define HDMI0_MPEG_INFO_UPDATE (1 << 10) 1057# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
1055#define HDMI0_INFOFRAME_CONTROL1 0x7418 1058#define HDMI0_INFOFRAME_CONTROL1 0x7418
1056# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) 1059# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
1060# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0)
1057# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) 1061# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
1062# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8)
1058# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) 1063# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
1059#define HDMI0_GENERIC_PACKET_CONTROL 0x741c 1064#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
1060# define HDMI0_GENERIC0_SEND (1 << 0) 1065# define HDMI0_GENERIC0_SEND (1 << 0)
@@ -1063,7 +1068,9 @@
1063# define HDMI0_GENERIC1_SEND (1 << 4) 1068# define HDMI0_GENERIC1_SEND (1 << 4)
1064# define HDMI0_GENERIC1_CONT (1 << 5) 1069# define HDMI0_GENERIC1_CONT (1 << 5)
1065# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16) 1070# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
1071# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16)
1066# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24) 1072# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
1073# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24)
1067#define HDMI0_GC 0x7428 1074#define HDMI0_GC 0x7428
1068# define HDMI0_GC_AVMUTE (1 << 0) 1075# define HDMI0_GC_AVMUTE (1 << 0)
1069#define HDMI0_AVI_INFO0 0x7454 1076#define HDMI0_AVI_INFO0 0x7454
@@ -1119,16 +1126,22 @@
1119#define HDMI0_GENERIC1_6 0x74a8 1126#define HDMI0_GENERIC1_6 0x74a8
1120#define HDMI0_ACR_32_0 0x74ac 1127#define HDMI0_ACR_32_0 0x74ac
1121# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12) 1128# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
1129# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12)
1122#define HDMI0_ACR_32_1 0x74b0 1130#define HDMI0_ACR_32_1 0x74b0
1123# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0) 1131# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
1132# define HDMI0_ACR_N_32_MASK (0xfffff << 0)
1124#define HDMI0_ACR_44_0 0x74b4 1133#define HDMI0_ACR_44_0 0x74b4
1125# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12) 1134# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
1135# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12)
1126#define HDMI0_ACR_44_1 0x74b8 1136#define HDMI0_ACR_44_1 0x74b8
1127# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0) 1137# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
1138# define HDMI0_ACR_N_44_MASK (0xfffff << 0)
1128#define HDMI0_ACR_48_0 0x74bc 1139#define HDMI0_ACR_48_0 0x74bc
1129# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12) 1140# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
1141# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12)
1130#define HDMI0_ACR_48_1 0x74c0 1142#define HDMI0_ACR_48_1 0x74c0
1131# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0) 1143# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
1144# define HDMI0_ACR_N_48_MASK (0xfffff << 0)
1132#define HDMI0_ACR_STATUS_0 0x74c4 1145#define HDMI0_ACR_STATUS_0 0x74c4
1133#define HDMI0_ACR_STATUS_1 0x74c8 1146#define HDMI0_ACR_STATUS_1 0x74c8
1134#define HDMI0_AUDIO_INFO0 0x74cc 1147#define HDMI0_AUDIO_INFO0 0x74cc
@@ -1148,14 +1161,17 @@
1148# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) 1161# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
1149# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) 1162# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
1150# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) 1163# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
1164# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20)
1151# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) 1165# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
1152# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) 1166# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
1167# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28)
1153#define HDMI0_60958_1 0x74d8 1168#define HDMI0_60958_1 0x74d8
1154# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) 1169# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
1155# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) 1170# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
1156# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16) 1171# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
1157# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18) 1172# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
1158# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) 1173# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
1174# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20)
1159#define HDMI0_ACR_PACKET_CONTROL 0x74dc 1175#define HDMI0_ACR_PACKET_CONTROL 0x74dc
1160# define HDMI0_ACR_SEND (1 << 0) 1176# define HDMI0_ACR_SEND (1 << 0)
1161# define HDMI0_ACR_CONT (1 << 1) 1177# define HDMI0_ACR_CONT (1 << 1)
@@ -1166,6 +1182,7 @@
1166# define HDMI0_ACR_48 3 1182# define HDMI0_ACR_48 3
1167# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ 1183# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
1168# define HDMI0_ACR_AUTO_SEND (1 << 12) 1184# define HDMI0_ACR_AUTO_SEND (1 << 12)
1185#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc
1169#define HDMI0_RAMP_CONTROL0 0x74e0 1186#define HDMI0_RAMP_CONTROL0 0x74e0
1170# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) 1187# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
1171#define HDMI0_RAMP_CONTROL1 0x74e4 1188#define HDMI0_RAMP_CONTROL1 0x74e4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8149e7cf4303..4b0bbf88d5c0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -100,6 +100,8 @@ extern int radeon_dpm;
100extern int radeon_aspm; 100extern int radeon_aspm;
101extern int radeon_runtime_pm; 101extern int radeon_runtime_pm;
102extern int radeon_hard_reset; 102extern int radeon_hard_reset;
103extern int radeon_vm_size;
104extern int radeon_vm_block_size;
103 105
104/* 106/*
105 * Copy from radeon_drv.h so we don't have to include both and have conflicting 107 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -676,14 +678,16 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
676 * IRQS. 678 * IRQS.
677 */ 679 */
678 680
679struct radeon_unpin_work { 681struct radeon_flip_work {
680 struct work_struct work; 682 struct work_struct flip_work;
681 struct radeon_device *rdev; 683 struct work_struct unpin_work;
682 int crtc_id; 684 struct radeon_device *rdev;
683 struct radeon_fence *fence; 685 int crtc_id;
686 struct drm_framebuffer *fb;
684 struct drm_pending_vblank_event *event; 687 struct drm_pending_vblank_event *event;
685 struct radeon_bo *old_rbo; 688 struct radeon_bo *old_rbo;
686 u64 new_crtc_base; 689 struct radeon_bo *new_rbo;
690 struct radeon_fence *fence;
687}; 691};
688 692
689struct r500_irq_stat_regs { 693struct r500_irq_stat_regs {
@@ -835,13 +839,8 @@ struct radeon_mec {
835/* maximum number of VMIDs */ 839/* maximum number of VMIDs */
836#define RADEON_NUM_VM 16 840#define RADEON_NUM_VM 16
837 841
838/* defines number of bits in page table versus page directory,
839 * a page is 4KB so we have 12 bits offset, 9 bits in the page
840 * table and the remaining 19 bits are in the page directory */
841#define RADEON_VM_BLOCK_SIZE 9
842
843/* number of entries in page table */ 842/* number of entries in page table */
844#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) 843#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
845 844
846/* PTBs (Page Table Blocks) need to be aligned to 32K */ 845/* PTBs (Page Table Blocks) need to be aligned to 32K */
847#define RADEON_VM_PTB_ALIGN_SIZE 32768 846#define RADEON_VM_PTB_ALIGN_SIZE 32768
@@ -854,6 +853,15 @@ struct radeon_mec {
854#define R600_PTE_READABLE (1 << 5) 853#define R600_PTE_READABLE (1 << 5)
855#define R600_PTE_WRITEABLE (1 << 6) 854#define R600_PTE_WRITEABLE (1 << 6)
856 855
856/* PTE (Page Table Entry) fragment field for different page sizes */
857#define R600_PTE_FRAG_4KB (0 << 7)
858#define R600_PTE_FRAG_64KB (4 << 7)
859#define R600_PTE_FRAG_256KB (6 << 7)
860
861/* flags used for GART page table entries on R600+ */
862#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
863 | R600_PTE_READABLE | R600_PTE_WRITEABLE)
864
857struct radeon_vm_pt { 865struct radeon_vm_pt {
858 struct radeon_bo *bo; 866 struct radeon_bo *bo;
859 uint64_t addr; 867 uint64_t addr;
@@ -986,8 +994,8 @@ struct radeon_cs_reloc {
986 struct radeon_bo *robj; 994 struct radeon_bo *robj;
987 struct ttm_validate_buffer tv; 995 struct ttm_validate_buffer tv;
988 uint64_t gpu_offset; 996 uint64_t gpu_offset;
989 unsigned domain; 997 unsigned prefered_domains;
990 unsigned alt_domain; 998 unsigned allowed_domains;
991 uint32_t tiling_flags; 999 uint32_t tiling_flags;
992 uint32_t handle; 1000 uint32_t handle;
993}; 1001};
@@ -1771,7 +1779,8 @@ struct radeon_asic {
1771 /* gart */ 1779 /* gart */
1772 struct { 1780 struct {
1773 void (*tlb_flush)(struct radeon_device *rdev); 1781 void (*tlb_flush)(struct radeon_device *rdev);
1774 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); 1782 void (*set_page)(struct radeon_device *rdev, unsigned i,
1783 uint64_t addr);
1775 } gart; 1784 } gart;
1776 struct { 1785 struct {
1777 int (*init)(struct radeon_device *rdev); 1786 int (*init)(struct radeon_device *rdev);
@@ -1883,9 +1892,8 @@ struct radeon_asic {
1883 } dpm; 1892 } dpm;
1884 /* pageflipping */ 1893 /* pageflipping */
1885 struct { 1894 struct {
1886 void (*pre_page_flip)(struct radeon_device *rdev, int crtc); 1895 void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
1887 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); 1896 bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
1888 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
1889 } pflip; 1897 } pflip;
1890}; 1898};
1891 1899
@@ -1924,6 +1932,7 @@ struct r600_asic {
1924 unsigned tiling_group_size; 1932 unsigned tiling_group_size;
1925 unsigned tile_config; 1933 unsigned tile_config;
1926 unsigned backend_map; 1934 unsigned backend_map;
1935 unsigned active_simds;
1927}; 1936};
1928 1937
1929struct rv770_asic { 1938struct rv770_asic {
@@ -1949,6 +1958,7 @@ struct rv770_asic {
1949 unsigned tiling_group_size; 1958 unsigned tiling_group_size;
1950 unsigned tile_config; 1959 unsigned tile_config;
1951 unsigned backend_map; 1960 unsigned backend_map;
1961 unsigned active_simds;
1952}; 1962};
1953 1963
1954struct evergreen_asic { 1964struct evergreen_asic {
@@ -1975,6 +1985,7 @@ struct evergreen_asic {
1975 unsigned tiling_group_size; 1985 unsigned tiling_group_size;
1976 unsigned tile_config; 1986 unsigned tile_config;
1977 unsigned backend_map; 1987 unsigned backend_map;
1988 unsigned active_simds;
1978}; 1989};
1979 1990
1980struct cayman_asic { 1991struct cayman_asic {
@@ -2013,6 +2024,7 @@ struct cayman_asic {
2013 unsigned multi_gpu_tile_size; 2024 unsigned multi_gpu_tile_size;
2014 2025
2015 unsigned tile_config; 2026 unsigned tile_config;
2027 unsigned active_simds;
2016}; 2028};
2017 2029
2018struct si_asic { 2030struct si_asic {
@@ -2043,6 +2055,7 @@ struct si_asic {
2043 2055
2044 unsigned tile_config; 2056 unsigned tile_config;
2045 uint32_t tile_mode_array[32]; 2057 uint32_t tile_mode_array[32];
2058 uint32_t active_cus;
2046}; 2059};
2047 2060
2048struct cik_asic { 2061struct cik_asic {
@@ -2074,6 +2087,7 @@ struct cik_asic {
2074 unsigned tile_config; 2087 unsigned tile_config;
2075 uint32_t tile_mode_array[32]; 2088 uint32_t tile_mode_array[32];
2076 uint32_t macrotile_mode_array[16]; 2089 uint32_t macrotile_mode_array[16];
2090 uint32_t active_cus;
2077}; 2091};
2078 2092
2079union radeon_asic_config { 2093union radeon_asic_config {
@@ -2745,9 +2759,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2745#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) 2759#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
2746#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) 2760#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
2747#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) 2761#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
2748#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
2749#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) 2762#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
2750#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) 2763#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
2751#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) 2764#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
2752#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 2765#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
2753#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) 2766#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 42433344cb1b..a9297b2c3524 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -117,9 +117,6 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
117 /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */ 117 /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
118 { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61, 118 { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
119 PCI_VENDOR_ID_SONY, 0x8175, 1}, 119 PCI_VENDOR_ID_SONY, 0x8175, 1},
120 /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
121 { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
122 PCI_VENDOR_ID_ATI, 0x0152, 2},
123 { 0, 0, 0, 0, 0, 0, 0 }, 120 { 0, 0, 0, 0, 0, 0, 0 },
124}; 121};
125#endif 122#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e5f0177bea1e..34b9aa9e3c06 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -248,9 +248,8 @@ static struct radeon_asic r100_asic = {
248 .set_clock_gating = &radeon_legacy_set_clock_gating, 248 .set_clock_gating = &radeon_legacy_set_clock_gating,
249 }, 249 },
250 .pflip = { 250 .pflip = {
251 .pre_page_flip = &r100_pre_page_flip,
252 .page_flip = &r100_page_flip, 251 .page_flip = &r100_page_flip,
253 .post_page_flip = &r100_post_page_flip, 252 .page_flip_pending = &r100_page_flip_pending,
254 }, 253 },
255}; 254};
256 255
@@ -315,9 +314,8 @@ static struct radeon_asic r200_asic = {
315 .set_clock_gating = &radeon_legacy_set_clock_gating, 314 .set_clock_gating = &radeon_legacy_set_clock_gating,
316 }, 315 },
317 .pflip = { 316 .pflip = {
318 .pre_page_flip = &r100_pre_page_flip,
319 .page_flip = &r100_page_flip, 317 .page_flip = &r100_page_flip,
320 .post_page_flip = &r100_post_page_flip, 318 .page_flip_pending = &r100_page_flip_pending,
321 }, 319 },
322}; 320};
323 321
@@ -396,9 +394,8 @@ static struct radeon_asic r300_asic = {
396 .set_clock_gating = &radeon_legacy_set_clock_gating, 394 .set_clock_gating = &radeon_legacy_set_clock_gating,
397 }, 395 },
398 .pflip = { 396 .pflip = {
399 .pre_page_flip = &r100_pre_page_flip,
400 .page_flip = &r100_page_flip, 397 .page_flip = &r100_page_flip,
401 .post_page_flip = &r100_post_page_flip, 398 .page_flip_pending = &r100_page_flip_pending,
402 }, 399 },
403}; 400};
404 401
@@ -463,9 +460,8 @@ static struct radeon_asic r300_asic_pcie = {
463 .set_clock_gating = &radeon_legacy_set_clock_gating, 460 .set_clock_gating = &radeon_legacy_set_clock_gating,
464 }, 461 },
465 .pflip = { 462 .pflip = {
466 .pre_page_flip = &r100_pre_page_flip,
467 .page_flip = &r100_page_flip, 463 .page_flip = &r100_page_flip,
468 .post_page_flip = &r100_post_page_flip, 464 .page_flip_pending = &r100_page_flip_pending,
469 }, 465 },
470}; 466};
471 467
@@ -530,9 +526,8 @@ static struct radeon_asic r420_asic = {
530 .set_clock_gating = &radeon_atom_set_clock_gating, 526 .set_clock_gating = &radeon_atom_set_clock_gating,
531 }, 527 },
532 .pflip = { 528 .pflip = {
533 .pre_page_flip = &r100_pre_page_flip,
534 .page_flip = &r100_page_flip, 529 .page_flip = &r100_page_flip,
535 .post_page_flip = &r100_post_page_flip, 530 .page_flip_pending = &r100_page_flip_pending,
536 }, 531 },
537}; 532};
538 533
@@ -597,9 +592,8 @@ static struct radeon_asic rs400_asic = {
597 .set_clock_gating = &radeon_legacy_set_clock_gating, 592 .set_clock_gating = &radeon_legacy_set_clock_gating,
598 }, 593 },
599 .pflip = { 594 .pflip = {
600 .pre_page_flip = &r100_pre_page_flip,
601 .page_flip = &r100_page_flip, 595 .page_flip = &r100_page_flip,
602 .post_page_flip = &r100_post_page_flip, 596 .page_flip_pending = &r100_page_flip_pending,
603 }, 597 },
604}; 598};
605 599
@@ -666,9 +660,8 @@ static struct radeon_asic rs600_asic = {
666 .set_clock_gating = &radeon_atom_set_clock_gating, 660 .set_clock_gating = &radeon_atom_set_clock_gating,
667 }, 661 },
668 .pflip = { 662 .pflip = {
669 .pre_page_flip = &rs600_pre_page_flip,
670 .page_flip = &rs600_page_flip, 663 .page_flip = &rs600_page_flip,
671 .post_page_flip = &rs600_post_page_flip, 664 .page_flip_pending = &rs600_page_flip_pending,
672 }, 665 },
673}; 666};
674 667
@@ -735,9 +728,8 @@ static struct radeon_asic rs690_asic = {
735 .set_clock_gating = &radeon_atom_set_clock_gating, 728 .set_clock_gating = &radeon_atom_set_clock_gating,
736 }, 729 },
737 .pflip = { 730 .pflip = {
738 .pre_page_flip = &rs600_pre_page_flip,
739 .page_flip = &rs600_page_flip, 731 .page_flip = &rs600_page_flip,
740 .post_page_flip = &rs600_post_page_flip, 732 .page_flip_pending = &rs600_page_flip_pending,
741 }, 733 },
742}; 734};
743 735
@@ -802,9 +794,8 @@ static struct radeon_asic rv515_asic = {
802 .set_clock_gating = &radeon_atom_set_clock_gating, 794 .set_clock_gating = &radeon_atom_set_clock_gating,
803 }, 795 },
804 .pflip = { 796 .pflip = {
805 .pre_page_flip = &rs600_pre_page_flip,
806 .page_flip = &rs600_page_flip, 797 .page_flip = &rs600_page_flip,
807 .post_page_flip = &rs600_post_page_flip, 798 .page_flip_pending = &rs600_page_flip_pending,
808 }, 799 },
809}; 800};
810 801
@@ -869,9 +860,8 @@ static struct radeon_asic r520_asic = {
869 .set_clock_gating = &radeon_atom_set_clock_gating, 860 .set_clock_gating = &radeon_atom_set_clock_gating,
870 }, 861 },
871 .pflip = { 862 .pflip = {
872 .pre_page_flip = &rs600_pre_page_flip,
873 .page_flip = &rs600_page_flip, 863 .page_flip = &rs600_page_flip,
874 .post_page_flip = &rs600_post_page_flip, 864 .page_flip_pending = &rs600_page_flip_pending,
875 }, 865 },
876}; 866};
877 867
@@ -968,9 +958,8 @@ static struct radeon_asic r600_asic = {
968 .get_temperature = &rv6xx_get_temp, 958 .get_temperature = &rv6xx_get_temp,
969 }, 959 },
970 .pflip = { 960 .pflip = {
971 .pre_page_flip = &rs600_pre_page_flip,
972 .page_flip = &rs600_page_flip, 961 .page_flip = &rs600_page_flip,
973 .post_page_flip = &rs600_post_page_flip, 962 .page_flip_pending = &rs600_page_flip_pending,
974 }, 963 },
975}; 964};
976 965
@@ -1059,9 +1048,8 @@ static struct radeon_asic rv6xx_asic = {
1059 .force_performance_level = &rv6xx_dpm_force_performance_level, 1048 .force_performance_level = &rv6xx_dpm_force_performance_level,
1060 }, 1049 },
1061 .pflip = { 1050 .pflip = {
1062 .pre_page_flip = &rs600_pre_page_flip,
1063 .page_flip = &rs600_page_flip, 1051 .page_flip = &rs600_page_flip,
1064 .post_page_flip = &rs600_post_page_flip, 1052 .page_flip_pending = &rs600_page_flip_pending,
1065 }, 1053 },
1066}; 1054};
1067 1055
@@ -1150,9 +1138,8 @@ static struct radeon_asic rs780_asic = {
1150 .force_performance_level = &rs780_dpm_force_performance_level, 1138 .force_performance_level = &rs780_dpm_force_performance_level,
1151 }, 1139 },
1152 .pflip = { 1140 .pflip = {
1153 .pre_page_flip = &rs600_pre_page_flip,
1154 .page_flip = &rs600_page_flip, 1141 .page_flip = &rs600_page_flip,
1155 .post_page_flip = &rs600_post_page_flip, 1142 .page_flip_pending = &rs600_page_flip_pending,
1156 }, 1143 },
1157}; 1144};
1158 1145
@@ -1201,7 +1188,7 @@ static struct radeon_asic rv770_asic = {
1201 .set_backlight_level = &atombios_set_backlight_level, 1188 .set_backlight_level = &atombios_set_backlight_level,
1202 .get_backlight_level = &atombios_get_backlight_level, 1189 .get_backlight_level = &atombios_get_backlight_level,
1203 .hdmi_enable = &r600_hdmi_enable, 1190 .hdmi_enable = &r600_hdmi_enable,
1204 .hdmi_setmode = &r600_hdmi_setmode, 1191 .hdmi_setmode = &dce3_1_hdmi_setmode,
1205 }, 1192 },
1206 .copy = { 1193 .copy = {
1207 .blit = &r600_copy_cpdma, 1194 .blit = &r600_copy_cpdma,
@@ -1256,9 +1243,8 @@ static struct radeon_asic rv770_asic = {
1256 .vblank_too_short = &rv770_dpm_vblank_too_short, 1243 .vblank_too_short = &rv770_dpm_vblank_too_short,
1257 }, 1244 },
1258 .pflip = { 1245 .pflip = {
1259 .pre_page_flip = &rs600_pre_page_flip,
1260 .page_flip = &rv770_page_flip, 1246 .page_flip = &rv770_page_flip,
1261 .post_page_flip = &rs600_post_page_flip, 1247 .page_flip_pending = &rv770_page_flip_pending,
1262 }, 1248 },
1263}; 1249};
1264 1250
@@ -1375,9 +1361,8 @@ static struct radeon_asic evergreen_asic = {
1375 .vblank_too_short = &cypress_dpm_vblank_too_short, 1361 .vblank_too_short = &cypress_dpm_vblank_too_short,
1376 }, 1362 },
1377 .pflip = { 1363 .pflip = {
1378 .pre_page_flip = &evergreen_pre_page_flip,
1379 .page_flip = &evergreen_page_flip, 1364 .page_flip = &evergreen_page_flip,
1380 .post_page_flip = &evergreen_post_page_flip, 1365 .page_flip_pending = &evergreen_page_flip_pending,
1381 }, 1366 },
1382}; 1367};
1383 1368
@@ -1467,9 +1452,8 @@ static struct radeon_asic sumo_asic = {
1467 .force_performance_level = &sumo_dpm_force_performance_level, 1452 .force_performance_level = &sumo_dpm_force_performance_level,
1468 }, 1453 },
1469 .pflip = { 1454 .pflip = {
1470 .pre_page_flip = &evergreen_pre_page_flip,
1471 .page_flip = &evergreen_page_flip, 1455 .page_flip = &evergreen_page_flip,
1472 .post_page_flip = &evergreen_post_page_flip, 1456 .page_flip_pending = &evergreen_page_flip_pending,
1473 }, 1457 },
1474}; 1458};
1475 1459
@@ -1560,9 +1544,8 @@ static struct radeon_asic btc_asic = {
1560 .vblank_too_short = &btc_dpm_vblank_too_short, 1544 .vblank_too_short = &btc_dpm_vblank_too_short,
1561 }, 1545 },
1562 .pflip = { 1546 .pflip = {
1563 .pre_page_flip = &evergreen_pre_page_flip,
1564 .page_flip = &evergreen_page_flip, 1547 .page_flip = &evergreen_page_flip,
1565 .post_page_flip = &evergreen_post_page_flip, 1548 .page_flip_pending = &evergreen_page_flip_pending,
1566 }, 1549 },
1567}; 1550};
1568 1551
@@ -1704,9 +1687,8 @@ static struct radeon_asic cayman_asic = {
1704 .vblank_too_short = &ni_dpm_vblank_too_short, 1687 .vblank_too_short = &ni_dpm_vblank_too_short,
1705 }, 1688 },
1706 .pflip = { 1689 .pflip = {
1707 .pre_page_flip = &evergreen_pre_page_flip,
1708 .page_flip = &evergreen_page_flip, 1690 .page_flip = &evergreen_page_flip,
1709 .post_page_flip = &evergreen_post_page_flip, 1691 .page_flip_pending = &evergreen_page_flip_pending,
1710 }, 1692 },
1711}; 1693};
1712 1694
@@ -1805,9 +1787,8 @@ static struct radeon_asic trinity_asic = {
1805 .enable_bapm = &trinity_dpm_enable_bapm, 1787 .enable_bapm = &trinity_dpm_enable_bapm,
1806 }, 1788 },
1807 .pflip = { 1789 .pflip = {
1808 .pre_page_flip = &evergreen_pre_page_flip,
1809 .page_flip = &evergreen_page_flip, 1790 .page_flip = &evergreen_page_flip,
1810 .post_page_flip = &evergreen_post_page_flip, 1791 .page_flip_pending = &evergreen_page_flip_pending,
1811 }, 1792 },
1812}; 1793};
1813 1794
@@ -1936,9 +1917,8 @@ static struct radeon_asic si_asic = {
1936 .vblank_too_short = &ni_dpm_vblank_too_short, 1917 .vblank_too_short = &ni_dpm_vblank_too_short,
1937 }, 1918 },
1938 .pflip = { 1919 .pflip = {
1939 .pre_page_flip = &evergreen_pre_page_flip,
1940 .page_flip = &evergreen_page_flip, 1920 .page_flip = &evergreen_page_flip,
1941 .post_page_flip = &evergreen_post_page_flip, 1921 .page_flip_pending = &evergreen_page_flip_pending,
1942 }, 1922 },
1943}; 1923};
1944 1924
@@ -2049,8 +2029,8 @@ static struct radeon_asic ci_asic = {
2049 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2029 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2050 .dma = &cik_copy_dma, 2030 .dma = &cik_copy_dma,
2051 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2031 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2052 .copy = &cik_copy_cpdma, 2032 .copy = &cik_copy_dma,
2053 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2033 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2054 }, 2034 },
2055 .surface = { 2035 .surface = {
2056 .set_reg = r600_set_surface_reg, 2036 .set_reg = r600_set_surface_reg,
@@ -2099,9 +2079,8 @@ static struct radeon_asic ci_asic = {
2099 .powergate_uvd = &ci_dpm_powergate_uvd, 2079 .powergate_uvd = &ci_dpm_powergate_uvd,
2100 }, 2080 },
2101 .pflip = { 2081 .pflip = {
2102 .pre_page_flip = &evergreen_pre_page_flip,
2103 .page_flip = &evergreen_page_flip, 2082 .page_flip = &evergreen_page_flip,
2104 .post_page_flip = &evergreen_post_page_flip, 2083 .page_flip_pending = &evergreen_page_flip_pending,
2105 }, 2084 },
2106}; 2085};
2107 2086
@@ -2204,9 +2183,8 @@ static struct radeon_asic kv_asic = {
2204 .enable_bapm = &kv_dpm_enable_bapm, 2183 .enable_bapm = &kv_dpm_enable_bapm,
2205 }, 2184 },
2206 .pflip = { 2185 .pflip = {
2207 .pre_page_flip = &evergreen_pre_page_flip,
2208 .page_flip = &evergreen_page_flip, 2186 .page_flip = &evergreen_page_flip,
2209 .post_page_flip = &evergreen_post_page_flip, 2187 .page_flip_pending = &evergreen_page_flip_pending,
2210 }, 2188 },
2211}; 2189};
2212 2190
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d55a3a39e82..01e7c0ad8f01 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,7 +67,8 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
67int r100_asic_reset(struct radeon_device *rdev); 67int r100_asic_reset(struct radeon_device *rdev);
68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
69void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 69void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
70int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 70void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
71 uint64_t addr);
71void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 72void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
72int r100_irq_set(struct radeon_device *rdev); 73int r100_irq_set(struct radeon_device *rdev);
73int r100_irq_process(struct radeon_device *rdev); 74int r100_irq_process(struct radeon_device *rdev);
@@ -135,9 +136,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
135extern void r100_pm_finish(struct radeon_device *rdev); 136extern void r100_pm_finish(struct radeon_device *rdev);
136extern void r100_pm_init_profile(struct radeon_device *rdev); 137extern void r100_pm_init_profile(struct radeon_device *rdev);
137extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); 138extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
138extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); 139extern void r100_page_flip(struct radeon_device *rdev, int crtc,
139extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 140 u64 crtc_base);
140extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); 141extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
141extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); 142extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
142extern int r100_mc_wait_for_idle(struct radeon_device *rdev); 143extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
143 144
@@ -171,7 +172,8 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
171 struct radeon_fence *fence); 172 struct radeon_fence *fence);
172extern int r300_cs_parse(struct radeon_cs_parser *p); 173extern int r300_cs_parse(struct radeon_cs_parser *p);
173extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 174extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
174extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 175extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
176 uint64_t addr);
175extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 177extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
176extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 178extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
177extern void r300_set_reg_safe(struct radeon_device *rdev); 179extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -206,7 +208,8 @@ extern void rs400_fini(struct radeon_device *rdev);
206extern int rs400_suspend(struct radeon_device *rdev); 208extern int rs400_suspend(struct radeon_device *rdev);
207extern int rs400_resume(struct radeon_device *rdev); 209extern int rs400_resume(struct radeon_device *rdev);
208void rs400_gart_tlb_flush(struct radeon_device *rdev); 210void rs400_gart_tlb_flush(struct radeon_device *rdev);
209int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 211void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
212 uint64_t addr);
210uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 213uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
211void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 214void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
212int rs400_gart_init(struct radeon_device *rdev); 215int rs400_gart_init(struct radeon_device *rdev);
@@ -229,7 +232,8 @@ int rs600_irq_process(struct radeon_device *rdev);
229void rs600_irq_disable(struct radeon_device *rdev); 232void rs600_irq_disable(struct radeon_device *rdev);
230u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 233u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
231void rs600_gart_tlb_flush(struct radeon_device *rdev); 234void rs600_gart_tlb_flush(struct radeon_device *rdev);
232int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 235void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
236 uint64_t addr);
233uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 237uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
234void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 238void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
235void rs600_bandwidth_update(struct radeon_device *rdev); 239void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -241,9 +245,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
241extern void rs600_pm_misc(struct radeon_device *rdev); 245extern void rs600_pm_misc(struct radeon_device *rdev);
242extern void rs600_pm_prepare(struct radeon_device *rdev); 246extern void rs600_pm_prepare(struct radeon_device *rdev);
243extern void rs600_pm_finish(struct radeon_device *rdev); 247extern void rs600_pm_finish(struct radeon_device *rdev);
244extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); 248extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
245extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 249 u64 crtc_base);
246extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); 250extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
247void rs600_set_safe_registers(struct radeon_device *rdev); 251void rs600_set_safe_registers(struct radeon_device *rdev);
248extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc); 252extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
249extern int rs600_mc_wait_for_idle(struct radeon_device *rdev); 253extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -387,6 +391,11 @@ void r600_rlc_stop(struct radeon_device *rdev);
387int r600_audio_init(struct radeon_device *rdev); 391int r600_audio_init(struct radeon_device *rdev);
388struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); 392struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
389void r600_audio_fini(struct radeon_device *rdev); 393void r600_audio_fini(struct radeon_device *rdev);
394void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
395void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
396 size_t size);
397void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
398void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
390int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 399int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
391void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 400void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
392void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 401void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
@@ -447,7 +456,8 @@ void rv770_fini(struct radeon_device *rdev);
447int rv770_suspend(struct radeon_device *rdev); 456int rv770_suspend(struct radeon_device *rdev);
448int rv770_resume(struct radeon_device *rdev); 457int rv770_resume(struct radeon_device *rdev);
449void rv770_pm_misc(struct radeon_device *rdev); 458void rv770_pm_misc(struct radeon_device *rdev);
450u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 459void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
460bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
451void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 461void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
452void r700_cp_stop(struct radeon_device *rdev); 462void r700_cp_stop(struct radeon_device *rdev);
453void r700_cp_fini(struct radeon_device *rdev); 463void r700_cp_fini(struct radeon_device *rdev);
@@ -458,6 +468,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
458u32 rv770_get_xclk(struct radeon_device *rdev); 468u32 rv770_get_xclk(struct radeon_device *rdev);
459int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 469int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
460int rv770_get_temp(struct radeon_device *rdev); 470int rv770_get_temp(struct radeon_device *rdev);
471/* hdmi */
472void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
461/* rv7xx pm */ 473/* rv7xx pm */
462int rv770_dpm_init(struct radeon_device *rdev); 474int rv770_dpm_init(struct radeon_device *rdev);
463int rv770_dpm_enable(struct radeon_device *rdev); 475int rv770_dpm_enable(struct radeon_device *rdev);
@@ -513,9 +525,9 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev);
513extern void btc_pm_init_profile(struct radeon_device *rdev); 525extern void btc_pm_init_profile(struct radeon_device *rdev);
514int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 526int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
515int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 527int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
516extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 528extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
517extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 529 u64 crtc_base);
518extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 530extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
519extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); 531extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
520void evergreen_disable_interrupt_state(struct radeon_device *rdev); 532void evergreen_disable_interrupt_state(struct radeon_device *rdev);
521int evergreen_mc_wait_for_idle(struct radeon_device *rdev); 533int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 9ab30976287d..6a03624fadaa 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -626,7 +626,7 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
626 vhdr->DeviceID != rdev->pdev->device) { 626 vhdr->DeviceID != rdev->pdev->device) {
627 DRM_INFO("ACPI VFCT table is not for this card\n"); 627 DRM_INFO("ACPI VFCT table is not for this card\n");
628 goto out_unmap; 628 goto out_unmap;
629 }; 629 }
630 630
631 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { 631 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
632 DRM_ERROR("ACPI VFCT image truncated\n"); 632 DRM_ERROR("ACPI VFCT image truncated\n");
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ea50e0ae7bf7..933c5c39654d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -48,6 +48,7 @@ void radeon_connector_hotplug(struct drm_connector *connector)
48 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 48 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
49 49
50 /* if the connector is already off, don't turn it back on */ 50 /* if the connector is already off, don't turn it back on */
51 /* FIXME: This access isn't protected by any locks. */
51 if (connector->dpms != DRM_MODE_DPMS_ON) 52 if (connector->dpms != DRM_MODE_DPMS_ON)
52 return; 53 return;
53 54
@@ -100,6 +101,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
100 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 101 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
101 struct radeon_connector_atom_dig *dig_connector; 102 struct radeon_connector_atom_dig *dig_connector;
102 int bpc = 8; 103 int bpc = 8;
104 int mode_clock, max_tmds_clock;
103 105
104 switch (connector->connector_type) { 106 switch (connector->connector_type) {
105 case DRM_MODE_CONNECTOR_DVII: 107 case DRM_MODE_CONNECTOR_DVII:
@@ -145,6 +147,61 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
145 } 147 }
146 break; 148 break;
147 } 149 }
150
151 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
152 /* hdmi deep color only implemented on DCE4+ */
153 if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
154 DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
155 connector->name, bpc);
156 bpc = 8;
157 }
158
159 /*
160 * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
161 * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
162 * 12 bpc is always supported on hdmi deep color sinks, as this is
163 * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
164 */
165 if (bpc > 12) {
166 DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
167 connector->name, bpc);
168 bpc = 12;
169 }
170
171 /* Any defined maximum tmds clock limit we must not exceed? */
172 if (connector->max_tmds_clock > 0) {
173 /* mode_clock is clock in kHz for mode to be modeset on this connector */
174 mode_clock = radeon_connector->pixelclock_for_modeset;
175
176 /* Maximum allowable input clock in kHz */
177 max_tmds_clock = connector->max_tmds_clock * 1000;
178
179 DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
180 connector->name, mode_clock, max_tmds_clock);
181
182 /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
183 if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
184 if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
185 (mode_clock * 5/4 <= max_tmds_clock))
186 bpc = 10;
187 else
188 bpc = 8;
189
190 DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
191 connector->name, bpc);
192 }
193
194 if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
195 bpc = 8;
196 DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
197 connector->name, bpc);
198 }
199 }
200 }
201
202 DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
203 connector->name, connector->display_info.bpc, bpc);
204
148 return bpc; 205 return bpc;
149} 206}
150 207
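
The 3/2 and 5/4 factors are just the deep color TMDS packing ratios: 36 bits per pixel at 12 bpc and 30 at 10 bpc versus 24 at 8 bpc. Worked through with illustrative numbers:

    1080p60 dot clock:           148500 kHz
    sink limit (max_tmds_clock): 165 MHz = 165000 kHz
    12 bpc: 148500 * 3 / 2 = 222750 kHz > 165000 -> rejected
    10 bpc: 148500 * 5 / 4 = 185625 kHz > 165000 -> rejected
     8 bpc: 148500 kHz          <= 165000 -> chosen
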
@@ -260,13 +317,17 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
260 continue; 317 continue;
261 318
262 if (priority == true) { 319 if (priority == true) {
263 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 320 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
264 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); 321 conflict->name);
322 DRM_DEBUG_KMS("in favor of %s\n",
323 connector->name);
265 conflict->status = connector_status_disconnected; 324 conflict->status = connector_status_disconnected;
266 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); 325 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
267 } else { 326 } else {
268 DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); 327 DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
269 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); 328 connector->name);
329 DRM_DEBUG_KMS("in favor of %s\n",
330 conflict->name);
270 current_status = connector_status_disconnected; 331 current_status = connector_status_disconnected;
271 } 332 }
272 break; 333 break;
@@ -787,7 +848,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
787 848
788 if (!radeon_connector->edid) { 849 if (!radeon_connector->edid) {
789 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 850 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
790 drm_get_connector_name(connector)); 851 connector->name);
791 ret = connector_status_connected; 852 ret = connector_status_connected;
792 } else { 853 } else {
793 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 854 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1010,12 +1071,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1010 1071
1011 if (!radeon_connector->edid) { 1072 if (!radeon_connector->edid) {
1012 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 1073 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
1013 drm_get_connector_name(connector)); 1074 connector->name);
1014 /* rs690 seems to have a problem with connectors not existing and always 1075 /* rs690 seems to have a problem with connectors not existing and always
1015 * return a block of 0's. If we see this just stop polling on this output */ 1076 * return a block of 0's. If we see this just stop polling on this output */
1016 if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { 1077 if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
1017 ret = connector_status_disconnected; 1078 ret = connector_status_disconnected;
1018 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); 1079 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
1080 connector->name);
1019 radeon_connector->ddc_bus = NULL; 1081 radeon_connector->ddc_bus = NULL;
1020 } else { 1082 } else {
1021 ret = connector_status_connected; 1083 ret = connector_status_connected;
@@ -1387,7 +1449,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
1387 struct radeon_device *rdev = dev->dev_private; 1449 struct radeon_device *rdev = dev->dev_private;
1388 1450
1389 if (ASIC_IS_DCE5(rdev) && 1451 if (ASIC_IS_DCE5(rdev) &&
1390 (rdev->clock.dp_extclk >= 53900) && 1452 (rdev->clock.default_dispclk >= 53900) &&
1391 radeon_connector_encoder_is_hbr2(connector)) { 1453 radeon_connector_encoder_is_hbr2(connector)) {
1392 return true; 1454 return true;
1393 } 1455 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41ecf8a60611..71a143461478 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -140,10 +140,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
140 if (p->ring == R600_RING_TYPE_UVD_INDEX && 140 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
141 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { 141 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
142 /* TODO: is this still needed for NI+ ? */ 142 /* TODO: is this still needed for NI+ ? */
143 p->relocs[i].domain = 143 p->relocs[i].prefered_domains =
144 RADEON_GEM_DOMAIN_VRAM; 144 RADEON_GEM_DOMAIN_VRAM;
145 145
146 p->relocs[i].alt_domain = 146 p->relocs[i].allowed_domains =
147 RADEON_GEM_DOMAIN_VRAM; 147 RADEON_GEM_DOMAIN_VRAM;
148 148
149 /* prioritize this over any other relocation */ 149 /* prioritize this over any other relocation */
@@ -158,10 +158,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
158 return -EINVAL; 158 return -EINVAL;
159 } 159 }
160 160
161 p->relocs[i].domain = domain; 161 p->relocs[i].prefered_domains = domain;
162 if (domain == RADEON_GEM_DOMAIN_VRAM) 162 if (domain == RADEON_GEM_DOMAIN_VRAM)
163 domain |= RADEON_GEM_DOMAIN_GTT; 163 domain |= RADEON_GEM_DOMAIN_GTT;
164 p->relocs[i].alt_domain = domain; 164 p->relocs[i].allowed_domains = domain;
165 } 165 }
166 166
167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; 167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2cd144c378d6..03686fab842d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1052,6 +1052,43 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1052 radeon_agpmode = 0; 1052 radeon_agpmode = 0;
1053 break; 1053 break;
1054 } 1054 }
1055
1056 if (!radeon_check_pot_argument(radeon_vm_size)) {
1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1058 radeon_vm_size);
1059 radeon_vm_size = 4096;
1060 }
1061
1062 if (radeon_vm_size < 4) {
1063 dev_warn(rdev->dev, "VM size (%d) too small, min is 4MB\n",
1064 radeon_vm_size);
1065 radeon_vm_size = 4096;
1066 }
1067
1068 /*
1069 * Max GPUVM size for Cayman, SI and CI is 40 bits.
1070 */
1071 if (radeon_vm_size > 1024*1024) {
1072 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1073 radeon_vm_size);
1074 radeon_vm_size = 4096;
1075 }
1076
1077 /* defines number of bits in page table versus page directory,
1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1079 * page table and the remaining bits are in the page directory */
1080 if (radeon_vm_block_size < 9) {
1081 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1082 radeon_vm_block_size);
1083 radeon_vm_block_size = 9;
1084 }
1085
1086 if (radeon_vm_block_size > 24 ||
1087 radeon_vm_size < (1ull << radeon_vm_block_size)) {
1088 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1089 radeon_vm_block_size);
1090 radeon_vm_block_size = 9;
1091 }
1055} 1092}
1056 1093
1057/** 1094/**
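
radeon_check_pot_argument() used above is defined earlier in radeon_device.c; a minimal equivalent of the power-of-two test it performs:

    /* minimal equivalent of the power-of-two check used above */
    static bool radeon_check_pot_argument(int arg)
    {
            return (arg & (arg - 1)) == 0;
    }
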
@@ -1126,12 +1163,13 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
1126static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 1163static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1127{ 1164{
1128 struct drm_device *dev = pci_get_drvdata(pdev); 1165 struct drm_device *dev = pci_get_drvdata(pdev);
1129 bool can_switch;
1130 1166
1131 spin_lock(&dev->count_lock); 1167 /*
1132 can_switch = (dev->open_count == 0); 1168 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1133 spin_unlock(&dev->count_lock); 1169 * locking inversion with the driver load path. And the access here is
1134 return can_switch; 1170 * completely racy anyway. So don't bother with locking for now.
1171 */
1172 return dev->open_count == 0;
1135} 1173}
1136 1174
1137static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { 1175static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
@@ -1196,17 +1234,16 @@ int radeon_device_init(struct radeon_device *rdev,
1196 if (r) 1234 if (r)
1197 return r; 1235 return r;
1198 1236
1237 radeon_check_arguments(rdev);
1199 /* Adjust VM size here. 1238 /* Adjust VM size here.
1200 * Currently set to 4GB ((1 << 20) 4k pages). 1239 * Max GPUVM size for cayman+ is 40 bits.
1201 * Max GPUVM size for cayman and SI is 40 bits.
1202 */ 1240 */
1203 rdev->vm_manager.max_pfn = 1 << 20; 1241 rdev->vm_manager.max_pfn = radeon_vm_size << 8;
1204 1242
1205 /* Set asic functions */ 1243 /* Set asic functions */
1206 r = radeon_asic_init(rdev); 1244 r = radeon_asic_init(rdev);
1207 if (r) 1245 if (r)
1208 return r; 1246 return r;
1209 radeon_check_arguments(rdev);
1210 1247
1211 /* all of the newer IGP chips have an internal gart 1248 /* all of the newer IGP chips have an internal gart
1212 * However some rs4xx report as AGP, so remove that here. 1249 * However some rs4xx report as AGP, so remove that here.
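
The << 8 converts megabytes into 4KB page-frame counts (1 MB / 4 KB = 256 = 1 << 8), so the default radeon_vm_size of 4096 yields max_pfn = 4096 << 8 = 1 << 20 pages, exactly the 4GB address space the old hardcoded value provided. Note that radeon_check_arguments() now runs before this computation, so an out-of-range vm_size is clamped first.
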
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 356b733caafe..5ed617056b9c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -249,16 +249,21 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
249 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 249 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
250 250
251 drm_crtc_cleanup(crtc); 251 drm_crtc_cleanup(crtc);
252 destroy_workqueue(radeon_crtc->flip_queue);
252 kfree(radeon_crtc); 253 kfree(radeon_crtc);
253} 254}
254 255
255/* 256/**
256 * Handle unpin events outside the interrupt handler proper. 257 * radeon_unpin_work_func - unpin old buffer object
258 *
259 * @__work - kernel work item
260 *
261 * Unpin the old frame buffer object outside of the interrupt handler
257 */ 262 */
258static void radeon_unpin_work_func(struct work_struct *__work) 263static void radeon_unpin_work_func(struct work_struct *__work)
259{ 264{
260 struct radeon_unpin_work *work = 265 struct radeon_flip_work *work =
261 container_of(__work, struct radeon_unpin_work, work); 266 container_of(__work, struct radeon_flip_work, unpin_work);
262 int r; 267 int r;
263 268
264 /* unpin of the old buffer */ 269 /* unpin of the old buffer */
@@ -276,10 +281,10 @@ static void radeon_unpin_work_func(struct work_struct *__work)
276 kfree(work); 281 kfree(work);
277} 282}
278 283
279void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) 284void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
280{ 285{
281 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 286 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
282 struct radeon_unpin_work *work; 287 struct radeon_flip_work *work;
283 unsigned long flags; 288 unsigned long flags;
284 u32 update_pending; 289 u32 update_pending;
285 int vpos, hpos; 290 int vpos, hpos;
@@ -289,24 +294,13 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
289 return; 294 return;
290 295
291 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 296 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
292 work = radeon_crtc->unpin_work; 297 work = radeon_crtc->flip_work;
293 if (work == NULL || 298 if (work == NULL) {
294 (work->fence && !radeon_fence_signaled(work->fence))) {
295 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 299 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
296 return; 300 return;
297 } 301 }
298 /* New pageflip, or just completion of a previous one? */ 302
299 if (!radeon_crtc->deferred_flip_completion) { 303 update_pending = radeon_page_flip_pending(rdev, crtc_id);
300 /* do the flip (mmio) */
301 update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
302 } else {
303 /* This is just a completion of a flip queued in crtc
304 * at last invocation. Make sure we go directly to
305 * completion routine.
306 */
307 update_pending = 0;
308 radeon_crtc->deferred_flip_completion = 0;
309 }
310 304
311 /* Has the pageflip already completed in crtc, or is it certain 305 /* Has the pageflip already completed in crtc, or is it certain
312 * to complete in this vblank? 306 * to complete in this vblank?
@@ -324,19 +318,38 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
324 */ 318 */
325 update_pending = 0; 319 update_pending = 0;
326 } 320 }
327 if (update_pending) { 321 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
328 /* crtc didn't flip in this target vblank interval, 322 if (!update_pending)
329 * but flip is pending in crtc. It will complete it 323 radeon_crtc_handle_flip(rdev, crtc_id);
330 * in next vblank interval, so complete the flip at 324}
331 * next vblank irq. 325
332 */ 326/**
333 radeon_crtc->deferred_flip_completion = 1; 327 * radeon_crtc_handle_flip - page flip completed
328 *
329 * @rdev: radeon device pointer
330 * @crtc_id: crtc number this event is for
331 *
332 * Called when we are sure that a page flip for this crtc is completed.
333 */
334void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
335{
336 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
337 struct radeon_flip_work *work;
338 unsigned long flags;
339
340 /* this can happen at init */
341 if (radeon_crtc == NULL)
342 return;
343
344 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
345 work = radeon_crtc->flip_work;
346 if (work == NULL) {
334 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 347 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
335 return; 348 return;
336 } 349 }
337 350
338 /* Pageflip (will be) certainly completed in this vblank. Clean up. */ 351 /* Pageflip completed. Clean up. */
339 radeon_crtc->unpin_work = NULL; 352 radeon_crtc->flip_work = NULL;
340 353
341 /* wakeup userspace */ 354 /* wakeup userspace */
342 if (work->event) 355 if (work->event)
@@ -344,86 +357,71 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
344 357
345 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 358 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
346 359
347 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
348 radeon_fence_unref(&work->fence); 360 radeon_fence_unref(&work->fence);
349 radeon_post_page_flip(work->rdev, work->crtc_id); 361 radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id);
350 schedule_work(&work->work); 362 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
351} 363}
352 364
353static int radeon_crtc_page_flip(struct drm_crtc *crtc, 365/**
354 struct drm_framebuffer *fb, 366 * radeon_flip_work_func - page flip framebuffer
355 struct drm_pending_vblank_event *event, 367 *
356 uint32_t page_flip_flags) 368 * @__work: kernel work item
369 *
370 * Wait for the buffer object to become idle and do the actual page flip
371 */
372static void radeon_flip_work_func(struct work_struct *__work)
357{ 373{
358 struct drm_device *dev = crtc->dev; 374 struct radeon_flip_work *work =
359 struct radeon_device *rdev = dev->dev_private; 375 container_of(__work, struct radeon_flip_work, flip_work);
360 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 376 struct radeon_device *rdev = work->rdev;
361 struct radeon_framebuffer *old_radeon_fb; 377 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
362 struct radeon_framebuffer *new_radeon_fb;
363 struct drm_gem_object *obj;
364 struct radeon_bo *rbo;
365 struct radeon_unpin_work *work;
366 unsigned long flags;
367 u32 tiling_flags, pitch_pixels;
368 u64 base;
369 int r;
370 378
371 work = kzalloc(sizeof *work, GFP_KERNEL); 379 struct drm_crtc *crtc = &radeon_crtc->base;
372 if (work == NULL) 380 struct drm_framebuffer *fb = work->fb;
373 return -ENOMEM;
374 381
375 work->event = event; 382 uint32_t tiling_flags, pitch_pixels;
376 work->rdev = rdev; 383 uint64_t base;
377 work->crtc_id = radeon_crtc->crtc_id;
378 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
379 new_radeon_fb = to_radeon_framebuffer(fb);
380 /* schedule unpin of the old buffer */
381 obj = old_radeon_fb->obj;
382 /* take a reference to the old object */
383 drm_gem_object_reference(obj);
384 rbo = gem_to_radeon_bo(obj);
385 work->old_rbo = rbo;
386 obj = new_radeon_fb->obj;
387 rbo = gem_to_radeon_bo(obj);
388 384
389 spin_lock(&rbo->tbo.bdev->fence_lock); 385 unsigned long flags;
390 if (rbo->tbo.sync_obj) 386 int r;
391 work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
392 spin_unlock(&rbo->tbo.bdev->fence_lock);
393 387
394 INIT_WORK(&work->work, radeon_unpin_work_func); 388 down_read(&rdev->exclusive_lock);
389 while (work->fence) {
390 r = radeon_fence_wait(work->fence, false);
391 if (r == -EDEADLK) {
392 up_read(&rdev->exclusive_lock);
393 r = radeon_gpu_reset(rdev);
394 down_read(&rdev->exclusive_lock);
395 }
395 396
396 /* We borrow the event spin lock for protecting unpin_work */ 397 if (r) {
397 spin_lock_irqsave(&dev->event_lock, flags); 398 DRM_ERROR("failed to wait on page flip fence (%d)!\n",
398 if (radeon_crtc->unpin_work) { 399 r);
399 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 400 goto cleanup;
400 r = -EBUSY; 401 } else
401 goto unlock_free; 402 radeon_fence_unref(&work->fence);
402 } 403 }
403 radeon_crtc->unpin_work = work;
404 radeon_crtc->deferred_flip_completion = 0;
405 spin_unlock_irqrestore(&dev->event_lock, flags);
406 404
407 /* pin the new buffer */ 405 /* pin the new buffer */
408 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 406 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
409 work->old_rbo, rbo); 407 work->old_rbo, work->new_rbo);
410 408
411 r = radeon_bo_reserve(rbo, false); 409 r = radeon_bo_reserve(work->new_rbo, false);
412 if (unlikely(r != 0)) { 410 if (unlikely(r != 0)) {
413 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 411 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
414 goto pflip_cleanup; 412 goto cleanup;
415 } 413 }
416 /* Only 27 bit offset for legacy CRTC */ 414 /* Only 27 bit offset for legacy CRTC */
417 r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 415 r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
418 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 416 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
419 if (unlikely(r != 0)) { 417 if (unlikely(r != 0)) {
420 radeon_bo_unreserve(rbo); 418 radeon_bo_unreserve(work->new_rbo);
421 r = -EINVAL; 419 r = -EINVAL;
422 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 420 DRM_ERROR("failed to pin new rbo buffer before flip\n");
423 goto pflip_cleanup; 421 goto cleanup;
424 } 422 }
425 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 423 radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
426 radeon_bo_unreserve(rbo); 424 radeon_bo_unreserve(work->new_rbo);
427 425
428 if (!ASIC_IS_AVIVO(rdev)) { 426 if (!ASIC_IS_AVIVO(rdev)) {
429 /* crtc offset is from display base addr not FB location */ 427 /* crtc offset is from display base addr not FB location */
@@ -461,44 +459,91 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
461 base &= ~7; 459 base &= ~7;
462 } 460 }
463 461
464 spin_lock_irqsave(&dev->event_lock, flags); 462 /* We borrow the event spin lock for protecting flip_work */
465 work->new_crtc_base = base; 463 spin_lock_irqsave(&crtc->dev->event_lock, flags);
466 spin_unlock_irqrestore(&dev->event_lock, flags);
467 464
468 /* update crtc fb */ 465 /* set the proper interrupt */
469 crtc->primary->fb = fb; 466 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
470 467
471 r = drm_vblank_get(dev, radeon_crtc->crtc_id); 468 /* do the flip (mmio) */
472 if (r) { 469 radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
473 DRM_ERROR("failed to get vblank before flip\n");
474 goto pflip_cleanup1;
475 }
476 470
477 /* set the proper interrupt */ 471 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
478 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); 472 up_read(&rdev->exclusive_lock);
479 473
480 return 0; 474 return;
481 475
482pflip_cleanup1: 476cleanup:
483 if (unlikely(radeon_bo_reserve(rbo, false) != 0)) { 477 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
484 DRM_ERROR("failed to reserve new rbo in error path\n");
485 goto pflip_cleanup;
486 }
487 if (unlikely(radeon_bo_unpin(rbo) != 0)) {
488 DRM_ERROR("failed to unpin new rbo in error path\n");
489 }
490 radeon_bo_unreserve(rbo);
491
492pflip_cleanup:
493 spin_lock_irqsave(&dev->event_lock, flags);
494 radeon_crtc->unpin_work = NULL;
495unlock_free:
496 spin_unlock_irqrestore(&dev->event_lock, flags);
497 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
498 radeon_fence_unref(&work->fence); 478 radeon_fence_unref(&work->fence);
499 kfree(work); 479 kfree(work);
480 up_read(&rdev->exclusive_lock);
481}
500 482
501 return r; 483static int radeon_crtc_page_flip(struct drm_crtc *crtc,
484 struct drm_framebuffer *fb,
485 struct drm_pending_vblank_event *event,
486 uint32_t page_flip_flags)
487{
488 struct drm_device *dev = crtc->dev;
489 struct radeon_device *rdev = dev->dev_private;
490 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
491 struct radeon_framebuffer *old_radeon_fb;
492 struct radeon_framebuffer *new_radeon_fb;
493 struct drm_gem_object *obj;
494 struct radeon_flip_work *work;
495 unsigned long flags;
496
497 work = kzalloc(sizeof *work, GFP_KERNEL);
498 if (work == NULL)
499 return -ENOMEM;
500
501 INIT_WORK(&work->flip_work, radeon_flip_work_func);
502 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
503
504 work->rdev = rdev;
505 work->crtc_id = radeon_crtc->crtc_id;
506 work->fb = fb;
507 work->event = event;
508
509 /* schedule unpin of the old buffer */
510 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
511 obj = old_radeon_fb->obj;
512
513 /* take a reference to the old object */
514 drm_gem_object_reference(obj);
515 work->old_rbo = gem_to_radeon_bo(obj);
516
517 new_radeon_fb = to_radeon_framebuffer(fb);
518 obj = new_radeon_fb->obj;
519 work->new_rbo = gem_to_radeon_bo(obj);
520
521 spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
522 if (work->new_rbo->tbo.sync_obj)
523 work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
524 spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
525
526 /* We borrow the event spin lock for protecting flip_work */
527 spin_lock_irqsave(&crtc->dev->event_lock, flags);
528
529 if (radeon_crtc->flip_work) {
530 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
531 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
532 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
533 radeon_fence_unref(&work->fence);
534 kfree(work);
535 return -EBUSY;
536 }
537 radeon_crtc->flip_work = work;
538
539 /* update crtc fb */
540 crtc->primary->fb = fb;
541
542 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
543
544 queue_work(radeon_crtc->flip_queue, &work->flip_work);
545
546 return 0;
502} 547}
503 548
504static int 549static int
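
The rework splits one synchronous ioctl into a small pipeline; a sketch of who does what (a summary of the code above, not new driver code):

    /*
     * radeon_crtc_page_flip()      ioctl: ref the old BO, snapshot the new
     *                              BO's fence, claim crtc->flip_work and
     *                              queue flip_work
     * radeon_flip_work_func()      worker: wait for the fence (resetting the
     *                              GPU on -EDEADLK), pin the new BO, enable
     *                              the pflip irq and do the mmio flip
     * radeon_crtc_handle_vblank()  irq: if the flip is no longer pending,
     *                              hand off to radeon_crtc_handle_flip()
     * radeon_crtc_handle_flip()    clear flip_work, send the vblank event,
     *                              queue unpin_work
     * radeon_unpin_work_func()     worker: unpin and unref the old BO
     */
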
@@ -568,6 +613,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
568 613
569 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 614 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
570 radeon_crtc->crtc_id = index; 615 radeon_crtc->crtc_id = index;
616 radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
571 rdev->mode_info.crtcs[index] = radeon_crtc; 617 rdev->mode_info.crtcs[index] = radeon_crtc;
572 618
573 if (rdev->family >= CHIP_BONAIRE) { 619 if (rdev->family >= CHIP_BONAIRE) {
@@ -661,7 +707,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
661 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 707 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
662 radeon_connector = to_radeon_connector(connector); 708 radeon_connector = to_radeon_connector(connector);
663 DRM_INFO("Connector %d:\n", i); 709 DRM_INFO("Connector %d:\n", i);
664 DRM_INFO(" %s\n", drm_get_connector_name(connector)); 710 DRM_INFO(" %s\n", connector->name);
665 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 711 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
666 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 712 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
667 if (radeon_connector->ddc_bus) { 713 if (radeon_connector->ddc_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c00a2f585185..6e3017413386 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,9 +81,10 @@
81 * 2.37.0 - allow GS ring setup on r6xx/r7xx 81 * 2.37.0 - allow GS ring setup on r6xx/r7xx
82 * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN), 82 * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
83 * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG 83 * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
84 * 2.39.0 - Add INFO query for number of active CUs
84 */ 85 */
85#define KMS_DRIVER_MAJOR 2 86#define KMS_DRIVER_MAJOR 2
86#define KMS_DRIVER_MINOR 38 87#define KMS_DRIVER_MINOR 39
87#define KMS_DRIVER_PATCHLEVEL 0 88#define KMS_DRIVER_PATCHLEVEL 0
88int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 89int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
89int radeon_driver_unload_kms(struct drm_device *dev); 90int radeon_driver_unload_kms(struct drm_device *dev);
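
Userspace reaches the new counter through the existing DRM_IOCTL_RADEON_INFO path; a sketch, assuming the request id added with this bump is named RADEON_INFO_ACTIVE_CU_COUNT (check the radeon_drm.h shipped with a >= 2.39 kernel):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/radeon_drm.h>

    /* query the number of active CUs on an open render/card fd */
    static int query_active_cus(int fd, uint32_t *cus)
    {
            struct drm_radeon_info info = {
                    .request = RADEON_INFO_ACTIVE_CU_COUNT,
                    .value = (uint64_t)(uintptr_t)cus,
            };

            return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
    }
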
@@ -172,6 +173,8 @@ int radeon_dpm = -1;
172int radeon_aspm = -1; 173int radeon_aspm = -1;
173int radeon_runtime_pm = -1; 174int radeon_runtime_pm = -1;
174int radeon_hard_reset = 0; 175int radeon_hard_reset = 0;
176int radeon_vm_size = 4096;
177int radeon_vm_block_size = 9;
175 178
176MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 179MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
177module_param_named(no_wb, radeon_no_wb, int, 0444); 180module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -239,6 +242,12 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
239MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); 242MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
240module_param_named(hard_reset, radeon_hard_reset, int, 0444); 243module_param_named(hard_reset, radeon_hard_reset, int, 0444);
241 244
245MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
246module_param_named(vm_size, radeon_vm_size, int, 0444);
247
248MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
249module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
250
242static struct pci_device_id pciidlist[] = { 251static struct pci_device_id pciidlist[] = {
243 radeon_PCI_IDS 252 radeon_PCI_IDS
244}; 253};
@@ -519,7 +528,6 @@ static struct drm_driver kms_driver = {
519 DRIVER_USE_AGP | 528 DRIVER_USE_AGP |
520 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 529 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
521 DRIVER_PRIME | DRIVER_RENDER, 530 DRIVER_PRIME | DRIVER_RENDER,
522 .dev_priv_size = 0,
523 .load = radeon_driver_load_kms, 531 .load = radeon_driver_load_kms,
524 .open = radeon_driver_open_kms, 532 .open = radeon_driver_open_kms,
525 .preclose = radeon_driver_preclose_kms, 533 .preclose = radeon_driver_preclose_kms,
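
The two new parameters size the per-process GPU address space and its page tables; radeon_vm.c later in this series consumes them. A standalone sketch of the arithmetic with the defaults (all names here are local stand-ins, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define RADEON_GPU_PAGE_SIZE 4096

int main(void)
{
	unsigned vm_size = 4096;	/* MB, the new module default */
	unsigned vm_block_size = 9;	/* log2 of pages per page table */

	/* total addressable pages in the per-process VM */
	uint64_t max_pfn = (uint64_t)vm_size * 1024 * 1024 / RADEON_GPU_PAGE_SIZE;

	/* one page-directory entry per (1 << vm_block_size) pages */
	uint64_t num_pdes = max_pfn >> vm_block_size;

	printf("max_pfn=%llu num_pdes=%llu, each PT maps %u KB\n",
	       (unsigned long long)max_pfn, (unsigned long long)num_pdes,
	       (1u << vm_block_size) * (RADEON_GPU_PAGE_SIZE / 1024));
	return 0;	/* prints: max_pfn=1048576 num_pdes=2048, each PT maps 2048 KB */
}
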
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index a77b1c13ea43..913787085dfa 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -819,15 +819,35 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
819 return 0; 819 return 0;
820} 820}
821 821
822/**
823 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
824 *
825 * Manually trigger a gpu reset at the next fence wait.
826 */
827static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
828{
829 struct drm_info_node *node = (struct drm_info_node *) m->private;
830 struct drm_device *dev = node->minor->dev;
831 struct radeon_device *rdev = dev->dev_private;
832
833 down_read(&rdev->exclusive_lock);
834 seq_printf(m, "%d\n", rdev->needs_reset);
835 rdev->needs_reset = true;
836 up_read(&rdev->exclusive_lock);
837
838 return 0;
839}
840
822static struct drm_info_list radeon_debugfs_fence_list[] = { 841static struct drm_info_list radeon_debugfs_fence_list[] = {
823 {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL}, 842 {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
843 {"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
824}; 844};
825#endif 845#endif
826 846
827int radeon_debugfs_fence_init(struct radeon_device *rdev) 847int radeon_debugfs_fence_init(struct radeon_device *rdev)
828{ 848{
829#if defined(CONFIG_DEBUG_FS) 849#if defined(CONFIG_DEBUG_FS)
830 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1); 850 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
831#else 851#else
832 return 0; 852 return 0;
833#endif 853#endif
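
Reading the new debugfs file both reports needs_reset and arms it, so the reset fires at the next fence wait rather than immediately; the read side of exclusive_lock suffices because the actual reset path takes it for write. A runnable model of just those semantics (locking omitted, all names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool needs_reset;

static void debugfs_read(void)		/* models radeon_debugfs_gpu_reset() */
{
	printf("%d\n", needs_reset);	/* report current state */
	needs_reset = true;		/* arm: next fence wait resets */
}

static void fence_wait(void)		/* models the wait path checking the flag */
{
	if (needs_reset) {
		puts("-> triggering GPU reset");
		needs_reset = false;
	}
}

int main(void)
{
	debugfs_read();			/* prints 0, arms the reset */
	fence_wait();			/* performs the reset */
	return 0;
}
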
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 7b944142a9fd..add622008407 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -94,6 +94,8 @@ static int pre_xfer(struct i2c_adapter *i2c_adap)
94 struct radeon_i2c_bus_rec *rec = &i2c->rec; 94 struct radeon_i2c_bus_rec *rec = &i2c->rec;
95 uint32_t temp; 95 uint32_t temp;
96 96
97 mutex_lock(&i2c->mutex);
98
97 /* RV410 appears to have a bug where the hw i2c in reset 99 /* RV410 appears to have a bug where the hw i2c in reset
98 * holds the i2c port in a bad state - switch hw i2c away before 100 * holds the i2c port in a bad state - switch hw i2c away before
99 * doing DDC - do this for all r200s/r300s/r400s for safety sake 101 * doing DDC - do this for all r200s/r300s/r400s for safety sake
@@ -170,6 +172,8 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
170 temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask; 172 temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
171 WREG32(rec->mask_data_reg, temp); 173 WREG32(rec->mask_data_reg, temp);
172 temp = RREG32(rec->mask_data_reg); 174 temp = RREG32(rec->mask_data_reg);
175
176 mutex_unlock(&i2c->mutex);
173} 177}
174 178
175static int get_clock(void *i2c_priv) 179static int get_clock(void *i2c_priv)
@@ -813,6 +817,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
813 struct radeon_i2c_bus_rec *rec = &i2c->rec; 817 struct radeon_i2c_bus_rec *rec = &i2c->rec;
814 int ret = 0; 818 int ret = 0;
815 819
820 mutex_lock(&i2c->mutex);
821
816 switch (rdev->family) { 822 switch (rdev->family) {
817 case CHIP_R100: 823 case CHIP_R100:
818 case CHIP_RV100: 824 case CHIP_RV100:
@@ -879,6 +885,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
879 break; 885 break;
880 } 886 }
881 887
888 mutex_unlock(&i2c->mutex);
889
882 return ret; 890 return ret;
883} 891}
884 892
@@ -919,6 +927,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
919 i2c->adapter.dev.parent = &dev->pdev->dev; 927 i2c->adapter.dev.parent = &dev->pdev->dev;
920 i2c->dev = dev; 928 i2c->dev = dev;
921 i2c_set_adapdata(&i2c->adapter, i2c); 929 i2c_set_adapdata(&i2c->adapter, i2c);
930 mutex_init(&i2c->mutex);
922 if (rec->mm_i2c || 931 if (rec->mm_i2c ||
923 (rec->hw_capable && 932 (rec->hw_capable &&
924 radeon_hw_i2c && 933 radeon_hw_i2c &&
@@ -979,7 +988,7 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
979 return; 988 return;
980 i2c_del_adapter(&i2c->adapter); 989 i2c_del_adapter(&i2c->adapter);
981 if (i2c->has_aux) 990 if (i2c->has_aux)
982 drm_dp_aux_unregister_i2c_bus(&i2c->aux); 991 drm_dp_aux_unregister(&i2c->aux);
983 kfree(i2c); 992 kfree(i2c);
984} 993}
985 994
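
One mutex per i2c channel now brackets both the bit-banged path (pre_xfer/post_xfer) and the hardware path (radeon_hw_i2c_xfer), so transfers on the same bus can no longer interleave; mutex_init() sits in radeon_i2c_create() so the lock exists before the adapter is registered. A standalone model, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct demo_bus {
	pthread_mutex_t mutex;	/* models radeon_i2c_chan::mutex */
	int owner;
};

static void demo_xfer(struct demo_bus *bus, int id)
{
	pthread_mutex_lock(&bus->mutex);	/* pre_xfer / hw xfer entry */
	bus->owner = id;			/* ...program the controller... */
	printf("xfer %d owns the bus\n", bus->owner);
	pthread_mutex_unlock(&bus->mutex);	/* post_xfer / hw xfer exit */
}

int main(void)
{
	struct demo_bus bus = { PTHREAD_MUTEX_INITIALIZER, 0 };
	demo_xfer(&bus, 1);
	demo_xfer(&bus, 2);
	return 0;
}
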
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index bdb0f93e73bc..0b98ea134579 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -399,7 +399,7 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
399 if (nr < DRM_COMMAND_BASE) 399 if (nr < DRM_COMMAND_BASE)
400 return drm_compat_ioctl(filp, cmd, arg); 400 return drm_compat_ioctl(filp, cmd, arg);
401 401
402 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) 402 if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
403 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; 403 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
404 404
405 if (fn != NULL) 405 if (fn != NULL)
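
DRM_ARRAY_SIZE was a drm-local duplicate of the kernel's ARRAY_SIZE, which is essentially sizeof(arr)/sizeof((arr)[0]) plus a __must_be_array() check that breaks the build if a pointer is passed instead of an array. A portable demo of the core idiom:

#include <stdio.h>

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int tbl[] = { 1, 2, 3 };

int main(void)
{
	printf("%zu\n", DEMO_ARRAY_SIZE(tbl));	/* 3 */
	return 0;
}
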
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 089c9ffb0aa9..16807afab362 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -287,7 +287,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); 287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288 288
289 rdev->irq.installed = true; 289 rdev->irq.installed = true;
290 r = drm_irq_install(rdev->ddev); 290 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
291 if (r) { 291 if (r) {
292 rdev->irq.installed = false; 292 rdev->irq.installed = false;
293 flush_work(&rdev->hotplug_work); 293 flush_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index eaaedba04675..35d931881b4b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -513,6 +513,22 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
513 value_size = sizeof(uint64_t); 513 value_size = sizeof(uint64_t);
514 value64 = atomic64_read(&rdev->gtt_usage); 514 value64 = atomic64_read(&rdev->gtt_usage);
515 break; 515 break;
516 case RADEON_INFO_ACTIVE_CU_COUNT:
517 if (rdev->family >= CHIP_BONAIRE)
518 *value = rdev->config.cik.active_cus;
519 else if (rdev->family >= CHIP_TAHITI)
520 *value = rdev->config.si.active_cus;
521 else if (rdev->family >= CHIP_CAYMAN)
522 *value = rdev->config.cayman.active_simds;
523 else if (rdev->family >= CHIP_CEDAR)
524 *value = rdev->config.evergreen.active_simds;
525 else if (rdev->family >= CHIP_RV770)
526 *value = rdev->config.rv770.active_simds;
527 else if (rdev->family >= CHIP_R600)
528 *value = rdev->config.r600.active_simds;
529 else
530 *value = 1;
531 break;
516 default: 532 default:
517 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 533 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
518 return -EINVAL; 534 return -EINVAL;
@@ -859,4 +875,4 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
859 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 875 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
860 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 876 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
861}; 877};
862int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 878int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
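
Userspace reaches the new counter through the existing radeon info ioctl. A hypothetical libdrm sketch (header paths and error handling vary by setup; RADEON_INFO_ACTIVE_CU_COUNT comes from the updated uapi header):

#include <stdint.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <drm/radeon_drm.h>	/* struct drm_radeon_info, DRM_RADEON_INFO */

static uint32_t query_active_cus(int fd)
{
	uint32_t count = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_ACTIVE_CU_COUNT,
		.value = (uintptr_t)&count,	/* kernel writes through this */
	};

	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return 0;	/* older kernels reject unknown requests */
	return count;
}
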
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6ddf31a2d34e..ad0e4b8cc7e3 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -191,6 +191,7 @@ struct radeon_i2c_chan {
191 struct radeon_i2c_bus_rec rec; 191 struct radeon_i2c_bus_rec rec;
192 struct drm_dp_aux aux; 192 struct drm_dp_aux aux;
193 bool has_aux; 193 bool has_aux;
194 struct mutex mutex;
194}; 195};
195 196
196/* mostly for macs, but really any system without connector tables */ 197/* mostly for macs, but really any system without connector tables */
@@ -324,8 +325,8 @@ struct radeon_crtc {
324 struct drm_display_mode native_mode; 325 struct drm_display_mode native_mode;
325 int pll_id; 326 int pll_id;
326 /* page flipping */ 327 /* page flipping */
327 struct radeon_unpin_work *unpin_work; 328 struct workqueue_struct *flip_queue;
328 int deferred_flip_completion; 329 struct radeon_flip_work *flip_work;
329 /* pll sharing */ 330 /* pll sharing */
330 struct radeon_atom_ss ss; 331 struct radeon_atom_ss ss;
331 bool ss_enabled; 332 bool ss_enabled;
@@ -505,6 +506,7 @@ struct radeon_connector {
505 struct radeon_i2c_chan *router_bus; 506 struct radeon_i2c_chan *router_bus;
506 enum radeon_connector_audio audio; 507 enum radeon_connector_audio audio;
507 enum radeon_connector_dither dither; 508 enum radeon_connector_dither dither;
509 int pixelclock_for_modeset;
508}; 510};
509 511
510struct radeon_framebuffer { 512struct radeon_framebuffer {
@@ -906,6 +908,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
906 908
907void radeon_fb_output_poll_changed(struct radeon_device *rdev); 909void radeon_fb_output_poll_changed(struct radeon_device *rdev);
908 910
911void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
909void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); 912void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
910 913
911int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); 914int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4faa4d6f9bb4..6c717b257d6d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,7 +446,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
446 list_for_each_entry(lobj, head, tv.head) { 446 list_for_each_entry(lobj, head, tv.head) {
447 bo = lobj->robj; 447 bo = lobj->robj;
448 if (!bo->pin_count) { 448 if (!bo->pin_count) {
449 u32 domain = lobj->domain; 449 u32 domain = lobj->prefered_domains;
450 u32 current_domain = 450 u32 current_domain =
451 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 451 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
452 452
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
458 * into account. We don't want to disallow buffer moves 458 * into account. We don't want to disallow buffer moves
459 * completely. 459 * completely.
460 */ 460 */
461 if ((lobj->alt_domain & current_domain) != 0 && 461 if ((lobj->allowed_domains & current_domain) != 0 &&
462 (domain & current_domain) == 0 && /* will be moved */ 462 (domain & current_domain) == 0 && /* will be moved */
463 bytes_moved > bytes_moved_threshold) { 463 bytes_moved > bytes_moved_threshold) {
464 /* don't move it */ 464 /* don't move it */
@@ -476,8 +476,9 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
476 initial_bytes_moved; 476 initial_bytes_moved;
477 477
478 if (unlikely(r)) { 478 if (unlikely(r)) {
479 if (r != -ERESTARTSYS && domain != lobj->alt_domain) { 479 if (r != -ERESTARTSYS &&
480 domain = lobj->alt_domain; 480 domain != lobj->allowed_domains) {
481 domain = lobj->allowed_domains;
481 goto retry; 482 goto retry;
482 } 483 }
483 ttm_eu_backoff_reservation(ticket, head); 484 ttm_eu_backoff_reservation(ticket, head);
@@ -730,7 +731,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
730{ 731{
731 int r; 732 int r;
732 733
733 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 734 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
734 if (unlikely(r != 0)) 735 if (unlikely(r != 0))
735 return r; 736 return r;
736 spin_lock(&bo->tbo.bdev->fence_lock); 737 spin_lock(&bo->tbo.bdev->fence_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9e7b25a0629d..5a873f31a171 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
65{ 65{
66 int r; 66 int r;
67 67
68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); 68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
69 if (unlikely(r != 0)) { 69 if (unlikely(r != 0)) {
70 if (r != -ERESTARTSYS) 70 if (r != -ERESTARTSYS)
71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo); 71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2bdae61c0ac0..12c663e86ca1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -984,6 +984,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
984 if (enable) { 984 if (enable) {
985 mutex_lock(&rdev->pm.mutex); 985 mutex_lock(&rdev->pm.mutex);
986 rdev->pm.dpm.uvd_active = true; 986 rdev->pm.dpm.uvd_active = true;
987 /* disable this for now */
988#if 0
987 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 989 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
988 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 990 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
989 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 991 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -993,6 +995,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
993 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 995 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
994 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 996 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
995 else 997 else
998#endif
996 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 999 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
997 rdev->pm.dpm.state = dpm_state; 1000 rdev->pm.dpm.state = dpm_state;
998 mutex_unlock(&rdev->pm.mutex); 1001 mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 956ab7f14e16..23bb64fd775f 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3054,7 +3054,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
3054 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) 3054 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
3055 value = 0; 3055 value = 0;
3056 else 3056 else
3057 value = drm_dev_to_irq(dev); 3057 value = dev->pdev->irq;
3058 break; 3058 break;
3059 case RADEON_PARAM_GART_BASE: 3059 case RADEON_PARAM_GART_BASE:
3060 value = dev_priv->gart_vm_start; 3060 value = dev_priv->gart_vm_start;
@@ -3258,4 +3258,4 @@ struct drm_ioctl_desc radeon_ioctls[] = {
3258 DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) 3258 DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
3259}; 3259};
3260 3260
3261int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3261int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1b65ae2433cd..a4ad270e8261 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
812 (rdev->pm.dpm.hd != hd)) { 812 (rdev->pm.dpm.hd != hd)) {
813 rdev->pm.dpm.sd = sd; 813 rdev->pm.dpm.sd = sd;
814 rdev->pm.dpm.hd = hd; 814 rdev->pm.dpm.hd = hd;
815 streams_changed = true; 815 /* disable this for now */
816 /*streams_changed = true;*/
816 } 817 }
817 } 818 }
818 819
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 3971d968af6c..aa21c31a846c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
66 case CHIP_BONAIRE: 66 case CHIP_BONAIRE:
67 case CHIP_KAVERI: 67 case CHIP_KAVERI:
68 case CHIP_KABINI: 68 case CHIP_KABINI:
69 case CHIP_HAWAII:
69 case CHIP_MULLINS: 70 case CHIP_MULLINS:
70 fw_name = FIRMWARE_BONAIRE; 71 fw_name = FIRMWARE_BONAIRE;
71 break; 72 break;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index c11b71d249e3..899d9126cad6 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -59,7 +59,7 @@
59 */ 59 */
60static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) 60static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61{ 61{
62 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; 62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
63} 63}
64 64
65/** 65/**
@@ -140,8 +140,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
140 /* add the vm page table to the list */ 140 /* add the vm page table to the list */
141 list[0].gobj = NULL; 141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory; 142 list[0].robj = vm->page_directory;
143 list[0].domain = RADEON_GEM_DOMAIN_VRAM; 143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM; 144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0; 146 list[0].tiling_flags = 0;
147 list[0].handle = 0; 147 list[0].handle = 0;
@@ -153,8 +153,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
153 153
154 list[idx].gobj = NULL; 154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo; 155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].domain = RADEON_GEM_DOMAIN_VRAM; 156 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM; 157 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo; 158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0; 159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0; 160 list[idx].handle = 0;
@@ -474,8 +474,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
474 bo_va->valid = false; 474 bo_va->valid = false;
475 list_move(&bo_va->vm_list, head); 475 list_move(&bo_va->vm_list, head);
476 476
477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
479
480 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
479 481
480 if (eoffset > vm->max_pde_used) 482 if (eoffset > vm->max_pde_used)
481 vm->max_pde_used = eoffset; 483 vm->max_pde_used = eoffset;
@@ -583,10 +585,9 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
583int radeon_vm_update_page_directory(struct radeon_device *rdev, 585int radeon_vm_update_page_directory(struct radeon_device *rdev,
584 struct radeon_vm *vm) 586 struct radeon_vm *vm)
585{ 587{
586 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
587
588 struct radeon_bo *pd = vm->page_directory; 588 struct radeon_bo *pd = vm->page_directory;
589 uint64_t pd_addr = radeon_bo_gpu_offset(pd); 589 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
590 uint32_t incr = RADEON_VM_PTE_COUNT * 8;
590 uint64_t last_pde = ~0, last_pt = ~0; 591 uint64_t last_pde = ~0, last_pt = ~0;
591 unsigned count = 0, pt_idx, ndw; 592 unsigned count = 0, pt_idx, ndw;
592 struct radeon_ib ib; 593 struct radeon_ib ib;
@@ -660,6 +661,84 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
660} 661}
661 662
662/** 663/**
664 * radeon_vm_frag_ptes - add fragment information to PTEs
665 *
666 * @rdev: radeon_device pointer
667 * @ib: IB for the update
668 * @pe_start: first PTE to handle
669 * @pe_end: last PTE to handle
 670 * @addr: address those PTEs should point to
671 * @flags: hw mapping flags
672 *
673 * Global and local mutex must be locked!
674 */
675static void radeon_vm_frag_ptes(struct radeon_device *rdev,
676 struct radeon_ib *ib,
677 uint64_t pe_start, uint64_t pe_end,
678 uint64_t addr, uint32_t flags)
679{
680 /**
681 * The MC L1 TLB supports variable sized pages, based on a fragment
682 * field in the PTE. When this field is set to a non-zero value, page
683 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
684 * flags are considered valid for all PTEs within the fragment range
685 * and corresponding mappings are assumed to be physically contiguous.
686 *
687 * The L1 TLB can store a single PTE for the whole fragment,
688 * significantly increasing the space available for translation
689 * caching. This leads to large improvements in throughput when the
690 * TLB is under pressure.
691 *
692 * The L2 TLB distributes small and large fragments into two
693 * asymmetric partitions. The large fragment cache is significantly
694 * larger. Thus, we try to use large fragments wherever possible.
695 * Userspace can support this by aligning virtual base address and
696 * allocation size to the fragment size.
697 */
698
699 /* NI is optimized for 256KB fragments, SI and newer for 64KB */
700 uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
701 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
702 uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
703
704 uint64_t frag_start = ALIGN(pe_start, frag_align);
705 uint64_t frag_end = pe_end & ~(frag_align - 1);
706
707 unsigned count;
708
 709 /* system pages are not contiguous */
710 if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
711 (frag_start >= frag_end)) {
712
713 count = (pe_end - pe_start) / 8;
714 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
715 RADEON_GPU_PAGE_SIZE, flags);
716 return;
717 }
718
719 /* handle the 4K area at the beginning */
720 if (pe_start != frag_start) {
721 count = (frag_start - pe_start) / 8;
722 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
723 RADEON_GPU_PAGE_SIZE, flags);
724 addr += RADEON_GPU_PAGE_SIZE * count;
725 }
726
727 /* handle the area in the middle */
728 count = (frag_end - frag_start) / 8;
729 radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
730 RADEON_GPU_PAGE_SIZE, flags | frag_flags);
731
732 /* handle the 4K area at the end */
733 if (frag_end != pe_end) {
734 addr += RADEON_GPU_PAGE_SIZE * count;
735 count = (pe_end - frag_end) / 8;
736 radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
737 RADEON_GPU_PAGE_SIZE, flags);
738 }
739}
740
741/**
663 * radeon_vm_update_ptes - make sure that page tables are valid 742 * radeon_vm_update_ptes - make sure that page tables are valid
664 * 743 *
665 * @rdev: radeon_device pointer 744 * @rdev: radeon_device pointer
@@ -679,8 +758,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
679 uint64_t start, uint64_t end, 758 uint64_t start, uint64_t end,
680 uint64_t dst, uint32_t flags) 759 uint64_t dst, uint32_t flags)
681{ 760{
682 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; 761 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
683
684 uint64_t last_pte = ~0, last_dst = ~0; 762 uint64_t last_pte = ~0, last_dst = ~0;
685 unsigned count = 0; 763 unsigned count = 0;
686 uint64_t addr; 764 uint64_t addr;
@@ -690,7 +768,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
690 768
691 /* walk over the address space and update the page tables */ 769 /* walk over the address space and update the page tables */
692 for (addr = start; addr < end; ) { 770 for (addr = start; addr < end; ) {
693 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; 771 uint64_t pt_idx = addr >> radeon_vm_block_size;
694 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; 772 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
695 unsigned nptes; 773 unsigned nptes;
696 uint64_t pte; 774 uint64_t pte;
@@ -708,10 +786,9 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
708 if ((last_pte + 8 * count) != pte) { 786 if ((last_pte + 8 * count) != pte) {
709 787
710 if (count) { 788 if (count) {
711 radeon_asic_vm_set_page(rdev, ib, last_pte, 789 radeon_vm_frag_ptes(rdev, ib, last_pte,
712 last_dst, count, 790 last_pte + 8 * count,
713 RADEON_GPU_PAGE_SIZE, 791 last_dst, flags);
714 flags);
715 } 792 }
716 793
717 count = nptes; 794 count = nptes;
@@ -726,9 +803,9 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
726 } 803 }
727 804
728 if (count) { 805 if (count) {
729 radeon_asic_vm_set_page(rdev, ib, last_pte, 806 radeon_vm_frag_ptes(rdev, ib, last_pte,
730 last_dst, count, 807 last_pte + 8 * count,
731 RADEON_GPU_PAGE_SIZE, flags); 808 last_dst, flags);
732 } 809 }
733} 810}
734 811
@@ -796,13 +873,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
796 /* padding, etc. */ 873 /* padding, etc. */
797 ndw = 64; 874 ndw = 64;
798 875
799 if (RADEON_VM_BLOCK_SIZE > 11) 876 if (radeon_vm_block_size > 11)
800 /* reserve space for one header for every 2k dwords */ 877 /* reserve space for one header for every 2k dwords */
801 ndw += (nptes >> 11) * 4; 878 ndw += (nptes >> 11) * 4;
802 else 879 else
803 /* reserve space for one header for 880 /* reserve space for one header for
804 every (1 << BLOCK_SIZE) entries */ 881 every (1 << BLOCK_SIZE) entries */
805 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; 882 ndw += (nptes >> radeon_vm_block_size) * 4;
806 883
807 /* reserve space for pte addresses */ 884 /* reserve space for pte addresses */
808 ndw += nptes * 2; 885 ndw += nptes * 2;
@@ -892,6 +969,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
892 */ 969 */
893int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) 970int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
894{ 971{
972 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
973 RADEON_VM_PTE_COUNT * 8);
895 unsigned pd_size, pd_entries, pts_size; 974 unsigned pd_size, pd_entries, pts_size;
896 int r; 975 int r;
897 976
@@ -913,7 +992,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
913 return -ENOMEM; 992 return -ENOMEM;
914 } 993 }
915 994
916 r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false, 995 r = radeon_bo_create(rdev, pd_size, align, false,
917 RADEON_GEM_DOMAIN_VRAM, NULL, 996 RADEON_GEM_DOMAIN_VRAM, NULL,
918 &vm->page_directory); 997 &vm->page_directory);
919 if (r) 998 if (r)
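
The fragment logic in radeon_vm_frag_ptes() splits a PTE run into an unaligned head, a fragment-aligned body that gets the FRAG bits, and an unaligned tail. A worked example with SI-style 64KB fragments (frag_align = 0x80 bytes of 8-byte PTEs, i.e. 16 pages); the addresses are illustrative:

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pe_start = 0x1048;	/* first PTE address, in bytes */
	uint64_t pe_end   = 0x1238;	/* one past the last PTE */
	uint64_t frag_align = 0x80;	/* 64KB fragments on SI and newer */

	uint64_t frag_start = ALIGN(pe_start, frag_align);	/* 0x1080 */
	uint64_t frag_end   = pe_end & ~(frag_align - 1);	/* 0x1200 */

	printf("head: %llu PTEs at 4K\n",
	       (unsigned long long)(frag_start - pe_start) / 8);	/* 7 */
	printf("body: %llu PTEs with FRAG bits set\n",
	       (unsigned long long)(frag_end - frag_start) / 8);	/* 48 */
	printf("tail: %llu PTEs at 4K\n",
	       (unsigned long long)(pe_end - frag_end) / 8);		/* 7 */
	return 0;
}
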
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 130d5cc50d43..a0f96decece3 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,21 +212,16 @@ void rs400_gart_fini(struct radeon_device *rdev)
212#define RS400_PTE_WRITEABLE (1 << 2) 212#define RS400_PTE_WRITEABLE (1 << 2)
213#define RS400_PTE_READABLE (1 << 3) 213#define RS400_PTE_READABLE (1 << 3)
214 214
215int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
216{ 216{
217 uint32_t entry; 217 uint32_t entry;
218 u32 *gtt = rdev->gart.ptr; 218 u32 *gtt = rdev->gart.ptr;
219 219
220 if (i < 0 || i > rdev->gart.num_gpu_pages) {
221 return -EINVAL;
222 }
223
224 entry = (lower_32_bits(addr) & PAGE_MASK) | 220 entry = (lower_32_bits(addr) & PAGE_MASK) |
225 ((upper_32_bits(addr) & 0xff) << 4) | 221 ((upper_32_bits(addr) & 0xff) << 4) |
226 RS400_PTE_WRITEABLE | RS400_PTE_READABLE; 222 RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
227 entry = cpu_to_le32(entry); 223 entry = cpu_to_le32(entry);
228 gtt[i] = entry; 224 gtt[i] = entry;
229 return 0;
230} 225}
231 226
232int rs400_mc_wait_for_idle(struct radeon_device *rdev) 227int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 72d3616de08e..d1a35cb1c91d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -109,19 +109,7 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
109 } 109 }
110} 110}
111 111
112void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) 112void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
113{
114 /* enable the pflip int */
115 radeon_irq_kms_pflip_irq_get(rdev, crtc);
116}
117
118void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
119{
120 /* disable the pflip int */
121 radeon_irq_kms_pflip_irq_put(rdev, crtc);
122}
123
124u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
125{ 113{
126 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 114 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
127 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 115 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -148,9 +136,15 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
148 /* Unlock the lock, so double-buffering can take place inside vblank */ 136 /* Unlock the lock, so double-buffering can take place inside vblank */
149 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; 137 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
150 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 138 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
139}
140
141bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
142{
143 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
151 144
152 /* Return current update_pending status: */ 145 /* Return current update_pending status: */
153 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 146 return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
147 AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
154} 148}
155 149
156void avivo_program_fmt(struct drm_encoder *encoder) 150void avivo_program_fmt(struct drm_encoder *encoder)
@@ -632,24 +626,16 @@ static void rs600_gart_fini(struct radeon_device *rdev)
632 radeon_gart_table_vram_free(rdev); 626 radeon_gart_table_vram_free(rdev);
633} 627}
634 628
635#define R600_PTE_VALID (1 << 0) 629void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
636#define R600_PTE_SYSTEM (1 << 1)
637#define R600_PTE_SNOOPED (1 << 2)
638#define R600_PTE_READABLE (1 << 5)
639#define R600_PTE_WRITEABLE (1 << 6)
640
641int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
642{ 630{
643 void __iomem *ptr = (void *)rdev->gart.ptr; 631 void __iomem *ptr = (void *)rdev->gart.ptr;
644 632
645 if (i < 0 || i > rdev->gart.num_gpu_pages) {
646 return -EINVAL;
647 }
648 addr = addr & 0xFFFFFFFFFFFFF000ULL; 633 addr = addr & 0xFFFFFFFFFFFFF000ULL;
649 addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; 634 if (addr == rdev->dummy_page.addr)
650 addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; 635 addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
636 else
637 addr |= R600_PTE_GART;
651 writeq(addr, ptr + (i * 8)); 638 writeq(addr, ptr + (i * 8));
652 return 0;
653} 639}
654 640
655int rs600_irq_set(struct radeon_device *rdev) 641int rs600_irq_set(struct radeon_device *rdev)
@@ -787,7 +773,7 @@ int rs600_irq_process(struct radeon_device *rdev)
787 wake_up(&rdev->irq.vblank_queue); 773 wake_up(&rdev->irq.vblank_queue);
788 } 774 }
789 if (atomic_read(&rdev->irq.pflip[0])) 775 if (atomic_read(&rdev->irq.pflip[0]))
790 radeon_crtc_handle_flip(rdev, 0); 776 radeon_crtc_handle_vblank(rdev, 0);
791 } 777 }
792 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 778 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
793 if (rdev->irq.crtc_vblank_int[1]) { 779 if (rdev->irq.crtc_vblank_int[1]) {
@@ -796,7 +782,7 @@ int rs600_irq_process(struct radeon_device *rdev)
796 wake_up(&rdev->irq.vblank_queue); 782 wake_up(&rdev->irq.vblank_queue);
797 } 783 }
798 if (atomic_read(&rdev->irq.pflip[1])) 784 if (atomic_read(&rdev->irq.pflip[1]))
799 radeon_crtc_handle_flip(rdev, 1); 785 radeon_crtc_handle_vblank(rdev, 1);
800 } 786 }
801 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 787 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
802 queue_hotplug = true; 788 queue_hotplug = true;
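
Flip programming and flip-status polling are now separate hooks, and the status helper normalizes the masked register bit to bool with !! instead of returning the raw u32. The same idiom in isolation (the bit position is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SURFACE_UPDATE_PENDING (1u << 2)	/* illustrative bit */

static bool flip_pending(uint32_t update_reg)
{
	/* !! collapses any nonzero masked value to exactly 1 */
	return !!(update_reg & SURFACE_UPDATE_PENDING);
}

int main(void)
{
	printf("%d\n", flip_pending(0x04));	/* 1 */
	printf("%d\n", flip_pending(0x00));	/* 0 */
	return 0;
}
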
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fef310773aad..da8703d8d455 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,7 +801,7 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
801 return reference_clock; 801 return reference_clock;
802} 802}
803 803
804u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 804void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
805{ 805{
806 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 806 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
807 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 807 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -835,9 +835,15 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
835 /* Unlock the lock, so double-buffering can take place inside vblank */ 835 /* Unlock the lock, so double-buffering can take place inside vblank */
836 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; 836 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
837 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); 837 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
838}
839
840bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
841{
842 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
838 843
839 /* Return current update_pending status: */ 844 /* Return current update_pending status: */
840 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 845 return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
846 AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
841} 847}
842 848
843/* get temperature in millidegrees */ 849/* get temperature in millidegrees */
@@ -1321,6 +1327,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
1321 if (tmp < rdev->config.rv770.max_simds) { 1327 if (tmp < rdev->config.rv770.max_simds) {
1322 rdev->config.rv770.max_simds = tmp; 1328 rdev->config.rv770.max_simds = tmp;
1323 } 1329 }
1330 tmp = rdev->config.rv770.max_simds -
1331 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
1332 rdev->config.rv770.active_simds = tmp;
1324 1333
1325 switch (rdev->config.rv770.max_tile_pipes) { 1334 switch (rdev->config.rv770.max_tile_pipes) {
1326 case 1: 1335 case 1:
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 22a63c98ba14..730cee2c34cf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -71,6 +71,7 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
72MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); 72MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
73 73
74static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
74static void si_pcie_gen3_enable(struct radeon_device *rdev); 75static void si_pcie_gen3_enable(struct radeon_device *rdev);
75static void si_program_aspm(struct radeon_device *rdev); 76static void si_program_aspm(struct radeon_device *rdev);
76extern void sumo_rlc_fini(struct radeon_device *rdev); 77extern void sumo_rlc_fini(struct radeon_device *rdev);
@@ -2900,7 +2901,7 @@ static void si_gpu_init(struct radeon_device *rdev)
2900 u32 sx_debug_1; 2901 u32 sx_debug_1;
2901 u32 hdp_host_path_cntl; 2902 u32 hdp_host_path_cntl;
2902 u32 tmp; 2903 u32 tmp;
2903 int i, j; 2904 int i, j, k;
2904 2905
2905 switch (rdev->family) { 2906 switch (rdev->family) {
2906 case CHIP_TAHITI: 2907 case CHIP_TAHITI:
@@ -3098,6 +3099,14 @@ static void si_gpu_init(struct radeon_device *rdev)
3098 rdev->config.si.max_sh_per_se, 3099 rdev->config.si.max_sh_per_se,
3099 rdev->config.si.max_cu_per_sh); 3100 rdev->config.si.max_cu_per_sh);
3100 3101
3102 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3103 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3104 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
3105 rdev->config.si.active_cus +=
3106 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3107 }
3108 }
3109 }
3101 3110
3102 /* set HW defaults for 3D engine */ 3111 /* set HW defaults for 3D engine */
3103 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 3112 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -3186,7 +3195,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
3186 /* EVENT_WRITE_EOP - flush caches, send int */ 3195 /* EVENT_WRITE_EOP - flush caches, send int */
3187 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 3196 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3188 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); 3197 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
3189 radeon_ring_write(ring, addr & 0xffffffff); 3198 radeon_ring_write(ring, lower_32_bits(addr));
3190 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 3199 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
3191 radeon_ring_write(ring, fence->seq); 3200 radeon_ring_write(ring, fence->seq);
3192 radeon_ring_write(ring, 0); 3201 radeon_ring_write(ring, 0);
@@ -3219,7 +3228,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3219 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3228 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3220 radeon_ring_write(ring, (1 << 8)); 3229 radeon_ring_write(ring, (1 << 8));
3221 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3230 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3222 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); 3231 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
3223 radeon_ring_write(ring, next_rptr); 3232 radeon_ring_write(ring, next_rptr);
3224 } 3233 }
3225 3234
@@ -4044,18 +4053,21 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
4044 WREG32(MC_VM_MX_L1_TLB_CNTL, 4053 WREG32(MC_VM_MX_L1_TLB_CNTL,
4045 (0xA << 7) | 4054 (0xA << 7) |
4046 ENABLE_L1_TLB | 4055 ENABLE_L1_TLB |
4056 ENABLE_L1_FRAGMENT_PROCESSING |
4047 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 4057 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4048 ENABLE_ADVANCED_DRIVER_MODEL | 4058 ENABLE_ADVANCED_DRIVER_MODEL |
4049 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); 4059 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4050 /* Setup L2 cache */ 4060 /* Setup L2 cache */
4051 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | 4061 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
4062 ENABLE_L2_FRAGMENT_PROCESSING |
4052 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 4063 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4053 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | 4064 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4054 EFFECTIVE_L2_QUEUE_SIZE(7) | 4065 EFFECTIVE_L2_QUEUE_SIZE(7) |
4055 CONTEXT1_IDENTITY_ACCESS_MODE(1)); 4066 CONTEXT1_IDENTITY_ACCESS_MODE(1));
4056 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); 4067 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
4057 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 4068 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4058 L2_CACHE_BIGK_FRAGMENT_SIZE(0)); 4069 BANK_SELECT(4) |
4070 L2_CACHE_BIGK_FRAGMENT_SIZE(4));
4059 /* setup context0 */ 4071 /* setup context0 */
4060 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 4072 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4061 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 4073 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -4092,6 +4104,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
4092 (u32)(rdev->dummy_page.addr >> 12)); 4104 (u32)(rdev->dummy_page.addr >> 12));
4093 WREG32(VM_CONTEXT1_CNTL2, 4); 4105 WREG32(VM_CONTEXT1_CNTL2, 4);
4094 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 4106 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4107 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
4095 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 4108 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4096 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 4109 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4097 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | 4110 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -6151,7 +6164,7 @@ restart_ih:
6151 wake_up(&rdev->irq.vblank_queue); 6164 wake_up(&rdev->irq.vblank_queue);
6152 } 6165 }
6153 if (atomic_read(&rdev->irq.pflip[0])) 6166 if (atomic_read(&rdev->irq.pflip[0]))
6154 radeon_crtc_handle_flip(rdev, 0); 6167 radeon_crtc_handle_vblank(rdev, 0);
6155 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 6168 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6156 DRM_DEBUG("IH: D1 vblank\n"); 6169 DRM_DEBUG("IH: D1 vblank\n");
6157 } 6170 }
@@ -6177,7 +6190,7 @@ restart_ih:
6177 wake_up(&rdev->irq.vblank_queue); 6190 wake_up(&rdev->irq.vblank_queue);
6178 } 6191 }
6179 if (atomic_read(&rdev->irq.pflip[1])) 6192 if (atomic_read(&rdev->irq.pflip[1]))
6180 radeon_crtc_handle_flip(rdev, 1); 6193 radeon_crtc_handle_vblank(rdev, 1);
6181 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 6194 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6182 DRM_DEBUG("IH: D2 vblank\n"); 6195 DRM_DEBUG("IH: D2 vblank\n");
6183 } 6196 }
@@ -6203,7 +6216,7 @@ restart_ih:
6203 wake_up(&rdev->irq.vblank_queue); 6216 wake_up(&rdev->irq.vblank_queue);
6204 } 6217 }
6205 if (atomic_read(&rdev->irq.pflip[2])) 6218 if (atomic_read(&rdev->irq.pflip[2]))
6206 radeon_crtc_handle_flip(rdev, 2); 6219 radeon_crtc_handle_vblank(rdev, 2);
6207 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 6220 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6208 DRM_DEBUG("IH: D3 vblank\n"); 6221 DRM_DEBUG("IH: D3 vblank\n");
6209 } 6222 }
@@ -6229,7 +6242,7 @@ restart_ih:
6229 wake_up(&rdev->irq.vblank_queue); 6242 wake_up(&rdev->irq.vblank_queue);
6230 } 6243 }
6231 if (atomic_read(&rdev->irq.pflip[3])) 6244 if (atomic_read(&rdev->irq.pflip[3]))
6232 radeon_crtc_handle_flip(rdev, 3); 6245 radeon_crtc_handle_vblank(rdev, 3);
6233 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 6246 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6234 DRM_DEBUG("IH: D4 vblank\n"); 6247 DRM_DEBUG("IH: D4 vblank\n");
6235 } 6248 }
@@ -6255,7 +6268,7 @@ restart_ih:
6255 wake_up(&rdev->irq.vblank_queue); 6268 wake_up(&rdev->irq.vblank_queue);
6256 } 6269 }
6257 if (atomic_read(&rdev->irq.pflip[4])) 6270 if (atomic_read(&rdev->irq.pflip[4]))
6258 radeon_crtc_handle_flip(rdev, 4); 6271 radeon_crtc_handle_vblank(rdev, 4);
6259 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 6272 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6260 DRM_DEBUG("IH: D5 vblank\n"); 6273 DRM_DEBUG("IH: D5 vblank\n");
6261 } 6274 }
@@ -6281,7 +6294,7 @@ restart_ih:
6281 wake_up(&rdev->irq.vblank_queue); 6294 wake_up(&rdev->irq.vblank_queue);
6282 } 6295 }
6283 if (atomic_read(&rdev->irq.pflip[5])) 6296 if (atomic_read(&rdev->irq.pflip[5]))
6284 radeon_crtc_handle_flip(rdev, 5); 6297 radeon_crtc_handle_vblank(rdev, 5);
6285 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 6298 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6286 DRM_DEBUG("IH: D6 vblank\n"); 6299 DRM_DEBUG("IH: D6 vblank\n");
6287 } 6300 }
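
hweight32() is a population count, so summing it over each (SE, SH) bitmap yields the active-CU total; note the extra innermost k loop in the hunk above appears to add each bitmap's weight once per CU slot, scaling the sum by max_cu_per_sh. A portable model of the plain per-SH accumulation:

#include <stdio.h>
#include <stdint.h>

static unsigned hweight32_demo(uint32_t w)	/* population count */
{
	unsigned n = 0;
	while (w) {
		n += w & 1;
		w >>= 1;
	}
	return n;
}

int main(void)
{
	uint32_t bitmaps[2][1] = { { 0xff }, { 0x3f } };	/* illustrative */
	unsigned active = 0;

	for (int se = 0; se < 2; se++)
		for (int sh = 0; sh < 1; sh++)
			active += hweight32_demo(bitmaps[se][sh]);

	printf("active CUs: %u\n", active);	/* 8 + 6 = 14 */
	return 0;
}
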
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index de0ca070122f..e24c94b6d14d 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -79,7 +79,25 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
79 79
80 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 80 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
81 81
82 if (flags & R600_PTE_SYSTEM) { 82 if (flags == R600_PTE_GART) {
83 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
84 while (count) {
85 unsigned bytes = count * 8;
86 if (bytes > 0xFFFF8)
87 bytes = 0xFFFF8;
88
89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
90 1, 0, 0, bytes);
91 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
92 ib->ptr[ib->length_dw++] = lower_32_bits(src);
93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
94 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
95
96 pe += bytes;
97 src += bytes;
98 count -= bytes / 8;
99 }
100 } else if (flags & R600_PTE_SYSTEM) {
83 while (count) { 101 while (count) {
84 ndw = count * 2; 102 ndw = count * 2;
85 if (ndw > 0xFFFFE) 103 if (ndw > 0xFFFFE)
@@ -202,8 +220,8 @@ int si_copy_dma(struct radeon_device *rdev,
202 cur_size_in_bytes = 0xFFFFF; 220 cur_size_in_bytes = 0xFFFFF;
203 size_in_bytes -= cur_size_in_bytes; 221 size_in_bytes -= cur_size_in_bytes;
204 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); 222 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
205 radeon_ring_write(ring, dst_offset & 0xffffffff); 223 radeon_ring_write(ring, lower_32_bits(dst_offset));
206 radeon_ring_write(ring, src_offset & 0xffffffff); 224 radeon_ring_write(ring, lower_32_bits(src_offset));
207 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); 225 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
208 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); 226 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
209 src_offset += cur_size_in_bytes; 227 src_offset += cur_size_in_bytes;
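
The new GART fast path copies PTEs straight out of the master GART table with DMA COPY packets, capped at 0xFFFF8 bytes each, the largest 8-byte-aligned count that fits the packet's size field. The chunking loop in isolation (counts illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t count = 300000;	/* PTEs to copy */

	while (count) {
		unsigned bytes = count * 8 > 0xFFFF8 ? 0xFFFF8 : count * 8;

		printf("packet: %u bytes\n", bytes);	/* emit one COPY packet */
		count -= bytes / 8;
	}
	return 0;	/* 300000 PTEs -> 3 packets */
}
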
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 9a3567bedaae..58918868f894 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1948,6 +1948,10 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1948 si_pi->cac_weights = cac_weights_cape_verde_pro; 1948 si_pi->cac_weights = cac_weights_cape_verde_pro;
1949 si_pi->dte_data = dte_data_cape_verde; 1949 si_pi->dte_data = dte_data_cape_verde;
1950 break; 1950 break;
1951 case 0x682C:
1952 si_pi->cac_weights = cac_weights_cape_verde_pro;
1953 si_pi->dte_data = dte_data_sun_xt;
1954 break;
1951 case 0x6825: 1955 case 0x6825:
1952 case 0x6827: 1956 case 0x6827:
1953 si_pi->cac_weights = cac_weights_heathrow; 1957 si_pi->cac_weights = cac_weights_heathrow;
@@ -1971,10 +1975,9 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1971 si_pi->dte_data = dte_data_venus_xt; 1975 si_pi->dte_data = dte_data_venus_xt;
1972 break; 1976 break;
1973 case 0x6823: 1977 case 0x6823:
1974 si_pi->cac_weights = cac_weights_chelsea_pro;
1975 si_pi->dte_data = dte_data_venus_pro;
1976 break;
1977 case 0x682B: 1978 case 0x682B:
1979 case 0x6822:
1980 case 0x682A:
1978 si_pi->cac_weights = cac_weights_chelsea_pro; 1981 si_pi->cac_weights = cac_weights_chelsea_pro;
1979 si_pi->dte_data = dte_data_venus_pro; 1982 si_pi->dte_data = dte_data_venus_pro;
1980 break; 1983 break;
@@ -1988,6 +1991,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1988 case 0x6601: 1991 case 0x6601:
1989 case 0x6621: 1992 case 0x6621:
1990 case 0x6603: 1993 case 0x6603:
1994 case 0x6605:
1991 si_pi->cac_weights = cac_weights_mars_pro; 1995 si_pi->cac_weights = cac_weights_mars_pro;
1992 si_pi->lcac_config = lcac_mars_pro; 1996 si_pi->lcac_config = lcac_mars_pro;
1993 si_pi->cac_override = cac_override_oland; 1997 si_pi->cac_override = cac_override_oland;
@@ -1998,6 +2002,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1998 case 0x6600: 2002 case 0x6600:
1999 case 0x6606: 2003 case 0x6606:
2000 case 0x6620: 2004 case 0x6620:
2005 case 0x6604:
2001 si_pi->cac_weights = cac_weights_mars_xt; 2006 si_pi->cac_weights = cac_weights_mars_xt;
2002 si_pi->lcac_config = lcac_mars_pro; 2007 si_pi->lcac_config = lcac_mars_pro;
2003 si_pi->cac_override = cac_override_oland; 2008 si_pi->cac_override = cac_override_oland;
@@ -2006,6 +2011,8 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
2006 update_dte_from_pl2 = true; 2011 update_dte_from_pl2 = true;
2007 break; 2012 break;
2008 case 0x6611: 2013 case 0x6611:
2014 case 0x6613:
2015 case 0x6608:
2009 si_pi->cac_weights = cac_weights_oland_pro; 2016 si_pi->cac_weights = cac_weights_oland_pro;
2010 si_pi->lcac_config = lcac_mars_pro; 2017 si_pi->lcac_config = lcac_mars_pro;
2011 si_pi->cac_override = cac_override_oland; 2018 si_pi->cac_override = cac_override_oland;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7321283602ce..fd414d34d885 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -362,6 +362,7 @@
362#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) 362#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
363#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) 363#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
364#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) 364#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
365#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
365#define VM_CONTEXT1_CNTL 0x1414 366#define VM_CONTEXT1_CNTL 0x1414
366#define VM_CONTEXT0_CNTL2 0x1430 367#define VM_CONTEXT0_CNTL2 0x1430
367#define VM_CONTEXT1_CNTL2 0x1434 368#define VM_CONTEXT1_CNTL2 0x1434
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index d1771004cb52..8bfdadd56598 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -45,7 +45,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
46 radeon_ring_write(ring, fence->seq); 46 radeon_ring_write(ring, fence->seq);
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); 47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
48 radeon_ring_write(ring, addr & 0xffffffff); 48 radeon_ring_write(ring, lower_32_bits(addr));
49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); 49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); 51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index d8e835ac2c5e..2e3d7b5b0ad7 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,7 @@
1config DRM_RCAR_DU 1config DRM_RCAR_DU
2 tristate "DRM Support for R-Car Display Unit" 2 tristate "DRM Support for R-Car Display Unit"
3 depends on DRM && ARM 3 depends on DRM && ARM
4 depends on ARCH_SHMOBILE || COMPILE_TEST
4 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
@@ -12,6 +13,7 @@ config DRM_RCAR_DU
12config DRM_RCAR_LVDS 13config DRM_RCAR_LVDS
13 bool "R-Car DU LVDS Encoder Support" 14 bool "R-Car DU LVDS Encoder Support"
14 depends on DRM_RCAR_DU 15 depends on DRM_RCAR_DU
16 depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
15 help 17 help
16 Enable support for the R-Car Display Unit embedded LVDS encoders 18 Enable support for the R-Car Display Unit embedded LVDS encoders
17 (currently only on R8A7790). 19 (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 4f3ba93cd91d..289048d1c7b2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -57,15 +57,8 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
57 return 1; 57 return 1;
58} 58}
59 59
60static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector,
61 struct drm_display_mode *mode)
62{
63 return MODE_OK;
64}
65
66static const struct drm_connector_helper_funcs connector_helper_funcs = { 60static const struct drm_connector_helper_funcs connector_helper_funcs = {
67 .get_modes = rcar_du_lvds_connector_get_modes, 61 .get_modes = rcar_du_lvds_connector_get_modes,
68 .mode_valid = rcar_du_lvds_connector_mode_valid,
69 .best_encoder = rcar_du_connector_best_encoder, 62 .best_encoder = rcar_du_connector_best_encoder,
70}; 63};
71 64
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 41d563adfeaa..ccfe64c7188f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -25,15 +25,8 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
25 return 0; 25 return 0;
26} 26}
27 27
28static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector,
29 struct drm_display_mode *mode)
30{
31 return MODE_OK;
32}
33
34static const struct drm_connector_helper_funcs connector_helper_funcs = { 28static const struct drm_connector_helper_funcs connector_helper_funcs = {
35 .get_modes = rcar_du_vga_connector_get_modes, 29 .get_modes = rcar_du_vga_connector_get_modes,
36 .mode_valid = rcar_du_vga_connector_mode_valid,
37 .best_encoder = rcar_du_connector_best_encoder, 30 .best_encoder = rcar_du_connector_best_encoder,
38}; 31};
39 32
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index d2b2df9e26f3..c97cdc9ab239 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1079,4 +1079,4 @@ const struct drm_ioctl_desc savage_ioctls[] = {
1079 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), 1079 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
1080}; 1080};
1081 1081
1082int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1082int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 2ee44ca9d67f..a50fe0eeaa0d 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,7 @@
1config DRM_SHMOBILE 1config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile" 2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (ARM || SUPERH) 3 depends on DRM && ARM
4 depends on ARCH_SHMOBILE || COMPILE_TEST
4 select BACKLIGHT_CLASS_DEVICE 5 select BACKLIGHT_CLASS_DEVICE
5 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER 7 select DRM_KMS_FB_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e9e5e6d368cc..faf176b2daf9 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -674,12 +674,6 @@ static int shmob_drm_connector_get_modes(struct drm_connector *connector)
674 return 1; 674 return 1;
675} 675}
676 676
677static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
678 struct drm_display_mode *mode)
679{
680 return MODE_OK;
681}
682
683static struct drm_encoder * 677static struct drm_encoder *
684shmob_drm_connector_best_encoder(struct drm_connector *connector) 678shmob_drm_connector_best_encoder(struct drm_connector *connector)
685{ 679{
@@ -690,7 +684,6 @@ shmob_drm_connector_best_encoder(struct drm_connector *connector)
690 684
691static const struct drm_connector_helper_funcs connector_helper_funcs = { 685static const struct drm_connector_helper_funcs connector_helper_funcs = {
692 .get_modes = shmob_drm_connector_get_modes, 686 .get_modes = shmob_drm_connector_get_modes,
693 .mode_valid = shmob_drm_connector_mode_valid,
694 .best_encoder = shmob_drm_connector_best_encoder, 687 .best_encoder = shmob_drm_connector_best_encoder,
695}; 688};
696 689
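
The removed callbacks were no-ops: the probe helper treats a NULL ->mode_valid as "every mode is acceptable", so returning MODE_OK unconditionally added nothing. Roughly the helper's behavior, modeled standalone:

#include <stdio.h>

enum mode_status { MODE_OK = 0, MODE_BAD = 1 };

struct helper_funcs {
	enum mode_status (*mode_valid)(int mode);	/* may be NULL */
};

/* models the probe helper's treatment of a missing callback */
static enum mode_status check_mode(const struct helper_funcs *f, int mode)
{
	return f->mode_valid ? f->mode_valid(mode) : MODE_OK;
}

int main(void)
{
	struct helper_funcs none = { 0 };	/* like the patched drivers */
	printf("%d\n", check_mode(&none, 42));	/* 0 == MODE_OK */
	return 0;
}
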
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c839c9c89efb..82c84c7fd4f6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -185,7 +185,7 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
 		goto done;
 	}
 
-	ret = drm_irq_install(dev);
+	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to install IRQ handler\n");
 		goto done;
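drm_irq_install() now takes the IRQ number explicitly instead of deriving it internally through the bus abstraction that this cycle is phasing out. For platform devices the lookup is the platform_get_irq() call seen above; a minimal sketch of a caller, with pdev and ddev standing in for the platform and DRM devices:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	ret = drm_irq_install(ddev, irq);
	if (ret < 0)
		return ret;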
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 0573be0d2933..77f288e4a0a6 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -359,4 +359,4 @@ const struct drm_ioctl_desc sis_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 };
 
-int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+int sis_max_ioctl = ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index d43f21bb4596..2c66a8db9da4 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -1,7 +1,6 @@
 ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
 
 tegra-drm-y := \
-	bus.o \
 	drm.o \
 	gem.o \
 	fb.o \
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
deleted file mode 100644
index 71cef5c13dc8..000000000000
--- a/drivers/gpu/drm/tegra/bus.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2013 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "drm.h"
-
-static int drm_host1x_set_busid(struct drm_device *dev,
-				struct drm_master *master)
-{
-	const char *device = dev_name(dev->dev);
-	const char *driver = dev->driver->name;
-	const char *bus = dev->dev->bus->name;
-	int length;
-
-	master->unique_len = strlen(bus) + 1 + strlen(device);
-	master->unique_size = master->unique_len;
-
-	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-	if (!master->unique)
-		return -ENOMEM;
-
-	snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
-
-	length = strlen(driver) + 1 + master->unique_len;
-
-	dev->devname = kmalloc(length + 1, GFP_KERNEL);
-	if (!dev->devname)
-		return -ENOMEM;
-
-	snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
-
-	return 0;
-}
-
-static struct drm_bus drm_host1x_bus = {
-	.bus_type = DRIVER_BUS_HOST1X,
-	.set_busid = drm_host1x_set_busid,
-};
-
-int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
-{
-	struct drm_device *drm;
-	int ret;
-
-	driver->bus = &drm_host1x_bus;
-
-	drm = drm_dev_alloc(driver, &device->dev);
-	if (!drm)
-		return -ENOMEM;
-
-	ret = drm_dev_register(drm, 0);
-	if (ret)
-		goto err_free;
-
-	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
-		 driver->major, driver->minor, driver->patchlevel,
-		 driver->date, drm->primary->index);
-
-	return 0;
-
-err_free:
-	drm_dev_unref(drm);
-	return ret;
-}
-
-void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
-{
-	struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
-
-	drm_put_dev(tegra->drm);
-}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index edb871d7d395..ef40381f3909 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -17,6 +17,7 @@
 
 struct tegra_dc_soc_info {
 	bool supports_interlacing;
+	bool supports_cursor;
 };
 
 struct tegra_plane {
@@ -29,6 +30,254 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
 	return container_of(plane, struct tegra_plane, base);
 }
 
+static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
+{
+	/* assume no swapping of fetched data */
+	if (swap)
+		*swap = BYTE_SWAP_NOSWAP;
+
+	switch (format) {
+	case DRM_FORMAT_XBGR8888:
+		return WIN_COLOR_DEPTH_R8G8B8A8;
+
+	case DRM_FORMAT_XRGB8888:
+		return WIN_COLOR_DEPTH_B8G8R8A8;
+
+	case DRM_FORMAT_RGB565:
+		return WIN_COLOR_DEPTH_B5G6R5;
+
+	case DRM_FORMAT_UYVY:
+		return WIN_COLOR_DEPTH_YCbCr422;
+
+	case DRM_FORMAT_YUYV:
+		if (swap)
+			*swap = BYTE_SWAP_SWAP2;
+
+		return WIN_COLOR_DEPTH_YCbCr422;
+
+	case DRM_FORMAT_YUV420:
+		return WIN_COLOR_DEPTH_YCbCr420P;
+
+	case DRM_FORMAT_YUV422:
+		return WIN_COLOR_DEPTH_YCbCr422P;
+
+	default:
+		break;
+	}
+
+	WARN(1, "unsupported pixel format %u, using default\n", format);
+	return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+	switch (format) {
+	case WIN_COLOR_DEPTH_YCbCr422:
+	case WIN_COLOR_DEPTH_YUV422:
+		if (planar)
+			*planar = false;
+
+		return true;
+
+	case WIN_COLOR_DEPTH_YCbCr420P:
+	case WIN_COLOR_DEPTH_YUV420P:
+	case WIN_COLOR_DEPTH_YCbCr422P:
+	case WIN_COLOR_DEPTH_YUV422P:
+	case WIN_COLOR_DEPTH_YCbCr422R:
+	case WIN_COLOR_DEPTH_YUV422R:
+	case WIN_COLOR_DEPTH_YCbCr422RA:
+	case WIN_COLOR_DEPTH_YUV422RA:
+		if (planar)
+			*planar = true;
+
+		return true;
+	}
+
+	return false;
+}
+
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
+				  unsigned int bpp)
+{
+	fixed20_12 outf = dfixed_init(out);
+	fixed20_12 inf = dfixed_init(in);
+	u32 dda_inc;
+	int max;
+
+	if (v)
+		max = 15;
+	else {
+		switch (bpp) {
+		case 2:
+			max = 8;
+			break;
+
+		default:
+			WARN_ON_ONCE(1);
+			/* fallthrough */
+		case 4:
+			max = 4;
+			break;
+		}
+	}
+
+	outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+	inf.full -= dfixed_const(1);
+
+	dda_inc = dfixed_div(inf, outf);
+	dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+	return dda_inc;
+}
+
+static inline u32 compute_initial_dda(unsigned int in)
+{
+	fixed20_12 inf = dfixed_init(in);
+	return dfixed_frac(inf);
+}
+
+static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+				 const struct tegra_dc_window *window)
+{
+	unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+	unsigned long value;
+	bool yuv, planar;
+
+	/*
+	 * For YUV planar modes, the number of bytes per pixel takes into
+	 * account only the luma component and therefore is 1.
+	 */
+	yuv = tegra_dc_format_is_yuv(window->format, &planar);
+	if (!yuv)
+		bpp = window->bits_per_pixel / 8;
+	else
+		bpp = planar ? 1 : 2;
+
+	value = WINDOW_A_SELECT << index;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+	tegra_dc_writel(dc, window->swap, DC_WIN_BYTE_SWAP);
+
+	value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+	tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+	value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+	tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+	h_offset = window->src.x * bpp;
+	v_offset = window->src.y;
+	h_size = window->src.w * bpp;
+	v_size = window->src.h;
+
+	value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+	tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+	/*
+	 * For DDA computations the number of bytes per pixel for YUV planar
+	 * modes needs to take into account all Y, U and V components.
+	 */
+	if (yuv && planar)
+		bpp = 2;
+
+	h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+	v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+	value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+	tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+	h_dda = compute_initial_dda(window->src.x);
+	v_dda = compute_initial_dda(window->src.y);
+
+	tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+	tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+	tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+	tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+	tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+	if (yuv && planar) {
+		tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+		tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+		value = window->stride[1] << 16 | window->stride[0];
+		tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+	} else {
+		tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+	}
+
+	if (window->bottom_up)
+		v_offset += window->src.h - 1;
+
+	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+	if (window->tiled) {
+		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	} else {
+		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+	value = WIN_ENABLE;
+
+	if (yuv) {
+		/* setup default colorspace conversion coefficients */
+		tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+		tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+		tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+		tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+		tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+		tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+		value |= CSC_ENABLE;
+	} else if (window->bits_per_pixel < 24) {
+		value |= COLOR_EXPAND;
+	}
+
+	if (window->bottom_up)
+		value |= V_DIRECTION;
+
+	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+	/*
+	 * Disable blending and assume Window A is the bottom-most window,
+	 * Window C is the top-most window and Window B is in the middle.
+	 */
+	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+	switch (index) {
+	case 0:
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+		break;
+
+	case 1:
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+		break;
+
+	case 2:
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+		break;
+	}
+
+	tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
 static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 			      struct drm_framebuffer *fb, int crtc_x,
 			      int crtc_y, unsigned int crtc_w,
@@ -49,7 +298,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	window.dst.y = crtc_y;
 	window.dst.w = crtc_w;
 	window.dst.h = crtc_h;
-	window.format = tegra_dc_format(fb->pixel_format);
+	window.format = tegra_dc_format(fb->pixel_format, &window.swap);
 	window.bits_per_pixel = fb->bits_per_pixel;
 	window.bottom_up = tegra_fb_is_bottom_up(fb);
 	window.tiled = tegra_fb_is_tiled(fb);
@@ -117,6 +366,7 @@ static const uint32_t plane_formats[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YUV420,
 	DRM_FORMAT_YUV422,
 };
@@ -150,9 +400,9 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
 static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 			     struct drm_framebuffer *fb)
 {
-	unsigned int format = tegra_dc_format(fb->pixel_format);
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
 	unsigned int h_offset = 0, v_offset = 0;
+	unsigned int format, swap;
 	unsigned long value;
 
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -162,7 +412,10 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 
 	tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+	format = tegra_dc_format(fb->pixel_format, &swap);
 	tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
+	tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
 
 	if (tegra_fb_is_tiled(fb)) {
 		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
@@ -177,13 +430,13 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	/* make sure bottom-up buffers are properly displayed */
 	if (tegra_fb_is_bottom_up(fb)) {
 		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
-		value |= INVERT_V;
+		value |= V_DIRECTION;
 		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
 
 		v_offset += fb->height - 1;
 	} else {
 		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
-		value &= ~INVERT_V;
+		value &= ~V_DIRECTION;
 		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
 	}
 
@@ -225,6 +478,109 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
 	spin_unlock_irqrestore(&dc->lock, flags);
 }
 
+static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
+				uint32_t handle, uint32_t width,
+				uint32_t height, int32_t hot_x, int32_t hot_y)
+{
+	unsigned long value = CURSOR_CLIP_DISPLAY;
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo = NULL;
+
+	if (!dc->soc->supports_cursor)
+		return -ENXIO;
+
+	if (width != height)
+		return -EINVAL;
+
+	switch (width) {
+	case 32:
+		value |= CURSOR_SIZE_32x32;
+		break;
+
+	case 64:
+		value |= CURSOR_SIZE_64x64;
+		break;
+
+	case 128:
+		value |= CURSOR_SIZE_128x128;
+
+	case 256:
+		value |= CURSOR_SIZE_256x256;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (handle) {
+		gem = drm_gem_object_lookup(crtc->dev, file, handle);
+		if (!gem)
+			return -ENOENT;
+
+		bo = to_tegra_bo(gem);
+	}
+
+	if (bo) {
+		unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
+#endif
+
+		tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value |= CURSOR_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+		value &= ~CURSOR_DST_BLEND_MASK;
+		value &= ~CURSOR_SRC_BLEND_MASK;
+		value |= CURSOR_MODE_NORMAL;
+		value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+		value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+		value |= CURSOR_ALPHA;
+		tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+	} else {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~CURSOR_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+	}
+
+	tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
+static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned long value;
+
+	if (!dc->soc->supports_cursor)
+		return -ENXIO;
+
+	value = ((y & 0x3fff) << 16) | (x & 0x3fff);
+	tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+
+	tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* XXX: only required on generations earlier than Tegra124? */
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
 static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 {
 	struct drm_device *drm = dc->base.dev;
@@ -301,6 +657,8 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs tegra_crtc_funcs = {
+	.cursor_set2 = tegra_dc_cursor_set2,
+	.cursor_move = tegra_dc_cursor_move,
 	.page_flip = tegra_dc_page_flip,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = tegra_dc_destroy,
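tegra wires up .cursor_set2 rather than the older .cursor_set: the variant carries the cursor hotspot, which the core passes through from the MODE_CURSOR2 ioctl. The slot's signature matches the handler added above:

	int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height,
			   int32_t hot_x, int32_t hot_y);

Two things worth flagging in the handler as merged: the 128x128 case lacks a break and falls through into the 256x256 case, and (bo->paddr & 0xfffffffc) >> 32 always evaluates to zero, so the START_ADDR_HI write never carries real address bits. Both read as bugs rather than intent.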
@@ -334,52 +692,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
-				  unsigned int bpp)
-{
-	fixed20_12 outf = dfixed_init(out);
-	fixed20_12 inf = dfixed_init(in);
-	u32 dda_inc;
-	int max;
-
-	if (v)
-		max = 15;
-	else {
-		switch (bpp) {
-		case 2:
-			max = 8;
-			break;
-
-		default:
-			WARN_ON_ONCE(1);
-			/* fallthrough */
-		case 4:
-			max = 4;
-			break;
-		}
-	}
-
-	outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
-	inf.full -= dfixed_const(1);
-
-	dda_inc = dfixed_div(inf, outf);
-	dda_inc = min_t(u32, dda_inc, dfixed_const(max));
-
-	return dda_inc;
-}
-
-static inline u32 compute_initial_dda(unsigned int in)
-{
-	fixed20_12 inf = dfixed_init(in);
-	return dfixed_frac(inf);
-}
-
 static int tegra_dc_set_timings(struct tegra_dc *dc,
 				struct drm_display_mode *mode)
 {
-	/* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
-	unsigned int h_ref_to_sync = 0;
-	unsigned int v_ref_to_sync = 0;
+	unsigned int h_ref_to_sync = 1;
+	unsigned int v_ref_to_sync = 1;
 	unsigned long value;
 
 	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
@@ -406,13 +723,14 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
 }
 
 static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
-				struct drm_display_mode *mode,
-				unsigned long *div)
+				struct drm_display_mode *mode)
 {
-	unsigned long pclk = mode->clock * 1000, rate;
+	unsigned long pclk = mode->clock * 1000;
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct tegra_output *output = NULL;
 	struct drm_encoder *encoder;
+	unsigned int div;
+	u32 value;
 	long err;
 
 	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
@@ -425,221 +743,23 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
 		return -ENODEV;
 
 	/*
-	 * This assumes that the display controller will divide its parent
-	 * clock by 2 to generate the pixel clock.
+	 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+	 * respectively, each of which divides the base pll_d by 2.
 	 */
-	err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+	err = tegra_output_setup_clock(output, dc->clk, pclk, &div);
 	if (err < 0) {
 		dev_err(dc->dev, "failed to setup clock: %ld\n", err);
 		return err;
 	}
 
-	rate = clk_get_rate(dc->clk);
-	*div = (rate * 2 / pclk) - 2;
-
-	DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
-
-	return 0;
-}
-
-static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
-{
-	switch (format) {
-	case WIN_COLOR_DEPTH_YCbCr422:
-	case WIN_COLOR_DEPTH_YUV422:
-		if (planar)
-			*planar = false;
-
-		return true;
-
-	case WIN_COLOR_DEPTH_YCbCr420P:
-	case WIN_COLOR_DEPTH_YUV420P:
-	case WIN_COLOR_DEPTH_YCbCr422P:
-	case WIN_COLOR_DEPTH_YUV422P:
-	case WIN_COLOR_DEPTH_YCbCr422R:
-	case WIN_COLOR_DEPTH_YUV422R:
-	case WIN_COLOR_DEPTH_YCbCr422RA:
-	case WIN_COLOR_DEPTH_YUV422RA:
-		if (planar)
-			*planar = true;
-
-		return true;
-	}
-
-	return false;
-}
-
-int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
-			  const struct tegra_dc_window *window)
-{
-	unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
-	unsigned long value;
-	bool yuv, planar;
-
-	/*
-	 * For YUV planar modes, the number of bytes per pixel takes into
-	 * account only the luma component and therefore is 1.
-	 */
-	yuv = tegra_dc_format_is_yuv(window->format, &planar);
-	if (!yuv)
-		bpp = window->bits_per_pixel / 8;
-	else
-		bpp = planar ? 1 : 2;
-
-	value = WINDOW_A_SELECT << index;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
-	tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
-	tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
-
-	value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
-	tegra_dc_writel(dc, value, DC_WIN_POSITION);
-
-	value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
-	tegra_dc_writel(dc, value, DC_WIN_SIZE);
-
-	h_offset = window->src.x * bpp;
-	v_offset = window->src.y;
-	h_size = window->src.w * bpp;
-	v_size = window->src.h;
-
-	value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
-	tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
-
-	/*
-	 * For DDA computations the number of bytes per pixel for YUV planar
-	 * modes needs to take into account all Y, U and V components.
-	 */
-	if (yuv && planar)
-		bpp = 2;
-
-	h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
-	v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
-
-	value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
-	tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
-
-	h_dda = compute_initial_dda(window->src.x);
-	v_dda = compute_initial_dda(window->src.y);
-
-	tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
-	tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
-
-	tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
-	tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
-
-	tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
-
-	if (yuv && planar) {
-		tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
-		tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
-		value = window->stride[1] << 16 | window->stride[0];
-		tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
-	} else {
-		tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
-	}
-
-	if (window->bottom_up)
-		v_offset += window->src.h - 1;
+	DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div);
 
-	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
-	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
-
-	if (window->tiled) {
-		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
-			DC_WIN_BUFFER_ADDR_MODE_TILE;
-	} else {
-		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
-			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
-	}
-
-	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
-
-	value = WIN_ENABLE;
-
-	if (yuv) {
-		/* setup default colorspace conversion coefficients */
-		tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
-		tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
-		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
-		tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
-		tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
-		tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
-		tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
-		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
-
-		value |= CSC_ENABLE;
-	} else if (window->bits_per_pixel < 24) {
-		value |= COLOR_EXPAND;
-	}
-
-	if (window->bottom_up)
-		value |= INVERT_V;
-
-	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
-	/*
-	 * Disable blending and assume Window A is the bottom-most window,
-	 * Window C is the top-most window and Window B is in the middle.
-	 */
-	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
-	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
-
-	switch (index) {
-	case 0:
-		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
-		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
-		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
-		break;
-
-	case 1:
-		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
-		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
-		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
-		break;
-
-	case 2:
-		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
-		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
-		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
-		break;
-	}
-
-	tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
-	tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
 
 	return 0;
 }
 
-unsigned int tegra_dc_format(uint32_t format)
-{
-	switch (format) {
-	case DRM_FORMAT_XBGR8888:
-		return WIN_COLOR_DEPTH_R8G8B8A8;
-
-	case DRM_FORMAT_XRGB8888:
-		return WIN_COLOR_DEPTH_B8G8R8A8;
-
-	case DRM_FORMAT_RGB565:
-		return WIN_COLOR_DEPTH_B5G6R5;
-
-	case DRM_FORMAT_UYVY:
-		return WIN_COLOR_DEPTH_YCbCr422;
-
-	case DRM_FORMAT_YUV420:
-		return WIN_COLOR_DEPTH_YCbCr420P;
-
-	case DRM_FORMAT_YUV422:
-		return WIN_COLOR_DEPTH_YCbCr422P;
-
-	default:
-		break;
-	}
-
-	WARN(1, "unsupported pixel format %u, using default\n", format);
-	return WIN_COLOR_DEPTH_B8G8R8A8;
-}
-
 static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted,
@@ -648,12 +768,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 	struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct tegra_dc_window window;
-	unsigned long div, value;
+	u32 value;
 	int err;
 
 	drm_vblank_pre_modeset(crtc->dev, dc->pipe);
 
-	err = tegra_crtc_setup_clk(crtc, mode, &div);
+	err = tegra_crtc_setup_clk(crtc, mode);
 	if (err) {
 		dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
 		return err;
@@ -669,9 +789,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 		tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
 	}
 
-	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
-	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
-
 	/* setup window parameters */
 	memset(&window, 0, sizeof(window));
 	window.src.x = 0;
@@ -682,7 +799,8 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 	window.dst.y = 0;
 	window.dst.w = mode->hdisplay;
 	window.dst.h = mode->vdisplay;
-	window.format = tegra_dc_format(crtc->primary->fb->pixel_format);
+	window.format = tegra_dc_format(crtc->primary->fb->pixel_format,
+					&window.swap);
 	window.bits_per_pixel = crtc->primary->fb->bits_per_pixel;
 	window.stride[0] = crtc->primary->fb->pitches[0];
 	window.base[0] = bo->paddr;
@@ -728,10 +846,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
 		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
 
-	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
 	/* initialize timer */
 	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
 		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -991,6 +1105,8 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
 	DUMP_REG(DC_DISP_SD_BL_CONTROL);
 	DUMP_REG(DC_DISP_SD_HW_K_VALUES);
 	DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR_HI);
+	DUMP_REG(DC_DISP_BLEND_CURSOR_CONTROL);
 	DUMP_REG(DC_WIN_WIN_OPTIONS);
 	DUMP_REG(DC_WIN_BYTE_SWAP);
 	DUMP_REG(DC_WIN_BUFFER_CONTROL);
@@ -1096,26 +1212,26 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
 
 static int tegra_dc_init(struct host1x_client *client)
 {
-	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct drm_device *drm = dev_get_drvdata(client->parent);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
 
-	drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
+	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
 	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
-	err = tegra_dc_rgb_init(tegra->drm, dc);
+	err = tegra_dc_rgb_init(drm, dc);
 	if (err < 0 && err != -ENODEV) {
 		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
 		return err;
 	}
 
-	err = tegra_dc_add_planes(tegra->drm, dc);
+	err = tegra_dc_add_planes(drm, dc);
 	if (err < 0)
 		return err;
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
+		err = tegra_dc_debugfs_init(dc, drm->primary);
 		if (err < 0)
 			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
 	}
@@ -1160,14 +1276,17 @@ static const struct host1x_client_ops dc_client_ops = {
 
 static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
 	.supports_interlacing = false,
+	.supports_cursor = false,
 };
 
 static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
 	.supports_interlacing = false,
+	.supports_cursor = false,
 };
 
 static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 	.supports_interlacing = true,
+	.supports_cursor = true,
 };
 
 static const struct of_device_id tegra_dc_of_match[] = {
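A note on the DDA math that moved up in this file: the scaling increment is computed in the 20.12 fixed-point format of <drm/drm_fixed.h> as (in - 1) / (out - 1), clamped to a per-direction, per-bpp hardware maximum, and the initial DDA value is just the fractional part of the source offset. A self-contained re-derivation with plain integers (illustration only, not the kernel code):

	/* 20.12 fixed point: integer part << 12, fraction in the low 12 bits */
	static unsigned int dda_inc(unsigned int in, unsigned int out,
				    unsigned int max)
	{
		unsigned int outf = out > 1 ? out - 1 : 1;
		unsigned int inc = ((in - 1) << 12) / outf;

		return inc < (max << 12) ? inc : (max << 12);
	}

Downscaling 1920 source pixels to 960, for instance, yields ((1920 - 1) << 12) / 959, an increment of about 2.001 source pixels per destination pixel.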
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index c94101494826..78c5feff95d2 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -67,10 +67,12 @@
 #define WIN_A_ACT_REQ			(1 << 1)
 #define WIN_B_ACT_REQ			(1 << 2)
 #define WIN_C_ACT_REQ			(1 << 3)
+#define CURSOR_ACT_REQ			(1 << 7)
 #define GENERAL_UPDATE			(1 << 8)
 #define WIN_A_UPDATE			(1 << 9)
 #define WIN_B_UPDATE			(1 << 10)
 #define WIN_C_UPDATE			(1 << 11)
+#define CURSOR_UPDATE			(1 << 15)
 #define NC_HOST_TRIG			(1 << 24)
 
 #define DC_CMD_DISPLAY_WINDOW_HEADER	0x042
@@ -116,9 +118,10 @@
 #define DC_DISP_DISP_SIGNAL_OPTIONS1	0x401
 
 #define DC_DISP_DISP_WIN_OPTIONS	0x402
 #define HDMI_ENABLE			(1 << 30)
 #define DSI_ENABLE			(1 << 29)
 #define SOR_ENABLE			(1 << 25)
+#define CURSOR_ENABLE			(1 << 16)
 
 #define DC_DISP_DISP_MEM_HIGH_PRIORITY	0x403
 #define CURSOR_THRESHOLD(x)		(((x) & 0x03) << 24)
@@ -266,6 +269,14 @@
 #define DC_DISP_CURSOR_BACKGROUND	0x43d
 
 #define DC_DISP_CURSOR_START_ADDR	0x43e
+#define CURSOR_CLIP_DISPLAY		(0 << 28)
+#define CURSOR_CLIP_WIN_A		(1 << 28)
+#define CURSOR_CLIP_WIN_B		(2 << 28)
+#define CURSOR_CLIP_WIN_C		(3 << 28)
+#define CURSOR_SIZE_32x32		(0 << 24)
+#define CURSOR_SIZE_64x64		(1 << 24)
+#define CURSOR_SIZE_128x128		(2 << 24)
+#define CURSOR_SIZE_256x256		(3 << 24)
 #define DC_DISP_CURSOR_START_ADDR_NS	0x43f
 
 #define DC_DISP_CURSOR_POSITION		0x440
@@ -302,6 +313,19 @@
 #define INTERLACE_START			(1 << 1)
 #define INTERLACE_ENABLE		(1 << 0)
 
+#define DC_DISP_CURSOR_START_ADDR_HI	0x4ec
+#define DC_DISP_BLEND_CURSOR_CONTROL	0x4f1
+#define CURSOR_MODE_LEGACY		(0 << 24)
+#define CURSOR_MODE_NORMAL		(1 << 24)
+#define CURSOR_DST_BLEND_ZERO		(0 << 16)
+#define CURSOR_DST_BLEND_K1		(1 << 16)
+#define CURSOR_DST_BLEND_NEG_K1_TIMES_SRC (2 << 16)
+#define CURSOR_DST_BLEND_MASK		(3 << 16)
+#define CURSOR_SRC_BLEND_K1		(0 << 8)
+#define CURSOR_SRC_BLEND_K1_TIMES_SRC	(1 << 8)
+#define CURSOR_SRC_BLEND_MASK		(3 << 8)
+#define CURSOR_ALPHA			0xff
+
 #define DC_WIN_CSC_YOF			0x611
 #define DC_WIN_CSC_KYRGB		0x612
 #define DC_WIN_CSC_KUR			0x613
@@ -312,7 +336,8 @@
 #define DC_WIN_CSC_KVB			0x618
 
 #define DC_WIN_WIN_OPTIONS		0x700
-#define INVERT_V			(1 << 2)
+#define H_DIRECTION			(1 << 0)
+#define V_DIRECTION			(1 << 2)
 #define COLOR_EXPAND			(1 << 6)
 #define CSC_ENABLE			(1 << 18)
 #define WIN_ENABLE			(1 << 30)
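The new cursor controls funnel into two registers: DC_DISP_CURSOR_START_ADDR packs the clip-window selector (bits 28 and up), the cursor size (bits 24 and up) and the 1 KiB-aligned buffer address (shifted down by 10) into a single write, while DC_DISP_BLEND_CURSOR_CONTROL selects the blend mode. Mirroring how tegra_dc_cursor_set2() assembles the former (paddr here is an assumed, illustrative buffer address):

	u32 value = CURSOR_CLIP_DISPLAY | CURSOR_SIZE_64x64 |
		    ((paddr & 0xfffffc00) >> 10);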
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 005c19bd92df..3f132e356e9c 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
 
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
@@ -41,6 +42,7 @@ struct tegra_dpaux {
 	struct regulator *vdd;
 
 	struct completion complete;
+	struct work_struct work;
 	struct list_head list;
 };
 
@@ -49,6 +51,11 @@ static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
 	return container_of(aux, struct tegra_dpaux, aux);
 }
 
+static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
+{
+	return container_of(work, struct tegra_dpaux, work);
+}
+
 static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux,
 					      unsigned long offset)
 {
@@ -231,6 +238,14 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
 	return ret;
 }
 
+static void tegra_dpaux_hotplug(struct work_struct *work)
+{
+	struct tegra_dpaux *dpaux = work_to_dpaux(work);
+
+	if (dpaux->output)
+		drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+}
+
 static irqreturn_t tegra_dpaux_irq(int irq, void *data)
 {
 	struct tegra_dpaux *dpaux = data;
@@ -241,16 +256,8 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
 	value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
 	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
 
-	if (value & DPAUX_INTR_PLUG_EVENT) {
-		if (dpaux->output) {
-			drm_helper_hpd_irq_event(dpaux->output->connector.dev);
-		}
-	}
-
-	if (value & DPAUX_INTR_UNPLUG_EVENT) {
-		if (dpaux->output)
-			drm_helper_hpd_irq_event(dpaux->output->connector.dev);
-	}
+	if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT))
+		schedule_work(&dpaux->work);
 
 	if (value & DPAUX_INTR_IRQ_EVENT) {
 		/* TODO: handle this */
@@ -273,6 +280,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	if (!dpaux)
 		return -ENOMEM;
 
+	INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
 	init_completion(&dpaux->complete);
 	INIT_LIST_HEAD(&dpaux->list);
 	dpaux->dev = &pdev->dev;
@@ -332,7 +340,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	dpaux->aux.transfer = tegra_dpaux_transfer;
 	dpaux->aux.dev = &pdev->dev;
 
-	err = drm_dp_aux_register_i2c_bus(&dpaux->aux);
+	err = drm_dp_aux_register(&dpaux->aux);
 	if (err < 0)
 		return err;
 
@@ -355,12 +363,14 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
 {
 	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
 
-	drm_dp_aux_unregister_i2c_bus(&dpaux->aux);
+	drm_dp_aux_unregister(&dpaux->aux);
 
 	mutex_lock(&dpaux_lock);
 	list_del(&dpaux->list);
 	mutex_unlock(&dpaux_lock);
 
+	cancel_work_sync(&dpaux->work);
+
 	clk_disable_unprepare(dpaux->clk_parent);
 	reset_control_assert(dpaux->rst);
 	clk_disable_unprepare(dpaux->clk);
@@ -404,6 +414,7 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
 	unsigned long timeout;
 	int err;
 
+	output->connector.polled = DRM_CONNECTOR_POLL_HPD;
 	dpaux->output = output;
 
 	err = regulator_enable(dpaux->vdd);
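The structural change in this file is moving hotplug handling off the hard IRQ path: drm_helper_hpd_irq_event() takes the mode-config mutex and re-probes connectors, which must not happen in interrupt context, so the handler now just schedules a work item. The three halves of the pattern as wired up above:

	INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);	/* probe */
	schedule_work(&dpaux->work);			/* IRQ handler */
	cancel_work_sync(&dpaux->work);			/* remove */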
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6f5b6e2f552e..3396f9f6a9f7 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -33,7 +33,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	if (!tegra)
 		return -ENOMEM;
 
-	dev_set_drvdata(drm->dev, tegra);
 	mutex_init(&tegra->clients_lock);
 	INIT_LIST_HEAD(&tegra->clients);
 	drm->dev_private = tegra;
@@ -640,14 +639,40 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
 	return 0;
 }
 
-static int host1x_drm_probe(struct host1x_device *device)
+static int host1x_drm_probe(struct host1x_device *dev)
 {
-	return drm_host1x_init(&tegra_drm_driver, device);
+	struct drm_driver *driver = &tegra_drm_driver;
+	struct drm_device *drm;
+	int err;
+
+	drm = drm_dev_alloc(driver, &dev->dev);
+	if (!drm)
+		return -ENOMEM;
+
+	drm_dev_set_unique(drm, dev_name(&dev->dev));
+	dev_set_drvdata(&dev->dev, drm);
+
+	err = drm_dev_register(drm, 0);
+	if (err < 0)
+		goto unref;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+unref:
+	drm_dev_unref(drm);
+	return err;
 }
 
-static int host1x_drm_remove(struct host1x_device *device)
+static int host1x_drm_remove(struct host1x_device *dev)
 {
-	drm_host1x_exit(&tegra_drm_driver, device);
+	struct drm_device *drm = dev_get_drvdata(&dev->dev);
+
+	drm_dev_unregister(drm);
+	drm_dev_unref(drm);
 
 	return 0;
 }
@@ -666,6 +691,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra114-gr3d", },
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
+	{ .compatible = "nvidia,tegra124-hdmi", },
 	{ /* sentinel */ }
 };
 
671 697
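With bus.c gone, the host1x glue drives the DRM device lifecycle directly, and the pairing is strict: drm_dev_alloc() hands back a device holding one reference, drm_dev_register() publishes the minors, and teardown reverses both steps. Schematically:

	drm = drm_dev_alloc(driver, parent);	/* refcount 1, unpublished */
	err = drm_dev_register(drm, 0);		/* minors become visible */
	...
	drm_dev_unregister(drm);		/* hide minors */
	drm_dev_unref(drm);			/* drop the final reference */

drm_dev_set_unique() stands in for the old drm_bus .set_busid hook by recording the device name up front.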
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 126332c3ecbb..6b8fe9d86ed4 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -80,13 +80,13 @@ host1x_to_drm_client(struct host1x_client *client)
 	return container_of(client, struct tegra_drm_client, base);
 }
 
-extern int tegra_drm_register_client(struct tegra_drm *tegra,
-				     struct tegra_drm_client *client);
-extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
-				       struct tegra_drm_client *client);
+int tegra_drm_register_client(struct tegra_drm *tegra,
+			      struct tegra_drm_client *client);
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+				struct tegra_drm_client *client);
 
-extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
-extern int tegra_drm_exit(struct tegra_drm *tegra);
+int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+int tegra_drm_exit(struct tegra_drm *tegra);
 
 struct tegra_dc_soc_info;
 struct tegra_output;
@@ -156,6 +156,7 @@ struct tegra_dc_window {
 	} dst;
 	unsigned int bits_per_pixel;
 	unsigned int format;
+	unsigned int swap;
 	unsigned int stride[2];
 	unsigned long base[3];
 	bool bottom_up;
@@ -163,19 +164,15 @@ struct tegra_dc_window {
 };
 
 /* from dc.c */
-extern unsigned int tegra_dc_format(uint32_t format);
-extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
-				 const struct tegra_dc_window *window);
-extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
-extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
-extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
-				      struct drm_file *file);
+void tegra_dc_enable_vblank(struct tegra_dc *dc);
+void tegra_dc_disable_vblank(struct tegra_dc *dc);
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 
 struct tegra_output_ops {
 	int (*enable)(struct tegra_output *output);
 	int (*disable)(struct tegra_output *output);
 	int (*setup_clock)(struct tegra_output *output, struct clk *clk,
-			   unsigned long pclk);
+			   unsigned long pclk, unsigned int *div);
 	int (*check_mode)(struct tegra_output *output,
 			  struct drm_display_mode *mode,
 			  enum drm_mode_status *status);
@@ -233,10 +230,11 @@ static inline int tegra_output_disable(struct tegra_output *output)
 }
 
 static inline int tegra_output_setup_clock(struct tegra_output *output,
-					   struct clk *clk, unsigned long pclk)
+					   struct clk *clk, unsigned long pclk,
+					   unsigned int *div)
 {
 	if (output && output->ops && output->ops->setup_clock)
-		return output->ops->setup_clock(output, clk, pclk);
+		return output->ops->setup_clock(output, clk, pclk, div);
 
 	return output ? -ENOSYS : -EINVAL;
 }
@@ -251,27 +249,21 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
 	return output ? -ENOSYS : -EINVAL;
 }
 
-/* from bus.c */
-int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
-void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
-
 /* from rgb.c */
-extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
-extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
-extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
-extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+int tegra_dc_rgb_probe(struct tegra_dc *dc);
+int tegra_dc_rgb_remove(struct tegra_dc *dc);
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+int tegra_dc_rgb_exit(struct tegra_dc *dc);
 
 /* from output.c */
-extern int tegra_output_probe(struct tegra_output *output);
-extern int tegra_output_remove(struct tegra_output *output);
-extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
-extern int tegra_output_exit(struct tegra_output *output);
+int tegra_output_probe(struct tegra_output *output);
+int tegra_output_remove(struct tegra_output *output);
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+int tegra_output_exit(struct tegra_output *output);
 
 /* from dpaux.c */
-
 struct tegra_dpaux;
 struct drm_dp_link;
-struct drm_dp_aux;
 
 struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
 enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
@@ -288,10 +280,10 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index);
 bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
 bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
-extern int tegra_drm_fb_init(struct drm_device *drm);
-extern void tegra_drm_fb_exit(struct drm_device *drm);
+int tegra_drm_fb_init(struct drm_device *drm);
+void tegra_drm_fb_exit(struct drm_device *drm);
 #ifdef CONFIG_DRM_TEGRA_FBDEV
-extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 #endif
 
 extern struct platform_driver tegra_dc_driver;
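The prototype churn in this header is purely stylistic: `extern` carries no meaning on function declarations in C, so the cleanup drops it, as in:

	-extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
	+int tegra_dc_rgb_probe(struct tegra_dc *dc);

It stays on object declarations such as `extern struct platform_driver tegra_dc_driver;`, where it is load-bearing.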
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 0e599f0417c0..bd56f2affa78 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
+#include <linux/regulator/consumer.h>
+
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
 
@@ -43,11 +45,15 @@ struct tegra_dsi {
 	struct drm_minor *minor;
 	struct dentry *debugfs;
 
+	unsigned long flags;
 	enum mipi_dsi_pixel_format format;
 	unsigned int lanes;
 
 	struct tegra_mipi_device *mipi;
 	struct mipi_dsi_host host;
+
+	struct regulator *vdd;
+	bool enabled;
 };
 
 static inline struct tegra_dsi *
@@ -244,8 +250,10 @@ static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
 #define PKT_LP		(1 << 30)
 #define NUM_PKT_SEQ	12
 
-/* non-burst mode with sync-end */
-static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+/*
+ * non-burst mode with sync pulses
+ */
+static const u32 pkt_seq_video_non_burst_sync_pulses[NUM_PKT_SEQ] = {
 	[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
 	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
 	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
@@ -280,6 +288,36 @@ static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
 	       PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
 };
 
+/*
+ * non-burst mode with sync events
+ */
+static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
+	[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+	       PKT_LP,
+	[ 1] = 0,
+	[ 2] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+	       PKT_LP,
+	[ 3] = 0,
+	[ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+	       PKT_LP,
+	[ 5] = 0,
+	[ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+	       PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+	[ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+	[ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+	       PKT_LP,
+	[ 9] = 0,
+	[10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+	       PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+	[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+};
+
 static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
 {
 	struct mipi_dphy_timing timing;
@@ -361,28 +399,70 @@ static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
361 return 0; 399 return 0;
362} 400}
363 401
402static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
403 enum tegra_dsi_format *fmt)
404{
405 switch (format) {
406 case MIPI_DSI_FMT_RGB888:
407 *fmt = TEGRA_DSI_FORMAT_24P;
408 break;
409
410 case MIPI_DSI_FMT_RGB666:
411 *fmt = TEGRA_DSI_FORMAT_18NP;
412 break;
413
414 case MIPI_DSI_FMT_RGB666_PACKED:
415 *fmt = TEGRA_DSI_FORMAT_18P;
416 break;
417
418 case MIPI_DSI_FMT_RGB565:
419 *fmt = TEGRA_DSI_FORMAT_16P;
420 break;
421
422 default:
423 return -EINVAL;
424 }
425
426 return 0;
427}
428
364static int tegra_output_dsi_enable(struct tegra_output *output) 429static int tegra_output_dsi_enable(struct tegra_output *output)
365{ 430{
366 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 431 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
367 struct drm_display_mode *mode = &dc->base.mode; 432 struct drm_display_mode *mode = &dc->base.mode;
368 unsigned int hact, hsw, hbp, hfp, i, mul, div; 433 unsigned int hact, hsw, hbp, hfp, i, mul, div;
369 struct tegra_dsi *dsi = to_dsi(output); 434 struct tegra_dsi *dsi = to_dsi(output);
370 /* FIXME: don't hardcode this */ 435 enum tegra_dsi_format format;
371 const u32 *pkt_seq = pkt_seq_vnb_syne;
372 unsigned long value; 436 unsigned long value;
437 const u32 *pkt_seq;
373 int err; 438 int err;
374 439
440 if (dsi->enabled)
441 return 0;
442
443 if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
444 DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
445 pkt_seq = pkt_seq_video_non_burst_sync_pulses;
446 } else {
447 DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
448 pkt_seq = pkt_seq_video_non_burst_sync_events;
449 }
450
375 err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); 451 err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
376 if (err < 0) 452 if (err < 0)
377 return err; 453 return err;
378 454
455 err = tegra_dsi_get_format(dsi->format, &format);
456 if (err < 0)
457 return err;
458
379 err = clk_enable(dsi->clk); 459 err = clk_enable(dsi->clk);
380 if (err < 0) 460 if (err < 0)
381 return err; 461 return err;
382 462
383 reset_control_deassert(dsi->rst); 463 reset_control_deassert(dsi->rst);
384 464
385 value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) | 465 value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
386 DSI_CONTROL_LANES(dsi->lanes - 1) | 466 DSI_CONTROL_LANES(dsi->lanes - 1) |
387 DSI_CONTROL_SOURCE(dc->pipe); 467 DSI_CONTROL_SOURCE(dc->pipe);
388 tegra_dsi_writel(dsi, value, DSI_CONTROL); 468 tegra_dsi_writel(dsi, value, DSI_CONTROL);
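
The new tegra_dsi_get_format() exists because the MIPI pixel-format enum and the DSI_CONTROL_FORMAT hardware field use different encodings; the old code wrote dsi->format, a MIPI enum value, straight into the register. A small standalone illustration, with the MIPI ordering assumed from include/drm/drm_mipi_dsi.h and the hardware ordering taken from the dsi.h hunk further down:

#include <stdio.h>

/* ordering as in include/drm/drm_mipi_dsi.h (assumed) */
enum mipi_dsi_pixel_format {
        MIPI_DSI_FMT_RGB888,
        MIPI_DSI_FMT_RGB666,
        MIPI_DSI_FMT_RGB666_PACKED,
        MIPI_DSI_FMT_RGB565,
};

/* hardware encoding, as added to dsi.h by this patch */
enum tegra_dsi_format {
        TEGRA_DSI_FORMAT_16P,
        TEGRA_DSI_FORMAT_18NP,
        TEGRA_DSI_FORMAT_18P,
        TEGRA_DSI_FORMAT_24P,
};

int main(void)
{
        /* RGB888 is MIPI value 0, but the 24P hardware encoding is 3; */
        /* writing the raw MIPI value into DSI_CONTROL would select 16P */
        printf("MIPI RGB888 = %d, Tegra 24P = %d\n",
               (int)MIPI_DSI_FMT_RGB888, (int)TEGRA_DSI_FORMAT_24P);
        return 0;
}
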
@@ -454,6 +534,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
454 value |= DSI_POWER_CONTROL_ENABLE; 534 value |= DSI_POWER_CONTROL_ENABLE;
455 tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); 535 tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
456 536
537 dsi->enabled = true;
538
457 return 0; 539 return 0;
458} 540}
459 541
@@ -463,9 +545,12 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
463 struct tegra_dsi *dsi = to_dsi(output); 545 struct tegra_dsi *dsi = to_dsi(output);
464 unsigned long value; 546 unsigned long value;
465 547
548 if (!dsi->enabled)
549 return 0;
550
466 /* disable DSI controller */ 551 /* disable DSI controller */
467 value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); 552 value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
468 value &= DSI_POWER_CONTROL_ENABLE; 553 value &= ~DSI_POWER_CONTROL_ENABLE;
469 tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); 554 tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
470 555
471 /* 556 /*
@@ -492,30 +577,44 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
492 577
493 clk_disable(dsi->clk); 578 clk_disable(dsi->clk);
494 579
580 dsi->enabled = false;
581
495 return 0; 582 return 0;
496} 583}
497 584
498static int tegra_output_dsi_setup_clock(struct tegra_output *output, 585static int tegra_output_dsi_setup_clock(struct tegra_output *output,
499 struct clk *clk, unsigned long pclk) 586 struct clk *clk, unsigned long pclk,
587 unsigned int *divp)
500{ 588{
501 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 589 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
502 struct drm_display_mode *mode = &dc->base.mode; 590 struct drm_display_mode *mode = &dc->base.mode;
503 unsigned int timeout, mul, div, vrefresh; 591 unsigned int timeout, mul, div, vrefresh;
504 struct tegra_dsi *dsi = to_dsi(output); 592 struct tegra_dsi *dsi = to_dsi(output);
505 unsigned long bclk, plld, value; 593 unsigned long bclk, plld, value;
506 struct clk *base;
507 int err; 594 int err;
508 595
509 err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); 596 err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
510 if (err < 0) 597 if (err < 0)
511 return err; 598 return err;
512 599
600 DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
513 vrefresh = drm_mode_vrefresh(mode); 601 vrefresh = drm_mode_vrefresh(mode);
602 DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
514 603
515 pclk = mode->htotal * mode->vtotal * vrefresh; 604 /* compute byte clock */
516 bclk = (pclk * mul) / (div * dsi->lanes); 605 bclk = (pclk * mul) / (div * dsi->lanes);
517 plld = DIV_ROUND_UP(bclk * 8, 1000000); 606
518 pclk = (plld * 1000000) / 2; 607 /*
608 * Compute bit clock and round up to the next MHz.
609 */
610 plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
611
612 /*
613 * We divide the frequency by two here, but we make up for that by
614 * setting the shift clock divider (further below) to half of the
615 * correct value.
616 */
617 plld /= 2;
519 618
520 err = clk_set_parent(clk, dsi->clk_parent); 619 err = clk_set_parent(clk, dsi->clk_parent);
521 if (err < 0) { 620 if (err < 0) {
@@ -523,20 +622,26 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
523 return err; 622 return err;
524 } 623 }
525 624
526 base = clk_get_parent(dsi->clk_parent); 625 err = clk_set_rate(dsi->clk_parent, plld);
527
528 /*
529 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
530 * respectively, each of which divides the base pll_d by 2.
531 */
532 err = clk_set_rate(base, pclk * 2);
533 if (err < 0) { 626 if (err < 0) {
534 dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n", 627 dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
535 pclk * 2); 628 plld);
536 return err; 629 return err;
537 } 630 }
538 631
539 /* 632 /*
633 * Derive pixel clock from bit clock using the shift clock divider.
634 * Note that this is only half of what we would expect, but we need
635 * that to make up for the fact that we divided the bit clock by a
636 * factor of two above.
637 *
638 * It's not clear exactly why this is necessary, but the display is
639 * not working properly otherwise. Perhaps the PLLs cannot generate
640 * frequencies sufficiently high.
641 */
642 *divp = ((8 * mul) / (div * dsi->lanes)) - 2;
643
644 /*
540 * XXX: Move the below somewhere else so that we don't need to have 645 * XXX: Move the below somewhere else so that we don't need to have
541 * access to the vrefresh in this function? 646 * access to the vrefresh in this function?
542 */ 647 */
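
Worked numbers for the clock setup above, assuming MIPI_DSI_FMT_RGB888 (mul = 3, div = 1) on four lanes at a 66 MHz pixel clock; the shift clock divider relation pclk = 2 * rate / (div + 2) follows from the rgb.c hunk later in this patch:

#include <stdio.h>

int main(void)
{
        unsigned long pclk = 66000000;          /* 66 MHz pixel clock */
        unsigned long mul = 3, div = 1, lanes = 4; /* RGB888, 4 lanes */
        unsigned long bclk, plld, shift_div;

        /* byte clock: one byte per lane per byte-clock tick */
        bclk = (pclk * mul) / (div * lanes);    /* 49,500,000 */

        /* bit clock, rounded up to the next MHz, then halved */
        plld = ((bclk * 8 + 999999) / 1000000) * 1000000;
        plld /= 2;                              /* 198,000,000 */

        /* shift clock divider compensates for the halved PLL rate */
        shift_div = (8 * mul) / (div * lanes) - 2; /* 4 */

        /* pclk = 2 * rate / (div + 2), per the rgb.c formula */
        printf("recovered pclk = %lu Hz\n", 2 * plld / (shift_div + 2));
        return 0;
}

The halved PLL rate and the halved divider cancel, so the recovered pixel clock matches the requested 66 MHz exactly.
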
@@ -610,61 +715,32 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
610 715
611static int tegra_dsi_init(struct host1x_client *client) 716static int tegra_dsi_init(struct host1x_client *client)
612{ 717{
613 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 718 struct drm_device *drm = dev_get_drvdata(client->parent);
614 struct tegra_dsi *dsi = host1x_client_to_dsi(client); 719 struct tegra_dsi *dsi = host1x_client_to_dsi(client);
615 unsigned long value, i;
616 int err; 720 int err;
617 721
618 dsi->output.type = TEGRA_OUTPUT_DSI; 722 dsi->output.type = TEGRA_OUTPUT_DSI;
619 dsi->output.dev = client->dev; 723 dsi->output.dev = client->dev;
620 dsi->output.ops = &dsi_ops; 724 dsi->output.ops = &dsi_ops;
621 725
622 err = tegra_output_init(tegra->drm, &dsi->output); 726 err = tegra_output_init(drm, &dsi->output);
623 if (err < 0) { 727 if (err < 0) {
624 dev_err(client->dev, "output setup failed: %d\n", err); 728 dev_err(client->dev, "output setup failed: %d\n", err);
625 return err; 729 return err;
626 } 730 }
627 731
628 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 732 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
629 err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary); 733 err = tegra_dsi_debugfs_init(dsi, drm->primary);
630 if (err < 0) 734 if (err < 0)
631 dev_err(dsi->dev, "debugfs setup failed: %d\n", err); 735 dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
632 } 736 }
633 737
634 /*
635 * enable high-speed mode, checksum generation, ECC generation and
636 * disable raw mode
637 */
638 value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
639 value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
640 DSI_HOST_CONTROL_HS;
641 value &= ~DSI_HOST_CONTROL_RAW;
642 tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
643
644 tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
645 tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
646
647 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
648
649 for (i = 0; i < 8; i++) {
650 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
651 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
652 }
653
654 for (i = 0; i < 12; i++)
655 tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
656
657 tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
658
659 err = tegra_dsi_pad_calibrate(dsi); 738 err = tegra_dsi_pad_calibrate(dsi);
660 if (err < 0) { 739 if (err < 0) {
661 dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); 740 dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
662 return err; 741 return err;
663 } 742 }
664 743
665 tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
666 usleep_range(300, 1000);
667
668 return 0; 744 return 0;
669} 745}
670 746
@@ -715,66 +791,13 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
715 return 0; 791 return 0;
716} 792}
717 793
718static void tegra_dsi_initialize(struct tegra_dsi *dsi)
719{
720 unsigned int i;
721
722 tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
723
724 tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
725 tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
726 tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
727
728 tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
729 tegra_dsi_writel(dsi, 0, DSI_CONTROL);
730
731 tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
732 tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
733
734 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
735
736 for (i = 0; i < 8; i++) {
737 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
738 tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
739 }
740
741 for (i = 0; i < 12; i++)
742 tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
743
744 tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
745
746 for (i = 0; i < 4; i++)
747 tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
748
749 tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
750 tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
751 tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
752 tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
753
754 tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
755 tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
756 tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
757
758 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
759 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
760 tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
761 tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
762 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
763 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
764 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
765 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
766
767 tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
768 tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
769 tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
770}
771
772static int tegra_dsi_host_attach(struct mipi_dsi_host *host, 794static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
773 struct mipi_dsi_device *device) 795 struct mipi_dsi_device *device)
774{ 796{
775 struct tegra_dsi *dsi = host_to_tegra(host); 797 struct tegra_dsi *dsi = host_to_tegra(host);
776 struct tegra_output *output = &dsi->output; 798 struct tegra_output *output = &dsi->output;
777 799
800 dsi->flags = device->mode_flags;
778 dsi->format = device->format; 801 dsi->format = device->format;
779 dsi->lanes = device->lanes; 802 dsi->lanes = device->lanes;
780 803
@@ -829,6 +852,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
829 * attaches to the DSI host, the parameters will be taken from 852 * attaches to the DSI host, the parameters will be taken from
830 * the attached device. 853 * the attached device.
831 */ 854 */
855 dsi->flags = MIPI_DSI_MODE_VIDEO;
832 dsi->format = MIPI_DSI_FMT_RGB888; 856 dsi->format = MIPI_DSI_FMT_RGB888;
833 dsi->lanes = 4; 857 dsi->lanes = 4;
834 858
@@ -872,6 +896,18 @@ static int tegra_dsi_probe(struct platform_device *pdev)
872 return err; 896 return err;
873 } 897 }
874 898
899 dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
900 if (IS_ERR(dsi->vdd)) {
901 dev_err(&pdev->dev, "cannot get VDD supply\n");
902 return PTR_ERR(dsi->vdd);
903 }
904
905 err = regulator_enable(dsi->vdd);
906 if (err < 0) {
907 dev_err(&pdev->dev, "cannot enable VDD supply\n");
908 return err;
909 }
910
875 err = tegra_dsi_setup_clocks(dsi); 911 err = tegra_dsi_setup_clocks(dsi);
876 if (err < 0) { 912 if (err < 0) {
877 dev_err(&pdev->dev, "cannot setup clocks\n"); 913 dev_err(&pdev->dev, "cannot setup clocks\n");
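
The regulator hookup above follows the usual devm pattern; here is a minimal sketch with the same supply name. devm_regulator_get() ties the handle's lifetime to the device, but the enable still needs the explicit regulator_disable() that this patch adds to tegra_dsi_remove() below.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* hypothetical reduction of the probe hunk above */
static int example_probe(struct platform_device *pdev)
{
        struct regulator *vdd;
        int err;

        vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
        if (IS_ERR(vdd))
                return PTR_ERR(vdd);

        err = regulator_enable(vdd);
        if (err < 0)
                return err;

        /* ... rest of probe; remove() must call regulator_disable() */
        return 0;
}
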
@@ -883,8 +919,6 @@ static int tegra_dsi_probe(struct platform_device *pdev)
883 if (IS_ERR(dsi->regs)) 919 if (IS_ERR(dsi->regs))
884 return PTR_ERR(dsi->regs); 920 return PTR_ERR(dsi->regs);
885 921
886 tegra_dsi_initialize(dsi);
887
888 dsi->mipi = tegra_mipi_request(&pdev->dev); 922 dsi->mipi = tegra_mipi_request(&pdev->dev);
889 if (IS_ERR(dsi->mipi)) 923 if (IS_ERR(dsi->mipi))
890 return PTR_ERR(dsi->mipi); 924 return PTR_ERR(dsi->mipi);
@@ -929,9 +963,11 @@ static int tegra_dsi_remove(struct platform_device *pdev)
929 mipi_dsi_host_unregister(&dsi->host); 963 mipi_dsi_host_unregister(&dsi->host);
930 tegra_mipi_free(dsi->mipi); 964 tegra_mipi_free(dsi->mipi);
931 965
966 regulator_disable(dsi->vdd);
932 clk_disable_unprepare(dsi->clk_parent); 967 clk_disable_unprepare(dsi->clk_parent);
933 clk_disable_unprepare(dsi->clk_lp); 968 clk_disable_unprepare(dsi->clk_lp);
934 clk_disable_unprepare(dsi->clk); 969 clk_disable_unprepare(dsi->clk);
970 reset_control_assert(dsi->rst);
935 971
936 err = tegra_output_remove(&dsi->output); 972 err = tegra_output_remove(&dsi->output);
937 if (err < 0) { 973 if (err < 0) {
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 1db5cc24ea91..5ce610d08d77 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -117,4 +117,14 @@
117#define DSI_INIT_SEQ_DATA_14 0x5e 117#define DSI_INIT_SEQ_DATA_14 0x5e
118#define DSI_INIT_SEQ_DATA_15 0x5f 118#define DSI_INIT_SEQ_DATA_15 0x5f
119 119
120/*
121 * pixel format as used in the DSI_CONTROL_FORMAT field
122 */
123enum tegra_dsi_format {
124 TEGRA_DSI_FORMAT_16P,
125 TEGRA_DSI_FORMAT_18NP,
126 TEGRA_DSI_FORMAT_18P,
127 TEGRA_DSI_FORMAT_24P,
128};
129
120#endif 130#endif
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index f7fca09d4921..9798a7080322 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -346,11 +346,8 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
346 346
347void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) 347void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
348{ 348{
349 if (fbdev) { 349 if (fbdev)
350 drm_modeset_lock_all(fbdev->base.dev); 350 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base);
351 drm_fb_helper_restore_fbdev_mode(&fbdev->base);
352 drm_modeset_unlock_all(fbdev->base.dev);
353 }
354} 351}
355 352
356static void tegra_fb_output_poll_changed(struct drm_device *drm) 353static void tegra_fb_output_poll_changed(struct drm_device *drm)
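
For reference, the removed pattern looked like the sketch below; drm_fb_helper_restore_fbdev_mode_unlocked() takes the modeset locks itself, so callers drop the explicit lock/unlock pair. Names are copied from the removed lines, wrapped in a hypothetical helper:

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>

/* old pattern, now folded into the _unlocked helper */
static void example_restore(struct drm_fb_helper *helper)
{
        drm_modeset_lock_all(helper->dev);
        drm_fb_helper_restore_fbdev_mode(helper);
        drm_modeset_unlock_all(helper->dev);
}
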
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index bcf9895cef9f..aa85b7b26f10 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -169,7 +169,8 @@ err:
169 return ERR_PTR(ret); 169 return ERR_PTR(ret);
170} 170}
171 171
172struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf) 172static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
173 struct dma_buf *buf)
173{ 174{
174 struct dma_buf_attachment *attach; 175 struct dma_buf_attachment *attach;
175 struct tegra_bo *bo; 176 struct tegra_bo *bo;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 2c7ca748edf5..7c53941f2a9e 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -28,7 +28,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
28static int gr2d_init(struct host1x_client *client) 28static int gr2d_init(struct host1x_client *client)
29{ 29{
30 struct tegra_drm_client *drm = host1x_to_drm_client(client); 30 struct tegra_drm_client *drm = host1x_to_drm_client(client);
31 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 31 struct drm_device *dev = dev_get_drvdata(client->parent);
32 unsigned long flags = HOST1X_SYNCPT_HAS_BASE; 32 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
33 struct gr2d *gr2d = to_gr2d(drm); 33 struct gr2d *gr2d = to_gr2d(drm);
34 34
@@ -42,17 +42,17 @@ static int gr2d_init(struct host1x_client *client)
42 return -ENOMEM; 42 return -ENOMEM;
43 } 43 }
44 44
45 return tegra_drm_register_client(tegra, drm); 45 return tegra_drm_register_client(dev->dev_private, drm);
46} 46}
47 47
48static int gr2d_exit(struct host1x_client *client) 48static int gr2d_exit(struct host1x_client *client)
49{ 49{
50 struct tegra_drm_client *drm = host1x_to_drm_client(client); 50 struct tegra_drm_client *drm = host1x_to_drm_client(client);
51 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 51 struct drm_device *dev = dev_get_drvdata(client->parent);
52 struct gr2d *gr2d = to_gr2d(drm); 52 struct gr2d *gr2d = to_gr2d(drm);
53 int err; 53 int err;
54 54
55 err = tegra_drm_unregister_client(tegra, drm); 55 err = tegra_drm_unregister_client(dev->dev_private, drm);
56 if (err < 0) 56 if (err < 0)
57 return err; 57 return err;
58 58
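
The drvdata switch in gr2d (and identically in gr3d below) means the host1x parent device now carries the drm_device rather than the tegra_drm instance, so clients reach the latter through dev_private. A sketch of the new lookup path, assuming the driver-internal struct tegra_drm type:

#include <linux/host1x.h>
#include <drm/drmP.h>

struct tegra_drm;       /* driver-internal, declared in tegra/drm.h */

static inline struct tegra_drm *example_tegra(struct host1x_client *client)
{
        struct drm_device *drm = dev_get_drvdata(client->parent);

        return drm->dev_private;
}
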
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 0cbb24b1ae04..30f5ba9bd6d0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -37,7 +37,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
37static int gr3d_init(struct host1x_client *client) 37static int gr3d_init(struct host1x_client *client)
38{ 38{
39 struct tegra_drm_client *drm = host1x_to_drm_client(client); 39 struct tegra_drm_client *drm = host1x_to_drm_client(client);
40 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 40 struct drm_device *dev = dev_get_drvdata(client->parent);
41 unsigned long flags = HOST1X_SYNCPT_HAS_BASE; 41 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
42 struct gr3d *gr3d = to_gr3d(drm); 42 struct gr3d *gr3d = to_gr3d(drm);
43 43
@@ -51,17 +51,17 @@ static int gr3d_init(struct host1x_client *client)
51 return -ENOMEM; 51 return -ENOMEM;
52 } 52 }
53 53
54 return tegra_drm_register_client(tegra, drm); 54 return tegra_drm_register_client(dev->dev_private, drm);
55} 55}
56 56
57static int gr3d_exit(struct host1x_client *client) 57static int gr3d_exit(struct host1x_client *client)
58{ 58{
59 struct tegra_drm_client *drm = host1x_to_drm_client(client); 59 struct tegra_drm_client *drm = host1x_to_drm_client(client);
60 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 60 struct drm_device *dev = dev_get_drvdata(client->parent);
61 struct gr3d *gr3d = to_gr3d(drm); 61 struct gr3d *gr3d = to_gr3d(drm);
62 int err; 62 int err;
63 63
64 err = tegra_drm_unregister_client(tegra, drm); 64 err = tegra_drm_unregister_client(dev->dev_private, drm);
65 if (err < 0) 65 if (err < 0)
66 return err; 66 return err;
67 67
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6928015d11a4..ba067bb767e3 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -42,8 +42,9 @@ struct tegra_hdmi {
42 struct device *dev; 42 struct device *dev;
43 bool enabled; 43 bool enabled;
44 44
45 struct regulator *vdd; 45 struct regulator *hdmi;
46 struct regulator *pll; 46 struct regulator *pll;
47 struct regulator *vdd;
47 48
48 void __iomem *regs; 49 void __iomem *regs;
49 unsigned int irq; 50 unsigned int irq;
@@ -317,6 +318,85 @@ static const struct tmds_config tegra114_tmds_config[] = {
317 }, 318 },
318}; 319};
319 320
321static const struct tmds_config tegra124_tmds_config[] = {
322 { /* 480p/576p / 25.2MHz/27MHz modes */
323 .pclk = 27000000,
324 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
325 SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
326 .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
327 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
328 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
329 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
330 PE_CURRENT3(PE_CURRENT_0_mA_T114),
331 .drive_current =
332 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
333 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
334 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
335 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
336 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
337 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
338 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
339 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
340 }, { /* 720p / 74.25MHz modes */
341 .pclk = 74250000,
342 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
343 SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
344 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
345 SOR_PLL_TMDS_TERMADJ(0),
346 .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
347 PE_CURRENT1(PE_CURRENT_15_mA_T114) |
348 PE_CURRENT2(PE_CURRENT_15_mA_T114) |
349 PE_CURRENT3(PE_CURRENT_15_mA_T114),
350 .drive_current =
351 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
352 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
353 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
354 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
355 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
356 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
357 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
358 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
359 }, { /* 1080p / 148.5MHz modes */
360 .pclk = 148500000,
361 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
362 SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
363 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
364 SOR_PLL_TMDS_TERMADJ(0),
365 .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
366 PE_CURRENT1(PE_CURRENT_10_mA_T114) |
367 PE_CURRENT2(PE_CURRENT_10_mA_T114) |
368 PE_CURRENT3(PE_CURRENT_10_mA_T114),
369 .drive_current =
370 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
371 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
372 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
373 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
374 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
375 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
376 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
377 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
378 }, { /* 225/297MHz modes */
379 .pclk = UINT_MAX,
380 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
381 SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
382 .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
383 | SOR_PLL_TMDS_TERM_ENABLE,
384 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
385 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
386 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
387 PE_CURRENT3(PE_CURRENT_0_mA_T114),
388 .drive_current =
389 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
390 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
391 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
392 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
393 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
394 PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
395 PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
396 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
397 },
398};
399
320static const struct tegra_hdmi_audio_config * 400static const struct tegra_hdmi_audio_config *
321tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) 401tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
322{ 402{
@@ -716,13 +796,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
716 return err; 796 return err;
717 } 797 }
718 798
719 /* 799 err = regulator_enable(hdmi->vdd);
720 * This assumes that the display controller will divide its parent
721 * clock by 2 to generate the pixel clock.
722 */
723 err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
724 if (err < 0) { 800 if (err < 0) {
725 dev_err(hdmi->dev, "failed to setup clock: %d\n", err); 801 dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
726 return err; 802 return err;
727 } 803 }
728 804
@@ -730,7 +806,7 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
730 if (err < 0) 806 if (err < 0)
731 return err; 807 return err;
732 808
733 err = clk_enable(hdmi->clk); 809 err = clk_prepare_enable(hdmi->clk);
734 if (err < 0) { 810 if (err < 0) {
735 dev_err(hdmi->dev, "failed to enable clock: %d\n", err); 811 dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
736 return err; 812 return err;
@@ -740,6 +816,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
740 usleep_range(1000, 2000); 816 usleep_range(1000, 2000);
741 reset_control_deassert(hdmi->rst); 817 reset_control_deassert(hdmi->rst);
742 818
819 /* power up sequence */
820 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
821 value &= ~SOR_PLL_PDBG;
822 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
823
824 usleep_range(10, 20);
825
826 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
827 value &= ~SOR_PLL_PWR;
828 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
829
743 tegra_dc_writel(dc, VSYNC_H_POSITION(1), 830 tegra_dc_writel(dc, VSYNC_H_POSITION(1),
744 DC_DISP_DISP_TIMING_OPTIONS); 831 DC_DISP_DISP_TIMING_OPTIONS);
745 tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, 832 tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
@@ -838,9 +925,13 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
838 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0)); 925 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
839 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8)); 926 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
840 927
841 value = 0x1c800; 928 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_CSTM);
842 value &= ~SOR_CSTM_ROTCLK(~0); 929 value &= ~SOR_CSTM_ROTCLK(~0);
843 value |= SOR_CSTM_ROTCLK(2); 930 value |= SOR_CSTM_ROTCLK(2);
931 value |= SOR_CSTM_PLLDIV;
932 value &= ~SOR_CSTM_LVDS_ENABLE;
933 value &= ~SOR_CSTM_MODE_MASK;
934 value |= SOR_CSTM_MODE_TMDS;
844 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM); 935 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
845 936
846 /* start SOR */ 937 /* start SOR */
@@ -930,10 +1021,18 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
930 * sure it's only executed when the output is attached to one. 1021 * sure it's only executed when the output is attached to one.
931 */ 1022 */
932 if (dc) { 1023 if (dc) {
1024 /*
1025 * XXX: We can't do this here because it causes HDMI to go
1026 * into an erroneous state with the result that HDMI won't
1027 * properly work once disabled. See also a similar symptom
1028 * for the SOR output.
1029 */
1030 /*
933 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); 1031 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
934 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | 1032 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
935 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); 1033 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
936 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); 1034 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
1035 */
937 1036
938 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); 1037 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
939 value &= ~DISP_CTRL_MODE_MASK; 1038 value &= ~DISP_CTRL_MODE_MASK;
@@ -947,8 +1046,9 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
947 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); 1046 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
948 } 1047 }
949 1048
1049 clk_disable_unprepare(hdmi->clk);
950 reset_control_assert(hdmi->rst); 1050 reset_control_assert(hdmi->rst);
951 clk_disable(hdmi->clk); 1051 regulator_disable(hdmi->vdd);
952 regulator_disable(hdmi->pll); 1052 regulator_disable(hdmi->pll);
953 1053
954 hdmi->enabled = false; 1054 hdmi->enabled = false;
@@ -957,10 +1057,10 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
957} 1057}
958 1058
959static int tegra_output_hdmi_setup_clock(struct tegra_output *output, 1059static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
960 struct clk *clk, unsigned long pclk) 1060 struct clk *clk, unsigned long pclk,
1061 unsigned int *div)
961{ 1062{
962 struct tegra_hdmi *hdmi = to_hdmi(output); 1063 struct tegra_hdmi *hdmi = to_hdmi(output);
963 struct clk *base;
964 int err; 1064 int err;
965 1065
966 err = clk_set_parent(clk, hdmi->clk_parent); 1066 err = clk_set_parent(clk, hdmi->clk_parent);
@@ -969,17 +1069,12 @@ static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
969 return err; 1069 return err;
970 } 1070 }
971 1071
972 base = clk_get_parent(hdmi->clk_parent); 1072 err = clk_set_rate(hdmi->clk_parent, pclk);
973
974 /*
975 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
976 * respectively, each of which divides the base pll_d by 2.
977 */
978 err = clk_set_rate(base, pclk * 2);
979 if (err < 0) 1073 if (err < 0)
980 dev_err(output->dev, 1074 dev_err(output->dev, "failed to set clock rate to %lu Hz\n",
981 "failed to set base clock rate to %lu Hz\n", 1075 pclk);
982 pclk * 2); 1076
1077 *div = 0;
983 1078
984 return 0; 1079 return 0;
985} 1080}
@@ -1017,7 +1112,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1017 struct tegra_hdmi *hdmi = node->info_ent->data; 1112 struct tegra_hdmi *hdmi = node->info_ent->data;
1018 int err; 1113 int err;
1019 1114
1020 err = clk_enable(hdmi->clk); 1115 err = clk_prepare_enable(hdmi->clk);
1021 if (err) 1116 if (err)
1022 return err; 1117 return err;
1023 1118
@@ -1186,7 +1281,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1186 1281
1187#undef DUMP_REG 1282#undef DUMP_REG
1188 1283
1189 clk_disable(hdmi->clk); 1284 clk_disable_unprepare(hdmi->clk);
1190 1285
1191 return 0; 1286 return 0;
1192} 1287}
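
The clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() conversions above pair with the removal of the unconditional clk_prepare() calls from probe further down; clk_enable() is only valid on a clock that has already been prepared. A minimal sketch of the pattern each call site now follows:

#include <linux/clk.h>

/* each register-dump or enable path now does the full pair itself */
static int example_dump(struct clk *clk)
{
        int err;

        err = clk_prepare_enable(clk);  /* prepare may sleep; enable is atomic */
        if (err < 0)
                return err;

        /* ... access registers that need the clock running ... */

        clk_disable_unprepare(clk);
        return 0;
}
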
@@ -1252,33 +1347,33 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
1252 1347
1253static int tegra_hdmi_init(struct host1x_client *client) 1348static int tegra_hdmi_init(struct host1x_client *client)
1254{ 1349{
1255 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 1350 struct drm_device *drm = dev_get_drvdata(client->parent);
1256 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1351 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1257 int err; 1352 int err;
1258 1353
1259 err = regulator_enable(hdmi->vdd);
1260 if (err < 0) {
1261 dev_err(client->dev, "failed to enable VDD regulator: %d\n",
1262 err);
1263 return err;
1264 }
1265
1266 hdmi->output.type = TEGRA_OUTPUT_HDMI; 1354 hdmi->output.type = TEGRA_OUTPUT_HDMI;
1267 hdmi->output.dev = client->dev; 1355 hdmi->output.dev = client->dev;
1268 hdmi->output.ops = &hdmi_ops; 1356 hdmi->output.ops = &hdmi_ops;
1269 1357
1270 err = tegra_output_init(tegra->drm, &hdmi->output); 1358 err = tegra_output_init(drm, &hdmi->output);
1271 if (err < 0) { 1359 if (err < 0) {
1272 dev_err(client->dev, "output setup failed: %d\n", err); 1360 dev_err(client->dev, "output setup failed: %d\n", err);
1273 return err; 1361 return err;
1274 } 1362 }
1275 1363
1276 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1364 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1277 err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary); 1365 err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
1278 if (err < 0) 1366 if (err < 0)
1279 dev_err(client->dev, "debugfs setup failed: %d\n", err); 1367 dev_err(client->dev, "debugfs setup failed: %d\n", err);
1280 } 1368 }
1281 1369
1370 err = regulator_enable(hdmi->hdmi);
1371 if (err < 0) {
1372 dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
1373 err);
1374 return err;
1375 }
1376
1282 return 0; 1377 return 0;
1283} 1378}
1284 1379
@@ -1287,6 +1382,8 @@ static int tegra_hdmi_exit(struct host1x_client *client)
1287 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1382 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1288 int err; 1383 int err;
1289 1384
1385 regulator_disable(hdmi->hdmi);
1386
1290 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1387 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1291 err = tegra_hdmi_debugfs_exit(hdmi); 1388 err = tegra_hdmi_debugfs_exit(hdmi);
1292 if (err < 0) 1389 if (err < 0)
@@ -1306,8 +1403,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
1306 return err; 1403 return err;
1307 } 1404 }
1308 1405
1309 regulator_disable(hdmi->vdd);
1310
1311 return 0; 1406 return 0;
1312} 1407}
1313 1408
@@ -1340,7 +1435,16 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
1340 .has_sor_io_peak_current = true, 1435 .has_sor_io_peak_current = true,
1341}; 1436};
1342 1437
1438static const struct tegra_hdmi_config tegra124_hdmi_config = {
1439 .tmds = tegra124_tmds_config,
1440 .num_tmds = ARRAY_SIZE(tegra124_tmds_config),
1441 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
1442 .fuse_override_value = 1 << 31,
1443 .has_sor_io_peak_current = true,
1444};
1445
1343static const struct of_device_id tegra_hdmi_of_match[] = { 1446static const struct of_device_id tegra_hdmi_of_match[] = {
1447 { .compatible = "nvidia,tegra124-hdmi", .data = &tegra124_hdmi_config },
1344 { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config }, 1448 { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
1345 { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config }, 1449 { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
1346 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config }, 1450 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
@@ -1381,28 +1485,20 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1381 return PTR_ERR(hdmi->rst); 1485 return PTR_ERR(hdmi->rst);
1382 } 1486 }
1383 1487
1384 err = clk_prepare(hdmi->clk);
1385 if (err < 0)
1386 return err;
1387
1388 hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent"); 1488 hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
1389 if (IS_ERR(hdmi->clk_parent)) 1489 if (IS_ERR(hdmi->clk_parent))
1390 return PTR_ERR(hdmi->clk_parent); 1490 return PTR_ERR(hdmi->clk_parent);
1391 1491
1392 err = clk_prepare(hdmi->clk_parent);
1393 if (err < 0)
1394 return err;
1395
1396 err = clk_set_parent(hdmi->clk, hdmi->clk_parent); 1492 err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
1397 if (err < 0) { 1493 if (err < 0) {
1398 dev_err(&pdev->dev, "failed to setup clocks: %d\n", err); 1494 dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
1399 return err; 1495 return err;
1400 } 1496 }
1401 1497
1402 hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd"); 1498 hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
1403 if (IS_ERR(hdmi->vdd)) { 1499 if (IS_ERR(hdmi->hdmi)) {
1404 dev_err(&pdev->dev, "failed to get VDD regulator\n"); 1500 dev_err(&pdev->dev, "failed to get HDMI regulator\n");
1405 return PTR_ERR(hdmi->vdd); 1501 return PTR_ERR(hdmi->hdmi);
1406 } 1502 }
1407 1503
1408 hdmi->pll = devm_regulator_get(&pdev->dev, "pll"); 1504 hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
@@ -1411,6 +1507,12 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1411 return PTR_ERR(hdmi->pll); 1507 return PTR_ERR(hdmi->pll);
1412 } 1508 }
1413 1509
1510 hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
1511 if (IS_ERR(hdmi->vdd)) {
1512 dev_err(&pdev->dev, "failed to get VDD regulator\n");
1513 return PTR_ERR(hdmi->vdd);
1514 }
1515
1414 hdmi->output.dev = &pdev->dev; 1516 hdmi->output.dev = &pdev->dev;
1415 1517
1416 err = tegra_output_probe(&hdmi->output); 1518 err = tegra_output_probe(&hdmi->output);
@@ -1462,8 +1564,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
1462 return err; 1564 return err;
1463 } 1565 }
1464 1566
1465 clk_unprepare(hdmi->clk_parent); 1567 clk_disable_unprepare(hdmi->clk_parent);
1466 clk_unprepare(hdmi->clk); 1568 clk_disable_unprepare(hdmi->clk);
1467 1569
1468 return 0; 1570 return 0;
1469} 1571}
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 0aebc485f7fa..919a19df4e1b 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -190,6 +190,11 @@
190 190
191#define HDMI_NV_PDISP_SOR_CSTM 0x5a 191#define HDMI_NV_PDISP_SOR_CSTM 0x5a
192#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) 192#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
193#define SOR_CSTM_PLLDIV (1 << 21)
194#define SOR_CSTM_LVDS_ENABLE (1 << 16)
195#define SOR_CSTM_MODE_LVDS (0 << 12)
196#define SOR_CSTM_MODE_TMDS (1 << 12)
197#define SOR_CSTM_MODE_MASK (3 << 12)
193 198
194#define HDMI_NV_PDISP_SOR_LVDS 0x5b 199#define HDMI_NV_PDISP_SOR_LVDS 0x5b
195#define HDMI_NV_PDISP_SOR_CRCA 0x5c 200#define HDMI_NV_PDISP_SOR_CRCA 0x5c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 0266fb40479e..d6af9be48f42 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -159,11 +159,38 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
159} 159}
160 160
161static int tegra_output_rgb_setup_clock(struct tegra_output *output, 161static int tegra_output_rgb_setup_clock(struct tegra_output *output,
162 struct clk *clk, unsigned long pclk) 162 struct clk *clk, unsigned long pclk,
163 unsigned int *div)
163{ 164{
164 struct tegra_rgb *rgb = to_rgb(output); 165 struct tegra_rgb *rgb = to_rgb(output);
166 int err;
167
168 err = clk_set_parent(clk, rgb->clk_parent);
169 if (err < 0) {
170 dev_err(output->dev, "failed to set parent: %d\n", err);
171 return err;
172 }
165 173
166 return clk_set_parent(clk, rgb->clk_parent); 174 /*
175 * We may not want to change the frequency of the parent clock, since
176 * it may be a parent for other peripherals. This is due to the fact
177 * that on Tegra20 there's only a single clock dedicated to display
178 * (pll_d_out0), whereas later generations have a second one that can
179 * be used to independently drive a second output (pll_d2_out0).
180 *
181 * As a way to support multiple outputs on Tegra20 as well, pll_p is
182 * typically used as the parent clock for the display controllers.
183 * But this comes at a cost: pll_p is the parent of several other
184 * peripherals, so its frequency shouldn't change out of the blue.
185 *
186 * The best we can do at this point is to use the shift clock divider
187 * and hope that the desired frequency can be matched (or at least
188 * matched sufficiently close that the panel will still work).
189 */
190
191 *div = ((clk_get_rate(clk) * 2) / pclk) - 2;
192
193 return 0;
167} 194}
168 195
169static int tegra_output_rgb_check_mode(struct tegra_output *output, 196static int tegra_output_rgb_check_mode(struct tegra_output *output,
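
Worked numbers for the divider formula above, assuming a hypothetical 216 MHz pll_p parent and a 27 MHz panel; since pclk = 2 * parent / (div + 2), the requested rate is hit exactly whenever 2 * parent is an integer multiple of pclk:

#include <stdio.h>

int main(void)
{
        unsigned long parent = 216000000;       /* hypothetical pll_p rate */
        unsigned long pclk = 27000000;          /* requested pixel clock */
        unsigned long div;

        /* shift clock divider: pclk = 2 * parent / (div + 2) */
        div = (parent * 2) / pclk - 2;          /* 14 */

        printf("div = %lu, achieved pclk = %lu Hz\n",
               div, 2 * parent / (div + 2));    /* exactly 27 MHz here */
        return 0;
}
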
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 49ef5729f435..27c979b50111 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/debugfs.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/reset.h> 13#include <linux/reset.h>
@@ -33,7 +34,23 @@ struct tegra_sor {
33 34
34 struct tegra_dpaux *dpaux; 35 struct tegra_dpaux *dpaux;
35 36
37 struct mutex lock;
36 bool enabled; 38 bool enabled;
39
40 struct dentry *debugfs;
41};
42
43struct tegra_sor_config {
44 u32 bits_per_pixel;
45
46 u32 active_polarity;
47 u32 active_count;
48 u32 tu_size;
49 u32 active_frac;
50 u32 watermark;
51
52 u32 hblank_symbols;
53 u32 vblank_symbols;
37}; 54};
38 55
39static inline struct tegra_sor * 56static inline struct tegra_sor *
@@ -289,34 +306,232 @@ static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
289 return -ETIMEDOUT; 306 return -ETIMEDOUT;
290} 307}
291 308
309struct tegra_sor_params {
310 /* number of link clocks per line */
311 unsigned int num_clocks;
312 /* ratio between input and output */
313 u64 ratio;
314 /* precision factor */
315 u64 precision;
316
317 unsigned int active_polarity;
318 unsigned int active_count;
319 unsigned int active_frac;
320 unsigned int tu_size;
321 unsigned int error;
322};
323
324static int tegra_sor_compute_params(struct tegra_sor *sor,
325 struct tegra_sor_params *params,
326 unsigned int tu_size)
327{
328 u64 active_sym, active_count, frac, approx;
329 u32 active_polarity, active_frac = 0;
330 const u64 f = params->precision;
331 s64 error;
332
333 active_sym = params->ratio * tu_size;
334 active_count = div_u64(active_sym, f) * f;
335 frac = active_sym - active_count;
336
337 /* fraction < 0.5 */
338 if (frac >= (f / 2)) {
339 active_polarity = 1;
340 frac = f - frac;
341 } else {
342 active_polarity = 0;
343 }
344
345 if (frac != 0) {
346 frac = div_u64(f * f, frac); /* 1/fraction */
347 if (frac <= (15 * f)) {
348 active_frac = div_u64(frac, f);
349
350 /* round up */
351 if (active_polarity)
352 active_frac++;
353 } else {
354 active_frac = active_polarity ? 1 : 15;
355 }
356 }
357
358 if (active_frac == 1)
359 active_polarity = 0;
360
361 if (active_polarity == 1) {
362 if (active_frac) {
363 approx = active_count + (active_frac * (f - 1)) * f;
364 approx = div_u64(approx, active_frac * f);
365 } else {
366 approx = active_count + f;
367 }
368 } else {
369 if (active_frac)
370 approx = active_count + div_u64(f, active_frac);
371 else
372 approx = active_count;
373 }
374
375 error = div_s64(active_sym - approx, tu_size);
376 error *= params->num_clocks;
377
378 if (error <= 0 && abs64(error) < params->error) {
379 params->active_count = div_u64(active_count, f);
380 params->active_polarity = active_polarity;
381 params->active_frac = active_frac;
382 params->error = abs64(error);
383 params->tu_size = tu_size;
384
385 if (error == 0)
386 return true;
387 }
388
389 return false;
390}
391
392static int tegra_sor_calc_config(struct tegra_sor *sor,
393 struct drm_display_mode *mode,
394 struct tegra_sor_config *config,
395 struct drm_dp_link *link)
396{
397 const u64 f = 100000, link_rate = link->rate * 1000;
398 const u64 pclk = mode->clock * 1000;
399 u64 input, output, watermark, num;
400 struct tegra_sor_params params;
401 u32 num_syms_per_line;
402 unsigned int i;
403
404 if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
405 return -EINVAL;
406
407 output = link_rate * 8 * link->num_lanes;
408 input = pclk * config->bits_per_pixel;
409
410 if (input >= output)
411 return -ERANGE;
412
413 memset(&params, 0, sizeof(params));
414 params.ratio = div64_u64(input * f, output);
415 params.num_clocks = div_u64(link_rate * mode->hdisplay, pclk);
416 params.precision = f;
417 params.error = 64 * f;
418 params.tu_size = 64;
419
420 for (i = params.tu_size; i >= 32; i--)
421 if (tegra_sor_compute_params(sor, &params, i))
422 break;
423
424 if (params.active_frac == 0) {
425 config->active_polarity = 0;
426 config->active_count = params.active_count;
427
428 if (!params.active_polarity)
429 config->active_count--;
430
431 config->tu_size = params.tu_size;
432 config->active_frac = 1;
433 } else {
434 config->active_polarity = params.active_polarity;
435 config->active_count = params.active_count;
436 config->active_frac = params.active_frac;
437 config->tu_size = params.tu_size;
438 }
439
440 dev_dbg(sor->dev,
441 "polarity: %d active count: %d tu size: %d active frac: %d\n",
442 config->active_polarity, config->active_count,
443 config->tu_size, config->active_frac);
444
445 watermark = params.ratio * config->tu_size * (f - params.ratio);
446 watermark = div_u64(watermark, f);
447
448 watermark = div_u64(watermark + params.error, f);
449 config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
450 num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) /

451 (link->num_lanes * 8);
452
453 if (config->watermark > 30) {
454 config->watermark = 30;
455 dev_err(sor->dev,
456 "unable to compute TU size, forcing watermark to %u\n",
457 config->watermark);
458 } else if (config->watermark > num_syms_per_line) {
459 config->watermark = num_syms_per_line;
460 dev_err(sor->dev, "watermark too high, forcing to %u\n",
461 config->watermark);
462 }
463
464 /* compute the number of symbols per horizontal blanking interval */
465 num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
466 config->hblank_symbols = div_u64(num, pclk);
467
468 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
469 config->hblank_symbols -= 3;
470
471 config->hblank_symbols -= 12 / link->num_lanes;
472
473 /* compute the number of symbols per vertical blanking interval */
474 num = (mode->hdisplay - 25) * link_rate;
475 config->vblank_symbols = div_u64(num, pclk);
476 config->vblank_symbols -= 36 / link->num_lanes + 4;
477
478 dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
479 config->vblank_symbols);
480
481 return 0;
482}
483
292static int tegra_output_sor_enable(struct tegra_output *output) 484static int tegra_output_sor_enable(struct tegra_output *output)
293{ 485{
294 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 486 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
295 struct drm_display_mode *mode = &dc->base.mode; 487 struct drm_display_mode *mode = &dc->base.mode;
296 unsigned int vbe, vse, hbe, hse, vbs, hbs, i; 488 unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
297 struct tegra_sor *sor = to_sor(output); 489 struct tegra_sor *sor = to_sor(output);
490 struct tegra_sor_config config;
491 struct drm_dp_link link;
492 struct drm_dp_aux *aux;
298 unsigned long value; 493 unsigned long value;
299 int err; 494 int err = 0;
495
496 mutex_lock(&sor->lock);
300 497
301 if (sor->enabled) 498 if (sor->enabled)
302 return 0; 499 goto unlock;
303 500
304 err = clk_prepare_enable(sor->clk); 501 err = clk_prepare_enable(sor->clk);
305 if (err < 0) 502 if (err < 0)
306 return err; 503 goto unlock;
307 504
308 reset_control_deassert(sor->rst); 505 reset_control_deassert(sor->rst);
309 506
507 /* FIXME: properly convert to struct drm_dp_aux */
508 aux = (struct drm_dp_aux *)sor->dpaux;
509
310 if (sor->dpaux) { 510 if (sor->dpaux) {
311 err = tegra_dpaux_enable(sor->dpaux); 511 err = tegra_dpaux_enable(sor->dpaux);
312 if (err < 0) 512 if (err < 0)
313 dev_err(sor->dev, "failed to enable DP: %d\n", err); 513 dev_err(sor->dev, "failed to enable DP: %d\n", err);
514
515 err = drm_dp_link_probe(aux, &link);
516 if (err < 0) {
517 dev_err(sor->dev, "failed to probe eDP link: %d\n",
518 err);
519 return err;
520 }
314 } 521 }
315 522
316 err = clk_set_parent(sor->clk, sor->clk_safe); 523 err = clk_set_parent(sor->clk, sor->clk_safe);
317 if (err < 0) 524 if (err < 0)
318 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); 525 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
319 526
527 memset(&config, 0, sizeof(config));
528 config.bits_per_pixel = 24; /* XXX: don't hardcode? */
529
530 err = tegra_sor_calc_config(sor, mode, &config, &link);
531 if (err < 0)
532 dev_err(sor->dev, "failed to compute link configuration: %d\n",
533 err);
534
320 value = tegra_sor_readl(sor, SOR_CLK_CNTRL); 535 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
321 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; 536 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
322 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; 537 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
@@ -385,7 +600,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
385 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS); 600 err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
386 if (err < 0) { 601 if (err < 0) {
387 dev_err(sor->dev, "failed to power on I/O rail: %d\n", err); 602 dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
388 return err; 603 goto unlock;
389 } 604 }
390 605
391 usleep_range(5, 100); 606 usleep_range(5, 100);
@@ -419,15 +634,29 @@ static int tegra_output_sor_enable(struct tegra_output *output)
419 if (err < 0) 634 if (err < 0)
420 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); 635 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
421 636
422 /* power dplanes (XXX parameterize based on link?) */ 637 /* power DP lanes */
423 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); 638 value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
424 value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | 639
425 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2; 640 if (link.num_lanes <= 2)
641 value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
642 else
643 value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
644
645 if (link.num_lanes <= 1)
646 value &= ~SOR_DP_PADCTL_PD_TXD_1;
647 else
648 value |= SOR_DP_PADCTL_PD_TXD_1;
649
650 if (link.num_lanes == 0)
651 value &= ~SOR_DP_PADCTL_PD_TXD_0;
652 else
653 value |= SOR_DP_PADCTL_PD_TXD_0;
654
426 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 655 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
427 656
428 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0); 657 value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
429 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; 658 value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
430 value |= SOR_DP_LINKCTL_LANE_COUNT(4); 659 value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
431 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); 660 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
432 661
433 /* start lane sequencer */ 662 /* start lane sequencer */
@@ -443,10 +672,10 @@ static int tegra_output_sor_enable(struct tegra_output *output)
443 usleep_range(250, 1000); 672 usleep_range(250, 1000);
444 } 673 }
445 674
446 /* set link bandwidth (2.7 GHz, XXX: parameterize based on link?) */ 675 /* set link bandwidth */
447 value = tegra_sor_readl(sor, SOR_CLK_CNTRL); 676 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
448 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; 677 value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
449 value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70; 678 value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
450 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 679 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
451 680
452 /* set linkctl */ 681 /* set linkctl */
@@ -454,7 +683,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
454 value |= SOR_DP_LINKCTL_ENABLE; 683 value |= SOR_DP_LINKCTL_ENABLE;
455 684
456 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; 685 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
457 value |= SOR_DP_LINKCTL_TU_SIZE(59); /* XXX: don't hardcode? */ 686 value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
458 687
459 value |= SOR_DP_LINKCTL_ENHANCED_FRAME; 688 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
460 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0); 689 tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
@@ -470,28 +699,31 @@ static int tegra_output_sor_enable(struct tegra_output *output)
470 699
471 value = tegra_sor_readl(sor, SOR_DP_CONFIG_0); 700 value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
472 value &= ~SOR_DP_CONFIG_WATERMARK_MASK; 701 value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
473 value |= SOR_DP_CONFIG_WATERMARK(14); /* XXX: don't hardcode? */ 702 value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
474 703
475 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK; 704 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
476 value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(47); /* XXX: don't hardcode? */ 705 value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
477 706
478 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK; 707 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
479 value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(9); /* XXX: don't hardcode? */ 708 value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
480 709
481 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; /* XXX: don't hardcode? */ 710 if (config.active_polarity)
711 value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
712 else
713 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
482 714
483 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; 715 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
484 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; /* XXX: don't hardcode? */ 716 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
485 tegra_sor_writel(sor, value, SOR_DP_CONFIG_0); 717 tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
486 718
487 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); 719 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
488 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; 720 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
489 value |= 137; /* XXX: don't hardcode? */ 721 value |= config.hblank_symbols & 0xffff;
490 tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS); 722 tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
491 723
492 value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS); 724 value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
493 value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK; 725 value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
494 value |= 2368; /* XXX: don't hardcode? */ 726 value |= config.vblank_symbols & 0xffff;
495 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); 727 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
496 728
497 /* enable pad calibration logic */ 729 /* enable pad calibration logic */
@@ -500,30 +732,27 @@ static int tegra_output_sor_enable(struct tegra_output *output)
500 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 732 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
501 733
502 if (sor->dpaux) { 734 if (sor->dpaux) {
503 /* FIXME: properly convert to struct drm_dp_aux */
504 struct drm_dp_aux *aux = (struct drm_dp_aux *)sor->dpaux;
505 struct drm_dp_link link;
506 u8 rate, lanes; 735 u8 rate, lanes;
507 736
508 err = drm_dp_link_probe(aux, &link); 737 err = drm_dp_link_probe(aux, &link);
509 if (err < 0) { 738 if (err < 0) {
510 dev_err(sor->dev, "failed to probe eDP link: %d\n", 739 dev_err(sor->dev, "failed to probe eDP link: %d\n",
511 err); 740 err);
512 return err; 741 goto unlock;
513 } 742 }
514 743
515 err = drm_dp_link_power_up(aux, &link); 744 err = drm_dp_link_power_up(aux, &link);
516 if (err < 0) { 745 if (err < 0) {
517 dev_err(sor->dev, "failed to power up eDP link: %d\n", 746 dev_err(sor->dev, "failed to power up eDP link: %d\n",
518 err); 747 err);
519 return err; 748 goto unlock;
520 } 749 }
521 750
522 err = drm_dp_link_configure(aux, &link); 751 err = drm_dp_link_configure(aux, &link);
523 if (err < 0) { 752 if (err < 0) {
524 dev_err(sor->dev, "failed to configure eDP link: %d\n", 753 dev_err(sor->dev, "failed to configure eDP link: %d\n",
525 err); 754 err);
526 return err; 755 goto unlock;
527 } 756 }
528 757
529 rate = drm_dp_link_rate_to_bw_code(link.rate); 758 rate = drm_dp_link_rate_to_bw_code(link.rate);
@@ -558,7 +787,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
558 if (err < 0) { 787 if (err < 0) {
559 dev_err(sor->dev, "DP fast link training failed: %d\n", 788 dev_err(sor->dev, "DP fast link training failed: %d\n",
560 err); 789 err);
561 return err; 790 goto unlock;
562 } 791 }
563 792
564 dev_dbg(sor->dev, "fast link training succeeded\n"); 793 dev_dbg(sor->dev, "fast link training succeeded\n");
@@ -567,7 +796,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
567 err = tegra_sor_power_up(sor, 250); 796 err = tegra_sor_power_up(sor, 250);
568 if (err < 0) { 797 if (err < 0) {
569 dev_err(sor->dev, "failed to power up SOR: %d\n", err); 798 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
570 return err; 799 goto unlock;
571 } 800 }
572 801
573 /* start display controller in continuous mode */ 802 /* start display controller in continuous mode */
@@ -586,12 +815,26 @@ static int tegra_output_sor_enable(struct tegra_output *output)
586 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete 815 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
587 * raster, associate with display controller) 816 * raster, associate with display controller)
588 */ 817 */
589 value = SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 | 818 value = SOR_STATE_ASY_VSYNCPOL |
590 SOR_STATE_ASY_VSYNCPOL |
591 SOR_STATE_ASY_HSYNCPOL | 819 SOR_STATE_ASY_HSYNCPOL |
592 SOR_STATE_ASY_PROTOCOL_DP_A | 820 SOR_STATE_ASY_PROTOCOL_DP_A |
593 SOR_STATE_ASY_CRC_MODE_COMPLETE | 821 SOR_STATE_ASY_CRC_MODE_COMPLETE |
594 SOR_STATE_ASY_OWNER(dc->pipe + 1); 822 SOR_STATE_ASY_OWNER(dc->pipe + 1);
823
824 switch (config.bits_per_pixel) {
825 case 24:
826 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
827 break;
828
829 case 18:
830 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
831 break;
832
833 default:
834 BUG();
835 break;
836 }
837
595 tegra_sor_writel(sor, value, SOR_STATE_1); 838 tegra_sor_writel(sor, value, SOR_STATE_1);
596 839
597 /* 840 /*
@@ -620,11 +863,8 @@ static int tegra_output_sor_enable(struct tegra_output *output)
620 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); 863 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
621 tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0)); 864 tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
622 865
623 /* XXX interlaced mode */
624 tegra_sor_writel(sor, 0x00000001, SOR_HEAD_STATE_5(0));
625
626 /* CSTM (LVDS, link A/B, upper) */ 866 /* CSTM (LVDS, link A/B, upper) */
627 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_B | SOR_CSTM_LINK_ACT_B | 867 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
628 SOR_CSTM_UPPER; 868 SOR_CSTM_UPPER;
629 tegra_sor_writel(sor, value, SOR_CSTM); 869 tegra_sor_writel(sor, value, SOR_CSTM);
630 870
@@ -632,7 +872,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
632 err = tegra_sor_setup_pwm(sor, 250); 872 err = tegra_sor_setup_pwm(sor, 250);
633 if (err < 0) { 873 if (err < 0) {
634 dev_err(sor->dev, "failed to setup PWM: %d\n", err); 874 dev_err(sor->dev, "failed to setup PWM: %d\n", err);
635 return err; 875 goto unlock;
636 } 876 }
637 877
638 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); 878 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
@@ -644,18 +884,20 @@ static int tegra_output_sor_enable(struct tegra_output *output)
644 err = tegra_sor_attach(sor); 884 err = tegra_sor_attach(sor);
645 if (err < 0) { 885 if (err < 0) {
646 dev_err(sor->dev, "failed to attach SOR: %d\n", err); 886 dev_err(sor->dev, "failed to attach SOR: %d\n", err);
647 return err; 887 goto unlock;
648 } 888 }
649 889
650 err = tegra_sor_wakeup(sor); 890 err = tegra_sor_wakeup(sor);
651 if (err < 0) { 891 if (err < 0) {
652 dev_err(sor->dev, "failed to enable DC: %d\n", err); 892 dev_err(sor->dev, "failed to enable DC: %d\n", err);
653 return err; 893 goto unlock;
654 } 894 }
655 895
656 sor->enabled = true; 896 sor->enabled = true;
657 897
658 return 0; 898unlock:
899 mutex_unlock(&sor->lock);
900 return err;
659} 901}
660 902
661static int tegra_sor_detach(struct tegra_sor *sor) 903static int tegra_sor_detach(struct tegra_sor *sor)
@@ -740,7 +982,7 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
740 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); 982 tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
741 983
742 /* stop lane sequencer */ 984 /* stop lane sequencer */
743 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | 985 value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
744 SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; 986 SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
745 tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); 987 tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
746 988
@@ -783,15 +1025,17 @@ static int tegra_output_sor_disable(struct tegra_output *output)
783 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 1025 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
784 struct tegra_sor *sor = to_sor(output); 1026 struct tegra_sor *sor = to_sor(output);
785 unsigned long value; 1027 unsigned long value;
786 int err; 1028 int err = 0;
1029
1030 mutex_lock(&sor->lock);
787 1031
788 if (!sor->enabled) 1032 if (!sor->enabled)
789 return 0; 1033 goto unlock;
790 1034
791 err = tegra_sor_detach(sor); 1035 err = tegra_sor_detach(sor);
792 if (err < 0) { 1036 if (err < 0) {
793 dev_err(sor->dev, "failed to detach SOR: %d\n", err); 1037 dev_err(sor->dev, "failed to detach SOR: %d\n", err);
794 return err; 1038 goto unlock;
795 } 1039 }
796 1040
797 tegra_sor_writel(sor, 0, SOR_STATE_1); 1041 tegra_sor_writel(sor, 0, SOR_STATE_1);
@@ -832,21 +1076,21 @@ static int tegra_output_sor_disable(struct tegra_output *output)
832 err = tegra_sor_power_down(sor); 1076 err = tegra_sor_power_down(sor);
833 if (err < 0) { 1077 if (err < 0) {
834 dev_err(sor->dev, "failed to power down SOR: %d\n", err); 1078 dev_err(sor->dev, "failed to power down SOR: %d\n", err);
835 return err; 1079 goto unlock;
836 } 1080 }
837 1081
838 if (sor->dpaux) { 1082 if (sor->dpaux) {
839 err = tegra_dpaux_disable(sor->dpaux); 1083 err = tegra_dpaux_disable(sor->dpaux);
840 if (err < 0) { 1084 if (err < 0) {
841 dev_err(sor->dev, "failed to disable DP: %d\n", err); 1085 dev_err(sor->dev, "failed to disable DP: %d\n", err);
842 return err; 1086 goto unlock;
843 } 1087 }
844 } 1088 }
845 1089
846 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS); 1090 err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
847 if (err < 0) { 1091 if (err < 0) {
848 dev_err(sor->dev, "failed to power off I/O rail: %d\n", err); 1092 dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
849 return err; 1093 goto unlock;
850 } 1094 }
851 1095
852 reset_control_assert(sor->rst); 1096 reset_control_assert(sor->rst);
@@ -854,18 +1098,18 @@ static int tegra_output_sor_disable(struct tegra_output *output)
854 1098
855 sor->enabled = false; 1099 sor->enabled = false;
856 1100
857 return 0; 1101unlock:
1102 mutex_unlock(&sor->lock);
1103 return err;
858} 1104}
859 1105
860static int tegra_output_sor_setup_clock(struct tegra_output *output, 1106static int tegra_output_sor_setup_clock(struct tegra_output *output,
861 struct clk *clk, unsigned long pclk) 1107 struct clk *clk, unsigned long pclk,
1108 unsigned int *div)
862{ 1109{
863 struct tegra_sor *sor = to_sor(output); 1110 struct tegra_sor *sor = to_sor(output);
864 int err; 1111 int err;
865 1112
866 /* round to next MHz */
867 pclk = DIV_ROUND_UP(pclk / 2, 1000000) * 1000000;
868
869 err = clk_set_parent(clk, sor->clk_parent); 1113 err = clk_set_parent(clk, sor->clk_parent);
870 if (err < 0) { 1114 if (err < 0) {
871 dev_err(sor->dev, "failed to set parent clock: %d\n", err); 1115 dev_err(sor->dev, "failed to set parent clock: %d\n", err);
@@ -874,11 +1118,12 @@ static int tegra_output_sor_setup_clock(struct tegra_output *output,
874 1118
875 err = clk_set_rate(sor->clk_parent, pclk); 1119 err = clk_set_rate(sor->clk_parent, pclk);
876 if (err < 0) { 1120 if (err < 0) {
877 dev_err(sor->dev, "failed to set base clock rate to %lu Hz\n", 1121 dev_err(sor->dev, "failed to set clock rate to %lu Hz\n", pclk);
878 pclk * 2);
879 return err; 1122 return err;
880 } 1123 }
881 1124
1125 *div = 0;
1126
882 return 0; 1127 return 0;
883} 1128}
884 1129
@@ -914,9 +1159,124 @@ static const struct tegra_output_ops sor_ops = {
914 .detect = tegra_output_sor_detect, 1159 .detect = tegra_output_sor_detect,
915}; 1160};
916 1161
1162static int tegra_sor_crc_open(struct inode *inode, struct file *file)
1163{
1164 file->private_data = inode->i_private;
1165
1166 return 0;
1167}
1168
1169static int tegra_sor_crc_release(struct inode *inode, struct file *file)
1170{
1171 return 0;
1172}
1173
1174static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
1175{
1176 u32 value;
1177
1178 timeout = jiffies + msecs_to_jiffies(timeout);
1179
1180 while (time_before(jiffies, timeout)) {
1181 value = tegra_sor_readl(sor, SOR_CRC_A);
1182 if (value & SOR_CRC_A_VALID)
1183 return 0;
1184
1185 usleep_range(100, 200);
1186 }
1187
1188 return -ETIMEDOUT;
1189}
1190
1191static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
1192 size_t size, loff_t *ppos)
1193{
1194 struct tegra_sor *sor = file->private_data;
1195 ssize_t num, err;
1196 char buf[10];
1197 u32 value;
1198
1199 mutex_lock(&sor->lock);
1200
1201 if (!sor->enabled) {
1202 err = -EAGAIN;
1203 goto unlock;
1204 }
1205
1206 value = tegra_sor_readl(sor, SOR_STATE_1);
1207 value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
1208 tegra_sor_writel(sor, value, SOR_STATE_1);
1209
1210 value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
1211 value |= SOR_CRC_CNTRL_ENABLE;
1212 tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
1213
1214 value = tegra_sor_readl(sor, SOR_TEST);
1215 value &= ~SOR_TEST_CRC_POST_SERIALIZE;
1216 tegra_sor_writel(sor, value, SOR_TEST);
1217
1218 err = tegra_sor_crc_wait(sor, 100);
1219 if (err < 0)
1220 goto unlock;
1221
1222 tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
1223 value = tegra_sor_readl(sor, SOR_CRC_B);
1224
1225 num = scnprintf(buf, sizeof(buf), "%08x\n", value);
1226
1227 err = simple_read_from_buffer(buffer, size, ppos, buf, num);
1228
1229unlock:
1230 mutex_unlock(&sor->lock);
1231 return err;
1232}
1233
1234static const struct file_operations tegra_sor_crc_fops = {
1235 .owner = THIS_MODULE,
1236 .open = tegra_sor_crc_open,
1237 .read = tegra_sor_crc_read,
1238 .release = tegra_sor_crc_release,
1239};
1240
1241static int tegra_sor_debugfs_init(struct tegra_sor *sor,
1242 struct drm_minor *minor)
1243{
1244 struct dentry *entry;
1245 int err = 0;
1246
1247 sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
1248 if (!sor->debugfs)
1249 return -ENOMEM;
1250
1251 entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
1252 &tegra_sor_crc_fops);
1253 if (!entry) {
1254 dev_err(sor->dev,
1255 "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
1256 minor->debugfs_root->d_name.name);
1257 err = -ENOMEM;
1258 goto remove;
1259 }
1260
1261 return err;
1262
1263remove:
1264 debugfs_remove(sor->debugfs);
1265 sor->debugfs = NULL;
1266 return err;
1267}
1268
1269static int tegra_sor_debugfs_exit(struct tegra_sor *sor)
1270{
1271 debugfs_remove_recursive(sor->debugfs);
1272 sor->debugfs = NULL;
1273
1274 return 0;
1275}
1276
917static int tegra_sor_init(struct host1x_client *client) 1277static int tegra_sor_init(struct host1x_client *client)
918{ 1278{
919 struct tegra_drm *tegra = dev_get_drvdata(client->parent); 1279 struct drm_device *drm = dev_get_drvdata(client->parent);
920 struct tegra_sor *sor = host1x_client_to_sor(client); 1280 struct tegra_sor *sor = host1x_client_to_sor(client);
921 int err; 1281 int err;
922 1282
@@ -928,12 +1288,18 @@ static int tegra_sor_init(struct host1x_client *client)
928 sor->output.dev = sor->dev; 1288 sor->output.dev = sor->dev;
929 sor->output.ops = &sor_ops; 1289 sor->output.ops = &sor_ops;
930 1290
931 err = tegra_output_init(tegra->drm, &sor->output); 1291 err = tegra_output_init(drm, &sor->output);
932 if (err < 0) { 1292 if (err < 0) {
933 dev_err(sor->dev, "output setup failed: %d\n", err); 1293 dev_err(sor->dev, "output setup failed: %d\n", err);
934 return err; 1294 return err;
935 } 1295 }
936 1296
1297 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1298 err = tegra_sor_debugfs_init(sor, drm->primary);
1299 if (err < 0)
1300 dev_err(sor->dev, "debugfs setup failed: %d\n", err);
1301 }
1302
937 if (sor->dpaux) { 1303 if (sor->dpaux) {
938 err = tegra_dpaux_attach(sor->dpaux, &sor->output); 1304 err = tegra_dpaux_attach(sor->dpaux, &sor->output);
939 if (err < 0) { 1305 if (err < 0) {
@@ -964,6 +1330,12 @@ static int tegra_sor_exit(struct host1x_client *client)
964 } 1330 }
965 } 1331 }
966 1332
1333 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1334 err = tegra_sor_debugfs_exit(sor);
1335 if (err < 0)
1336 dev_err(sor->dev, "debugfs cleanup failed: %d\n", err);
1337 }
1338
967 err = tegra_output_exit(&sor->output); 1339 err = tegra_output_exit(&sor->output);
968 if (err < 0) { 1340 if (err < 0) {
969 dev_err(sor->dev, "output cleanup failed: %d\n", err); 1341 dev_err(sor->dev, "output cleanup failed: %d\n", err);
@@ -1045,6 +1417,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
1045 sor->client.ops = &sor_client_ops; 1417 sor->client.ops = &sor_client_ops;
1046 sor->client.dev = &pdev->dev; 1418 sor->client.dev = &pdev->dev;
1047 1419
1420 mutex_init(&sor->lock);
1421
1048 err = host1x_client_register(&sor->client); 1422 err = host1x_client_register(&sor->client);
1049 if (err < 0) { 1423 if (err < 0) {
1050 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 1424 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
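Aside: the sor.c changes above convert every early "return err" in the enable/disable paths into a "goto unlock" so the newly added sor->lock mutex is released on every exit. A minimal sketch of that single-exit pattern, with hypothetical names (my_dev, do_step) standing in for the driver specifics:

#include <linux/mutex.h>
#include <linux/types.h>

struct my_dev {
	struct mutex lock;
	bool enabled;
};

/* Placeholder for the real hardware programming step. */
static int do_step(struct my_dev *dev)
{
	return 0;
}

static int my_dev_enable(struct my_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->lock);

	if (dev->enabled)
		goto unlock;		/* already on: nothing to do, err == 0 */

	err = do_step(dev);
	if (err < 0)
		goto unlock;		/* single exit keeps the unlock guaranteed */

	dev->enabled = true;

unlock:
	mutex_unlock(&dev->lock);	/* runs on success and on every error path */
	return err;
}

Initializing err to 0 matters: the "!sor->enabled" early-outs reach the unlock label without ever assigning err.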
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f4156d54cd05..a5f8853fedb5 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -47,6 +47,7 @@
47#define SOR_HEAD_STATE_4(x) (0x0d + (x)) 47#define SOR_HEAD_STATE_4(x) (0x0d + (x))
48#define SOR_HEAD_STATE_5(x) (0x0f + (x)) 48#define SOR_HEAD_STATE_5(x) (0x0f + (x))
49#define SOR_CRC_CNTRL 0x11 49#define SOR_CRC_CNTRL 0x11
50#define SOR_CRC_CNTRL_ENABLE (1 << 0)
50#define SOR_DP_DEBUG_MVID 0x12 51#define SOR_DP_DEBUG_MVID 0x12
51 52
52#define SOR_CLK_CNTRL 0x13 53#define SOR_CLK_CNTRL 0x13
@@ -69,6 +70,7 @@
69#define SOR_PWR_NORMAL_STATE_PU (1 << 0) 70#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
70 71
71#define SOR_TEST 0x16 72#define SOR_TEST 0x16
73#define SOR_TEST_CRC_POST_SERIALIZE (1 << 23)
72#define SOR_TEST_ATTACHED (1 << 10) 74#define SOR_TEST_ATTACHED (1 << 10)
73#define SOR_TEST_HEAD_MODE_MASK (3 << 8) 75#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
74#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8) 76#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
@@ -115,6 +117,8 @@
115 117
116#define SOR_LVDS 0x1c 118#define SOR_LVDS 0x1c
117#define SOR_CRC_A 0x1d 119#define SOR_CRC_A 0x1d
120#define SOR_CRC_A_VALID (1 << 0)
121#define SOR_CRC_A_RESET (1 << 0)
118#define SOR_CRC_B 0x1e 122#define SOR_CRC_B 0x1e
119#define SOR_BLANK 0x1f 123#define SOR_BLANK 0x1f
120#define SOR_SEQ_CTL 0x20 124#define SOR_SEQ_CTL 0x20
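Aside: the new SOR_CRC_A_VALID/SOR_CRC_A_RESET bits are what tegra_sor_crc_wait() polls in the sor.c hunk above. The underlying poll-with-timeout idiom, sketched with a generic readl-style accessor rather than the driver's own:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll a status register until a bit is set or the timeout expires. */
static int wait_for_bit(void __iomem *reg, u32 bit, unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, timeout)) {
		if (readl(reg) & bit)
			return 0;

		usleep_range(100, 200);	/* back off between polls */
	}

	return -ETIMEDOUT;
}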
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 171a8203892c..b20b69488dc9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -268,7 +268,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
268 } 268 }
269 269
270 pm_runtime_get_sync(dev->dev); 270 pm_runtime_get_sync(dev->dev);
271 ret = drm_irq_install(dev); 271 ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
272 pm_runtime_put_sync(dev->dev); 272 pm_runtime_put_sync(dev->dev);
273 if (ret < 0) { 273 if (ret < 0) {
274 dev_err(dev->dev, "failed to install IRQ handler\n"); 274 dev_err(dev->dev, "failed to install IRQ handler\n");
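Aside: drm_irq_install() now takes the interrupt number explicitly instead of asking the bus for it (the drmP.h hunk below removes drm_bus::get_irq and drm_dev_to_irq()). A hedged sketch of how a platform driver resolves and passes the IRQ; my_install_irq is a hypothetical helper:

#include <drm/drmP.h>
#include <linux/platform_device.h>

/* Resolve the IRQ from the platform resources, then hand it to the core. */
static int my_install_irq(struct drm_device *dev)
{
	int irq = platform_get_irq(dev->platformdev, 0);

	if (irq < 0)
		return irq;	/* no IRQ resource, or probe deferral */

	return drm_irq_install(dev, irq);
}

PCI drivers do the equivalent with dev->pdev->irq, as the vmwgfx hunk further down shows.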
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index afdf383f630a..7094b92d1ec7 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -294,6 +294,7 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
294 dev->dev_private = udl; 294 dev->dev_private = udl;
295 295
296 if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) { 296 if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
297 ret = -ENODEV;
297 DRM_ERROR("firmware not recognized. Assume incompatible device\n"); 298 DRM_ERROR("firmware not recognized. Assume incompatible device\n");
298 goto err; 299 goto err;
299 } 300 }
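Aside: the udl fix above closes a classic error-path bug: jumping to the error label without setting ret, so the function could return 0 (success) after a failure. In miniature, with placeholder names:

#include <linux/errno.h>
#include <linux/types.h>

static int alloc_things(void) { return 0; }		/* placeholder */
static bool parse_descriptor(void) { return true; }	/* placeholder */
static void free_things(void) { }			/* placeholder */

static int my_load(void)
{
	int ret;

	ret = alloc_things();
	if (ret)
		goto err;

	if (!parse_descriptor()) {
		ret = -ENODEV;	/* without this, ret still holds 0 from above */
		goto err;
	}

	return 0;

err:
	free_things();
	return ret;
}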
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index a18479c6b6da..6fc0648dd37f 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -737,4 +737,4 @@ const struct drm_ioctl_desc via_ioctls[] = {
737 DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) 737 DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
738}; 738};
739 739
740int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); 740int via_max_ioctl = ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 927889105483..d70b1e1544bf 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
79 79
80 /* Linux specific until context tracking code gets ported to BSD */ 80 /* Linux specific until context tracking code gets ported to BSD */
81 /* Last context, perform cleanup */ 81 /* Last context, perform cleanup */
82 if (list_is_singular(&dev->ctxlist) && dev->dev_private) { 82 if (list_is_singular(&dev->ctxlist)) {
83 DRM_DEBUG("Last Context\n"); 83 DRM_DEBUG("Last Context\n");
84 drm_irq_uninstall(dev); 84 drm_irq_uninstall(dev);
85 via_cleanup_futex(dev_priv); 85 via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index b71bcd0bfbbf..67720f70fe29 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,11 +1,14 @@
1config DRM_VMWGFX 1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU" 2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI && FB 3 depends on DRM && PCI
4 select FB_DEFERRED_IO 4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT 7 select FB_CFB_IMAGEBLIT
8 select DRM_TTM 8 select DRM_TTM
9 # Only needed for the transitional use of drm_crtc_init - can be removed
10 # again once vmwgfx sets up the primary plane itself.
11 select DRM_KMS_HELPER
9 help 12 help
10 Choose this option if you would like to run 3D acceleration 13 Choose this option if you would like to run 3D acceleration
11 in a VMware virtual machine. 14 in a VMware virtual machine.
@@ -14,7 +17,7 @@ config DRM_VMWGFX
14 The compiled module will be called "vmwgfx.ko". 17 The compiled module will be called "vmwgfx.ko".
15 18
16config DRM_VMWGFX_FBCON 19config DRM_VMWGFX_FBCON
17 depends on DRM_VMWGFX 20 depends on DRM_VMWGFX && FB
18 bool "Enable framebuffer console under vmwgfx by default" 21 bool "Enable framebuffer console under vmwgfx by default"
19 help 22 help
20 Choose this option if you are shipping a new vmwgfx 23 Choose this option if you are shipping a new vmwgfx
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a223bbea3b3..246a62bab378 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -806,7 +806,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
806 } 806 }
807 807
808 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 808 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
809 ret = drm_irq_install(dev); 809 ret = drm_irq_install(dev, dev->pdev->irq);
810 if (ret != 0) { 810 if (ret != 0) {
811 DRM_ERROR("Failed installing irq: %d\n", ret); 811 DRM_ERROR("Failed installing irq: %d\n", ret);
812 goto out_no_irq; 812 goto out_no_irq;
@@ -1417,7 +1417,7 @@ static struct drm_driver driver = {
1417 .enable_vblank = vmw_enable_vblank, 1417 .enable_vblank = vmw_enable_vblank,
1418 .disable_vblank = vmw_disable_vblank, 1418 .disable_vblank = vmw_disable_vblank,
1419 .ioctls = vmw_ioctls, 1419 .ioctls = vmw_ioctls,
1420 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), 1420 .num_ioctls = ARRAY_SIZE(vmw_ioctls),
1421 .master_create = vmw_master_create, 1421 .master_create = vmw_master_create,
1422 .master_destroy = vmw_master_destroy, 1422 .master_destroy = vmw_master_destroy,
1423 .master_set = vmw_master_set, 1423 .master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index a2dde5ad8138..8f3edc4710f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -187,7 +187,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
187 * can do this since the caller in the drm core doesn't check anything 187 * can do this since the caller in the drm core doesn't check anything
188 * which is protected by any locks. 188 * which is protected by any locks.
189 */ 189 */
190 mutex_unlock(&crtc->mutex); 190 drm_modeset_unlock(&crtc->mutex);
191 drm_modeset_lock_all(dev_priv->dev); 191 drm_modeset_lock_all(dev_priv->dev);
192 192
193 /* A lot of the code assumes this */ 193 /* A lot of the code assumes this */
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
252 ret = 0; 252 ret = 0;
253out: 253out:
254 drm_modeset_unlock_all(dev_priv->dev); 254 drm_modeset_unlock_all(dev_priv->dev);
255 mutex_lock(&crtc->mutex); 255 drm_modeset_lock(&crtc->mutex, NULL);
256 256
257 return ret; 257 return ret;
258} 258}
@@ -273,7 +273,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
273 * can do this since the caller in the drm core doesn't check anything 273 * can do this since the caller in the drm core doesn't check anything
274 * which is protected by any locks. 274 * which is protected by any locks.
275 */ 275 */
276 mutex_unlock(&crtc->mutex); 276 drm_modeset_unlock(&crtc->mutex);
277 drm_modeset_lock_all(dev_priv->dev); 277 drm_modeset_lock_all(dev_priv->dev);
278 278
279 vmw_cursor_update_position(dev_priv, shown, 279 vmw_cursor_update_position(dev_priv, shown,
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
281 du->cursor_y + du->hotspot_y); 281 du->cursor_y + du->hotspot_y);
282 282
283 drm_modeset_unlock_all(dev_priv->dev); 283 drm_modeset_unlock_all(dev_priv->dev);
284 mutex_lock(&crtc->mutex); 284 drm_modeset_lock(&crtc->mutex, NULL);
285 285
286 return 0; 286 return 0;
287} 287}
@@ -2001,7 +2001,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2001 if (du->pref_mode) 2001 if (du->pref_mode)
2002 list_move(&du->pref_mode->head, &connector->probed_modes); 2002 list_move(&du->pref_mode->head, &connector->probed_modes);
2003 2003
2004 drm_mode_connector_list_update(connector); 2004 drm_mode_connector_list_update(connector, true);
2005 2005
2006 return 1; 2006 return 1;
2007} 2007}
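Aside: the vmwgfx cursor paths switch from raw mutex_lock/unlock on crtc->mutex to drm_modeset_lock()/drm_modeset_unlock(), since the locking rework turns that field into a struct drm_modeset_lock. Passing a NULL acquire context gives plain blocking semantics, which is all these paths need. A sketch of the drop/reacquire dance the driver performs around its lock-all section (with_all_locks is hypothetical):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/*
 * Temporarily drop a per-CRTC modeset lock, take every modeset lock,
 * do the work, then restore the original state. A NULL context means
 * an uninterruptible blocking lock with no deadlock backoff.
 */
static void with_all_locks(struct drm_device *dev, struct drm_crtc *crtc)
{
	drm_modeset_unlock(&crtc->mutex);
	drm_modeset_lock_all(dev);

	/* ... work that needs every modeset lock ... */

	drm_modeset_unlock_all(dev);
	drm_modeset_lock(&crtc->mutex, NULL);
}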
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index ccdd2e6da5e3..aaf54859adb0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -216,8 +216,8 @@ int host1x_device_exit(struct host1x_device *device)
216} 216}
217EXPORT_SYMBOL(host1x_device_exit); 217EXPORT_SYMBOL(host1x_device_exit);
218 218
219static int host1x_register_client(struct host1x *host1x, 219static int host1x_add_client(struct host1x *host1x,
220 struct host1x_client *client) 220 struct host1x_client *client)
221{ 221{
222 struct host1x_device *device; 222 struct host1x_device *device;
223 struct host1x_subdev *subdev; 223 struct host1x_subdev *subdev;
@@ -238,8 +238,8 @@ static int host1x_register_client(struct host1x *host1x,
238 return -ENODEV; 238 return -ENODEV;
239} 239}
240 240
241static int host1x_unregister_client(struct host1x *host1x, 241static int host1x_del_client(struct host1x *host1x,
242 struct host1x_client *client) 242 struct host1x_client *client)
243{ 243{
244 struct host1x_device *device, *dt; 244 struct host1x_device *device, *dt;
245 struct host1x_subdev *subdev; 245 struct host1x_subdev *subdev;
@@ -503,7 +503,7 @@ int host1x_client_register(struct host1x_client *client)
503 mutex_lock(&devices_lock); 503 mutex_lock(&devices_lock);
504 504
505 list_for_each_entry(host1x, &devices, list) { 505 list_for_each_entry(host1x, &devices, list) {
506 err = host1x_register_client(host1x, client); 506 err = host1x_add_client(host1x, client);
507 if (!err) { 507 if (!err) {
508 mutex_unlock(&devices_lock); 508 mutex_unlock(&devices_lock);
509 return 0; 509 return 0;
@@ -529,7 +529,7 @@ int host1x_client_unregister(struct host1x_client *client)
529 mutex_lock(&devices_lock); 529 mutex_lock(&devices_lock);
530 530
531 list_for_each_entry(host1x, &devices, list) { 531 list_for_each_entry(host1x, &devices, list) {
532 err = host1x_unregister_client(host1x, client); 532 err = host1x_del_client(host1x, client);
533 if (!err) { 533 if (!err) {
534 mutex_unlock(&devices_lock); 534 mutex_unlock(&devices_lock);
535 return 0; 535 return 0;
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
new file mode 100644
index 000000000000..2f228a2f2a48
--- /dev/null
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -0,0 +1,7 @@
1config IMX_IPUV3_CORE
2 tristate "IPUv3 core support"
3 depends on SOC_IMX5 || SOC_IMX6Q || SOC_IMX6SL || ARCH_MULTIPLATFORM
4 depends on RESET_CONTROLLER
5 help
6 Choose this if you have an i.MX5/6 system and want to use the Image
7 Processing Unit. This option only enables IPU base support.
diff --git a/drivers/staging/imx-drm/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 28ed72e98a96..1887972b4ac2 100644
--- a/drivers/staging/imx-drm/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += imx-ipu-v3.o 1obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
2 2
3imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o 3imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o ipu-smfc.o
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index a1f7b2001c8a..04e7b2eafbdd 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -31,7 +31,7 @@
31 31
32#include <drm/drm_fourcc.h> 32#include <drm/drm_fourcc.h>
33 33
34#include "imx-ipu-v3.h" 34#include <video/imx-ipu-v3.h>
35#include "ipu-prv.h" 35#include "ipu-prv.h"
36 36
37static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset) 37static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
@@ -661,6 +661,39 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
661} 661}
662EXPORT_SYMBOL_GPL(ipu_module_disable); 662EXPORT_SYMBOL_GPL(ipu_module_disable);
663 663
664int ipu_csi_enable(struct ipu_soc *ipu, int csi)
665{
666 return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
667}
668EXPORT_SYMBOL_GPL(ipu_csi_enable);
669
670int ipu_csi_disable(struct ipu_soc *ipu, int csi)
671{
672 return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
673}
674EXPORT_SYMBOL_GPL(ipu_csi_disable);
675
676int ipu_smfc_enable(struct ipu_soc *ipu)
677{
678 return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
679}
680EXPORT_SYMBOL_GPL(ipu_smfc_enable);
681
682int ipu_smfc_disable(struct ipu_soc *ipu)
683{
684 return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
685}
686EXPORT_SYMBOL_GPL(ipu_smfc_disable);
687
688int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
689{
690 struct ipu_soc *ipu = channel->ipu;
691 unsigned int chno = channel->num;
692
693 return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
694}
695EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
696
664void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num) 697void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
665{ 698{
666 struct ipu_soc *ipu = channel->ipu; 699 struct ipu_soc *ipu = channel->ipu;
@@ -896,8 +929,17 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
896 goto err_dp; 929 goto err_dp;
897 } 930 }
898 931
932 ret = ipu_smfc_init(ipu, dev, ipu_base +
933 devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
934 if (ret) {
935 unit = "smfc";
936 goto err_smfc;
937 }
938
899 return 0; 939 return 0;
900 940
941err_smfc:
942 ipu_dp_exit(ipu);
901err_dp: 943err_dp:
902 ipu_dmfc_exit(ipu); 944 ipu_dmfc_exit(ipu);
903err_dmfc: 945err_dmfc:
@@ -977,6 +1019,7 @@ EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
977 1019
978static void ipu_submodules_exit(struct ipu_soc *ipu) 1020static void ipu_submodules_exit(struct ipu_soc *ipu)
979{ 1021{
1022 ipu_smfc_exit(ipu);
980 ipu_dp_exit(ipu); 1023 ipu_dp_exit(ipu);
981 ipu_dmfc_exit(ipu); 1024 ipu_dmfc_exit(ipu);
982 ipu_dc_exit(ipu); 1025 ipu_dc_exit(ipu);
@@ -1001,6 +1044,7 @@ static void platform_device_unregister_children(struct platform_device *pdev)
1001struct ipu_platform_reg { 1044struct ipu_platform_reg {
1002 struct ipu_client_platformdata pdata; 1045 struct ipu_client_platformdata pdata;
1003 const char *name; 1046 const char *name;
1047 int reg_offset;
1004}; 1048};
1005 1049
1006static const struct ipu_platform_reg client_reg[] = { 1050static const struct ipu_platform_reg client_reg[] = {
@@ -1022,13 +1066,29 @@ static const struct ipu_platform_reg client_reg[] = {
1022 .dma[1] = -EINVAL, 1066 .dma[1] = -EINVAL,
1023 }, 1067 },
1024 .name = "imx-ipuv3-crtc", 1068 .name = "imx-ipuv3-crtc",
1069 }, {
1070 .pdata = {
1071 .csi = 0,
1072 .dma[0] = IPUV3_CHANNEL_CSI0,
1073 .dma[1] = -EINVAL,
1074 },
1075 .reg_offset = IPU_CM_CSI0_REG_OFS,
1076 .name = "imx-ipuv3-camera",
1077 }, {
1078 .pdata = {
1079 .csi = 1,
1080 .dma[0] = IPUV3_CHANNEL_CSI1,
1081 .dma[1] = -EINVAL,
1082 },
1083 .reg_offset = IPU_CM_CSI1_REG_OFS,
1084 .name = "imx-ipuv3-camera",
1025 }, 1085 },
1026}; 1086};
1027 1087
1028static DEFINE_MUTEX(ipu_client_id_mutex); 1088static DEFINE_MUTEX(ipu_client_id_mutex);
1029static int ipu_client_id; 1089static int ipu_client_id;
1030 1090
1031static int ipu_add_client_devices(struct ipu_soc *ipu) 1091static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1032{ 1092{
1033 struct device *dev = ipu->dev; 1093 struct device *dev = ipu->dev;
1034 unsigned i; 1094 unsigned i;
@@ -1042,9 +1102,19 @@ static int ipu_add_client_devices(struct ipu_soc *ipu)
1042 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1102 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1043 const struct ipu_platform_reg *reg = &client_reg[i]; 1103 const struct ipu_platform_reg *reg = &client_reg[i];
1044 struct platform_device *pdev; 1104 struct platform_device *pdev;
1045 1105 struct resource res;
1046 pdev = platform_device_register_data(dev, reg->name, 1106
1047 id++, &reg->pdata, sizeof(reg->pdata)); 1107 if (reg->reg_offset) {
1108 memset(&res, 0, sizeof(res));
1109 res.flags = IORESOURCE_MEM;
1110 res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
1111 res.end = res.start + PAGE_SIZE - 1;
1112 pdev = platform_device_register_resndata(dev, reg->name,
1113 id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
1114 } else {
1115 pdev = platform_device_register_data(dev, reg->name,
1116 id++, &reg->pdata, sizeof(reg->pdata));
1117 }
1048 1118
1049 if (IS_ERR(pdev)) 1119 if (IS_ERR(pdev))
1050 goto err_register; 1120 goto err_register;
@@ -1241,7 +1311,7 @@ static int ipu_probe(struct platform_device *pdev)
1241 if (ret) 1311 if (ret)
1242 goto failed_submodules_init; 1312 goto failed_submodules_init;
1243 1313
1244 ret = ipu_add_client_devices(ipu); 1314 ret = ipu_add_client_devices(ipu, ipu_base);
1245 if (ret) { 1315 if (ret) {
1246 dev_err(&pdev->dev, "adding client devices failed with %d\n", 1316 dev_err(&pdev->dev, "adding client devices failed with %d\n",
1247 ret); 1317 ret);
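Aside: the new camera client devices are registered with platform_device_register_resndata() so each child carries a memory resource covering its CSI register window in addition to platform data. A condensed sketch of that call; the child name, offset handling, and pdata payload here are illustrative only:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>

static struct platform_device *register_child(struct device *parent,
					      unsigned long base, int id)
{
	struct resource res;
	u32 pdata = 0;	/* stand-in for the real platform data struct */

	memset(&res, 0, sizeof(res));
	res.flags = IORESOURCE_MEM;
	res.start = base;			/* register window start */
	res.end = res.start + PAGE_SIZE - 1;	/* one page, as above */

	/* One resource, plus platform data copied into the new device. */
	return platform_device_register_resndata(parent, "my-child", id,
						 &res, 1, &pdata,
						 sizeof(pdata));
}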
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 784a4a13eac3..2326c752d89b 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -21,8 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/io.h> 22#include <linux/io.h>
23 23
24#include "../imx-drm.h" 24#include <video/imx-ipu-v3.h>
25#include "imx-ipu-v3.h"
26#include "ipu-prv.h" 25#include "ipu-prv.h"
27 26
28#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2) 27#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2)
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 849b3e120ef0..c490ba4384fc 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -20,7 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22 22
23#include "imx-ipu-v3.h" 23#include <video/imx-ipu-v3.h>
24#include "ipu-prv.h" 24#include "ipu-prv.h"
25 25
26struct ipu_di { 26struct ipu_di {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 59f182b28fc1..042c3958e2a0 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -17,7 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/io.h> 18#include <linux/io.h>
19 19
20#include "imx-ipu-v3.h" 20#include <video/imx-ipu-v3.h>
21#include "ipu-prv.h" 21#include "ipu-prv.h"
22 22
23#define DMFC_RD_CHAN 0x0000 23#define DMFC_RD_CHAN 0x0000
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index d90f82a87d19..98686edbcdbb 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -19,7 +19,7 @@
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/err.h> 20#include <linux/err.h>
21 21
22#include "imx-ipu-v3.h" 22#include <video/imx-ipu-v3.h>
23#include "ipu-prv.h" 23#include "ipu-prv.h"
24 24
25#define DP_SYNC 0 25#define DP_SYNC 0
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index bfc1b3366488..c93f50ec04f7 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -22,7 +22,7 @@ struct ipu_soc;
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include "imx-ipu-v3.h" 25#include <video/imx-ipu-v3.h>
26 26
27#define IPUV3_CHANNEL_CSI0 0 27#define IPUV3_CHANNEL_CSI0 0
28#define IPUV3_CHANNEL_CSI1 1 28#define IPUV3_CHANNEL_CSI1 1
@@ -151,6 +151,8 @@ struct ipuv3_channel {
151struct ipu_dc_priv; 151struct ipu_dc_priv;
152struct ipu_dmfc_priv; 152struct ipu_dmfc_priv;
153struct ipu_di; 153struct ipu_di;
154struct ipu_smfc_priv;
155
154struct ipu_devtype; 156struct ipu_devtype;
155 157
156struct ipu_soc { 158struct ipu_soc {
@@ -178,6 +180,7 @@ struct ipu_soc {
178 struct ipu_dp_priv *dp_priv; 180 struct ipu_dp_priv *dp_priv;
179 struct ipu_dmfc_priv *dmfc_priv; 181 struct ipu_dmfc_priv *dmfc_priv;
180 struct ipu_di *di_priv[2]; 182 struct ipu_di *di_priv[2];
183 struct ipu_smfc_priv *smfc_priv;
181}; 184};
182 185
183void ipu_srm_dp_sync_update(struct ipu_soc *ipu); 186void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
@@ -206,4 +209,7 @@ void ipu_dc_exit(struct ipu_soc *ipu);
206int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base); 209int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
207void ipu_cpmem_exit(struct ipu_soc *ipu); 210void ipu_cpmem_exit(struct ipu_soc *ipu);
208 211
212int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
213void ipu_smfc_exit(struct ipu_soc *ipu);
214
209#endif /* __IPU_PRV_H__ */ 215#endif /* __IPU_PRV_H__ */
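Aside: the new ipu_smfc_init()/ipu_smfc_exit() pair slots into the staged-initialization pattern used by ipu_submodules_init() above: a failing stage unwinds every earlier stage in reverse order through a chain of labels. In outline, with placeholder submodules:

#include <linux/errno.h>

/* Placeholder submodules; each init can fail, each has a matching exit. */
static int init_a(void) { return 0; }
static void exit_a(void) { }
static int init_b(void) { return 0; }
static void exit_b(void) { }
static int init_c(void) { return 0; }

static int submodules_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = init_b();
	if (ret)
		goto err_b;

	ret = init_c();
	if (ret)
		goto err_c;

	return 0;

err_c:
	exit_b();		/* unwind in reverse order of init */
err_b:
	exit_a();
	return ret;
}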
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
new file mode 100644
index 000000000000..e4f85ad286fc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11#define DEBUG
12#include <linux/export.h>
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/io.h>
16#include <linux/errno.h>
17#include <linux/spinlock.h>
18#include <linux/delay.h>
19#include <linux/clk.h>
20#include <video/imx-ipu-v3.h>
21
22#include "ipu-prv.h"
23
24struct ipu_smfc_priv {
25 void __iomem *base;
26 spinlock_t lock;
27};
28
29/* SMFC Registers */
30#define SMFC_MAP 0x0000
31#define SMFC_WMC 0x0004
32#define SMFC_BS 0x0008
33
34int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize)
35{
36 struct ipu_smfc_priv *smfc = ipu->smfc_priv;
37 unsigned long flags;
38 u32 val, shift;
39
40 spin_lock_irqsave(&smfc->lock, flags);
41
42 shift = channel * 4;
43 val = readl(smfc->base + SMFC_BS);
44 val &= ~(0xf << shift);
45 val |= burstsize << shift;
46 writel(val, smfc->base + SMFC_BS);
47
48 spin_unlock_irqrestore(&smfc->lock, flags);
49
50 return 0;
51}
52EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
53
54int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id)
55{
56 struct ipu_smfc_priv *smfc = ipu->smfc_priv;
57 unsigned long flags;
58 u32 val, shift;
59
60 spin_lock_irqsave(&smfc->lock, flags);
61
62 shift = channel * 3;
63 val = readl(smfc->base + SMFC_MAP);
64 val &= ~(0x7 << shift);
65 val |= ((csi_id << 2) | mipi_id) << shift;
66 writel(val, smfc->base + SMFC_MAP);
67
68 spin_unlock_irqrestore(&smfc->lock, flags);
69
70 return 0;
71}
72EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
73
74int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
75 unsigned long base)
76{
77 struct ipu_smfc_priv *smfc;
78
79 smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL);
80 if (!smfc)
81 return -ENOMEM;
82
83 ipu->smfc_priv = smfc;
84 spin_lock_init(&smfc->lock);
85
86 smfc->base = devm_ioremap(dev, base, PAGE_SIZE);
87 if (!smfc->base)
88 return -ENOMEM;
89
90 pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base);
91
92 return 0;
93}
94
95void ipu_smfc_exit(struct ipu_soc *ipu)
96{
97}
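Aside: both SMFC helpers above share one shape: take the spinlock with interrupts saved, read-modify-write a per-channel bitfield at a computed shift, release. A generic sketch of that field update (the lock and field width here are illustrative):

#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(reg_lock);

/* Replace a 4-bit field at channel * 4 within a shared register. */
static void set_channel_field(void __iomem *reg, int channel, u32 field)
{
	unsigned long flags;
	u32 val, shift = channel * 4;

	spin_lock_irqsave(&reg_lock, flags);	/* safe from IRQ context too */

	val = readl(reg);
	val &= ~(0xf << shift);			/* clear the old field */
	val |= (field & 0xf) << shift;		/* install the new one */
	writel(val, reg);

	spin_unlock_irqrestore(&reg_lock, flags);
}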
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index ec0ae2d1686a..6866448083b2 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
623 ret = dev->bus->pm->runtime_suspend(dev); 623 ret = dev->bus->pm->runtime_suspend(dev);
624 if (ret) 624 if (ret)
625 return ret; 625 return ret;
626 626 if (vgasr_priv.handler->switchto)
627 vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
627 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); 628 vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
628 return 0; 629 return 0;
629} 630}
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index c6e8ba7b3e4e..82fb758a29bc 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -39,19 +39,10 @@ config DRM_IMX_LDB
39 Choose this to enable the internal LVDS Display Bridge (LDB) 39 Choose this to enable the internal LVDS Display Bridge (LDB)
40 found on i.MX53 and i.MX6 processors. 40 found on i.MX53 and i.MX6 processors.
41 41
42config DRM_IMX_IPUV3_CORE
43 tristate "IPUv3 core support"
44 depends on DRM_IMX
45 depends on RESET_CONTROLLER
46 help
47 Choose this if you have a i.MX5/6 system and want
48 to use the IPU. This option only enables IPU base
49 support.
50
51config DRM_IMX_IPUV3 42config DRM_IMX_IPUV3
52 tristate "DRM Support for i.MX IPUv3" 43 tristate "DRM Support for i.MX IPUv3"
53 depends on DRM_IMX 44 depends on DRM_IMX
54 depends on DRM_IMX_IPUV3_CORE 45 depends on IMX_IPUV3_CORE
55 help 46 help
56 Choose this if you have an i.MX5 or i.MX6 processor. 47 Choose this if you have an i.MX5 or i.MX6 processor.
57 48
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 129e3a3f59f1..582c438d8cbd 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DRM_IMX) += imxdrm.o
6obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o 6obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
7obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o 7obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o 8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
9obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
10 9
11imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o 10imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
12obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o 11obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index c270c9ae6d27..def8280d7ee6 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -200,13 +200,6 @@ static const struct file_operations imx_drm_driver_fops = {
200 .llseek = noop_llseek, 200 .llseek = noop_llseek,
201}; 201};
202 202
203int imx_drm_connector_mode_valid(struct drm_connector *connector,
204 struct drm_display_mode *mode)
205{
206 return MODE_OK;
207}
208EXPORT_SYMBOL(imx_drm_connector_mode_valid);
209
210void imx_drm_connector_destroy(struct drm_connector *connector) 203void imx_drm_connector_destroy(struct drm_connector *connector)
211{ 204{
212 drm_sysfs_connector_remove(connector); 205 drm_sysfs_connector_remove(connector);
@@ -305,7 +298,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
305 dev_err(drm->dev, 298 dev_err(drm->dev,
306 "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n", 299 "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
307 connector->base.id, 300 connector->base.id,
308 drm_get_connector_name(connector), ret); 301 connector->name, ret);
309 goto err_unbind; 302 goto err_unbind;
310 } 303 }
311 } 304 }
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/staging/imx-drm/imx-drm.h
index a322bac55414..7453ae00c412 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/staging/imx-drm/imx-drm.h
@@ -50,8 +50,6 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
50int imx_drm_encoder_parse_of(struct drm_device *drm, 50int imx_drm_encoder_parse_of(struct drm_device *drm,
51 struct drm_encoder *encoder, struct device_node *np); 51 struct drm_encoder *encoder, struct device_node *np);
52 52
53int imx_drm_connector_mode_valid(struct drm_connector *connector,
54 struct drm_display_mode *mode);
55void imx_drm_connector_destroy(struct drm_connector *connector); 53void imx_drm_connector_destroy(struct drm_connector *connector);
56void imx_drm_encoder_destroy(struct drm_encoder *encoder); 54void imx_drm_encoder_destroy(struct drm_encoder *encoder);
57 55
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index 1b440483f28f..18c9ccd460b7 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -27,8 +27,8 @@
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/drm_edid.h> 28#include <drm/drm_edid.h>
29#include <drm/drm_encoder_slave.h> 29#include <drm/drm_encoder_slave.h>
30#include <video/imx-ipu-v3.h>
30 31
31#include "ipu-v3/imx-ipu-v3.h"
32#include "imx-hdmi.h" 32#include "imx-hdmi.h"
33#include "imx-drm.h" 33#include "imx-drm.h"
34 34
@@ -1490,7 +1490,6 @@ static struct drm_connector_funcs imx_hdmi_connector_funcs = {
1490 1490
1491static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = { 1491static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
1492 .get_modes = imx_hdmi_connector_get_modes, 1492 .get_modes = imx_hdmi_connector_get_modes,
1493 .mode_valid = imx_drm_connector_mode_valid,
1494 .best_encoder = imx_hdmi_connector_best_encoder, 1493 .best_encoder = imx_hdmi_connector_best_encoder,
1495}; 1494};
1496 1495
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index fe4c1ef4e7a5..7e3f019d7e72 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -317,7 +317,6 @@ static struct drm_connector_funcs imx_ldb_connector_funcs = {
317static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { 317static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
318 .get_modes = imx_ldb_connector_get_modes, 318 .get_modes = imx_ldb_connector_get_modes,
319 .best_encoder = imx_ldb_connector_best_encoder, 319 .best_encoder = imx_ldb_connector_best_encoder,
320 .mode_valid = imx_drm_connector_mode_valid,
321}; 320};
322 321
323static struct drm_encoder_funcs imx_ldb_encoder_funcs = { 322static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index a23f4f773146..c628fcdc22ae 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -30,8 +30,8 @@
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include <drm/drm_fb_helper.h> 31#include <drm/drm_fb_helper.h>
32#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
33#include <video/imx-ipu-v3.h>
33 34
34#include "ipu-v3/imx-ipu-v3.h"
35#include "imx-drm.h" 35#include "imx-drm.h"
36 36
37#define TVE_COM_CONF_REG 0x00 37#define TVE_COM_CONF_REG 0x00
@@ -249,11 +249,6 @@ static int imx_tve_connector_mode_valid(struct drm_connector *connector,
249{ 249{
250 struct imx_tve *tve = con_to_tve(connector); 250 struct imx_tve *tve = con_to_tve(connector);
251 unsigned long rate; 251 unsigned long rate;
252 int ret;
253
254 ret = imx_drm_connector_mode_valid(connector, mode);
255 if (ret != MODE_OK)
256 return ret;
257 252
258 /* pixel clock with 2x oversampling */ 253 /* pixel clock with 2x oversampling */
259 rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000; 254 rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
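Aside: with the generic mode_valid helper gone, imx_tve_connector_mode_valid() keeps only its real check: round the 2x-oversampled pixel clock through the clock framework and reject modes the clock tree cannot produce. The hunk ends before the comparison, so the return policy in this sketch is an assumption for illustration, not necessarily the driver's:

#include <linux/clk.h>
#include <drm/drm_modes.h>

static int tve_mode_ok(struct clk *clk, const struct drm_display_mode *mode)
{
	unsigned long rate;

	/* mode->clock is in kHz; request 2x for oversampling, then halve */
	rate = clk_round_rate(clk, 2000UL * mode->clock) / 2000;

	/* assumed policy: only an exact match is acceptable */
	return (rate == mode->clock) ? MODE_OK : MODE_CLOCK_RANGE;
}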
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 47bec5e17358..720868bff35b 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -30,7 +30,7 @@
30#include <drm/drm_gem_cma_helper.h> 30#include <drm/drm_gem_cma_helper.h>
31#include <drm/drm_fb_cma_helper.h> 31#include <drm/drm_fb_cma_helper.h>
32 32
33#include "ipu-v3/imx-ipu-v3.h" 33#include <video/imx-ipu-v3.h>
34#include "imx-drm.h" 34#include "imx-drm.h"
35#include "ipuv3-plane.h" 35#include "ipuv3-plane.h"
36 36
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index 5697e59ddf1d..6f393a11f44d 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -17,7 +17,7 @@
17#include <drm/drm_fb_cma_helper.h> 17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h> 18#include <drm/drm_gem_cma_helper.h>
19 19
20#include "ipu-v3/imx-ipu-v3.h" 20#include "video/imx-ipu-v3.h"
21#include "ipuv3-plane.h" 21#include "ipuv3-plane.h"
22 22
23#define to_ipu_plane(x) container_of(x, struct ipu_plane, base) 23#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index eaf4dda1a0c4..b5678328fc40 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -148,7 +148,6 @@ static struct drm_connector_funcs imx_pd_connector_funcs = {
148static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { 148static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
149 .get_modes = imx_pd_connector_get_modes, 149 .get_modes = imx_pd_connector_get_modes,
150 .best_encoder = imx_pd_connector_best_encoder, 150 .best_encoder = imx_pd_connector_best_encoder,
151 .mode_valid = imx_drm_connector_mode_valid,
152}; 151};
153 152
154static struct drm_encoder_funcs imx_pd_encoder_funcs = { 153static struct drm_encoder_funcs imx_pd_encoder_funcs = {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c7b4f0f927b1..8bf495ffb020 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -20,6 +20,7 @@ source "drivers/char/agp/Kconfig"
20source "drivers/gpu/vga/Kconfig" 20source "drivers/gpu/vga/Kconfig"
21 21
22source "drivers/gpu/host1x/Kconfig" 22source "drivers/gpu/host1x/Kconfig"
23source "drivers/gpu/ipu-v3/Kconfig"
23 24
24menu "Direct Rendering Manager" 25menu "Direct Rendering Manager"
25source "drivers/gpu/drm/Kconfig" 26source "drivers/gpu/drm/Kconfig"
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a7c2a862b4f4..8af71a8e2c00 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -143,11 +143,6 @@ int drm_err(const char *func, const char *format, ...);
143#define DRIVER_PRIME 0x4000 143#define DRIVER_PRIME 0x4000
144#define DRIVER_RENDER 0x8000 144#define DRIVER_RENDER 0x8000
145 145
146#define DRIVER_BUS_PCI 0x1
147#define DRIVER_BUS_PLATFORM 0x2
148#define DRIVER_BUS_USB 0x3
149#define DRIVER_BUS_HOST1X 0x4
150
151/***********************************************************************/ 146/***********************************************************************/
152/** \name Begin the DRM... */ 147/** \name Begin the DRM... */
153/*@{*/ 148/*@{*/
@@ -239,8 +234,6 @@ int drm_err(const char *func, const char *format, ...);
239/** \name Internal types and structures */ 234/** \name Internal types and structures */
240/*@{*/ 235/*@{*/
241 236
242#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
243
244#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 237#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
245 238
246/** 239/**
@@ -731,13 +724,7 @@ struct drm_master {
731#define DRM_SCANOUTPOS_ACCURATE (1 << 2) 724#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
732 725
733struct drm_bus { 726struct drm_bus {
734 int bus_type;
735 int (*get_irq)(struct drm_device *dev);
736 const char *(*get_name)(struct drm_device *dev);
737 int (*set_busid)(struct drm_device *dev, struct drm_master *master); 727 int (*set_busid)(struct drm_device *dev, struct drm_master *master);
738 int (*set_unique)(struct drm_device *dev, struct drm_master *master,
739 struct drm_unique *unique);
740 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
741}; 728};
742 729
743/** 730/**
@@ -974,11 +961,6 @@ struct drm_driver {
974 const struct drm_ioctl_desc *ioctls; 961 const struct drm_ioctl_desc *ioctls;
975 int num_ioctls; 962 int num_ioctls;
976 const struct file_operations *fops; 963 const struct file_operations *fops;
977 union {
978 struct pci_driver *pci;
979 struct platform_device *platform_device;
980 struct usb_driver *usb;
981 } kdriver;
982 struct drm_bus *bus; 964 struct drm_bus *bus;
983 965
984 /* List of devices hanging off this driver with stealth attach. */ 966 /* List of devices hanging off this driver with stealth attach. */
@@ -1040,14 +1022,17 @@ struct drm_pending_vblank_event {
1040}; 1022};
1041 1023
1042struct drm_vblank_crtc { 1024struct drm_vblank_crtc {
1025 struct drm_device *dev; /* pointer to the drm_device */
1043 wait_queue_head_t queue; /**< VBLANK wait queue */ 1026 wait_queue_head_t queue; /**< VBLANK wait queue */
1044 struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */ 1027 struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
1028 struct timer_list disable_timer; /* delayed disable timer */
1045 atomic_t count; /**< number of VBLANK interrupts */ 1029 atomic_t count; /**< number of VBLANK interrupts */
1046 atomic_t refcount; /* number of users of vblank interrupts per crtc */ 1030 atomic_t refcount; /* number of users of vblank interrupts per crtc */
1047 u32 last; /* protected by dev->vbl_lock, used */ 1031 u32 last; /* protected by dev->vbl_lock, used */
1048 /* for wraparound handling */ 1032 /* for wraparound handling */
1049 u32 last_wait; /* Last vblank seqno waited per CRTC */ 1033 u32 last_wait; /* Last vblank seqno waited per CRTC */
1050 unsigned int inmodeset; /* Display driver is setting mode */ 1034 unsigned int inmodeset; /* Display driver is setting mode */
1035 int crtc; /* crtc index */
1051 bool enabled; /* so we don't call enable more than 1036 bool enabled; /* so we don't call enable more than
1052 once per disable */ 1037 once per disable */
1053}; 1038};
@@ -1058,7 +1043,6 @@ struct drm_vblank_crtc {
1058 */ 1043 */
1059struct drm_device { 1044struct drm_device {
1060 struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ 1045 struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
1061 char *devname; /**< For /proc/interrupts */
1062 int if_version; /**< Highest interface version set */ 1046 int if_version; /**< Highest interface version set */
1063 1047
1064 /** \name Lifetime Management */ 1048 /** \name Lifetime Management */
@@ -1072,18 +1056,19 @@ struct drm_device {
1072 struct drm_minor *render; /**< Render node */ 1056 struct drm_minor *render; /**< Render node */
1073 atomic_t unplugged; /**< Flag whether dev is dead */ 1057 atomic_t unplugged; /**< Flag whether dev is dead */
1074 struct inode *anon_inode; /**< inode for private address-space */ 1058 struct inode *anon_inode; /**< inode for private address-space */
1059 char *unique; /**< unique name of the device */
1075 /*@} */ 1060 /*@} */
1076 1061
1077 /** \name Locks */ 1062 /** \name Locks */
1078 /*@{ */ 1063 /*@{ */
1079 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
1080 struct mutex struct_mutex; /**< For others */ 1064 struct mutex struct_mutex; /**< For others */
1081 struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ 1065 struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
1082 /*@} */ 1066 /*@} */
1083 1067
1084 /** \name Usage Counters */ 1068 /** \name Usage Counters */
1085 /*@{ */ 1069 /*@{ */
1086 int open_count; /**< Outstanding files open */ 1070 int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
1071 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
1087 int buf_use; /**< Buffers in use -- cannot alloc */ 1072 int buf_use; /**< Buffers in use -- cannot alloc */
1088 atomic_t buf_alloc; /**< Buffer allocation in progress */ 1073 atomic_t buf_alloc; /**< Buffer allocation in progress */
1089 /*@} */ 1074 /*@} */
@@ -1114,6 +1099,8 @@ struct drm_device {
1114 /** \name Context support */ 1099 /** \name Context support */
1115 /*@{ */ 1100 /*@{ */
1116 bool irq_enabled; /**< True if irq handler is enabled */ 1101 bool irq_enabled; /**< True if irq handler is enabled */
1102 int irq;
1103
1117 __volatile__ long context_flag; /**< Context swapping flag */ 1104 __volatile__ long context_flag; /**< Context swapping flag */
1118 int last_context; /**< Last current context */ 1105 int last_context; /**< Last current context */
1119 /*@} */ 1106 /*@} */
@@ -1134,7 +1121,6 @@ struct drm_device {
1134 1121
1135 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ 1122 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
1136 spinlock_t vbl_lock; 1123 spinlock_t vbl_lock;
1137 struct timer_list vblank_disable_timer;
1138 1124
1139 u32 max_vblank_count; /**< size of vblank counter register */ 1125 u32 max_vblank_count; /**< size of vblank counter register */
1140 1126
@@ -1186,11 +1172,6 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
1186 return ((dev->driver->driver_features & feature) ? 1 : 0); 1172 return ((dev->driver->driver_features & feature) ? 1 : 0);
1187} 1173}
1188 1174
1189static inline int drm_dev_to_irq(struct drm_device *dev)
1190{
1191 return dev->driver->bus->get_irq(dev);
1192}
1193
1194static inline void drm_device_set_unplugged(struct drm_device *dev) 1175static inline void drm_device_set_unplugged(struct drm_device *dev)
1195{ 1176{
1196 smp_wmb(); 1177 smp_wmb();
@@ -1204,11 +1185,6 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
1204 return ret; 1185 return ret;
1205} 1186}
1206 1187
1207static inline bool drm_modeset_is_locked(struct drm_device *dev)
1208{
1209 return mutex_is_locked(&dev->mode_config.mutex);
1210}
1211
1212static inline bool drm_is_render_client(const struct drm_file *file_priv) 1188static inline bool drm_is_render_client(const struct drm_file *file_priv)
1213{ 1189{
1214 return file_priv->minor->type == DRM_MINOR_RENDER; 1190 return file_priv->minor->type == DRM_MINOR_RENDER;
@@ -1310,7 +1286,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
1310/* Cache management (drm_cache.c) */ 1286/* Cache management (drm_cache.c) */
1311void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 1287void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1312void drm_clflush_sg(struct sg_table *st); 1288void drm_clflush_sg(struct sg_table *st);
1313void drm_clflush_virt_range(char *addr, unsigned long length); 1289void drm_clflush_virt_range(void *addr, unsigned long length);
1314 1290
1315 /* Locking IOCTL support (drm_lock.h) */ 1291 /* Locking IOCTL support (drm_lock.h) */
1316extern int drm_lock(struct drm_device *dev, void *data, 1292extern int drm_lock(struct drm_device *dev, void *data,
@@ -1363,7 +1339,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
1363 /* IRQ support (drm_irq.h) */ 1339 /* IRQ support (drm_irq.h) */
1364extern int drm_control(struct drm_device *dev, void *data, 1340extern int drm_control(struct drm_device *dev, void *data,
1365 struct drm_file *file_priv); 1341 struct drm_file *file_priv);
1366extern int drm_irq_install(struct drm_device *dev); 1342extern int drm_irq_install(struct drm_device *dev, int irq);
1367extern int drm_irq_uninstall(struct drm_device *dev); 1343extern int drm_irq_uninstall(struct drm_device *dev);
1368 1344
1369extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 1345extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
@@ -1377,8 +1353,14 @@ extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
1377extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 1353extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
1378extern int drm_vblank_get(struct drm_device *dev, int crtc); 1354extern int drm_vblank_get(struct drm_device *dev, int crtc);
1379extern void drm_vblank_put(struct drm_device *dev, int crtc); 1355extern void drm_vblank_put(struct drm_device *dev, int crtc);
1356extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
1357extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
1380extern void drm_vblank_off(struct drm_device *dev, int crtc); 1358extern void drm_vblank_off(struct drm_device *dev, int crtc);
1359extern void drm_vblank_on(struct drm_device *dev, int crtc);
1360extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
1361extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
1381extern void drm_vblank_cleanup(struct drm_device *dev); 1362extern void drm_vblank_cleanup(struct drm_device *dev);
1363
1382extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 1364extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
1383 struct timeval *tvblank, unsigned flags); 1365 struct timeval *tvblank, unsigned flags);
1384extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 1366extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
@@ -1522,6 +1504,9 @@ extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1522 size_t align); 1504 size_t align);
1523extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1505extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1524extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1506extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1507extern int drm_pci_set_unique(struct drm_device *dev,
1508 struct drm_master *master,
1509 struct drm_unique *u);
1525 1510
1526 /* sysfs support (drm_sysfs.c) */ 1511 /* sysfs support (drm_sysfs.c) */
1527struct drm_sysfs_class; 1512struct drm_sysfs_class;
@@ -1631,6 +1616,7 @@ void drm_dev_ref(struct drm_device *dev);
1631void drm_dev_unref(struct drm_device *dev); 1616void drm_dev_unref(struct drm_device *dev);
1632int drm_dev_register(struct drm_device *dev, unsigned long flags); 1617int drm_dev_register(struct drm_device *dev, unsigned long flags);
1633void drm_dev_unregister(struct drm_device *dev); 1618void drm_dev_unregister(struct drm_device *dev);
1619int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);
1634 1620
1635struct drm_minor *drm_minor_acquire(unsigned int minor_id); 1621struct drm_minor *drm_minor_acquire(unsigned int minor_id);
1636void drm_minor_release(struct drm_minor *minor); 1622void drm_minor_release(struct drm_minor *minor);
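
The drmP.h hunks above make two caller-visible API changes: drm_irq_install() now takes the IRQ number as an explicit argument instead of deriving it from dev->pdev, and drm_dev_set_unique() gives non-PCI drivers a way to set the device's unique name. A hedged bind-path sketch; example_load(), pdev and the error handling are assumed, not taken from this diff:

static int example_load(struct drm_device *dev, struct platform_device *pdev)
{
	int ret;

	/* New: platform (non-PCI) drivers set their unique name directly. */
	ret = drm_dev_set_unique(dev, "platform:%s", dev_name(&pdev->dev));
	if (ret)
		return ret;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		return ret;

	/* The IRQ line is now passed in by the caller. */
	return drm_irq_install(dev, platform_get_irq(pdev, 0));
}

The drm_crtc_vblank_get()/drm_crtc_vblank_put() and drm_crtc_vblank_on()/drm_crtc_vblank_off() additions are per-CRTC wrappers around the existing index-based calls, so pageflip and modeset code can work with the struct drm_crtc pointer it already holds.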
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index e55fccbe7c42..251b75e6bf7a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -33,6 +33,7 @@
33#include <linux/hdmi.h> 33#include <linux/hdmi.h>
34#include <drm/drm_mode.h> 34#include <drm/drm_mode.h>
35#include <drm/drm_fourcc.h> 35#include <drm/drm_fourcc.h>
36#include <drm/drm_modeset_lock.h>
36 37
37struct drm_device; 38struct drm_device;
38struct drm_mode_set; 39struct drm_mode_set;
@@ -50,6 +51,7 @@ struct drm_clip_rect;
50#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 51#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
51#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 52#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
52#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd 53#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
54#define DRM_MODE_OBJECT_ANY 0
53 55
54struct drm_mode_object { 56struct drm_mode_object {
55 uint32_t id; 57 uint32_t id;
@@ -64,6 +66,15 @@ struct drm_object_properties {
64 uint64_t values[DRM_OBJECT_MAX_PROPERTY]; 66 uint64_t values[DRM_OBJECT_MAX_PROPERTY];
65}; 67};
66 68
69static inline int64_t U642I64(uint64_t val)
70{
71 return (int64_t)*((int64_t *)&val);
72}
73static inline uint64_t I642U64(int64_t val)
74{
75 return (uint64_t)*((uint64_t *)&val);
76}
77
67enum drm_connector_force { 78enum drm_connector_force {
68 DRM_FORCE_UNSPECIFIED, 79 DRM_FORCE_UNSPECIFIED,
69 DRM_FORCE_OFF, 80 DRM_FORCE_OFF,
@@ -110,6 +121,9 @@ struct drm_display_info {
110 enum subpixel_order subpixel_order; 121 enum subpixel_order subpixel_order;
111 u32 color_formats; 122 u32 color_formats;
112 123
124 /* Mask of supported hdmi deep color modes */
125 u8 edid_hdmi_dc_modes;
126
113 u8 cea_rev; 127 u8 cea_rev;
114}; 128};
115 129
@@ -190,10 +204,15 @@ struct drm_property {
190 char name[DRM_PROP_NAME_LEN]; 204 char name[DRM_PROP_NAME_LEN];
191 uint32_t num_values; 205 uint32_t num_values;
192 uint64_t *values; 206 uint64_t *values;
207 struct drm_device *dev;
193 208
194 struct list_head enum_blob_list; 209 struct list_head enum_blob_list;
195}; 210};
196 211
212void drm_modeset_lock_all(struct drm_device *dev);
213void drm_modeset_unlock_all(struct drm_device *dev);
214void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
215
197struct drm_crtc; 216struct drm_crtc;
198struct drm_connector; 217struct drm_connector;
199struct drm_encoder; 218struct drm_encoder;
@@ -269,6 +288,7 @@ struct drm_crtc_funcs {
269 * drm_crtc - central CRTC control structure 288 * drm_crtc - central CRTC control structure
270 * @dev: parent DRM device 289 * @dev: parent DRM device
271 * @head: list management 290 * @head: list management
291 * @mutex: per-CRTC locking
272 * @base: base KMS object for ID tracking etc. 292 * @base: base KMS object for ID tracking etc.
273 * @primary: primary plane for this CRTC 293 * @primary: primary plane for this CRTC
274 * @cursor: cursor plane for this CRTC 294 * @cursor: cursor plane for this CRTC
@@ -303,7 +323,7 @@ struct drm_crtc {
303 * state, ...) and a write lock for everything which can be updated 323 * state, ...) and a write lock for everything which can be updated
304 * without a full modeset (fb, cursor data, ...) 324 * without a full modeset (fb, cursor data, ...)
305 */ 325 */
306 struct mutex mutex; 326 struct drm_modeset_lock mutex;
307 327
308 struct drm_mode_object base; 328 struct drm_mode_object base;
309 329
@@ -400,6 +420,7 @@ struct drm_encoder_funcs {
400 * @dev: parent DRM device 420 * @dev: parent DRM device
401 * @head: list management 421 * @head: list management
402 * @base: base KMS object 422 * @base: base KMS object
423 * @name: encoder name
403 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h 424 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
404 * @possible_crtcs: bitmask of potential CRTC bindings 425 * @possible_crtcs: bitmask of potential CRTC bindings
405 * @possible_clones: bitmask of potential sibling encoders for cloning 426 * @possible_clones: bitmask of potential sibling encoders for cloning
@@ -416,6 +437,7 @@ struct drm_encoder {
416 struct list_head head; 437 struct list_head head;
417 438
418 struct drm_mode_object base; 439 struct drm_mode_object base;
440 char *name;
419 int encoder_type; 441 int encoder_type;
420 uint32_t possible_crtcs; 442 uint32_t possible_crtcs;
421 uint32_t possible_clones; 443 uint32_t possible_clones;
@@ -444,6 +466,7 @@ struct drm_encoder {
444 * @attr: sysfs attributes 466 * @attr: sysfs attributes
445 * @head: list management 467 * @head: list management
446 * @base: base KMS object 468 * @base: base KMS object
469 * @name: connector name
447 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h 470 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
448 * @connector_type_id: index into connector type enum 471 * @connector_type_id: index into connector type enum
449 * @interlace_allowed: can this connector handle interlaced modes? 472 * @interlace_allowed: can this connector handle interlaced modes?
@@ -482,6 +505,7 @@ struct drm_connector {
482 505
483 struct drm_mode_object base; 506 struct drm_mode_object base;
484 507
508 char *name;
485 int connector_type; 509 int connector_type;
486 int connector_type_id; 510 int connector_type_id;
487 bool interlace_allowed; 511 bool interlace_allowed;
@@ -723,6 +747,8 @@ struct drm_mode_group {
723 */ 747 */
724struct drm_mode_config { 748struct drm_mode_config {
725 struct mutex mutex; /* protects configuration (mode lists etc.) */ 749 struct mutex mutex; /* protects configuration (mode lists etc.) */
750 struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
751 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
726 struct mutex idr_mutex; /* for IDR management */ 752 struct mutex idr_mutex; /* for IDR management */
727 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 753 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
728 /* this is limited to one for now */ 754 /* this is limited to one for now */
@@ -823,10 +849,6 @@ struct drm_prop_enum_list {
823 char *name; 849 char *name;
824}; 850};
825 851
826extern void drm_modeset_lock_all(struct drm_device *dev);
827extern void drm_modeset_unlock_all(struct drm_device *dev);
828extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
829
830extern int drm_crtc_init_with_planes(struct drm_device *dev, 852extern int drm_crtc_init_with_planes(struct drm_device *dev,
831 struct drm_crtc *crtc, 853 struct drm_crtc *crtc,
832 struct drm_plane *primary, 854 struct drm_plane *primary,
@@ -905,7 +927,6 @@ extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
905 927
906extern void drm_encoder_cleanup(struct drm_encoder *encoder); 928extern void drm_encoder_cleanup(struct drm_encoder *encoder);
907 929
908extern const char *drm_get_connector_name(const struct drm_connector *connector);
909extern const char *drm_get_connector_status_name(enum drm_connector_status status); 930extern const char *drm_get_connector_status_name(enum drm_connector_status status);
910extern const char *drm_get_subpixel_order_name(enum subpixel_order order); 931extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
911extern const char *drm_get_dpms_name(int val); 932extern const char *drm_get_dpms_name(int val);
@@ -915,6 +936,7 @@ extern const char *drm_get_tv_subconnector_name(int val);
915extern const char *drm_get_tv_select_name(int val); 936extern const char *drm_get_tv_select_name(int val);
916extern void drm_fb_release(struct drm_file *file_priv); 937extern void drm_fb_release(struct drm_file *file_priv);
917extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 938extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
939extern void drm_mode_group_destroy(struct drm_mode_group *group);
918extern bool drm_probe_ddc(struct i2c_adapter *adapter); 940extern bool drm_probe_ddc(struct i2c_adapter *adapter);
919extern struct edid *drm_get_edid(struct drm_connector *connector, 941extern struct edid *drm_get_edid(struct drm_connector *connector,
920 struct i2c_adapter *adapter); 942 struct i2c_adapter *adapter);
@@ -926,6 +948,23 @@ extern void drm_mode_config_cleanup(struct drm_device *dev);
926 948
927extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, 949extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
928 struct edid *edid); 950 struct edid *edid);
951
952static inline bool drm_property_type_is(struct drm_property *property,
953 uint32_t type)
954{
955 /* instanceof for props.. handles extended type vs original types: */
956 if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
957 return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
958 return property->flags & type;
959}
960
961static inline bool drm_property_type_valid(struct drm_property *property)
962{
963 if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
964 return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
965 return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
966}
967
929extern int drm_object_property_set_value(struct drm_mode_object *obj, 968extern int drm_object_property_set_value(struct drm_mode_object *obj,
930 struct drm_property *property, 969 struct drm_property *property,
931 uint64_t val); 970 uint64_t val);
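
With extended property types occupying a multi-bit field, a plain flags test no longer works; drm_property_type_is() is the "instanceof" replacement and drm_property_type_valid() rejects contradictory flag combinations. A minimal sketch of the intended use (example_is_object_prop() is illustrative, not from this series):

static bool example_is_object_prop(struct drm_property *prop)
{
	if (!drm_property_type_valid(prop))
		return false;

	/* Works for both legacy one-bit-per-type flags and the new
	 * DRM_MODE_PROP_EXTENDED_TYPE encoding. */
	return drm_property_type_is(prop, DRM_MODE_PROP_OBJECT);
}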
@@ -959,6 +998,11 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
959struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, 998struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
960 const char *name, 999 const char *name,
961 uint64_t min, uint64_t max); 1000 uint64_t min, uint64_t max);
1001struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
1002 int flags, const char *name,
1003 int64_t min, int64_t max);
1004struct drm_property *drm_property_create_object(struct drm_device *dev,
1005 int flags, const char *name, uint32_t type);
962extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); 1006extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
963extern int drm_property_add_enum(struct drm_property *property, int index, 1007extern int drm_property_add_enum(struct drm_property *property, int index,
964 uint64_t value, const char *name); 1008 uint64_t value, const char *name);
@@ -967,7 +1011,6 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
967 char *formats[]); 1011 char *formats[]);
968extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1012extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
969extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1013extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
970extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
971 1014
972extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 1015extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
973 struct drm_encoder *encoder); 1016 struct drm_encoder *encoder);
@@ -975,6 +1018,7 @@ extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
975 int gamma_size); 1018 int gamma_size);
976extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, 1019extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
977 uint32_t id, uint32_t type); 1020 uint32_t id, uint32_t type);
1021
978/* IOCTLs */ 1022/* IOCTLs */
979extern int drm_mode_getresources(struct drm_device *dev, 1023extern int drm_mode_getresources(struct drm_device *dev,
980 void *data, struct drm_file *file_priv); 1024 void *data, struct drm_file *file_priv);
@@ -1020,6 +1064,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
1020extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 1064extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
1021 void *data, struct drm_file *file_priv); 1065 void *data, struct drm_file *file_priv);
1022extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 1066extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
1067extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
1023extern bool drm_detect_hdmi_monitor(struct edid *edid); 1068extern bool drm_detect_hdmi_monitor(struct edid *edid);
1024extern bool drm_detect_monitor_audio(struct edid *edid); 1069extern bool drm_detect_monitor_audio(struct edid *edid);
1025extern bool drm_rgb_quant_range_selectable(struct edid *edid); 1070extern bool drm_rgb_quant_range_selectable(struct edid *edid);
@@ -1057,6 +1102,15 @@ extern int drm_format_vert_chroma_subsampling(uint32_t format);
1057extern const char *drm_get_format_name(uint32_t format); 1102extern const char *drm_get_format_name(uint32_t format);
1058 1103
1059/* Helpers */ 1104/* Helpers */
1105
1106static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
1107 uint32_t id)
1108{
1109 struct drm_mode_object *mo;
1110 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
1111 return mo ? obj_to_plane(mo) : NULL;
1112}
1113
1060static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, 1114static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
1061 uint32_t id) 1115 uint32_t id)
1062{ 1116{
@@ -1073,6 +1127,30 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
1073 return mo ? obj_to_encoder(mo) : NULL; 1127 return mo ? obj_to_encoder(mo) : NULL;
1074} 1128}
1075 1129
1130static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
1131 uint32_t id)
1132{
1133 struct drm_mode_object *mo;
1134 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
1135 return mo ? obj_to_connector(mo) : NULL;
1136}
1137
1138static inline struct drm_property *drm_property_find(struct drm_device *dev,
1139 uint32_t id)
1140{
1141 struct drm_mode_object *mo;
1142 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
1143 return mo ? obj_to_property(mo) : NULL;
1144}
1145
1146static inline struct drm_property_blob *
1147drm_property_blob_find(struct drm_device *dev, uint32_t id)
1148{
1149 struct drm_mode_object *mo;
1150 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
1151 return mo ? obj_to_blob(mo) : NULL;
1152}
1153
1076/* Plane list iterator for legacy (overlay only) planes. */ 1154/* Plane list iterator for legacy (overlay only) planes. */
1077#define drm_for_each_legacy_plane(plane, planelist) \ 1155#define drm_for_each_legacy_plane(plane, planelist) \
1078 list_for_each_entry(plane, planelist, head) \ 1156 list_for_each_entry(plane, planelist, head) \
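
The new drm_plane_find()/drm_connector_find()/drm_property_find()/drm_property_blob_find() inlines give each object type a typed lookup in place of open-coded drm_mode_object_find() calls, and they pair naturally with the U642I64() helper for the new signed-range properties. A hypothetical ioctl-side fragment, for illustration only:

static int example_lookup(struct drm_device *dev, uint32_t conn_id,
			  uint32_t prop_id, uint64_t raw_val)
{
	struct drm_connector *connector = drm_connector_find(dev, conn_id);
	struct drm_property *prop = drm_property_find(dev, prop_id);

	if (!connector || !prop)
		return -ENOENT;

	/* Signed-range values travel as u64 across the uapi and are
	 * reinterpreted with U642I64() on the kernel side. */
	if (drm_property_type_is(prop, DRM_MODE_PROP_SIGNED_RANGE))
		DRM_DEBUG_KMS("signed value %lld\n",
			      (long long)U642I64(raw_val));

	return 0;
}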
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 36a5febac2a6..a3d75fefd010 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -114,7 +114,7 @@ struct drm_encoder_helper_funcs {
114/** 114/**
115 * drm_connector_helper_funcs - helper operations for connectors 115 * drm_connector_helper_funcs - helper operations for connectors
116 * @get_modes: get mode list for this connector 116 * @get_modes: get mode list for this connector
117 * @mode_valid: is this mode valid on the given connector? 117 * @mode_valid (optional): is this mode valid on the given connector?
118 * 118 *
119 * The helper operations are called by the mid-layer CRTC helper. 119 * The helper operations are called by the mid-layer CRTC helper.
120 */ 120 */
@@ -165,6 +165,10 @@ extern void drm_helper_resume_force_mode(struct drm_device *dev);
165extern int drm_helper_probe_single_connector_modes(struct drm_connector 165extern int drm_helper_probe_single_connector_modes(struct drm_connector
166 *connector, uint32_t maxX, 166 *connector, uint32_t maxX,
167 uint32_t maxY); 167 uint32_t maxY);
168extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
169 *connector,
170 uint32_t maxX,
171 uint32_t maxY);
168extern void drm_kms_helper_poll_init(struct drm_device *dev); 172extern void drm_kms_helper_poll_init(struct drm_device *dev);
169extern void drm_kms_helper_poll_fini(struct drm_device *dev); 173extern void drm_kms_helper_poll_fini(struct drm_device *dev);
170extern bool drm_helper_hpd_irq_event(struct drm_device *dev); 174extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
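
The _nomerge variant apparently exists for drivers (nouveau in this series) that must keep duplicate modes distinct rather than merging their type bits, and it feeds the new merge_type_bits parameter of drm_mode_connector_list_update() further down. A hedged hook-up through the connector's ->fill_modes; example_fill_modes() is not part of this diff:

static int example_fill_modes(struct drm_connector *connector,
			      uint32_t max_w, uint32_t max_h)
{
	return drm_helper_probe_single_connector_modes_nomerge(connector,
							       max_w, max_h);
}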
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index cfcacec5b89d..a21568bf1514 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -37,6 +37,7 @@
37 * eDP: Embedded DisplayPort version 1 37 * eDP: Embedded DisplayPort version 1
38 * DPI: DisplayPort Interoperability Guideline v1.1a 38 * DPI: DisplayPort Interoperability Guideline v1.1a
39 * 1.2: DisplayPort 1.2 39 * 1.2: DisplayPort 1.2
40 * MST: Multistream Transport - part of DP 1.2a
40 * 41 *
41 * 1.2 formally includes both eDP and DPI definitions. 42 * 1.2 formally includes both eDP and DPI definitions.
42 */ 43 */
@@ -103,9 +104,14 @@
103#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ 104#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
104 105
105/* Multiple stream transport */ 106/* Multiple stream transport */
107#define DP_FAUX_CAP 0x020 /* 1.2 */
108# define DP_FAUX_CAP_1 (1 << 0)
109
106#define DP_MSTM_CAP 0x021 /* 1.2 */ 110#define DP_MSTM_CAP 0x021 /* 1.2 */
107# define DP_MST_CAP (1 << 0) 111# define DP_MST_CAP (1 << 0)
108 112
113#define DP_GUID 0x030 /* 1.2 */
114
109#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ 115#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
110# define DP_PSR_IS_SUPPORTED 1 116# define DP_PSR_IS_SUPPORTED 1
111#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ 117#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
@@ -221,6 +227,16 @@
221# define DP_PSR_CRC_VERIFICATION (1 << 2) 227# define DP_PSR_CRC_VERIFICATION (1 << 2)
222# define DP_PSR_FRAME_CAPTURE (1 << 3) 228# define DP_PSR_FRAME_CAPTURE (1 << 3)
223 229
230#define DP_ADAPTER_CTRL 0x1a0
231# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
232
233#define DP_BRANCH_DEVICE_CTRL 0x1a1
234# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
235
236#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
237#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
238#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
239
224#define DP_SINK_COUNT 0x200 240#define DP_SINK_COUNT 0x200
225/* prior to 1.2 bit 7 was reserved mbz */ 241/* prior to 1.2 bit 7 was reserved mbz */
226# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) 242# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
@@ -230,6 +246,9 @@
230# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) 246# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
231# define DP_AUTOMATED_TEST_REQUEST (1 << 1) 247# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
232# define DP_CP_IRQ (1 << 2) 248# define DP_CP_IRQ (1 << 2)
249# define DP_MCCS_IRQ (1 << 3)
250# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
251# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
233# define DP_SINK_SPECIFIC_IRQ (1 << 6) 252# define DP_SINK_SPECIFIC_IRQ (1 << 6)
234 253
235#define DP_LANE0_1_STATUS 0x202 254#define DP_LANE0_1_STATUS 0x202
@@ -291,9 +310,18 @@
291# define DP_TEST_NAK (1 << 1) 310# define DP_TEST_NAK (1 << 1)
292# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) 311# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
293 312
313#define DP_TEST_EDID_CHECKSUM 0x261
314
294#define DP_TEST_SINK 0x270 315#define DP_TEST_SINK 0x270
295#define DP_TEST_SINK_START (1 << 0) 316#define DP_TEST_SINK_START (1 << 0)
296 317
318#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
319# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
320# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
321
322#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
323/* up to ID_SLOT_63 at 0x2ff */
324
297#define DP_SOURCE_OUI 0x300 325#define DP_SOURCE_OUI 0x300
298#define DP_SINK_OUI 0x400 326#define DP_SINK_OUI 0x400
299#define DP_BRANCH_OUI 0x500 327#define DP_BRANCH_OUI 0x500
@@ -303,6 +331,21 @@
303# define DP_SET_POWER_D3 0x2 331# define DP_SET_POWER_D3 0x2
304# define DP_SET_POWER_MASK 0x3 332# define DP_SET_POWER_MASK 0x3
305 333
334#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
335#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
336#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
337#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
338
339#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
340/* 0-5 sink count */
341# define DP_SINK_COUNT_CP_READY (1 << 6)
342
343#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
344
345#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
346
347#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
348
306#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ 349#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
307# define DP_PSR_LINK_CRC_ERROR (1 << 0) 350# define DP_PSR_LINK_CRC_ERROR (1 << 0)
308# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) 351# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
@@ -319,6 +362,43 @@
319# define DP_PSR_SINK_INTERNAL_ERROR 7 362# define DP_PSR_SINK_INTERNAL_ERROR 7
320# define DP_PSR_SINK_STATE_MASK 0x07 363# define DP_PSR_SINK_STATE_MASK 0x07
321 364
365/* DP 1.2 Sideband message defines */
366/* peer device type - DP 1.2a Table 2-92 */
367#define DP_PEER_DEVICE_NONE 0x0
368#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
369#define DP_PEER_DEVICE_MST_BRANCHING 0x2
370#define DP_PEER_DEVICE_SST_SINK 0x3
371#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
372
373/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
374#define DP_LINK_ADDRESS 0x01
375#define DP_CONNECTION_STATUS_NOTIFY 0x02
376#define DP_ENUM_PATH_RESOURCES 0x10
377#define DP_ALLOCATE_PAYLOAD 0x11
378#define DP_QUERY_PAYLOAD 0x12
379#define DP_RESOURCE_STATUS_NOTIFY 0x13
380#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
381#define DP_REMOTE_DPCD_READ 0x20
382#define DP_REMOTE_DPCD_WRITE 0x21
383#define DP_REMOTE_I2C_READ 0x22
384#define DP_REMOTE_I2C_WRITE 0x23
385#define DP_POWER_UP_PHY 0x24
386#define DP_POWER_DOWN_PHY 0x25
387#define DP_SINK_EVENT_NOTIFY 0x30
388#define DP_QUERY_STREAM_ENC_STATUS 0x38
389
390/* DP 1.2 MST sideband nak reasons - table 2.84 */
391#define DP_NAK_WRITE_FAILURE 0x01
392#define DP_NAK_INVALID_READ 0x02
393#define DP_NAK_CRC_FAILURE 0x03
394#define DP_NAK_BAD_PARAM 0x04
395#define DP_NAK_DEFER 0x05
396#define DP_NAK_LINK_FAILURE 0x06
397#define DP_NAK_NO_RESOURCES 0x07
398#define DP_NAK_DPCD_FAIL 0x08
399#define DP_NAK_I2C_NAK 0x09
400#define DP_NAK_ALLOCATE_FAIL 0x0a
401
322#define MODE_I2C_START 1 402#define MODE_I2C_START 1
323#define MODE_I2C_WRITE 2 403#define MODE_I2C_WRITE 2
324#define MODE_I2C_READ 4 404#define MODE_I2C_READ 4
@@ -431,8 +511,10 @@ struct drm_dp_aux_msg {
431 511
432/** 512/**
433 * struct drm_dp_aux - DisplayPort AUX channel 513 * struct drm_dp_aux - DisplayPort AUX channel
514 * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
434 * @ddc: I2C adapter that can be used for I2C-over-AUX communication 515 * @ddc: I2C adapter that can be used for I2C-over-AUX communication
435 * @dev: pointer to struct device that is the parent for this AUX channel 516 * @dev: pointer to struct device that is the parent for this AUX channel
517 * @hw_mutex: internal mutex used for locking transfers
436 * @transfer: transfers a message representing a single AUX transaction 518 * @transfer: transfers a message representing a single AUX transaction
437 * 519 *
438 * The .dev field should be set to a pointer to the device that implements 520 * The .dev field should be set to a pointer to the device that implements
@@ -465,7 +547,7 @@ struct drm_dp_aux {
465 const char *name; 547 const char *name;
466 struct i2c_adapter ddc; 548 struct i2c_adapter ddc;
467 struct device *dev; 549 struct device *dev;
468 550 struct mutex hw_mutex;
469 ssize_t (*transfer)(struct drm_dp_aux *aux, 551 ssize_t (*transfer)(struct drm_dp_aux *aux,
470 struct drm_dp_aux_msg *msg); 552 struct drm_dp_aux_msg *msg);
471}; 553};
@@ -524,7 +606,7 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
524int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); 606int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
525int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); 607int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
526 608
527int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux); 609int drm_dp_aux_register(struct drm_dp_aux *aux);
528void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux); 610void drm_dp_aux_unregister(struct drm_dp_aux *aux);
529 611
530#endif /* _DRM_DP_HELPER_H_ */ 612#endif /* _DRM_DP_HELPER_H_ */
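
The rename from drm_dp_aux_register_i2c_bus() to drm_dp_aux_register() reflects that registration now covers more than the I2C-over-AUX adapter. A sketch of a connector init path that also probes the new DP_MSTM_CAP register; example_connector and example_aux_transfer() are assumed driver-side names:

static int example_dp_init(struct example_connector *conn,
			   struct device *parent)
{
	u8 mstm_cap = 0;
	int ret;

	conn->aux.name = "DP-AUX A";
	conn->aux.dev = parent;
	conn->aux.transfer = example_aux_transfer;

	ret = drm_dp_aux_register(&conn->aux);
	if (ret < 0)
		return ret;

	/* drm_dp_dpcd_readb() returns the byte count (1) on success. */
	if (drm_dp_dpcd_readb(&conn->aux, DP_MSTM_CAP, &mstm_cap) == 1)
		conn->mst_capable = mstm_cap & DP_MST_CAP;

	return 0;
}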
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index a1441c5ac63d..b96031d947a0 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -202,6 +202,11 @@ struct detailed_timing {
202#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) 202#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
203#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) 203#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
204 204
205#define DRM_EDID_HDMI_DC_48 (1 << 6)
206#define DRM_EDID_HDMI_DC_36 (1 << 5)
207#define DRM_EDID_HDMI_DC_30 (1 << 4)
208#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
209
205struct edid { 210struct edid {
206 u8 header[8]; 211 u8 header[8];
207 /* Vendor & product info */ 212 /* Vendor & product info */
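
These masks decode the deep-color byte of the HDMI vendor block and land in the new drm_display_info.edid_hdmi_dc_modes field added to drm_crtc.h above. A one-line capability check, with names assumed:

/* Does the sink advertise 12 bpc (36-bit) deep color? */
static bool example_supports_12bpc(struct drm_connector *connector)
{
	return connector->display_info.edid_hdmi_dc_modes &
	       DRM_EDID_HDMI_DC_36;
}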
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 6e622f7d481d..7997246d4039 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -108,7 +108,7 @@ int drm_fb_helper_set_par(struct fb_info *info);
108int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 108int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
109 struct fb_info *info); 109 struct fb_info *info);
110 110
111bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper); 111bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
112void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 112void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
113 uint32_t fb_width, uint32_t fb_height); 113 uint32_t fb_width, uint32_t fb_height);
114void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 114void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 35c776ae7d3b..9eed34dcd6af 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -57,6 +57,7 @@ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
57 * @count: number of committed items 57 * @count: number of committed items
58 * @func: callback fxn called for each committed item 58 * @func: callback fxn called for each committed item
59 * @worker: worker which calls @func 59 * @worker: worker which calls @func
60 * @fifo: queue of committed items
60 */ 61 */
61struct drm_flip_work { 62struct drm_flip_work {
62 const char *name; 63 const char *name;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 7209df15a3cd..944f33f8ba38 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -135,11 +135,13 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
135 * @driver: device driver model driver 135 * @driver: device driver model driver
136 * @probe: callback for device binding 136 * @probe: callback for device binding
137 * @remove: callback for device unbinding 137 * @remove: callback for device unbinding
138 * @shutdown: called at shutdown time to quiesce the device
138 */ 139 */
139struct mipi_dsi_driver { 140struct mipi_dsi_driver {
140 struct device_driver driver; 141 struct device_driver driver;
141 int(*probe)(struct mipi_dsi_device *dsi); 142 int(*probe)(struct mipi_dsi_device *dsi);
142 int(*remove)(struct mipi_dsi_device *dsi); 143 int(*remove)(struct mipi_dsi_device *dsi);
144 void (*shutdown)(struct mipi_dsi_device *dsi);
143}; 145};
144 146
145#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver) 147#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver)
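
The new ->shutdown callback lets DSI peripherals quiesce the link on reboot or poweroff, mirroring what the platform and I2C buses already offer. A skeletal driver registration, with the example_panel_* functions assumed:

static struct mipi_dsi_driver example_panel_driver = {
	.driver = {
		.name = "example-panel",
	},
	.probe = example_panel_probe,
	.remove = example_panel_remove,
	.shutdown = example_panel_shutdown,	/* new in this release */
};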
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 2dbbf9976669..91d0582f924e 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -223,7 +223,7 @@ void drm_mode_validate_size(struct drm_device *dev,
223void drm_mode_prune_invalid(struct drm_device *dev, 223void drm_mode_prune_invalid(struct drm_device *dev,
224 struct list_head *mode_list, bool verbose); 224 struct list_head *mode_list, bool verbose);
225void drm_mode_sort(struct list_head *mode_list); 225void drm_mode_sort(struct list_head *mode_list);
226void drm_mode_connector_list_update(struct drm_connector *connector); 226void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits);
227 227
228/* parsing cmdline modes */ 228/* parsing cmdline modes */
229bool 229bool
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
new file mode 100644
index 000000000000..402aa7a6a058
--- /dev/null
+++ b/include/drm/drm_modeset_lock.h
@@ -0,0 +1,126 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef DRM_MODESET_LOCK_H_
25#define DRM_MODESET_LOCK_H_
26
27#include <linux/ww_mutex.h>
28
29struct drm_modeset_lock;
30
31/**
32 * drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
33 * @ww_ctx: base acquire ctx
34 * @contended: used internally for -EDEADLK handling
35 * @locked: list of held locks
36 *
37 * Each thread competing for a set of locks must use one acquire
38 * ctx. And if any lock fxn returns -EDEADLK, it must backoff and
39 * retry.
40 */
41struct drm_modeset_acquire_ctx {
42
43 struct ww_acquire_ctx ww_ctx;
44
45 /**
46 * Contended lock: if a lock is contended you should only call
47 * drm_modeset_backoff() which drops locks and slow-locks the
48 * contended lock.
49 */
50 struct drm_modeset_lock *contended;
51
52 /**
53 * list of held locks (drm_modeset_lock)
54 */
55 struct list_head locked;
56};
57
58/**
59 * drm_modeset_lock - used for locking modeset resources.
60 * @mutex: resource locking
61 * @head: used to hold its place on state->locked list when
62 * part of an atomic update
63 *
64 * Used for locking CRTCs and other modeset resources.
65 */
66struct drm_modeset_lock {
67 /**
68 * modeset lock
69 */
70 struct ww_mutex mutex;
71
72 /**
73 * Resources that are locked as part of an atomic update are added
74 * to a list (so we know what to unlock at the end).
75 */
76 struct list_head head;
77};
78
79extern struct ww_class crtc_ww_class;
80
81void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
82 uint32_t flags);
83void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
84void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
85void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
86int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
87
88/**
89 * drm_modeset_lock_init - initialize lock
90 * @lock: lock to init
91 */
92static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
93{
94 ww_mutex_init(&lock->mutex, &crtc_ww_class);
95 INIT_LIST_HEAD(&lock->head);
96}
97
98/**
99 * drm_modeset_lock_fini - cleanup lock
100 * @lock: lock to cleanup
101 */
102static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
103{
104 WARN_ON(!list_empty(&lock->head));
105}
106
107/**
108 * drm_modeset_is_locked - equivalent to mutex_is_locked()
109 * @lock: lock to check
110 */
111static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
112{
113 return ww_mutex_is_locked(&lock->mutex);
114}
115
116int drm_modeset_lock(struct drm_modeset_lock *lock,
117 struct drm_modeset_acquire_ctx *ctx);
118int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
119 struct drm_modeset_acquire_ctx *ctx);
120void drm_modeset_unlock(struct drm_modeset_lock *lock);
121
122struct drm_device;
123int drm_modeset_lock_all_crtcs(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx);
125
126#endif /* DRM_MODESET_LOCK_H_ */
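
The point of backing these locks with ww_mutex is the deadlock-avoidance dance: on -EDEADLK the caller drops everything it holds, slow-locks the contended lock via drm_modeset_backoff(), and retries from the top. A sketch of the expected calling pattern (a flags value of 0 is assumed):

static int example_update(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret) {
		/* ... touch CRTC state under the locks ... */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}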
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 09824becee3e..52e6870534b2 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -24,6 +24,17 @@
24#ifndef DRM_PLANE_HELPER_H 24#ifndef DRM_PLANE_HELPER_H
25#define DRM_PLANE_HELPER_H 25#define DRM_PLANE_HELPER_H
26 26
27#include <drm/drm_rect.h>
28
29/*
30 * Drivers that don't allow primary plane scaling may pass this macro in place
31 * of the min/max scale parameters of the update checker function.
32 *
33 * Due to src being in 16.16 fixed point and dest being in integer pixels,
34 * 1<<16 represents no scaling.
35 */
36#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
37
27/** 38/**
28 * DOC: plane helpers 39 * DOC: plane helpers
29 * 40 *
@@ -31,6 +42,17 @@
31 * planes. 42 * planes.
32 */ 43 */
33 44
45extern int drm_plane_helper_check_update(struct drm_plane *plane,
46 struct drm_crtc *crtc,
47 struct drm_framebuffer *fb,
48 struct drm_rect *src,
49 struct drm_rect *dest,
50 const struct drm_rect *clip,
51 int min_scale,
52 int max_scale,
53 bool can_position,
54 bool can_update_disabled,
55 bool *visible);
34extern int drm_primary_helper_update(struct drm_plane *plane, 56extern int drm_primary_helper_update(struct drm_plane *plane,
35 struct drm_crtc *crtc, 57 struct drm_crtc *crtc,
36 struct drm_framebuffer *fb, 58 struct drm_framebuffer *fb,
@@ -42,7 +64,7 @@ extern int drm_primary_helper_disable(struct drm_plane *plane);
42extern void drm_primary_helper_destroy(struct drm_plane *plane); 64extern void drm_primary_helper_destroy(struct drm_plane *plane);
43extern const struct drm_plane_funcs drm_primary_helper_funcs; 65extern const struct drm_plane_funcs drm_primary_helper_funcs;
44extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, 66extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
45 uint32_t *formats, 67 const uint32_t *formats,
46 int num_formats); 68 int num_formats);
47 69
48 70
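
drm_plane_helper_check_update() centralizes the clip/scale sanity checks that drivers were open-coding; DRM_PLANE_HELPER_NO_SCALING is 1<<16, i.e. a scale factor of exactly 1.0 in the 16.16 fixed-point source coordinates. A hedged fragment for a driver whose primary plane can neither scale nor be positioned, with src/dest/clip filled in by the caller:

	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false,	/* can_position */
					    false,	/* can_update_disabled */
					    &visible);
	if (ret)
		return ret;
	if (!visible)
		return 0;	/* fully clipped, nothing to program */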
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 012d58fa8ff0..0572035673f3 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -223,14 +223,32 @@
223 _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \ 223 _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
224 _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */ 224 _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
225 225
226#define INTEL_BDW_M_IDS(info) \ 226#define INTEL_BDW_GT12M_IDS(info) \
227 _INTEL_BDW_M_IDS(1, info), \ 227 _INTEL_BDW_M_IDS(1, info), \
228 _INTEL_BDW_M_IDS(2, info), \ 228 _INTEL_BDW_M_IDS(2, info)
229 _INTEL_BDW_M_IDS(3, info)
230 229
231#define INTEL_BDW_D_IDS(info) \ 230#define INTEL_BDW_GT12D_IDS(info) \
232 _INTEL_BDW_D_IDS(1, info), \ 231 _INTEL_BDW_D_IDS(1, info), \
233 _INTEL_BDW_D_IDS(2, info), \ 232 _INTEL_BDW_D_IDS(2, info)
233
234#define INTEL_BDW_GT3M_IDS(info) \
235 _INTEL_BDW_M_IDS(3, info)
236
237#define INTEL_BDW_GT3D_IDS(info) \
234 _INTEL_BDW_D_IDS(3, info) 238 _INTEL_BDW_D_IDS(3, info)
235 239
240#define INTEL_BDW_M_IDS(info) \
241 INTEL_BDW_GT12M_IDS(info), \
242 INTEL_BDW_GT3M_IDS(info)
243
244#define INTEL_BDW_D_IDS(info) \
245 INTEL_BDW_GT12D_IDS(info), \
246 INTEL_BDW_GT3D_IDS(info)
247
248#define INTEL_CHV_IDS(info) \
249 INTEL_VGA_DEVICE(0x22b0, info), \
250 INTEL_VGA_DEVICE(0x22b1, info), \
251 INTEL_VGA_DEVICE(0x22b2, info), \
252 INTEL_VGA_DEVICE(0x22b3, info)
253
236#endif /* _I915_PCIIDS_H */ 254#endif /* _I915_PCIIDS_H */
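
Splitting the Broadwell macros by GT level lets i915 attach distinct device-info structures to GT3 parts while INTEL_BDW_M_IDS/INTEL_BDW_D_IDS keep their old coverage, and INTEL_CHV_IDS adds the first Cherryview PCI IDs. In the usual pciidlist style, with the info structures assumed:

static const struct pci_device_id example_pciidlist[] = {
	INTEL_BDW_GT12M_IDS(&example_bdw_gt12_info),
	INTEL_BDW_GT3M_IDS(&example_bdw_gt3_info),
	INTEL_CHV_IDS(&example_chv_info),
	{ }
};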
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index ee127ec33c60..7526c5bf5610 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -485,13 +485,12 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
485 void (*destroy) (struct ttm_buffer_object *)); 485 void (*destroy) (struct ttm_buffer_object *));
486 486
487/** 487/**
488 * ttm_bo_synccpu_object_init 488 * ttm_bo_create
489 * 489 *
490 * @bdev: Pointer to a ttm_bo_device struct. 490 * @bdev: Pointer to a ttm_bo_device struct.
491 * @bo: Pointer to a ttm_buffer_object to be initialized.
492 * @size: Requested size of buffer object. 491 * @size: Requested size of buffer object.
493 * @type: Requested type of buffer object. 492 * @type: Requested type of buffer object.
494 * @flags: Initial placement flags. 493 * @placement: Initial placement.
495 * @page_alignment: Data alignment in pages. 494 * @page_alignment: Data alignment in pages.
496 * @interruptible: If needing to sleep while waiting for GPU resources, 495 * @interruptible: If needing to sleep while waiting for GPU resources,
497 * sleep interruptible. 496 * sleep interruptible.
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f104c2603ebe..def54f9e07ca 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -181,6 +181,7 @@ struct drm_mode_get_plane_res {
181#define DRM_MODE_ENCODER_TVDAC 4 181#define DRM_MODE_ENCODER_TVDAC 4
182#define DRM_MODE_ENCODER_VIRTUAL 5 182#define DRM_MODE_ENCODER_VIRTUAL 5
183#define DRM_MODE_ENCODER_DSI 6 183#define DRM_MODE_ENCODER_DSI 6
184#define DRM_MODE_ENCODER_DPMST 7
184 185
185struct drm_mode_get_encoder { 186struct drm_mode_get_encoder {
186 __u32 encoder_id; 187 __u32 encoder_id;
@@ -251,6 +252,21 @@ struct drm_mode_get_connector {
251#define DRM_MODE_PROP_BLOB (1<<4) 252#define DRM_MODE_PROP_BLOB (1<<4)
252#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ 253#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
253 254
255/* non-extended types: legacy bitmask, one bit per type: */
256#define DRM_MODE_PROP_LEGACY_TYPE ( \
257 DRM_MODE_PROP_RANGE | \
258 DRM_MODE_PROP_ENUM | \
259 DRM_MODE_PROP_BLOB | \
260 DRM_MODE_PROP_BITMASK)
261
262/* extended-types: rather than continue to consume a bit per type,
263 * grab a chunk of the bits to use as integer type id.
264 */
265#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
266#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
267#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
268#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
269
254struct drm_mode_property_enum { 270struct drm_mode_property_enum {
255 __u64 value; 271 __u64 value;
256 char name[DRM_PROP_NAME_LEN]; 272 char name[DRM_PROP_NAME_LEN];
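
DRM_MODE_PROP_EXTENDED_TYPE carves bits 6-15 out of the flags word as an integer type ID, so new property types stop costing one bit each. Userspace can classify a property with the same logic as the kernel's drm_property_type_is(); a small decoding sketch:

/* Returns the type as a flags-encoded value: either one legacy bit
 * or a DRM_MODE_PROP_TYPE(n) extended ID. */
static uint32_t example_prop_type(uint32_t flags)
{
	if (flags & DRM_MODE_PROP_EXTENDED_TYPE)
		return flags & DRM_MODE_PROP_EXTENDED_TYPE;
	return flags & DRM_MODE_PROP_LEGACY_TYPE;
}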
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 126bfaa8bb6b..ff57f07c3249 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
223#define DRM_I915_GEM_GET_CACHING 0x30 223#define DRM_I915_GEM_GET_CACHING 0x30
224#define DRM_I915_REG_READ 0x31 224#define DRM_I915_REG_READ 0x31
225#define DRM_I915_GET_RESET_STATS 0x32 225#define DRM_I915_GET_RESET_STATS 0x32
226#define DRM_I915_GEM_USERPTR 0x33
226 227
227#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 228#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
228#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 229#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -273,6 +274,7 @@ typedef struct _drm_i915_sarea {
273#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 274#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
274#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 275#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
275#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 276#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
277#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
276 278
277/* Allow drivers to submit batchbuffers directly to hardware, relying 279/* Allow drivers to submit batchbuffers directly to hardware, relying
278 * on the security mechanisms provided by hardware. 280 * on the security mechanisms provided by hardware.
@@ -337,6 +339,7 @@ typedef struct drm_i915_irq_wait {
337#define I915_PARAM_HAS_EXEC_NO_RELOC 25 339#define I915_PARAM_HAS_EXEC_NO_RELOC 25
338#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 340#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
339#define I915_PARAM_HAS_WT 27 341#define I915_PARAM_HAS_WT 27
342#define I915_PARAM_CMD_PARSER_VERSION 28
340 343
341typedef struct drm_i915_getparam { 344typedef struct drm_i915_getparam {
342 int param; 345 int param;
@@ -1049,4 +1052,18 @@ struct drm_i915_reset_stats {
1049 __u32 pad; 1052 __u32 pad;
1050}; 1053};
1051 1054
1055struct drm_i915_gem_userptr {
1056 __u64 user_ptr;
1057 __u64 user_size;
1058 __u32 flags;
1059#define I915_USERPTR_READ_ONLY 0x1
1060#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1061 /**
1062 * Returned handle for the object.
1063 *
1064 * Object handles are nonzero.
1065 */
1066 __u32 handle;
1067};
1068
1052#endif /* _UAPI_I915_DRM_H_ */ 1069#endif /* _UAPI_I915_DRM_H_ */
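
DRM_IOCTL_I915_GEM_USERPTR wraps an existing, page-aligned anonymous mapping in a GEM object so the GPU can access it via the new userptr support. A hedged userspace sketch (error handling trimmed; libdrm's uapi headers assumed to be installed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint32_t example_userptr(int drm_fd, void *ptr, uint64_t size)
{
	struct drm_i915_gem_userptr arg;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;	/* must be page aligned */
	arg.user_size = size;		/* multiple of the page size */
	arg.flags = 0;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return 0;		/* object handles are nonzero */
	return arg.handle;
}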
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index aefa2f6afa3b..1cc0b610f162 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,7 +1007,7 @@ struct drm_radeon_cs {
1007#define RADEON_INFO_NUM_BYTES_MOVED 0x1d 1007#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
1008#define RADEON_INFO_VRAM_USAGE 0x1e 1008#define RADEON_INFO_VRAM_USAGE 0x1e
1009#define RADEON_INFO_GTT_USAGE 0x1f 1009#define RADEON_INFO_GTT_USAGE 0x1f
1010 1010#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
1011 1011
1012struct drm_radeon_info { 1012struct drm_radeon_info {
1013 uint32_t request; 1013 uint32_t request;
diff --git a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index c2c6fab05eaa..3e43e22cdff9 100644
--- a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -104,6 +104,7 @@ int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms);
104 104
105void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel, 105void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
106 bool doublebuffer); 106 bool doublebuffer);
107int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
107void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num); 108void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
108 109
109/* 110/*
@@ -165,6 +166,20 @@ int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
165int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha, 166int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
166 bool bg_chan); 167 bool bg_chan);
167 168
169/*
170 * IPU CMOS Sensor Interface (csi) functions
171 */
172int ipu_csi_enable(struct ipu_soc *ipu, int csi);
173int ipu_csi_disable(struct ipu_soc *ipu, int csi);
174
175/*
176 * IPU Sensor Multiple FIFO Controller (SMFC) functions
177 */
178int ipu_smfc_enable(struct ipu_soc *ipu);
179int ipu_smfc_disable(struct ipu_soc *ipu);
180int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id);
181int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize);
182
168#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size)) 183#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
169 184
170#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22) 185#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
@@ -321,6 +336,7 @@ static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p,
321}; 336};
322 337
323struct ipu_client_platformdata { 338struct ipu_client_platformdata {
339 int csi;
324 int di; 340 int di;
325 int dc; 341 int dc;
326 int dp; 342 int dp;
diff --git a/lib/Kconfig b/lib/Kconfig
index 4771fb3f4da4..334f7722a999 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -331,6 +331,20 @@ config TEXTSEARCH_FSM
331config BTREE 331config BTREE
332 boolean 332 boolean
333 333
334config INTERVAL_TREE
335 boolean
336 help
337 Simple, embeddable, interval-tree. Can find the start of an
338 overlapping range in log(n) time and then iterate over all
339 overlapping nodes. The algorithm is implemented as an
340 augmented rbtree.
341
342 See:
343
344 Documentation/rbtree.txt
345
346 for more information.
347
334config ASSOCIATIVE_ARRAY 348config ASSOCIATIVE_ARRAY
335 bool 349 bool
336 help 350 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ccca32264748..e34d11d70bbf 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1511,6 +1511,7 @@ config RBTREE_TEST
1511config INTERVAL_TREE_TEST 1511config INTERVAL_TREE_TEST
1512 tristate "Interval tree test" 1512 tristate "Interval tree test"
1513 depends on m && DEBUG_KERNEL 1513 depends on m && DEBUG_KERNEL
1514 select INTERVAL_TREE
1514 help 1515 help
1515 A benchmark measuring the performance of the interval tree library 1516 A benchmark measuring the performance of the interval tree library
1516 1517
diff --git a/lib/Makefile b/lib/Makefile
index 74a32dc49a93..4a4078987a4c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -50,6 +50,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
50obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 50obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
51 51
52obj-$(CONFIG_BTREE) += btree.o 52obj-$(CONFIG_BTREE) += btree.o
53obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
53obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o 54obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
54obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 55obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
55obj-$(CONFIG_DEBUG_LIST) += list_debug.o 56obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -157,8 +158,6 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files)
157obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o 158obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
158obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o 159obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
159 160
160interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
161
162obj-$(CONFIG_PERCPU_TEST) += percpu_test.o 161obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
163 162
164obj-$(CONFIG_ASN1) += asn1_decoder.o 163obj-$(CONFIG_ASN1) += asn1_decoder.o
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index e6eb406f2d65..f367f9ad544c 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -1,6 +1,7 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/interval_tree.h> 2#include <linux/interval_tree.h>
3#include <linux/interval_tree_generic.h> 3#include <linux/interval_tree_generic.h>
4#include <linux/module.h>
4 5
5#define START(node) ((node)->start) 6#define START(node) ((node)->start)
6#define LAST(node) ((node)->last) 7#define LAST(node) ((node)->last)
@@ -8,3 +9,8 @@
8INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, 9INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
9 unsigned long, __subtree_last, 10 unsigned long, __subtree_last,
10 START, LAST,, interval_tree) 11 START, LAST,, interval_tree)
12
13EXPORT_SYMBOL_GPL(interval_tree_insert);
14EXPORT_SYMBOL_GPL(interval_tree_remove);
15EXPORT_SYMBOL_GPL(interval_tree_iter_first);
16EXPORT_SYMBOL_GPL(interval_tree_iter_next);
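
Exporting these four symbols is what lets modular code (i915's userptr tracking is the consumer in this pull) use the generic interval tree; previously interval_tree.o was linked only into the test module. A minimal modular use, assuming nodes were added with interval_tree_insert():

#include <linux/interval_tree.h>

/* True if any node in the tree overlaps [start, last]. */
static bool example_overlaps(struct rb_root *root,
			     unsigned long start, unsigned long last)
{
	return interval_tree_iter_first(root, start, last) != NULL;
}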
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test.c
index 245900b98c8e..245900b98c8e 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test.c