author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 22:40:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 22:40:34 -0400
commit		20a2078ce7705a6e0722ef5184336eb8657a58d8 (patch)
tree		5b927c96516380aa0ecd68d8a609f7cd72120ad5 /drivers/gpu
parent		0279b3c0ada1d78882f24acf94ac4595bd657a89 (diff)
parent		307b9c022720f9de90d58e51743e01e9a42aec59 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 3.10.

  Weird bits:
   - OMAP drm changes required OMAP dss changes, in drivers/video, so I
     took them in here.
   - one more fbcon fix for font handover
   - VT switch avoidance in pm code
   - scatterlist helpers for gpu drivers - have acks from akpm

  Highlights:
   - qxl kms driver - driver for the spice qxl virtual GPU

  Nouveau:
   - fermi/kepler VRAM compression
   - GK110/nvf0 modesetting support.

  Tegra:
   - host1x core merged with 2D engine support

  i915:
   - vt switchless resume
   - more valleyview support
   - vblank fixes
   - modesetting pipe config rework

  radeon:
   - UVD engine support
   - SI chip tiling support
   - GPU registers initialisation from golden values.

  exynos:
   - device tree changes
   - fimc block support

  Otherwise:
   - bunches of fixes all over the place."

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (513 commits)
  qxl: update to new idr interfaces.
  drm/nouveau: fix build with nv50->nvc0
  drm/radeon: fix handling of v6 power tables
  drm/radeon: clarify family checks in pm table parsing
  drm/radeon: consolidate UVD clock programming
  drm/radeon: fix UPLL_REF_DIV_MASK definition
  radeon: add bo tracking debugfs
  drm/radeon: add new richland pci ids
  drm/radeon: add some new SI PCI ids
  drm/radeon: fix scratch reg handling for UVD fence
  drm/radeon: allocate SA bo in the requested domain
  drm/radeon: fix possible segfault when parsing pm tables
  drm/radeon: fix endian bugs in atom_allocate_fb_scratch()
  OMAPDSS: TFP410: return EPROBE_DEFER if the i2c adapter not found
  OMAPDSS: VENC: Add error handling for venc_probe_pdata
  OMAPDSS: HDMI: Add error handling for hdmi_probe_pdata
  OMAPDSS: RFBI: Add error handling for rfbi_probe_pdata
  OMAPDSS: DSI: Add error handling for dsi_probe_pdata
  OMAPDSS: SDI: Add error handling for sdi_probe_pdata
  OMAPDSS: DPI: Add error handling for dpi_probe_pdata
  ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 43
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 38
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 411
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 279
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 21
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 22
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 10
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 99
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 273
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 54
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 712
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 7
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 52
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 17
-rw-r--r--  drivers/gpu/drm/gma500/power.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 154
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 33
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 433
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 90
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 206
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 135
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 262
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 659
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 293
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1535
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 526
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 155
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 166
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 245
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 243
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 16
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 8
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 55
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 31
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/engine.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/object.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/parent.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/base.c) | 186
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv04.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv10.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv20.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv30.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv30.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv40.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv50.c) | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nve0.c) | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv25.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv30.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv34.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 230
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/parent.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/device.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/device.h) | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/graph.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/os.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 129
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c | 175
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c | 221
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/Makefile | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c (renamed from drivers/gpu/drm/nouveau/nouveau_calc.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c (renamed from drivers/gpu/drm/nouveau/nv04_crtc.c) | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/cursor.c (renamed from drivers/gpu/drm/nouveau/nv04_cursor.c) | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c (renamed from drivers/gpu/drm/nouveau/nv04_dac.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c (renamed from drivers/gpu/drm/nouveau/nv04_dfp.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c (renamed from drivers/gpu/drm/nouveau/nv04_display.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h (renamed from drivers/gpu/drm/nouveau/nv04_display.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c (renamed from drivers/gpu/drm/nouveau/nouveau_hw.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.h (renamed from drivers/gpu/drm/nouveau/nouveau_hw.h) | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/nvreg.h (renamed from drivers/gpu/drm/nouveau/nvreg.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv_modes.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c (renamed from drivers/gpu/drm/nouveau/nv04_tv.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.h (renamed from drivers/gpu/drm/nouveau/nv17_tv.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 27
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 165
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 38
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 17
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/qxl/Makefile | 9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 685
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 141
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dev.h | 879
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 982
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 390
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 145
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 566
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 93
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 567
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 97
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 149
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 176
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 411
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 97
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 302
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 365
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 112
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 304
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 581
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1187
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 169
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 414
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 21
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 404
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 150
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 831
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 909
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 43
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 979
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 40
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 7
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 217
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 52
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 327
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 9
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 4
-rw-r--r--  drivers/gpu/host1x/Kconfig | 24
-rw-r--r--  drivers/gpu/host1x/Makefile | 20
-rw-r--r--  drivers/gpu/host1x/cdma.c | 491
-rw-r--r--  drivers/gpu/host1x/cdma.h | 100
-rw-r--r--  drivers/gpu/host1x/channel.c | 126
-rw-r--r--  drivers/gpu/host1x/channel.h | 52
-rw-r--r--  drivers/gpu/host1x/debug.c | 210
-rw-r--r--  drivers/gpu/host1x/debug.h | 51
-rw-r--r--  drivers/gpu/host1x/dev.c | 246
-rw-r--r--  drivers/gpu/host1x/dev.h | 308
-rw-r--r--  drivers/gpu/host1x/drm/Kconfig (renamed from drivers/gpu/drm/tegra/Kconfig) | 20
-rw-r--r--  drivers/gpu/host1x/drm/dc.c (renamed from drivers/gpu/drm/tegra/dc.c) | 31
-rw-r--r--  drivers/gpu/host1x/drm/dc.h (renamed from drivers/gpu/drm/tegra/dc.h) | 0
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 640
-rw-r--r--  drivers/gpu/host1x/drm/drm.h (renamed from drivers/gpu/drm/tegra/drm.h) | 68
-rw-r--r--  drivers/gpu/host1x/drm/fb.c | 374
-rw-r--r--  drivers/gpu/host1x/drm/gem.c | 270
-rw-r--r--  drivers/gpu/host1x/drm/gem.h | 59
-rw-r--r--  drivers/gpu/host1x/drm/gr2d.c | 339
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.c (renamed from drivers/gpu/drm/tegra/hdmi.c) | 5
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.h (renamed from drivers/gpu/drm/tegra/hdmi.h) | 0
-rw-r--r--  drivers/gpu/host1x/drm/output.c (renamed from drivers/gpu/drm/tegra/output.c) | 0
-rw-r--r--  drivers/gpu/host1x/drm/rgb.c (renamed from drivers/gpu/drm/tegra/rgb.c) | 0
-rw-r--r--  drivers/gpu/host1x/host1x.h | 30
-rw-r--r--  drivers/gpu/host1x/host1x_bo.h | 87
-rw-r--r--  drivers/gpu/host1x/host1x_client.h | 35
-rw-r--r--  drivers/gpu/host1x/hw/Makefile | 6
-rw-r--r--  drivers/gpu/host1x/hw/cdma_hw.c | 326
-rw-r--r--  drivers/gpu/host1x/hw/channel_hw.c | 168
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 322
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.c | 42
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.h | 25
-rw-r--r--  drivers/gpu/host1x/hw/host1x01_hardware.h | 143
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_channel.h | 120
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_sync.h | 243
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_uclass.h | 174
-rw-r--r--  drivers/gpu/host1x/hw/intr_hw.c | 143
-rw-r--r--  drivers/gpu/host1x/hw/syncpt_hw.c | 114
-rw-r--r--  drivers/gpu/host1x/intr.c | 354
-rw-r--r--  drivers/gpu/host1x/intr.h | 102
-rw-r--r--  drivers/gpu/host1x/job.c | 603
-rw-r--r--  drivers/gpu/host1x/job.h | 162
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 387
-rw-r--r--  drivers/gpu/host1x/syncpt.h | 165
333 files changed, 26522 insertions, 5467 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df3daea..d8a22c2a579d 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
 obj-y += drm/ vga/
+obj-$(CONFIG_TEGRA_HOST1X) += host1x/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1e82882da9de..b16c50ee769c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -215,8 +215,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
 
-source "drivers/gpu/drm/tegra/Kconfig"
-
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0d59b24f8d23..1c9f24396002 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
-obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 528429252f0f..02e52d543e4b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
 	void *sysram;
 	int size;
 	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
 };
 
 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe7d2c5..fbc0823cfa18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->afb.obj;
 	bo = gem_to_ast_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = ast_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 		}
 		unmap = true;
 	}
-	for (i = y; i < y + height; i++) {
+	for (i = y; i <= y2; i++) {
 		/* assume equal stride for now */
 		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
-		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
 	}
 	if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
 	ast->fbdev = afbdev;
 	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	spin_lock_init(&afbdev->dirty_lock);
 	ret = drm_fb_helper_init(dev, &afbdev->helper,
 				 1, 1);
 	if (ret) {
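
The ast change above (mirrored for cirrus further down) defers fbdev damage while the BO is busy being moved. A condensed, driver-agnostic sketch of the pattern follows; struct fb_state and flush_rect() are hypothetical stand-ins for illustration, not kernel API:

struct fb_state {
	spinlock_t dirty_lock;
	int x1, y1, x2, y2;		/* accumulated dirty rect */
};

static void dirty_update(struct fb_state *s, int x, int y,
			 int width, int height, bool bo_busy)
{
	int x2 = x + width - 1;
	int y2 = y + height - 1;
	unsigned long flags;

	spin_lock_irqsave(&s->dirty_lock, flags);

	/* merge any damage stored by earlier, skipped updates */
	if (s->x1 < x)
		x = s->x1;
	if (s->y1 < y)
		y = s->y1;
	if (s->x2 > x2)
		x2 = s->x2;
	if (s->y2 > y2)
		y2 = s->y2;

	if (bo_busy) {
		/* BO is being moved: record the damage, flush later */
		s->x1 = x; s->y1 = y; s->x2 = x2; s->y2 = y2;
		spin_unlock_irqrestore(&s->dirty_lock, flags);
		return;
	}

	/* reset to "empty" (x1/y1 = INT_MAX, x2/y2 = 0) and flush */
	s->x1 = s->y1 = INT_MAX;
	s->x2 = s->y2 = 0;
	spin_unlock_irqrestore(&s->dirty_lock, flags);

	flush_rect(s, x, y, x2, y2);	/* hypothetical copy to VRAM */
}

The next successful update therefore pushes both its own rectangle and everything accumulated while the BO was unavailable.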
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731a6112..09da3393c527 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 
 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 	if (ret) {
-		if (ret != -ERESTARTSYS)
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc724e5a2..7ca059596887 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
 	struct list_head fbdev_list;
 	void *sysram;
 	int size;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
 };
 
 struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index e25afccaf85b..3541b567bbd8 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->gfb.obj;
 	bo = gem_to_cirrus_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 
 	cdev->mode_info.gfbdev = gfbdev;
 	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+	spin_lock_init(&gfbdev->dirty_lock);
 
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26e4905..2ed8cfc740c9 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
 
 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 	if (ret) {
-		if (ret != -ERESTARTSYS)
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a575cb2e6bdb..bb8f58012189 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -105,12 +105,11 @@ drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		struct scatterlist *sg;
-		int i;
+		struct sg_page_iter sg_iter;
 
 		mb();
-		for_each_sg(st->sgl, sg, st->nents, i)
-			drm_clflush_page(sg_page(sg));
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
 		mb();
 
 		return;
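
This is one of the new scatterlist helpers mentioned in the pull message: for_each_sg() yields one struct scatterlist entry at a time, so sg_page() only reaches the first page of a multi-page entry, whereas for_each_sg_page() visits every backing page. A minimal sketch of the iterator from <linux/scatterlist.h>; the surrounding function is a hypothetical example, not kernel code:

#include <linux/scatterlist.h>

/* hypothetical example: touch every CPU page behind an sg_table */
static void for_each_backing_page(struct sg_table *st)
{
	struct sg_page_iter sg_iter;

	/* final argument is the page offset to start the walk from */
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		/* per-page work goes here, e.g. drm_clflush_page(page) */
		(void)page;
	}
}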
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dd64a06dc5b4..3a8f7e6db295 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -178,9 +178,6 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
 };
 
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
-		 drm_dirty_info_enum_list)
-
 struct drm_conn_prop_enum_list {
 	int type;
 	char *name;
@@ -412,7 +409,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 	mutex_lock(&dev->mode_config.fb_lock);
 	fb = __drm_framebuffer_lookup(dev, id);
 	if (fb)
-		kref_get(&fb->refcount);
+		drm_framebuffer_reference(fb);
 	mutex_unlock(&dev->mode_config.fb_lock);
 
 	return fb;
@@ -706,7 +703,6 @@ int drm_connector_init(struct drm_device *dev,
 	connector->connector_type = connector_type;
 	connector->connector_type_id =
 		++drm_connector_enum_list[connector_type].count; /* TODO */
-	INIT_LIST_HEAD(&connector->user_modes);
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
 	connector->edid_blob_ptr = NULL;
@@ -747,9 +743,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		drm_mode_remove(connector, mode);
 
-	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
-		drm_mode_remove(connector, mode);
-
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
 	dev->mode_config.num_connector--;
@@ -1120,45 +1113,7 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
-	mutex_init(&dev->mode_config.mutex);
-	mutex_init(&dev->mode_config.idr_mutex);
-	mutex_init(&dev->mode_config.fb_lock);
-	INIT_LIST_HEAD(&dev->mode_config.fb_list);
-	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
-	INIT_LIST_HEAD(&dev->mode_config.connector_list);
-	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
-	INIT_LIST_HEAD(&dev->mode_config.property_list);
-	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
-	INIT_LIST_HEAD(&dev->mode_config.plane_list);
-	idr_init(&dev->mode_config.crtc_idr);
-
-	drm_modeset_lock_all(dev);
-	drm_mode_create_standard_connector_properties(dev);
-	drm_modeset_unlock_all(dev);
-
-	/* Just to be sure */
-	dev->mode_config.num_fb = 0;
-	dev->mode_config.num_connector = 0;
-	dev->mode_config.num_crtc = 0;
-	dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
 	uint32_t total_objects = 0;
 
@@ -1203,69 +1158,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
 /**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
-	struct drm_connector *connector, *ot;
-	struct drm_crtc *crtc, *ct;
-	struct drm_encoder *encoder, *enct;
-	struct drm_framebuffer *fb, *fbt;
-	struct drm_property *property, *pt;
-	struct drm_plane *plane, *plt;
-
-	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
-				 head) {
-		encoder->funcs->destroy(encoder);
-	}
-
-	list_for_each_entry_safe(connector, ot,
-				 &dev->mode_config.connector_list, head) {
-		connector->funcs->destroy(connector);
-	}
-
-	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
-				 head) {
-		drm_property_destroy(dev, property);
-	}
-
-	/*
-	 * Single-threaded teardown context, so it's not required to grab the
-	 * fb_lock to protect against concurrent fb_list access. Contrary, it
-	 * would actually deadlock with the drm_framebuffer_cleanup function.
-	 *
-	 * Also, if there are any framebuffers left, that's a driver leak now,
-	 * so politely WARN about this.
-	 */
-	WARN_ON(!list_empty(&dev->mode_config.fb_list));
-	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		drm_framebuffer_remove(fb);
-	}
-
-	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
-				 head) {
-		plane->funcs->destroy(plane);
-	}
-
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
-	}
-
-	idr_destroy(&dev->mode_config.crtc_idr);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
  * @in: drm_display_mode to use
@@ -2717,192 +2609,6 @@ void drm_fb_release(struct drm_file *priv)
 	mutex_unlock(&priv->fbs_lock);
 }
 
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static void drm_mode_attachmode(struct drm_device *dev,
-				struct drm_connector *connector,
-				struct drm_display_mode *mode)
-{
-	list_add_tail(&mode->head, &connector->user_modes);
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
-			     const struct drm_display_mode *mode)
-{
-	struct drm_connector *connector;
-	int ret = 0;
-	struct drm_display_mode *dup_mode, *next;
-	LIST_HEAD(list);
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder)
-			continue;
-		if (connector->encoder->crtc == crtc) {
-			dup_mode = drm_mode_duplicate(dev, mode);
-			if (!dup_mode) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			list_add_tail(&dup_mode->head, &list);
-		}
-	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder)
-			continue;
-		if (connector->encoder->crtc == crtc)
-			list_move_tail(list.next, &connector->user_modes);
-	}
-
-	WARN_ON(!list_empty(&list));
-
- out:
-	list_for_each_entry_safe(dup_mode, next, &list, head)
-		drm_mode_destroy(dev, dup_mode);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
-			       struct drm_connector *connector,
-			       struct drm_display_mode *mode)
-{
-	int found = 0;
-	int ret = 0;
-	struct drm_display_mode *match_mode, *t;
-
-	list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
-		if (drm_mode_equal(match_mode, mode)) {
-			list_del(&match_mode->head);
-			drm_mode_destroy(dev, match_mode);
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found)
-		ret = -EINVAL;
-
-	return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
-	struct drm_connector *connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_mode_detachmode(dev, connector, mode);
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_fb_attachmode - Attach a user mode to an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * This attaches a user specified mode to an connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_mode_cmd *mode_cmd = data;
-	struct drm_connector *connector;
-	struct drm_display_mode *mode;
-	struct drm_mode_object *obj;
-	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-	if (!obj) {
-		ret = -EINVAL;
-		goto out;
-	}
-	connector = obj_to_connector(obj);
-
-	mode = drm_mode_create(dev);
-	if (!mode) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = drm_crtc_convert_umode(mode, umode);
-	if (ret) {
-		DRM_DEBUG_KMS("Invalid mode\n");
-		drm_mode_destroy(dev, mode);
-		goto out;
-	}
-
-	drm_mode_attachmode(dev, connector, mode);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-
-/**
- * drm_fb_detachmode - Detach a user specified mode from an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_object *obj;
-	struct drm_mode_mode_cmd *mode_cmd = data;
-	struct drm_connector *connector;
-	struct drm_display_mode mode;
-	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-	if (!obj) {
-		ret = -EINVAL;
-		goto out;
-	}
-	connector = obj_to_connector(obj);
-
-	ret = drm_crtc_convert_umode(&mode, umode);
-	if (ret) {
-		DRM_DEBUG_KMS("Invalid mode\n");
-		goto out;
-	}
-
-	ret = drm_mode_detachmode(dev, connector, &mode);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
 struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 					 const char *name, int num_values)
 {
@@ -3739,6 +3445,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	}
 
+	if (crtc->fb->pixel_format != fb->pixel_format) {
+		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		ret = -ENOMEM;
 		spin_lock_irqsave(&dev->event_lock, flags);
@@ -4064,3 +3776,110 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
 	}
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. Which is no
+ * problem, since this should happen single threaded at init time. It is the
+ * driver's problem to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+	mutex_init(&dev->mode_config.mutex);
+	mutex_init(&dev->mode_config.idr_mutex);
+	mutex_init(&dev->mode_config.fb_lock);
+	INIT_LIST_HEAD(&dev->mode_config.fb_list);
+	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+	INIT_LIST_HEAD(&dev->mode_config.connector_list);
+	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+	INIT_LIST_HEAD(&dev->mode_config.plane_list);
+	idr_init(&dev->mode_config.crtc_idr);
+
+	drm_modeset_lock_all(dev);
+	drm_mode_create_standard_connector_properties(dev);
+	drm_modeset_unlock_all(dev);
+
+	/* Just to be sure */
+	dev->mode_config.num_fb = 0;
+	dev->mode_config.num_connector = 0;
+	dev->mode_config.num_crtc = 0;
+	dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+	struct drm_connector *connector, *ot;
+	struct drm_crtc *crtc, *ct;
+	struct drm_encoder *encoder, *enct;
+	struct drm_framebuffer *fb, *fbt;
+	struct drm_property *property, *pt;
+	struct drm_property_blob *blob, *bt;
+	struct drm_plane *plane, *plt;
+
+	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+				 head) {
+		encoder->funcs->destroy(encoder);
+	}
+
+	list_for_each_entry_safe(connector, ot,
+				 &dev->mode_config.connector_list, head) {
+		connector->funcs->destroy(connector);
+	}
+
+	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+				 head) {
+		drm_property_destroy(dev, property);
+	}
+
+	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+				 head) {
+		drm_property_destroy_blob(dev, blob);
+	}
+
+	/*
+	 * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. Contrary, it
+	 * would actually deadlock with the drm_framebuffer_cleanup function.
+	 *
+	 * Also, if there are any framebuffers left, that's a driver leak now,
+	 * so politely WARN about this.
+	 */
+	WARN_ON(!list_empty(&dev->mode_config.fb_list));
+	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+		drm_framebuffer_remove(fb);
+	}
+
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
+	idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7b2d378b2576..e974f9309b72 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -648,6 +648,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		} else if (set->fb->bits_per_pixel !=
 			   set->crtc->fb->bits_per_pixel) {
 			mode_changed = true;
+		} else if (set->fb->pixel_format !=
+			   set->crtc->fb->pixel_format) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 25f91cd23e60..8d4f29075af5 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -60,7 +60,7 @@ static int drm_version(struct drm_device *dev, void *data,
 	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
 
 /** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
+static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
@@ -150,8 +150,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev;
-	struct drm_ioctl_desc *ioctl;
+	const struct drm_ioctl_desc *ioctl;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	int retcode = -EINVAL;
@@ -408,6 +408,7 @@ long drm_ioctl(struct file *filp,
 		usize = asize = _IOC_SIZE(cmd);
 		if (drv_size > asize)
 			asize = drv_size;
+		cmd = ioctl->cmd_drv;
 	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
 		ioctl = &drm_ioctls[nr];
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e2acfdbf7d3c..9e62bbedb5ad 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -587,284 +587,348 @@ static const struct drm_display_mode edid_cea_modes[] = {
 	/* 1 - 640x480@60Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 2 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 3 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 4 - 1280x720@60Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, },
 	/* 5 - 1920x1080i@60Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE) },
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
 	/* 6 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 7 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 8 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK) },
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 9 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK) },
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 10 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE) },
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
 	/* 11 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE) },
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
 	/* 12 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 13 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 14 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 15 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 16 - 1920x1080@60Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, },
 	/* 17 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
 	/* 18 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
 	/* 19 - 1280x720@50Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, },
 	/* 20 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
674 DRM_MODE_FLAG_INTERLACE) }, 693 DRM_MODE_FLAG_INTERLACE),
694 .vrefresh = 50, },
675 /* 21 - 1440x576i@50Hz */ 695 /* 21 - 1440x576i@50Hz */
676 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 696 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
677 1590, 1728, 0, 576, 580, 586, 625, 0, 697 1590, 1728, 0, 576, 580, 586, 625, 0,
678 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 698 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
679 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 699 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
700 .vrefresh = 50, },
680 /* 22 - 1440x576i@50Hz */ 701 /* 22 - 1440x576i@50Hz */
681 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 702 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
682 1590, 1728, 0, 576, 580, 586, 625, 0, 703 1590, 1728, 0, 576, 580, 586, 625, 0,
683 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 704 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
684 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 705 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
706 .vrefresh = 50, },
685 /* 23 - 1440x288@50Hz */ 707 /* 23 - 1440x288@50Hz */
686 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 708 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
687 1590, 1728, 0, 288, 290, 293, 312, 0, 709 1590, 1728, 0, 288, 290, 293, 312, 0,
688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 710 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
689 DRM_MODE_FLAG_DBLCLK) }, 711 DRM_MODE_FLAG_DBLCLK),
712 .vrefresh = 50, },
690 /* 24 - 1440x288@50Hz */ 713 /* 24 - 1440x288@50Hz */
691 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 714 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
692 1590, 1728, 0, 288, 290, 293, 312, 0, 715 1590, 1728, 0, 288, 290, 293, 312, 0,
693 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
694 DRM_MODE_FLAG_DBLCLK) }, 717 DRM_MODE_FLAG_DBLCLK),
718 .vrefresh = 50, },
695 /* 25 - 2880x576i@50Hz */ 719 /* 25 - 2880x576i@50Hz */
696 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 720 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
697 3180, 3456, 0, 576, 580, 586, 625, 0, 721 3180, 3456, 0, 576, 580, 586, 625, 0,
698 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 722 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
699 DRM_MODE_FLAG_INTERLACE) }, 723 DRM_MODE_FLAG_INTERLACE),
724 .vrefresh = 50, },
700 /* 26 - 2880x576i@50Hz */ 725 /* 26 - 2880x576i@50Hz */
701 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 726 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
702 3180, 3456, 0, 576, 580, 586, 625, 0, 727 3180, 3456, 0, 576, 580, 586, 625, 0,
703 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
704 DRM_MODE_FLAG_INTERLACE) }, 729 DRM_MODE_FLAG_INTERLACE),
730 .vrefresh = 50, },
705 /* 27 - 2880x288@50Hz */ 731 /* 27 - 2880x288@50Hz */
706 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 732 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
707 3180, 3456, 0, 288, 290, 293, 312, 0, 733 3180, 3456, 0, 288, 290, 293, 312, 0,
708 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 734 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
735 .vrefresh = 50, },
709 /* 28 - 2880x288@50Hz */ 736 /* 28 - 2880x288@50Hz */
710 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 737 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
711 3180, 3456, 0, 288, 290, 293, 312, 0, 738 3180, 3456, 0, 288, 290, 293, 312, 0,
712 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 739 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
740 .vrefresh = 50, },
713 /* 29 - 1440x576@50Hz */ 741 /* 29 - 1440x576@50Hz */
714 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 742 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
715 1592, 1728, 0, 576, 581, 586, 625, 0, 743 1592, 1728, 0, 576, 581, 586, 625, 0,
716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 744 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
745 .vrefresh = 50, },
717 /* 30 - 1440x576@50Hz */ 746 /* 30 - 1440x576@50Hz */
718 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 747 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
719 1592, 1728, 0, 576, 581, 586, 625, 0, 748 1592, 1728, 0, 576, 581, 586, 625, 0,
720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 749 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
750 .vrefresh = 50, },
721 /* 31 - 1920x1080@50Hz */ 751 /* 31 - 1920x1080@50Hz */
722 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 752 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
723 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, 753 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
724 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 754 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
755 .vrefresh = 50, },
725 /* 32 - 1920x1080@24Hz */ 756 /* 32 - 1920x1080@24Hz */
726 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 757 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
727 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, 758 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
728 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 759 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
760 .vrefresh = 24, },
729 /* 33 - 1920x1080@25Hz */ 761 /* 33 - 1920x1080@25Hz */
730 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 762 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
731 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, 763 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
732 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 764 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
765 .vrefresh = 25, },
733 /* 34 - 1920x1080@30Hz */ 766 /* 34 - 1920x1080@30Hz */
734 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 767 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
735 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, 768 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
736 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 769 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
770 .vrefresh = 30, },
737 /* 35 - 2880x480@60Hz */ 771 /* 35 - 2880x480@60Hz */
738 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 772 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
739 3192, 3432, 0, 480, 489, 495, 525, 0, 773 3192, 3432, 0, 480, 489, 495, 525, 0,
740 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 774 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
775 .vrefresh = 60, },
741 /* 36 - 2880x480@60Hz */ 776 /* 36 - 2880x480@60Hz */
742 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 777 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
743 3192, 3432, 0, 480, 489, 495, 525, 0, 778 3192, 3432, 0, 480, 489, 495, 525, 0,
744 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 779 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
780 .vrefresh = 60, },
745 /* 37 - 2880x576@50Hz */ 781 /* 37 - 2880x576@50Hz */
746 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 782 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
747 3184, 3456, 0, 576, 581, 586, 625, 0, 783 3184, 3456, 0, 576, 581, 586, 625, 0,
748 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 784 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
785 .vrefresh = 50, },
749 /* 38 - 2880x576@50Hz */ 786 /* 38 - 2880x576@50Hz */
750 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 787 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
751 3184, 3456, 0, 576, 581, 586, 625, 0, 788 3184, 3456, 0, 576, 581, 586, 625, 0,
752 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 789 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
790 .vrefresh = 50, },
753 /* 39 - 1920x1080i@50Hz */ 791 /* 39 - 1920x1080i@50Hz */
754 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 792 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
755 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, 793 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
756 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | 794 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
757 DRM_MODE_FLAG_INTERLACE) }, 795 DRM_MODE_FLAG_INTERLACE),
796 .vrefresh = 50, },
758 /* 40 - 1920x1080i@100Hz */ 797 /* 40 - 1920x1080i@100Hz */
759 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 798 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
760 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 799 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
761 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 800 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
762 DRM_MODE_FLAG_INTERLACE) }, 801 DRM_MODE_FLAG_INTERLACE),
802 .vrefresh = 100, },
763 /* 41 - 1280x720@100Hz */ 803 /* 41 - 1280x720@100Hz */
764 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 804 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
765 1760, 1980, 0, 720, 725, 730, 750, 0, 805 1760, 1980, 0, 720, 725, 730, 750, 0,
766 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 806 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
807 .vrefresh = 100, },
767 /* 42 - 720x576@100Hz */ 808 /* 42 - 720x576@100Hz */
768 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 809 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
769 796, 864, 0, 576, 581, 586, 625, 0, 810 796, 864, 0, 576, 581, 586, 625, 0,
770 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 811 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
812 .vrefresh = 100, },
771 /* 43 - 720x576@100Hz */ 813 /* 43 - 720x576@100Hz */
772 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 814 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
773 796, 864, 0, 576, 581, 586, 625, 0, 815 796, 864, 0, 576, 581, 586, 625, 0,
774 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 816 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
817 .vrefresh = 100, },
775 /* 44 - 1440x576i@100Hz */ 818 /* 44 - 1440x576i@100Hz */
776 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 819 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
777 1590, 1728, 0, 576, 580, 586, 625, 0, 820 1590, 1728, 0, 576, 580, 586, 625, 0,
778 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 821 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
779 DRM_MODE_FLAG_DBLCLK) }, 822 DRM_MODE_FLAG_DBLCLK),
823 .vrefresh = 100, },
780 /* 45 - 1440x576i@100Hz */ 824 /* 45 - 1440x576i@100Hz */
781 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 825 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
782 1590, 1728, 0, 576, 580, 586, 625, 0, 826 1590, 1728, 0, 576, 580, 586, 625, 0,
783 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 827 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
784 DRM_MODE_FLAG_DBLCLK) }, 828 DRM_MODE_FLAG_DBLCLK),
829 .vrefresh = 100, },
785 /* 46 - 1920x1080i@120Hz */ 830 /* 46 - 1920x1080i@120Hz */
786 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 831 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
787 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, 832 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
788 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 833 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
789 DRM_MODE_FLAG_INTERLACE) }, 834 DRM_MODE_FLAG_INTERLACE),
835 .vrefresh = 120, },
790 /* 47 - 1280x720@120Hz */ 836 /* 47 - 1280x720@120Hz */
791 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 837 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
792 1430, 1650, 0, 720, 725, 730, 750, 0, 838 1430, 1650, 0, 720, 725, 730, 750, 0,
793 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 839 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
840 .vrefresh = 120, },
794 /* 48 - 720x480@120Hz */ 841 /* 48 - 720x480@120Hz */
795 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 842 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
796 798, 858, 0, 480, 489, 495, 525, 0, 843 798, 858, 0, 480, 489, 495, 525, 0,
797 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 844 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
845 .vrefresh = 120, },
798 /* 49 - 720x480@120Hz */ 846 /* 49 - 720x480@120Hz */
799 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 847 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
800 798, 858, 0, 480, 489, 495, 525, 0, 848 798, 858, 0, 480, 489, 495, 525, 0,
801 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 849 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
850 .vrefresh = 120, },
802 /* 50 - 1440x480i@120Hz */ 851 /* 50 - 1440x480i@120Hz */
803 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 852 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
804 1602, 1716, 0, 480, 488, 494, 525, 0, 853 1602, 1716, 0, 480, 488, 494, 525, 0,
805 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 854 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
806 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 855 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
856 .vrefresh = 120, },
807 /* 51 - 1440x480i@120Hz */ 857 /* 51 - 1440x480i@120Hz */
808 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 858 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
809 1602, 1716, 0, 480, 488, 494, 525, 0, 859 1602, 1716, 0, 480, 488, 494, 525, 0,
810 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 860 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
811 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 861 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
862 .vrefresh = 120, },
812 /* 52 - 720x576@200Hz */ 863 /* 52 - 720x576@200Hz */
813 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 864 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
814 796, 864, 0, 576, 581, 586, 625, 0, 865 796, 864, 0, 576, 581, 586, 625, 0,
815 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 866 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
867 .vrefresh = 200, },
816 /* 53 - 720x576@200Hz */ 868 /* 53 - 720x576@200Hz */
817 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 869 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
818 796, 864, 0, 576, 581, 586, 625, 0, 870 796, 864, 0, 576, 581, 586, 625, 0,
819 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 871 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
872 .vrefresh = 200, },
820 /* 54 - 1440x576i@200Hz */ 873 /* 54 - 1440x576i@200Hz */
821 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 874 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
822 1590, 1728, 0, 576, 580, 586, 625, 0, 875 1590, 1728, 0, 576, 580, 586, 625, 0,
823 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 876 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
824 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 877 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
878 .vrefresh = 200, },
825 /* 55 - 1440x576i@200Hz */ 879 /* 55 - 1440x576i@200Hz */
826 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 880 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
827 1590, 1728, 0, 576, 580, 586, 625, 0, 881 1590, 1728, 0, 576, 580, 586, 625, 0,
828 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 882 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
829 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 883 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
884 .vrefresh = 200, },
830 /* 56 - 720x480@240Hz */ 885 /* 56 - 720x480@240Hz */
831 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 886 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
832 798, 858, 0, 480, 489, 495, 525, 0, 887 798, 858, 0, 480, 489, 495, 525, 0,
833 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 888 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
889 .vrefresh = 240, },
834 /* 57 - 720x480@240Hz */ 890 /* 57 - 720x480@240Hz */
835 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 891 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
836 798, 858, 0, 480, 489, 495, 525, 0, 892 798, 858, 0, 480, 489, 495, 525, 0,
837 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 893 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
894 .vrefresh = 240, },
838 /* 58 - 1440x480i@240Hz */ 895 /* 58 - 1440x480i@240Hz */
839 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 896 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
840 1602, 1716, 0, 480, 488, 494, 525, 0, 897 1602, 1716, 0, 480, 488, 494, 525, 0,
841 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 898 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
842 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 899 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
900 .vrefresh = 240, },
843 /* 59 - 1440x480i@240Hz */ 901 /* 59 - 1440x480i@240Hz */
844 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 902 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
845 1602, 1716, 0, 480, 488, 494, 525, 0, 903 1602, 1716, 0, 480, 488, 494, 525, 0,
846 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 904 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
847 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 905 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
906 .vrefresh = 240, },
848 /* 60 - 1280x720@24Hz */ 907 /* 60 - 1280x720@24Hz */
849 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 908 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
850 3080, 3300, 0, 720, 725, 730, 750, 0, 909 3080, 3300, 0, 720, 725, 730, 750, 0,
851 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 910 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
911 .vrefresh = 24, },
852 /* 61 - 1280x720@25Hz */ 912 /* 61 - 1280x720@25Hz */
853 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 913 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
854 3740, 3960, 0, 720, 725, 730, 750, 0, 914 3740, 3960, 0, 720, 725, 730, 750, 0,
855 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 915 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
916 .vrefresh = 25, },
856 /* 62 - 1280x720@30Hz */ 917 /* 62 - 1280x720@30Hz */
857 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 918 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
858 3080, 3300, 0, 720, 725, 730, 750, 0, 919 3080, 3300, 0, 720, 725, 730, 750, 0,
859 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 920 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
921 .vrefresh = 30, },
860 /* 63 - 1920x1080@120Hz */ 922 /* 63 - 1920x1080@120Hz */
861 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 923 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
862 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, 924 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
863 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 925 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
926 .vrefresh = 120, },
864 /* 64 - 1920x1080@100Hz */ 927 /* 64 - 1920x1080@100Hz */
865 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 928 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
866 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 929 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
867 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 930 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
931 .vrefresh = 100, },
868}; 932};
869 933
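The new .vrefresh fields record each mode's nominal CEA rate. For illustration (a sketch in the spirit of drm_mode_vrefresh(), not part of the patch), the rate that would otherwise be derived from the timings looks like this; note it cannot tell the 59.94Hz variants from 60Hz, which is why the table now stores the value explicitly:

	/* Sketch: nominal refresh derived from mode timings.
	 * clock is in kHz; an interlaced mode scans two fields
	 * per frame, so the field rate is doubled.
	 */
	static unsigned int nominal_vrefresh(unsigned int clock_khz,
					     unsigned int htotal,
					     unsigned int vtotal,
					     bool interlaced)
	{
		unsigned int refresh = clock_khz * 1000 / (htotal * vtotal);

		if (interlaced)
			refresh *= 2;
		return refresh;
	}

	/* e.g. VIC 16: 148500 * 1000 / (2200 * 1125) = 60 */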
870/*** DDC fetch and block validation ***/ 934/*** DDC fetch and block validation ***/
@@ -2266,13 +2330,34 @@ EXPORT_SYMBOL(drm_find_cea_extension);
2266 */ 2330 */
2267u8 drm_match_cea_mode(const struct drm_display_mode *to_match) 2331u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2268{ 2332{
2269 struct drm_display_mode *cea_mode;
2270 u8 mode; 2333 u8 mode;
2271 2334
2335 if (!to_match->clock)
2336 return 0;
2337
2272 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { 2338 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
2273 cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode]; 2339 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
2340 unsigned int clock1, clock2;
2341
2342 clock1 = clock2 = cea_mode->clock;
2274 2343
2275 if (drm_mode_equal(to_match, cea_mode)) 2344 /* Check both 60Hz and 59.94Hz */
2345 if (cea_mode->vrefresh % 6 == 0) {
2346 /*
2347 * edid_cea_modes contains the 59.94Hz
2348 * variant for 240 and 480 line modes,
2349 * and the 60Hz variant otherwise.
2350 */
2351 if (cea_mode->vdisplay == 240 ||
2352 cea_mode->vdisplay == 480)
2353 clock1 = clock1 * 1001 / 1000;
2354 else
2355 clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
2356 }
2357
2358 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2359 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2360 drm_mode_equal_no_clocks(to_match, cea_mode))
2276 return mode + 1; 2361 return mode + 1;
2277 } 2362 }
2278 return 0; 2363 return 0;
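To make the tolerance concrete (an illustration, not patch code): VIC 16 stores the 60Hz clock, 148500 kHz, so clock2 = DIV_ROUND_UP(148500 * 1000, 1001) = 148352 kHz covers the 59.94Hz variant; a 480-line mode stores the 59.94Hz clock, e.g. 27000 kHz, so clock1 = 27000 * 1001 / 1000 = 27027 kHz covers 60Hz. Comparing through KHZ2PICOS() turns both clocks into pixel periods, so candidates that round to the same period still match:

	/* Sketch of the dual-clock comparison performed above. */
	static bool cea_clock_matches(unsigned int to_match_khz,
				      unsigned int clock1,
				      unsigned int clock2)
	{
		return KHZ2PICOS(to_match_khz) == KHZ2PICOS(clock1) ||
		       KHZ2PICOS(to_match_khz) == KHZ2PICOS(clock2);
	}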
@@ -2294,6 +2379,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
2294 newmode = drm_mode_duplicate(dev, 2379 newmode = drm_mode_duplicate(dev,
2295 &edid_cea_modes[cea_mode]); 2380 &edid_cea_modes[cea_mode]);
2296 if (newmode) { 2381 if (newmode) {
2382 newmode->vrefresh = 0;
2297 drm_mode_probed_add(connector, newmode); 2383 drm_mode_probed_add(connector, newmode);
2298 modes++; 2384 modes++;
2299 } 2385 }
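Clearing vrefresh on the duplicated mode forces drm_mode_vrefresh() to recompute the rate from the actual timings, rather than inheriting the nominal value the table now carries for the 59.94Hz/60Hz pairs.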
@@ -2511,6 +2597,65 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
2511EXPORT_SYMBOL(drm_edid_to_eld); 2597EXPORT_SYMBOL(drm_edid_to_eld);
2512 2598
2513/** 2599/**
2600 * drm_edid_to_sad - extracts SADs from EDID
2601 * @edid: EDID to parse
2602 * @sads: pointer that will be set to the extracted SADs
2603 *
 2604 * Looks for a CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
 2605 * Note: the returned pointer needs to be kfree()d by the caller.
 2606 *
 2607 * Returns the number of SADs found, or a negative errno on error.
2608 */
2609int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
2610{
2611 int count = 0;
2612 int i, start, end, dbl;
2613 u8 *cea;
2614
2615 cea = drm_find_cea_extension(edid);
2616 if (!cea) {
2617 DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
2618 return -ENOENT;
2619 }
2620
2621 if (cea_revision(cea) < 3) {
2622 DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
2623 return -ENOTSUPP;
2624 }
2625
2626 if (cea_db_offsets(cea, &start, &end)) {
2627 DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
2628 return -EPROTO;
2629 }
2630
2631 for_each_cea_db(cea, i, start, end) {
2632 u8 *db = &cea[i];
2633
2634 if (cea_db_tag(db) == AUDIO_BLOCK) {
2635 int j;
2636 dbl = cea_db_payload_len(db);
2637
2638 count = dbl / 3; /* SAD is 3B */
2639 *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
2640 if (!*sads)
2641 return -ENOMEM;
2642 for (j = 0; j < count; j++) {
2643 u8 *sad = &db[1 + j * 3];
2644
2645 (*sads)[j].format = (sad[0] & 0x78) >> 3;
2646 (*sads)[j].channels = sad[0] & 0x7;
2647 (*sads)[j].freq = sad[1] & 0x7F;
2648 (*sads)[j].byte2 = sad[2];
2649 }
2650 break;
2651 }
2652 }
2653
2654 return count;
2655}
2656EXPORT_SYMBOL(drm_edid_to_sad);
2657
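A minimal caller sketch for the new helper (hypothetical, not part of the patch); the array is owned by the caller and must be freed:

	static void report_sads(struct edid *edid)
	{
		struct cea_sad *sads = NULL;
		int i, count;

		count = drm_edid_to_sad(edid, &sads);
		if (count <= 0)
			return;	/* no CEA block, old revision, no audio block, ... */

		for (i = 0; i < count; i++)
			DRM_DEBUG_KMS("SAD %d: format %d, channels field %d\n",
				      i, sads[i].format, sads[i].channels);

		kfree(sads);	/* caller owns the allocation */
	}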
2658/**
2514 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond 2659 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
2515 * @connector: connector associated with the HDMI/DP sink 2660 * @connector: connector associated with the HDMI/DP sink
2516 * @mode: the display mode 2661 * @mode: the display mode
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 38d3943f72de..fa445dd4dc00 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,10 +31,11 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " 31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 4 34#define GENERIC_EDIDS 5
35static char *generic_edid_name[GENERIC_EDIDS] = { 35static char *generic_edid_name[GENERIC_EDIDS] = {
36 "edid/1024x768.bin", 36 "edid/1024x768.bin",
37 "edid/1280x1024.bin", 37 "edid/1280x1024.bin",
38 "edid/1600x1200.bin",
38 "edid/1680x1050.bin", 39 "edid/1680x1050.bin",
39 "edid/1920x1080.bin", 40 "edid/1920x1080.bin",
40}; 41};
@@ -79,6 +80,24 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
79 { 80 {
80 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 81 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
81 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 82 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
84 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
85 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
86 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
87 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
88 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
89 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
90 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
91 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
92 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
93 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
94 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
95 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
96 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
97 },
98 {
99 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
100 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
82 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78, 101 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
83 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, 102 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
84 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00, 103 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
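With the new built-in blob, a 1600x1200 mode can be forced without shipping a file in /lib/firmware, typically via the existing module parameter on the kernel command line:

	drm_kms_helper.edid_firmware=edid/1600x1200.bin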
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 892ff9f95975..b78cbe74dadf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1398,7 +1398,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1398 struct drm_mode_set *modeset; 1398 struct drm_mode_set *modeset;
1399 bool *enabled; 1399 bool *enabled;
1400 int width, height; 1400 int width, height;
1401 int i, ret; 1401 int i;
1402 1402
1403 DRM_DEBUG_KMS("\n"); 1403 DRM_DEBUG_KMS("\n");
1404 1404
@@ -1419,16 +1419,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1419 1419
1420 drm_enable_connectors(fb_helper, enabled); 1420 drm_enable_connectors(fb_helper, enabled);
1421 1421
1422 ret = drm_target_cloned(fb_helper, modes, enabled, width, height); 1422 if (!(fb_helper->funcs->initial_config &&
1423 if (!ret) { 1423 fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
1424 ret = drm_target_preferred(fb_helper, modes, enabled, width, height); 1424 enabled, width, height))) {
1425 if (!ret) 1425 memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
1426 memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
1427
1428 if (!drm_target_cloned(fb_helper,
1429 modes, enabled, width, height) &&
1430 !drm_target_preferred(fb_helper,
1431 modes, enabled, width, height))
1426 DRM_ERROR("Unable to find initial modes\n"); 1432 DRM_ERROR("Unable to find initial modes\n");
1427 }
1428 1433
1429 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); 1434 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
1435 width, height);
1430 1436
1431 drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); 1437 drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
1438 }
1432 1439
1433 /* need to set the modesets up here for use later */ 1440 /* need to set the modesets up here for use later */
1434 /* fill out the connector<->crtc mappings into the modesets */ 1441 /* fill out the connector<->crtc mappings into the modesets */
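Drivers can now take over the initial CRTC/mode selection; the cloned/preferred fallback above only runs when the hook is absent or returns false. A skeletal hook, its signature inferred from the call site (illustration only):

	static bool example_initial_config(struct drm_fb_helper *fb_helper,
					   struct drm_fb_helper_crtc **crtcs,
					   struct drm_display_mode **modes,
					   bool *enabled, int width, int height)
	{
		/* a real driver fills crtcs[i]/modes[i] per connector */
		return false;	/* defer to the generic fallback */
	}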
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index af779ae19ebf..cf919e36e8ae 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
205drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 205drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
206{ 206{
207 if (obj->import_attach) { 207 if (obj->import_attach) {
208 drm_prime_remove_imported_buf_handle(&filp->prime, 208 drm_prime_remove_buf_handle(&filp->prime,
209 obj->import_attach->dmabuf); 209 obj->import_attach->dmabuf);
210 } 210 }
211 if (obj->export_dma_buf) { 211 if (obj->export_dma_buf) {
212 drm_prime_remove_imported_buf_handle(&filp->prime, 212 drm_prime_remove_buf_handle(&filp->prime,
213 obj->export_dma_buf); 213 obj->export_dma_buf);
214 } 214 }
215} 215}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f83f0719922e..faa79df02648 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -848,6 +848,26 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
848 } else if (mode1->clock != mode2->clock) 848 } else if (mode1->clock != mode2->clock)
849 return false; 849 return false;
850 850
851 return drm_mode_equal_no_clocks(mode1, mode2);
852}
853EXPORT_SYMBOL(drm_mode_equal);
854
855/**
856 * drm_mode_equal_no_clocks - test modes for equality
857 * @mode1: first mode
858 * @mode2: second mode
859 *
860 * LOCKING:
861 * None.
862 *
863 * Check to see if @mode1 and @mode2 are equivalent, but
864 * don't check the pixel clocks.
865 *
866 * RETURNS:
867 * True if the modes are equal, false otherwise.
868 */
869bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
870{
851 if (mode1->hdisplay == mode2->hdisplay && 871 if (mode1->hdisplay == mode2->hdisplay &&
852 mode1->hsync_start == mode2->hsync_start && 872 mode1->hsync_start == mode2->hsync_start &&
853 mode1->hsync_end == mode2->hsync_end && 873 mode1->hsync_end == mode2->hsync_end &&
@@ -863,7 +883,7 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
863 883
864 return false; 884 return false;
865} 885}
866EXPORT_SYMBOL(drm_mode_equal); 886EXPORT_SYMBOL(drm_mode_equal_no_clocks);
867 887
868/** 888/**
869 * drm_mode_validate_size - make sure modes adhere to size constraints 889 * drm_mode_validate_size - make sure modes adhere to size constraints
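The split means mode equality now factors cleanly: drm_mode_equal() is the clock comparison plus drm_mode_equal_no_clocks(), so callers such as drm_match_cea_mode() can substitute their own clock tolerance while reusing the geometry and flag checks unchanged.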
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index bd719e936e13..14194b6ef644 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -152,7 +152,7 @@ static const char *drm_pci_get_name(struct drm_device *dev)
152 return pdriver->name; 152 return pdriver->name;
153} 153}
154 154
155int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) 155static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
156{ 156{
157 int len, ret; 157 int len, ret;
158 struct pci_driver *pdriver = dev->driver->kdriver.pci; 158 struct pci_driver *pdriver = dev->driver->kdriver.pci;
@@ -194,9 +194,9 @@ err:
194 return ret; 194 return ret;
195} 195}
196 196
197int drm_pci_set_unique(struct drm_device *dev, 197static int drm_pci_set_unique(struct drm_device *dev,
198 struct drm_master *master, 198 struct drm_master *master,
199 struct drm_unique *u) 199 struct drm_unique *u)
200{ 200{
201 int domain, bus, slot, func, ret; 201 int domain, bus, slot, func, ret;
202 const char *bus_name; 202 const char *bus_name;
@@ -266,7 +266,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
266 return 0; 266 return 0;
267} 267}
268 268
269int drm_pci_agp_init(struct drm_device *dev) 269static int drm_pci_agp_init(struct drm_device *dev)
270{ 270{
271 if (drm_core_has_AGP(dev)) { 271 if (drm_core_has_AGP(dev)) {
272 if (drm_pci_device_is_agp(dev)) 272 if (drm_pci_device_is_agp(dev))
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 366910ddcfcb..dcde35231e25 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,6 +62,7 @@ struct drm_prime_member {
62 struct dma_buf *dma_buf; 62 struct dma_buf *dma_buf;
63 uint32_t handle; 63 uint32_t handle;
64}; 64};
65static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
65 66
66static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, 67static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
67 enum dma_data_direction dir) 68 enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
200{ 201{
201 struct drm_gem_object *obj; 202 struct drm_gem_object *obj;
202 void *buf; 203 void *buf;
203 int ret; 204 int ret = 0;
205 struct dma_buf *dmabuf;
204 206
205 obj = drm_gem_object_lookup(dev, file_priv, handle); 207 obj = drm_gem_object_lookup(dev, file_priv, handle);
206 if (!obj) 208 if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
209 mutex_lock(&file_priv->prime.lock); 211 mutex_lock(&file_priv->prime.lock);
210 /* re-export the original imported object */ 212 /* re-export the original imported object */
211 if (obj->import_attach) { 213 if (obj->import_attach) {
212 get_dma_buf(obj->import_attach->dmabuf); 214 dmabuf = obj->import_attach->dmabuf;
213 *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags); 215 goto out_have_obj;
214 drm_gem_object_unreference_unlocked(obj);
215 mutex_unlock(&file_priv->prime.lock);
216 return 0;
217 } 216 }
218 217
219 if (obj->export_dma_buf) { 218 if (obj->export_dma_buf) {
220 get_dma_buf(obj->export_dma_buf); 219 dmabuf = obj->export_dma_buf;
221 *prime_fd = dma_buf_fd(obj->export_dma_buf, flags); 220 goto out_have_obj;
222 drm_gem_object_unreference_unlocked(obj); 221 }
223 } else { 222
224 buf = dev->driver->gem_prime_export(dev, obj, flags); 223 buf = dev->driver->gem_prime_export(dev, obj, flags);
225 if (IS_ERR(buf)) { 224 if (IS_ERR(buf)) {
226 /* normally the created dma-buf takes ownership of the ref, 225 /* normally the created dma-buf takes ownership of the ref,
227 * but if that fails then drop the ref 226 * but if that fails then drop the ref
228 */ 227 */
229 drm_gem_object_unreference_unlocked(obj); 228 ret = PTR_ERR(buf);
230 mutex_unlock(&file_priv->prime.lock); 229 goto out;
231 return PTR_ERR(buf);
232 }
233 obj->export_dma_buf = buf;
234 *prime_fd = dma_buf_fd(buf, flags);
235 } 230 }
231 obj->export_dma_buf = buf;
232
236 /* if we've exported this buffer then cheat and add it to the import list 233 /* if we've exported this buffer then cheat and add it to the import list
237 * so we get the correct handle back 234 * so we get the correct handle back
238 */ 235 */
239 ret = drm_prime_add_imported_buf_handle(&file_priv->prime, 236 ret = drm_prime_add_buf_handle(&file_priv->prime,
240 obj->export_dma_buf, handle); 237 obj->export_dma_buf, handle);
241 if (ret) { 238 if (ret)
242 drm_gem_object_unreference_unlocked(obj); 239 goto out;
243 mutex_unlock(&file_priv->prime.lock);
244 return ret;
245 }
246 240
241 *prime_fd = dma_buf_fd(buf, flags);
247 mutex_unlock(&file_priv->prime.lock); 242 mutex_unlock(&file_priv->prime.lock);
248 return 0; 243 return 0;
244
245out_have_obj:
246 get_dma_buf(dmabuf);
247 *prime_fd = dma_buf_fd(dmabuf, flags);
248out:
249 drm_gem_object_unreference_unlocked(obj);
250 mutex_unlock(&file_priv->prime.lock);
251 return ret;
249} 252}
250EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 253EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
251 254
@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
268 * refcount on gem itself instead of f_count of dmabuf. 271 * refcount on gem itself instead of f_count of dmabuf.
269 */ 272 */
270 drm_gem_object_reference(obj); 273 drm_gem_object_reference(obj);
271 dma_buf_put(dma_buf);
272 return obj; 274 return obj;
273 } 275 }
274 } 276 }
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
277 if (IS_ERR(attach)) 279 if (IS_ERR(attach))
278 return ERR_PTR(PTR_ERR(attach)); 280 return ERR_PTR(PTR_ERR(attach));
279 281
282 get_dma_buf(dma_buf);
283
280 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 284 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
281 if (IS_ERR_OR_NULL(sgt)) { 285 if (IS_ERR_OR_NULL(sgt)) {
282 ret = PTR_ERR(sgt); 286 ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
297 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); 301 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
298fail_detach: 302fail_detach:
299 dma_buf_detach(dma_buf, attach); 303 dma_buf_detach(dma_buf, attach);
304 dma_buf_put(dma_buf);
305
300 return ERR_PTR(ret); 306 return ERR_PTR(ret);
301} 307}
302EXPORT_SYMBOL(drm_gem_prime_import); 308EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
314 320
315 mutex_lock(&file_priv->prime.lock); 321 mutex_lock(&file_priv->prime.lock);
316 322
317 ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime, 323 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
318 dma_buf, handle); 324 dma_buf, handle);
319 if (!ret) { 325 if (!ret) {
320 ret = 0; 326 ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
333 if (ret) 339 if (ret)
334 goto out_put; 340 goto out_put;
335 341
336 ret = drm_prime_add_imported_buf_handle(&file_priv->prime, 342 ret = drm_prime_add_buf_handle(&file_priv->prime,
337 dma_buf, *handle); 343 dma_buf, *handle);
338 if (ret) 344 if (ret)
339 goto fail; 345 goto fail;
340 346
341 mutex_unlock(&file_priv->prime.lock); 347 mutex_unlock(&file_priv->prime.lock);
348
349 dma_buf_put(dma_buf);
350
342 return 0; 351 return 0;
343 352
344fail: 353fail:
@@ -401,21 +410,17 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
401struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) 410struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
402{ 411{
403 struct sg_table *sg = NULL; 412 struct sg_table *sg = NULL;
404 struct scatterlist *iter;
405 int i;
406 int ret; 413 int ret;
407 414
408 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 415 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
409 if (!sg) 416 if (!sg)
410 goto out; 417 goto out;
411 418
412 ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL); 419 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
420 nr_pages << PAGE_SHIFT, GFP_KERNEL);
413 if (ret) 421 if (ret)
414 goto out; 422 goto out;
415 423
416 for_each_sg(sg->sgl, iter, nr_pages, i)
417 sg_set_page(iter, pages[i], PAGE_SIZE, 0);
418
419 return sg; 424 return sg;
420out: 425out:
421 kfree(sg); 426 kfree(sg);
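One behavioural note on this conversion (not stated in the patch itself): sg_alloc_table_from_pages() may merge physically contiguous pages into a single entry, so sg->nents can end up smaller than nr_pages, whereas the removed loop always produced one PAGE_SIZE entry per page. Consumers should therefore walk the table with for_each_sg() rather than assuming a one-to-one page mapping.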
@@ -483,15 +488,12 @@ EXPORT_SYMBOL(drm_prime_init_file_private);
483 488
484void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) 489void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
485{ 490{
486 struct drm_prime_member *member, *safe; 491 /* by now drm_gem_release should've made sure the list is empty */
487 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { 492 WARN_ON(!list_empty(&prime_fpriv->head));
488 list_del(&member->entry);
489 kfree(member);
490 }
491} 493}
492EXPORT_SYMBOL(drm_prime_destroy_file_private); 494EXPORT_SYMBOL(drm_prime_destroy_file_private);
493 495
494int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) 496static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
495{ 497{
496 struct drm_prime_member *member; 498 struct drm_prime_member *member;
497 499
@@ -499,14 +501,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
499 if (!member) 501 if (!member)
500 return -ENOMEM; 502 return -ENOMEM;
501 503
504 get_dma_buf(dma_buf);
502 member->dma_buf = dma_buf; 505 member->dma_buf = dma_buf;
503 member->handle = handle; 506 member->handle = handle;
504 list_add(&member->entry, &prime_fpriv->head); 507 list_add(&member->entry, &prime_fpriv->head);
505 return 0; 508 return 0;
506} 509}
507EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
508 510
509int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) 511int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
510{ 512{
511 struct drm_prime_member *member; 513 struct drm_prime_member *member;
512 514
@@ -518,19 +520,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
518 } 520 }
519 return -ENOENT; 521 return -ENOENT;
520} 522}
521EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle); 523EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
522 524
523void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) 525void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
524{ 526{
525 struct drm_prime_member *member, *safe; 527 struct drm_prime_member *member, *safe;
526 528
527 mutex_lock(&prime_fpriv->lock); 529 mutex_lock(&prime_fpriv->lock);
528 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { 530 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
529 if (member->dma_buf == dma_buf) { 531 if (member->dma_buf == dma_buf) {
532 dma_buf_put(dma_buf);
530 list_del(&member->entry); 533 list_del(&member->entry);
531 kfree(member); 534 kfree(member);
532 } 535 }
533 } 536 }
534 mutex_unlock(&prime_fpriv->lock); 537 mutex_unlock(&prime_fpriv->lock);
535} 538}
536EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle); 539EXPORT_SYMBOL(drm_prime_remove_buf_handle);
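Taken together, the drm_prime.c changes give the per-file lookup table clear reference ownership. A summary of the pairing as it now stands (annotation, not patch code):

	/*
	 * drm_prime_add_buf_handle()    -> get_dma_buf(dma_buf)
	 * drm_prime_remove_buf_handle() -> dma_buf_put(dma_buf)
	 *
	 * Import paths take their own get_dma_buf() for the attachment
	 * they keep, so drm_gem_prime_fd_to_handle() can drop its fd
	 * lookup reference once the handle is cached, and
	 * drm_prime_destroy_file_private() only needs to warn if
	 * drm_gem_release() left anything behind.
	 */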
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index db7bd292410b..1d4f7c9fe661 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -422,6 +422,7 @@ void drm_vm_open_locked(struct drm_device *dev,
422 list_add(&vma_entry->head, &dev->vmalist); 422 list_add(&vma_entry->head, &dev->vmalist);
423 } 423 }
424} 424}
425EXPORT_SYMBOL_GPL(drm_vm_open_locked);
425 426
426static void drm_vm_open(struct vm_area_struct *vma) 427static void drm_vm_open(struct vm_area_struct *vma)
427{ 428{
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 046bcda36abe..772c62a6e2ac 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,7 +24,9 @@ config DRM_EXYNOS_DMABUF
24 24
25config DRM_EXYNOS_FIMD 25config DRM_EXYNOS_FIMD
26 bool "Exynos DRM FIMD" 26 bool "Exynos DRM FIMD"
27 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 27 depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
28 select FB_MODE_HELPERS
29 select VIDEOMODE_HELPERS
28 help 30 help
29 Choose this option if you want to use Exynos FIMD for DRM. 31 Choose this option if you want to use Exynos FIMD for DRM.
30 32
@@ -54,7 +56,7 @@ config DRM_EXYNOS_IPP
54 56
55config DRM_EXYNOS_FIMC 57config DRM_EXYNOS_FIMC
56 bool "Exynos DRM FIMC" 58 bool "Exynos DRM FIMC"
57 depends on DRM_EXYNOS_IPP 59 depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
58 help 60 help
59 Choose this option if you want to use Exynos FIMC for DRM. 61 Choose this option if you want to use Exynos FIMC for DRM.
60 62
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 4c5b6859c9ea..8bcc13ac9f73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -124,7 +124,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
124 } 124 }
125 125
126 count = drm_add_edid_modes(connector, edid); 126 count = drm_add_edid_modes(connector, edid);
127 if (count < 0) { 127 if (!count) {
128 DRM_ERROR("Add edid modes failed %d\n", count); 128 DRM_ERROR("Add edid modes failed %d\n", count);
129 goto out; 129 goto out;
130 } 130 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ba0a3aa78547..ff7f2a886a34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
235 * refcount on gem itself instead of f_count of dmabuf. 235 * refcount on gem itself instead of f_count of dmabuf.
236 */ 236 */
237 drm_gem_object_reference(obj); 237 drm_gem_object_reference(obj);
238 dma_buf_put(dma_buf);
239 return obj; 238 return obj;
240 } 239 }
241 } 240 }
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
244 if (IS_ERR(attach)) 243 if (IS_ERR(attach))
245 return ERR_PTR(-EINVAL); 244 return ERR_PTR(-EINVAL);
246 245
246 get_dma_buf(dma_buf);
247 247
248 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 248 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
249 if (IS_ERR_OR_NULL(sgt)) { 249 if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
298 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); 298 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
299err_buf_detach: 299err_buf_detach:
300 dma_buf_detach(dma_buf, attach); 300 dma_buf_detach(dma_buf, attach);
301 dma_buf_put(dma_buf);
302
301 return ERR_PTR(ret); 303 return ERR_PTR(ret);
302} 304}
303 305
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 3da5c2d214d8..ba6d995e4375 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -380,6 +380,10 @@ static int __init exynos_drm_init(void)
380 ret = platform_driver_register(&ipp_driver); 380 ret = platform_driver_register(&ipp_driver);
381 if (ret < 0) 381 if (ret < 0)
382 goto out_ipp; 382 goto out_ipp;
383
384 ret = exynos_platform_device_ipp_register();
385 if (ret < 0)
386 goto out_ipp_dev;
383#endif 387#endif
384 388
385 ret = platform_driver_register(&exynos_drm_platform_driver); 389 ret = platform_driver_register(&exynos_drm_platform_driver);
@@ -388,7 +392,7 @@ static int __init exynos_drm_init(void)
388 392
389 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, 393 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
390 NULL, 0); 394 NULL, 0);
391 if (IS_ERR_OR_NULL(exynos_drm_pdev)) { 395 if (IS_ERR(exynos_drm_pdev)) {
392 ret = PTR_ERR(exynos_drm_pdev); 396 ret = PTR_ERR(exynos_drm_pdev);
393 goto out; 397 goto out;
394 } 398 }
@@ -400,6 +404,8 @@ out:
400 404
401out_drm: 405out_drm:
402#ifdef CONFIG_DRM_EXYNOS_IPP 406#ifdef CONFIG_DRM_EXYNOS_IPP
407 exynos_platform_device_ipp_unregister();
408out_ipp_dev:
403 platform_driver_unregister(&ipp_driver); 409 platform_driver_unregister(&ipp_driver);
404out_ipp: 410out_ipp:
405#endif 411#endif
@@ -456,6 +462,7 @@ static void __exit exynos_drm_exit(void)
456 platform_driver_unregister(&exynos_drm_platform_driver); 462 platform_driver_unregister(&exynos_drm_platform_driver);
457 463
458#ifdef CONFIG_DRM_EXYNOS_IPP 464#ifdef CONFIG_DRM_EXYNOS_IPP
465 exynos_platform_device_ipp_unregister();
459 platform_driver_unregister(&ipp_driver); 466 platform_driver_unregister(&ipp_driver);
460#endif 467#endif
461 468
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 4606fac7241a..680a7c1b9dea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -322,13 +322,23 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
322 * this function registers exynos drm hdmi platform device. It ensures only one 322 * this function registers exynos drm hdmi platform device. It ensures only one
323 * instance of the device is created. 323 * instance of the device is created.
324 */ 324 */
325extern int exynos_platform_device_hdmi_register(void); 325int exynos_platform_device_hdmi_register(void);
326 326
327/* 327/*
328 * this function unregisters exynos drm hdmi platform device if it exists. 328 * this function unregisters exynos drm hdmi platform device if it exists.
329 */ 329 */
330void exynos_platform_device_hdmi_unregister(void); 330void exynos_platform_device_hdmi_unregister(void);
331 331
332/*
333 * this function registers exynos drm ipp platform device.
334 */
335int exynos_platform_device_ipp_register(void);
336
337/*
338 * this function unregisters exynos drm ipp platform device if it exists.
339 */
340void exynos_platform_device_ipp_unregister(void);
341
332extern struct platform_driver fimd_driver; 342extern struct platform_driver fimd_driver;
333extern struct platform_driver hdmi_driver; 343extern struct platform_driver hdmi_driver;
334extern struct platform_driver mixer_driver; 344extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 411f69b76e84..773f583fa964 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,11 +12,12 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/regmap.h>
17#include <linux/clk.h> 19#include <linux/clk.h>
18#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20 21
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
@@ -76,6 +77,27 @@ enum fimc_wb {
76 FIMC_WB_B, 77 FIMC_WB_B,
77}; 78};
78 79
80enum {
81 FIMC_CLK_LCLK,
82 FIMC_CLK_GATE,
83 FIMC_CLK_WB_A,
84 FIMC_CLK_WB_B,
85 FIMC_CLK_MUX,
86 FIMC_CLK_PARENT,
87 FIMC_CLKS_MAX
88};
89
90static const char * const fimc_clock_names[] = {
91 [FIMC_CLK_LCLK] = "sclk_fimc",
92 [FIMC_CLK_GATE] = "fimc",
93 [FIMC_CLK_WB_A] = "pxl_async0",
94 [FIMC_CLK_WB_B] = "pxl_async1",
95 [FIMC_CLK_MUX] = "mux",
96 [FIMC_CLK_PARENT] = "parent",
97};
98
99#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
100
79/* 101/*
80 * A structure of scaler. 102 * A structure of scaler.
81 * 103 *
@@ -119,28 +141,16 @@ struct fimc_capability {
119}; 141};
120 142
121/* 143/*
122 * A structure of fimc driver data.
123 *
124 * @parent_clk: name of parent clock.
125 */
126struct fimc_driverdata {
127 char *parent_clk;
128};
129
130/*
131 * A structure of fimc context. 144 * A structure of fimc context.
132 * 145 *
133 * @ippdrv: prepare initialization using ippdrv. 146 * @ippdrv: prepare initialization using ippdrv.
134 * @regs_res: register resources. 147 * @regs_res: register resources.
135 * @regs: memory mapped io registers. 148 * @regs: memory mapped io registers.
136 * @lock: locking of operations. 149 * @lock: locking of operations.
137 * @sclk_fimc_clk: fimc source clock. 150 * @clocks: fimc clocks.
138 * @fimc_clk: fimc clock. 151 * @clk_frequency: LCLK clock frequency.
139 * @wb_clk: writeback a clock. 152 * @sysreg: handle to SYSREG block regmap.
140 * @wb_b_clk: writeback b clock.
141 * @sc: scaler information. 153 * @sc: scaler information.
142 * @odr: ordering of YUV.
143 * @ver: fimc version.
144 * @pol: polarity of writeback. 154 * @pol: polarity of writeback.
145 * @id: fimc id. 155 * @id: fimc id.
146 * @irq: irq number. 156 * @irq: irq number.
@@ -151,12 +161,10 @@ struct fimc_context {
151 struct resource *regs_res; 161 struct resource *regs_res;
152 void __iomem *regs; 162 void __iomem *regs;
153 struct mutex lock; 163 struct mutex lock;
154 struct clk *sclk_fimc_clk; 164 struct clk *clocks[FIMC_CLKS_MAX];
155 struct clk *fimc_clk; 165 u32 clk_frequency;
156 struct clk *wb_clk; 166 struct regmap *sysreg;
157 struct clk *wb_b_clk;
158 struct fimc_scaler sc; 167 struct fimc_scaler sc;
159 struct fimc_driverdata *ddata;
160 struct exynos_drm_ipp_pol pol; 168 struct exynos_drm_ipp_pol pol;
161 int id; 169 int id;
162 int irq; 170 int irq;
@@ -200,17 +208,13 @@ static void fimc_sw_reset(struct fimc_context *ctx)
200 fimc_write(0x0, EXYNOS_CIFCNTSEQ); 208 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
201} 209}
202 210
203static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 211static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
204{ 212{
205 u32 camblk_cfg;
206
207 DRM_DEBUG_KMS("%s\n", __func__); 213 DRM_DEBUG_KMS("%s\n", __func__);
208 214
209 camblk_cfg = readl(SYSREG_CAMERA_BLK); 215 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
210 camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK); 216 SYSREG_FIMD0WB_DEST_MASK,
211 camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT); 217 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
212
213 writel(camblk_cfg, SYSREG_CAMERA_BLK);
214} 218}
215 219
216static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) 220static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
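Routing the SYSREG write through a regmap lets the register core serialize the read-modify-write against other users of the shared system-register block. The handle would typically come from a syscon phandle lookup, along these lines (sketch; the actual lookup sits outside this hunk and the property name is assumed):

	ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
						      "samsung,sysreg");
	if (IS_ERR(ctx->sysreg))
		return PTR_ERR(ctx->sysreg);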
@@ -1301,14 +1305,12 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1301 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 1305 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1302 1306
1303 if (enable) { 1307 if (enable) {
1304 clk_enable(ctx->sclk_fimc_clk); 1308 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1305 clk_enable(ctx->fimc_clk); 1309 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1306 clk_enable(ctx->wb_clk);
1307 ctx->suspended = false; 1310 ctx->suspended = false;
1308 } else { 1311 } else {
1309 clk_disable(ctx->sclk_fimc_clk); 1312 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1310 clk_disable(ctx->fimc_clk); 1313 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1311 clk_disable(ctx->wb_clk);
1312 ctx->suspended = true; 1314 ctx->suspended = true;
1313 } 1315 }
1314 1316
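clk_prepare_enable() pairs clk_prepare(), which may sleep, with clk_enable(); the common clock framework requires a prepare before any enable, which is why the bare clk_enable() calls had to go. Like the old code, the patch ignores the return values; a stricter sketch that propagates failures (the error handling is an editorial assumption, not part of the patch):

#include <linux/clk.h>

static int fimc_clk_on(struct fimc_context *ctx)
{
	int ret;

	ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
	if (ret < 0) {
		/* unwind the first enable before reporting failure */
		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
		return ret;
	}

	ctx->suspended = false;
	return 0;
}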
@@ -1613,7 +1615,11 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1613 fimc_handle_lastend(ctx, true); 1615 fimc_handle_lastend(ctx, true);
1614 1616
1615 /* setup FIMD */ 1617 /* setup FIMD */
1616 fimc_set_camblk_fimd0_wb(ctx); 1618 ret = fimc_set_camblk_fimd0_wb(ctx);
1619 if (ret < 0) {
1620 dev_err(dev, "camblk setup failed.\n");
1621 return ret;
1622 }
1617 1623
1618 set_wb.enable = 1; 1624 set_wb.enable = 1;
1619 set_wb.refresh = property->refresh_rate; 1625 set_wb.refresh = property->refresh_rate;
@@ -1713,76 +1719,118 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1713 fimc_write(cfg, EXYNOS_CIGCTRL); 1719 fimc_write(cfg, EXYNOS_CIGCTRL);
1714} 1720}
1715 1721
1722static void fimc_put_clocks(struct fimc_context *ctx)
1723{
1724 int i;
1725
1726 for (i = 0; i < FIMC_CLKS_MAX; i++) {
1727 if (IS_ERR(ctx->clocks[i]))
1728 continue;
1729 clk_put(ctx->clocks[i]);
1730 ctx->clocks[i] = ERR_PTR(-EINVAL);
1731 }
1732}
1733
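Seeding every slot with ERR_PTR(-EINVAL) is what makes fimc_put_clocks() safe to call at any point during probe or teardown: IS_ERR() filters both never-acquired and already-released entries, so no separate "valid" flag is needed. The idiom in isolation (helper name is illustrative):

#include <linux/clk.h>
#include <linux/err.h>

/* Release one slot and re-poison it so a second call is a no-op. */
static void fimc_release_clock(struct clk **slot)
{
	if (IS_ERR(*slot))
		return;		/* never acquired, or already released */
	clk_put(*slot);
	*slot = ERR_PTR(-EINVAL);
}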
1734static int fimc_setup_clocks(struct fimc_context *ctx)
1735{
1736 struct device *fimc_dev = ctx->ippdrv.dev;
1737 struct device *dev;
1738 int ret, i;
1739
1740 for (i = 0; i < FIMC_CLKS_MAX; i++)
1741 ctx->clocks[i] = ERR_PTR(-EINVAL);
1742
1743 for (i = 0; i < FIMC_CLKS_MAX; i++) {
1744 if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
1745 dev = fimc_dev->parent;
1746 else
1747 dev = fimc_dev;
1748
1749 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
1750 if (IS_ERR(ctx->clocks[i])) {
1751 if (i >= FIMC_CLK_MUX)
1752 break;
1753 ret = PTR_ERR(ctx->clocks[i]);
1754 dev_err(fimc_dev, "failed to get clock: %s\n",
1755 fimc_clock_names[i]);
1756 goto e_clk_free;
1757 }
1758 }
1759
1760 /* Optional FIMC LCLK parent clock setting */
1761 if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
1762 ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
1763 ctx->clocks[FIMC_CLK_PARENT]);
1764 if (ret < 0) {
1765 dev_err(fimc_dev, "failed to set parent.\n");
1766 goto e_clk_free;
1767 }
1768 }
1769
1770 ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
1771 if (ret < 0)
1772 goto e_clk_free;
1773
1774 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
1775 if (!ret)
1776 return ret;
1777e_clk_free:
1778 fimc_put_clocks(ctx);
1779 return ret;
1780}
1781
1782static int fimc_parse_dt(struct fimc_context *ctx)
1783{
1784 struct device_node *node = ctx->ippdrv.dev->of_node;
1785
1786 /* Handle only devices that support the LCD Writeback data path */
1787 if (!of_property_read_bool(node, "samsung,lcd-wb"))
1788 return -ENODEV;
1789
1790 if (of_property_read_u32(node, "clock-frequency",
1791 &ctx->clk_frequency))
1792 ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
1793
1794 ctx->id = of_alias_get_id(node, "fimc");
1795
1796 if (ctx->id < 0) {
1797 dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
1798 return -EINVAL;
1799 }
1800
1801 return 0;
1802}
1803
1716static int fimc_probe(struct platform_device *pdev) 1804static int fimc_probe(struct platform_device *pdev)
1717{ 1805{
1718 struct device *dev = &pdev->dev; 1806 struct device *dev = &pdev->dev;
1719 struct fimc_context *ctx; 1807 struct fimc_context *ctx;
1720 struct clk *parent_clk;
1721 struct resource *res; 1808 struct resource *res;
1722 struct exynos_drm_ippdrv *ippdrv; 1809 struct exynos_drm_ippdrv *ippdrv;
1723 struct exynos_drm_fimc_pdata *pdata;
1724 struct fimc_driverdata *ddata;
1725 int ret; 1810 int ret;
1726 1811
1727 pdata = pdev->dev.platform_data; 1812 if (!dev->of_node) {
1728 if (!pdata) { 1813 dev_err(dev, "device tree node not found.\n");
1729 dev_err(dev, "no platform data specified.\n"); 1814 return -ENODEV;
1730 return -EINVAL;
1731 } 1815 }
1732 1816
1733 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1817 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1734 if (!ctx) 1818 if (!ctx)
1735 return -ENOMEM; 1819 return -ENOMEM;
1736 1820
1737 ddata = (struct fimc_driverdata *) 1821 ctx->ippdrv.dev = dev;
1738 platform_get_device_id(pdev)->driver_data;
1739
1740 /* clock control */
1741 ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
1742 if (IS_ERR(ctx->sclk_fimc_clk)) {
1743 dev_err(dev, "failed to get src fimc clock.\n");
1744 return PTR_ERR(ctx->sclk_fimc_clk);
1745 }
1746 clk_enable(ctx->sclk_fimc_clk);
1747
1748 ctx->fimc_clk = devm_clk_get(dev, "fimc");
1749 if (IS_ERR(ctx->fimc_clk)) {
1750 dev_err(dev, "failed to get fimc clock.\n");
1751 clk_disable(ctx->sclk_fimc_clk);
1752 return PTR_ERR(ctx->fimc_clk);
1753 }
1754
1755 ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
1756 if (IS_ERR(ctx->wb_clk)) {
1757 dev_err(dev, "failed to get writeback a clock.\n");
1758 clk_disable(ctx->sclk_fimc_clk);
1759 return PTR_ERR(ctx->wb_clk);
1760 }
1761
1762 ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
1763 if (IS_ERR(ctx->wb_b_clk)) {
1764 dev_err(dev, "failed to get writeback b clock.\n");
1765 clk_disable(ctx->sclk_fimc_clk);
1766 return PTR_ERR(ctx->wb_b_clk);
1767 }
1768 1822
1769 parent_clk = devm_clk_get(dev, ddata->parent_clk); 1823 ret = fimc_parse_dt(ctx);
1770 1824 if (ret < 0)
1771 if (IS_ERR(parent_clk)) { 1825 return ret;
1772 dev_err(dev, "failed to get parent clock.\n");
1773 clk_disable(ctx->sclk_fimc_clk);
1774 return PTR_ERR(parent_clk);
1775 }
1776 1826
1777 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) { 1827 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
1778 dev_err(dev, "failed to set parent.\n"); 1828 "samsung,sysreg");
1779 clk_disable(ctx->sclk_fimc_clk); 1829 if (IS_ERR(ctx->sysreg)) {
1780 return -EINVAL; 1830 dev_err(dev, "syscon regmap lookup failed.\n");
1831 return PTR_ERR(ctx->sysreg);
1781 } 1832 }
1782 1833
1783 devm_clk_put(dev, parent_clk);
1784 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
1785
1786 /* resource memory */ 1834 /* resource memory */
1787 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1835 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1788 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); 1836 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
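Taken together, the "samsung,sysreg" phandle lookup above and the regmap_update_bits() conversion earlier mean the driver no longer touches a statically mapped SYSREG address at all. A condensed sketch of the whole path (function name is illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int fimc_route_fimd0_wb(struct device *dev, int id)
{
	struct regmap *sysreg;

	sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
						 "samsung,sysreg");
	if (IS_ERR(sysreg))
		return PTR_ERR(sysreg);

	/* SYSREG_CAMERA_BLK is now an offset into the syscon block */
	return regmap_update_bits(sysreg, SYSREG_CAMERA_BLK,
				  SYSREG_FIMD0WB_DEST_MASK,
				  id << SYSREG_FIMD0WB_DEST_SHIFT);
}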
@@ -1804,13 +1852,11 @@ static int fimc_probe(struct platform_device *pdev)
1804 return ret; 1852 return ret;
1805 } 1853 }
1806 1854
1807 /* context initialization */                            1855 ret = fimc_setup_clocks(ctx);
1808 ctx->id = pdev->id; 1856 if (ret < 0)
1809 ctx->pol = pdata->pol; 1857 goto err_free_irq;
1810 ctx->ddata = ddata;
1811 1858
1812 ippdrv = &ctx->ippdrv; 1859 ippdrv = &ctx->ippdrv;
1813 ippdrv->dev = dev;
1814 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; 1860 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
1815 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; 1861 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
1816 ippdrv->check_property = fimc_ippdrv_check_property; 1862 ippdrv->check_property = fimc_ippdrv_check_property;
@@ -1820,7 +1866,7 @@ static int fimc_probe(struct platform_device *pdev)
1820 ret = fimc_init_prop_list(ippdrv); 1866 ret = fimc_init_prop_list(ippdrv);
1821 if (ret < 0) { 1867 if (ret < 0) {
1822 dev_err(dev, "failed to init property list.\n"); 1868 dev_err(dev, "failed to init property list.\n");
1823 goto err_get_irq; 1869 goto err_put_clk;
1824 } 1870 }
1825 1871
1826 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, 1872 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1835,17 +1881,18 @@ static int fimc_probe(struct platform_device *pdev)
1835 ret = exynos_drm_ippdrv_register(ippdrv); 1881 ret = exynos_drm_ippdrv_register(ippdrv);
1836 if (ret < 0) { 1882 if (ret < 0) {
1837 dev_err(dev, "failed to register drm fimc device.\n"); 1883 dev_err(dev, "failed to register drm fimc device.\n");
1838 goto err_ippdrv_register; 1884 goto err_pm_dis;
1839 } 1885 }
1840 1886
1841 dev_info(&pdev->dev, "drm fimc registered successfully.\n"); 1887 dev_info(&pdev->dev, "drm fimc registered successfully.\n");
1842 1888
1843 return 0; 1889 return 0;
1844 1890
1845err_ippdrv_register: 1891err_pm_dis:
1846 devm_kfree(dev, ippdrv->prop_list);
1847 pm_runtime_disable(dev); 1892 pm_runtime_disable(dev);
1848err_get_irq: 1893err_put_clk:
1894 fimc_put_clocks(ctx);
1895err_free_irq:
1849 free_irq(ctx->irq, ctx); 1896 free_irq(ctx->irq, ctx);
1850 1897
1851 return ret; 1898 return ret;
@@ -1857,10 +1904,10 @@ static int fimc_remove(struct platform_device *pdev)
1857 struct fimc_context *ctx = get_fimc_context(dev); 1904 struct fimc_context *ctx = get_fimc_context(dev);
1858 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1905 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1859 1906
1860 devm_kfree(dev, ippdrv->prop_list);
1861 exynos_drm_ippdrv_unregister(ippdrv); 1907 exynos_drm_ippdrv_unregister(ippdrv);
1862 mutex_destroy(&ctx->lock); 1908 mutex_destroy(&ctx->lock);
1863 1909
1910 fimc_put_clocks(ctx);
1864 pm_runtime_set_suspended(dev); 1911 pm_runtime_set_suspended(dev);
1865 pm_runtime_disable(dev); 1912 pm_runtime_disable(dev);
1866 1913
@@ -1915,36 +1962,22 @@ static int fimc_runtime_resume(struct device *dev)
1915} 1962}
1916#endif 1963#endif
1917 1964
1918static struct fimc_driverdata exynos4210_fimc_data = {
1919 .parent_clk = "mout_mpll",
1920};
1921
1922static struct fimc_driverdata exynos4410_fimc_data = {
1923 .parent_clk = "mout_mpll_user",
1924};
1925
1926static struct platform_device_id fimc_driver_ids[] = {
1927 {
1928 .name = "exynos4210-fimc",
1929 .driver_data = (unsigned long)&exynos4210_fimc_data,
1930 }, {
1931 .name = "exynos4412-fimc",
1932 .driver_data = (unsigned long)&exynos4410_fimc_data,
1933 },
1934 {},
1935};
1936MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
1937
1938static const struct dev_pm_ops fimc_pm_ops = { 1965static const struct dev_pm_ops fimc_pm_ops = {
1939 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume) 1966 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
1940 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) 1967 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
1941}; 1968};
1942 1969
1970static const struct of_device_id fimc_of_match[] = {
1971 { .compatible = "samsung,exynos4210-fimc" },
1972 { .compatible = "samsung,exynos4212-fimc" },
1973 { },
1974};
1975
1943struct platform_driver fimc_driver = { 1976struct platform_driver fimc_driver = {
1944 .probe = fimc_probe, 1977 .probe = fimc_probe,
1945 .remove = fimc_remove, 1978 .remove = fimc_remove,
1946 .id_table = fimc_driver_ids,
1947 .driver = { 1979 .driver = {
1980 .of_match_table = fimc_of_match,
1948 .name = "exynos-drm-fimc", 1981 .name = "exynos-drm-fimc",
1949 .owner = THIS_MODULE, 1982 .owner = THIS_MODULE,
1950 .pm = &fimc_pm_ops, 1983 .pm = &fimc_pm_ops,
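With the platform_device_id table gone, matching is purely compatible-string based. Had a per-variant datum still been needed, the usual pattern is to hang it off .data in the OF table and fetch it with of_match_device(); a sketch for comparison (this patch deliberately drops the variant data instead, so the names and values below are illustrative only):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>

/* Illustrative: this driver no longer carries per-variant data. */
static const struct of_device_id fimc_of_match_ex[] = {
	{ .compatible = "samsung,exynos4210-fimc", .data = (void *)1 },
	{ .compatible = "samsung,exynos4212-fimc", .data = (void *)2 },
	{ },
};

static long fimc_variant(struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(fimc_of_match_ex, dev);
	return match ? (long)match->data : -ENODEV;
}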
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 98cc14725ba9..746b282b343a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,7 @@
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22 22
23#include <video/of_display_timing.h>
23#include <video/samsung_fimd.h> 24#include <video/samsung_fimd.h>
24#include <drm/exynos_drm.h> 25#include <drm/exynos_drm.h>
25 26
@@ -800,18 +801,18 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
800 if (enable) { 801 if (enable) {
801 int ret; 802 int ret;
802 803
803 ret = clk_enable(ctx->bus_clk); 804 ret = clk_prepare_enable(ctx->bus_clk);
804 if (ret < 0) 805 if (ret < 0)
805 return ret; 806 return ret;
806 807
807 ret = clk_enable(ctx->lcd_clk); 808 ret = clk_prepare_enable(ctx->lcd_clk);
808 if (ret < 0) { 809 if (ret < 0) {
809 clk_disable(ctx->bus_clk); 810 clk_disable_unprepare(ctx->bus_clk);
810 return ret; 811 return ret;
811 } 812 }
812 } else { 813 } else {
813 clk_disable(ctx->lcd_clk); 814 clk_disable_unprepare(ctx->lcd_clk);
814 clk_disable(ctx->bus_clk); 815 clk_disable_unprepare(ctx->bus_clk);
815 } 816 }
816 817
817 return 0; 818 return 0;
@@ -884,10 +885,25 @@ static int fimd_probe(struct platform_device *pdev)
884 885
885 DRM_DEBUG_KMS("%s\n", __FILE__); 886 DRM_DEBUG_KMS("%s\n", __FILE__);
886 887
887 pdata = pdev->dev.platform_data; 888 if (pdev->dev.of_node) {
888 if (!pdata) { 889 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
889 dev_err(dev, "no platform data specified\n"); 890 if (!pdata) {
890 return -EINVAL; 891 DRM_ERROR("memory allocation for pdata failed\n");
892 return -ENOMEM;
893 }
894
895 ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
896 OF_USE_NATIVE_MODE);
897 if (ret) {
898 DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
899 return ret;
900 }
901 } else {
902 pdata = pdev->dev.platform_data;
903 if (!pdata) {
904 DRM_ERROR("no platform data specified\n");
905 return -EINVAL;
906 }
891 } 907 }
892 908
893 panel = &pdata->panel; 909 panel = &pdata->panel;
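of_get_fb_videomode() converts a node's standard display-timings binding into an fb_videomode, and OF_USE_NATIVE_MODE selects the entry the node flags as native. The DT branch of the probe above reduces to roughly this (sketch; helper name is illustrative and assumes the standard display-timings child node):

#include <linux/fb.h>
#include <linux/of.h>
#include <video/of_display_timing.h>

static int fimd_native_timing(struct device_node *np,
			      struct fb_videomode *fbmode)
{
	/* Reads the native entry of np's display-timings child node. */
	return of_get_fb_videomode(np, fbmode, OF_USE_NATIVE_MODE);
}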
@@ -918,7 +934,7 @@ static int fimd_probe(struct platform_device *pdev)
918 if (IS_ERR(ctx->regs)) 934 if (IS_ERR(ctx->regs))
919 return PTR_ERR(ctx->regs); 935 return PTR_ERR(ctx->regs);
920 936
921 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 937 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
922 if (!res) { 938 if (!res) {
923 dev_err(dev, "irq request failed.\n"); 939 dev_err(dev, "irq request failed.\n");
924 return -ENXIO; 940 return -ENXIO;
@@ -980,9 +996,6 @@ static int fimd_remove(struct platform_device *pdev)
980 if (ctx->suspended) 996 if (ctx->suspended)
981 goto out; 997 goto out;
982 998
983 clk_disable(ctx->lcd_clk);
984 clk_disable(ctx->bus_clk);
985
986 pm_runtime_set_suspended(dev); 999 pm_runtime_set_suspended(dev);
987 pm_runtime_put_sync(dev); 1000 pm_runtime_put_sync(dev);
988 1001
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0e6fe000578c..cf4543ffa079 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -682,7 +682,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
682 args->pitch = args->width * ((args->bpp + 7) / 8); 682 args->pitch = args->width * ((args->bpp + 7) / 8);
683 args->size = args->pitch * args->height; 683 args->size = args->pitch * args->height;
684 684
685 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 685 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
686 EXYNOS_BO_WC, args->size);
686 if (IS_ERR(exynos_gem_obj)) 687 if (IS_ERR(exynos_gem_obj))
687 return PTR_ERR(exynos_gem_obj); 688 return PTR_ERR(exynos_gem_obj);
688 689
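The pitch math above rounds bits-per-pixel up to whole bytes before multiplying by the width, so width 1920 at bpp 32 gives a 7680-byte pitch, and an odd depth such as bpp 12 still consumes 2 bytes per pixel. As a one-liner (illustrative helper):

#include <linux/types.h>

/* pitch in bytes: bpp rounded up to whole bytes, times width.
 * e.g. dumb_pitch(1920, 32) == 7680, dumb_pitch(1920, 12) == 3840.
 */
static u32 dumb_pitch(u32 width, u32 bpp)
{
	return width * ((bpp + 7) / 8);
}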
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 7c27df03c9ff..ba2f0f1aa05f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -51,21 +51,27 @@ struct drm_hdmi_context {
51 51
52int exynos_platform_device_hdmi_register(void) 52int exynos_platform_device_hdmi_register(void)
53{ 53{
54 struct platform_device *pdev;
55
54 if (exynos_drm_hdmi_pdev) 56 if (exynos_drm_hdmi_pdev)
55 return -EEXIST; 57 return -EEXIST;
56 58
57 exynos_drm_hdmi_pdev = platform_device_register_simple( 59 pdev = platform_device_register_simple(
58 "exynos-drm-hdmi", -1, NULL, 0); 60 "exynos-drm-hdmi", -1, NULL, 0);
59 if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev)) 61 if (IS_ERR(pdev))
60 return PTR_ERR(exynos_drm_hdmi_pdev); 62 return PTR_ERR(pdev);
63
64 exynos_drm_hdmi_pdev = pdev;
61 65
62 return 0; 66 return 0;
63} 67}
64 68
65void exynos_platform_device_hdmi_unregister(void) 69void exynos_platform_device_hdmi_unregister(void)
66{ 70{
67 if (exynos_drm_hdmi_pdev) 71 if (exynos_drm_hdmi_pdev) {
68 platform_device_unregister(exynos_drm_hdmi_pdev); 72 platform_device_unregister(exynos_drm_hdmi_pdev);
73 exynos_drm_hdmi_pdev = NULL;
74 }
69} 75}
70 76
71void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) 77void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
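Clearing exynos_drm_hdmi_pdev on unregister is the point of this hunk: previously a register after an unregister compared against the stale pointer and failed with -EEXIST. Intended pairing, roughly (the init/exit framing is an assumption for illustration, relying on the declarations the driver already exports):

#include <linux/init.h>

static int __init example_init(void)
{
	/* First caller wins; a second concurrent call sees -EEXIST. */
	return exynos_platform_device_hdmi_register();
}

static void __exit example_exit(void)
{
	/* Resets the singleton pointer, so a later re-init can succeed. */
	exynos_platform_device_hdmi_unregister();
}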
@@ -205,13 +211,45 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
205 const struct drm_display_mode *mode, 211 const struct drm_display_mode *mode,
206 struct drm_display_mode *adjusted_mode) 212 struct drm_display_mode *adjusted_mode)
207{ 213{
208 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 214 struct drm_display_mode *m;
215 int mode_ok;
209 216
210 DRM_DEBUG_KMS("%s\n", __FILE__); 217 DRM_DEBUG_KMS("%s\n", __FILE__);
211 218
212 if (hdmi_ops && hdmi_ops->mode_fixup) 219 drm_mode_set_crtcinfo(adjusted_mode, 0);
213 hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode, 220
214 adjusted_mode); 221 mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);
222
223 /* just return if user desired mode exists. */
224 if (mode_ok == 0)
225 return;
226
227 /*
228 * otherwise, find the most suitable mode among modes and change it
229 * to adjusted_mode.
230 */
231 list_for_each_entry(m, &connector->modes, head) {
232 mode_ok = drm_hdmi_check_timing(subdrv_dev, m);
233
234 if (mode_ok == 0) {
235 struct drm_mode_object base;
236 struct list_head head;
237
238 DRM_INFO("desired mode doesn't exist so\n");
239 DRM_INFO("use the most suitable mode among modes.\n");
240
241 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
242 m->hdisplay, m->vdisplay, m->vrefresh);
243
244 /* preserve display mode header while copying. */
245 head = adjusted_mode->head;
246 base = adjusted_mode->base;
247 memcpy(adjusted_mode, m, sizeof(*m));
248 adjusted_mode->head = head;
249 adjusted_mode->base = base;
250 break;
251 }
252 }
215} 253}
216 254
217static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) 255static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
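The memcpy() above deliberately saves and restores the list head and base object: copying them from the candidate mode would corrupt the connector's mode list and the DRM object bookkeeping of adjusted_mode. DRM core's drm_mode_copy() wraps the same trick; its shape, for comparison (it preserves just the object id rather than the whole base):

#include <drm/drmP.h>

/* head/base-preserving copy, along the lines of drm_mode_copy(). */
static void mode_copy(struct drm_display_mode *dst,
		      const struct drm_display_mode *src)
{
	int id = dst->base.id;
	struct list_head head = dst->head;

	*dst = *src;
	dst->base.id = id;
	dst->head = head;
}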
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index b7faa3662307..6b709440df4c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -36,9 +36,6 @@ struct exynos_hdmi_ops {
36 int (*power_on)(void *ctx, int mode); 36 int (*power_on)(void *ctx, int mode);
37 37
38 /* manager */ 38 /* manager */
39 void (*mode_fixup)(void *ctx, struct drm_connector *connector,
40 const struct drm_display_mode *mode,
41 struct drm_display_mode *adjusted_mode);
42 void (*mode_set)(void *ctx, void *mode); 39 void (*mode_set)(void *ctx, void *mode);
43 void (*get_max_resol)(void *ctx, unsigned int *width, 40 void (*get_max_resol)(void *ctx, unsigned int *width,
44 unsigned int *height); 41 unsigned int *height);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1adce07ecb5b..29d2ad314490 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -47,6 +47,9 @@
47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) 47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) 48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
49 49
50/* platform device pointer for ipp device. */
51static struct platform_device *exynos_drm_ipp_pdev;
52
50/* 53/*
51 * A structure of event. 54 * A structure of event.
52 * 55 *
@@ -102,6 +105,30 @@ static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock); 105static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); 106static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104 107
108int exynos_platform_device_ipp_register(void)
109{
110 struct platform_device *pdev;
111
112 if (exynos_drm_ipp_pdev)
113 return -EEXIST;
114
115 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
116 if (IS_ERR(pdev))
117 return PTR_ERR(pdev);
118
119 exynos_drm_ipp_pdev = pdev;
120
121 return 0;
122}
123
124void exynos_platform_device_ipp_unregister(void)
125{
126 if (exynos_drm_ipp_pdev) {
127 platform_device_unregister(exynos_drm_ipp_pdev);
128 exynos_drm_ipp_pdev = NULL;
129 }
130}
131
105int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 132int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
106{ 133{
107 DRM_DEBUG_KMS("%s\n", __func__); 134 DRM_DEBUG_KMS("%s\n", __func__);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a40b9fb60240..947f09f15ad1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -674,7 +674,7 @@ static int rotator_probe(struct platform_device *pdev)
674 } 674 }
675 675
676 rot->clock = devm_clk_get(dev, "rotator"); 676 rot->clock = devm_clk_get(dev, "rotator");
677 if (IS_ERR_OR_NULL(rot->clock)) { 677 if (IS_ERR(rot->clock)) {
678 dev_err(dev, "failed to get clock\n"); 678 dev_err(dev, "failed to get clock\n");
679 ret = PTR_ERR(rot->clock); 679 ret = PTR_ERR(rot->clock);
680 goto err_clk_get; 680 goto err_clk_get;
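The IS_ERR_OR_NULL() to IS_ERR() conversions scattered through this series follow the clk API contract: clk_get() and devm_clk_get() signal failure only through ERR_PTR(), while a NULL struct clk can be a valid no-op cookie on some platforms, so treating NULL as an error could reject a usable handle (and PTR_ERR(NULL) would yield 0, masking the supposed failure). The corrected idiom in isolation (helper name is illustrative):

#include <linux/clk.h>
#include <linux/err.h>

static int rotator_clock_get(struct device *dev, struct clk **out)
{
	struct clk *c = devm_clk_get(dev, "rotator");

	if (IS_ERR(c))		/* NULL is not an error here */
		return PTR_ERR(c);

	*out = c;
	return 0;
}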
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c5f266154ad..bbfc3840080c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -108,7 +108,20 @@ struct hdmi_tg_regs {
108 u8 tg_3d[1]; 108 u8 tg_3d[1];
109}; 109};
110 110
111struct hdmi_core_regs { 111struct hdmi_v13_core_regs {
112 u8 h_blank[2];
113 u8 v_blank[3];
114 u8 h_v_line[3];
115 u8 vsync_pol[1];
116 u8 int_pro_mode[1];
117 u8 v_blank_f[3];
118 u8 h_sync_gen[3];
119 u8 v_sync_gen1[3];
120 u8 v_sync_gen2[3];
121 u8 v_sync_gen3[3];
122};
123
124struct hdmi_v14_core_regs {
112 u8 h_blank[2]; 125 u8 h_blank[2];
113 u8 v2_blank[2]; 126 u8 v2_blank[2];
114 u8 v1_blank[2]; 127 u8 v1_blank[2];
@@ -147,11 +160,23 @@ struct hdmi_core_regs {
147 u8 vact_space_6[2]; 160 u8 vact_space_6[2];
148}; 161};
149 162
163struct hdmi_v13_conf {
164 struct hdmi_v13_core_regs core;
165 struct hdmi_tg_regs tg;
166};
167
150struct hdmi_v14_conf { 168struct hdmi_v14_conf {
151 int pixel_clock; 169 struct hdmi_v14_core_regs core;
152 struct hdmi_core_regs core;
153 struct hdmi_tg_regs tg; 170 struct hdmi_tg_regs tg;
171};
172
173struct hdmi_conf_regs {
174 int pixel_clock;
154 int cea_video_id; 175 int cea_video_id;
176 union {
177 struct hdmi_v13_conf v13_conf;
178 struct hdmi_v14_conf v14_conf;
179 } conf;
155}; 180};
156 181
157struct hdmi_context { 182struct hdmi_context {
@@ -169,9 +194,8 @@ struct hdmi_context {
169 struct i2c_client *ddc_port; 194 struct i2c_client *ddc_port;
170 struct i2c_client *hdmiphy_port; 195 struct i2c_client *hdmiphy_port;
171 196
172 /* current hdmiphy conf index */ 197 /* current hdmiphy conf regs */
173 int cur_conf; 198 struct hdmi_conf_regs mode_conf;
174 struct hdmi_v14_conf mode_conf;
175 199
176 struct hdmi_resources res; 200 struct hdmi_resources res;
177 201
@@ -180,292 +204,60 @@ struct hdmi_context {
180 enum hdmi_type type; 204 enum hdmi_type type;
181}; 205};
182 206
183/* HDMI Version 1.3 */ 207struct hdmiphy_config {
184static const u8 hdmiphy_v13_conf27[32] = { 208 int pixel_clock;
185 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 209 u8 conf[32];
186 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
187 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
188 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
189};
190
191static const u8 hdmiphy_v13_conf27_027[32] = {
192 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
193 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
194 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
195 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
196};
197
198static const u8 hdmiphy_v13_conf74_175[32] = {
199 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
200 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
201 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
202 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
203};
204
205static const u8 hdmiphy_v13_conf74_25[32] = {
206 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
207 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
208 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
209 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
210};
211
212static const u8 hdmiphy_v13_conf148_5[32] = {
213 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
214 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
215 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
216 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
217};
218
219struct hdmi_v13_tg_regs {
220 u8 cmd;
221 u8 h_fsz_l;
222 u8 h_fsz_h;
223 u8 hact_st_l;
224 u8 hact_st_h;
225 u8 hact_sz_l;
226 u8 hact_sz_h;
227 u8 v_fsz_l;
228 u8 v_fsz_h;
229 u8 vsync_l;
230 u8 vsync_h;
231 u8 vsync2_l;
232 u8 vsync2_h;
233 u8 vact_st_l;
234 u8 vact_st_h;
235 u8 vact_sz_l;
236 u8 vact_sz_h;
237 u8 field_chg_l;
238 u8 field_chg_h;
239 u8 vact_st2_l;
240 u8 vact_st2_h;
241 u8 vsync_top_hdmi_l;
242 u8 vsync_top_hdmi_h;
243 u8 vsync_bot_hdmi_l;
244 u8 vsync_bot_hdmi_h;
245 u8 field_top_hdmi_l;
246 u8 field_top_hdmi_h;
247 u8 field_bot_hdmi_l;
248 u8 field_bot_hdmi_h;
249};
250
251struct hdmi_v13_core_regs {
252 u8 h_blank[2];
253 u8 v_blank[3];
254 u8 h_v_line[3];
255 u8 vsync_pol[1];
256 u8 int_pro_mode[1];
257 u8 v_blank_f[3];
258 u8 h_sync_gen[3];
259 u8 v_sync_gen1[3];
260 u8 v_sync_gen2[3];
261 u8 v_sync_gen3[3];
262};
263
264struct hdmi_v13_preset_conf {
265 struct hdmi_v13_core_regs core;
266 struct hdmi_v13_tg_regs tg;
267};
268
269struct hdmi_v13_conf {
270 int width;
271 int height;
272 int vrefresh;
273 bool interlace;
274 int cea_video_id;
275 const u8 *hdmiphy_data;
276 const struct hdmi_v13_preset_conf *conf;
277};
278
279static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
280 .core = {
281 .h_blank = {0x8a, 0x00},
282 .v_blank = {0x0d, 0x6a, 0x01},
283 .h_v_line = {0x0d, 0xa2, 0x35},
284 .vsync_pol = {0x01},
285 .int_pro_mode = {0x00},
286 .v_blank_f = {0x00, 0x00, 0x00},
287 .h_sync_gen = {0x0e, 0x30, 0x11},
288 .v_sync_gen1 = {0x0f, 0x90, 0x00},
289 /* other don't care */
290 },
291 .tg = {
292 0x00, /* cmd */
293 0x5a, 0x03, /* h_fsz */
294 0x8a, 0x00, 0xd0, 0x02, /* hact */
295 0x0d, 0x02, /* v_fsz */
296 0x01, 0x00, 0x33, 0x02, /* vsync */
297 0x2d, 0x00, 0xe0, 0x01, /* vact */
298 0x33, 0x02, /* field_chg */
299 0x49, 0x02, /* vact_st2 */
300 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
301 0x01, 0x00, 0x33, 0x02, /* field top/bot */
302 },
303};
304
305static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
306 .core = {
307 .h_blank = {0x72, 0x01},
308 .v_blank = {0xee, 0xf2, 0x00},
309 .h_v_line = {0xee, 0x22, 0x67},
310 .vsync_pol = {0x00},
311 .int_pro_mode = {0x00},
312 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
313 .h_sync_gen = {0x6c, 0x50, 0x02},
314 .v_sync_gen1 = {0x0a, 0x50, 0x00},
315 .v_sync_gen2 = {0x01, 0x10, 0x00},
316 .v_sync_gen3 = {0x01, 0x10, 0x00},
317 /* other don't care */
318 },
319 .tg = {
320 0x00, /* cmd */
321 0x72, 0x06, /* h_fsz */
322 0x71, 0x01, 0x01, 0x05, /* hact */
323 0xee, 0x02, /* v_fsz */
324 0x01, 0x00, 0x33, 0x02, /* vsync */
325 0x1e, 0x00, 0xd0, 0x02, /* vact */
326 0x33, 0x02, /* field_chg */
327 0x49, 0x02, /* vact_st2 */
328 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
329 0x01, 0x00, 0x33, 0x02, /* field top/bot */
330 },
331};
332
333static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
334 .core = {
335 .h_blank = {0xd0, 0x02},
336 .v_blank = {0x32, 0xB2, 0x00},
337 .h_v_line = {0x65, 0x04, 0xa5},
338 .vsync_pol = {0x00},
339 .int_pro_mode = {0x01},
340 .v_blank_f = {0x49, 0x2A, 0x23},
341 .h_sync_gen = {0x0E, 0xEA, 0x08},
342 .v_sync_gen1 = {0x07, 0x20, 0x00},
343 .v_sync_gen2 = {0x39, 0x42, 0x23},
344 .v_sync_gen3 = {0x38, 0x87, 0x73},
345 /* other don't care */
346 },
347 .tg = {
348 0x00, /* cmd */
349 0x50, 0x0A, /* h_fsz */
350 0xCF, 0x02, 0x81, 0x07, /* hact */
351 0x65, 0x04, /* v_fsz */
352 0x01, 0x00, 0x33, 0x02, /* vsync */
353 0x16, 0x00, 0x1c, 0x02, /* vact */
354 0x33, 0x02, /* field_chg */
355 0x49, 0x02, /* vact_st2 */
356 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
357 0x01, 0x00, 0x33, 0x02, /* field top/bot */
358 },
359}; 210};
360 211
361static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = { 212/* list of phy config settings */
362 .core = { 213static const struct hdmiphy_config hdmiphy_v13_configs[] = {
363 .h_blank = {0xd0, 0x02}, 214 {
364 .v_blank = {0x65, 0x6c, 0x01}, 215 .pixel_clock = 27000000,
365 .h_v_line = {0x65, 0x04, 0xa5}, 216 .conf = {
366 .vsync_pol = {0x00}, 217 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
367 .int_pro_mode = {0x00}, 218 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
368 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */ 219 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
369 .h_sync_gen = {0x0e, 0xea, 0x08}, 220 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
370 .v_sync_gen1 = {0x09, 0x40, 0x00}, 221 },
371 .v_sync_gen2 = {0x01, 0x10, 0x00},
372 .v_sync_gen3 = {0x01, 0x10, 0x00},
373 /* other don't care */
374 },
375 .tg = {
376 0x00, /* cmd */
377 0x50, 0x0A, /* h_fsz */
378 0xCF, 0x02, 0x81, 0x07, /* hact */
379 0x65, 0x04, /* v_fsz */
380 0x01, 0x00, 0x33, 0x02, /* vsync */
381 0x2d, 0x00, 0x38, 0x04, /* vact */
382 0x33, 0x02, /* field_chg */
383 0x48, 0x02, /* vact_st2 */
384 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
385 0x01, 0x00, 0x33, 0x02, /* field top/bot */
386 }, 222 },
387}; 223 {
388 224 .pixel_clock = 27027000,
389static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = { 225 .conf = {
390 .core = { 226 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
391 .h_blank = {0x18, 0x01}, 227 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
392 .v_blank = {0x32, 0xB2, 0x00}, 228 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
393 .h_v_line = {0x65, 0x84, 0x89}, 229 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
394 .vsync_pol = {0x00}, 230 },
395 .int_pro_mode = {0x01},
396 .v_blank_f = {0x49, 0x2A, 0x23},
397 .h_sync_gen = {0x56, 0x08, 0x02},
398 .v_sync_gen1 = {0x07, 0x20, 0x00},
399 .v_sync_gen2 = {0x39, 0x42, 0x23},
400 .v_sync_gen3 = {0xa4, 0x44, 0x4a},
401 /* other don't care */
402 }, 231 },
403 .tg = { 232 {
404 0x00, /* cmd */ 233 .pixel_clock = 74176000,
405 0x98, 0x08, /* h_fsz */ 234 .conf = {
406 0x17, 0x01, 0x81, 0x07, /* hact */ 235 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
407 0x65, 0x04, /* v_fsz */ 236 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
408 0x01, 0x00, 0x33, 0x02, /* vsync */ 237 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
409 0x16, 0x00, 0x1c, 0x02, /* vact */ 238 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
410 0x33, 0x02, /* field_chg */ 239 },
411 0x49, 0x02, /* vact_st2 */
412 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
413 0x01, 0x00, 0x33, 0x02, /* field top/bot */
414 }, 240 },
415}; 241 {
416 242 .pixel_clock = 74250000,
417static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { 243 .conf = {
418 .core = { 244 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
419 .h_blank = {0x18, 0x01}, 245 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
420 .v_blank = {0x65, 0x6c, 0x01}, 246 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
421 .h_v_line = {0x65, 0x84, 0x89}, 247 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
422 .vsync_pol = {0x00}, 248 },
423 .int_pro_mode = {0x00},
424 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
425 .h_sync_gen = {0x56, 0x08, 0x02},
426 .v_sync_gen1 = {0x09, 0x40, 0x00},
427 .v_sync_gen2 = {0x01, 0x10, 0x00},
428 .v_sync_gen3 = {0x01, 0x10, 0x00},
429 /* other don't care */
430 }, 249 },
431 .tg = { 250 {
432 0x00, /* cmd */ 251 .pixel_clock = 148500000,
433 0x98, 0x08, /* h_fsz */ 252 .conf = {
434 0x17, 0x01, 0x81, 0x07, /* hact */ 253 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
435 0x65, 0x04, /* v_fsz */ 254 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
436 0x01, 0x00, 0x33, 0x02, /* vsync */ 255 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
437 0x2d, 0x00, 0x38, 0x04, /* vact */ 256 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
438 0x33, 0x02, /* field_chg */ 257 },
439 0x48, 0x02, /* vact_st2 */
440 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
441 0x01, 0x00, 0x33, 0x02, /* field top/bot */
442 }, 258 },
443}; 259};
444 260
445static const struct hdmi_v13_conf hdmi_v13_confs[] = {
446 { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
447 &hdmi_v13_conf_720p60 },
448 { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
449 &hdmi_v13_conf_720p60 },
450 { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
451 &hdmi_v13_conf_480p },
452 { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
453 &hdmi_v13_conf_1080i50 },
454 { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
455 &hdmi_v13_conf_1080p50 },
456 { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
457 &hdmi_v13_conf_1080i60 },
458 { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
459 &hdmi_v13_conf_1080p60 },
460};
461
462/* HDMI Version 1.4 */
463struct hdmiphy_config {
464 int pixel_clock;
465 u8 conf[32];
466};
467
468/* list of all required phy config settings */
469static const struct hdmiphy_config hdmiphy_v14_configs[] = { 261static const struct hdmiphy_config hdmiphy_v14_configs[] = {
470 { 262 {
471 .pixel_clock = 25200000, 263 .pixel_clock = 25200000,
@@ -873,22 +665,6 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
873 hdmi_v14_regs_dump(hdata, prefix); 665 hdmi_v14_regs_dump(hdata, prefix);
874} 666}
875 667
876static int hdmi_v13_conf_index(struct drm_display_mode *mode)
877{
878 int i;
879
880 for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
881 if (hdmi_v13_confs[i].width == mode->hdisplay &&
882 hdmi_v13_confs[i].height == mode->vdisplay &&
883 hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
884 hdmi_v13_confs[i].interlace ==
885 ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
886 true : false))
887 return i;
888
889 return -EINVAL;
890}
891
892static u8 hdmi_chksum(struct hdmi_context *hdata, 668static u8 hdmi_chksum(struct hdmi_context *hdata,
893 u32 start, u8 len, u32 hdr_sum) 669 u32 start, u8 len, u32 hdr_sum)
894{ 670{
@@ -943,11 +719,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
943 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | 719 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
944 AVI_SAME_AS_PIC_ASPECT_RATIO); 720 AVI_SAME_AS_PIC_ASPECT_RATIO);
945 721
946 if (hdata->type == HDMI_TYPE13) 722 vic = hdata->mode_conf.cea_video_id;
947 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
948 else
949 vic = hdata->mode_conf.cea_video_id;
950
951 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); 723 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
952 724
953 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), 725 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
@@ -1000,63 +772,34 @@ static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
1000 return raw_edid; 772 return raw_edid;
1001} 773}
1002 774
1003static int hdmi_v13_check_timing(struct fb_videomode *check_timing) 775static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
1004{ 776{
1005 int i; 777 const struct hdmiphy_config *confs;
778 int count, i;
1006 779
1007 DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", 780 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1008 check_timing->xres, check_timing->yres,
1009 check_timing->refresh, (check_timing->vmode &
1010 FB_VMODE_INTERLACED) ? true : false);
1011
1012 for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
1013 if (hdmi_v13_confs[i].width == check_timing->xres &&
1014 hdmi_v13_confs[i].height == check_timing->yres &&
1015 hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
1016 hdmi_v13_confs[i].interlace ==
1017 ((check_timing->vmode & FB_VMODE_INTERLACED) ?
1018 true : false))
1019 return 0;
1020
1021 /* TODO */
1022
1023 return -EINVAL;
1024}
1025 781
1026static int hdmi_v14_find_phy_conf(int pixel_clock) 782 if (hdata->type == HDMI_TYPE13) {
1027{ 783 confs = hdmiphy_v13_configs;
1028 int i; 784 count = ARRAY_SIZE(hdmiphy_v13_configs);
785 } else if (hdata->type == HDMI_TYPE14) {
786 confs = hdmiphy_v14_configs;
787 count = ARRAY_SIZE(hdmiphy_v14_configs);
788 } else
789 return -EINVAL;
1029 790
1030 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) { 791 for (i = 0; i < count; i++)
1031 if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock) 792 if (confs[i].pixel_clock == pixel_clock)
1032 return i; 793 return i;
1033 }
1034 794
1035 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); 795 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
1036 return -EINVAL; 796 return -EINVAL;
1037} 797}
1038 798
1039static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
1040{
1041 int i;
1042
1043 DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
1044 check_timing->xres, check_timing->yres,
1045 check_timing->refresh, check_timing->pixclock,
1046 (check_timing->vmode & FB_VMODE_INTERLACED) ?
1047 true : false);
1048
1049 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
1050 if (hdmiphy_v14_configs[i].pixel_clock ==
1051 check_timing->pixclock)
1052 return 0;
1053
1054 return -EINVAL;
1055}
1056
1057static int hdmi_check_timing(void *ctx, struct fb_videomode *timing) 799static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
1058{ 800{
1059 struct hdmi_context *hdata = ctx; 801 struct hdmi_context *hdata = ctx;
802 int ret;
1060 803
1061 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 804 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1062 805
@@ -1064,10 +807,10 @@ static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
1064 timing->yres, timing->refresh, 807 timing->yres, timing->refresh,
1065 timing->vmode); 808 timing->vmode);
1066 809
1067 if (hdata->type == HDMI_TYPE13) 810 ret = hdmi_find_phy_conf(hdata, timing->pixclock);
1068 return hdmi_v13_check_timing(timing); 811 if (ret < 0)
1069 else 812 return ret;
1070 return hdmi_v14_check_timing(timing); 813 return 0;
1071} 814}
1072 815
1073static void hdmi_set_acr(u32 freq, u8 *acr) 816static void hdmi_set_acr(u32 freq, u8 *acr)
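hdmi_find_phy_conf() unifies what used to be separate v13/v14 lookups: one table per IP revision, indexed by exact pixel clock. Both hdmi_check_timing() above and hdmiphy_conf_apply() later in the file funnel through it; a usage sketch (helper name is illustrative):

#include <linux/types.h>

/* Resolve the 32-byte PHY register block for a pixel clock, e.g.
 * 74250000 for 720p60, or NULL if the clock is unsupported.
 */
static const u8 *hdmiphy_regs_for(struct hdmi_context *hdata, u32 pixclk)
{
	int i = hdmi_find_phy_conf(hdata, pixclk);

	if (i < 0)
		return NULL;

	return hdata->type == HDMI_TYPE13 ?
		hdmiphy_v13_configs[i].conf : hdmiphy_v14_configs[i].conf;
}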
@@ -1301,10 +1044,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1301 1044
1302static void hdmi_v13_timing_apply(struct hdmi_context *hdata) 1045static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1303{ 1046{
1304 const struct hdmi_v13_preset_conf *conf = 1047 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
1305 hdmi_v13_confs[hdata->cur_conf].conf; 1048 const struct hdmi_v13_core_regs *core =
1306 const struct hdmi_v13_core_regs *core = &conf->core; 1049 &hdata->mode_conf.conf.v13_conf.core;
1307 const struct hdmi_v13_tg_regs *tg = &conf->tg;
1308 int tries; 1050 int tries;
1309 1051
1310 /* setting core registers */ 1052 /* setting core registers */
@@ -1334,34 +1076,34 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1334 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); 1076 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
1335 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); 1077 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
1336 /* Timing generator registers */ 1078 /* Timing generator registers */
1337 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); 1079 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
1338 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); 1080 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
1339 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); 1081 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
1340 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); 1082 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
1341 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); 1083 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
1342 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); 1084 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
1343 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); 1085 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
1344 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); 1086 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
1345 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); 1087 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
1346 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); 1088 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
1347 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); 1089 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
1348 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); 1090 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
1349 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); 1091 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
1350 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); 1092 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
1351 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); 1093 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
1352 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); 1094 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
1353 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); 1095 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
1354 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); 1096 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
1355 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); 1097 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
1356 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); 1098 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
1357 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); 1099 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
1358 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); 1100 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
1359 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); 1101 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
1360 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); 1102 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
1361 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); 1103 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
1362 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); 1104 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
1363 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); 1105 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
1364 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); 1106 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
1365 1107
1366 /* waiting for HDMIPHY's PLL to get to steady state */ 1108 /* waiting for HDMIPHY's PLL to get to steady state */
1367 for (tries = 100; tries; --tries) { 1109 for (tries = 100; tries; --tries) {
@@ -1391,8 +1133,9 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1391 1133
1392static void hdmi_v14_timing_apply(struct hdmi_context *hdata) 1134static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1393{ 1135{
1394 struct hdmi_core_regs *core = &hdata->mode_conf.core; 1136 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1395 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg; 1137 const struct hdmi_v14_core_regs *core =
1138 &hdata->mode_conf.conf.v14_conf.core;
1396 int tries; 1139 int tries;
1397 1140
1398 /* setting core registers */ 1141 /* setting core registers */
@@ -1624,17 +1367,16 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1624 } 1367 }
1625 1368
1626 /* pixel clock */ 1369 /* pixel clock */
1627 if (hdata->type == HDMI_TYPE13) { 1370 i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
1628 hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; 1371 if (i < 0) {
1629 } else { 1372 DRM_ERROR("failed to find hdmiphy conf\n");
1630 i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock); 1373 return;
1631 if (i < 0) { 1374 }
1632 DRM_ERROR("failed to find hdmiphy conf\n");
1633 return;
1634 }
1635 1375
1376 if (hdata->type == HDMI_TYPE13)
1377 hdmiphy_data = hdmiphy_v13_configs[i].conf;
1378 else
1636 hdmiphy_data = hdmiphy_v14_configs[i].conf; 1379 hdmiphy_data = hdmiphy_v14_configs[i].conf;
1637 }
1638 1380
1639 memcpy(buffer, hdmiphy_data, 32); 1381 memcpy(buffer, hdmiphy_data, 32);
1640 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); 1382 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -1687,75 +1429,121 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1687 hdmi_regs_dump(hdata, "start"); 1429 hdmi_regs_dump(hdata, "start");
1688} 1430}
1689 1431
1690static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, 1432static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
1691 const struct drm_display_mode *mode,
1692 struct drm_display_mode *adjusted_mode)
1693{ 1433{
1694 struct drm_display_mode *m; 1434 int i;
1695 struct hdmi_context *hdata = ctx; 1435 BUG_ON(num_bytes > 4);
1696 int index; 1436 for (i = 0; i < num_bytes; i++)
1437 reg_pair[i] = (value >> (8 * i)) & 0xff;
1438}
1697 1439
1698 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1440static void hdmi_v13_mode_set(struct hdmi_context *hdata,
1441 struct drm_display_mode *m)
1442{
1443 struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
1444 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
1445 unsigned int val;
1699 1446
1700 drm_mode_set_crtcinfo(adjusted_mode, 0); 1447 hdata->mode_conf.cea_video_id =
1448 drm_match_cea_mode((struct drm_display_mode *)m);
1449 hdata->mode_conf.pixel_clock = m->clock * 1000;
1701 1450
1702 if (hdata->type == HDMI_TYPE13) 1451 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1703 index = hdmi_v13_conf_index(adjusted_mode); 1452 hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
1704 else
1705 index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
1706 1453
1707 /* just return if user desired mode exists. */ 1454 val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
1708 if (index >= 0) 1455 hdmi_set_reg(core->vsync_pol, 1, val);
1709 return; 1456
1457 val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
1458 hdmi_set_reg(core->int_pro_mode, 1, val);
1459
1460 val = (m->hsync_start - m->hdisplay - 2);
1461 val |= ((m->hsync_end - m->hdisplay - 2) << 10);
1462 val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
1463 hdmi_set_reg(core->h_sync_gen, 3, val);
1710 1464
1711 /* 1465 /*
1712 * otherwise, find the most suitable mode among modes and change it 1466 * Quirk requirement for exynos HDMI IP design,
1713 * to adjusted_mode. 1467 * 2 pixels less than the actual calculation for hsync_start
1468 * and end.
1714 */ 1469 */
1715 list_for_each_entry(m, &connector->modes, head) { 1470
1716 if (hdata->type == HDMI_TYPE13) 1471 /* Following values & calculations differ for different type of modes */
1717 index = hdmi_v13_conf_index(m); 1472 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1718 else 1473 /* Interlaced Mode */
1719 index = hdmi_v14_find_phy_conf(m->clock * 1000); 1474 val = ((m->vsync_end - m->vdisplay) / 2);
1720 1475 val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
1721 if (index >= 0) { 1476 hdmi_set_reg(core->v_sync_gen1, 3, val);
1722 struct drm_mode_object base; 1477
1723 struct list_head head; 1478 val = m->vtotal / 2;
1724 1479 val |= ((m->vtotal - m->vdisplay) / 2) << 11;
1725 DRM_INFO("desired mode doesn't exist so\n"); 1480 hdmi_set_reg(core->v_blank, 3, val);
1726 DRM_INFO("use the most suitable mode among modes.\n"); 1481
1727 1482 val = (m->vtotal +
1728 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n", 1483 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
1729 m->hdisplay, m->vdisplay, m->vrefresh); 1484 val |= m->vtotal << 11;
1730 1485 hdmi_set_reg(core->v_blank_f, 3, val);
1731 /* preserve display mode header while copying. */ 1486
1732 head = adjusted_mode->head; 1487 val = ((m->vtotal / 2) + 7);
1733 base = adjusted_mode->base; 1488 val |= ((m->vtotal / 2) + 2) << 12;
1734 memcpy(adjusted_mode, m, sizeof(*m)); 1489 hdmi_set_reg(core->v_sync_gen2, 3, val);
1735 adjusted_mode->head = head; 1490
1736 adjusted_mode->base = base; 1491 val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
1737 break; 1492 val |= ((m->htotal / 2) +
1738 } 1493 (m->hsync_start - m->hdisplay)) << 12;
1494 hdmi_set_reg(core->v_sync_gen3, 3, val);
1495
1496 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1497 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1498
1499 hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
1500 } else {
1501 /* Progressive Mode */
1502
1503 val = m->vtotal;
1504 val |= (m->vtotal - m->vdisplay) << 11;
1505 hdmi_set_reg(core->v_blank, 3, val);
1506
1507 hdmi_set_reg(core->v_blank_f, 3, 0);
1508
1509 val = (m->vsync_end - m->vdisplay);
1510 val |= ((m->vsync_start - m->vdisplay) << 12);
1511 hdmi_set_reg(core->v_sync_gen1, 3, val);
1512
1513 hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */
1514 hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */
1515 hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
1516 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
1517 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1739 } 1518 }
1740}
1741 1519
1742static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) 1520 /* Timing generator registers */
1743{ 1521 hdmi_set_reg(tg->cmd, 1, 0x0);
1744 int i; 1522 hdmi_set_reg(tg->h_fsz, 2, m->htotal);
1745 BUG_ON(num_bytes > 4); 1523 hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
1746 for (i = 0; i < num_bytes; i++) 1524 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1747 reg_pair[i] = (value >> (8 * i)) & 0xff; 1525 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1526 hdmi_set_reg(tg->vsync, 2, 0x1);
1527 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1528 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
1529 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
1530 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1531 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1532 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1533 hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
1748} 1534}
1749 1535
1750static void hdmi_v14_mode_set(struct hdmi_context *hdata, 1536static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1751 struct drm_display_mode *m) 1537 struct drm_display_mode *m)
1752{ 1538{
1753 struct hdmi_core_regs *core = &hdata->mode_conf.core; 1539 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1754 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg; 1540 struct hdmi_v14_core_regs *core =
1755 1541 &hdata->mode_conf.conf.v14_conf.core;
1756 hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
1757 1542
1543 hdata->mode_conf.cea_video_id =
1544 drm_match_cea_mode((struct drm_display_mode *)m);
1758 hdata->mode_conf.pixel_clock = m->clock * 1000; 1545 hdata->mode_conf.pixel_clock = m->clock * 1000;
1546
1759 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); 1547 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1760 hdmi_set_reg(core->v_line, 2, m->vtotal); 1548 hdmi_set_reg(core->v_line, 2, m->vtotal);
1761 hdmi_set_reg(core->h_line, 2, m->htotal); 1549 hdmi_set_reg(core->h_line, 2, m->htotal);
@@ -1852,25 +1640,22 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1852 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ 1640 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1853 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ 1641 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1854 hdmi_set_reg(tg->tg_3d, 1, 0x0); 1642 hdmi_set_reg(tg->tg_3d, 1, 0x0);
1855
1856} 1643}
1857 1644
1858static void hdmi_mode_set(void *ctx, void *mode) 1645static void hdmi_mode_set(void *ctx, void *mode)
1859{ 1646{
1860 struct hdmi_context *hdata = ctx; 1647 struct hdmi_context *hdata = ctx;
1861 int conf_idx; 1648 struct drm_display_mode *m = mode;
1862 1649
1863 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1650 DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n",
1651 __func__, m->hdisplay, m->vdisplay,
1652 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
 1653 "INTERLACED" : "PROGRESSIVE");
1864 1654
1865 if (hdata->type == HDMI_TYPE13) { 1655 if (hdata->type == HDMI_TYPE13)
1866 conf_idx = hdmi_v13_conf_index(mode); 1656 hdmi_v13_mode_set(hdata, mode);
1867 if (conf_idx >= 0) 1657 else
1868 hdata->cur_conf = conf_idx;
1869 else
1870 DRM_DEBUG_KMS("not supported mode\n");
1871 } else {
1872 hdmi_v14_mode_set(hdata, mode); 1658 hdmi_v14_mode_set(hdata, mode);
1873 }
1874} 1659}
1875 1660
1876static void hdmi_get_max_resol(void *ctx, unsigned int *width, 1661static void hdmi_get_max_resol(void *ctx, unsigned int *width,
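hdmi_set_reg(), moved up earlier in this hunk, splits a value little-endian across the HDMI IP's per-byte registers, which is why the register structs switched from *_l/*_h field pairs to small arrays. A worked example (illustrative function, reusing the file-local helper):

#include <linux/types.h>

/* Packing 0x0abc into a 2-byte pair: low byte first. */
static void pack_example(void)
{
	u8 pair[2];

	hdmi_set_reg(pair, 2, 0x0abc);
	/* now pair[0] == 0xbc, pair[1] == 0x0a */
}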
@@ -1983,7 +1768,6 @@ static struct exynos_hdmi_ops hdmi_ops = {
1983 .check_timing = hdmi_check_timing, 1768 .check_timing = hdmi_check_timing,
1984 1769
1985 /* manager */ 1770 /* manager */
1986 .mode_fixup = hdmi_mode_fixup,
1987 .mode_set = hdmi_mode_set, 1771 .mode_set = hdmi_mode_set,
1988 .get_max_resol = hdmi_get_max_resol, 1772 .get_max_resol = hdmi_get_max_resol,
1989 .commit = hdmi_commit, 1773 .commit = hdmi_commit,
@@ -2023,27 +1807,27 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
2023 1807
2024 /* get clocks, power */ 1808 /* get clocks, power */
2025 res->hdmi = devm_clk_get(dev, "hdmi"); 1809 res->hdmi = devm_clk_get(dev, "hdmi");
2026 if (IS_ERR_OR_NULL(res->hdmi)) { 1810 if (IS_ERR(res->hdmi)) {
2027 DRM_ERROR("failed to get clock 'hdmi'\n"); 1811 DRM_ERROR("failed to get clock 'hdmi'\n");
2028 goto fail; 1812 goto fail;
2029 } 1813 }
2030 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 1814 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2031 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 1815 if (IS_ERR(res->sclk_hdmi)) {
2032 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 1816 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2033 goto fail; 1817 goto fail;
2034 } 1818 }
2035 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); 1819 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2036 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 1820 if (IS_ERR(res->sclk_pixel)) {
2037 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 1821 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2038 goto fail; 1822 goto fail;
2039 } 1823 }
2040 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); 1824 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2041 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 1825 if (IS_ERR(res->sclk_hdmiphy)) {
2042 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 1826 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2043 goto fail; 1827 goto fail;
2044 } 1828 }
2045 res->hdmiphy = devm_clk_get(dev, "hdmiphy"); 1829 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2046 if (IS_ERR_OR_NULL(res->hdmiphy)) { 1830 if (IS_ERR(res->hdmiphy)) {
2047 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 1831 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2048 goto fail; 1832 goto fail;
2049 } 1833 }
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2f4f72f07047..ec3e376b7e01 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -643,12 +643,14 @@ static void mixer_win_reset(struct mixer_context *ctx)
643 /* setting graphical layers */ 643 /* setting graphical layers */
644 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ 644 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
645 val |= MXR_GRP_CFG_WIN_BLEND_EN; 645 val |= MXR_GRP_CFG_WIN_BLEND_EN;
646 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
647 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
648 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ 646 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
649 647
650 /* the same configuration for both layers */ 648 /* Don't blend layer 0 onto the mixer background */
651 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); 649 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
650
651 /* Blend layer 1 into layer 0 */
652 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
653 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
652 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); 654 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
653 655
654 /* setting video layers */ 656 /* setting video layers */
@@ -820,7 +822,6 @@ static void mixer_win_disable(void *ctx, int win)
820 822
821static int mixer_check_timing(void *ctx, struct fb_videomode *timing) 823static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
822{ 824{
823 struct mixer_context *mixer_ctx = ctx;
824 u32 w, h; 825 u32 w, h;
825 826
826 w = timing->xres; 827 w = timing->xres;
@@ -831,9 +832,6 @@ static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
831 timing->refresh, (timing->vmode & 832 timing->refresh, (timing->vmode &
832 FB_VMODE_INTERLACED) ? true : false); 833 FB_VMODE_INTERLACED) ? true : false);
833 834
834 if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
835 return 0;
836
837 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) || 835 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
838 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || 836 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
839 (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080)) 837 (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
@@ -1047,13 +1045,13 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
1047 spin_lock_init(&mixer_res->reg_slock); 1045 spin_lock_init(&mixer_res->reg_slock);
1048 1046
1049 mixer_res->mixer = devm_clk_get(dev, "mixer"); 1047 mixer_res->mixer = devm_clk_get(dev, "mixer");
1050 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 1048 if (IS_ERR(mixer_res->mixer)) {
1051 dev_err(dev, "failed to get clock 'mixer'\n"); 1049 dev_err(dev, "failed to get clock 'mixer'\n");
1052 return -ENODEV; 1050 return -ENODEV;
1053 } 1051 }
1054 1052
1055 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 1053 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
1056 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 1054 if (IS_ERR(mixer_res->sclk_hdmi)) {
1057 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 1055 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
1058 return -ENODEV; 1056 return -ENODEV;
1059 } 1057 }
@@ -1096,17 +1094,17 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1096 struct resource *res; 1094 struct resource *res;
1097 1095
1098 mixer_res->vp = devm_clk_get(dev, "vp"); 1096 mixer_res->vp = devm_clk_get(dev, "vp");
1099 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1097 if (IS_ERR(mixer_res->vp)) {
1100 dev_err(dev, "failed to get clock 'vp'\n"); 1098 dev_err(dev, "failed to get clock 'vp'\n");
1101 return -ENODEV; 1099 return -ENODEV;
1102 } 1100 }
1103 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); 1101 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1104 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1102 if (IS_ERR(mixer_res->sclk_mixer)) {
1105 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1103 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1106 return -ENODEV; 1104 return -ENODEV;
1107 } 1105 }
1108 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac"); 1106 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1109 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1107 if (IS_ERR(mixer_res->sclk_dac)) {
1110 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1108 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1111 return -ENODEV; 1109 return -ENODEV;
1112 } 1110 }
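
The win-reset hunk here is a behavioural fix, not a cleanup: the blend bits used to be set before either graphics layer was written, so layer 0 blended onto the mixer background. Writing layer 0 first and OR-ing the blend flags in afterwards means only layer 1 blends onto layer 0. (The dropped MXR_VER_0_0_0_16 early-return also means the resolution whitelist in mixer_check_timing() now applies to every mixer revision.) A sketch of the two-stage write, using the register helpers as they appear in the driver:

	u32 val;

	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE;		/* no blank key */
	val |= MXR_GRP_CFG_WIN_BLEND_EN;
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff);		/* non-transparent */
	mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);	/* layer 0: opaque */

	val |= MXR_GRP_CFG_BLEND_PRE_MUL;		/* added for layer 1 only */
	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
	mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);	/* layer 1: blended */
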
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index b4f9ca1fd851..30496134a3d0 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -661,9 +661,8 @@
661#define EXYNOS_CLKSRC_SCLK (1 << 1) 661#define EXYNOS_CLKSRC_SCLK (1 << 1)
662 662
663/* SYSREG for FIMC writeback */ 663/* SYSREG for FIMC writeback */
664#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218) 664#define SYSREG_CAMERA_BLK (0x0218)
665#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c) 665#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
666#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23) 666#define SYSREG_FIMD0WB_DEST_SHIFT 23
667#define SYSREG_FIMD0WB_DEST_SHIFT 23
668 667
669#endif /* EXYNOS_REGS_FIMC_H */ 668#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1188f0fe7e4f..1f6e2dfaaeae 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -2,10 +2,15 @@ config DRM_GMA500
2 tristate "Intel GMA5/600 KMS Framebuffer" 2 tristate "Intel GMA5/600 KMS Framebuffer"
3 depends on DRM && PCI && X86 3 depends on DRM && PCI && X86
4 select FB_CFB_COPYAREA 4 select FB_CFB_COPYAREA
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT 6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_TTM 8 select DRM_TTM
9 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
10 select ACPI_VIDEO if ACPI
11 select BACKLIGHT_CLASS_DEVICE if ACPI
12 select VIDEO_OUTPUT_CONTROL if ACPI
13 select INPUT if ACPI
9 help 14 help
10 Say yes for an experimental 2D KMS framebuffer driver for the 15 Say yes for an experimental 2D KMS framebuffer driver for the
11 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics 16 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 8c175345d85c..7b8386fc3024 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -276,6 +276,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
276 goto failed_connector; 276 goto failed_connector;
277 277
278 connector = &psb_intel_connector->base; 278 connector = &psb_intel_connector->base;
279 connector->polled = DRM_CONNECTOR_POLL_HPD;
279 drm_connector_init(dev, connector, 280 drm_connector_init(dev, connector,
280 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 281 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
281 282
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index e223b500022e..464153d9d2df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -319,6 +319,7 @@ void cdv_hdmi_init(struct drm_device *dev,
319 goto err_priv; 319 goto err_priv;
320 320
321 connector = &psb_intel_connector->base; 321 connector = &psb_intel_connector->base;
322 connector->polled = DRM_CONNECTOR_POLL_HPD;
322 encoder = &psb_intel_encoder->base; 323 encoder = &psb_intel_encoder->base;
323 drm_connector_init(dev, connector, 324 drm_connector_init(dev, connector,
324 &cdv_hdmi_connector_funcs, 325 &cdv_hdmi_connector_funcs,
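
Both cdv hunks make the same one-line addition: marking the connector DRM_CONNECTOR_POLL_HPD before drm_connector_init() registers it tells the probe helper that a hotplug interrupt will announce state changes, so the periodic output-poll worker can leave these connectors alone. The idiom, with a placeholder funcs table:

	connector->polled = DRM_CONNECTOR_POLL_HPD;	/* HPD IRQ, no timer polling */
	drm_connector_init(dev, connector, &example_connector_funcs,
			   DRM_MODE_CONNECTOR_HDMIA);
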
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2590cac84257..1534e220097a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -431,7 +431,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
431 fbdev->psb_fb_helper.fbdev = info; 431 fbdev->psb_fb_helper.fbdev = info;
432 432
433 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 433 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
434 strcpy(info->fix.id, "psbfb"); 434 strcpy(info->fix.id, "psbdrmfb");
435 435
436 info->flags = FBINFO_DEFAULT; 436 info->flags = FBINFO_DEFAULT;
437 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */ 437 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
@@ -772,8 +772,8 @@ void psb_modeset_init(struct drm_device *dev)
772 for (i = 0; i < dev_priv->num_pipe; i++) 772 for (i = 0; i < dev_priv->num_pipe; i++)
773 psb_intel_crtc_init(dev, i, mode_dev); 773 psb_intel_crtc_init(dev, i, mode_dev);
774 774
775 dev->mode_config.max_width = 2048; 775 dev->mode_config.max_width = 4096;
776 dev->mode_config.max_height = 2048; 776 dev->mode_config.max_height = 4096;
777 777
778 psb_setup_outputs(dev); 778 psb_setup_outputs(dev);
779 779
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 054e26e769ec..1f82183536a3 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -80,7 +80,8 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
80 * the GTT. This is protected via the gtt mutex which the caller 80 * the GTT. This is protected via the gtt mutex which the caller
81 * must hold. 81 * must hold.
82 */ 82 */
83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r) 83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
84 int resume)
84{ 85{
85 u32 __iomem *gtt_slot; 86 u32 __iomem *gtt_slot;
86 u32 pte; 87 u32 pte;
@@ -97,8 +98,10 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
97 gtt_slot = psb_gtt_entry(dev, r); 98 gtt_slot = psb_gtt_entry(dev, r);
98 pages = r->pages; 99 pages = r->pages;
99 100
100 /* Make sure changes are visible to the GPU */ 101 if (!resume) {
101 set_pages_array_wc(pages, r->npage); 102 /* Make sure changes are visible to the GPU */
103 set_pages_array_wc(pages, r->npage);
104 }
102 105
103 /* Write our page entries into the GTT itself */ 106 /* Write our page entries into the GTT itself */
104 for (i = r->roll; i < r->npage; i++) { 107 for (i = r->roll; i < r->npage; i++) {
@@ -269,7 +272,7 @@ int psb_gtt_pin(struct gtt_range *gt)
269 ret = psb_gtt_attach_pages(gt); 272 ret = psb_gtt_attach_pages(gt);
270 if (ret < 0) 273 if (ret < 0)
271 goto out; 274 goto out;
272 ret = psb_gtt_insert(dev, gt); 275 ret = psb_gtt_insert(dev, gt, 0);
273 if (ret < 0) { 276 if (ret < 0) {
274 psb_gtt_detach_pages(gt); 277 psb_gtt_detach_pages(gt);
275 goto out; 278 goto out;
@@ -421,9 +424,11 @@ int psb_gtt_init(struct drm_device *dev, int resume)
421 int ret = 0; 424 int ret = 0;
422 uint32_t pte; 425 uint32_t pte;
423 426
424 mutex_init(&dev_priv->gtt_mutex); 427 if (!resume) {
428 mutex_init(&dev_priv->gtt_mutex);
429 psb_gtt_alloc(dev);
430 }
425 431
426 psb_gtt_alloc(dev);
427 pg = &dev_priv->gtt; 432 pg = &dev_priv->gtt;
428 433
429 /* Enable the GTT */ 434 /* Enable the GTT */
@@ -505,7 +510,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
505 /* 510 /*
506 * Map the GTT and the stolen memory area 511 * Map the GTT and the stolen memory area
507 */ 512 */
508 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start, 513 if (!resume)
514 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
509 gtt_pages << PAGE_SHIFT); 515 gtt_pages << PAGE_SHIFT);
510 if (!dev_priv->gtt_map) { 516 if (!dev_priv->gtt_map) {
511 dev_err(dev->dev, "Failure to map gtt.\n"); 517 dev_err(dev->dev, "Failure to map gtt.\n");
@@ -513,7 +519,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
513 goto out_err; 519 goto out_err;
514 } 520 }
515 521
516 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size); 522 if (!resume)
523 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
524 stolen_size);
517 if (!dev_priv->vram_addr) { 525 if (!dev_priv->vram_addr) {
518 dev_err(dev->dev, "Failure to map stolen base.\n"); 526 dev_err(dev->dev, "Failure to map stolen base.\n");
519 ret = -ENOMEM; 527 ret = -ENOMEM;
@@ -549,3 +557,31 @@ out_err:
549 psb_gtt_takedown(dev); 557 psb_gtt_takedown(dev);
550 return ret; 558 return ret;
551} 559}
560
561int psb_gtt_restore(struct drm_device *dev)
562{
563 struct drm_psb_private *dev_priv = dev->dev_private;
564 struct resource *r = dev_priv->gtt_mem->child;
565 struct gtt_range *range;
566 unsigned int restored = 0, total = 0, size = 0;
567
568 /* On resume, the gtt_mutex is already initialized */
569 mutex_lock(&dev_priv->gtt_mutex);
570 psb_gtt_init(dev, 1);
571
572 while (r != NULL) {
573 range = container_of(r, struct gtt_range, resource);
574 if (range->pages) {
575 psb_gtt_insert(dev, range, 1);
576 size += range->resource.end - range->resource.start;
577 restored++;
578 }
579 r = r->sibling;
580 total++;
581 }
582 mutex_unlock(&dev_priv->gtt_mutex);
 583		DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)\n", restored,
584 total, (size / 1024));
585
586 return 0;
587}
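
psb_gtt_restore() is the interesting addition: each pinned range embeds a struct resource inside the stolen-memory parent resource, so on resume the driver walks the parent's child/sibling list and recovers the enclosing gtt_range with container_of(), re-inserting only ranges that still have backing pages. The resume flag threaded through psb_gtt_insert() and psb_gtt_init() keeps that path from redoing one-time setup (mutex init, allocation, the two ioremaps), which would otherwise leak mappings on every resume. A sketch of the walk under stand-in names:

	#include <linux/ioport.h>
	#include <linux/kernel.h>

	struct example_range {
		struct resource resource;	/* linkage into the parent tree */
		struct page **pages;		/* NULL until the range is populated */
	};

	static unsigned int example_restore_ranges(struct resource *parent)
	{
		struct resource *r;
		unsigned int restored = 0;

		for (r = parent->child; r; r = r->sibling) {
			struct example_range *range =
				container_of(r, struct example_range, resource);

			if (range->pages)	/* only re-insert live ranges */
				restored++;
		}
		return restored;
	}
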
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index aa1742387f5a..6191d10acf33 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -60,5 +60,5 @@ extern int psb_gtt_pin(struct gtt_range *gt);
60extern void psb_gtt_unpin(struct gtt_range *gt); 60extern void psb_gtt_unpin(struct gtt_range *gt);
61extern void psb_gtt_roll(struct drm_device *dev, 61extern void psb_gtt_roll(struct drm_device *dev,
62 struct gtt_range *gt, int roll); 62 struct gtt_range *gt, int roll);
63 63extern int psb_gtt_restore(struct drm_device *dev);
64#endif 64#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 403fffb03abd..d3497348c4d5 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -218,12 +218,11 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
218 bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT); 218 bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
219 vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type; 219 vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
220 220
221 lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL); 221 lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
222 if (!lvds_bl) { 222 if (!lvds_bl) {
223 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n"); 223 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
224 return; 224 return;
225 } 225 }
226 memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
227 dev_priv->lvds_bl = lvds_bl; 226 dev_priv->lvds_bl = lvds_bl;
228} 227}
229 228
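
This is the stock kmemdup() conversion: one call that allocates and copies, replacing a kzalloc() whose zeroing was immediately overwritten by the memcpy(). Failure semantics are unchanged (NULL on allocation failure). A sketch with a hypothetical payload type:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct example_blob { int a, b; };

	static struct example_blob *example_dup(const struct example_blob *src)
	{
		return kmemdup(src, sizeof(*src), GFP_KERNEL);	/* alloc + copy */
	}
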
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index c6267c98c9e7..978ae4b25e82 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -19,8 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#ifndef _I830_BIOS_H_ 22#ifndef _INTEL_BIOS_H_
23#define _I830_BIOS_H_ 23#define _INTEL_BIOS_H_
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_dp_helper.h> 26#include <drm/drm_dp_helper.h>
@@ -618,4 +618,4 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
618#define PORT_IDPC 8 618#define PORT_IDPC 8
619#define PORT_IDPD 9 619#define PORT_IDPD 9
620 620
621#endif /* _I830_BIOS_H_ */ 621#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 2d4ab48f07a2..3abf8315f57c 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -92,8 +92,8 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
92{ 92{
93 struct mdfld_dsi_pkg_sender *sender = 93 struct mdfld_dsi_pkg_sender *sender =
94 mdfld_dsi_get_pkg_sender(dsi_config); 94 mdfld_dsi_get_pkg_sender(dsi_config);
95 struct drm_device *dev = sender->dev; 95 struct drm_device *dev;
96 struct drm_psb_private *dev_priv = dev->dev_private; 96 struct drm_psb_private *dev_priv;
97 u32 gen_ctrl_val; 97 u32 gen_ctrl_val;
98 98
99 if (!sender) { 99 if (!sender) {
@@ -101,6 +101,9 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
101 return; 101 return;
102 } 102 }
103 103
104 dev = sender->dev;
105 dev_priv = dev->dev_private;
106
104 /* Set default display backlight value to 85% (0xd8)*/ 107 /* Set default display backlight value to 85% (0xd8)*/
105 mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1, 108 mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
106 true); 109 true);
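
The real fix here is ordering: the old code dereferenced sender->dev in the declaration initializers, before the !sender guard could run, so the guard protected nothing. Moving the assignments below the check restores the validate-then-use order. The general shape, with stand-in types:

	static void example_init(struct example_sender *sender)
	{
		struct device *dev;

		if (!sender)		/* validate first ... */
			return;

		dev = sender->dev;	/* ... dereference only afterwards */
		/* ... use dev ... */
	}
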
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 889b854751da..b6b135fcd59c 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -110,6 +110,8 @@ static void gma_resume_display(struct pci_dev *pdev)
110 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); 110 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
111 pci_write_config_word(pdev, PSB_GMCH_CTRL, 111 pci_write_config_word(pdev, PSB_GMCH_CTRL,
112 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED); 112 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
113
114 psb_gtt_restore(dev); /* Rebuild our GTT mappings */
113 dev_priv->ops->restore_regs(dev); 115 dev_priv->ops->restore_regs(dev);
114} 116}
115 117
@@ -313,3 +315,18 @@ int psb_runtime_idle(struct device *dev)
313 else 315 else
314 return 1; 316 return 1;
315} 317}
318
319int gma_power_thaw(struct device *_dev)
320{
321 return gma_power_resume(_dev);
322}
323
324int gma_power_freeze(struct device *_dev)
325{
326 return gma_power_suspend(_dev);
327}
328
329int gma_power_restore(struct device *_dev)
330{
331 return gma_power_resume(_dev);
332}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 1969d2ecb328..56d8708bd41c 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -41,6 +41,9 @@ void gma_power_uninit(struct drm_device *dev);
41 */ 41 */
42int gma_power_suspend(struct device *dev); 42int gma_power_suspend(struct device *dev);
43int gma_power_resume(struct device *dev); 43int gma_power_resume(struct device *dev);
44int gma_power_thaw(struct device *dev);
45int gma_power_freeze(struct device *dev);
46int gma_power_restore(struct device *_dev);
44 47
45/* 48/*
46 * These are the functions the driver should use to wrap all hw access 49 * These are the functions the driver should use to wrap all hw access
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 111e3df9c5de..bddea5807442 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -601,6 +601,9 @@ static void psb_remove(struct pci_dev *pdev)
601static const struct dev_pm_ops psb_pm_ops = { 601static const struct dev_pm_ops psb_pm_ops = {
602 .resume = gma_power_resume, 602 .resume = gma_power_resume,
603 .suspend = gma_power_suspend, 603 .suspend = gma_power_suspend,
604 .thaw = gma_power_thaw,
605 .freeze = gma_power_freeze,
606 .restore = gma_power_restore,
604 .runtime_suspend = psb_runtime_suspend, 607 .runtime_suspend = psb_runtime_suspend,
605 .runtime_resume = psb_runtime_resume, 608 .runtime_resume = psb_runtime_resume,
606 .runtime_idle = psb_runtime_idle, 609 .runtime_idle = psb_runtime_idle,
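
Taken together, the power.c, power.h and psb_drv.c hunks fill in the hibernation half of dev_pm_ops: freeze, thaw and restore reuse the existing suspend/resume paths, since the hardware work is identical, and without these callbacks the PM core simply skips the driver during hibernate transitions. A sketch of the wiring, with example_suspend/example_resume standing in for the driver's real handlers:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev)  { return 0; }

	static int example_freeze(struct device *dev)  { return example_suspend(dev); }
	static int example_thaw(struct device *dev)    { return example_resume(dev); }
	static int example_restore(struct device *dev) { return example_resume(dev); }

	static const struct dev_pm_ops example_pm_ops = {
		.suspend = example_suspend,
		.resume  = example_resume,
		.freeze  = example_freeze,
		.thaw    = example_thaw,
		.restore = example_restore,
	};
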
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index a7fd6c48b793..6053b8abcd12 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -876,7 +876,6 @@ extern const struct psb_ops cdv_chip_ops;
876#define PSB_D_MSVDX (1 << 9) 876#define PSB_D_MSVDX (1 << 9)
877#define PSB_D_TOPAZ (1 << 10) 877#define PSB_D_TOPAZ (1 << 10)
878 878
879extern int drm_psb_no_fb;
880extern int drm_idle_check_interval; 879extern int drm_idle_check_interval;
881 880
882/* 881/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9edb1902a096..6e8f42b61ff6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -50,119 +50,41 @@ struct psb_intel_p2_t {
50 int p2_slow, p2_fast; 50 int p2_slow, p2_fast;
51}; 51};
52 52
53#define INTEL_P2_NUM 2
54
55struct psb_intel_limit_t { 53struct psb_intel_limit_t {
56 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; 54 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
57 struct psb_intel_p2_t p2; 55 struct psb_intel_p2_t p2;
58}; 56};
59 57
60#define I8XX_DOT_MIN 25000 58#define INTEL_LIMIT_I9XX_SDVO_DAC 0
61#define I8XX_DOT_MAX 350000 59#define INTEL_LIMIT_I9XX_LVDS 1
62#define I8XX_VCO_MIN 930000
63#define I8XX_VCO_MAX 1400000
64#define I8XX_N_MIN 3
65#define I8XX_N_MAX 16
66#define I8XX_M_MIN 96
67#define I8XX_M_MAX 140
68#define I8XX_M1_MIN 18
69#define I8XX_M1_MAX 26
70#define I8XX_M2_MIN 6
71#define I8XX_M2_MAX 16
72#define I8XX_P_MIN 4
73#define I8XX_P_MAX 128
74#define I8XX_P1_MIN 2
75#define I8XX_P1_MAX 33
76#define I8XX_P1_LVDS_MIN 1
77#define I8XX_P1_LVDS_MAX 6
78#define I8XX_P2_SLOW 4
79#define I8XX_P2_FAST 2
80#define I8XX_P2_LVDS_SLOW 14
81#define I8XX_P2_LVDS_FAST 14 /* No fast option */
82#define I8XX_P2_SLOW_LIMIT 165000
83
84#define I9XX_DOT_MIN 20000
85#define I9XX_DOT_MAX 400000
86#define I9XX_VCO_MIN 1400000
87#define I9XX_VCO_MAX 2800000
88#define I9XX_N_MIN 1
89#define I9XX_N_MAX 6
90#define I9XX_M_MIN 70
91#define I9XX_M_MAX 120
92#define I9XX_M1_MIN 8
93#define I9XX_M1_MAX 18
94#define I9XX_M2_MIN 3
95#define I9XX_M2_MAX 7
96#define I9XX_P_SDVO_DAC_MIN 5
97#define I9XX_P_SDVO_DAC_MAX 80
98#define I9XX_P_LVDS_MIN 7
99#define I9XX_P_LVDS_MAX 98
100#define I9XX_P1_MIN 1
101#define I9XX_P1_MAX 8
102#define I9XX_P2_SDVO_DAC_SLOW 10
103#define I9XX_P2_SDVO_DAC_FAST 5
104#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
105#define I9XX_P2_LVDS_SLOW 14
106#define I9XX_P2_LVDS_FAST 7
107#define I9XX_P2_LVDS_SLOW_LIMIT 112000
108
109#define INTEL_LIMIT_I8XX_DVO_DAC 0
110#define INTEL_LIMIT_I8XX_LVDS 1
111#define INTEL_LIMIT_I9XX_SDVO_DAC 2
112#define INTEL_LIMIT_I9XX_LVDS 3
113 60
114static const struct psb_intel_limit_t psb_intel_limits[] = { 61static const struct psb_intel_limit_t psb_intel_limits[] = {
115 { /* INTEL_LIMIT_I8XX_DVO_DAC */
116 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
117 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
118 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
119 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
120 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
121 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
122 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
123 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
124 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
125 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
126 },
127 { /* INTEL_LIMIT_I8XX_LVDS */
128 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
129 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
130 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
131 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
132 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
133 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
134 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
135 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
136 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
137 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
138 },
139 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 62 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
140 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 63 .dot = {.min = 20000, .max = 400000},
141 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, 64 .vco = {.min = 1400000, .max = 2800000},
142 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, 65 .n = {.min = 1, .max = 6},
143 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, 66 .m = {.min = 70, .max = 120},
144 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, 67 .m1 = {.min = 8, .max = 18},
145 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, 68 .m2 = {.min = 3, .max = 7},
146 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, 69 .p = {.min = 5, .max = 80},
147 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, 70 .p1 = {.min = 1, .max = 8},
148 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 71 .p2 = {.dot_limit = 200000,
149 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = 72 .p2_slow = 10, .p2_fast = 5},
150 I9XX_P2_SDVO_DAC_FAST},
151 }, 73 },
152 { /* INTEL_LIMIT_I9XX_LVDS */ 74 { /* INTEL_LIMIT_I9XX_LVDS */
153 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 75 .dot = {.min = 20000, .max = 400000},
154 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, 76 .vco = {.min = 1400000, .max = 2800000},
155 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, 77 .n = {.min = 1, .max = 6},
156 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, 78 .m = {.min = 70, .max = 120},
157 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, 79 .m1 = {.min = 8, .max = 18},
158 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, 80 .m2 = {.min = 3, .max = 7},
159 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, 81 .p = {.min = 7, .max = 98},
160 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, 82 .p1 = {.min = 1, .max = 8},
161 /* The single-channel range is 25-112Mhz, and dual-channel 83 /* The single-channel range is 25-112Mhz, and dual-channel
162 * is 80-224Mhz. Prefer single channel as much as possible. 84 * is 80-224Mhz. Prefer single channel as much as possible.
163 */ 85 */
164 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 86 .p2 = {.dot_limit = 112000,
165 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, 87 .p2_slow = 14, .p2_fast = 7},
166 }, 88 },
167}; 89};
168 90
@@ -177,9 +99,7 @@ static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
177 return limit; 99 return limit;
178} 100}
179 101
180/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 102static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
181
182static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
183{ 103{
184 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 104 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
185 clock->p = clock->p1 * clock->p2; 105 clock->p = clock->p1 * clock->p2;
@@ -187,22 +107,6 @@ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
187 clock->dot = clock->vco / clock->p; 107 clock->dot = clock->vco / clock->p;
188} 108}
189 109
190/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
191
192static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
193{
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2);
197 clock->dot = clock->vco / clock->p;
198}
199
200static void psb_intel_clock(struct drm_device *dev, int refclk,
201 struct psb_intel_clock_t *clock)
202{
203 return i9xx_clock(refclk, clock);
204}
205
206/** 110/**
207 * Returns whether any output on the specified pipe is of the specified type 111 * Returns whether any output on the specified pipe is of the specified type
208 */ 112 */
@@ -308,7 +212,7 @@ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
308 clock.p1++) { 212 clock.p1++) {
309 int this_err; 213 int this_err;
310 214
311 psb_intel_clock(dev, refclk, &clock); 215 psb_intel_clock(refclk, &clock);
312 216
313 if (!psb_intel_PLL_is_valid 217 if (!psb_intel_PLL_is_valid
314 (crtc, &clock)) 218 (crtc, &clock))
@@ -1068,7 +972,7 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1068 return 0; 972 return 0;
1069} 973}
1070 974
1071void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, 975static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1072 u16 *green, u16 *blue, uint32_t type, uint32_t size) 976 u16 *green, u16 *blue, uint32_t type, uint32_t size)
1073{ 977{
1074 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 978 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -1149,9 +1053,9 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1149 if ((dpll & PLL_REF_INPUT_MASK) == 1053 if ((dpll & PLL_REF_INPUT_MASK) ==
1150 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1054 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1151 /* XXX: might not be 66MHz */ 1055 /* XXX: might not be 66MHz */
1152 i8xx_clock(66000, &clock); 1056 psb_intel_clock(66000, &clock);
1153 } else 1057 } else
1154 i8xx_clock(48000, &clock); 1058 psb_intel_clock(48000, &clock);
1155 } else { 1059 } else {
1156 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1060 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1157 clock.p1 = 2; 1061 clock.p1 = 2;
@@ -1166,7 +1070,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1166 else 1070 else
1167 clock.p2 = 2; 1071 clock.p2 = 2;
1168 1072
1169 i8xx_clock(48000, &clock); 1073 psb_intel_clock(48000, &clock);
1170 } 1074 }
1171 1075
1172 /* XXX: It would be nice to validate the clocks, but we can't reuse 1076 /* XXX: It would be nice to validate the clocks, but we can't reuse
@@ -1225,7 +1129,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1225 return mode; 1129 return mode;
1226} 1130}
1227 1131
1228void psb_intel_crtc_destroy(struct drm_crtc *crtc) 1132static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1229{ 1133{
1230 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1134 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1231 struct gtt_range *gt; 1135 struct gtt_range *gt;
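
The limit tables collapse because this hardware only ever used the i9xx derivation; the i8xx constants and i8xx_clock() were dead weight, and the named I9XX_* macros added nothing over literal values next to their comments. The one surviving formula, for reference:

	m   = 5 * (m1 + 2) + (m2 + 2)
	p   = p1 * p2
	vco = refclk * m / (n + 2)
	dot = vco / p

Worked through with assumed divisors inside the I9XX limits above: refclk = 48000 kHz, m1 = 16, m2 = 7, n = 1 gives m = 99 and vco = 1,584,000 kHz; with p1 = 2 and p2 = 10, p = 20 and the dot clock is 79,200 kHz (79.2 MHz), within both the 20,000-400,000 kHz dot range and the 1,400,000-2,800,000 kHz vco range, with p2 = 10 consistent with the slow setting below the 200,000 kHz dot_limit.
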
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
index 535b49a5e409..3724b971e91c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -21,8 +21,5 @@
21#define _INTEL_DISPLAY_H_ 21#define _INTEL_DISPLAY_H_
22 22
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
24void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
25 u16 *green, u16 *blue, uint32_t type, uint32_t size);
26void psb_intel_crtc_destroy(struct drm_crtc *crtc);
27 24
28#endif 25#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 90f2d11e686b..4dcae421a58d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -32,9 +32,6 @@
32/* maximum connectors per crtcs in the mode set */ 32/* maximum connectors per crtcs in the mode set */
33#define INTELFB_CONN_LIMIT 4 33#define INTELFB_CONN_LIMIT 4
34 34
35#define INTEL_I2C_BUS_DVO 1
36#define INTEL_I2C_BUS_SDVO 2
37
38/* Intel Pipe Clone Bit */ 35/* Intel Pipe Clone Bit */
39#define INTEL_HDMIB_CLONE_BIT 1 36#define INTEL_HDMIB_CLONE_BIT 1
40#define INTEL_HDMIC_CLONE_BIT 2 37#define INTEL_HDMIC_CLONE_BIT 2
@@ -68,11 +65,6 @@
68#define INTEL_OUTPUT_DISPLAYPORT 9 65#define INTEL_OUTPUT_DISPLAYPORT 9
69#define INTEL_OUTPUT_EDP 10 66#define INTEL_OUTPUT_EDP 10
70 67
71#define INTEL_DVO_CHIP_NONE 0
72#define INTEL_DVO_CHIP_LVDS 1
73#define INTEL_DVO_CHIP_TMDS 2
74#define INTEL_DVO_CHIP_TVOUT 4
75
76#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 68#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
77#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 69#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
78 70
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index d914719c4b60..0be30e4d146d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -493,7 +493,6 @@
493#define PIPEACONF_DISABLE 0 493#define PIPEACONF_DISABLE 0
494#define PIPEACONF_DOUBLE_WIDE (1 << 30) 494#define PIPEACONF_DOUBLE_WIDE (1 << 30)
495#define PIPECONF_ACTIVE (1 << 30) 495#define PIPECONF_ACTIVE (1 << 30)
496#define I965_PIPECONF_ACTIVE (1 << 30)
497#define PIPECONF_DSIPLL_LOCK (1 << 29) 496#define PIPECONF_DSIPLL_LOCK (1 << 29)
498#define PIPEACONF_SINGLE_WIDE 0 497#define PIPEACONF_SINGLE_WIDE 0
499#define PIPEACONF_PIPE_UNLOCKED 0 498#define PIPEACONF_PIPE_UNLOCKED 0
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a4cc777ab7a6..19e36603b23b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -134,6 +134,9 @@ struct psb_intel_sdvo {
134 134
135 /* Input timings for adjusted_mode */ 135 /* Input timings for adjusted_mode */
136 struct psb_intel_sdvo_dtd input_dtd; 136 struct psb_intel_sdvo_dtd input_dtd;
137
138 /* Saved SDVO output states */
139 uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
137}; 140};
138 141
139struct psb_intel_sdvo_connector { 142struct psb_intel_sdvo_connector {
@@ -1830,6 +1833,34 @@ done:
1830#undef CHECK_PROPERTY 1833#undef CHECK_PROPERTY
1831} 1834}
1832 1835
1836static void psb_intel_sdvo_save(struct drm_connector *connector)
1837{
1838 struct drm_device *dev = connector->dev;
1839 struct psb_intel_encoder *psb_intel_encoder =
1840 psb_intel_attached_encoder(connector);
1841 struct psb_intel_sdvo *sdvo =
1842 to_psb_intel_sdvo(&psb_intel_encoder->base);
1843
1844 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
1845}
1846
1847static void psb_intel_sdvo_restore(struct drm_connector *connector)
1848{
1849 struct drm_device *dev = connector->dev;
1850 struct drm_encoder *encoder =
1851 &psb_intel_attached_encoder(connector)->base;
1852 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
1853 struct drm_crtc *crtc = encoder->crtc;
1854
1855 REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
1856
1857 /* Force a full mode set on the crtc. We're supposed to have the
1858 mode_config lock already. */
1859 if (connector->status == connector_status_connected)
1860 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
1861 NULL);
1862}
1863
1833static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1864static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1834 .dpms = psb_intel_sdvo_dpms, 1865 .dpms = psb_intel_sdvo_dpms,
1835 .mode_fixup = psb_intel_sdvo_mode_fixup, 1866 .mode_fixup = psb_intel_sdvo_mode_fixup,
@@ -1840,6 +1871,8 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1840 1871
1841static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1872static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1842 .dpms = drm_helper_connector_dpms, 1873 .dpms = drm_helper_connector_dpms,
1874 .save = psb_intel_sdvo_save,
1875 .restore = psb_intel_sdvo_restore,
1843 .detect = psb_intel_sdvo_detect, 1876 .detect = psb_intel_sdvo_detect,
1844 .fill_modes = drm_helper_probe_single_connector_modes, 1877 .fill_modes = drm_helper_probe_single_connector_modes,
1845 .set_property = psb_intel_sdvo_set_property, 1878 .set_property = psb_intel_sdvo_set_property,
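
The new .save/.restore connector hooks give SDVO outputs suspend survival: the control register (SDVOB or SDVOC, whichever sdvo_reg names) is snapshotted on save, written back on restore, and a full drm_crtc_helper_set_mode() is forced for connected outputs so the rest of the pipe is reprogrammed to match. The shape of the pattern, with hypothetical encoder plumbing (REG_READ/REG_WRITE stand for the driver's MMIO accessors, to_example_encoder() for the connector-to-encoder lookup):

	#include <drm/drm_crtc_helper.h>

	struct example_encoder {
		struct drm_encoder base;
		u32 ctl_reg;		/* which control register to snapshot */
		u32 saved_ctl;
	};

	static void example_save(struct drm_connector *connector)
	{
		struct example_encoder *enc = to_example_encoder(connector);

		enc->saved_ctl = REG_READ(enc->ctl_reg);
	}

	static void example_restore(struct drm_connector *connector)
	{
		struct example_encoder *enc = to_example_encoder(connector);
		struct drm_crtc *crtc = enc->base.crtc;

		REG_WRITE(enc->ctl_reg, enc->saved_ctl);
		if (connector->status == connector_status_connected)
			drm_crtc_helper_set_mode(crtc, &crtc->mode,
						 crtc->x, crtc->y, NULL);
	}
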
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf3f03f..029eccf30137 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
211 211
212 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R); 212 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
213 213
214 if (vdc_stat & _PSB_PIPE_EVENT_FLAG) 214 if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
215 dsp_int = 1; 215 dsp_int = 1;
216 216
217 /* FIXME: Handle Medfield 217 /* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 603045bee58a..debb7f190c06 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -21,8 +21,8 @@
21 * 21 *
22 **************************************************************************/ 22 **************************************************************************/
23 23
24#ifndef _SYSIRQ_H_ 24#ifndef _PSB_IRQ_H_
25#define _SYSIRQ_H_ 25#define _PSB_IRQ_H_
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28 28
@@ -44,4 +44,4 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
44 44
45int mdfld_enable_te(struct drm_device *dev, int pipe); 45int mdfld_enable_te(struct drm_device *dev, int pipe);
46void mdfld_disable_te(struct drm_device *dev, int pipe); 46void mdfld_disable_te(struct drm_device *dev, int pipe);
47#endif /* _SYSIRQ_H_ */ 47#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7299ea45dd03..e913d325d5b8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -772,6 +772,23 @@ static int i915_error_state(struct seq_file *m, void *unused)
772 } 772 }
773 } 773 }
774 } 774 }
775
776 obj = error->ring[i].ctx;
777 if (obj) {
778 seq_printf(m, "%s --- HW Context = 0x%08x\n",
779 dev_priv->ring[i].name,
780 obj->gtt_offset);
781 offset = 0;
782 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
783 seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
784 offset,
785 obj->pages[0][elt],
786 obj->pages[0][elt+1],
787 obj->pages[0][elt+2],
788 obj->pages[0][elt+3]);
789 offset += 16;
790 }
791 }
775 } 792 }
776 793
777 if (error->overlay) 794 if (error->overlay)
@@ -849,76 +866,42 @@ static const struct file_operations i915_error_state_fops = {
849 .release = i915_error_state_release, 866 .release = i915_error_state_release,
850}; 867};
851 868
852static ssize_t 869static int
853i915_next_seqno_read(struct file *filp, 870i915_next_seqno_get(void *data, u64 *val)
854 char __user *ubuf,
855 size_t max,
856 loff_t *ppos)
857{ 871{
858 struct drm_device *dev = filp->private_data; 872 struct drm_device *dev = data;
859 drm_i915_private_t *dev_priv = dev->dev_private; 873 drm_i915_private_t *dev_priv = dev->dev_private;
860 char buf[80];
861 int len;
862 int ret; 874 int ret;
863 875
864 ret = mutex_lock_interruptible(&dev->struct_mutex); 876 ret = mutex_lock_interruptible(&dev->struct_mutex);
865 if (ret) 877 if (ret)
866 return ret; 878 return ret;
867 879
868 len = snprintf(buf, sizeof(buf), 880 *val = dev_priv->next_seqno;
869 "next_seqno : 0x%x\n",
870 dev_priv->next_seqno);
871
872 mutex_unlock(&dev->struct_mutex); 881 mutex_unlock(&dev->struct_mutex);
873 882
874 if (len > sizeof(buf)) 883 return 0;
875 len = sizeof(buf);
876
877 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
878} 884}
879 885
880static ssize_t 886static int
881i915_next_seqno_write(struct file *filp, 887i915_next_seqno_set(void *data, u64 val)
882 const char __user *ubuf, 888{
883 size_t cnt, 889 struct drm_device *dev = data;
884 loff_t *ppos)
885{
886 struct drm_device *dev = filp->private_data;
887 char buf[20];
888 u32 val = 1;
889 int ret; 890 int ret;
890 891
891 if (cnt > 0) {
892 if (cnt > sizeof(buf) - 1)
893 return -EINVAL;
894
895 if (copy_from_user(buf, ubuf, cnt))
896 return -EFAULT;
897 buf[cnt] = 0;
898
899 ret = kstrtouint(buf, 0, &val);
900 if (ret < 0)
901 return ret;
902 }
903
904 ret = mutex_lock_interruptible(&dev->struct_mutex); 892 ret = mutex_lock_interruptible(&dev->struct_mutex);
905 if (ret) 893 if (ret)
906 return ret; 894 return ret;
907 895
908 ret = i915_gem_set_seqno(dev, val); 896 ret = i915_gem_set_seqno(dev, val);
909
910 mutex_unlock(&dev->struct_mutex); 897 mutex_unlock(&dev->struct_mutex);
911 898
912 return ret ?: cnt; 899 return ret;
913} 900}
914 901
915static const struct file_operations i915_next_seqno_fops = { 902DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
916 .owner = THIS_MODULE, 903 i915_next_seqno_get, i915_next_seqno_set,
917 .open = simple_open, 904 "0x%llx\n");
918 .read = i915_next_seqno_read,
919 .write = i915_next_seqno_write,
920 .llseek = default_llseek,
921};
922 905
923static int i915_rstdby_delays(struct seq_file *m, void *unused) 906static int i915_rstdby_delays(struct seq_file *m, void *unused)
924{ 907{
@@ -1023,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
1023 max_freq = rp_state_cap & 0xff; 1006 max_freq = rp_state_cap & 0xff;
1024 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1007 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1025 max_freq * GT_FREQUENCY_MULTIPLIER); 1008 max_freq * GT_FREQUENCY_MULTIPLIER);
1009
1010 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1011 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
1026 } else { 1012 } else {
1027 seq_printf(m, "no P-state info available\n"); 1013 seq_printf(m, "no P-state info available\n");
1028 } 1014 }
@@ -1371,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1371 if (ret) 1357 if (ret)
1372 return ret; 1358 return ret;
1373 1359
1374 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1360 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1375 1361
1376 for (gpu_freq = dev_priv->rps.min_delay; 1362 for (gpu_freq = dev_priv->rps.min_delay;
1377 gpu_freq <= dev_priv->rps.max_delay; 1363 gpu_freq <= dev_priv->rps.max_delay;
@@ -1380,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1380 sandybridge_pcode_read(dev_priv, 1366 sandybridge_pcode_read(dev_priv,
1381 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1367 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1382 &ia_freq); 1368 &ia_freq);
1383 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1369 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1370 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1371 ((ia_freq >> 0) & 0xff) * 100,
1372 ((ia_freq >> 8) & 0xff) * 100);
1384 } 1373 }
1385 1374
1386 mutex_unlock(&dev_priv->rps.hw_lock); 1375 mutex_unlock(&dev_priv->rps.hw_lock);
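
The ring-frequency table now prints both halves of the PCODE reply: per the hunk, the low byte is the effective CPU frequency and the next byte the effective ring frequency, each in units of 100 MHz. The unpacking, isolated:

	static void example_decode_ia_freq(u32 ia_freq,
					   unsigned int *cpu_mhz,
					   unsigned int *ring_mhz)
	{
		*cpu_mhz  = ((ia_freq >> 0) & 0xff) * 100;	/* low byte */
		*ring_mhz = ((ia_freq >> 8) & 0xff) * 100;	/* next byte up */
	}
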
@@ -1680,105 +1669,51 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1680 return 0; 1669 return 0;
1681} 1670}
1682 1671
1683static ssize_t 1672static int
1684i915_wedged_read(struct file *filp, 1673i915_wedged_get(void *data, u64 *val)
1685 char __user *ubuf,
1686 size_t max,
1687 loff_t *ppos)
1688{ 1674{
1689 struct drm_device *dev = filp->private_data; 1675 struct drm_device *dev = data;
1690 drm_i915_private_t *dev_priv = dev->dev_private; 1676 drm_i915_private_t *dev_priv = dev->dev_private;
1691 char buf[80];
1692 int len;
1693 1677
1694 len = snprintf(buf, sizeof(buf), 1678 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
1695 "wedged : %d\n",
1696 atomic_read(&dev_priv->gpu_error.reset_counter));
1697 1679
1698 if (len > sizeof(buf)) 1680 return 0;
1699 len = sizeof(buf);
1700
1701 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1702} 1681}
1703 1682
1704static ssize_t 1683static int
1705i915_wedged_write(struct file *filp, 1684i915_wedged_set(void *data, u64 val)
1706 const char __user *ubuf,
1707 size_t cnt,
1708 loff_t *ppos)
1709{ 1685{
1710 struct drm_device *dev = filp->private_data; 1686 struct drm_device *dev = data;
1711 char buf[20];
1712 int val = 1;
1713
1714 if (cnt > 0) {
1715 if (cnt > sizeof(buf) - 1)
1716 return -EINVAL;
1717 1687
1718 if (copy_from_user(buf, ubuf, cnt)) 1688 DRM_INFO("Manually setting wedged to %llu\n", val);
1719 return -EFAULT;
1720 buf[cnt] = 0;
1721
1722 val = simple_strtoul(buf, NULL, 0);
1723 }
1724
1725 DRM_INFO("Manually setting wedged to %d\n", val);
1726 i915_handle_error(dev, val); 1689 i915_handle_error(dev, val);
1727 1690
1728 return cnt; 1691 return 0;
1729} 1692}
1730 1693
1731static const struct file_operations i915_wedged_fops = { 1694DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1732 .owner = THIS_MODULE, 1695 i915_wedged_get, i915_wedged_set,
1733 .open = simple_open, 1696 "%llu\n");
1734 .read = i915_wedged_read,
1735 .write = i915_wedged_write,
1736 .llseek = default_llseek,
1737};
1738 1697
1739static ssize_t 1698static int
1740i915_ring_stop_read(struct file *filp, 1699i915_ring_stop_get(void *data, u64 *val)
1741 char __user *ubuf,
1742 size_t max,
1743 loff_t *ppos)
1744{ 1700{
1745 struct drm_device *dev = filp->private_data; 1701 struct drm_device *dev = data;
1746 drm_i915_private_t *dev_priv = dev->dev_private; 1702 drm_i915_private_t *dev_priv = dev->dev_private;
1747 char buf[20];
1748 int len;
1749 1703
1750 len = snprintf(buf, sizeof(buf), 1704 *val = dev_priv->gpu_error.stop_rings;
1751 "0x%08x\n", dev_priv->gpu_error.stop_rings);
1752 1705
1753 if (len > sizeof(buf)) 1706 return 0;
1754 len = sizeof(buf);
1755
1756 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1757} 1707}
1758 1708
1759static ssize_t 1709static int
1760i915_ring_stop_write(struct file *filp, 1710i915_ring_stop_set(void *data, u64 val)
1761 const char __user *ubuf,
1762 size_t cnt,
1763 loff_t *ppos)
1764{ 1711{
1765 struct drm_device *dev = filp->private_data; 1712 struct drm_device *dev = data;
1766 struct drm_i915_private *dev_priv = dev->dev_private; 1713 struct drm_i915_private *dev_priv = dev->dev_private;
1767 char buf[20]; 1714 int ret;
1768 int val = 0, ret;
1769
1770 if (cnt > 0) {
1771 if (cnt > sizeof(buf) - 1)
1772 return -EINVAL;
1773
1774 if (copy_from_user(buf, ubuf, cnt))
1775 return -EFAULT;
1776 buf[cnt] = 0;
1777
1778 val = simple_strtoul(buf, NULL, 0);
1779 }
1780 1715
1781 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1716 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
1782 1717
1783 ret = mutex_lock_interruptible(&dev->struct_mutex); 1718 ret = mutex_lock_interruptible(&dev->struct_mutex);
1784 if (ret) 1719 if (ret)
@@ -1787,16 +1722,12 @@ i915_ring_stop_write(struct file *filp,
1787 dev_priv->gpu_error.stop_rings = val; 1722 dev_priv->gpu_error.stop_rings = val;
1788 mutex_unlock(&dev->struct_mutex); 1723 mutex_unlock(&dev->struct_mutex);
1789 1724
1790 return cnt; 1725 return 0;
1791} 1726}
1792 1727
1793static const struct file_operations i915_ring_stop_fops = { 1728DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1794 .owner = THIS_MODULE, 1729 i915_ring_stop_get, i915_ring_stop_set,
1795 .open = simple_open, 1730 "0x%08llx\n");
1796 .read = i915_ring_stop_read,
1797 .write = i915_ring_stop_write,
1798 .llseek = default_llseek,
1799};
1800 1731
1801#define DROP_UNBOUND 0x1 1732#define DROP_UNBOUND 0x1
1802#define DROP_BOUND 0x2 1733#define DROP_BOUND 0x2
@@ -1806,46 +1737,23 @@ static const struct file_operations i915_ring_stop_fops = {
1806 DROP_BOUND | \ 1737 DROP_BOUND | \
1807 DROP_RETIRE | \ 1738 DROP_RETIRE | \
1808 DROP_ACTIVE) 1739 DROP_ACTIVE)
1809static ssize_t 1740static int
1810i915_drop_caches_read(struct file *filp, 1741i915_drop_caches_get(void *data, u64 *val)
1811 char __user *ubuf,
1812 size_t max,
1813 loff_t *ppos)
1814{ 1742{
1815 char buf[20]; 1743 *val = DROP_ALL;
1816 int len;
1817 1744
1818 len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL); 1745 return 0;
1819 if (len > sizeof(buf))
1820 len = sizeof(buf);
1821
1822 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1823} 1746}
1824 1747
1825static ssize_t 1748static int
1826i915_drop_caches_write(struct file *filp, 1749i915_drop_caches_set(void *data, u64 val)
1827 const char __user *ubuf,
1828 size_t cnt,
1829 loff_t *ppos)
1830{ 1750{
1831 struct drm_device *dev = filp->private_data; 1751 struct drm_device *dev = data;
1832 struct drm_i915_private *dev_priv = dev->dev_private; 1752 struct drm_i915_private *dev_priv = dev->dev_private;
1833 struct drm_i915_gem_object *obj, *next; 1753 struct drm_i915_gem_object *obj, *next;
1834 char buf[20]; 1754 int ret;
1835 int val = 0, ret;
1836
1837 if (cnt > 0) {
1838 if (cnt > sizeof(buf) - 1)
1839 return -EINVAL;
1840
1841 if (copy_from_user(buf, ubuf, cnt))
1842 return -EFAULT;
1843 buf[cnt] = 0;
1844
1845 val = simple_strtoul(buf, NULL, 0);
1846 }
1847 1755
1848 DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val); 1756 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
1849 1757
1850 /* No need to check and wait for gpu resets, only libdrm auto-restarts 1758 /* No need to check and wait for gpu resets, only libdrm auto-restarts
1851 * on ioctls on -EAGAIN. */ 1759 * on ioctls on -EAGAIN. */
@@ -1883,27 +1791,19 @@ i915_drop_caches_write(struct file *filp,
1883unlock: 1791unlock:
1884 mutex_unlock(&dev->struct_mutex); 1792 mutex_unlock(&dev->struct_mutex);
1885 1793
1886 return ret ?: cnt; 1794 return ret;
1887} 1795}
1888 1796
1889static const struct file_operations i915_drop_caches_fops = { 1797DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
1890 .owner = THIS_MODULE, 1798 i915_drop_caches_get, i915_drop_caches_set,
1891 .open = simple_open, 1799 "0x%08llx\n");
1892 .read = i915_drop_caches_read,
1893 .write = i915_drop_caches_write,
1894 .llseek = default_llseek,
1895};
1896 1800
1897static ssize_t 1801static int
1898i915_max_freq_read(struct file *filp, 1802i915_max_freq_get(void *data, u64 *val)
1899 char __user *ubuf,
1900 size_t max,
1901 loff_t *ppos)
1902{ 1803{
1903 struct drm_device *dev = filp->private_data; 1804 struct drm_device *dev = data;
1904 drm_i915_private_t *dev_priv = dev->dev_private; 1805 drm_i915_private_t *dev_priv = dev->dev_private;
1905 char buf[80]; 1806 int ret;
1906 int len, ret;
1907 1807
1908 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1808 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1909 return -ENODEV; 1809 return -ENODEV;
@@ -1912,42 +1812,23 @@ i915_max_freq_read(struct file *filp,
1912 if (ret) 1812 if (ret)
1913 return ret; 1813 return ret;
1914 1814
1915 len = snprintf(buf, sizeof(buf), 1815 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
1916 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1917 mutex_unlock(&dev_priv->rps.hw_lock); 1816 mutex_unlock(&dev_priv->rps.hw_lock);
1918 1817
1919 if (len > sizeof(buf)) 1818 return 0;
1920 len = sizeof(buf);
1921
1922 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1923} 1819}
1924 1820
1925static ssize_t 1821static int
1926i915_max_freq_write(struct file *filp, 1822i915_max_freq_set(void *data, u64 val)
1927 const char __user *ubuf,
1928 size_t cnt,
1929 loff_t *ppos)
1930{ 1823{
1931 struct drm_device *dev = filp->private_data; 1824 struct drm_device *dev = data;
1932 struct drm_i915_private *dev_priv = dev->dev_private; 1825 struct drm_i915_private *dev_priv = dev->dev_private;
1933 char buf[20]; 1826 int ret;
1934 int val = 1, ret;
1935 1827
1936 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1828 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1937 return -ENODEV; 1829 return -ENODEV;
1938 1830
1939 if (cnt > 0) { 1831 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
1940 if (cnt > sizeof(buf) - 1)
1941 return -EINVAL;
1942
1943 if (copy_from_user(buf, ubuf, cnt))
1944 return -EFAULT;
1945 buf[cnt] = 0;
1946
1947 val = simple_strtoul(buf, NULL, 0);
1948 }
1949
1950 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1951 1832
1952 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1833 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1953 if (ret) 1834 if (ret)
@@ -1956,30 +1837,24 @@ i915_max_freq_write(struct file *filp,
1956 /* 1837 /*
1957 * Turbo will still be enabled, but won't go above the set value. 1838 * Turbo will still be enabled, but won't go above the set value.
1958 */ 1839 */
1959 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1840 do_div(val, GT_FREQUENCY_MULTIPLIER);
1960 1841 dev_priv->rps.max_delay = val;
1961 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1842 gen6_set_rps(dev, val);
1962 mutex_unlock(&dev_priv->rps.hw_lock); 1843 mutex_unlock(&dev_priv->rps.hw_lock);
1963 1844
1964 return cnt; 1845 return 0;
1965} 1846}
1966 1847
1967static const struct file_operations i915_max_freq_fops = { 1848DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
1968 .owner = THIS_MODULE, 1849 i915_max_freq_get, i915_max_freq_set,
1969 .open = simple_open, 1850 "%llu\n");
1970 .read = i915_max_freq_read,
1971 .write = i915_max_freq_write,
1972 .llseek = default_llseek,
1973};
1974 1851
1975static ssize_t 1852static int
1976i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, 1853i915_min_freq_get(void *data, u64 *val)
1977 loff_t *ppos)
1978{ 1854{
1979 struct drm_device *dev = filp->private_data; 1855 struct drm_device *dev = data;
1980 drm_i915_private_t *dev_priv = dev->dev_private; 1856 drm_i915_private_t *dev_priv = dev->dev_private;
1981 char buf[80]; 1857 int ret;
1982 int len, ret;
1983 1858
1984 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1859 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1985 return -ENODEV; 1860 return -ENODEV;
@@ -1988,40 +1863,23 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1988 if (ret) 1863 if (ret)
1989 return ret; 1864 return ret;
1990 1865
1991 len = snprintf(buf, sizeof(buf), 1866 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
1992 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1993 mutex_unlock(&dev_priv->rps.hw_lock); 1867 mutex_unlock(&dev_priv->rps.hw_lock);
1994 1868
1995 if (len > sizeof(buf)) 1869 return 0;
1996 len = sizeof(buf);
1997
1998 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1999} 1870}
2000 1871
2001static ssize_t 1872static int
2002i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, 1873i915_min_freq_set(void *data, u64 val)
2003 loff_t *ppos)
2004{ 1874{
2005 struct drm_device *dev = filp->private_data; 1875 struct drm_device *dev = data;
2006 struct drm_i915_private *dev_priv = dev->dev_private; 1876 struct drm_i915_private *dev_priv = dev->dev_private;
2007 char buf[20]; 1877 int ret;
2008 int val = 1, ret;
2009 1878
2010 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1879 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2011 return -ENODEV; 1880 return -ENODEV;
2012 1881
2013 if (cnt > 0) { 1882 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2014 if (cnt > sizeof(buf) - 1)
2015 return -EINVAL;
2016
2017 if (copy_from_user(buf, ubuf, cnt))
2018 return -EFAULT;
2019 buf[cnt] = 0;
2020
2021 val = simple_strtoul(buf, NULL, 0);
2022 }
2023
2024 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
2025 1883
2026 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1884 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2027 if (ret) 1885 if (ret)
@@ -2030,33 +1888,25 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
2030 /* 1888 /*
2031 * Turbo will still be enabled, but won't go below the set value. 1889 * Turbo will still be enabled, but won't go below the set value.
2032 */ 1890 */
2033 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1891 do_div(val, GT_FREQUENCY_MULTIPLIER);
2034 1892 dev_priv->rps.min_delay = val;
2035 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1893 gen6_set_rps(dev, val);
2036 mutex_unlock(&dev_priv->rps.hw_lock); 1894 mutex_unlock(&dev_priv->rps.hw_lock);
2037 1895
2038 return cnt; 1896 return 0;
2039} 1897}
2040 1898
2041static const struct file_operations i915_min_freq_fops = { 1899DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
2042 .owner = THIS_MODULE, 1900 i915_min_freq_get, i915_min_freq_set,
2043 .open = simple_open, 1901 "%llu\n");
2044 .read = i915_min_freq_read,
2045 .write = i915_min_freq_write,
2046 .llseek = default_llseek,
2047};
2048 1902
2049static ssize_t 1903static int
2050i915_cache_sharing_read(struct file *filp, 1904i915_cache_sharing_get(void *data, u64 *val)
2051 char __user *ubuf,
2052 size_t max,
2053 loff_t *ppos)
2054{ 1905{
2055 struct drm_device *dev = filp->private_data; 1906 struct drm_device *dev = data;
2056 drm_i915_private_t *dev_priv = dev->dev_private; 1907 drm_i915_private_t *dev_priv = dev->dev_private;
2057 char buf[80];
2058 u32 snpcr; 1908 u32 snpcr;
2059 int len, ret; 1909 int ret;
2060 1910
2061 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1911 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2062 return -ENODEV; 1912 return -ENODEV;
@@ -2068,46 +1918,25 @@ i915_cache_sharing_read(struct file *filp,
2068 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1918 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2069 mutex_unlock(&dev_priv->dev->struct_mutex); 1919 mutex_unlock(&dev_priv->dev->struct_mutex);
2070 1920
2071 len = snprintf(buf, sizeof(buf), 1921 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
2072 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
2073 GEN6_MBC_SNPCR_SHIFT);
2074 1922
2075 if (len > sizeof(buf)) 1923 return 0;
2076 len = sizeof(buf);
2077
2078 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
2079} 1924}
2080 1925
2081static ssize_t 1926static int
2082i915_cache_sharing_write(struct file *filp, 1927i915_cache_sharing_set(void *data, u64 val)
2083 const char __user *ubuf,
2084 size_t cnt,
2085 loff_t *ppos)
2086{ 1928{
2087 struct drm_device *dev = filp->private_data; 1929 struct drm_device *dev = data;
2088 struct drm_i915_private *dev_priv = dev->dev_private; 1930 struct drm_i915_private *dev_priv = dev->dev_private;
2089 char buf[20];
2090 u32 snpcr; 1931 u32 snpcr;
2091 int val = 1;
2092 1932
2093 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1933 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2094 return -ENODEV; 1934 return -ENODEV;
2095 1935
2096 if (cnt > 0) { 1936 if (val > 3)
2097 if (cnt > sizeof(buf) - 1)
2098 return -EINVAL;
2099
2100 if (copy_from_user(buf, ubuf, cnt))
2101 return -EFAULT;
2102 buf[cnt] = 0;
2103
2104 val = simple_strtoul(buf, NULL, 0);
2105 }
2106
2107 if (val < 0 || val > 3)
2108 return -EINVAL; 1937 return -EINVAL;
2109 1938
2110 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1939 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
2111 1940
2112 /* Update the cache sharing policy here as well */ 1941 /* Update the cache sharing policy here as well */
2113 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1942 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
@@ -2115,16 +1944,12 @@ i915_cache_sharing_write(struct file *filp,
2115 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1944 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
2116 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1945 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2117 1946
2118 return cnt; 1947 return 0;
2119} 1948}
2120 1949
2121static const struct file_operations i915_cache_sharing_fops = { 1950DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2122 .owner = THIS_MODULE, 1951 i915_cache_sharing_get, i915_cache_sharing_set,
2123 .open = simple_open, 1952 "%llu\n");
2124 .read = i915_cache_sharing_read,
2125 .write = i915_cache_sharing_write,
2126 .llseek = default_llseek,
2127};
2128 1953
2129/* As the drm_debugfs_init() routines are called before dev->dev_private is 1954/* As the drm_debugfs_init() routines are called before dev->dev_private is
2130 * allocated we need to hook into the minor for release. */ 1955 * allocated we need to hook into the minor for release. */
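
Every debugfs conversion in this file is the same refactor: a hand-rolled read()/write() pair that snprintf'd and strtoul'd a single integer becomes a u64 get/set callback pair, and DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations (open, the string formatting and parsing, llseek) from the printf-style template; that is what deletes all the buf[20]/buf[80] boilerplate above. Where a setter divides, do_div() replaces plain '/', since val is now a u64 and 64-bit division needs the helper on 32-bit kernels. A self-contained sketch of the pattern:

	#include <linux/debugfs.h>
	#include <linux/fs.h>

	static u64 example_value;

	static int example_get(void *data, u64 *val)
	{
		*val = example_value;
		return 0;
	}

	static int example_set(void *data, u64 val)
	{
		example_value = val;
		return 0;
	}

	DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set,
				"0x%08llx\n");

	/* registered with, e.g.:
	 * debugfs_create_file("example", 0644, parent, NULL, &example_fops);
	 */
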
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4fa6beb14c77..3b315ba85a3e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		dev_priv->mm.suspended = 0;
+		return 0;
+	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1453,6 +1457,22 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 }
 
 /**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev))
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+/**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
  * @flags: startup flags
@@ -1498,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+	 * the register BAR remains the same size for all the earlier
+	 * generations up to Ironlake.
+	 */
+	if (info->gen < 5)
+		mmio_size = 512*1024;
+	else
+		mmio_size = 2*1024*1024;
+
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (!dev_priv->regs) {
+		DRM_ERROR("failed to map registers\n");
+		ret = -EIO;
+		goto put_bridge;
+	}
+
+	intel_early_sanitize_regs(dev);
+
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
 		goto put_bridge;
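Note: a sketch of the capped pci_iomap() idiom the relocated block uses; map_regs() is a hypothetical helper, while the BAR index and sizes mirror the comment above:

	#include <linux/pci.h>

	static void __iomem *map_regs(struct pci_dev *pdev, bool is_gen2, int gen)
	{
		int bar = is_gen2 ? 1 : 0;	/* register BAR moved after gen2 */
		unsigned long size = gen < 5 ? 512 * 1024 : 2 * 1024 * 1024;

		/* A non-zero maxlen caps the mapping so it cannot spill into
		 * the GTT half of the shared BAR on gen4+. */
		return pci_iomap(pdev, bar, size);
	}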
@@ -1522,26 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	/* Before gen4, the registers and the GTT are behind different BARs.
-	 * However, from gen4 onwards, the registers and the GTT are shared
-	 * in the same BAR, so we want to restrict this ioremap from
-	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-	 * the register BAR remains the same size for all the earlier
-	 * generations up to Ironlake.
-	 */
-	if (info->gen < 5)
-		mmio_size = 512*1024;
-	else
-		mmio_size = 2*1024*1024;
-
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-	if (!dev_priv->regs) {
-		DRM_ERROR("failed to map registers\n");
-		ret = -EIO;
-		goto put_gmch;
-	}
-
 	aperture_size = dev_priv->gtt.mappable_end;
 
 	dev_priv->gtt.mappable =
@@ -1612,16 +1634,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-		dev_priv->num_pipe = 3;
-	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
-		dev_priv->num_pipe = 2;
-	else
-		dev_priv->num_pipe = 1;
+	dev_priv->num_plane = 1;
+	if (IS_VALLEYVIEW(dev))
+		dev_priv->num_plane = 2;
 
-	ret = drm_vblank_init(dev, dev_priv->num_pipe);
-	if (ret)
-		goto out_gem_unload;
+	if (INTEL_INFO(dev)->num_pipes) {
+		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+		if (ret)
+			goto out_gem_unload;
+	}
 
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
@@ -1636,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	i915_setup_sysfs(dev);
 
-	/* Must be done after probing outputs */
-	intel_opregion_init(dev);
-	acpi_video_register();
+	if (INTEL_INFO(dev)->num_pipes) {
+		/* Must be done after probing outputs */
+		intel_opregion_init(dev);
+		acpi_video_register();
+	}
 
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
@@ -1663,10 +1686,9 @@ out_mtrrfree:
 		dev_priv->mm.gtt_mtrr = -1;
 	}
 	io_mapping_free(dev_priv->gtt.mappable);
+	dev_priv->gtt.gtt_remove(dev);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
-put_gmch:
-	dev_priv->gtt.gtt_remove(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
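Note: the label shuffle above follows the usual goto-unwind rule: cleanup labels mirror acquisition order in reverse, so a resource that is now acquired earlier is released later in the chain. A compilable sketch with stub functions (all names illustrative, not i915 code):

	static int map_registers(void) { return 0; }
	static void unmap_registers(void) { }
	static int init_gtt(void) { return 0; }
	static void remove_gtt(void) { }
	static int init_rest(void) { return 0; }
	static void release_bridge(void) { }

	static int example_probe(void)
	{
		int ret;

		ret = map_registers();	/* acquired before the GTT now */
		if (ret)
			goto put_bridge;

		ret = init_gtt();
		if (ret)
			goto out_rmmap;	/* registers already mapped: unmap */

		ret = init_rest();
		if (ret)
			goto out_gtt;	/* tear down in reverse order */

		return 0;

	out_gtt:
		remove_gtt();
	out_rmmap:
		unmap_registers();
	put_bridge:
		release_bridge();
		return ret;
	}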
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9b57893db2b..9ebe895c17d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -121,9 +121,7 @@ MODULE_PARM_DESC(i915_enable_ppgtt,
 unsigned int i915_preliminary_hw_support __read_mostly = 0;
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support. "
-		"Enable Haswell and ValleyView Support. "
-		"(default: false)");
+		"Enable preliminary hardware support. (default: false)");
 
 int i915_disable_power_well __read_mostly = 0;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -142,75 +140,85 @@ extern int intel_agp_enabled;
 		     .subdevice = PCI_ANY_ID,		\
 		     .driver_data = (unsigned long) info }
 
+#define INTEL_QUANTA_VGA_DEVICE(info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = 0x8086,			\
+	.device = 0x16a,			\
+	.subvendor = 0x152d,			\
+	.subdevice = 0x8990,			\
+	.driver_data = (unsigned long) info }
+
+
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2,
+	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2,
+	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_mobile = 1,
+	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1,
+	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1,
+	.gen = 4, .is_crestline = 1, .num_pipes = 2,
 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1,
+	.gen = 3, .is_g33 = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_g4x = 1,
+	.gen = 4, .is_g4x = 1, .num_pipes = 2,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
@@ -218,26 +226,26 @@ static const struct intel_device_info intel_gm45_info = {
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5,
+	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_mobile = 1,
+	.gen = 5, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6,
+	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
@@ -246,7 +254,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_mobile = 1,
+	.gen = 6, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.has_bsd_ring = 1,
@@ -255,61 +263,57 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_force_wake = 1,
 };
 
+#define GEN7_FEATURES  \
+	.gen = 7, .num_pipes = 3, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_bsd_ring = 1, \
+	.has_blt_ring = 1, \
+	.has_llc = 1, \
+	.has_force_wake = 1
+
 static const struct intel_device_info intel_ivybridge_d_info = {
-	.is_ivybridge = 1, .gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
-	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.num_pipes = 0, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
-	.gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	GEN7_FEATURES,
+	.is_mobile = 1,
+	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_llc = 0, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
-	.gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	GEN7_FEATURES,
+	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_llc = 0, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
-	.is_haswell = 1, .gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_haswell = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
-	.is_haswell = 1, .gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_haswell = 1,
+	.is_mobile = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
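Note: the GEN7_FEATURES pattern marked "legal, last one wins" relies on the C99 rule that when designated initializers name the same member more than once, the final one takes effect. A standalone illustration (struct info and BASE_FEATURES are hypothetical):

	#include <stdio.h>

	struct info { int gen; int num_pipes; };

	#define BASE_FEATURES .gen = 7, .num_pipes = 3

	static const struct info quanta = {
		BASE_FEATURES,
		.num_pipes = 0,	/* overrides the 3 from BASE_FEATURES */
	};

	int main(void)
	{
		printf("gen=%d pipes=%d\n", quanta.gen, quanta.num_pipes);
		return 0;	/* prints: gen=7 pipes=0 */
	}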
@@ -356,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
 	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
 	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
@@ -394,6 +399,9 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
 	{0, 0, 0}
@@ -408,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pch;
 
+	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+	 * (which really amounts to a PCH but no South Display).
+	 */
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		dev_priv->pch_type = PCH_NOP;
+		dev_priv->num_pch_pll = 0;
+		return;
+	}
+
 	/*
 	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
 	 * make graphics device passthrough work easy for VMM, that only
@@ -442,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev)
 			dev_priv->num_pch_pll = 0;
 			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 			WARN_ON(!IS_HASWELL(dev));
+			WARN_ON(IS_ULT(dev));
 		} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_LPT;
 			dev_priv->num_pch_pll = 0;
 			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 			WARN_ON(!IS_HASWELL(dev));
+			WARN_ON(!IS_ULT(dev));
 		}
 		BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 	}
@@ -474,6 +493,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
@@ -497,10 +517,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 
-		intel_modeset_disable(dev);
-
 		drm_irq_uninstall(dev);
 		dev_priv->enable_hotplug_processing = false;
+		/*
+		 * Disable CRTCs directly since we want to preserve sw state
+		 * for _thaw.
+		 */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+			dev_priv->display.crtc_disable(crtc);
 	}
 
 	i915_save_state(dev);
@@ -556,6 +580,24 @@ void intel_console_resume(struct work_struct *work)
 	console_unlock();
 }
 
+static void intel_resume_hotplug(struct drm_device *dev)
+{
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+		if (encoder->hot_plug)
+			encoder->hot_plug(encoder);
+
+	mutex_unlock(&mode_config->mutex);
+
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
 static int __i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -578,7 +620,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
 	drm_irq_install(dev);
 
 	intel_modeset_init_hw(dev);
-	intel_modeset_setup_hw_state(dev, false);
+
+	drm_modeset_lock_all(dev);
+	intel_modeset_setup_hw_state(dev, true);
+	drm_modeset_unlock_all(dev);
 
 	/*
 	 * ... but also need to make sure that hotplug processing
@@ -588,6 +633,8 @@ static int __i915_drm_thaw(struct drm_device *dev)
 	 * */
 		intel_hpd_init(dev);
 		dev_priv->enable_hotplug_processing = true;
+		/* Config may have changed between suspend and resume */
+		intel_resume_hotplug(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -732,6 +779,7 @@ static int ironlake_do_reset(struct drm_device *dev)
 	int ret;
 
 	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
 	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
 		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -740,6 +788,7 @@ static int ironlake_do_reset(struct drm_device *dev)
 
 	/* We can't reset render&media without also resetting display ... */
 	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
 	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
 		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -803,7 +852,7 @@ int intel_gpu_reset(struct drm_device *dev)
 
 	/* Also reset the gpu hangman. */
 	if (dev_priv->gpu_error.stop_rings) {
-		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
 		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
 			DRM_ERROR("Reset not implemented, but ignoring "
@@ -882,7 +931,11 @@ int i915_reset(struct drm_device *dev)
 		ring->init(ring);
 
 	i915_gem_context_init(dev);
-	i915_gem_init_ppgtt(dev);
+	if (dev_priv->mm.aliasing_ppgtt) {
+		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+		if (ret)
+			i915_gem_cleanup_aliasing_ppgtt(dev);
+	}
 
 	/*
 	 * It would make sense to re-init all the other hw state, at
@@ -1147,6 +1200,27 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 	I915_WRITE_NOTRACE(MI_MODE, 0);
 }
 
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \
@@ -1183,18 +1257,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	} \
 	if (IS_GEN5(dev_priv->dev)) \
 		ilk_dummy_write(dev_priv); \
-	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
-		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
-	} \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
 	write##y(val, dev_priv->regs + reg); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
-	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-		DRM_ERROR("Unclaimed write to %x\n", reg); \
-		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
-	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 01769e2a9953..d5dcf7fe1ee9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -86,6 +86,19 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
+enum hpd_pin {
+	HPD_NONE = 0,
+	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
+	HPD_CRT,
+	HPD_SDVO_B,
+	HPD_SDVO_C,
+	HPD_PORT_B,
+	HPD_PORT_C,
+	HPD_PORT_D,
+	HPD_NUM_PINS
+};
+
 #define I915_GEM_GPU_DOMAINS \
 	(I915_GEM_DOMAIN_RENDER | \
 	 I915_GEM_DOMAIN_SAMPLER | \
@@ -93,7 +106,7 @@ enum port {
 	 I915_GEM_DOMAIN_INSTRUCTION | \
 	 I915_GEM_DOMAIN_VERTEX)
 
-#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
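Note: the for_each_pipe() rewrite keeps the usual kernel iterator-macro shape and only swaps the bound. A standalone sketch of the idiom, with pipe_count standing in for INTEL_INFO(dev)->num_pipes:

	#include <stdio.h>

	static int pipe_count = 3;

	#define for_each_pipe(p) for ((p) = 0; (p) < pipe_count; (p)++)

	int main(void)
	{
		int pipe;

		for_each_pipe(pipe)
			printf("init pipe %c\n", 'A' + pipe);
		return 0;
	}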
@@ -182,9 +195,9 @@ struct drm_i915_master_private {
 	struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 16
-/* 16 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 5
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
 
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
@@ -243,7 +256,7 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer;
+	} *ringbuffer, *batchbuffer, *ctx;
 	struct drm_i915_error_request {
 		long jiffies;
 		u32 seqno;
@@ -271,6 +284,9 @@ struct drm_i915_error_state {
 	struct intel_display_error_state *display;
 };
 
+struct intel_crtc_config;
+struct intel_crtc;
+
 struct drm_i915_display_funcs {
 	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
@@ -283,9 +299,11 @@ struct drm_i915_display_funcs {
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				   struct drm_display_mode *mode);
 	void (*modeset_global_resources)(struct drm_device *dev);
+	/* Returns the active state of the crtc, and if the crtc is active,
+	 * fills out the pipe-config with the hw state. */
+	bool (*get_pipe_config)(struct intel_crtc *,
+				struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
-			     struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
 	void (*crtc_enable)(struct drm_crtc *crtc);
@@ -341,6 +359,7 @@ struct drm_i915_gt_funcs {
 
 struct intel_device_info {
 	u32 display_mmio_offset;
+	u8 num_pipes:3;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
@@ -430,6 +449,7 @@ struct i915_hw_ppgtt {
 			       struct sg_table *st,
 			       unsigned int pg_start,
 			       enum i915_cache_level cache_level);
+	int (*enable)(struct drm_device *dev);
 	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
 
@@ -460,6 +480,7 @@ enum intel_pch {
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
 	PCH_LPT,	/* Lynxpoint PCH */
+	PCH_NOP,
 };
 
 enum intel_sbi_destination {
@@ -647,6 +668,7 @@ struct intel_gen6_power_mgmt {
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 hw_max;
 
 	struct delayed_work delayed_resume_work;
 
@@ -905,16 +927,24 @@ typedef struct drm_i915_private {
 	struct mutex dpio_lock;
 
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
 
-	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
+	struct {
+		unsigned long hpd_last_jiffies;
+		int hpd_cnt;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} hpd_mark;
+	} hpd_stats[HPD_NUM_PINS];
+	struct timer_list hotplug_reenable_timer;
 
-	int num_pipe;
 	int num_pch_pll;
+	int num_plane;
 
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
@@ -928,9 +958,14 @@ typedef struct drm_i915_private {
 	struct intel_overlay *overlay;
 	unsigned int sprite_scaling_enabled;
 
+	/* backlight */
+	struct {
+		int level;
+		bool enabled;
+		struct backlight_device *device;
+	} backlight;
+
 	/* LVDS info */
-	int backlight_level;  /* restore backlight to this value */
-	bool backlight_enabled;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
@@ -941,6 +976,7 @@ typedef struct drm_i915_private {
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
+	unsigned int fdi_rx_polarity_inverted:1;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 	struct {
@@ -1032,8 +1068,6 @@ typedef struct drm_i915_private {
 	 */
 	struct work_struct console_resume_work;
 
-	struct backlight_device *backlight;
-
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
@@ -1340,6 +1374,7 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define HAS_DDI(dev)		(IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1352,6 +1387,7 @@ struct drm_i915_file_private {
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
@@ -1529,17 +1565,12 @@ void i915_gem_lastclose(struct drm_device *dev);
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-	struct scatterlist *sg = obj->pages->sgl;
-	int nents = obj->pages->nents;
-	while (nents > SG_MAX_SINGLE_ALLOC) {
-		if (n < SG_MAX_SINGLE_ALLOC - 1)
-			break;
-
-		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-		n -= SG_MAX_SINGLE_ALLOC - 1;
-		nents -= SG_MAX_SINGLE_ALLOC - 1;
-	}
-	return sg_page(sg+n);
+	struct sg_page_iter sg_iter;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+		return sg_page_iter_page(&sg_iter);
+
+	return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
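Note: the new i915_gem_object_get_page() leans on the sg_page_iter API, which walks individual pages across chained scatterlist entries so callers no longer deal with SG_MAX_SINGLE_ALLOC by hand. A sketch of the iterator; dump_first_pages() is a hypothetical helper, not part of this patch:

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	static void dump_first_pages(struct sg_table *st, int count)
	{
		struct sg_page_iter iter;
		int n = 0;

		/* Walks pages across sg entries (and chained tables)
		 * transparently, starting at page offset 0. */
		for_each_sg_page(st->sgl, &iter, st->nents, 0) {
			struct page *page = sg_page_iter_page(&iter);

			pr_info("page %d at pfn %lu\n", n, page_to_pfn(page));
			if (++n == count)
				break;
		}
	}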
@@ -1624,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_l3_remap(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
@@ -1718,6 +1748,11 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size);
 void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
@@ -1848,6 +1883,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@@ -1901,4 +1938,9 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
 	return VGACNTRL;
 }
 
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
 #endif
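Note: the double cast inside to_user_ptr() is the point of the helper: on a 32-bit kernel a direct pointer cast from a u64 ioctl argument would truncate with a warning, while going through uintptr_t makes the narrowing explicit. A userspace analogue of the same pattern:

	#include <stdint.h>

	static inline void *u64_to_ptr(uint64_t address)
	{
		/* Casting through uintptr_t makes the (possible) narrowing
		 * to a 32-bit pointer explicit rather than implicit. */
		return (void *)(uintptr_t)address;
	}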
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e207e6e0df8..6be940effefd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -411,10 +411,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	offset = args->offset;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (remain <= 0)
 			break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -522,7 +518,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return 0;
 
 	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
+		       to_user_ptr(args->data_ptr),
 		       args->size))
 		return -EFAULT;
 
@@ -613,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	if (ret)
 		goto out_unpin;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
 	offset = obj->gtt_offset + args->offset;
@@ -732,10 +728,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int i;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 		int partial_cacheline_write;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
-
 		if (remain <= 0)
 			break;
 
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -867,11 +859,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return 0;
 
 	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
+		       to_user_ptr(args->data_ptr),
 		       args->size))
 		return -EFAULT;
 
-	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+	ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
 					   args->size);
 	if (ret)
 		return -EFAULT;
@@ -1633,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
-	struct scatterlist *sg;
-	int ret, i;
+	struct sg_page_iter sg_iter;
+	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
@@ -1655,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
@@ -1757,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -1787,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
-	for_each_sg(st->sgl, sg, page_count, i) {
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
 			i915_gem_purge(dev_priv, page_count);
@@ -1810,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 
-		sg_set_page(sg, page, PAGE_SIZE, 0);
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
 	}
 
+	sg_mark_end(sg);
 	obj->pages = st;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
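Note: the rewritten loop above run-length-coalesces physically contiguous pages into a single sg entry. A standalone sketch of the same merge logic over a plain pfn array (struct run, coalesce() and the sample data are illustrative):

	#include <stdio.h>

	#define PAGE_SIZE 4096

	struct run { unsigned long start_pfn; unsigned long bytes; };

	static int coalesce(const unsigned long *pfn, int count, struct run *out)
	{
		int nents = 0;
		unsigned long last_pfn = 0;

		for (int i = 0; i < count; i++) {
			if (!i || pfn[i] != last_pfn + 1) {	/* start a new run */
				out[nents].start_pfn = pfn[i];
				out[nents].bytes = PAGE_SIZE;
				nents++;
			} else {				/* extend current run */
				out[nents - 1].bytes += PAGE_SIZE;
			}
			last_pfn = pfn[i];
		}
		return nents;
	}

	int main(void)
	{
		unsigned long pfns[] = { 10, 11, 12, 40, 41, 99 };
		struct run runs[6];
		int n = coalesce(pfns, 6, runs);

		for (int i = 0; i < n; i++)
			printf("run %d: pfn %lu, %lu bytes\n",
			       i, runs[i].start_pfn, runs[i].bytes);
		return 0;	/* prints 3 runs: 12K, 8K, 4K */
	}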
@@ -1821,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	return 0;
 
 err_pages:
-	for_each_sg(st->sgl, sg, i, page_count)
-		page_cache_release(sg_page(sg));
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
 	return PTR_ERR(page);
@@ -2123,11 +2128,11 @@ static void i915_gem_reset_fences(struct drm_device *dev)
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-		i915_gem_write_fence(dev, i, NULL);
-
 		if (reg->obj)
 			i915_gem_object_fence_lost(reg->obj);
 
+		i915_gem_write_fence(dev, i, NULL);
+
 		reg->pin_count = 0;
 		reg->obj = NULL;
 		INIT_LIST_HEAD(&reg->lru_list);
@@ -2678,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
+static void i915_gem_write_fence__ipi(void *data)
+{
+	wbinvd();
+}
+
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg = fence_number(dev_priv, fence);
+
+	/* In order to fully serialize access to the fenced region and
+	 * the update to the fence register we need to take extreme
+	 * measures on SNB+. In theory, the write to the fence register
+	 * flushes all memory transactions before, and coupled with the
+	 * mb() placed around the register write we serialise all memory
+	 * operations with respect to the changes in the tiler. Yet, on
+	 * SNB+ we need to take a step further and emit an explicit wbinvd()
+	 * on each processor in order to manually flush all memory
+	 * transactions before updating the fence register.
+	 */
+	if (HAS_LLC(obj->base.dev))
+		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = reg;
+		obj->fence_reg = fence_reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
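Note: the comment above motivates flushing every CPU's caches before the fence write; the mechanism is an IPI via on_each_cpu() with wait=1. A minimal sketch, where flush_fn and update_fence_register() are placeholders (not i915 functions) and wbinvd() is x86-specific:

	#include <linux/smp.h>
	#include <asm/special_insns.h>	/* wbinvd(), x86 only */

	static void flush_fn(void *unused)
	{
		wbinvd();	/* write back and invalidate this CPU's caches */
	}

	static void update_fence_register(void)
	{
		/* Run flush_fn on every online CPU and wait (last arg = 1)
		 * for all of them to finish before touching the register. */
		on_each_cpu(flush_fn, NULL, 1);

		/* ... now rewrite the fence register ... */
	}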
@@ -2717,6 +2740,7 @@ int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
 	int ret;
 
 	ret = i915_gem_object_wait_fence(obj);
@@ -2726,10 +2750,10 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 	if (obj->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
 
-	i915_gem_object_update_fence(obj,
-				     &dev_priv->fence_regs[obj->fence_reg],
-				     false);
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
 	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
 
 	return 0;
 }
@@ -3986,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
 		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
 
+	if (HAS_PCH_NOP(dev)) {
+		u32 temp = I915_READ(GEN7_MSG_CTL);
+		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+		I915_WRITE(GEN7_MSG_CTL, temp);
+	}
+
 	i915_gem_l3_remap(dev);
 
 	i915_gem_init_swizzling(dev);
@@ -3999,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev)
 	 * contexts before PPGTT.
 	 */
 	i915_gem_context_init(dev);
-	i915_gem_init_ppgtt(dev);
+	if (dev_priv->mm.aliasing_ppgtt) {
+		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+		if (ret) {
+			i915_gem_cleanup_aliasing_ppgtt(dev);
+			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
+		}
+	}
 
 	return 0;
 }
@@ -4010,7 +4046,16 @@ int i915_gem_init(struct drm_device *dev)
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
+
+	if (IS_VALLEYVIEW(dev)) {
+		/* VLVA0 (potential hack), BIOS isn't actually waking us */
+		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+	}
+
 	i915_gem_init_global_gtt(dev);
+
 	ret = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
@@ -4145,7 +4190,9 @@ i915_gem_load(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
 
-	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+		dev_priv->num_fence_regs = 32;
+	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
 	else
 		dev_priv->num_fence_regs = 8;
@@ -4327,7 +4374,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
4327 struct drm_file *file_priv) 4374 struct drm_file *file_priv)
4328{ 4375{
4329 void *vaddr = obj->phys_obj->handle->vaddr + args->offset; 4376 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4330 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; 4377 char __user *user_data = to_user_ptr(args->data_ptr);
4331 4378
4332 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 4379 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4333 unsigned long unwritten; 4380 unsigned long unwritten;
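
to_user_ptr() replaces the `(type __user *)(uintptr_t)` double cast repeated at every ioctl user-pointer access; the double cast is needed because the ioctl structs carry pointers as u64 fields, which must narrow through uintptr_t on 32-bit kernels without a warning. As far as I can tell, the helper added in this series is the one-liner below:

    #define to_user_ptr(x) ((void __user *)(uintptr_t)(x))
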
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 94d873a6cffb..a1e8ecb6adf6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev,
152 return ERR_PTR(-ENOMEM); 152 return ERR_PTR(-ENOMEM);
153 } 153 }
154 154
155 if (INTEL_INFO(dev)->gen >= 7) {
156 ret = i915_gem_object_set_cache_level(ctx->obj,
157 I915_CACHE_LLC_MLC);
158 if (ret)
159 goto err_out;
160 }
161
155 /* The ring associated with the context object is handled by the normal 162 /* The ring associated with the context object is handled by the normal
156 * object tracking code. We give an initial ring value simple to pass an 163 * object tracking code. We give an initial ring value simple to pass an
157 * assertion in the context switch code. 164 * assertion in the context switch code.
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af6828624..dc53a527126b 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
62 src = obj->pages->sgl; 62 src = obj->pages->sgl;
63 dst = st->sgl; 63 dst = st->sgl;
64 for (i = 0; i < obj->pages->nents; i++) { 64 for (i = 0; i < obj->pages->nents; i++) {
65 sg_set_page(dst, sg_page(src), PAGE_SIZE, 0); 65 sg_set_page(dst, sg_page(src), src->length, 0);
66 dst = sg_next(dst); 66 dst = sg_next(dst);
67 src = sg_next(src); 67 src = sg_next(src);
68 } 68 }
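
The old copy loop assumed each source scatterlist entry covered exactly one page; copying src->length instead keeps multi-page segments intact once the backing sg_table coalesces contiguous pages. A simplified model of the fix, with struct seg standing in for struct scatterlist:

    #include <assert.h>
    #include <stddef.h>

    struct seg { void *page; size_t length; };

    static void copy_segments(struct seg *dst, const struct seg *src, int nents)
    {
        for (int i = 0; i < nents; i++) {
            dst[i].page   = src[i].page;
            dst[i].length = src[i].length;  /* was hardcoded to PAGE_SIZE */
        }
    }

    int main(void)
    {
        struct seg src[2] = { { (void *)0x1000, 4096 },
                              { (void *)0x2000, 8192 } };  /* 2-page segment */
        struct seg dst[2];
        copy_segments(dst, src, 2);
        assert(dst[1].length == 8192);  /* multi-page segment preserved */
        return 0;
    }
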
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
105{ 105{
106 struct drm_i915_gem_object *obj = dma_buf->priv; 106 struct drm_i915_gem_object *obj = dma_buf->priv;
107 struct drm_device *dev = obj->base.dev; 107 struct drm_device *dev = obj->base.dev;
108 struct scatterlist *sg; 108 struct sg_page_iter sg_iter;
109 struct page **pages; 109 struct page **pages;
110 int ret, i; 110 int ret, i;
111 111
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
124 124
125 ret = -ENOMEM; 125 ret = -ENOMEM;
126 126
127 pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *)); 127 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
128 if (pages == NULL) 128 if (pages == NULL)
129 goto error; 129 goto error;
130 130
131 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) 131 i = 0;
132 pages[i] = sg_page(sg); 132 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
133 pages[i++] = sg_page_iter_page(&sg_iter);
133 134
134 obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL); 135 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
135 drm_free_large(pages); 136 drm_free_large(pages);
136 137
137 if (!obj->dma_buf_vmapping) 138 if (!obj->dma_buf_vmapping)
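
Same root cause as the map_dma_buf fix: obj->pages->nents counts segments, not pages, so once segments can span several pages an array sized by nents is too small. The fix sizes the array by the object size and lets for_each_sg_page() yield one entry per page. In numbers:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long obj_size = 8 * PAGE_SIZE;  /* 8 pages of backing store */
        unsigned long nents = 3;                 /* coalesced into 3 segments */

        unsigned long max_pages = obj_size >> PAGE_SHIFT;  /* == 8 */
        assert(max_pages >= nents);

        /* Sizing the page array by nents (the old code) overflows as soon
         * as any segment covers more than one page. */
        return 0;
    }
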
@@ -271,7 +272,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
271 * refcount on gem itself instead of f_count of dmabuf. 272 * refcount on gem itself instead of f_count of dmabuf.
272 */ 273 */
273 drm_gem_object_reference(&obj->base); 274 drm_gem_object_reference(&obj->base);
274 dma_buf_put(dma_buf);
275 return &obj->base; 275 return &obj->base;
276 } 276 }
277 } 277 }
@@ -281,6 +281,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
281 if (IS_ERR(attach)) 281 if (IS_ERR(attach))
282 return ERR_CAST(attach); 282 return ERR_CAST(attach);
283 283
284 get_dma_buf(dma_buf);
285
284 obj = i915_gem_object_alloc(dev); 286 obj = i915_gem_object_alloc(dev);
285 if (obj == NULL) { 287 if (obj == NULL) {
286 ret = -ENOMEM; 288 ret = -ENOMEM;
@@ -300,5 +302,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
300 302
301fail_detach: 303fail_detach:
302 dma_buf_detach(dma_buf, attach); 304 dma_buf_detach(dma_buf, attach);
305 dma_buf_put(dma_buf);
306
303 return ERR_PTR(ret); 307 return ERR_PTR(ret);
304} 308}
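
Two reference-count fixes land in this function: the importer now takes its own reference on the dma-buf (get_dma_buf) once the attachment exists, balancing it with dma_buf_put() on the detach error path, and the self-import fast path stops calling dma_buf_put(), since the reference passed in still belongs to the caller. The ownership rule in miniature, with a toy refcount in place of the dma-buf's file count:

    #include <assert.h>

    struct toybuf { int refs; };
    static void get_buf(struct toybuf *b) { b->refs++; }
    static void put_buf(struct toybuf *b) { b->refs--; }

    /* Import takes its own reference; on failure it must drop only that. */
    static int import(struct toybuf *b, int fail)
    {
        get_buf(b);          /* the added get_dma_buf() */
        if (fail) {
            put_buf(b);      /* the added dma_buf_put() in fail_detach */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct toybuf b = { .refs = 1 };   /* caller's reference */
        import(&b, 0);
        assert(b.refs == 2);  /* caller's + importer's; caller puts its own */
        import(&b, 1);
        assert(b.refs == 2);  /* failed import leaves the count balanced */
        return 0;
    }
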
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9a48e1a2d417..117ce3813681 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
305 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 305 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
306 int remain, ret; 306 int remain, ret;
307 307
308 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 308 user_relocs = to_user_ptr(entry->relocs_ptr);
309 309
310 remain = entry->relocation_count; 310 remain = entry->relocation_count;
311 while (remain) { 311 while (remain) {
@@ -359,8 +359,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
359} 359}
360 360
361static int 361static int
362i915_gem_execbuffer_relocate(struct drm_device *dev, 362i915_gem_execbuffer_relocate(struct eb_objects *eb)
363 struct eb_objects *eb)
364{ 363{
365 struct drm_i915_gem_object *obj; 364 struct drm_i915_gem_object *obj;
366 int ret = 0; 365 int ret = 0;
@@ -475,7 +474,6 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
475 474
476static int 475static int
477i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 476i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
478 struct drm_file *file,
479 struct list_head *objects, 477 struct list_head *objects,
480 bool *need_relocs) 478 bool *need_relocs)
481{ 479{
@@ -618,7 +616,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
618 u64 invalid_offset = (u64)-1; 616 u64 invalid_offset = (u64)-1;
619 int j; 617 int j;
620 618
621 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; 619 user_relocs = to_user_ptr(exec[i].relocs_ptr);
622 620
623 if (copy_from_user(reloc+total, user_relocs, 621 if (copy_from_user(reloc+total, user_relocs,
624 exec[i].relocation_count * sizeof(*reloc))) { 622 exec[i].relocation_count * sizeof(*reloc))) {
@@ -663,7 +661,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
663 goto err; 661 goto err;
664 662
665 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 663 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
666 ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 664 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
667 if (ret) 665 if (ret)
668 goto err; 666 goto err;
669 667
@@ -736,7 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
736 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 734 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
737 735
738 for (i = 0; i < count; i++) { 736 for (i = 0; i < count; i++) {
739 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; 737 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
740 int length; /* limited by fault_in_pages_readable() */ 738 int length; /* limited by fault_in_pages_readable() */
741 739
742 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) 740 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
@@ -752,7 +750,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
752 750
753 length = exec[i].relocation_count * 751 length = exec[i].relocation_count *
754 sizeof(struct drm_i915_gem_relocation_entry); 752 sizeof(struct drm_i915_gem_relocation_entry);
755 /* we may also need to update the presumed offsets */ 753 /*
754 * We must check that the entire relocation array is safe
755 * to read, but since we may need to update the presumed
756 * offsets during execution, check for full write access.
757 */
756 if (!access_ok(VERIFY_WRITE, ptr, length)) 758 if (!access_ok(VERIFY_WRITE, ptr, length))
757 return -EFAULT; 759 return -EFAULT;
758 760
@@ -949,9 +951,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
949 } 951 }
950 952
951 if (copy_from_user(cliprects, 953 if (copy_from_user(cliprects,
952 (struct drm_clip_rect __user *)(uintptr_t) 954 to_user_ptr(args->cliprects_ptr),
953 args->cliprects_ptr, 955 sizeof(*cliprects)*args->num_cliprects)) {
954 sizeof(*cliprects)*args->num_cliprects)) {
955 ret = -EFAULT; 956 ret = -EFAULT;
956 goto pre_mutex_err; 957 goto pre_mutex_err;
957 } 958 }
@@ -986,13 +987,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
986 987
987 /* Move the objects en-masse into the GTT, evicting if necessary. */ 988 /* Move the objects en-masse into the GTT, evicting if necessary. */
988 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 989 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
989 ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 990 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
990 if (ret) 991 if (ret)
991 goto err; 992 goto err;
992 993
993 /* The objects are in their final locations, apply the relocations. */ 994 /* The objects are in their final locations, apply the relocations. */
994 if (need_relocs) 995 if (need_relocs)
995 ret = i915_gem_execbuffer_relocate(dev, eb); 996 ret = i915_gem_execbuffer_relocate(eb);
996 if (ret) { 997 if (ret) {
997 if (ret == -EFAULT) { 998 if (ret == -EFAULT) {
998 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 999 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1115,7 +1116,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1115 return -ENOMEM; 1116 return -ENOMEM;
1116 } 1117 }
1117 ret = copy_from_user(exec_list, 1118 ret = copy_from_user(exec_list,
1118 (void __user *)(uintptr_t)args->buffers_ptr, 1119 to_user_ptr(args->buffers_ptr),
1119 sizeof(*exec_list) * args->buffer_count); 1120 sizeof(*exec_list) * args->buffer_count);
1120 if (ret != 0) { 1121 if (ret != 0) {
1121 DRM_DEBUG("copy %d exec entries failed %d\n", 1122 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1154,7 +1155,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1154 for (i = 0; i < args->buffer_count; i++) 1155 for (i = 0; i < args->buffer_count; i++)
1155 exec_list[i].offset = exec2_list[i].offset; 1156 exec_list[i].offset = exec2_list[i].offset;
1156 /* ... and back out to userspace */ 1157 /* ... and back out to userspace */
1157 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1158 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1158 exec_list, 1159 exec_list,
1159 sizeof(*exec_list) * args->buffer_count); 1160 sizeof(*exec_list) * args->buffer_count);
1160 if (ret) { 1161 if (ret) {
@@ -1195,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1195 return -ENOMEM; 1196 return -ENOMEM;
1196 } 1197 }
1197 ret = copy_from_user(exec2_list, 1198 ret = copy_from_user(exec2_list,
1198 (struct drm_i915_relocation_entry __user *) 1199 to_user_ptr(args->buffers_ptr),
1199 (uintptr_t) args->buffers_ptr,
1200 sizeof(*exec2_list) * args->buffer_count); 1200 sizeof(*exec2_list) * args->buffer_count);
1201 if (ret != 0) { 1201 if (ret != 0) {
1202 DRM_DEBUG("copy %d exec entries failed %d\n", 1202 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1208,7 +1208,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1208 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1208 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1209 if (!ret) { 1209 if (!ret) {
1210 /* Copy the new buffer offsets back to the user's exec list. */ 1210 /* Copy the new buffer offsets back to the user's exec list. */
1211 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1211 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1212 exec2_list, 1212 exec2_list,
1213 sizeof(*exec2_list) * args->buffer_count); 1213 sizeof(*exec2_list) * args->buffer_count);
1214 if (ret) { 1214 if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 926a1e2dd234..dca614de71b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,7 +28,7 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31typedef uint32_t gtt_pte_t; 31typedef uint32_t gen6_gtt_pte_t;
32 32
33/* PPGTT stuff */ 33/* PPGTT stuff */
34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -44,11 +44,11 @@ typedef uint32_t gtt_pte_t;
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46 46
47static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, 47static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
48 dma_addr_t addr, 48 dma_addr_t addr,
49 enum i915_cache_level level) 49 enum i915_cache_level level)
50{ 50{
51 gtt_pte_t pte = GEN6_PTE_VALID; 51 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
52 pte |= GEN6_PTE_ADDR_ENCODE(addr); 52 pte |= GEN6_PTE_ADDR_ENCODE(addr);
53 53
54 switch (level) { 54 switch (level) {
@@ -72,18 +72,85 @@ static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
72 BUG(); 72 BUG();
73 } 73 }
74 74
75
76 return pte; 75 return pte;
77} 76}
78 77
78static int gen6_ppgtt_enable(struct drm_device *dev)
79{
80 drm_i915_private_t *dev_priv = dev->dev_private;
81 uint32_t pd_offset;
82 struct intel_ring_buffer *ring;
83 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
84 gen6_gtt_pte_t __iomem *pd_addr;
85 uint32_t pd_entry;
86 int i;
87
88 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
89 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
90 for (i = 0; i < ppgtt->num_pd_entries; i++) {
91 dma_addr_t pt_addr;
92
93 pt_addr = ppgtt->pt_dma_addr[i];
94 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
95 pd_entry |= GEN6_PDE_VALID;
96
97 writel(pd_entry, pd_addr + i);
98 }
99 readl(pd_addr);
100
101 pd_offset = ppgtt->pd_offset;
102 pd_offset /= 64; /* in cachelines, */
103 pd_offset <<= 16;
104
105 if (INTEL_INFO(dev)->gen == 6) {
106 uint32_t ecochk, gab_ctl, ecobits;
107
108 ecobits = I915_READ(GAC_ECO_BITS);
109 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
110 ECOBITS_PPGTT_CACHE64B);
111
112 gab_ctl = I915_READ(GAB_CTL);
113 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
114
115 ecochk = I915_READ(GAM_ECOCHK);
116 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
117 ECOCHK_PPGTT_CACHE64B);
118 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
119 } else if (INTEL_INFO(dev)->gen >= 7) {
120 uint32_t ecochk, ecobits;
121
122 ecobits = I915_READ(GAC_ECO_BITS);
123 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
124
125 ecochk = I915_READ(GAM_ECOCHK);
126 if (IS_HASWELL(dev)) {
127 ecochk |= ECOCHK_PPGTT_WB_HSW;
128 } else {
129 ecochk |= ECOCHK_PPGTT_LLC_IVB;
130 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
131 }
132 I915_WRITE(GAM_ECOCHK, ecochk);
133 /* GFX_MODE is per-ring on gen7+ */
134 }
135
136 for_each_ring(ring, dev_priv, i) {
137 if (INTEL_INFO(dev)->gen >= 7)
138 I915_WRITE(RING_MODE_GEN7(ring),
139 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
140
141 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
142 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
143 }
144 return 0;
145}
146
79/* PPGTT support for Sandybridge/Gen6 and later */ 147/* PPGTT support for Sandybridge/Gen6 and later */
80static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 148static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
81 unsigned first_entry, 149 unsigned first_entry,
82 unsigned num_entries) 150 unsigned num_entries)
83{ 151{
84 gtt_pte_t *pt_vaddr; 152 gen6_gtt_pte_t *pt_vaddr, scratch_pte;
85 gtt_pte_t scratch_pte; 153 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
86 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 154 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
88 unsigned last_pte, i; 155 unsigned last_pte, i;
89 156
@@ -96,7 +163,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
96 if (last_pte > I915_PPGTT_PT_ENTRIES) 163 if (last_pte > I915_PPGTT_PT_ENTRIES)
97 last_pte = I915_PPGTT_PT_ENTRIES; 164 last_pte = I915_PPGTT_PT_ENTRIES;
98 165
99 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); 166 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
100 167
101 for (i = first_pte; i < last_pte; i++) 168 for (i = first_pte; i < last_pte; i++)
102 pt_vaddr[i] = scratch_pte; 169 pt_vaddr[i] = scratch_pte;
@@ -105,7 +172,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
105 172
106 num_entries -= last_pte - first_pte; 173 num_entries -= last_pte - first_pte;
107 first_pte = 0; 174 first_pte = 0;
108 act_pd++; 175 act_pt++;
109 } 176 }
110} 177}
111 178
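
gen6_ppgtt_enable() (moved here from the old i915_gem_init_ppgtt(), with the IVB/HSW ECOCHK handling fleshed out) first writes one page-directory entry per page table through the ioremapped GSM, then issues a read-back so the posted writes are known to have landed before PP_DIR_BASE is programmed. The write-then-posting-read shape, with plain stores standing in for writel()/readl():

    #include <stdint.h>

    #define GEN6_PDE_VALID 1u

    /* Illustrative encode; the real GEN6_PDE_ADDR_ENCODE also folds the
     * high address bits down into the low dword. */
    static uint32_t pde_encode(uint64_t pt_addr)
    {
        return (uint32_t)pt_addr | GEN6_PDE_VALID;
    }

    static void write_pdes(volatile uint32_t *pd, const uint64_t *pt_addr, int n)
    {
        for (int i = 0; i < n; i++)
            pd[i] = pde_encode(pt_addr[i]);  /* writel() through the GSM */
        (void)pd[0];                         /* readl(): posting read */
    }

    int main(void)
    {
        uint32_t pd[2];
        uint64_t pt[2] = { 0x10000, 0x20000 };
        write_pdes(pd, pt, 2);
        return 0;
    }
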
@@ -114,43 +181,27 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
114 unsigned first_entry, 181 unsigned first_entry,
115 enum i915_cache_level cache_level) 182 enum i915_cache_level cache_level)
116{ 183{
117 gtt_pte_t *pt_vaddr; 184 gen6_gtt_pte_t *pt_vaddr;
118 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 185 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
119 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 186 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
120 unsigned i, j, m, segment_len; 187 struct sg_page_iter sg_iter;
121 dma_addr_t page_addr;
122 struct scatterlist *sg;
123
124 /* init sg walking */
125 sg = pages->sgl;
126 i = 0;
127 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
128 m = 0;
129
130 while (i < pages->nents) {
131 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
132
133 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
134 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
135 pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
136 cache_level);
137
138 /* grab the next page */
139 if (++m == segment_len) {
140 if (++i == pages->nents)
141 break;
142
143 sg = sg_next(sg);
144 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
145 m = 0;
146 }
147 }
148 188
149 kunmap_atomic(pt_vaddr); 189 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
190 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
191 dma_addr_t page_addr;
150 192
151 first_pte = 0; 193 page_addr = sg_page_iter_dma_address(&sg_iter);
152 act_pd++; 194 pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
195 cache_level);
196 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
197 kunmap_atomic(pt_vaddr);
198 act_pt++;
199 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
200 act_pte = 0;
201
202 }
153 } 203 }
204 kunmap_atomic(pt_vaddr);
154} 205}
155 206
156static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) 207static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
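
The rewritten insert loop keeps exactly one page table mapped at a time: it fills PTEs sequentially and, when act_pte wraps past I915_PPGTT_PT_ENTRIES, unmaps the current table and maps the next, rather than open-coding the scatterlist segment walk as before. A plain-array model of the windowed walk (like the kernel loop, it assumes a table exists for every index reached):

    #define PT_ENTRIES 4   /* I915_PPGTT_PT_ENTRIES is 1024; shrunk for demo */

    static void fill_ptes(unsigned int **pts, unsigned first, unsigned count,
                          const unsigned int *values)
    {
        unsigned pt  = first / PT_ENTRIES;
        unsigned pte = first % PT_ENTRIES;
        unsigned int *vaddr = pts[pt];       /* kmap_atomic(pt_pages[pt]) */

        for (unsigned i = 0; i < count; i++) {
            vaddr[pte] = values[i];
            if (++pte == PT_ENTRIES) {       /* crossed a table boundary */
                /* kernel: kunmap_atomic(vaddr) here */
                vaddr = pts[++pt];           /* map the next table */
                pte = 0;
            }
        }
        /* kernel: final kunmap_atomic(vaddr) */
    }

    int main(void)
    {
        unsigned int t0[PT_ENTRIES], t1[PT_ENTRIES];
        unsigned int *pts[] = { t0, t1 };
        unsigned int vals[7] = { 1, 2, 3, 4, 5, 6, 7 };
        fill_ptes(pts, 0, 7, vals);
        return t1[2] == 7 ? 0 : 1;  /* entry 6 landed in the second table */
    }
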
@@ -182,10 +233,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
182 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 233 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
183 * entries. For aliasing ppgtt support we just steal them at the end for 234 * entries. For aliasing ppgtt support we just steal them at the end for
184 * now. */ 235 * now. */
185 first_pd_entry_in_global_pt = 236 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
186 gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
187 237
188 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 238 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
239 ppgtt->enable = gen6_ppgtt_enable;
189 ppgtt->clear_range = gen6_ppgtt_clear_range; 240 ppgtt->clear_range = gen6_ppgtt_clear_range;
190 ppgtt->insert_entries = gen6_ppgtt_insert_entries; 241 ppgtt->insert_entries = gen6_ppgtt_insert_entries;
191 ppgtt->cleanup = gen6_ppgtt_cleanup; 242 ppgtt->cleanup = gen6_ppgtt_cleanup;
@@ -219,12 +270,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
219 ppgtt->pt_dma_addr[i] = pt_addr; 270 ppgtt->pt_dma_addr[i] = pt_addr;
220 } 271 }
221 272
222 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
223
224 ppgtt->clear_range(ppgtt, 0, 273 ppgtt->clear_range(ppgtt, 0,
225 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 274 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
226 275
227 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); 276 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
228 277
229 return 0; 278 return 0;
230 279
@@ -256,8 +305,13 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
256 return -ENOMEM; 305 return -ENOMEM;
257 306
258 ppgtt->dev = dev; 307 ppgtt->dev = dev;
308 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
309
310 if (INTEL_INFO(dev)->gen < 8)
311 ret = gen6_ppgtt_init(ppgtt);
312 else
313 BUG();
259 314
260 ret = gen6_ppgtt_init(ppgtt);
261 if (ret) 315 if (ret)
262 kfree(ppgtt); 316 kfree(ppgtt);
263 else 317 else
@@ -275,6 +329,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
275 return; 329 return;
276 330
277 ppgtt->cleanup(ppgtt); 331 ppgtt->cleanup(ppgtt);
332 dev_priv->mm.aliasing_ppgtt = NULL;
278} 333}
279 334
280void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 335void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
@@ -294,64 +349,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
294 obj->base.size >> PAGE_SHIFT); 349 obj->base.size >> PAGE_SHIFT);
295} 350}
296 351
297void i915_gem_init_ppgtt(struct drm_device *dev)
298{
299 drm_i915_private_t *dev_priv = dev->dev_private;
300 uint32_t pd_offset;
301 struct intel_ring_buffer *ring;
302 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
303 gtt_pte_t __iomem *pd_addr;
304 uint32_t pd_entry;
305 int i;
306
307 if (!dev_priv->mm.aliasing_ppgtt)
308 return;
309
310
311 pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
312 for (i = 0; i < ppgtt->num_pd_entries; i++) {
313 dma_addr_t pt_addr;
314
315 pt_addr = ppgtt->pt_dma_addr[i];
316 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
317 pd_entry |= GEN6_PDE_VALID;
318
319 writel(pd_entry, pd_addr + i);
320 }
321 readl(pd_addr);
322
323 pd_offset = ppgtt->pd_offset;
324 pd_offset /= 64; /* in cachelines, */
325 pd_offset <<= 16;
326
327 if (INTEL_INFO(dev)->gen == 6) {
328 uint32_t ecochk, gab_ctl, ecobits;
329
330 ecobits = I915_READ(GAC_ECO_BITS);
331 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
332
333 gab_ctl = I915_READ(GAB_CTL);
334 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
335
336 ecochk = I915_READ(GAM_ECOCHK);
337 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
338 ECOCHK_PPGTT_CACHE64B);
339 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
340 } else if (INTEL_INFO(dev)->gen >= 7) {
341 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
342 /* GFX_MODE is per-ring on gen7+ */
343 }
344
345 for_each_ring(ring, dev_priv, i) {
346 if (INTEL_INFO(dev)->gen >= 7)
347 I915_WRITE(RING_MODE_GEN7(ring),
348 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
349
350 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
351 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
352 }
353}
354
355extern int intel_iommu_gfx_mapped; 352extern int intel_iommu_gfx_mapped;
356/* Certain Gen5 chipsets require idling the GPU before 353
357 * unmapping anything from the GTT when VT-d is enabled. 354 * unmapping anything from the GTT when VT-d is enabled.
@@ -432,21 +429,16 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
432 enum i915_cache_level level) 429 enum i915_cache_level level)
433{ 430{
434 struct drm_i915_private *dev_priv = dev->dev_private; 431 struct drm_i915_private *dev_priv = dev->dev_private;
435 struct scatterlist *sg = st->sgl; 432 gen6_gtt_pte_t __iomem *gtt_entries =
436 gtt_pte_t __iomem *gtt_entries = 433 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
437 (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 434 int i = 0;
438 int unused, i = 0; 435 struct sg_page_iter sg_iter;
439 unsigned int len, m = 0;
440 dma_addr_t addr; 436 dma_addr_t addr;
441 437
442 for_each_sg(st->sgl, sg, st->nents, unused) { 438 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
443 len = sg_dma_len(sg) >> PAGE_SHIFT; 439 addr = sg_page_iter_dma_address(&sg_iter);
444 for (m = 0; m < len; m++) { 440 iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
445 addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 441 i++;
446 iowrite32(gen6_pte_encode(dev, addr, level),
447 &gtt_entries[i]);
448 i++;
449 }
450 } 442 }
451 443
452 /* XXX: This serves as a posting read to make sure that the PTE has 444 /* XXX: This serves as a posting read to make sure that the PTE has
@@ -472,8 +464,8 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
472 unsigned int num_entries) 464 unsigned int num_entries)
473{ 465{
474 struct drm_i915_private *dev_priv = dev->dev_private; 466 struct drm_i915_private *dev_priv = dev->dev_private;
475 gtt_pte_t scratch_pte; 467 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
476 gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 468 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
477 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 469 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
478 int i; 470 int i;
479 471
@@ -647,9 +639,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
647 639
648 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 640 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
649 int ret; 641 int ret;
650 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 642
651 * aperture accordingly when using aliasing ppgtt. */ 643 if (INTEL_INFO(dev)->gen <= 7) {
652 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 644 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
645 * aperture accordingly when using aliasing ppgtt. */
646 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
647 }
653 648
654 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 649 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
655 650
@@ -752,15 +747,17 @@ static int gen6_gmch_probe(struct drm_device *dev,
752 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 747 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
753 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 748 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
754 749
755 if (IS_GEN7(dev)) 750 if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
756 *stolen = gen7_get_stolen_size(snb_gmch_ctl); 751 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
757 else 752 else
758 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 753 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
759 754
760 *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; 755 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
756
757 /* For Modern GENs the PTEs and register space are split in the BAR */
758 gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
759 (pci_resource_len(dev->pdev, 0) / 2);
761 760
762 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
763 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
764 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); 761 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
765 if (!dev_priv->gtt.gsm) { 762 if (!dev_priv->gtt.gsm) {
766 DRM_ERROR("Failed to map the gtt page table\n"); 763 DRM_ERROR("Failed to map the gtt page table\n");
@@ -817,7 +814,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
817{ 814{
818 struct drm_i915_private *dev_priv = dev->dev_private; 815 struct drm_i915_private *dev_priv = dev->dev_private;
819 struct i915_gtt *gtt = &dev_priv->gtt; 816 struct i915_gtt *gtt = &dev_priv->gtt;
820 unsigned long gtt_size;
821 int ret; 817 int ret;
822 818
823 if (INTEL_INFO(dev)->gen <= 5) { 819 if (INTEL_INFO(dev)->gen <= 5) {
@@ -835,8 +831,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
835 if (ret) 831 if (ret)
836 return ret; 832 return ret;
837 833
838 gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
839
840 /* GMADR is the PCI mmio aperture into the global GTT. */ 834 /* GMADR is the PCI mmio aperture into the global GTT. */
841 DRM_INFO("Memory usable by graphics device = %zdM\n", 835 DRM_INFO("Memory usable by graphics device = %zdM\n",
842 dev_priv->gtt.total >> 20); 836 dev_priv->gtt.total >> 20);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 69d97cbac13c..130d1db27e28 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -312,6 +312,71 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
312 return NULL; 312 return NULL;
313} 313}
314 314
315struct drm_i915_gem_object *
316i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
317 u32 stolen_offset,
318 u32 gtt_offset,
319 u32 size)
320{
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 struct drm_i915_gem_object *obj;
323 struct drm_mm_node *stolen;
324
325 if (dev_priv->mm.stolen_base == 0)
326 return NULL;
327
328 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
329 stolen_offset, gtt_offset, size);
330
331 /* KISS and expect everything to be page-aligned */
332 BUG_ON(stolen_offset & 4095);
333 BUG_ON(gtt_offset & 4095);
334 BUG_ON(size & 4095);
335
336 if (WARN_ON(size == 0))
337 return NULL;
338
339 stolen = drm_mm_create_block(&dev_priv->mm.stolen,
340 stolen_offset, size,
341 false);
342 if (stolen == NULL) {
343 DRM_DEBUG_KMS("failed to allocate stolen space\n");
344 return NULL;
345 }
346
347 obj = _i915_gem_object_create_stolen(dev, stolen);
348 if (obj == NULL) {
349 DRM_DEBUG_KMS("failed to allocate stolen object\n");
350 drm_mm_put_block(stolen);
351 return NULL;
352 }
353
354 /* To simplify the initialisation sequence between KMS and GTT,
355 * we allow construction of the stolen object prior to
356 * setting up the GTT space. The actual reservation will occur
357 * later.
358 */
359 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
360 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
361 gtt_offset, size,
362 false);
363 if (obj->gtt_space == NULL) {
364 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
365 drm_gem_object_unreference(&obj->base);
366 return NULL;
367 }
368 } else
369 obj->gtt_space = I915_GTT_RESERVED;
370
371 obj->gtt_offset = gtt_offset;
372 obj->has_global_gtt_mapping = 1;
373
374 list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
375 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
376
377 return obj;
378}
379
315void 380void
316i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 381i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
317{ 382{
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index abcba2f5a788..537545be69db 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -217,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
217 tile_width = 512; 217 tile_width = 512;
218 218
219 /* check maximum stride & object size */ 219 /* check maximum stride & object size */
220 if (INTEL_INFO(dev)->gen >= 4) { 220 /* i965+ stores the end address of the gtt mapping in the fence
221 /* i965 stores the end address of the gtt mapping in the fence 221 * reg, so dont bother to check the size */
222 * reg, so dont bother to check the size */ 222 if (INTEL_INFO(dev)->gen >= 7) {
223 if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
224 return false;
225 } else if (INTEL_INFO(dev)->gen >= 4) {
223 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 226 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
224 return false; 227 return false;
225 } else { 228 } else {
@@ -235,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
235 } 238 }
236 } 239 }
237 240
241 if (stride < tile_width)
242 return false;
243
238 /* 965+ just needs multiples of tile width */ 244 /* 965+ just needs multiples of tile width */
239 if (INTEL_INFO(dev)->gen >= 4) { 245 if (INTEL_INFO(dev)->gen >= 4) {
240 if (stride & (tile_width - 1)) 246 if (stride & (tile_width - 1))
@@ -243,9 +249,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
243 } 249 }
244 250
245 /* Pre-965 needs power of two tile widths */ 251 /* Pre-965 needs power of two tile widths */
246 if (stride < tile_width)
247 return false;
248
249 if (stride & (stride - 1)) 252 if (stride & (stride - 1))
250 return false; 253 return false;
251 254
@@ -473,28 +476,29 @@ i915_gem_swizzle_page(struct page *page)
473void 476void
474i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 477i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
475{ 478{
476 struct scatterlist *sg; 479 struct sg_page_iter sg_iter;
477 int page_count = obj->base.size >> PAGE_SHIFT;
478 int i; 480 int i;
479 481
480 if (obj->bit_17 == NULL) 482 if (obj->bit_17 == NULL)
481 return; 483 return;
482 484
483 for_each_sg(obj->pages->sgl, sg, page_count, i) { 485 i = 0;
484 struct page *page = sg_page(sg); 486 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
487 struct page *page = sg_page_iter_page(&sg_iter);
485 char new_bit_17 = page_to_phys(page) >> 17; 488 char new_bit_17 = page_to_phys(page) >> 17;
486 if ((new_bit_17 & 0x1) != 489 if ((new_bit_17 & 0x1) !=
487 (test_bit(i, obj->bit_17) != 0)) { 490 (test_bit(i, obj->bit_17) != 0)) {
488 i915_gem_swizzle_page(page); 491 i915_gem_swizzle_page(page);
489 set_page_dirty(page); 492 set_page_dirty(page);
490 } 493 }
494 i++;
491 } 495 }
492} 496}
493 497
494void 498void
495i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 499i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
496{ 500{
497 struct scatterlist *sg; 501 struct sg_page_iter sg_iter;
498 int page_count = obj->base.size >> PAGE_SHIFT; 502 int page_count = obj->base.size >> PAGE_SHIFT;
499 int i; 503 int i;
500 504
@@ -508,11 +512,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
508 } 512 }
509 } 513 }
510 514
511 for_each_sg(obj->pages->sgl, sg, page_count, i) { 515 i = 0;
512 struct page *page = sg_page(sg); 516 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
513 if (page_to_phys(page) & (1 << 17)) 517 if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
514 __set_bit(i, obj->bit_17); 518 __set_bit(i, obj->bit_17);
515 else 519 else
516 __clear_bit(i, obj->bit_17); 520 __clear_bit(i, obj->bit_17);
521 i++;
517 } 522 }
518} 523}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3c7bb0410b51..0aa2ef0d2ae0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -36,6 +36,61 @@
36#include "i915_trace.h" 36#include "i915_trace.h"
37#include "intel_drv.h" 37#include "intel_drv.h"
38 38
39static const u32 hpd_ibx[] = {
40 [HPD_CRT] = SDE_CRT_HOTPLUG,
41 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45};
46
47static const u32 hpd_cpt[] = {
48 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53};
54
55static const u32 hpd_mask_i915[] = {
56 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62};
63
64static const u32 hpd_status_gen4[] = {
65 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71};
72
73static const u32 hpd_status_i965[] = {
74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80};
81
82static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89};
90
91static void ibx_hpd_irq_setup(struct drm_device *dev);
92static void i915_hpd_irq_setup(struct drm_device *dev);
93
39/* For display hotplug interrupt */ 94/* For display hotplug interrupt */
40static void 95static void
41ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 96ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
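
These tables map each hotplug (HPD) pin to the register bit that reports it on a given platform; designated initializers keep the pin enum as the array index, and the interrupt handlers below test hpd[i] & hotplug_trigger per pin. The lookup pattern in isolation (pin names and bit positions here are illustrative):

    #include <stdio.h>

    enum hpd_pin { HPD_NONE, HPD_CRT, HPD_PORT_B, HPD_PORT_C, HPD_NUM_PINS };

    /* Designated initializers: sparse, order-independent pin->bit table. */
    static const unsigned int hpd_bits[HPD_NUM_PINS] = {
        [HPD_CRT]    = 1u << 11,
        [HPD_PORT_B] = 1u << 0,
        [HPD_PORT_C] = 1u << 1,
    };

    int main(void)
    {
        unsigned int trigger = (1u << 0) | (1u << 11);  /* CRT + port B */
        for (int i = HPD_NONE + 1; i < HPD_NUM_PINS; i++)
            if (hpd_bits[i] & trigger)
                printf("pin %d triggered\n", i);
        return 0;
    }
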
@@ -47,7 +102,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
47 } 102 }
48} 103}
49 104
50static inline void 105static void
51ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 106ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
52{ 107{
53 if ((dev_priv->irq_mask & mask) != mask) { 108 if ((dev_priv->irq_mask & mask) != mask) {
@@ -60,26 +115,30 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
60void 115void
61i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 116i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
62{ 117{
63 if ((dev_priv->pipestat[pipe] & mask) != mask) { 118 u32 reg = PIPESTAT(pipe);
64 u32 reg = PIPESTAT(pipe); 119 u32 pipestat = I915_READ(reg) & 0x7fff0000;
65 120
66 dev_priv->pipestat[pipe] |= mask; 121 if ((pipestat & mask) == mask)
67 /* Enable the interrupt, clear any pending status */ 122 return;
68 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 123
69 POSTING_READ(reg); 124 /* Enable the interrupt, clear any pending status */
70 } 125 pipestat |= mask | (mask >> 16);
126 I915_WRITE(reg, pipestat);
127 POSTING_READ(reg);
71} 128}
72 129
73void 130void
74i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 131i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
75{ 132{
76 if ((dev_priv->pipestat[pipe] & mask) != 0) { 133 u32 reg = PIPESTAT(pipe);
77 u32 reg = PIPESTAT(pipe); 134 u32 pipestat = I915_READ(reg) & 0x7fff0000;
78 135
79 dev_priv->pipestat[pipe] &= ~mask; 136 if ((pipestat & mask) == 0)
80 I915_WRITE(reg, dev_priv->pipestat[pipe]); 137 return;
81 POSTING_READ(reg); 138
82 } 139 pipestat &= ~mask;
140 I915_WRITE(reg, pipestat);
141 POSTING_READ(reg);
83} 142}
84 143
85/** 144/**
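
PIPESTAT keeps interrupt-enable bits in its high half and sticky status bits in the low half; the rewrite reads the live enables from hardware (masked with 0x7fff0000) instead of trusting a software shadow, and `mask | (mask >> 16)` both sets an enable bit and write-1-clears the corresponding status bit in a single store. The bit arithmetic:

    #include <assert.h>

    int main(void)
    {
        unsigned int mask = 1u << 20;   /* an enable bit in the high half */
        unsigned int reg  = 0;          /* PIPESTAT value to be written */

        /* Enable the interrupt and clear its pending status (bit 4). */
        reg = (reg & 0x7fff0000) | mask | (mask >> 16);
        assert(reg & (1u << 20));       /* enable set */
        assert(reg & (1u << 4));        /* status bit: write-1-to-clear */
        return 0;
    }
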
@@ -250,10 +309,9 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
250 struct timeval *vblank_time, 309 struct timeval *vblank_time,
251 unsigned flags) 310 unsigned flags)
252{ 311{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct drm_crtc *crtc; 312 struct drm_crtc *crtc;
255 313
256 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 314 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
257 DRM_ERROR("Invalid crtc %d\n", pipe); 315 DRM_ERROR("Invalid crtc %d\n", pipe);
258 return -EINVAL; 316 return -EINVAL;
259 } 317 }
@@ -279,13 +337,19 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
279/* 337/*
280 * Handle hotplug events outside the interrupt handler proper. 338 * Handle hotplug events outside the interrupt handler proper.
281 */ 339 */
340#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
341
282static void i915_hotplug_work_func(struct work_struct *work) 342static void i915_hotplug_work_func(struct work_struct *work)
283{ 343{
284 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 344 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
285 hotplug_work); 345 hotplug_work);
286 struct drm_device *dev = dev_priv->dev; 346 struct drm_device *dev = dev_priv->dev;
287 struct drm_mode_config *mode_config = &dev->mode_config; 347 struct drm_mode_config *mode_config = &dev->mode_config;
288 struct intel_encoder *encoder; 348 struct intel_connector *intel_connector;
349 struct intel_encoder *intel_encoder;
350 struct drm_connector *connector;
351 unsigned long irqflags;
352 bool hpd_disabled = false;
289 353
290 /* HPD irq before everything is fully set up. */ 354 /* HPD irq before everything is fully set up. */
291 if (!dev_priv->enable_hotplug_processing) 355 if (!dev_priv->enable_hotplug_processing)
@@ -294,9 +358,36 @@ static void i915_hotplug_work_func(struct work_struct *work)
294 mutex_lock(&mode_config->mutex); 358 mutex_lock(&mode_config->mutex);
295 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 359 DRM_DEBUG_KMS("running encoder hotplug functions\n");
296 360
297 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 361 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
298 if (encoder->hot_plug) 362 list_for_each_entry(connector, &mode_config->connector_list, head) {
299 encoder->hot_plug(encoder); 363 intel_connector = to_intel_connector(connector);
364 intel_encoder = intel_connector->encoder;
365 if (intel_encoder->hpd_pin > HPD_NONE &&
366 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
367 connector->polled == DRM_CONNECTOR_POLL_HPD) {
368 DRM_INFO("HPD interrupt storm detected on connector %s: "
369 "switching from hotplug detection to polling\n",
370 drm_get_connector_name(connector));
371 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
372 connector->polled = DRM_CONNECTOR_POLL_CONNECT
373 | DRM_CONNECTOR_POLL_DISCONNECT;
374 hpd_disabled = true;
375 }
376 }
377 /* if there were no outputs to poll, poll was disabled,
378 * therefore make sure it's enabled when disabling HPD on
379 * some connectors */
380 if (hpd_disabled) {
381 drm_kms_helper_poll_enable(dev);
382 mod_timer(&dev_priv->hotplug_reenable_timer,
383 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
384 }
385
386 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
387
388 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
389 if (intel_encoder->hot_plug)
390 intel_encoder->hot_plug(intel_encoder);
300 391
301 mutex_unlock(&mode_config->mutex); 392 mutex_unlock(&mode_config->mutex);
302 393
@@ -525,6 +616,45 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
525 queue_work(dev_priv->wq, &dev_priv->rps.work); 616 queue_work(dev_priv->wq, &dev_priv->rps.work);
526} 617}
527 618
619#define HPD_STORM_DETECT_PERIOD 1000
620#define HPD_STORM_THRESHOLD 5
621
622static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
623 u32 hotplug_trigger,
624 const u32 *hpd)
625{
626 drm_i915_private_t *dev_priv = dev->dev_private;
627 unsigned long irqflags;
628 int i;
629 bool ret = false;
630
631 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
632
633 for (i = 1; i < HPD_NUM_PINS; i++) {
634
635 if (!(hpd[i] & hotplug_trigger) ||
636 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
637 continue;
638
639 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
640 dev_priv->hpd_stats[i].hpd_last_jiffies
641 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
642 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
643 dev_priv->hpd_stats[i].hpd_cnt = 0;
644 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
645 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
646 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
647 ret = true;
648 } else {
649 dev_priv->hpd_stats[i].hpd_cnt++;
650 }
651 }
652
653 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
654
655 return ret;
656}
657
528static void gmbus_irq_handler(struct drm_device *dev) 658static void gmbus_irq_handler(struct drm_device *dev)
529{ 659{
530 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 660 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -593,13 +723,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
593 /* Consume port. Then clear IIR or we'll miss events */ 723 /* Consume port. Then clear IIR or we'll miss events */
594 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 724 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
595 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 725 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
726 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
596 727
597 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 728 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
598 hotplug_status); 729 hotplug_status);
599 if (hotplug_status & dev_priv->hotplug_supported_mask) 730 if (hotplug_trigger) {
731 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
732 i915_hpd_irq_setup(dev);
600 queue_work(dev_priv->wq, 733 queue_work(dev_priv->wq,
601 &dev_priv->hotplug_work); 734 &dev_priv->hotplug_work);
602 735 }
603 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 736 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
604 I915_READ(PORT_HOTPLUG_STAT); 737 I915_READ(PORT_HOTPLUG_STAT);
605 } 738 }
@@ -623,10 +756,13 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
623{ 756{
624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 757 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
625 int pipe; 758 int pipe;
759 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
626 760
627 if (pch_iir & SDE_HOTPLUG_MASK) 761 if (hotplug_trigger) {
762 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
763 ibx_hpd_irq_setup(dev);
628 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 764 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
629 765 }
630 if (pch_iir & SDE_AUDIO_POWER_MASK) 766 if (pch_iir & SDE_AUDIO_POWER_MASK)
631 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 767 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
632 (pch_iir & SDE_AUDIO_POWER_MASK) >> 768 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -669,10 +805,13 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
669{ 805{
670 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 806 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
671 int pipe; 807 int pipe;
808 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
672 809
673 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 810 if (hotplug_trigger) {
811 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
812 ibx_hpd_irq_setup(dev);
674 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 813 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
675 814 }
676 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 815 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
677 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 816 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
678 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 817 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -701,7 +840,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
701{ 840{
702 struct drm_device *dev = (struct drm_device *) arg; 841 struct drm_device *dev = (struct drm_device *) arg;
703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 842 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
704 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; 843 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
705 irqreturn_t ret = IRQ_NONE; 844 irqreturn_t ret = IRQ_NONE;
706 int i; 845 int i;
707 846
@@ -716,9 +855,11 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
716 * able to process them after we restore SDEIER (as soon as we restore 855 * able to process them after we restore SDEIER (as soon as we restore
717 * it, we'll get an interrupt if SDEIIR still has something to process 856 * it, we'll get an interrupt if SDEIIR still has something to process
718 * due to its back queue). */ 857 * due to its back queue). */
719 sde_ier = I915_READ(SDEIER); 858 if (!HAS_PCH_NOP(dev)) {
720 I915_WRITE(SDEIER, 0); 859 sde_ier = I915_READ(SDEIER);
721 POSTING_READ(SDEIER); 860 I915_WRITE(SDEIER, 0);
861 POSTING_READ(SDEIER);
862 }
722 863
723 gt_iir = I915_READ(GTIIR); 864 gt_iir = I915_READ(GTIIR);
724 if (gt_iir) { 865 if (gt_iir) {
@@ -745,7 +886,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
745 } 886 }
746 887
747 /* check event from PCH */ 888 /* check event from PCH */
748 if (de_iir & DE_PCH_EVENT_IVB) { 889 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
749 u32 pch_iir = I915_READ(SDEIIR); 890 u32 pch_iir = I915_READ(SDEIIR);
750 891
751 cpt_irq_handler(dev, pch_iir); 892 cpt_irq_handler(dev, pch_iir);
@@ -768,8 +909,10 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
768 909
769 I915_WRITE(DEIER, de_ier); 910 I915_WRITE(DEIER, de_ier);
770 POSTING_READ(DEIER); 911 POSTING_READ(DEIER);
771 I915_WRITE(SDEIER, sde_ier); 912 if (!HAS_PCH_NOP(dev)) {
772 POSTING_READ(SDEIER); 913 I915_WRITE(SDEIER, sde_ier);
914 POSTING_READ(SDEIER);
915 }
773 916
774 return ret; 917 return ret;
775} 918}
@@ -937,6 +1080,8 @@ static void i915_error_work_func(struct work_struct *work)
937 for_each_ring(ring, dev_priv, i) 1080 for_each_ring(ring, dev_priv, i)
938 wake_up_all(&ring->irq_queue); 1081 wake_up_all(&ring->irq_queue);
939 1082
1083 intel_display_handle_reset(dev);
1084
940 wake_up_all(&dev_priv->gpu_error.reset_queue); 1085 wake_up_all(&dev_priv->gpu_error.reset_queue);
941 } 1086 }
942} 1087}
@@ -972,24 +1117,23 @@ static void i915_get_extra_instdone(struct drm_device *dev,
972 1117
973#ifdef CONFIG_DEBUG_FS 1118#ifdef CONFIG_DEBUG_FS
974static struct drm_i915_error_object * 1119static struct drm_i915_error_object *
975i915_error_object_create(struct drm_i915_private *dev_priv, 1120i915_error_object_create_sized(struct drm_i915_private *dev_priv,
976 struct drm_i915_gem_object *src) 1121 struct drm_i915_gem_object *src,
1122 const int num_pages)
977{ 1123{
978 struct drm_i915_error_object *dst; 1124 struct drm_i915_error_object *dst;
979 int i, count; 1125 int i;
980 u32 reloc_offset; 1126 u32 reloc_offset;
981 1127
982 if (src == NULL || src->pages == NULL) 1128 if (src == NULL || src->pages == NULL)
983 return NULL; 1129 return NULL;
984 1130
985 count = src->base.size / PAGE_SIZE; 1131 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
986
987 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
988 if (dst == NULL) 1132 if (dst == NULL)
989 return NULL; 1133 return NULL;
990 1134
991 reloc_offset = src->gtt_offset; 1135 reloc_offset = src->gtt_offset;
992 for (i = 0; i < count; i++) { 1136 for (i = 0; i < num_pages; i++) {
993 unsigned long flags; 1137 unsigned long flags;
994 void *d; 1138 void *d;
995 1139
@@ -1039,7 +1183,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
1039 1183
1040 reloc_offset += PAGE_SIZE; 1184 reloc_offset += PAGE_SIZE;
1041 } 1185 }
1042 dst->page_count = count; 1186 dst->page_count = num_pages;
1043 dst->gtt_offset = src->gtt_offset; 1187 dst->gtt_offset = src->gtt_offset;
1044 1188
1045 return dst; 1189 return dst;
@@ -1050,6 +1194,9 @@ unwind:
1050 kfree(dst); 1194 kfree(dst);
1051 return NULL; 1195 return NULL;
1052} 1196}
1197#define i915_error_object_create(dev_priv, src) \
1198 i915_error_object_create_sized((dev_priv), (src), \
1199 (src)->base.size>>PAGE_SHIFT)
1053 1200
1054static void 1201static void
1055i915_error_object_free(struct drm_i915_error_object *obj) 1202i915_error_object_free(struct drm_i915_error_object *obj)
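
i915_error_object_create_sized() takes an explicit page count so a caller can capture, say, only the first page of a context image, while the wrapper macro keeps every existing caller on the old behaviour by deriving the count from the object size. The default-argument-via-macro pattern on its own:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct obj { unsigned long size; };

    static void capture_sized(const struct obj *o, int num_pages)
    {
        printf("capturing %d page(s) of a %lu-byte object\n",
               num_pages, o->size);
    }

    /* Wrapper supplies the historical default: every page of the object. */
    #define capture(o) capture_sized((o), (int)((o)->size >> PAGE_SHIFT))

    int main(void)
    {
        struct obj o = { .size = 3ul << PAGE_SHIFT };
        capture(&o);           /* 3 pages, as all existing callers expect */
        capture_sized(&o, 1);  /* new: a single page, e.g. a context image */
        return 0;
    }
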
@@ -1148,7 +1295,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
1148 switch (INTEL_INFO(dev)->gen) { 1295 switch (INTEL_INFO(dev)->gen) {
1149 case 7: 1296 case 7:
1150 case 6: 1297 case 6:
1151 for (i = 0; i < 16; i++) 1298 for (i = 0; i < dev_priv->num_fence_regs; i++)
1152 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1299 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1153 break; 1300 break;
1154 case 5: 1301 case 5:
@@ -1256,6 +1403,26 @@ static void i915_record_ring_state(struct drm_device *dev,
1256 error->cpu_ring_tail[ring->id] = ring->tail; 1403 error->cpu_ring_tail[ring->id] = ring->tail;
1257} 1404}
1258 1405
1406
1407static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1408 struct drm_i915_error_state *error,
1409 struct drm_i915_error_ring *ering)
1410{
1411 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1412 struct drm_i915_gem_object *obj;
1413
1414 /* Currently render ring is the only HW context user */
1415 if (ring->id != RCS || !error->ccid)
1416 return;
1417
1418 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1419 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1420 ering->ctx = i915_error_object_create_sized(dev_priv,
1421 obj, 1);
1422 }
1423 }
1424}
1425
 static void i915_gem_record_rings(struct drm_device *dev,
 				  struct drm_i915_error_state *error)
 {
@@ -1273,6 +1440,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		error->ring[i].ringbuffer =
 			i915_error_object_create(dev_priv, ring->obj);
 
+
+		i915_gem_record_active_context(ring, error, &error->ring[i]);
+
 		count = 0;
 		list_for_each_entry(request, &ring->request_list, list)
 			count++;
@@ -1328,14 +1498,15 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	DRM_INFO("capturing error event; look for more information in"
+	DRM_INFO("capturing error event; look for more information in "
 		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
 		 dev->primary->index);
 
 	kref_init(&error->ref);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
-	error->ccid = I915_READ(CCID);
+	if (HAS_HW_CONTEXTS(dev))
+		error->ccid = I915_READ(CCID);
 
 	if (HAS_PCH_SPLIT(dev))
 		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1356,8 +1527,9 @@ static void i915_capture_error_state(struct drm_device *dev)
 	else if (INTEL_INFO(dev)->gen == 6)
 		error->forcewake = I915_READ(FORCEWAKE);
 
-	for_each_pipe(pipe)
-		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+	if (!HAS_PCH_SPLIT(dev))
+		for_each_pipe(pipe)
+			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		error->error = I915_READ(ERROR_GEN6);
@@ -1567,7 +1739,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
 }
 
-static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -1777,6 +1949,37 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 	return false;
 }
 
+static bool semaphore_passed(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
+	struct intel_ring_buffer *signaller;
+	u32 cmd, ipehr, acthd_min;
+
+	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+	if ((ipehr & ~(0x3 << 16)) !=
+	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+		return false;
+
+	/* ACTHD is likely pointing to the dword after the actual command,
+	 * so scan backwards until we find the MBOX.
+	 */
+	acthd_min = max((int)acthd - 3 * 4, 0);
+	do {
+		cmd = ioread32(ring->virtual_start + acthd);
+		if (cmd == ipehr)
+			break;
+
+		acthd -= 4;
+		if (acthd < acthd_min)
+			return false;
+	} while (1);
+
+	signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+	return i915_seqno_passed(signaller->get_seqno(signaller, false),
+				 ioread32(ring->virtual_start+acthd+4)+1);
+}
+
 static bool kick_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1788,6 +1991,15 @@ static bool kick_ring(struct intel_ring_buffer *ring)
 		I915_WRITE_CTL(ring, tmp);
 		return true;
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6 &&
+	    tmp & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(ring)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
 	return false;
 }
 
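The semaphore check above derives which ring a stuck waiter is waiting on from bit 17 of the latched MI_SEMAPHORE_MBOX command. A minimal sketch of that index arithmetic, assuming the driver's three-ring numbering (RCS=0, VCS=1, BCS=2); signaller_ring_id() is an illustrative name, not a helper in this patch:

    /* Map a waiter ring id plus IPEHR bit 17 to the signalling ring id.
     * Adding 1 or 2 modulo 3 always lands on one of the two other rings.
     */
    static int signaller_ring_id(int waiter_id, u32 ipehr)
    {
            return (waiter_id + (((ipehr >> 17) & 1) + 1)) % 3;
    }

For a stuck RCS waiter (id 0) this selects VCS (id 1) or BCS (id 2), matching the signaller lookup in semaphore_passed().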
@@ -1901,9 +2113,18 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
 	I915_WRITE(GTIER, 0x0);
 	POSTING_READ(GTIER);
 
+	if (HAS_PCH_NOP(dev))
+		return;
+
 	/* south display irq */
 	I915_WRITE(SDEIMR, 0xffffffff);
-	I915_WRITE(SDEIER, 0x0);
+	/*
+	 * SDEIER is also touched by the interrupt handler to work around missed
+	 * PCH interrupts. Hence we can't update it after the interrupt handler
+	 * is enabled - instead we unconditionally enable all PCH interrupt
+	 * sources here, but then only unmask them as needed with SDEIMR.
+	 */
+	I915_WRITE(SDEIER, 0xffffffff);
 	POSTING_READ(SDEIER);
 }
 
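The new comment points at a companion workaround in the interrupt handler, which lies outside this hunk. The handler-side half follows roughly the sketch below (a hedged reconstruction, not quoted from this patch): SDEIER is cleared for the duration of the handler so that any SDEIIR bit left pending re-raises an interrupt once SDEIER is restored, rather than being silently dropped.

    /* Sketch of the handler-side half of the missed-PCH-interrupt
     * workaround; variable names are illustrative.
     */
    u32 sde_ier = I915_READ(SDEIER);
    I915_WRITE(SDEIER, 0);
    POSTING_READ(SDEIER);

    /* ... read and process DEIIR/GTIIR/SDEIIR here ... */

    I915_WRITE(SDEIER, sde_ier);
    POSTING_READ(SDEIER);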
@@ -1939,18 +2160,34 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 }
 
-/*
- * Enable digital hotplug on the PCH, and configure the DP short pulse
- * duration to 2ms (which is the minimum in the Display Port spec)
- *
- * This register is the same on all known PCH chips.
- */
-
-static void ibx_enable_hotplug(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 hotplug;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *intel_encoder;
+	u32 mask = ~I915_READ(SDEIMR);
+	u32 hotplug;
+
+	if (HAS_PCH_IBX(dev)) {
+		mask &= ~SDE_HOTPLUG_MASK;
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				mask |= hpd_ibx[intel_encoder->hpd_pin];
+	} else {
+		mask &= ~SDE_HOTPLUG_MASK_CPT;
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				mask |= hpd_cpt[intel_encoder->hpd_pin];
+	}
 
+	I915_WRITE(SDEIMR, ~mask);
+
+	/*
+	 * Enable digital hotplug on the PCH, and configure the DP short pulse
+	 * duration to 2ms (which is the minimum in the Display Port spec)
+	 *
+	 * This register is the same on all known PCH chips.
+	 */
 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
@@ -1965,20 +2202,15 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 	u32 mask;
 
 	if (HAS_PCH_IBX(dev))
-		mask = SDE_HOTPLUG_MASK |
-			SDE_GMBUS |
-			SDE_AUX_MASK;
+		mask = SDE_GMBUS | SDE_AUX_MASK;
 	else
-		mask = SDE_HOTPLUG_MASK_CPT |
-			SDE_GMBUS_CPT |
-			SDE_AUX_MASK_CPT;
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+
+	if (HAS_PCH_NOP(dev))
+		return;
 
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
 	I915_WRITE(SDEIMR, ~mask);
-	I915_WRITE(SDEIER, mask);
-	POSTING_READ(SDEIER);
-
-	ibx_enable_hotplug(dev);
 }
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -2089,9 +2321,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-	dev_priv->pipestat[0] = 0;
-	dev_priv->pipestat[1] = 0;
-
 	/* Hack for broken MSIs on VLV */
 	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
 	pci_read_config_word(dev->pdev, 0x98, &msid);
@@ -2135,30 +2364,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static void valleyview_hpd_irq_setup(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
-	/* Note HDMI and DP share bits */
-	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTB_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTC_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTD_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
-		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
-		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-		hotplug_en |= CRT_HOTPLUG_INT_EN;
-		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-	}
-
-	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-}
-
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2167,6 +2372,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0xffff);
 
@@ -2188,6 +2395,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
 	I915_WRITE(HWSTAM, 0xffffffff);
 
 	I915_WRITE(DEIMR, 0xffffffff);
@@ -2198,6 +2407,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 	I915_WRITE(GTIER, 0x0);
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 
+	if (HAS_PCH_NOP(dev))
+		return;
+
 	I915_WRITE(SDEIMR, 0xffffffff);
 	I915_WRITE(SDEIER, 0x0);
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
@@ -2221,9 +2433,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	dev_priv->pipestat[0] = 0;
-	dev_priv->pipestat[1] = 0;
-
 	I915_WRITE16(EMR,
 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2246,6 +2455,37 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+			       int pipe, u16 iir)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		return false;
+
+	intel_prepare_page_flip(dev, pipe);
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ16(ISR) & flip_pending)
+		return false;
+
+	intel_finish_page_flip(dev, pipe);
+
+	return true;
+}
+
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
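The FlipDone comment in i8xx_handle_vblank() boils down to an edge-versus-level comparison: IIR latches that PendingFlip was asserted at some point, while ISR shows its live value. Restated as a standalone predicate (the same logic as above, not a separate helper in the patch):

    /* A flip has completed once IIR latched PendingFlip but the live
     * ISR status no longer shows it.
     */
    static bool flip_done(u16 iir, u16 isr, u16 flip_pending)
    {
            return (iir & flip_pending) && !(isr & flip_pending);
    }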
@@ -2301,22 +2541,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
 		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    drm_handle_vblank(dev, 0)) {
-			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
-				intel_prepare_page_flip(dev, 0);
-				intel_finish_page_flip(dev, 0);
-				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
-			}
-		}
+		    i8xx_handle_vblank(dev, 0, iir))
+			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
 
 		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    drm_handle_vblank(dev, 1)) {
-			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
-				intel_prepare_page_flip(dev, 1);
-				intel_finish_page_flip(dev, 1);
-				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
-			}
-		}
+		    i8xx_handle_vblank(dev, 1, iir))
+			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
 
 		iir = new_iir;
 	}
@@ -2364,9 +2594,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
 
-	dev_priv->pipestat[0] = 0;
-	dev_priv->pipestat[1] = 0;
-
 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
 	/* Unmask the interrupts that we always want on. */
@@ -2404,33 +2631,35 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static void i915_hpd_irq_setup(struct drm_device *dev)
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+			       int plane, int pipe, u32 iir)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 hotplug_en;
-
-	if (I915_HAS_HOTPLUG(dev)) {
-		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
-		if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-			hotplug_en |= PORTB_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-			hotplug_en |= PORTC_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-			hotplug_en |= PORTD_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
-			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
-			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-			hotplug_en |= CRT_HOTPLUG_INT_EN;
-			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-		}
-
-		/* Ignore TV since it's buggy */
-
-		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-	}
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		return false;
+
+	intel_prepare_page_flip(dev, plane);
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ(ISR) & flip_pending)
+		return false;
+
+	intel_finish_page_flip(dev, pipe);
+
+	return true;
 }
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2442,10 +2671,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 	u32 flip_mask =
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
-	u32 flip[2] = {
-		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
-		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
-	};
 	int pipe, ret = IRQ_NONE;
 
 	atomic_inc(&dev_priv->irq_received);
@@ -2486,13 +2711,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		if ((I915_HAS_HOTPLUG(dev)) &&
 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 					 hotplug_status);
-			if (hotplug_status & dev_priv->hotplug_supported_mask)
+			if (hotplug_trigger) {
+				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+					i915_hpd_irq_setup(dev);
 				queue_work(dev_priv->wq,
 					   &dev_priv->hotplug_work);
-
+			}
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			POSTING_READ(PORT_HOTPLUG_STAT);
 		}
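hotplug_irq_storm_detect() and the hpd_status_* tables are introduced elsewhere in this series, so only their call sites appear in this hunk. A plausible sketch of such a detector, assuming per-pin counting inside a time window; the threshold and window constants below are invented for illustration:

    #define HPD_STORM_WINDOW_MS	1000
    #define HPD_STORM_THRESHOLD	5

    /* Hypothetical per-pin storm check: true once a pin fires too often
     * within the window, so the caller can mask it and let the connector
     * fall back to polled detection.
     */
    static bool hpd_pin_storm(unsigned long *last_jiffies, int *count,
                              unsigned long now)
    {
            if (time_after(now, *last_jiffies +
                           msecs_to_jiffies(HPD_STORM_WINDOW_MS))) {
                    *last_jiffies = now;
                    *count = 0;
                    return false;
            }
            return ++(*count) > HPD_STORM_THRESHOLD;
    }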
@@ -2507,14 +2735,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 			int plane = pipe;
 			if (IS_MOBILE(dev))
 				plane = !plane;
+
 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-			    drm_handle_vblank(dev, pipe)) {
-				if (iir & flip[plane]) {
-					intel_prepare_page_flip(dev, plane);
-					intel_finish_page_flip(dev, pipe);
-					flip_mask &= ~flip[plane];
-				}
-			}
+			    i915_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
@@ -2552,6 +2776,8 @@ static void i915_irq_uninstall(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2603,13 +2829,13 @@ static int i965_irq_postinstall(struct drm_device *dev)
 		       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
 	enable_mask = ~dev_priv->irq_mask;
+	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
 	enable_mask |= I915_USER_INTERRUPT;
 
 	if (IS_G4X(dev))
 		enable_mask |= I915_BSD_USER_INTERRUPT;
 
-	dev_priv->pipestat[0] = 0;
-	dev_priv->pipestat[1] = 0;
 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 
 	/*
@@ -2639,45 +2865,33 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static void i965_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *intel_encoder;
 	u32 hotplug_en;
 
-	/* Note HDMI and DP share hotplug bits */
-	hotplug_en = 0;
-	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTB_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTC_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-		hotplug_en |= PORTD_HOTPLUG_INT_EN;
-	if (IS_G4X(dev)) {
-		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
-			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
-			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-	} else {
-		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
-			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
-			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-	}
-	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-		hotplug_en |= CRT_HOTPLUG_INT_EN;
-
+	if (I915_HAS_HOTPLUG(dev)) {
+		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+		/* Note HDMI and DP share hotplug bits */
+		/* enable bits are the same for all generations */
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
 		/* Programming the CRT detection parameters tends
 		   to generate a spurious hotplug event about three
 		   seconds later.  So just do it once.
 		*/
 		if (IS_G4X(dev))
 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-	}
-
-	/* Ignore TV since it's buggy */
 
-	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+		/* Ignore TV since it's buggy */
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+	}
 }
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2689,6 +2903,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE, pipe;
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -2697,7 +2914,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 	for (;;) {
 		bool blc_event = false;
 
-		irq_received = iir != 0;
+		irq_received = (iir & ~flip_mask) != 0;
 
 		/* Can't rely on pipestat interrupt bit in iir as it might
 		 * have been cleared after the pipestat interrupt was received.
@@ -2733,18 +2950,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		/* Consume port. Then clear IIR or we'll miss events */
 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
+								HOTPLUG_INT_STATUS_G4X :
+								HOTPLUG_INT_STATUS_I965);
 
 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 					 hotplug_status);
-			if (hotplug_status & dev_priv->hotplug_supported_mask)
+			if (hotplug_trigger) {
+				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
+							     IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+					i915_hpd_irq_setup(dev);
 				queue_work(dev_priv->wq,
 					   &dev_priv->hotplug_work);
-
+			}
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
 		}
 
-		I915_WRITE(IIR, iir);
+		I915_WRITE(IIR, iir & ~flip_mask);
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
@@ -2752,18 +2975,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		if (iir & I915_BSD_USER_INTERRUPT)
 			notify_ring(dev, &dev_priv->ring[VCS]);
 
-		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
-			intel_prepare_page_flip(dev, 0);
-
-		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
-			intel_prepare_page_flip(dev, 1);
-
 		for_each_pipe(pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-			    drm_handle_vblank(dev, pipe)) {
-				i915_pageflip_stall_check(dev, pipe);
-				intel_finish_page_flip(dev, pipe);
-			}
+			    i915_handle_vblank(dev, pipe, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
@@ -2807,6 +3022,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -2822,6 +3039,41 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
+static void i915_reenable_hotplug_timer_func(unsigned long data)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	unsigned long irqflags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+			continue;
+
+		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 drm_get_connector_name(connector));
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 void intel_irq_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
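i915_reenable_hotplug_timer_func() is the deferred half of the storm handling: the IRQ path marks a stormy pin HPD_DISABLED and demotes its connector to polling, and this timer callback later restores HPD_ENABLED and reprograms the hotplug registers. The site that arms the timer is elsewhere in the series; it would look roughly like this sketch, where the two-minute delay is an assumption:

    /* Sketch: arm the reenable timer when a storm is detected. */
    mod_timer(&dev_priv->hotplug_reenable_timer,
              jiffies + msecs_to_jiffies(2 * 60 * 1000));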
@@ -2834,6 +3086,8 @@ void intel_irq_init(struct drm_device *dev)
 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
 		    i915_hangcheck_elapsed,
 		    (unsigned long) dev);
+	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+		    (unsigned long) dev_priv);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
@@ -2857,7 +3111,7 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
 		dev->driver->enable_vblank = valleyview_enable_vblank;
 		dev->driver->disable_vblank = valleyview_disable_vblank;
-		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		/* Share pre & uninstall handlers with ILK/SNB */
 		dev->driver->irq_handler = ivybridge_irq_handler;
@@ -2866,6 +3120,7 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
 		dev->driver->enable_vblank = ivybridge_enable_vblank;
 		dev->driver->disable_vblank = ivybridge_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2873,6 +3128,7 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
 		dev->driver->enable_vblank = ironlake_enable_vblank;
 		dev->driver->disable_vblank = ironlake_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
 	} else {
 		if (INTEL_INFO(dev)->gen == 2) {
 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
@@ -2890,7 +3146,7 @@ void intel_irq_init(struct drm_device *dev)
 			dev->driver->irq_postinstall = i965_irq_postinstall;
 			dev->driver->irq_uninstall = i965_irq_uninstall;
 			dev->driver->irq_handler = i965_irq_handler;
-			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 		}
 		dev->driver->enable_vblank = i915_enable_vblank;
 		dev->driver->disable_vblank = i915_disable_vblank;
@@ -2900,7 +3156,20 @@ void intel_irq_init(struct drm_device *dev)
 void intel_hpd_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
 
+	for (i = 1; i < HPD_NUM_PINS; i++) {
+		dev_priv->hpd_stats[i].hpd_cnt = 0;
+		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev);
 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 848992f67d56..83f9c26e1adb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -91,6 +91,7 @@
 #define   GRDOM_FULL		(0<<2)
 #define   GRDOM_RENDER		(1<<2)
 #define   GRDOM_MEDIA		(3<<2)
+#define   GRDOM_MASK		(3<<2)
 #define   GRDOM_RESET_ENABLE	(1<<0)
 
 #define GEN6_MBCUNIT_SNPCR	0x900c /* for LLC config */
@@ -121,10 +122,17 @@
 
 #define GAM_ECOCHK			0x4090
 #define   ECOCHK_SNB_BIT		(1<<10)
+#define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
 #define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
+#define   ECOCHK_PPGTT_GFDT_IVB		(0x1<<4)
+#define   ECOCHK_PPGTT_LLC_IVB		(0x1<<3)
+#define   ECOCHK_PPGTT_UC_HSW		(0x1<<3)
+#define   ECOCHK_PPGTT_WT_HSW		(0x2<<3)
+#define   ECOCHK_PPGTT_WB_HSW		(0x3<<3)
 
 #define GAC_ECO_BITS			0x14090
+#define   ECOBITS_SNB_BIT		(1<<13)
 #define   ECOBITS_PPGTT_CACHE64B	(3<<8)
 #define   ECOBITS_PPGTT_CACHE4B		(0<<8)
 
@@ -422,6 +430,7 @@
 
 #define FENCE_REG_SANDYBRIDGE_0		0x100000
 #define   SANDYBRIDGE_FENCE_PITCH_SHIFT	32
+#define   GEN7_FENCE_MAX_PITCH_VAL	0x0800
 
 /* control register for cpu gtt access */
 #define TILECTL				0x101000
@@ -522,6 +531,9 @@
 #define GEN7_ERR_INT	0x44040
 #define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
 
+#define FPGA_DBG		0x42300
+#define   FPGA_DBG_RM_NOCLAIM	(1<<31)
+
 #define DERRMR		0x44050
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
@@ -591,6 +603,7 @@
 #define I915_USER_INTERRUPT		(1<<1)
 #define I915_ASLE_INTERRUPT		(1<<0)
 #define I915_BSD_USER_INTERRUPT	(1<<25)
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR		0x020b0
 #define EMR		0x020b4
 #define ESR		0x020b8
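DISPLAY_PLANE_FLIP_PENDING() replaces the two named flip-pending interrupt bits used by the handlers above; expanding the expression:

    DISPLAY_PLANE_FLIP_PENDING(0)	/* 1 << 11: plane A, stands in for
    					 * I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT */
    DISPLAY_PLANE_FLIP_PENDING(1)	/* 1 << 10: plane B */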
@@ -1197,6 +1210,9 @@
 
 #define MCHBAR_MIRROR_BASE_SNB	0x140000
 
+/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
+#define DCLK 0x5e04
+
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC	0x10200
 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL	(0 << 0)
@@ -1637,6 +1653,12 @@
 #define SDVOC_HOTPLUG_INT_EN			(1 << 25)
 #define TV_HOTPLUG_INT_EN			(1 << 18)
 #define CRT_HOTPLUG_INT_EN			(1 << 9)
+#define HOTPLUG_INT_EN_MASK			(PORTB_HOTPLUG_INT_EN | \
+						 PORTC_HOTPLUG_INT_EN | \
+						 PORTD_HOTPLUG_INT_EN | \
+						 SDVOC_HOTPLUG_INT_EN | \
+						 SDVOB_HOTPLUG_INT_EN | \
+						 CRT_HOTPLUG_INT_EN)
 #define CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
 #define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
 /* must use period 64 on GM45 according to docs */
@@ -1675,43 +1697,84 @@
 #define SDVOB_HOTPLUG_INT_STATUS_I965	(3 << 2)
 #define SDVOC_HOTPLUG_INT_STATUS_I915	(1 << 7)
 #define SDVOB_HOTPLUG_INT_STATUS_I915	(1 << 6)
-
-/* SDVO port control */
-#define SDVOB			0x61140
-#define SDVOC			0x61160
-#define   SDVO_ENABLE		(1 << 31)
-#define   SDVO_PIPE_B_SELECT	(1 << 30)
-#define   SDVO_STALL_SELECT	(1 << 29)
-#define   SDVO_INTERRUPT_ENABLE	(1 << 26)
+#define HOTPLUG_INT_STATUS_G4X		(CRT_HOTPLUG_INT_STATUS | \
+					 SDVOB_HOTPLUG_INT_STATUS_G4X | \
+					 SDVOC_HOTPLUG_INT_STATUS_G4X | \
+					 PORTB_HOTPLUG_INT_STATUS | \
+					 PORTC_HOTPLUG_INT_STATUS | \
+					 PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I965		(CRT_HOTPLUG_INT_STATUS | \
+					 SDVOB_HOTPLUG_INT_STATUS_I965 | \
+					 SDVOC_HOTPLUG_INT_STATUS_I965 | \
+					 PORTB_HOTPLUG_INT_STATUS | \
+					 PORTC_HOTPLUG_INT_STATUS | \
+					 PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915		(CRT_HOTPLUG_INT_STATUS | \
+					 SDVOB_HOTPLUG_INT_STATUS_I915 | \
+					 SDVOC_HOTPLUG_INT_STATUS_I915 | \
+					 PORTB_HOTPLUG_INT_STATUS | \
+					 PORTC_HOTPLUG_INT_STATUS | \
+					 PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB	0x61140
+#define GEN3_SDVOC	0x61160
+#define GEN4_HDMIB	GEN3_SDVOB
+#define GEN4_HDMIC	GEN3_SDVOC
+#define PCH_SDVOB	0xe1140
+#define PCH_HDMIB	PCH_SDVOB
+#define PCH_HDMIC	0xe1150
+#define PCH_HDMID	0xe1160
+
+/* Gen 3 SDVO bits: */
+#define   SDVO_ENABLE			(1 << 31)
+#define   SDVO_PIPE_SEL(pipe)		((pipe) << 30)
+#define   SDVO_PIPE_SEL_MASK		(1 << 30)
+#define   SDVO_PIPE_B_SELECT		(1 << 30)
+#define   SDVO_STALL_SELECT		(1 << 29)
+#define   SDVO_INTERRUPT_ENABLE		(1 << 26)
 /**
  * 915G/GM SDVO pixel multiplier.
- *
  * Programmed value is multiplier - 1, up to 5x.
- *
  * \sa DPLL_MD_UDI_MULTIPLIER_MASK
  */
 #define   SDVO_PORT_MULTIPLY_MASK	(7 << 23)
 #define   SDVO_PORT_MULTIPLY_SHIFT	23
 #define   SDVO_PHASE_SELECT_MASK	(15 << 19)
 #define   SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
 #define   SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
-#define   SDVOC_GANG_MODE		(1 << 16)
-#define   SDVO_ENCODING_SDVO		(0x0 << 10)
-#define   SDVO_ENCODING_HDMI		(0x2 << 10)
-/** Requird for HDMI operation */
-#define   SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
-#define   SDVO_COLOR_RANGE_16_235	(1 << 8)
-#define   SDVO_BORDER_ENABLE		(1 << 7)
-#define   SDVO_AUDIO_ENABLE		(1 << 6)
-/** New with 965, default is to be set */
-#define   SDVO_VSYNC_ACTIVE_HIGH	(1 << 4)
-/** New with 965, default is to be set */
-#define   SDVO_HSYNC_ACTIVE_HIGH	(1 << 3)
-#define   SDVOB_PCIE_CONCURRENCY	(1 << 3)
-#define   SDVO_DETECTED			(1 << 2)
+#define   SDVOC_GANG_MODE		(1 << 16) /* Port C only */
+#define   SDVO_BORDER_ENABLE		(1 << 7) /* SDVO only */
+#define   SDVOB_PCIE_CONCURRENCY	(1 << 3) /* Port B only */
+#define   SDVO_DETECTED			(1 << 2)
 /* Bits to be preserved when writing */
-#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
-#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+			       SDVO_INTERRUPT_ENABLE)
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define   SDVO_COLOR_FORMAT_8bpc	(0 << 26)
+#define   SDVO_ENCODING_SDVO		(0 << 10)
+#define   SDVO_ENCODING_HDMI		(2 << 10)
+#define   HDMI_MODE_SELECT_HDMI		(1 << 9) /* HDMI only */
+#define   HDMI_MODE_SELECT_DVI		(0 << 9) /* HDMI only */
+#define   HDMI_COLOR_RANGE_16_235	(1 << 8) /* HDMI only */
+#define   SDVO_AUDIO_ENABLE		(1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define   SDVO_VSYNC_ACTIVE_HIGH	(1 << 4)
+#define   SDVO_HSYNC_ACTIVE_HIGH	(1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define   HDMI_COLOR_FORMAT_12bpc	(3 << 26) /* HDMI only */
+#define   SDVOB_HOTPLUG_ENABLE		(1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define   SDVO_PIPE_SEL_CPT(pipe)	((pipe) << 29)
+#define   SDVO_PIPE_SEL_MASK_CPT	(3 << 29)
+
 
 /* DVO port control */
 #define DVOA			0x61120
@@ -1898,7 +1961,7 @@
 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
 /* Backlight control */
-#define BLC_PWM_CTL2		0x61250 /* 965+ only */
+#define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE		(1 << 31)
 #define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT		(1 << 29)
@@ -1917,7 +1980,7 @@
 #define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT	(0)
 #define   BLM_PHASE_IN_INCR_MASK	(0xff << 0)
-#define BLC_PWM_CTL		0x61254
+#define BLC_PWM_CTL	(dev_priv->info->display_mmio_offset + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
@@ -1939,7 +2002,7 @@
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe)
 #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL		0x61260
+#define BLC_HIST_CTL	(dev_priv->info->display_mmio_offset + 0x61260)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout matches with gen4 BLC_PWM_CTL[12]. */
@@ -2589,14 +2652,14 @@
 #define _PIPEB_GMCH_DATA_M		0x71050
 
 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define  PIPE_GMCH_DATA_M_TU_SIZE_MASK	(0x3f << 25)
-#define  PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25
+#define  TU_SIZE(x)		(((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE_MASK		(0x3f << 25)
 
-#define  PIPE_GMCH_DATA_M_MASK		(0xffffff)
+#define  DATA_LINK_M_N_MASK	(0xffffff)
+#define  DATA_LINK_N_MAX	(0x800000)
 
 #define _PIPEA_GMCH_DATA_N		0x70054
 #define _PIPEB_GMCH_DATA_N		0x71054
-#define  PIPE_GMCH_DATA_N_MASK		(0xffffff)
 
 /*
  * Computing Link M and N values for the Display Port link
@@ -2611,11 +2674,9 @@
 
 #define _PIPEA_DP_LINK_M		0x70060
 #define _PIPEB_DP_LINK_M		0x71060
-#define  PIPEA_DP_LINK_M_MASK		(0xffffff)
 
 #define _PIPEA_DP_LINK_N		0x70064
 #define _PIPEB_DP_LINK_N		0x71064
-#define  PIPEA_DP_LINK_N_MASK		(0xffffff)
 
 #define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
 #define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
@@ -2776,6 +2837,8 @@
 #define   DSPFW_HPLL_CURSOR_SHIFT	16
 #define   DSPFW_HPLL_CURSOR_MASK	(0x3f<<16)
 #define   DSPFW_HPLL_SR_MASK		(0x1ff)
+#define DSPFW4		(dev_priv->info->display_mmio_offset + 0x70070)
+#define DSPFW7		(dev_priv->info->display_mmio_offset + 0x7007c)
 
 /* drain latency register values */
 #define DRAIN_LATENCY_PRECISION_32	32
@@ -3233,6 +3296,63 @@
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
 #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
+#define _SPACNTR		0x72180
+#define   SP_ENABLE			(1<<31)
+#define   SP_GEAMMA_ENABLE		(1<<30)
+#define   SP_PIXFORMAT_MASK		(0xf<<26)
+#define   SP_FORMAT_YUV422		(0<<26)
+#define   SP_FORMAT_BGR565		(5<<26)
+#define   SP_FORMAT_BGRX8888		(6<<26)
+#define   SP_FORMAT_BGRA8888		(7<<26)
+#define   SP_FORMAT_RGBX1010102		(8<<26)
+#define   SP_FORMAT_RGBA1010102		(9<<26)
+#define   SP_FORMAT_RGBX8888		(0xe<<26)
+#define   SP_FORMAT_RGBA8888		(0xf<<26)
+#define   SP_SOURCE_KEY			(1<<22)
+#define   SP_YUV_BYTE_ORDER_MASK	(3<<16)
+#define   SP_YUV_ORDER_YUYV		(0<<16)
+#define   SP_YUV_ORDER_UYVY		(1<<16)
+#define   SP_YUV_ORDER_YVYU		(2<<16)
+#define   SP_YUV_ORDER_VYUY		(3<<16)
+#define   SP_TILED			(1<<10)
+#define _SPALINOFF		0x72184
+#define _SPASTRIDE		0x72188
+#define _SPAPOS			0x7218c
+#define _SPASIZE		0x72190
+#define _SPAKEYMINVAL		0x72194
+#define _SPAKEYMSK		0x72198
+#define _SPASURF		0x7219c
+#define _SPAKEYMAXVAL		0x721a0
+#define _SPATILEOFF		0x721a4
+#define _SPACONSTALPHA		0x721a8
+#define _SPAGAMC		0x721f4
+
+#define _SPBCNTR		0x72280
+#define _SPBLINOFF		0x72284
+#define _SPBSTRIDE		0x72288
+#define _SPBPOS			0x7228c
+#define _SPBSIZE		0x72290
+#define _SPBKEYMINVAL		0x72294
+#define _SPBKEYMSK		0x72298
+#define _SPBSURF		0x7229c
+#define _SPBKEYMAXVAL		0x722a0
+#define _SPBTILEOFF		0x722a4
+#define _SPBCONSTALPHA		0x722a8
+#define _SPBGAMC		0x722f4
+
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
+
 /* VBIOS regs */
 #define VGACNTRL		0x71400
 # define VGA_DISP_DISABLE	(1 << 31)
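The SP*(pipe, plane) wrappers reuse _PIPE() as a stride calculation over the four sprites. Assuming the driver's usual definition, _PIPE(index, a, b) == (a) + (index) * ((b) - (a)), and since _SPBCNTR - _SPACNTR == 0x100, consecutive sprites sit 0x100 apart:

    /* Address arithmetic, assuming _PIPE(i, a, b) == (a) + (i)*((b)-(a)): */
    SPCNTR(0, 0)	/* 0x72180: pipe A, sprite A */
    SPCNTR(0, 1)	/* 0x72280: pipe A, sprite B */
    SPCNTR(1, 0)	/* 0x72380: pipe B, sprite A */
    SPCNTR(1, 1)	/* 0x72480: pipe B, sprite B */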
@@ -3282,8 +3402,6 @@
 
 
 #define _PIPEA_DATA_M1	(dev_priv->info->display_mmio_offset + 0x60030)
-#define  TU_SIZE(x)		(((x)-1) << 25) /* default size 64 */
-#define  TU_SIZE_MASK		0x7e000000
 #define  PIPE_DATA_M1_OFFSET	0
 #define _PIPEA_DATA_N1	(dev_priv->info->display_mmio_offset + 0x60034)
 #define  PIPE_DATA_N1_OFFSET	0
@@ -3456,6 +3574,9 @@
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
+#define GEN7_MSG_CTL	0x45010
+#define  WAIT_FOR_PCH_RESET_ACK	(1<<1)
+#define  WAIT_FOR_PCH_FLR_ACK	(1<<0)
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1	0x7010
@@ -3508,7 +3629,11 @@
 #define SDE_PORTC_HOTPLUG	(1 << 9)
 #define SDE_PORTB_HOTPLUG	(1 << 8)
 #define SDE_SDVOB_HOTPLUG	(1 << 6)
-#define SDE_HOTPLUG_MASK	(0xf << 8)
+#define SDE_HOTPLUG_MASK	(SDE_CRT_HOTPLUG | \
+				 SDE_SDVOB_HOTPLUG | \
+				 SDE_PORTB_HOTPLUG | \
+				 SDE_PORTC_HOTPLUG | \
+				 SDE_PORTD_HOTPLUG)
 #define SDE_TRANSB_CRC_DONE	(1 << 5)
 #define SDE_TRANSB_CRC_ERR	(1 << 4)
 #define SDE_TRANSB_FIFO_UNDER	(1 << 3)
@@ -3531,7 +3656,9 @@
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
 #define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT	(1 << 18)
 #define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT | \
+				 SDE_SDVOB_HOTPLUG_CPT | \
 				 SDE_PORTD_HOTPLUG_CPT | \
 				 SDE_PORTC_HOTPLUG_CPT | \
 				 SDE_PORTB_HOTPLUG_CPT)
@@ -3754,14 +3881,16 @@
 #define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
 #define HSW_VIDEO_DIP_GCP_B		0x61210
 
-#define HSW_TVIDEO_DIP_CTL(pipe) \
-	 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
-#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
-	 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
-#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
-	 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
-#define HSW_TVIDEO_DIP_GCP(pipe) \
-	 _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_CTL(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
 #define _TRANS_HTOTAL_B		0xe1000
 #define _TRANS_HBLANK_B		0xe1004
@@ -3826,8 +3955,11 @@
 #define _TRANSA_CHICKEN2	0xf0064
 #define _TRANSB_CHICKEN2	0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
 #define  TRANS_CHICKEN2_TIMING_OVERRIDE			(1<<31)
-
+#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED		(1<<29)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK		(3<<27)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER	(1<<26)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH	(1<<25)
 
 #define SOUTH_CHICKEN1		0xc2000
 #define  FDIA_PHASE_SYNC_SHIFT_OVR	19
@@ -3976,34 +4108,6 @@
 #define FDI_PLL_CTL_1		0xfe000
 #define FDI_PLL_CTL_2		0xfe004
 
-/* or SDVOB */
-#define HDMIB	0xe1140
-#define  PORT_ENABLE		(1 << 31)
-#define  TRANSCODER(pipe)	((pipe) << 30)
-#define  TRANSCODER_CPT(pipe)	((pipe) << 29)
-#define  TRANSCODER_MASK	(1 << 30)
-#define  TRANSCODER_MASK_CPT	(3 << 29)
-#define  COLOR_FORMAT_8bpc	(0)
-#define  COLOR_FORMAT_12bpc	(3 << 26)
-#define  SDVOB_HOTPLUG_ENABLE	(1 << 23)
-#define  SDVO_ENCODING		(0)
-#define  TMDS_ENCODING		(2 << 10)
-#define  NULL_PACKET_VSYNC_ENABLE	(1 << 9)
-/* CPT */
-#define  HDMI_MODE_SELECT	(1 << 9)
-#define  DVI_MODE_SELECT	(0)
-#define  SDVOB_BORDER_ENABLE	(1 << 7)
-#define  AUDIO_ENABLE		(1 << 6)
-#define  VSYNC_ACTIVE_HIGH	(1 << 4)
-#define  HSYNC_ACTIVE_HIGH	(1 << 3)
-#define  PORT_DETECTED		(1 << 2)
-
-/* PCH SDVOB multiplex with HDMIB */
-#define PCH_SDVOB	HDMIB
-
-#define HDMIC	0xe1150
-#define HDMID	0xe1160
-
 #define PCH_LVDS	0xe1180
 #define  LVDS_DETECTED	(1 << 1)
 
@@ -4020,6 +4124,15 @@
4020#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) 4124#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
4021#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) 4125#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
4022 4126
4127#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
4128#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
4129#define VLV_PIPE_PP_ON_DELAYS(pipe) \
4130 _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
4131#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
4132 _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
4133#define VLV_PIPE_PP_DIVISOR(pipe) \
4134 _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
4135
4023#define PCH_PP_STATUS 0xc7200 4136#define PCH_PP_STATUS 0xc7200
4024#define PCH_PP_CONTROL 0xc7204 4137#define PCH_PP_CONTROL 0xc7204
4025#define PANEL_UNLOCK_REGS (0xabcd << 16) 4138#define PANEL_UNLOCK_REGS (0xabcd << 16)
@@ -4149,8 +4262,12 @@
4149#define FORCEWAKE 0xA18C 4262#define FORCEWAKE 0xA18C
4150#define FORCEWAKE_VLV 0x1300b0 4263#define FORCEWAKE_VLV 0x1300b0
4151#define FORCEWAKE_ACK_VLV 0x1300b4 4264#define FORCEWAKE_ACK_VLV 0x1300b4
4265#define FORCEWAKE_MEDIA_VLV 0x1300b8
4266#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
4152#define FORCEWAKE_ACK_HSW 0x130044 4267#define FORCEWAKE_ACK_HSW 0x130044
4153#define FORCEWAKE_ACK 0x130090 4268#define FORCEWAKE_ACK 0x130090
4269#define VLV_GTLC_WAKE_CTRL 0x130090
4270#define VLV_GTLC_PW_STATUS 0x130094
4154#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4271#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4155#define FORCEWAKE_KERNEL 0x1 4272#define FORCEWAKE_KERNEL 0x1
4156#define FORCEWAKE_USER 0x2 4273#define FORCEWAKE_USER 0x2
@@ -4184,6 +4301,7 @@
4184#define GEN6_RPNSWREQ 0xA008 4301#define GEN6_RPNSWREQ 0xA008
4185#define GEN6_TURBO_DISABLE (1<<31) 4302#define GEN6_TURBO_DISABLE (1<<31)
4186#define GEN6_FREQUENCY(x) ((x)<<25) 4303#define GEN6_FREQUENCY(x) ((x)<<25)
4304#define HSW_FREQUENCY(x) ((x)<<24)
4187#define GEN6_OFFSET(x) ((x)<<19) 4305#define GEN6_OFFSET(x) ((x)<<19)
4188#define GEN6_AGGRESSIVE_TURBO (0<<15) 4306#define GEN6_AGGRESSIVE_TURBO (0<<15)
4189#define GEN6_RC_VIDEO_FREQ 0xA00C 4307#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -4274,6 +4392,21 @@
 #define GEN6_DECODE_RC6_VID(vids)	(((vids) * 5) + 245)
 #define GEN6_PCODE_DATA			0x138128
 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
+#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
+
+#define VLV_IOSF_DOORBELL_REQ	0x182100
+#define IOSF_DEVFN_SHIFT	24
+#define IOSF_OPCODE_SHIFT	16
+#define IOSF_PORT_SHIFT		8
+#define IOSF_BYTE_ENABLES_SHIFT	4
+#define IOSF_BAR_SHIFT		1
+#define IOSF_SB_BUSY		(1<<0)
+#define IOSF_PORT_PUNIT		0x4
+#define VLV_IOSF_DATA		0x182104
+#define VLV_IOSF_ADDR		0x182108
+
+#define PUNIT_OPCODE_REG_READ	6
+#define PUNIT_OPCODE_REG_WRITE	7
 
 #define GEN6_GT_CORE_STATUS		0x138060
 #define GEN6_CORE_CPD_STATE_MASK	(7<<4)
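The VLV_IOSF_* additions above define Valleyview's sideband "doorbell" interface: write the target address, ring the doorbell with a command word built from the devfn/opcode/port/byte-enable shifts, poll IOSF_SB_BUSY until the Punit answers, then read the data register. A sketch of how a Punit register read could be driven with these definitions; this is an illustration only, not the merged helper, and the devfn value and the unbounded poll are assumptions:

	static u32 vlv_punit_read_sketch(struct drm_i915_private *dev_priv, u8 addr)
	{
		/* Command word assembled from the shift values defined above */
		u32 cmd = (PCI_DEVFN(2, 0) << IOSF_DEVFN_SHIFT) |
			  (PUNIT_OPCODE_REG_READ << IOSF_OPCODE_SHIFT) |
			  (IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |
			  (0xf << IOSF_BYTE_ENABLES_SHIFT);

		I915_WRITE(VLV_IOSF_ADDR, addr);
		I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd | IOSF_SB_BUSY);

		/* The busy bit clears once the Punit has answered; a real
		 * implementation would bound this wait with a timeout. */
		while (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY)
			cpu_relax();

		return I915_READ(VLV_IOSF_DATA);
	}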
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2135f21ea458..41f0fdecfbdc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -209,7 +209,8 @@ static void i915_save_display(struct drm_device *dev)
 		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
 		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
 		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
-		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -255,6 +256,7 @@ static void i915_save_display(struct drm_device *dev)
 static void i915_restore_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask = 0xffffffff;
 
 	/* Display arbitration */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -267,10 +269,13 @@ static void i915_restore_display(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
-	} else if (IS_MOBILE(dev) && !IS_I830(dev))
-		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		mask = ~LVDS_PORT_EN;
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
 
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
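The restore path above deliberately applies the saved LVDS value through a mask: under KMS the modeset code owns the port-enable bit, so resume must not switch the panel back on behind the driver's back. With LVDS_PORT_EN at bit 31, a hypothetical saved value of 0x80000300 (port enabled when saved) illustrates the effect:

	u32 mask = ~LVDS_PORT_EN;			/* LVDS_PORT_EN is (1 << 31) */
	I915_WRITE(PCH_LVDS, 0x80000300 & mask);	/* writes 0x00000300 */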
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 9462081b1e60..d5e1890678f9 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,7 +49,7 @@ static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
-	return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
@@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
-	return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
@@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
-	return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
@@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
-	return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
@@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	return snprintf(buf, PAGE_SIZE, "%d", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	return snprintf(buf, PAGE_SIZE, "%d", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -239,7 +239,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val, rp_state_cap, hw_max, hw_min;
+	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
 	ssize_t ret;
 
 	ret = kstrtou32(buf, 0, &val);
@@ -251,7 +251,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	hw_max = (rp_state_cap & 0xff);
+	hw_max = dev_priv->rps.hw_max;
+	non_oc_max = (rp_state_cap & 0xff);
 	hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
@@ -259,6 +260,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 	}
 
+	if (val > non_oc_max)
+		DRM_DEBUG("User requested overclocking to %d\n",
+			  val * GT_FREQUENCY_MULTIPLIER);
+
 	if (dev_priv->rps.cur_delay > val)
 		gen6_set_rps(dev_priv->dev, val);
 
@@ -280,7 +285,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	return snprintf(buf, PAGE_SIZE, "%d", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -302,7 +307,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	hw_max = (rp_state_cap & 0xff);
+	hw_max = dev_priv->rps.hw_max;
 	hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
@@ -355,7 +360,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	} else {
 		BUG();
 	}
-	return snprintf(buf, PAGE_SIZE, "%d", val);
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
 static const struct attribute *gen6_attrs[] = {
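All of the i915_sysfs.c hunks make the same one-character fix: sysfs attributes are expected to emit a single, newline-terminated value, so that shell tools reading the files do not run the value into the next prompt. A minimal show() following that convention (illustrative only; the attribute name is made up):

	static ssize_t example_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
	{
		/* One value, newline-terminated, bounded by PAGE_SIZE */
		return snprintf(buf, PAGE_SIZE, "%u\n", 42);
	}
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);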
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1f5818..95070b2124c6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->lvds_ssc_freq =
 			intel_bios_ssc_frequency(dev, general->ssc_freq);
 		dev_priv->display_clock_mode = general->display_clock_mode;
-		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+		dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
 			      dev_priv->int_tv_support,
 			      dev_priv->int_crt_support,
 			      dev_priv->lvds_use_ssc,
 			      dev_priv->lvds_ssc_freq,
-			      dev_priv->display_clock_mode);
+			      dev_priv->display_clock_mode,
+			      dev_priv->fdi_rx_polarity_inverted);
 	}
 }
 
@@ -692,6 +694,9 @@ intel_parse_bios(struct drm_device *dev)
 	struct bdb_header *bdb = NULL;
 	u8 __iomem *bios = NULL;
 
+	if (HAS_PCH_NOP(dev))
+		return -ENODEV;
+
 	init_vbt_defaults(dev_priv);
 
 	/* XXX Should this validation be moved to intel_opregion.c? */
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f934373..e088d6f0956a 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -127,7 +127,9 @@ struct bdb_general_features {
 	/* bits 3 */
 	u8 disable_smooth_vision:1;
 	u8 single_dvi:1;
-	u8 rsvd9:6; /* finish byte */
+	u8 rsvd9:1;
+	u8 fdi_rx_polarity_inverted:1;
+	u8 rsvd10:4; /* finish byte */
 
 	/* bits 4 */
 	u8 legacy_monitor_detect;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 1ce45a0a2d3e..58b4a53715cd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -199,10 +199,14 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
-				 const struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
+
+	if (HAS_PCH_SPLIT(dev))
+		pipe_config->has_pch_encoder = true;
+
 	return true;
 }
 
@@ -676,7 +680,6 @@ static void intel_crt_reset(struct drm_connector *connector)
  */
 
 static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
-	.mode_fixup = intel_crt_mode_fixup,
 	.mode_set = intel_crt_mode_set,
 };
 
@@ -768,8 +771,11 @@ void intel_crt_init(struct drm_device *dev)
 	else
 		crt->adpa_reg = ADPA;
 
+	crt->base.compute_config = intel_crt_compute_config;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
+	if (I915_HAS_HOTPLUG(dev))
+		crt->base.hpd_pin = HPD_CRT;
 	if (HAS_DDI(dev))
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
 	else
@@ -781,18 +787,14 @@ void intel_crt_init(struct drm_device *dev)
 
 	drm_sysfs_connector_add(connector);
 
-	if (I915_HAS_HOTPLUG(dev))
-		connector->polled = DRM_CONNECTOR_POLL_HPD;
-	else
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	if (!I915_HAS_HOTPLUG(dev))
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
 	/*
 	 * Configure the automatic hotplug detection stuff
 	 */
 	crt->force_hotplug_required = 0;
 
-	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
-
 	/*
 	 * TODO: find a proper way to discover whether we need to set the the
 	 * polarity and link reversal bits or not, instead of relying on the
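The intel_crt.c change above is one instance of a tree-wide migration in this merge: per-encoder drm mode_fixup() hooks give way to compute_config() hooks that fill in a CRTC-wide struct intel_crtc_config, so cross-encoder facts such as has_pch_encoder live in one place. The expected shape of such a hook, sketched from the CRT version (the encoder name is hypothetical):

	static bool intel_foo_compute_config(struct intel_encoder *encoder,
					     struct intel_crtc_config *pipe_config)
	{
		/* Record pipe-wide requirements instead of editing the mode */
		if (HAS_PCH_SPLIT(encoder->base.dev))
			pipe_config->has_pch_encoder = true;

		return true;	/* returning false rejects the configuration */
	}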
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8d0bac3c35d7..26a0a570f92e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -898,6 +898,9 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
 		plls->spll_refcount++;
 		reg = SPLL_CTL;
 		intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+	} else {
+		DRM_ERROR("SPLL already in use\n");
+		return false;
 	}
 
 	WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
@@ -921,14 +924,14 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	int type = intel_encoder->type;
 	uint32_t temp;
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 
 		temp = TRANS_MSA_SYNC_CLK;
-		switch (intel_crtc->bpp) {
+		switch (intel_crtc->config.pipe_bpp) {
 		case 18:
 			temp |= TRANS_MSA_6_BPC;
 			break;
@@ -942,22 +945,20 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 			temp |= TRANS_MSA_12_BPC;
 			break;
 		default:
-			temp |= TRANS_MSA_8_BPC;
-			WARN(1, "%d bpp unsupported by DDI function\n",
-			     intel_crtc->bpp);
+			BUG();
 		}
 		I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
 	}
 }
 
-void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	enum pipe pipe = intel_crtc->pipe;
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
 	uint32_t temp;
@@ -966,7 +967,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
 	temp = TRANS_DDI_FUNC_ENABLE;
 	temp |= TRANS_DDI_SELECT_PORT(port);
 
-	switch (intel_crtc->bpp) {
+	switch (intel_crtc->config.pipe_bpp) {
 	case 18:
 		temp |= TRANS_DDI_BPC_6;
 		break;
@@ -980,8 +981,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
 		temp |= TRANS_DDI_BPC_12;
 		break;
 	default:
-		WARN(1, "%d bpp unsupported by transcoder DDI function\n",
-		     intel_crtc->bpp);
+		BUG();
 	}
 
 	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
@@ -1150,14 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
 
-	return true;
+	return false;
 }
 
 static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
 				       enum pipe pipe)
 {
 	uint32_t temp, ret;
-	enum port port;
+	enum port port = I915_MAX_PORTS;
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
 	int i;
@@ -1173,10 +1173,16 @@ static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
 			port = i;
 	}
 
-	ret = I915_READ(PORT_CLK_SEL(port));
-
-	DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
-		      pipe_name(pipe), port_name(port), ret);
+	if (port == I915_MAX_PORTS) {
+		WARN(1, "Pipe %c enabled on an unknown port\n",
+		     pipe_name(pipe));
+		ret = PORT_CLK_SEL_NONE;
+	} else {
+		ret = I915_READ(PORT_CLK_SEL(port));
+		DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+			      "0x%08x\n", pipe_name(pipe), port_name(port),
+			      ret);
+	}
 
 	return ret;
 }
@@ -1217,7 +1223,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (cpu_transcoder != TRANSCODER_EDP)
 		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1227,7 +1233,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (cpu_transcoder != TRANSCODER_EDP)
 		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1341,15 +1347,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		ironlake_edp_backlight_off(intel_dp);
 	}
-
-	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
-	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 }
 
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1467,19 +1473,17 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
 	intel_dp_encoder_destroy(encoder);
 }
 
-static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
-				 const struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *pipe_config)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	int type = intel_encoder->type;
+	int type = encoder->type;
 
-	WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+	WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
 	if (type == INTEL_OUTPUT_HDMI)
-		return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+		return intel_hdmi_compute_config(encoder, pipe_config);
 	else
-		return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+		return intel_dp_compute_config(encoder, pipe_config);
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -1487,7 +1491,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
-	.mode_fixup = intel_ddi_mode_fixup,
 	.mode_set = intel_ddi_mode_set,
 };
 
@@ -1527,6 +1530,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 			 DRM_MODE_ENCODER_TMDS);
 	drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
+	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
 	intel_encoder->pre_enable = intel_ddi_pre_enable;
 	intel_encoder->disable = intel_disable_ddi;
@@ -1537,9 +1541,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
 					DDI_BUF_PORT_REVERSAL;
 	if (hdmi_connector)
-		intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
-	else
-		intel_dig_port->hdmi.sdvox_reg = 0;
+		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
 	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
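intel_ddi_get_crtc_pll() above also shows a small defensive pattern worth noting: port is initialised to the I915_MAX_PORTS sentinel so that a lookup loop that never matches can be detected before the value is used as a register index. In outline (the match predicate is a hypothetical stand-in):

	enum port port = I915_MAX_PORTS;	/* sentinel: nothing found yet */

	for (i = 0; i < I915_MAX_PORTS; i++)
		if (port_drives_pipe(i))	/* hypothetical predicate */
			port = i;

	if (port == I915_MAX_PORTS)
		return PORT_CLK_SEL_NONE;	/* fail loudly, don't index with garbage */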
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b20d50192fcc..efe829919755 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -71,8 +71,24 @@ typedef struct intel_limit intel_limit_t;
 struct intel_limit {
 	intel_range_t dot, vco, n, m, m1, m2, p, p1;
 	intel_p2_t p2;
-	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
-			  int, int, intel_clock_t *, intel_clock_t *);
+	/**
+	 * find_pll() - Find the best values for the PLL
+	 * @limit: limits for the PLL
+	 * @crtc: current CRTC
+	 * @target: target frequency in kHz
+	 * @refclk: reference clock frequency in kHz
+	 * @match_clock: if provided, @best_clock P divider must
+	 *               match the P divider from @match_clock
+	 *               used for LVDS downclocking
+	 * @best_clock: best PLL values found
+	 *
+	 * Returns true on success, false on failure.
+	 */
+	bool (*find_pll)(const intel_limit_t *limit,
+			 struct drm_crtc *crtc,
+			 int target, int refclk,
+			 intel_clock_t *match_clock,
+			 intel_clock_t *best_clock);
 };
 
 /* FDI */
@@ -471,7 +487,6 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev)) {
-			/* LVDS dual channel */
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
 			else
@@ -498,10 +513,8 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 		if (intel_is_dual_link_lvds(dev))
-			/* LVDS with dual channel */
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
-			/* LVDS with dual channel */
 			limit = &intel_limits_g4x_single_channel_lvds;
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
@@ -879,7 +892,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	return intel_crtc->cpu_transcoder;
+	return intel_crtc->config.cpu_transcoder;
 }
 
 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
@@ -1214,8 +1227,8 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
 		state = true;
 
-	if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
-	    !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
+	if (!intel_using_power_well(dev_priv->dev) &&
+	    cpu_transcoder != TRANSCODER_EDP) {
 		cur_state = false;
 	} else {
 		reg = PIPECONF(cpu_transcoder);
@@ -1254,7 +1267,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 	int cur_pipe;
 
 	/* Planes are fixed to pipes on ILK+ */
-	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+	if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
 		reg = DSPCNTR(pipe);
 		val = I915_READ(reg);
 		WARN((val & DISPLAY_PLANE_ENABLE),
@@ -1275,6 +1288,25 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 	}
 }
 
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
+{
+	int reg, i;
+	u32 val;
+
+	if (!IS_VALLEYVIEW(dev_priv->dev))
+		return;
+
+	/* Need to check both planes against the pipe */
+	for (i = 0; i < dev_priv->num_plane; i++) {
+		reg = SPCNTR(pipe, i);
+		val = I915_READ(reg);
+		WARN((val & SP_ENABLE),
+		     "sprite %d assertion failure, should be off on pipe %c but is still active\n",
+		     pipe * 2 + i, pipe_name(pipe));
+	}
+}
+
 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 {
 	u32 val;
@@ -1327,14 +1359,14 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
 			      enum pipe pipe, u32 val)
 {
-	if ((val & PORT_ENABLE) == 0)
+	if ((val & SDVO_ENABLE) == 0)
 		return false;
 
 	if (HAS_PCH_CPT(dev_priv->dev)) {
-		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
 			return false;
 	} else {
-		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
 			return false;
 	}
 	return true;
@@ -1392,7 +1424,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1392 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1424 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1393 reg, pipe_name(pipe)); 1425 reg, pipe_name(pipe));
1394 1426
1395 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 1427 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1396 && (val & SDVO_PIPE_B_SELECT), 1428 && (val & SDVO_PIPE_B_SELECT),
1397 "IBX PCH hdmi port still using transcoder B\n"); 1429 "IBX PCH hdmi port still using transcoder B\n");
1398} 1430}
@@ -1419,9 +1451,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1419 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1451 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1420 pipe_name(pipe)); 1452 pipe_name(pipe));
1421 1453
1422 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1454 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1423 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1455 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1424 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1456 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1425} 1457}
1426 1458
1427/** 1459/**
@@ -1859,6 +1891,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 	 * or we might hang the display.
 	 */
 	assert_planes_disabled(dev_priv, pipe);
+	assert_sprites_disabled(dev_priv, pipe);
 
 	/* Don't disable pipe A or pipe A PLLs if needed */
 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
@@ -1937,6 +1970,15 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -1960,13 +2002,23 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 		alignment = 0;
 		break;
 	case I915_TILING_Y:
-		/* FIXME: Is this true? */
-		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+		/* Despite that we check this in framebuffer_init userspace can
+		 * screw us over and change the tiling after the fact. Only
+		 * pinned buffers can't change their tiling. */
+		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
 		return -EINVAL;
 	default:
 		BUG();
 	}
 
+	/* Note that the w/a also requires 64 PTE of padding following the
+	 * bo. We currently fill all unused PTE with the shadow page and so
+	 * we should always have valid PTE following the scanout preventing
+	 * the VT-d warning.
+	 */
+	if (need_vtd_wa(dev) && alignment < 256 * 1024)
+		alignment = 256 * 1024;
+
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 	if (ret)
@@ -2083,8 +2135,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		dspcntr |= DISPPLANE_RGBX101010;
 		break;
 	default:
-		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
-		return -EINVAL;
+		BUG();
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -2177,8 +2228,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 		dspcntr |= DISPPLANE_RGBX101010;
 		break;
 	default:
-		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
-		return -EINVAL;
+		BUG();
 	}
 
 	if (obj->tiling_mode != I915_TILING_NONE)
@@ -2229,6 +2279,44 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return dev_priv->display.update_plane(crtc, fb, x, y);
 }
 
+void intel_display_handle_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	/*
+	 * Flips in the rings have been nuked by the reset,
+	 * so complete all pending flips so that user space
+	 * will get its events and not get stuck.
+	 *
+	 * Also update the base address of all primary
+	 * planes to the the last fb to make sure we're
+	 * showing the correct fb after a reset.
+	 *
+	 * Need to make two loops over the crtcs so that we
+	 * don't try to grab a crtc mutex before the
+	 * pending_flip_queue really got woken up.
+	 */
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		enum plane plane = intel_crtc->plane;
+
+		intel_prepare_page_flip(dev, plane);
+		intel_finish_page_flip_plane(dev, plane);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		mutex_lock(&crtc->mutex);
+		if (intel_crtc->active)
+			dev_priv->display.update_plane(crtc, crtc->fb,
+						       crtc->x, crtc->y);
+		mutex_unlock(&crtc->mutex);
+	}
+}
+
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
@@ -2295,10 +2383,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return 0;
 	}
 
-	if(intel_crtc->plane > dev_priv->num_pipe) {
+	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
 		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
 			  intel_crtc->plane,
-			  dev_priv->num_pipe);
+			  INTEL_INFO(dev)->num_pipes);
 		return -EINVAL;
 	}
 
@@ -2312,9 +2400,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	if (crtc->fb)
-		intel_finish_fb(crtc->fb);
-
 	ret = dev_priv->display.update_plane(crtc, fb, x, y);
 	if (ret) {
 		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2912,32 +2997,6 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
-
-	/*
-	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
-	 * must be driven by its own crtc; no sharing is possible.
-	 */
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-		switch (intel_encoder->type) {
-		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
-				return false;
-			continue;
-		}
-	}
-
-	return true;
-}
-
-static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
-{
-	return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
-}
-
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
@@ -3144,7 +3203,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
 
@@ -3273,7 +3332,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 temp;
-	bool is_pch_port;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3289,9 +3347,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	is_pch_port = ironlake_crtc_driving_pch(crtc);
 
-	if (is_pch_port) {
+	if (intel_crtc->config.has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
 		 * cpu pipes, hence this is separate from all the other fdi/pch
 		 * enabling. */
@@ -3328,10 +3385,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	 */
 	intel_crtc_load_lut(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, is_pch_port);
+	intel_enable_pipe(dev_priv, pipe,
+			  intel_crtc->config.has_pch_encoder);
 	intel_enable_plane(dev_priv, plane, pipe);
 
-	if (is_pch_port)
+	if (intel_crtc->config.has_pch_encoder)
 		ironlake_pch_enable(crtc);
 
 	mutex_lock(&dev->struct_mutex);
@@ -3365,7 +3423,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	bool is_pch_port;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3375,9 +3432,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc->active = true;
 	intel_update_watermarks(dev);
 
-	is_pch_port = haswell_crtc_driving_pch(crtc);
-
-	if (is_pch_port)
+	if (intel_crtc->config.has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -3406,12 +3461,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc_load_lut(crtc);
 
 	intel_ddi_set_pipe_settings(crtc);
-	intel_ddi_enable_pipe_func(crtc);
+	intel_ddi_enable_transcoder_func(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, is_pch_port);
+	intel_enable_pipe(dev_priv, pipe,
+			  intel_crtc->config.has_pch_encoder);
 	intel_enable_plane(dev_priv, plane, pipe);
 
-	if (is_pch_port)
+	if (intel_crtc->config.has_pch_encoder)
 		lpt_pch_enable(crtc);
 
 	mutex_lock(&dev->struct_mutex);
@@ -3522,14 +3578,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-	bool is_pch_port;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (!intel_crtc->active)
 		return;
 
-	is_pch_port = haswell_crtc_driving_pch(crtc);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
 
@@ -3546,9 +3599,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
-	/* Disable PF */
-	I915_WRITE(PF_CTL(pipe), 0);
-	I915_WRITE(PF_WIN_SZ(pipe), 0);
+	/* XXX: Once we have proper panel fitter state tracking implemented with
+	 * hardware state read/check support we should switch to only disable
+	 * the panel fitter when we know it's used. */
+	if (intel_using_power_well(dev)) {
+		I915_WRITE(PF_CTL(pipe), 0);
+		I915_WRITE(PF_WIN_SZ(pipe), 0);
+	}
 
 	intel_ddi_disable_pipe_clock(intel_crtc);
 
@@ -3556,7 +3613,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	if (is_pch_port) {
+	if (intel_crtc->config.has_pch_encoder) {
 		lpt_disable_pch_transcoder(dev_priv);
 		intel_ddi_fdi_disable(crtc);
 	}
@@ -3581,7 +3638,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
 
 	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
 	 * start using it. */
-	intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+	intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
 
 	intel_ddi_put_crtc_pll(crtc);
 }
@@ -3667,6 +3724,26 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 		encoder->enable(encoder);
 }
 
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	uint32_t pctl = I915_READ(PFIT_CONTROL);
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
+	else
+		pipe = PIPE_B;
+
+	if (pipe == crtc->pipe) {
+		DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
+}
+
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3675,8 +3752,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	u32 pctl;
-
 
 	if (!intel_crtc->active)
 		return;
@@ -3696,11 +3771,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	intel_disable_plane(dev_priv, plane, pipe);
 	intel_disable_pipe(dev_priv, pipe);
 
-	/* Disable pannel fitter if it is on this pipe. */
-	pctl = I915_READ(PFIT_CONTROL);
-	if ((pctl & PFIT_ENABLE) &&
-	    ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
-		I915_WRITE(PFIT_CONTROL, 0);
+	i9xx_pfit_disable(intel_crtc);
 
 	intel_disable_pll(dev_priv, pipe);
 
@@ -3906,22 +3977,23 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
 	return encoder->get_hw_state(encoder, &pipe);
 }
 
-static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
+static bool intel_crtc_compute_config(struct drm_crtc *crtc,
+				      struct intel_crtc_config *pipe_config)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		/* FDI link clock is fixed at 2.7G */
-		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+		if (pipe_config->requested_mode.clock * 3
+		    > IRONLAKE_FDI_FREQ * 4)
 			return false;
 	}
 
 	/* All interlaced capable intel hw wants timings in frames. Note though
 	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
 	 * timings, so we need to be careful not to clobber these.*/
-	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+	if (!pipe_config->timings_set)
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
 	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
@@ -3931,6 +4003,14 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
 		return false;
 
+	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
+		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
+		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
+		 * for lvds. */
+		pipe_config->pipe_bpp = 8*3;
+	}
+
 	return true;
 }
 
@@ -4004,26 +4084,36 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 }
 
 static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 {
-	while (*num > 0xffffff || *den > 0xffffff) {
+	while (*num > DATA_LINK_M_N_MASK ||
+	       *den > DATA_LINK_M_N_MASK) {
 		*num >>= 1;
 		*den >>= 1;
 	}
 }
 
+static void compute_m_n(unsigned int m, unsigned int n,
+			uint32_t *ret_m, uint32_t *ret_n)
+{
+	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+	*ret_m = div_u64((uint64_t) m * *ret_n, n);
+	intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 		       int pixel_clock, int link_clock,
 		       struct intel_link_m_n *m_n)
 {
 	m_n->tu = 64;
-	m_n->gmch_m = bits_per_pixel * pixel_clock;
-	m_n->gmch_n = link_clock * nlanes * 8;
-	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-	m_n->link_m = pixel_clock;
-	m_n->link_n = link_clock;
-	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+
+	compute_m_n(bits_per_pixel * pixel_clock,
+		    link_clock * nlanes * 8,
+		    &m_n->gmch_m, &m_n->gmch_n);
+
+	compute_m_n(pixel_clock, link_clock,
+		    &m_n->link_m, &m_n->link_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
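The rewritten M/N computation above snaps the denominator to a power of two (capped at DATA_LINK_N_MAX) and then scales the numerator to preserve the ratio, instead of shifting both values down until they fit. A standalone sketch of the arithmetic; the DATA_LINK_* values are assumed from i915_reg.h, and the sample numbers describe a hypothetical 148.5 MHz, 24 bpp mode on four 2.7 GHz lanes:

	#include <stdint.h>
	#include <stdio.h>

	#define DATA_LINK_N_MAX		0x800000	/* assumed value */
	#define DATA_LINK_M_N_MASK	0xffffff	/* assumed value */

	/* Userspace stand-in for the kernel's roundup_pow_of_two() */
	static uint32_t rup2(uint32_t n)
	{
		uint32_t r = 1;
		while (r < n)
			r <<= 1;
		return r;
	}

	static void compute_m_n(uint32_t m, uint32_t n,
				uint32_t *ret_m, uint32_t *ret_n)
	{
		*ret_n = rup2(n);
		if (*ret_n > DATA_LINK_N_MAX)
			*ret_n = DATA_LINK_N_MAX;
		*ret_m = (uint64_t)m * *ret_n / n;	/* scale m to keep the ratio */
		while (*ret_m > DATA_LINK_M_N_MASK || *ret_n > DATA_LINK_M_N_MASK) {
			*ret_m >>= 1;
			*ret_n >>= 1;
		}
	}

	int main(void)
	{
		uint32_t m, n;

		/* 24 bpp * 148500 kHz pixel clock vs 270000 kHz * 4 lanes * 8 */
		compute_m_n(24 * 148500, 270000 * 4 * 8, &m, &n);
		printf("data M/N = %u/%u\n", m, n);	/* N lands on 0x800000 */
		return 0;
	}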
@@ -4034,142 +4124,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4034 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4124 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4035} 4125}
4036 4126
4037/**
4038 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4039 * @crtc: CRTC structure
4040 * @mode: requested mode
4041 *
4042 * A pipe may be connected to one or more outputs. Based on the depth of the
4043 * attached framebuffer, choose a good color depth to use on the pipe.
4044 *
4045 * If possible, match the pipe depth to the fb depth. In some cases, this
4046 * isn't ideal, because the connected output supports a lesser or restricted
4047 * set of depths. Resolve that here:
4048 * LVDS typically supports only 6bpc, so clamp down in that case
4049 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4050 * Displays may support a restricted set as well, check EDID and clamp as
4051 * appropriate.
4052 * DP may want to dither down to 6bpc to fit larger modes
4053 *
4054 * RETURNS:
4055 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4056 * true if they don't match).
4057 */
4058static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4059 struct drm_framebuffer *fb,
4060 unsigned int *pipe_bpp,
4061 struct drm_display_mode *mode)
4062{
4063 struct drm_device *dev = crtc->dev;
4064 struct drm_i915_private *dev_priv = dev->dev_private;
4065 struct drm_connector *connector;
4066 struct intel_encoder *intel_encoder;
4067 unsigned int display_bpc = UINT_MAX, bpc;
4068
4069 /* Walk the encoders & connectors on this crtc, get min bpc */
4070 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4071
4072 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4073 unsigned int lvds_bpc;
4074
4075 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4076 LVDS_A3_POWER_UP)
4077 lvds_bpc = 8;
4078 else
4079 lvds_bpc = 6;
4080
4081 if (lvds_bpc < display_bpc) {
4082 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4083 display_bpc = lvds_bpc;
4084 }
4085 continue;
4086 }
4087
4088 /* Not one of the known troublemakers, check the EDID */
4089 list_for_each_entry(connector, &dev->mode_config.connector_list,
4090 head) {
4091 if (connector->encoder != &intel_encoder->base)
4092 continue;
4093
4094 /* Don't use an invalid EDID bpc value */
4095 if (connector->display_info.bpc &&
4096 connector->display_info.bpc < display_bpc) {
4097 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4098 display_bpc = connector->display_info.bpc;
4099 }
4100 }
4101
4102 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4103 /* Use VBT settings if we have an eDP panel */
4104 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4105
4106 if (edp_bpc && edp_bpc < display_bpc) {
4107 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4108 display_bpc = edp_bpc;
4109 }
4110 continue;
4111 }
4112
4113 /*
4114 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4115 * through, clamp it down. (Note: >12bpc will be caught below.)
4116 */
4117 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4118 if (display_bpc > 8 && display_bpc < 12) {
4119 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4120 display_bpc = 12;
4121 } else {
4122 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4123 display_bpc = 8;
4124 }
4125 }
4126 }
4127
4128 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4129 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4130 display_bpc = 6;
4131 }
4132
4133 /*
4134 * We could just drive the pipe at the highest bpc all the time and
4135 * enable dithering as needed, but that costs bandwidth. So choose
4136 * the minimum value that expresses the full color range of the fb but
4137 * also stays within the max display bpc discovered above.
4138 */
4139
4140 switch (fb->depth) {
4141 case 8:
4142 bpc = 8; /* since we go through a colormap */
4143 break;
4144 case 15:
4145 case 16:
4146 bpc = 6; /* min is 18bpp */
4147 break;
4148 case 24:
4149 bpc = 8;
4150 break;
4151 case 30:
4152 bpc = 10;
4153 break;
4154 case 48:
4155 bpc = 12;
4156 break;
4157 default:
4158 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4159 bpc = min((unsigned int)8, display_bpc);
4160 break;
4161 }
4162
4163 display_bpc = min(display_bpc, bpc);
4164
4165 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4166 display_bpc, bpc);
4167
4168 *pipe_bpp = display_bpc * 3;
4169
4170 return display_bpc != bpc;
4171}
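
The removed helper above encodes a simple policy: derive a bpc from the framebuffer depth, clamp it to the tightest sink limit, and dither only when the clamp actually loses depth. A minimal userspace model of that policy, assuming a 6bpc LVDS sink (illustrative values, not the driver's code path):

/* Minimal userspace model of the bpc clamp above; illustrative only. */
#include <stdio.h>

static unsigned int fb_depth_to_bpc(unsigned int depth)
{
	switch (depth) {
	case 8:  return 8;	/* paletted, expanded through a colormap */
	case 15:
	case 16: return 6;	/* minimum pipe depth is 18bpp */
	case 24: return 8;
	case 30: return 10;
	case 48: return 12;
	default: return 8;	/* unsupported depth, assume 24 bits */
	}
}

int main(void)
{
	unsigned int display_bpc = 6;	/* assume a 6bpc LVDS panel */
	unsigned int bpc = fb_depth_to_bpc(24);
	unsigned int pipe_bpc = display_bpc < bpc ? display_bpc : bpc;

	/* Dither when the fb carries more depth than the sink accepts. */
	printf("pipe_bpp=%u dither=%s\n", pipe_bpc * 3,
	       pipe_bpc != bpc ? "yes" : "no");
	return 0;
}

With a 24bpp framebuffer and a 6bpc panel this prints pipe_bpp=18 dither=yes, matching the "true if display bpc and pipe bpc don't match" contract documented above.
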
4172
4173static int vlv_get_refclk(struct drm_crtc *crtc) 4127static int vlv_get_refclk(struct drm_crtc *crtc)
4174{ 4128{
4175 struct drm_device *dev = crtc->dev; 4129 struct drm_device *dev = crtc->dev;
@@ -4214,37 +4168,38 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4214 return refclk; 4168 return refclk;
4215} 4169}
4216 4170
4217static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, 4171static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
4218 intel_clock_t *clock)
4219{ 4172{
4173 unsigned dotclock = crtc->config.adjusted_mode.clock;
4174 struct dpll *clock = &crtc->config.dpll;
4175
4220 /* SDVO TV has fixed PLL values that depend on its clock range; 4176 /* SDVO TV has fixed PLL values that depend on its clock range;
4221 this mirrors the VBIOS setting. */ 4177 this mirrors the VBIOS setting. */
4222 if (adjusted_mode->clock >= 100000 4178 if (dotclock >= 100000 && dotclock < 140500) {
4223 && adjusted_mode->clock < 140500) {
4224 clock->p1 = 2; 4179 clock->p1 = 2;
4225 clock->p2 = 10; 4180 clock->p2 = 10;
4226 clock->n = 3; 4181 clock->n = 3;
4227 clock->m1 = 16; 4182 clock->m1 = 16;
4228 clock->m2 = 8; 4183 clock->m2 = 8;
4229 } else if (adjusted_mode->clock >= 140500 4184 } else if (dotclock >= 140500 && dotclock <= 200000) {
4230 && adjusted_mode->clock <= 200000) {
4231 clock->p1 = 1; 4185 clock->p1 = 1;
4232 clock->p2 = 10; 4186 clock->p2 = 10;
4233 clock->n = 6; 4187 clock->n = 6;
4234 clock->m1 = 12; 4188 clock->m1 = 12;
4235 clock->m2 = 8; 4189 clock->m2 = 8;
4236 } 4190 }
4191
4192 crtc->config.clock_set = true;
4237} 4193}
4238 4194
4239static void i9xx_update_pll_dividers(struct drm_crtc *crtc, 4195static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4240 intel_clock_t *clock,
4241 intel_clock_t *reduced_clock) 4196 intel_clock_t *reduced_clock)
4242{ 4197{
4243 struct drm_device *dev = crtc->dev; 4198 struct drm_device *dev = crtc->base.dev;
4244 struct drm_i915_private *dev_priv = dev->dev_private; 4199 struct drm_i915_private *dev_priv = dev->dev_private;
4245 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4200 int pipe = crtc->pipe;
4246 int pipe = intel_crtc->pipe;
4247 u32 fp, fp2 = 0; 4201 u32 fp, fp2 = 0;
4202 struct dpll *clock = &crtc->config.dpll;
4248 4203
4249 if (IS_PINEVIEW(dev)) { 4204 if (IS_PINEVIEW(dev)) {
4250 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; 4205 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
@@ -4260,26 +4215,29 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4260 4215
4261 I915_WRITE(FP0(pipe), fp); 4216 I915_WRITE(FP0(pipe), fp);
4262 4217
4263 intel_crtc->lowfreq_avail = false; 4218 crtc->lowfreq_avail = false;
4264 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4219 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4265 reduced_clock && i915_powersave) { 4220 reduced_clock && i915_powersave) {
4266 I915_WRITE(FP1(pipe), fp2); 4221 I915_WRITE(FP1(pipe), fp2);
4267 intel_crtc->lowfreq_avail = true; 4222 crtc->lowfreq_avail = true;
4268 } else { 4223 } else {
4269 I915_WRITE(FP1(pipe), fp); 4224 I915_WRITE(FP1(pipe), fp);
4270 } 4225 }
4271} 4226}
4272 4227
4273static void vlv_update_pll(struct drm_crtc *crtc, 4228static void intel_dp_set_m_n(struct intel_crtc *crtc)
4274 struct drm_display_mode *mode,
4275 struct drm_display_mode *adjusted_mode,
4276 intel_clock_t *clock, intel_clock_t *reduced_clock,
4277 int num_connectors)
4278{ 4229{
4279 struct drm_device *dev = crtc->dev; 4230 if (crtc->config.has_pch_encoder)
4231 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4232 else
4233 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4234}
4235
4236static void vlv_update_pll(struct intel_crtc *crtc)
4237{
4238 struct drm_device *dev = crtc->base.dev;
4280 struct drm_i915_private *dev_priv = dev->dev_private; 4239 struct drm_i915_private *dev_priv = dev->dev_private;
4281 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4240 int pipe = crtc->pipe;
4282 int pipe = intel_crtc->pipe;
4283 u32 dpll, mdiv, pdiv; 4241 u32 dpll, mdiv, pdiv;
4284 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4242 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4285 bool is_sdvo; 4243 bool is_sdvo;
@@ -4287,8 +4245,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4287 4245
4288 mutex_lock(&dev_priv->dpio_lock); 4246 mutex_lock(&dev_priv->dpio_lock);
4289 4247
4290 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4248 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
4291 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4249 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4292 4250
4293 dpll = DPLL_VGA_MODE_DIS; 4251 dpll = DPLL_VGA_MODE_DIS;
4294 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; 4252 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
@@ -4298,11 +4256,11 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4298 I915_WRITE(DPLL(pipe), dpll); 4256 I915_WRITE(DPLL(pipe), dpll);
4299 POSTING_READ(DPLL(pipe)); 4257 POSTING_READ(DPLL(pipe));
4300 4258
4301 bestn = clock->n; 4259 bestn = crtc->config.dpll.n;
4302 bestm1 = clock->m1; 4260 bestm1 = crtc->config.dpll.m1;
4303 bestm2 = clock->m2; 4261 bestm2 = crtc->config.dpll.m2;
4304 bestp1 = clock->p1; 4262 bestp1 = crtc->config.dpll.p1;
4305 bestp2 = clock->p2; 4263 bestp2 = crtc->config.dpll.p2;
4306 4264
4307 /* 4265 /*
4308 * In Valleyview PLL and program lane counter registers are exposed 4266 * In Valleyview PLL and program lane counter registers are exposed
@@ -4334,8 +4292,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4334 4292
4335 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); 4293 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
4336 4294
4337 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4295 if (crtc->config.has_dp_encoder)
4338 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4296 intel_dp_set_m_n(crtc);
4339 4297
4340 I915_WRITE(DPLL(pipe), dpll); 4298 I915_WRITE(DPLL(pipe), dpll);
4341 4299
@@ -4345,26 +4303,25 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4345 4303
4346 temp = 0; 4304 temp = 0;
4347 if (is_sdvo) { 4305 if (is_sdvo) {
4348 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4306 temp = 0;
4349 if (temp > 1) 4307 if (crtc->config.pixel_multiplier > 1) {
4350 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4308 temp = (crtc->config.pixel_multiplier - 1)
4351 else 4309 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4352 temp = 0; 4310 }
4353 } 4311 }
4354 I915_WRITE(DPLL_MD(pipe), temp); 4312 I915_WRITE(DPLL_MD(pipe), temp);
4355 POSTING_READ(DPLL_MD(pipe)); 4313 POSTING_READ(DPLL_MD(pipe));
4356 4314
4357 /* Now program lane control registers */ 4315 /* Now program lane control registers */
4358 if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) 4316 if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
4359 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 4317 || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
4360 {
4361 temp = 0x1000C4; 4318 temp = 0x1000C4;
4362 if(pipe == 1) 4319 if(pipe == 1)
4363 temp |= (1 << 21); 4320 temp |= (1 << 21);
4364 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); 4321 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
4365 } 4322 }
4366 if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) 4323
4367 { 4324 if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
4368 temp = 0x1000C4; 4325 temp = 0x1000C4;
4369 if(pipe == 1) 4326 if(pipe == 1)
4370 temp |= (1 << 21); 4327 temp |= (1 << 21);
@@ -4374,40 +4331,39 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4374 mutex_unlock(&dev_priv->dpio_lock); 4331 mutex_unlock(&dev_priv->dpio_lock);
4375} 4332}
4376 4333
4377static void i9xx_update_pll(struct drm_crtc *crtc, 4334static void i9xx_update_pll(struct intel_crtc *crtc,
4378 struct drm_display_mode *mode, 4335 intel_clock_t *reduced_clock,
4379 struct drm_display_mode *adjusted_mode,
4380 intel_clock_t *clock, intel_clock_t *reduced_clock,
4381 int num_connectors) 4336 int num_connectors)
4382{ 4337{
4383 struct drm_device *dev = crtc->dev; 4338 struct drm_device *dev = crtc->base.dev;
4384 struct drm_i915_private *dev_priv = dev->dev_private; 4339 struct drm_i915_private *dev_priv = dev->dev_private;
4385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4386 struct intel_encoder *encoder; 4340 struct intel_encoder *encoder;
4387 int pipe = intel_crtc->pipe; 4341 int pipe = crtc->pipe;
4388 u32 dpll; 4342 u32 dpll;
4389 bool is_sdvo; 4343 bool is_sdvo;
4344 struct dpll *clock = &crtc->config.dpll;
4390 4345
4391 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4346 i9xx_update_pll_dividers(crtc, reduced_clock);
4392 4347
4393 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4348 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
4394 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4349 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4395 4350
4396 dpll = DPLL_VGA_MODE_DIS; 4351 dpll = DPLL_VGA_MODE_DIS;
4397 4352
4398 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4353 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
4399 dpll |= DPLLB_MODE_LVDS; 4354 dpll |= DPLLB_MODE_LVDS;
4400 else 4355 else
4401 dpll |= DPLLB_MODE_DAC_SERIAL; 4356 dpll |= DPLLB_MODE_DAC_SERIAL;
4357
4402 if (is_sdvo) { 4358 if (is_sdvo) {
4403 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4359 if ((crtc->config.pixel_multiplier > 1) &&
4404 if (pixel_multiplier > 1) { 4360 (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
4405 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4361 dpll |= (crtc->config.pixel_multiplier - 1)
4406 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4362 << SDVO_MULTIPLIER_SHIFT_HIRES;
4407 } 4363 }
4408 dpll |= DPLL_DVO_HIGH_SPEED; 4364 dpll |= DPLL_DVO_HIGH_SPEED;
4409 } 4365 }
4410 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4366 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4411 dpll |= DPLL_DVO_HIGH_SPEED; 4367 dpll |= DPLL_DVO_HIGH_SPEED;
4412 4368
4413 /* compute bitmask from p1 value */ 4369 /* compute bitmask from p1 value */
@@ -4435,13 +4391,13 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4435 if (INTEL_INFO(dev)->gen >= 4) 4391 if (INTEL_INFO(dev)->gen >= 4)
4436 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 4392 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4437 4393
4438 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4394 if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
4439 dpll |= PLL_REF_INPUT_TVCLKINBC; 4395 dpll |= PLL_REF_INPUT_TVCLKINBC;
4440 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4396 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
4441 /* XXX: just matching BIOS for now */ 4397 /* XXX: just matching BIOS for now */
4442 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4398 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4443 dpll |= 3; 4399 dpll |= 3;
4444 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4400 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4445 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4401 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4446 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4402 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4447 else 4403 else
@@ -4452,12 +4408,12 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4452 POSTING_READ(DPLL(pipe)); 4408 POSTING_READ(DPLL(pipe));
4453 udelay(150); 4409 udelay(150);
4454 4410
4455 for_each_encoder_on_crtc(dev, crtc, encoder) 4411 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4456 if (encoder->pre_pll_enable) 4412 if (encoder->pre_pll_enable)
4457 encoder->pre_pll_enable(encoder); 4413 encoder->pre_pll_enable(encoder);
4458 4414
4459 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4415 if (crtc->config.has_dp_encoder)
4460 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4416 intel_dp_set_m_n(crtc);
4461 4417
4462 I915_WRITE(DPLL(pipe), dpll); 4418 I915_WRITE(DPLL(pipe), dpll);
4463 4419
@@ -4468,11 +4424,11 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4468 if (INTEL_INFO(dev)->gen >= 4) { 4424 if (INTEL_INFO(dev)->gen >= 4) {
4469 u32 temp = 0; 4425 u32 temp = 0;
4470 if (is_sdvo) { 4426 if (is_sdvo) {
4471 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4427 temp = 0;
4472 if (temp > 1) 4428 if (crtc->config.pixel_multiplier > 1) {
4473 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4429 temp = (crtc->config.pixel_multiplier - 1)
4474 else 4430 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4475 temp = 0; 4431 }
4476 } 4432 }
4477 I915_WRITE(DPLL_MD(pipe), temp); 4433 I915_WRITE(DPLL_MD(pipe), temp);
4478 } else { 4434 } else {
@@ -4485,23 +4441,23 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4485 } 4441 }
4486} 4442}
4487 4443
4488static void i8xx_update_pll(struct drm_crtc *crtc, 4444static void i8xx_update_pll(struct intel_crtc *crtc,
4489 struct drm_display_mode *adjusted_mode, 4445 struct drm_display_mode *adjusted_mode,
4490 intel_clock_t *clock, intel_clock_t *reduced_clock, 4446 intel_clock_t *reduced_clock,
4491 int num_connectors) 4447 int num_connectors)
4492{ 4448{
4493 struct drm_device *dev = crtc->dev; 4449 struct drm_device *dev = crtc->base.dev;
4494 struct drm_i915_private *dev_priv = dev->dev_private; 4450 struct drm_i915_private *dev_priv = dev->dev_private;
4495 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4496 struct intel_encoder *encoder; 4451 struct intel_encoder *encoder;
4497 int pipe = intel_crtc->pipe; 4452 int pipe = crtc->pipe;
4498 u32 dpll; 4453 u32 dpll;
4454 struct dpll *clock = &crtc->config.dpll;
4499 4455
4500 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4456 i9xx_update_pll_dividers(crtc, reduced_clock);
4501 4457
4502 dpll = DPLL_VGA_MODE_DIS; 4458 dpll = DPLL_VGA_MODE_DIS;
4503 4459
4504 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 4460 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
4505 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4461 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4506 } else { 4462 } else {
4507 if (clock->p1 == 2) 4463 if (clock->p1 == 2)
@@ -4512,11 +4468,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4512 dpll |= PLL_P2_DIVIDE_BY_4; 4468 dpll |= PLL_P2_DIVIDE_BY_4;
4513 } 4469 }
4514 4470
4515 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4471 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4516 /* XXX: just matching BIOS for now */
4517 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4518 dpll |= 3;
4519 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4520 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4472 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4521 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4473 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4522 else 4474 else
@@ -4527,7 +4479,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4527 POSTING_READ(DPLL(pipe)); 4479 POSTING_READ(DPLL(pipe));
4528 udelay(150); 4480 udelay(150);
4529 4481
4530 for_each_encoder_on_crtc(dev, crtc, encoder) 4482 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4531 if (encoder->pre_pll_enable) 4483 if (encoder->pre_pll_enable)
4532 encoder->pre_pll_enable(encoder); 4484 encoder->pre_pll_enable(encoder);
4533 4485
@@ -4552,7 +4504,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4552 struct drm_device *dev = intel_crtc->base.dev; 4504 struct drm_device *dev = intel_crtc->base.dev;
4553 struct drm_i915_private *dev_priv = dev->dev_private; 4505 struct drm_i915_private *dev_priv = dev->dev_private;
4554 enum pipe pipe = intel_crtc->pipe; 4506 enum pipe pipe = intel_crtc->pipe;
4555 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 4507 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4556 uint32_t vsyncshift; 4508 uint32_t vsyncshift;
4557 4509
4558 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4510 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -4603,22 +4555,92 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4603 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4555 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4604} 4556}
4605 4557
4558static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4559{
4560 struct drm_device *dev = intel_crtc->base.dev;
4561 struct drm_i915_private *dev_priv = dev->dev_private;
4562 uint32_t pipeconf;
4563
4564 pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
4565
4566 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4567 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4568 * core speed.
4569 *
4570 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4571 * pipe == 0 check?
4572 */
4573 if (intel_crtc->config.requested_mode.clock >
4574 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4575 pipeconf |= PIPECONF_DOUBLE_WIDE;
4576 else
4577 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4578 }
4579
4580 /* default to 8bpc */
4581 pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
4582 if (intel_crtc->config.has_dp_encoder) {
4583 if (intel_crtc->config.dither) {
4584 pipeconf |= PIPECONF_6BPC |
4585 PIPECONF_DITHER_EN |
4586 PIPECONF_DITHER_TYPE_SP;
4587 }
4588 }
4589
4590 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
4591 INTEL_OUTPUT_EDP)) {
4592 if (intel_crtc->config.dither) {
4593 pipeconf |= PIPECONF_6BPC |
4594 PIPECONF_ENABLE |
4595 I965_PIPECONF_ACTIVE;
4596 }
4597 }
4598
4599 if (HAS_PIPE_CXSR(dev)) {
4600 if (intel_crtc->lowfreq_avail) {
4601 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4602 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4603 } else {
4604 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4605 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4606 }
4607 }
4608
4609 pipeconf &= ~PIPECONF_INTERLACE_MASK;
4610 if (!IS_GEN2(dev) &&
4611 intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
4612 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4613 else
4614 pipeconf |= PIPECONF_PROGRESSIVE;
4615
4616 if (IS_VALLEYVIEW(dev)) {
4617 if (intel_crtc->config.limited_color_range)
4618 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4619 else
4620 pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
4621 }
4622
4623 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
4624 POSTING_READ(PIPECONF(intel_crtc->pipe));
4625}
4626
4606static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4627static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4607 struct drm_display_mode *mode,
4608 struct drm_display_mode *adjusted_mode,
4609 int x, int y, 4628 int x, int y,
4610 struct drm_framebuffer *fb) 4629 struct drm_framebuffer *fb)
4611{ 4630{
4612 struct drm_device *dev = crtc->dev; 4631 struct drm_device *dev = crtc->dev;
4613 struct drm_i915_private *dev_priv = dev->dev_private; 4632 struct drm_i915_private *dev_priv = dev->dev_private;
4614 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4633 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4634 struct drm_display_mode *adjusted_mode =
4635 &intel_crtc->config.adjusted_mode;
4636 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4615 int pipe = intel_crtc->pipe; 4637 int pipe = intel_crtc->pipe;
4616 int plane = intel_crtc->plane; 4638 int plane = intel_crtc->plane;
4617 int refclk, num_connectors = 0; 4639 int refclk, num_connectors = 0;
4618 intel_clock_t clock, reduced_clock; 4640 intel_clock_t clock, reduced_clock;
4619 u32 dspcntr, pipeconf; 4641 u32 dspcntr;
4620 bool ok, has_reduced_clock = false, is_sdvo = false; 4642 bool ok, has_reduced_clock = false, is_sdvo = false;
4621 bool is_lvds = false, is_tv = false, is_dp = false; 4643 bool is_lvds = false, is_tv = false;
4622 struct intel_encoder *encoder; 4644 struct intel_encoder *encoder;
4623 const intel_limit_t *limit; 4645 const intel_limit_t *limit;
4624 int ret; 4646 int ret;
@@ -4637,9 +4659,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4637 case INTEL_OUTPUT_TVOUT: 4659 case INTEL_OUTPUT_TVOUT:
4638 is_tv = true; 4660 is_tv = true;
4639 break; 4661 break;
4640 case INTEL_OUTPUT_DISPLAYPORT:
4641 is_dp = true;
4642 break;
4643 } 4662 }
4644 4663
4645 num_connectors++; 4664 num_connectors++;
@@ -4676,86 +4695,42 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4676 &clock, 4695 &clock,
4677 &reduced_clock); 4696 &reduced_clock);
4678 } 4697 }
4698 /* Compat-code for transition, will disappear. */
4699 if (!intel_crtc->config.clock_set) {
4700 intel_crtc->config.dpll.n = clock.n;
4701 intel_crtc->config.dpll.m1 = clock.m1;
4702 intel_crtc->config.dpll.m2 = clock.m2;
4703 intel_crtc->config.dpll.p1 = clock.p1;
4704 intel_crtc->config.dpll.p2 = clock.p2;
4705 }
4679 4706
4680 if (is_sdvo && is_tv) 4707 if (is_sdvo && is_tv)
4681 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); 4708 i9xx_adjust_sdvo_tv_clock(intel_crtc);
4682 4709
4683 if (IS_GEN2(dev)) 4710 if (IS_GEN2(dev))
4684 i8xx_update_pll(crtc, adjusted_mode, &clock, 4711 i8xx_update_pll(intel_crtc, adjusted_mode,
4685 has_reduced_clock ? &reduced_clock : NULL, 4712 has_reduced_clock ? &reduced_clock : NULL,
4686 num_connectors); 4713 num_connectors);
4687 else if (IS_VALLEYVIEW(dev)) 4714 else if (IS_VALLEYVIEW(dev))
4688 vlv_update_pll(crtc, mode, adjusted_mode, &clock, 4715 vlv_update_pll(intel_crtc);
4689 has_reduced_clock ? &reduced_clock : NULL,
4690 num_connectors);
4691 else 4716 else
4692 i9xx_update_pll(crtc, mode, adjusted_mode, &clock, 4717 i9xx_update_pll(intel_crtc,
4693 has_reduced_clock ? &reduced_clock : NULL, 4718 has_reduced_clock ? &reduced_clock : NULL,
4694 num_connectors); 4719 num_connectors);
4695 4720
4696 /* setup pipeconf */
4697 pipeconf = I915_READ(PIPECONF(pipe));
4698
4699 /* Set up the display plane register */ 4721 /* Set up the display plane register */
4700 dspcntr = DISPPLANE_GAMMA_ENABLE; 4722 dspcntr = DISPPLANE_GAMMA_ENABLE;
4701 4723
4702 if (pipe == 0) 4724 if (!IS_VALLEYVIEW(dev)) {
4703 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4725 if (pipe == 0)
4704 else 4726 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4705 dspcntr |= DISPPLANE_SEL_PIPE_B;
4706
4707 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4708 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4709 * core speed.
4710 *
4711 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4712 * pipe == 0 check?
4713 */
4714 if (mode->clock >
4715 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4716 pipeconf |= PIPECONF_DOUBLE_WIDE;
4717 else 4727 else
4718 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4728 dspcntr |= DISPPLANE_SEL_PIPE_B;
4719 }
4720
4721 /* default to 8bpc */
4722 pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
4723 if (is_dp) {
4724 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4725 pipeconf |= PIPECONF_6BPC |
4726 PIPECONF_DITHER_EN |
4727 PIPECONF_DITHER_TYPE_SP;
4728 }
4729 }
4730
4731 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4732 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4733 pipeconf |= PIPECONF_6BPC |
4734 PIPECONF_ENABLE |
4735 I965_PIPECONF_ACTIVE;
4736 }
4737 } 4729 }
4738 4730
4739 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4731 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4740 drm_mode_debug_printmodeline(mode); 4732 drm_mode_debug_printmodeline(mode);
4741 4733
4742 if (HAS_PIPE_CXSR(dev)) {
4743 if (intel_crtc->lowfreq_avail) {
4744 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4745 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4746 } else {
4747 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4748 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4749 }
4750 }
4751
4752 pipeconf &= ~PIPECONF_INTERLACE_MASK;
4753 if (!IS_GEN2(dev) &&
4754 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4755 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4756 else
4757 pipeconf |= PIPECONF_PROGRESSIVE;
4758
4759 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 4734 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
4760 4735
4761 /* pipesrc and dspsize control the size that is scaled from, 4736 /* pipesrc and dspsize control the size that is scaled from,
@@ -4766,8 +4741,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4766 (mode->hdisplay - 1)); 4741 (mode->hdisplay - 1));
4767 I915_WRITE(DSPPOS(plane), 0); 4742 I915_WRITE(DSPPOS(plane), 0);
4768 4743
4769 I915_WRITE(PIPECONF(pipe), pipeconf); 4744 i9xx_set_pipeconf(intel_crtc);
4770 POSTING_READ(PIPECONF(pipe)); 4745
4771 intel_enable_pipe(dev_priv, pipe, false); 4746 intel_enable_pipe(dev_priv, pipe, false);
4772 4747
4773 intel_wait_for_vblank(dev, pipe); 4748 intel_wait_for_vblank(dev, pipe);
@@ -4782,12 +4757,26 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4782 return ret; 4757 return ret;
4783} 4758}
4784 4759
4760static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4761 struct intel_crtc_config *pipe_config)
4762{
4763 struct drm_device *dev = crtc->base.dev;
4764 struct drm_i915_private *dev_priv = dev->dev_private;
4765 uint32_t tmp;
4766
4767 tmp = I915_READ(PIPECONF(crtc->pipe));
4768 if (!(tmp & PIPECONF_ENABLE))
4769 return false;
4770
4771 return true;
4772}
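
The new get_pipe_config hooks share one shape: read PIPECONF, fail fast when the enable bit is clear, and only then fill in the config from hardware state. A toy model of that probe; the bit position is an assumption:

/* Toy model of the PIPECONF probe above; register layout is invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIPECONF_ENABLE (1u << 31)	/* assumed bit position */

static bool get_pipe_config(uint32_t pipeconf)
{
	if (!(pipeconf & PIPECONF_ENABLE))
		return false;
	/* ...fill in pipe_config from the remaining fields here... */
	return true;
}

int main(void)
{
	printf("enabled pipe:  %d\n", get_pipe_config(PIPECONF_ENABLE | 0x5));
	printf("disabled pipe: %d\n", get_pipe_config(0));
	return 0;
}
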
4773
4785static void ironlake_init_pch_refclk(struct drm_device *dev) 4774static void ironlake_init_pch_refclk(struct drm_device *dev)
4786{ 4775{
4787 struct drm_i915_private *dev_priv = dev->dev_private; 4776 struct drm_i915_private *dev_priv = dev->dev_private;
4788 struct drm_mode_config *mode_config = &dev->mode_config; 4777 struct drm_mode_config *mode_config = &dev->mode_config;
4789 struct intel_encoder *encoder; 4778 struct intel_encoder *encoder;
4790 u32 temp; 4779 u32 val, final;
4791 bool has_lvds = false; 4780 bool has_lvds = false;
4792 bool has_cpu_edp = false; 4781 bool has_cpu_edp = false;
4793 bool has_pch_edp = false; 4782 bool has_pch_edp = false;
@@ -4830,70 +4819,109 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
4830 * PCH B stepping, previous chipset stepping should be 4819 * PCH B stepping, previous chipset stepping should be
4831 * ignoring this setting. 4820 * ignoring this setting.
4832 */ 4821 */
4833 temp = I915_READ(PCH_DREF_CONTROL); 4822 val = I915_READ(PCH_DREF_CONTROL);
4823
4824 /* As we must carefully and slowly disable/enable each source in turn,
4825 * compute the final state we want first and check if we need to
4826 * make any changes at all.
4827 */
4828 final = val;
4829 final &= ~DREF_NONSPREAD_SOURCE_MASK;
4830 if (has_ck505)
4831 final |= DREF_NONSPREAD_CK505_ENABLE;
4832 else
4833 final |= DREF_NONSPREAD_SOURCE_ENABLE;
4834
4835 final &= ~DREF_SSC_SOURCE_MASK;
4836 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4837 final &= ~DREF_SSC1_ENABLE;
4838
4839 if (has_panel) {
4840 final |= DREF_SSC_SOURCE_ENABLE;
4841
4842 if (intel_panel_use_ssc(dev_priv) && can_ssc)
4843 final |= DREF_SSC1_ENABLE;
4844
4845 if (has_cpu_edp) {
4846 if (intel_panel_use_ssc(dev_priv) && can_ssc)
4847 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4848 else
4849 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4850 } else
4851 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4852 } else {
4853 final |= DREF_SSC_SOURCE_DISABLE;
4854 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4855 }
4856
4857 if (final == val)
4858 return;
4859
4834 /* Always enable nonspread source */ 4860 /* Always enable nonspread source */
4835 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 4861 val &= ~DREF_NONSPREAD_SOURCE_MASK;
4836 4862
4837 if (has_ck505) 4863 if (has_ck505)
4838 temp |= DREF_NONSPREAD_CK505_ENABLE; 4864 val |= DREF_NONSPREAD_CK505_ENABLE;
4839 else 4865 else
4840 temp |= DREF_NONSPREAD_SOURCE_ENABLE; 4866 val |= DREF_NONSPREAD_SOURCE_ENABLE;
4841 4867
4842 if (has_panel) { 4868 if (has_panel) {
4843 temp &= ~DREF_SSC_SOURCE_MASK; 4869 val &= ~DREF_SSC_SOURCE_MASK;
4844 temp |= DREF_SSC_SOURCE_ENABLE; 4870 val |= DREF_SSC_SOURCE_ENABLE;
4845 4871
4846 /* SSC must be turned on before enabling the CPU output */ 4872 /* SSC must be turned on before enabling the CPU output */
4847 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 4873 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4848 DRM_DEBUG_KMS("Using SSC on panel\n"); 4874 DRM_DEBUG_KMS("Using SSC on panel\n");
4849 temp |= DREF_SSC1_ENABLE; 4875 val |= DREF_SSC1_ENABLE;
4850 } else 4876 } else
4851 temp &= ~DREF_SSC1_ENABLE; 4877 val &= ~DREF_SSC1_ENABLE;
4852 4878
4853 /* Get SSC going before enabling the outputs */ 4879 /* Get SSC going before enabling the outputs */
4854 I915_WRITE(PCH_DREF_CONTROL, temp); 4880 I915_WRITE(PCH_DREF_CONTROL, val);
4855 POSTING_READ(PCH_DREF_CONTROL); 4881 POSTING_READ(PCH_DREF_CONTROL);
4856 udelay(200); 4882 udelay(200);
4857 4883
4858 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4884 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4859 4885
4860 /* Enable CPU source on CPU attached eDP */ 4886 /* Enable CPU source on CPU attached eDP */
4861 if (has_cpu_edp) { 4887 if (has_cpu_edp) {
4862 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 4888 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4863 DRM_DEBUG_KMS("Using SSC on eDP\n"); 4889 DRM_DEBUG_KMS("Using SSC on eDP\n");
4864 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4890 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4865 } 4891 }
4866 else 4892 else
4867 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4893 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4868 } else 4894 } else
4869 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 4895 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4870 4896
4871 I915_WRITE(PCH_DREF_CONTROL, temp); 4897 I915_WRITE(PCH_DREF_CONTROL, val);
4872 POSTING_READ(PCH_DREF_CONTROL); 4898 POSTING_READ(PCH_DREF_CONTROL);
4873 udelay(200); 4899 udelay(200);
4874 } else { 4900 } else {
4875 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 4901 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4876 4902
4877 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4903 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4878 4904
4879 /* Turn off CPU output */ 4905 /* Turn off CPU output */
4880 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 4906 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4881 4907
4882 I915_WRITE(PCH_DREF_CONTROL, temp); 4908 I915_WRITE(PCH_DREF_CONTROL, val);
4883 POSTING_READ(PCH_DREF_CONTROL); 4909 POSTING_READ(PCH_DREF_CONTROL);
4884 udelay(200); 4910 udelay(200);
4885 4911
4886 /* Turn off the SSC source */ 4912 /* Turn off the SSC source */
4887 temp &= ~DREF_SSC_SOURCE_MASK; 4913 val &= ~DREF_SSC_SOURCE_MASK;
4888 temp |= DREF_SSC_SOURCE_DISABLE; 4914 val |= DREF_SSC_SOURCE_DISABLE;
4889 4915
4890 /* Turn off SSC1 */ 4916 /* Turn off SSC1 */
4891 temp &= ~ DREF_SSC1_ENABLE; 4917 val &= ~DREF_SSC1_ENABLE;
4892 4918
4893 I915_WRITE(PCH_DREF_CONTROL, temp); 4919 I915_WRITE(PCH_DREF_CONTROL, val);
4894 POSTING_READ(PCH_DREF_CONTROL); 4920 POSTING_READ(PCH_DREF_CONTROL);
4895 udelay(200); 4921 udelay(200);
4896 } 4922 }
4923
4924 BUG_ON(val != final);
4897} 4925}
4898 4926
4899/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ 4927/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
@@ -4958,13 +4986,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
4958 tmp |= (0x12 << 24); 4986 tmp |= (0x12 << 24);
4959 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 4987 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
4960 4988
4961 if (!is_sdv) {
4962 tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
4963 tmp &= ~(0x3 << 6);
4964 tmp |= (1 << 6) | (1 << 0);
4965 intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
4966 }
4967
4968 if (is_sdv) { 4989 if (is_sdv) {
4969 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); 4990 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
4970 tmp |= 0x7FFF; 4991 tmp |= 0x7FFF;
@@ -5118,7 +5139,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5118 val = I915_READ(PIPECONF(pipe)); 5139 val = I915_READ(PIPECONF(pipe));
5119 5140
5120 val &= ~PIPECONF_BPC_MASK; 5141 val &= ~PIPECONF_BPC_MASK;
5121 switch (intel_crtc->bpp) { 5142 switch (intel_crtc->config.pipe_bpp) {
5122 case 18: 5143 case 18:
5123 val |= PIPECONF_6BPC; 5144 val |= PIPECONF_6BPC;
5124 break; 5145 break;
@@ -5146,7 +5167,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5146 else 5167 else
5147 val |= PIPECONF_PROGRESSIVE; 5168 val |= PIPECONF_PROGRESSIVE;
5148 5169
5149 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5170 if (intel_crtc->config.limited_color_range)
5150 val |= PIPECONF_COLOR_RANGE_SELECT; 5171 val |= PIPECONF_COLOR_RANGE_SELECT;
5151 else 5172 else
5152 val &= ~PIPECONF_COLOR_RANGE_SELECT; 5173 val &= ~PIPECONF_COLOR_RANGE_SELECT;
@@ -5162,8 +5183,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5162 * is supported, but eventually this should handle various 5183 * is supported, but eventually this should handle various
5163 * RGB<->YCbCr scenarios as well. 5184 * RGB<->YCbCr scenarios as well.
5164 */ 5185 */
5165static void intel_set_pipe_csc(struct drm_crtc *crtc, 5186static void intel_set_pipe_csc(struct drm_crtc *crtc)
5166 const struct drm_display_mode *adjusted_mode)
5167{ 5187{
5168 struct drm_device *dev = crtc->dev; 5188 struct drm_device *dev = crtc->dev;
5169 struct drm_i915_private *dev_priv = dev->dev_private; 5189 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5178,7 +5198,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
5178 * consideration. 5198 * consideration.
5179 */ 5199 */
5180 5200
5181 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5201 if (intel_crtc->config.limited_color_range)
5182 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ 5202 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
5183 5203
5184 /* 5204 /*
@@ -5202,7 +5222,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
5202 if (INTEL_INFO(dev)->gen > 6) { 5222 if (INTEL_INFO(dev)->gen > 6) {
5203 uint16_t postoff = 0; 5223 uint16_t postoff = 0;
5204 5224
5205 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5225 if (intel_crtc->config.limited_color_range)
5206 postoff = (16 * (1 << 13) / 255) & 0x1fff; 5226 postoff = (16 * (1 << 13) / 255) & 0x1fff;
5207 5227
5208 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 5228 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -5213,7 +5233,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
5213 } else { 5233 } else {
5214 uint32_t mode = CSC_MODE_YUV_TO_RGB; 5234 uint32_t mode = CSC_MODE_YUV_TO_RGB;
5215 5235
5216 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5236 if (intel_crtc->config.limited_color_range)
5217 mode |= CSC_BLACK_SCREEN_OFFSET; 5237 mode |= CSC_BLACK_SCREEN_OFFSET;
5218 5238
5219 I915_WRITE(PIPE_CSC_MODE(pipe), mode); 5239 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
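
The limited-range constants in this hunk are plain fixed-point arithmetic: the scale factor (235 - 16)/255 lives in a 1.12 field with the low three bits masked off, and the post offset 16/255 in a 1.13 field. A worked check of both values:

/* Worked fixed-point arithmetic behind the limited-range CSC constants. */
#include <stdio.h>

int main(void)
{
	/* Scale factor (235 - 16)/255 in 1.12 fixed point, low bits masked. */
	unsigned int coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8;

	/* Post offset of 16/255 in 1.13 fixed point. */
	unsigned int postoff = (16 * (1 << 13) / 255) & 0x1fff;

	printf("coeff   = %#x (~%.4f)\n", coeff, coeff / 4096.0);
	printf("postoff = %#x (~%.4f)\n", postoff, postoff / 8192.0);
	return 0;
}

This prints coeff = 0xdb8 (~0.8574) and postoff = 0x202 (~0.0627): full-range RGB scaled and shifted into the 16-235 range.
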
@@ -5226,7 +5246,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc,
5226{ 5246{
5227 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5247 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5228 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5248 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5229 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 5249 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5230 uint32_t val; 5250 uint32_t val;
5231 5251
5232 val = I915_READ(PIPECONF(cpu_transcoder)); 5252 val = I915_READ(PIPECONF(cpu_transcoder));
@@ -5303,7 +5323,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5303 } 5323 }
5304 5324
5305 if (is_sdvo && is_tv) 5325 if (is_sdvo && is_tv)
5306 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock); 5326 i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
5307 5327
5308 return true; 5328 return true;
5309} 5329}
@@ -5344,7 +5364,7 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5344 return false; 5364 return false;
5345 } 5365 }
5346 5366
5347 if (dev_priv->num_pipe == 2) 5367 if (INTEL_INFO(dev)->num_pipes == 2)
5348 return true; 5368 return true;
5349 5369
5350 switch (intel_crtc->pipe) { 5370 switch (intel_crtc->pipe) {
@@ -5401,87 +5421,87 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5401 return bps / (link_bw * 8) + 1; 5421 return bps / (link_bw * 8) + 1;
5402} 5422}
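
The lane-count formula visible in this hunk rounds up by adding one after the integer divide. The 5% spread-spectrum headroom (bps = clock * bpp * 21 / 20) comes from the function body that the context elides, so treat it as an assumption here. A worked instance:

/* Worked instance of the lane count formula above. */
#include <stdio.h>

static int lanes_required(int target_clock_khz, int link_bw_khz, int bpp)
{
	/* 5% headroom for spread spectrum (assumed from the elided body). */
	unsigned int bps = (unsigned int)target_clock_khz * bpp * 21 / 20;

	return bps / (link_bw_khz * 8) + 1;
}

int main(void)
{
	/* 1920x1080@60 (148.5 MHz) at 24bpp over a 270 MHz FDI link */
	printf("lanes = %d\n", lanes_required(148500, 270000, 24));
	return 0;
}

For this assumed mode the answer is 2 lanes.
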
5403 5423
5404static void ironlake_set_m_n(struct drm_crtc *crtc, 5424void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5405 struct drm_display_mode *mode, 5425 struct intel_link_m_n *m_n)
5406 struct drm_display_mode *adjusted_mode)
5407{ 5426{
5408 struct drm_device *dev = crtc->dev; 5427 struct drm_device *dev = crtc->base.dev;
5409 struct drm_i915_private *dev_priv = dev->dev_private; 5428 struct drm_i915_private *dev_priv = dev->dev_private;
5410 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5429 int pipe = crtc->pipe;
5411 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5412 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
5413 struct intel_link_m_n m_n = {0};
5414 int target_clock, pixel_multiplier, lane, link_bw;
5415 bool is_dp = false, is_cpu_edp = false;
5416 5430
5417 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 5431 I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5418 switch (intel_encoder->type) { 5432 I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
5419 case INTEL_OUTPUT_DISPLAYPORT: 5433 I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
5420 is_dp = true; 5434 I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
5421 break; 5435}
5422 case INTEL_OUTPUT_EDP: 5436
5423 is_dp = true; 5437void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5424 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 5438 struct intel_link_m_n *m_n)
5425 is_cpu_edp = true; 5439{
5426 edp_encoder = intel_encoder; 5440 struct drm_device *dev = crtc->base.dev;
5427 break; 5441 struct drm_i915_private *dev_priv = dev->dev_private;
5428 } 5442 int pipe = crtc->pipe;
5429 } 5443 enum transcoder transcoder = crtc->config.cpu_transcoder;
5430 5444
5431 /* FDI link */ 5445 if (INTEL_INFO(dev)->gen >= 5) {
5432 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5446 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5433 lane = 0; 5447 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5434 /* CPU eDP doesn't require FDI link, so just set DP M/N 5448 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5435 according to current link config */ 5449 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5436 if (is_cpu_edp) {
5437 intel_edp_link_config(edp_encoder, &lane, &link_bw);
5438 } else { 5450 } else {
5439 /* FDI is a binary signal running at ~2.7GHz, encoding 5451 I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5440 * each output octet as 10 bits. The actual frequency 5452 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
5441 * is stored as a divider into a 100MHz clock, and the 5453 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
5442 * mode pixel clock is stored in units of 1KHz. 5454 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
5443 * Hence the bw of each lane in terms of the mode signal
5444 * is:
5445 */
5446 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5447 } 5455 }
5456}
5457
5458static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
5459{
5460 struct drm_device *dev = crtc->dev;
5461 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5462 struct drm_display_mode *adjusted_mode =
5463 &intel_crtc->config.adjusted_mode;
5464 struct intel_link_m_n m_n = {0};
5465 int target_clock, lane, link_bw;
5466
5467 /* FDI is a binary signal running at ~2.7GHz, encoding
5468 * each output octet as 10 bits. The actual frequency
5469 * is stored as a divider into a 100MHz clock, and the
5470 * mode pixel clock is stored in units of 1KHz.
5471 * Hence the bw of each lane in terms of the mode signal
5472 * is:
5473 */
5474 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5448 5475
5449 /* [e]DP over FDI requires target mode clock instead of link clock. */ 5476 if (intel_crtc->config.pixel_target_clock)
5450 if (edp_encoder) 5477 target_clock = intel_crtc->config.pixel_target_clock;
5451 target_clock = intel_edp_target_clock(edp_encoder, mode);
5452 else if (is_dp)
5453 target_clock = mode->clock;
5454 else 5478 else
5455 target_clock = adjusted_mode->clock; 5479 target_clock = adjusted_mode->clock;
5456 5480
5457 if (!lane) 5481 lane = ironlake_get_lanes_required(target_clock, link_bw,
5458 lane = ironlake_get_lanes_required(target_clock, link_bw, 5482 intel_crtc->config.pipe_bpp);
5459 intel_crtc->bpp);
5460 5483
5461 intel_crtc->fdi_lanes = lane; 5484 intel_crtc->fdi_lanes = lane;
5462 5485
5463 if (pixel_multiplier > 1) 5486 if (intel_crtc->config.pixel_multiplier > 1)
5464 link_bw *= pixel_multiplier; 5487 link_bw *= intel_crtc->config.pixel_multiplier;
5465 intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); 5488 intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
5489 link_bw, &m_n);
5466 5490
5467 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); 5491 intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
5468 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5469 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
5470 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
5471} 5492}
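
The comment above fixes the per-lane FDI bandwidth at fdi_freq * 100 MHz / 10 bits, in the same kHz units as the mode clock; the M/N pair then expresses the ratio of pixel data rate to total link rate. A sketch that reduces that ratio with a gcd for readability (the driver instead scales it into fixed-width register fields):

/* Sketch of the FDI data M/N ratio; reduction by gcd is illustrative. */
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long pixel_clock = 148500;		/* kHz, assumed mode */
	unsigned long link_bw = 27 * 100000 / 10;	/* 27 * 100MHz / 10 bits */
	unsigned long lanes = 2, bpp = 24;

	unsigned long m = pixel_clock * bpp;		/* pixel data rate */
	unsigned long n = link_bw * lanes * 8;		/* total link rate */
	unsigned long g = gcd(m, n);

	printf("link_bw = %lu kHz, data M/N = %lu/%lu\n", link_bw, m / g, n / g);
	return 0;
}

For the assumed 148.5 MHz / 24bpp / 2-lane case this yields link_bw = 270000 kHz and a data M/N of 33/40.
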
5472 5493
5473static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, 5494static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5474 struct drm_display_mode *adjusted_mode, 5495 intel_clock_t *clock, u32 *fp,
5475 intel_clock_t *clock, u32 fp) 5496 intel_clock_t *reduced_clock, u32 *fp2)
5476{ 5497{
5477 struct drm_crtc *crtc = &intel_crtc->base; 5498 struct drm_crtc *crtc = &intel_crtc->base;
5478 struct drm_device *dev = crtc->dev; 5499 struct drm_device *dev = crtc->dev;
5479 struct drm_i915_private *dev_priv = dev->dev_private; 5500 struct drm_i915_private *dev_priv = dev->dev_private;
5480 struct intel_encoder *intel_encoder; 5501 struct intel_encoder *intel_encoder;
5481 uint32_t dpll; 5502 uint32_t dpll;
5482 int factor, pixel_multiplier, num_connectors = 0; 5503 int factor, num_connectors = 0;
5483 bool is_lvds = false, is_sdvo = false, is_tv = false; 5504 bool is_lvds = false, is_sdvo = false, is_tv = false;
5484 bool is_dp = false, is_cpu_edp = false;
5485 5505
5486 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 5506 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5487 switch (intel_encoder->type) { 5507 switch (intel_encoder->type) {
@@ -5497,14 +5517,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5497 case INTEL_OUTPUT_TVOUT: 5517 case INTEL_OUTPUT_TVOUT:
5498 is_tv = true; 5518 is_tv = true;
5499 break; 5519 break;
5500 case INTEL_OUTPUT_DISPLAYPORT:
5501 is_dp = true;
5502 break;
5503 case INTEL_OUTPUT_EDP:
5504 is_dp = true;
5505 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5506 is_cpu_edp = true;
5507 break;
5508 } 5520 }
5509 5521
5510 num_connectors++; 5522 num_connectors++;
@@ -5515,13 +5527,16 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5515 if (is_lvds) { 5527 if (is_lvds) {
5516 if ((intel_panel_use_ssc(dev_priv) && 5528 if ((intel_panel_use_ssc(dev_priv) &&
5517 dev_priv->lvds_ssc_freq == 100) || 5529 dev_priv->lvds_ssc_freq == 100) ||
5518 intel_is_dual_link_lvds(dev)) 5530 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
5519 factor = 25; 5531 factor = 25;
5520 } else if (is_sdvo && is_tv) 5532 } else if (is_sdvo && is_tv)
5521 factor = 20; 5533 factor = 20;
5522 5534
5523 if (clock->m < factor * clock->n) 5535 if (clock->m < factor * clock->n)
5524 fp |= FP_CB_TUNE; 5536 *fp |= FP_CB_TUNE;
5537
5538 if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
5539 *fp2 |= FP_CB_TUNE;
5525 5540
5526 dpll = 0; 5541 dpll = 0;
5527 5542
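
ironlake_compute_dpll sets FP_CB_TUNE whenever the feedback ratio m/n drops below the platform factor: 25 for 100 MHz SSC LVDS or IBX dual-link LVDS, 20 for SDVO TV, otherwise a default the hunk does not show (assumed to be 21 below). A one-line predicate with a worked case:

/* The FP_CB_TUNE decision as a predicate; the default factor is assumed. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_cb_tune(int m, int n, int factor)
{
	return m < factor * n;
}

int main(void)
{
	/* e.g. m = 104, n = 5 with the assumed default factor of 21 */
	printf("m=104 n=5 factor=21 -> %s\n",
	       needs_cb_tune(104, 5, 21) ? "FP_CB_TUNE" : "no tune");
	return 0;
}
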
@@ -5530,13 +5545,14 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5530 else 5545 else
5531 dpll |= DPLLB_MODE_DAC_SERIAL; 5546 dpll |= DPLLB_MODE_DAC_SERIAL;
5532 if (is_sdvo) { 5547 if (is_sdvo) {
5533 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5548 if (intel_crtc->config.pixel_multiplier > 1) {
5534 if (pixel_multiplier > 1) { 5549 dpll |= (intel_crtc->config.pixel_multiplier - 1)
5535 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5550 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5536 } 5551 }
5537 dpll |= DPLL_DVO_HIGH_SPEED; 5552 dpll |= DPLL_DVO_HIGH_SPEED;
5538 } 5553 }
5539 if (is_dp && !is_cpu_edp) 5554 if (intel_crtc->config.has_dp_encoder &&
5555 intel_crtc->config.has_pch_encoder)
5540 dpll |= DPLL_DVO_HIGH_SPEED; 5556 dpll |= DPLL_DVO_HIGH_SPEED;
5541 5557
5542 /* compute bitmask from p1 value */ 5558 /* compute bitmask from p1 value */
@@ -5574,21 +5590,22 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5574} 5590}
5575 5591
5576static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5592static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5577 struct drm_display_mode *mode,
5578 struct drm_display_mode *adjusted_mode,
5579 int x, int y, 5593 int x, int y,
5580 struct drm_framebuffer *fb) 5594 struct drm_framebuffer *fb)
5581{ 5595{
5582 struct drm_device *dev = crtc->dev; 5596 struct drm_device *dev = crtc->dev;
5583 struct drm_i915_private *dev_priv = dev->dev_private; 5597 struct drm_i915_private *dev_priv = dev->dev_private;
5584 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5599 struct drm_display_mode *adjusted_mode =
5600 &intel_crtc->config.adjusted_mode;
5601 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
5585 int pipe = intel_crtc->pipe; 5602 int pipe = intel_crtc->pipe;
5586 int plane = intel_crtc->plane; 5603 int plane = intel_crtc->plane;
5587 int num_connectors = 0; 5604 int num_connectors = 0;
5588 intel_clock_t clock, reduced_clock; 5605 intel_clock_t clock, reduced_clock;
5589 u32 dpll, fp = 0, fp2 = 0; 5606 u32 dpll, fp = 0, fp2 = 0;
5590 bool ok, has_reduced_clock = false; 5607 bool ok, has_reduced_clock = false;
5591 bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5608 bool is_lvds = false;
5592 struct intel_encoder *encoder; 5609 struct intel_encoder *encoder;
5593 int ret; 5610 int ret;
5594 bool dither, fdi_config_ok; 5611 bool dither, fdi_config_ok;
@@ -5598,14 +5615,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5598 case INTEL_OUTPUT_LVDS: 5615 case INTEL_OUTPUT_LVDS:
5599 is_lvds = true; 5616 is_lvds = true;
5600 break; 5617 break;
5601 case INTEL_OUTPUT_DISPLAYPORT:
5602 is_dp = true;
5603 break;
5604 case INTEL_OUTPUT_EDP:
5605 is_dp = true;
5606 if (!intel_encoder_is_pch_edp(&encoder->base))
5607 is_cpu_edp = true;
5608 break;
5609 } 5618 }
5610 5619
5611 num_connectors++; 5620 num_connectors++;
@@ -5614,19 +5623,28 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5614 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 5623 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5615 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 5624 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5616 5625
5626 intel_crtc->config.cpu_transcoder = pipe;
5627
5617 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, 5628 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5618 &has_reduced_clock, &reduced_clock); 5629 &has_reduced_clock, &reduced_clock);
5619 if (!ok) { 5630 if (!ok) {
5620 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 5631 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5621 return -EINVAL; 5632 return -EINVAL;
5622 } 5633 }
5634 /* Compat-code for transition, will disappear. */
5635 if (!intel_crtc->config.clock_set) {
5636 intel_crtc->config.dpll.n = clock.n;
5637 intel_crtc->config.dpll.m1 = clock.m1;
5638 intel_crtc->config.dpll.m2 = clock.m2;
5639 intel_crtc->config.dpll.p1 = clock.p1;
5640 intel_crtc->config.dpll.p2 = clock.p2;
5641 }
5623 5642
5624 /* Ensure that the cursor is valid for the new mode before changing... */ 5643 /* Ensure that the cursor is valid for the new mode before changing... */
5625 intel_crtc_update_cursor(crtc, true); 5644 intel_crtc_update_cursor(crtc, true);
5626 5645
5627 /* determine panel color depth */ 5646 /* determine panel color depth */
5628 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5647 dither = intel_crtc->config.dither;
5629 adjusted_mode);
5630 if (is_lvds && dev_priv->lvds_dither) 5648 if (is_lvds && dev_priv->lvds_dither)
5631 dither = true; 5649 dither = true;
5632 5650
@@ -5635,13 +5653,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5635 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5653 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5636 reduced_clock.m2; 5654 reduced_clock.m2;
5637 5655
5638 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); 5656 dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
5657 has_reduced_clock ? &fp2 : NULL);
5639 5658
5640 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5659 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5641 drm_mode_debug_printmodeline(mode); 5660 drm_mode_debug_printmodeline(mode);
5642 5661
5643 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 5662 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5644 if (!is_cpu_edp) { 5663 if (intel_crtc->config.has_pch_encoder) {
5645 struct intel_pch_pll *pll; 5664 struct intel_pch_pll *pll;
5646 5665
5647 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5666 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5653,8 +5672,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5653 } else 5672 } else
5654 intel_put_pch_pll(intel_crtc); 5673 intel_put_pch_pll(intel_crtc);
5655 5674
5656 if (is_dp && !is_cpu_edp) 5675 if (intel_crtc->config.has_dp_encoder)
5657 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5676 intel_dp_set_m_n(intel_crtc);
5658 5677
5659 for_each_encoder_on_crtc(dev, crtc, encoder) 5678 for_each_encoder_on_crtc(dev, crtc, encoder)
5660 if (encoder->pre_pll_enable) 5679 if (encoder->pre_pll_enable)
@@ -5689,7 +5708,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5689 5708
5690 /* Note, this also computes intel_crtc->fdi_lanes which is used below in 5709 /* Note, this also computes intel_crtc->fdi_lanes which is used below in
5691 * ironlake_check_fdi_lanes. */ 5710 * ironlake_check_fdi_lanes. */
5692 ironlake_set_m_n(crtc, mode, adjusted_mode); 5711 intel_crtc->fdi_lanes = 0;
5712 if (intel_crtc->config.has_pch_encoder)
5713 ironlake_fdi_set_m_n(crtc);
5693 5714
5694 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); 5715 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5695 5716
@@ -5710,6 +5731,23 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5710 return fdi_config_ok ? ret : -EINVAL; 5731 return fdi_config_ok ? ret : -EINVAL;
5711} 5732}
5712 5733
5734static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5735 struct intel_crtc_config *pipe_config)
5736{
5737 struct drm_device *dev = crtc->base.dev;
5738 struct drm_i915_private *dev_priv = dev->dev_private;
5739 uint32_t tmp;
5740
5741 tmp = I915_READ(PIPECONF(crtc->pipe));
5742 if (!(tmp & PIPECONF_ENABLE))
5743 return false;
5744
5745 if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
5746 pipe_config->has_pch_encoder = true;
5747
5748 return true;
5749}
5750
5713static void haswell_modeset_global_resources(struct drm_device *dev) 5751static void haswell_modeset_global_resources(struct drm_device *dev)
5714{ 5752{
5715 struct drm_i915_private *dev_priv = dev->dev_private; 5753 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5740,29 +5778,26 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
5740} 5778}
5741 5779
5742static int haswell_crtc_mode_set(struct drm_crtc *crtc, 5780static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5743 struct drm_display_mode *mode,
5744 struct drm_display_mode *adjusted_mode,
5745 int x, int y, 5781 int x, int y,
5746 struct drm_framebuffer *fb) 5782 struct drm_framebuffer *fb)
5747{ 5783{
5748 struct drm_device *dev = crtc->dev; 5784 struct drm_device *dev = crtc->dev;
5749 struct drm_i915_private *dev_priv = dev->dev_private; 5785 struct drm_i915_private *dev_priv = dev->dev_private;
5750 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5787 struct drm_display_mode *adjusted_mode =
5788 &intel_crtc->config.adjusted_mode;
5789 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
5751 int pipe = intel_crtc->pipe; 5790 int pipe = intel_crtc->pipe;
5752 int plane = intel_crtc->plane; 5791 int plane = intel_crtc->plane;
5753 int num_connectors = 0; 5792 int num_connectors = 0;
5754 bool is_dp = false, is_cpu_edp = false; 5793 bool is_cpu_edp = false;
5755 struct intel_encoder *encoder; 5794 struct intel_encoder *encoder;
5756 int ret; 5795 int ret;
5757 bool dither; 5796 bool dither;
5758 5797
5759 for_each_encoder_on_crtc(dev, crtc, encoder) { 5798 for_each_encoder_on_crtc(dev, crtc, encoder) {
5760 switch (encoder->type) { 5799 switch (encoder->type) {
5761 case INTEL_OUTPUT_DISPLAYPORT:
5762 is_dp = true;
5763 break;
5764 case INTEL_OUTPUT_EDP: 5800 case INTEL_OUTPUT_EDP:
5765 is_dp = true;
5766 if (!intel_encoder_is_pch_edp(&encoder->base)) 5801 if (!intel_encoder_is_pch_edp(&encoder->base))
5767 is_cpu_edp = true; 5802 is_cpu_edp = true;
5768 break; 5803 break;
@@ -5772,9 +5807,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5772 } 5807 }
5773 5808
5774 if (is_cpu_edp) 5809 if (is_cpu_edp)
5775 intel_crtc->cpu_transcoder = TRANSCODER_EDP; 5810 intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
5776 else 5811 else
5777 intel_crtc->cpu_transcoder = pipe; 5812 intel_crtc->config.cpu_transcoder = pipe;
5778 5813
5779 /* We are not sure yet this won't happen. */ 5814 /* We are not sure yet this won't happen. */
5780 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", 5815 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
@@ -5783,7 +5818,7 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5783 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", 5818 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5784 num_connectors, pipe_name(pipe)); 5819 num_connectors, pipe_name(pipe));
5785 5820
5786 WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & 5821 WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
5787 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); 5822 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5788 5823
5789 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); 5824 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
@@ -5795,25 +5830,24 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5795 intel_crtc_update_cursor(crtc, true); 5830 intel_crtc_update_cursor(crtc, true);
5796 5831
5797 /* determine panel color depth */ 5832 /* determine panel color depth */
5798 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5833 dither = intel_crtc->config.dither;
5799 adjusted_mode);
5800 5834
5801 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5835 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5802 drm_mode_debug_printmodeline(mode); 5836 drm_mode_debug_printmodeline(mode);
5803 5837
5804 if (is_dp && !is_cpu_edp) 5838 if (intel_crtc->config.has_dp_encoder)
5805 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5839 intel_dp_set_m_n(intel_crtc);
5806 5840
5807 intel_crtc->lowfreq_avail = false; 5841 intel_crtc->lowfreq_avail = false;
5808 5842
5809 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5843 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5810 5844
5811 if (!is_dp || is_cpu_edp) 5845 if (intel_crtc->config.has_pch_encoder)
5812 ironlake_set_m_n(crtc, mode, adjusted_mode); 5846 ironlake_fdi_set_m_n(crtc);
5813 5847
5814 haswell_set_pipeconf(crtc, adjusted_mode, dither); 5848 haswell_set_pipeconf(crtc, adjusted_mode, dither);
5815 5849
5816 intel_set_pipe_csc(crtc, adjusted_mode); 5850 intel_set_pipe_csc(crtc);
5817 5851
5818 /* Set up the display plane register */ 5852 /* Set up the display plane register */
5819 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); 5853 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
@@ -5828,9 +5862,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5828 return ret; 5862 return ret;
5829} 5863}
5830 5864
5865static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5866 struct intel_crtc_config *pipe_config)
5867{
5868 struct drm_device *dev = crtc->base.dev;
5869 struct drm_i915_private *dev_priv = dev->dev_private;
5870 uint32_t tmp;
5871
5872 tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
5873 if (!(tmp & PIPECONF_ENABLE))
5874 return false;
5875
5876 /*
5877 * Haswell has only FDI/PCH transcoder A, which is connected to
5878 * DDI E. So just check whether this pipe is wired to DDI E and whether
5879 * the PCH transcoder is on.
5880 */
5881 tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
5882 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
5883 I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
5884 pipe_config->has_pch_encoder = true;
5885
5886
5887 return true;
5888}
5889
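
The function above is the first of the per-platform get_pipe_config callbacks: read the pipe's control register, return false if the pipe is off, otherwise decode the routing bits that matter into the freshly zeroed config. A standalone sketch of that readout shape, with a stubbed register file and made-up bit layouts standing in for I915_READ and the real registers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PIPECONF_ENABLE   (1u << 31)  /* bit positions are illustrative */
    #define DDI_PORT_MASK     (7u << 28)
    #define DDI_SELECT_PORT_E (4u << 28)

    struct pipe_config { bool has_pch_encoder; };

    /* Two fake registers: PIPECONF and TRANS_DDI_FUNC_CTL equivalents. */
    static uint32_t regs[2] = { PIPECONF_ENABLE, DDI_SELECT_PORT_E };
    static uint32_t mmio_read(int reg) { return regs[reg]; }

    static bool get_pipe_config(struct pipe_config *cfg)
    {
        if (!(mmio_read(0) & PIPECONF_ENABLE))
            return false;                  /* pipe off: nothing to decode */

        /* Only one FDI/PCH transcoder exists and it hangs off port E. */
        if ((mmio_read(1) & DDI_PORT_MASK) == DDI_SELECT_PORT_E)
            cfg->has_pch_encoder = true;

        return true;
    }

    int main(void)
    {
        struct pipe_config cfg = {0};
        bool active = get_pipe_config(&cfg);
        printf("active=%d pch=%d\n", active, cfg.has_pch_encoder);
        return 0;
    }

The same shape is what lets intel_modeset_check_state diff software state against a fresh hardware readout, which the later hunks wire up.
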
5831static int intel_crtc_mode_set(struct drm_crtc *crtc, 5890static int intel_crtc_mode_set(struct drm_crtc *crtc,
5832 struct drm_display_mode *mode,
5833 struct drm_display_mode *adjusted_mode,
5834 int x, int y, 5891 int x, int y,
5835 struct drm_framebuffer *fb) 5892 struct drm_framebuffer *fb)
5836{ 5893{
@@ -5839,13 +5896,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5839 struct drm_encoder_helper_funcs *encoder_funcs; 5896 struct drm_encoder_helper_funcs *encoder_funcs;
5840 struct intel_encoder *encoder; 5897 struct intel_encoder *encoder;
5841 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5898 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5899 struct drm_display_mode *adjusted_mode =
5900 &intel_crtc->config.adjusted_mode;
5901 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
5842 int pipe = intel_crtc->pipe; 5902 int pipe = intel_crtc->pipe;
5843 int ret; 5903 int ret;
5844 5904
5845 drm_vblank_pre_modeset(dev, pipe); 5905 drm_vblank_pre_modeset(dev, pipe);
5846 5906
5847 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5907 ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
5848 x, y, fb); 5908
5849 drm_vblank_post_modeset(dev, pipe); 5909 drm_vblank_post_modeset(dev, pipe);
5850 5910
5851 if (ret != 0) 5911 if (ret != 0)
@@ -5856,8 +5916,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5856 encoder->base.base.id, 5916 encoder->base.base.id,
5857 drm_get_encoder_name(&encoder->base), 5917 drm_get_encoder_name(&encoder->base),
5858 mode->base.id, mode->name); 5918 mode->base.id, mode->name);
5859 encoder_funcs = encoder->base.helper_private; 5919 if (encoder->mode_set) {
5860 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); 5920 encoder->mode_set(encoder);
5921 } else {
5922 encoder_funcs = encoder->base.helper_private;
5923 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5924 }
5861 } 5925 }
5862 5926
5863 return 0; 5927 return 0;
@@ -6325,13 +6389,24 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6325 /* we only need to pin inside GTT if cursor is non-phy */ 6389 /* we only need to pin inside GTT if cursor is non-phy */
6326 mutex_lock(&dev->struct_mutex); 6390 mutex_lock(&dev->struct_mutex);
6327 if (!dev_priv->info->cursor_needs_physical) { 6391 if (!dev_priv->info->cursor_needs_physical) {
6392 unsigned alignment;
6393
6328 if (obj->tiling_mode) { 6394 if (obj->tiling_mode) {
6329 DRM_ERROR("cursor cannot be tiled\n"); 6395 DRM_ERROR("cursor cannot be tiled\n");
6330 ret = -EINVAL; 6396 ret = -EINVAL;
6331 goto fail_locked; 6397 goto fail_locked;
6332 } 6398 }
6333 6399
6334 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); 6400 /* Note that the w/a also requires 2 PTE of padding following
6401 * the bo. We currently fill all unused PTE with the shadow
6402 * page and so we should always have valid PTE following the
6403 * cursor preventing the VT-d warning.
6404 */
6405 alignment = 0;
6406 if (need_vtd_wa(dev))
6407 alignment = 64*1024;
6408
6409 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
6335 if (ret) { 6410 if (ret) {
6336 DRM_ERROR("failed to move cursor bo into the GTT\n"); 6411 DRM_ERROR("failed to move cursor bo into the GTT\n");
6337 goto fail_locked; 6412 goto fail_locked;
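
The comment explains the why; the how is plain over-alignment. A toy version of the decision, with need_vtd_wa reduced to a simple predicate (in the driver it keys off the GPU generation and whether the IOMMU is actually mapping graphics, so the condition here is an assumption for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed predicate: w/a needed on gen6/7 with the IOMMU active. */
    static bool need_vtd_wa(bool iommu_mapped, int gen)
    {
        return iommu_mapped && (gen == 6 || gen == 7);
    }

    static unsigned cursor_pin_alignment(bool iommu_mapped, int gen)
    {
        /* 64KiB alignment keeps scanout fetches inside valid PTEs. */
        return need_vtd_wa(iommu_mapped, gen) ? 64 * 1024 : 0;
    }

    int main(void)
    {
        printf("alignment=%u\n", cursor_pin_alignment(true, 7));
        return 0;
    }
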
@@ -6436,20 +6511,6 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6436 intel_crtc_load_lut(crtc); 6511 intel_crtc_load_lut(crtc);
6437} 6512}
6438 6513
6439/**
6440 * Get a pipe with a simple mode set on it for doing load-based monitor
6441 * detection.
6442 *
6443 * It will be up to the load-detect code to adjust the pipe as appropriate for
6444 * its requirements. The pipe will be connected to no other encoders.
6445 *
6446 * Currently this code will only succeed if there is a pipe with no encoders
6447 * configured for it. In the future, it could choose to temporarily disable
6448 * some outputs to free up a pipe for its use.
6449 *
6450 * \return crtc, or NULL if no pipes are available.
6451 */
6452
6453/* VESA 640x480x72Hz mode to set on the pipe */ 6514/* VESA 640x480x72Hz mode to set on the pipe */
6454static struct drm_display_mode load_detect_mode = { 6515static struct drm_display_mode load_detect_mode = {
6455 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 6516 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -6776,7 +6837,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6776{ 6837{
6777 struct drm_i915_private *dev_priv = dev->dev_private; 6838 struct drm_i915_private *dev_priv = dev->dev_private;
6778 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6779 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 6840 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6780 struct drm_display_mode *mode; 6841 struct drm_display_mode *mode;
6781 int htot = I915_READ(HTOTAL(cpu_transcoder)); 6842 int htot = I915_READ(HTOTAL(cpu_transcoder));
6782 int hsync = I915_READ(HSYNC(cpu_transcoder)); 6843 int hsync = I915_READ(HSYNC(cpu_transcoder));
@@ -6954,7 +7015,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6954 drm_i915_private_t *dev_priv = dev->dev_private; 7015 drm_i915_private_t *dev_priv = dev->dev_private;
6955 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7016 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6956 struct intel_unpin_work *work; 7017 struct intel_unpin_work *work;
6957 struct drm_i915_gem_object *obj;
6958 unsigned long flags; 7018 unsigned long flags;
6959 7019
6960 /* Ignore early vblank irqs */ 7020 /* Ignore early vblank irqs */
@@ -6984,8 +7044,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6984 7044
6985 spin_unlock_irqrestore(&dev->event_lock, flags); 7045 spin_unlock_irqrestore(&dev->event_lock, flags);
6986 7046
6987 obj = work->old_fb_obj;
6988
6989 wake_up_all(&dev_priv->pending_flip_queue); 7047 wake_up_all(&dev_priv->pending_flip_queue);
6990 7048
6991 queue_work(dev_priv->wq, &work->work); 7049 queue_work(dev_priv->wq, &work->work);
@@ -7473,19 +7531,93 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
7473 } 7531 }
7474} 7532}
7475 7533
7476static struct drm_display_mode * 7534static int
7477intel_modeset_adjusted_mode(struct drm_crtc *crtc, 7535pipe_config_set_bpp(struct drm_crtc *crtc,
7478 struct drm_display_mode *mode) 7536 struct drm_framebuffer *fb,
7537 struct intel_crtc_config *pipe_config)
7538{
7539 struct drm_device *dev = crtc->dev;
7540 struct drm_connector *connector;
7541 int bpp;
7542
7543 switch (fb->pixel_format) {
7544 case DRM_FORMAT_C8:
7545 bpp = 8*3; /* since we go through a colormap */
7546 break;
7547 case DRM_FORMAT_XRGB1555:
7548 case DRM_FORMAT_ARGB1555:
7549 /* checked in intel_framebuffer_init already */
7550 if (WARN_ON(INTEL_INFO(dev)->gen > 3))
7551 return -EINVAL;
7552 case DRM_FORMAT_RGB565:
7553 bpp = 6*3; /* min is 18bpp */
7554 break;
7555 case DRM_FORMAT_XBGR8888:
7556 case DRM_FORMAT_ABGR8888:
7557 /* checked in intel_framebuffer_init already */
7558 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
7559 return -EINVAL;
7560 case DRM_FORMAT_XRGB8888:
7561 case DRM_FORMAT_ARGB8888:
7562 bpp = 8*3;
7563 break;
7564 case DRM_FORMAT_XRGB2101010:
7565 case DRM_FORMAT_ARGB2101010:
7566 case DRM_FORMAT_XBGR2101010:
7567 case DRM_FORMAT_ABGR2101010:
7568 /* checked in intel_framebuffer_init already */
7569 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
7570 return -EINVAL;
7571 bpp = 10*3;
7572 break;
7573 /* TODO: gen4+ supports 16 bpc floating point, too. */
7574 default:
7575 DRM_DEBUG_KMS("unsupported depth\n");
7576 return -EINVAL;
7577 }
7578
7579 pipe_config->pipe_bpp = bpp;
7580
7581 /* Clamp display bpp to EDID value */
7582 list_for_each_entry(connector, &dev->mode_config.connector_list,
7583 head) {
7584 if (connector->encoder && connector->encoder->crtc != crtc)
7585 continue;
7586
7587 /* Don't use an invalid EDID bpc value */
7588 if (connector->display_info.bpc &&
7589 connector->display_info.bpc * 3 < bpp) {
7590 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
7591 bpp, connector->display_info.bpc*3);
7592 pipe_config->pipe_bpp = connector->display_info.bpc*3;
7593 }
7594 }
7595
7596 return bpp;
7597}
7598
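
pipe_config_set_bpp does two jobs: derive the maximum pipe bpp from the framebuffer's pixel format, then clamp it to the smallest EDID-reported bpc among the connectors feeding this CRTC. The same computation as a standalone sketch, with the format table and connector list reduced to plain arrays (values illustrative):

    #include <stdio.h>

    enum fmt { FMT_C8, FMT_RGB565, FMT_XRGB8888, FMT_XRGB2101010 };

    static int fmt_to_bpp(enum fmt f)
    {
        switch (f) {
        case FMT_C8:          return 8 * 3;  /* goes through a colormap */
        case FMT_RGB565:      return 6 * 3;  /* pipe minimum is 18 bpp  */
        case FMT_XRGB8888:    return 8 * 3;
        case FMT_XRGB2101010: return 10 * 3;
        }
        return -1;
    }

    int main(void)
    {
        int bpp = fmt_to_bpp(FMT_XRGB2101010);  /* plane asks for 30 bpp */
        int edid_bpc[] = { 8, 6 };              /* bpc per attached connector */

        for (unsigned i = 0; i < sizeof(edid_bpc) / sizeof(edid_bpc[0]); i++)
            if (edid_bpc[i] && edid_bpc[i] * 3 < bpp)
                bpp = edid_bpc[i] * 3;          /* clamp to EDID maximum */

        printf("pipe bpp = %d\n", bpp);         /* prints 18 */
        return 0;
    }

Note that the function returns the plane bpp while pipe_config->pipe_bpp holds the clamped value; the caller later uses the difference between the two to decide on dithering.
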
7599static struct intel_crtc_config *
7600intel_modeset_pipe_config(struct drm_crtc *crtc,
7601 struct drm_framebuffer *fb,
7602 struct drm_display_mode *mode)
7479{ 7603{
7480 struct drm_device *dev = crtc->dev; 7604 struct drm_device *dev = crtc->dev;
7481 struct drm_display_mode *adjusted_mode;
7482 struct drm_encoder_helper_funcs *encoder_funcs; 7605 struct drm_encoder_helper_funcs *encoder_funcs;
7483 struct intel_encoder *encoder; 7606 struct intel_encoder *encoder;
7607 struct intel_crtc_config *pipe_config;
7608 int plane_bpp;
7484 7609
7485 adjusted_mode = drm_mode_duplicate(dev, mode); 7610 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7486 if (!adjusted_mode) 7611 if (!pipe_config)
7487 return ERR_PTR(-ENOMEM); 7612 return ERR_PTR(-ENOMEM);
7488 7613
7614 drm_mode_copy(&pipe_config->adjusted_mode, mode);
7615 drm_mode_copy(&pipe_config->requested_mode, mode);
7616
7617 plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
7618 if (plane_bpp < 0)
7619 goto fail;
7620
7489 /* Pass our mode to the connectors and the CRTC to give them a chance to 7621 /* Pass our mode to the connectors and the CRTC to give them a chance to
7490 * adjust it according to limitations or connector properties, and also 7622 * adjust it according to limitations or connector properties, and also
7491 * a chance to reject the mode entirely. 7623 * a chance to reject the mode entirely.
@@ -7495,23 +7627,38 @@ intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7495 7627
7496 if (&encoder->new_crtc->base != crtc) 7628 if (&encoder->new_crtc->base != crtc)
7497 continue; 7629 continue;
7630
7631 if (encoder->compute_config) {
7632 if (!(encoder->compute_config(encoder, pipe_config))) {
7633 DRM_DEBUG_KMS("Encoder config failure\n");
7634 goto fail;
7635 }
7636
7637 continue;
7638 }
7639
7498 encoder_funcs = encoder->base.helper_private; 7640 encoder_funcs = encoder->base.helper_private;
7499 if (!(encoder_funcs->mode_fixup(&encoder->base, mode, 7641 if (!(encoder_funcs->mode_fixup(&encoder->base,
7500 adjusted_mode))) { 7642 &pipe_config->requested_mode,
7643 &pipe_config->adjusted_mode))) {
7501 DRM_DEBUG_KMS("Encoder fixup failed\n"); 7644 DRM_DEBUG_KMS("Encoder fixup failed\n");
7502 goto fail; 7645 goto fail;
7503 } 7646 }
7504 } 7647 }
7505 7648
7506 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) { 7649 if (!(intel_crtc_compute_config(crtc, pipe_config))) {
7507 DRM_DEBUG_KMS("CRTC fixup failed\n"); 7650 DRM_DEBUG_KMS("CRTC fixup failed\n");
7508 goto fail; 7651 goto fail;
7509 } 7652 }
7510 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 7653 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
7511 7654
7512 return adjusted_mode; 7655 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
7656 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
7657 plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7658
7659 return pipe_config;
7513fail: 7660fail:
7514 drm_mode_destroy(dev, adjusted_mode); 7661 kfree(pipe_config);
7515 return ERR_PTR(-EINVAL); 7662 return ERR_PTR(-EINVAL);
7516} 7663}
7517 7664
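
intel_modeset_pipe_config thus becomes the single place where a mode request turns into a full pipe configuration: allocate, copy the mode into requested/adjusted, derive the bpp, let each encoder adjust or veto, give the CRTC the final say, then derive the dither flag. A compressed sketch of that pipeline (the names and the toy encoder are stand-ins, not the driver's types):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cfg {
        int requested_clock, adjusted_clock;
        int plane_bpp, pipe_bpp;
        bool dither;
    };

    typedef bool (*compute_fn)(struct cfg *);

    /* Toy encoder hook: snaps the adjusted clock to a link rate. */
    static bool dp_compute(struct cfg *c) { c->adjusted_clock = 270000; return true; }
    static bool crtc_compute(struct cfg *c) { return c->adjusted_clock > 0; }

    static struct cfg *pipe_config_alloc(int clock, int plane_bpp,
                                         compute_fn *enc, int n)
    {
        struct cfg *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->requested_clock = c->adjusted_clock = clock;
        c->plane_bpp = c->pipe_bpp = plane_bpp;

        for (int i = 0; i < n; i++)     /* encoders may adjust or veto */
            if (!enc[i](c))
                goto fail;
        if (!crtc_compute(c))           /* CRTC fixup gets the last word */
            goto fail;

        c->dither = c->pipe_bpp != c->plane_bpp;
        return c;
    fail:
        free(c);
        return NULL;
    }

    int main(void)
    {
        compute_fn enc[] = { dp_compute };
        struct cfg *c = pipe_config_alloc(148500, 24, enc, 1);
        if (c)
            printf("adjusted clock %d, dither %d\n",
                   c->adjusted_clock, c->dither);
        free(c);
        return 0;
    }
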
@@ -7589,22 +7736,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7589 if (crtc->enabled) 7736 if (crtc->enabled)
7590 *prepare_pipes |= 1 << intel_crtc->pipe; 7737 *prepare_pipes |= 1 << intel_crtc->pipe;
7591 7738
7592 /* We only support modeset on one single crtc, hence we need to do that 7739 /*
7593 * only for the passed in crtc iff we change anything else than just 7740 * For simplicity do a full modeset on any pipe where the output routing
7594 * disable crtcs. 7741 * changed. We could be more clever, but that would require us to be
7595 * 7742 * more careful with calling the relevant encoder->mode_set functions.
7596 * This is actually not true, to be fully compatible with the old crtc 7743 */
7597 * helper we automatically disable _any_ output (i.e. doesn't need to be
7598 * connected to the crtc we're modesetting on) if it's disconnected.
7599 * Which is a rather nutty api (since changed the output configuration
7600 * without userspace's explicit request can lead to confusion), but
7601 * alas. Hence we currently need to modeset on all pipes we prepare. */
7602 if (*prepare_pipes) 7744 if (*prepare_pipes)
7603 *modeset_pipes = *prepare_pipes; 7745 *modeset_pipes = *prepare_pipes;
7604 7746
7605 /* ... and mask these out. */ 7747 /* ... and mask these out. */
7606 *modeset_pipes &= ~(*disable_pipes); 7748 *modeset_pipes &= ~(*disable_pipes);
7607 *prepare_pipes &= ~(*disable_pipes); 7749 *prepare_pipes &= ~(*disable_pipes);
7750
7751 /*
7752 * HACK: We don't (yet) fully support global modesets. intel_set_config
7753 * obeys this rule, but the modeset restore mode of
7754 * intel_modeset_setup_hw_state does not.
7755 */
7756 *modeset_pipes &= 1 << intel_crtc->pipe;
7757 *prepare_pipes &= 1 << intel_crtc->pipe;
7608} 7758}
7609 7759
7610static bool intel_crtc_in_use(struct drm_crtc *crtc) 7760static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7673,12 +7823,29 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7673 base.head) \ 7823 base.head) \
7674 if (mask & (1 <<(intel_crtc)->pipe)) \ 7824 if (mask & (1 <<(intel_crtc)->pipe)) \
7675 7825
7826static bool
7827intel_pipe_config_compare(struct intel_crtc_config *current_config,
7828 struct intel_crtc_config *pipe_config)
7829{
7830 if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
7831 DRM_ERROR("mismatch in has_pch_encoder "
7832 "(expected %i, found %i)\n",
7833 current_config->has_pch_encoder,
7834 pipe_config->has_pch_encoder);
7835 return false;
7836 }
7837
7838 return true;
7839}
7840
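
The checker starts with a single field, but the structure invites growth: one comparison, one error message, one early return per field. A sketch of how such a checker scales, using a hypothetical field-comparison macro (the hunk above spells its one check out by hand):

    #include <stdbool.h>
    #include <stdio.h>

    struct pipe_cfg { bool has_pch_encoder; int pipe_bpp; };

    #define CHECK_FIELD(a, b, field)                                   \
        do {                                                           \
            if ((a)->field != (b)->field) {                            \
                fprintf(stderr, "mismatch in " #field                  \
                        " (expected %d, found %d)\n",                  \
                        (int)(a)->field, (int)(b)->field);             \
                return false;                                          \
            }                                                          \
        } while (0)

    static bool pipe_config_compare(const struct pipe_cfg *sw,
                                    const struct pipe_cfg *hw)
    {
        CHECK_FIELD(sw, hw, has_pch_encoder);
        CHECK_FIELD(sw, hw, pipe_bpp);
        return true;
    }

    int main(void)
    {
        struct pipe_cfg sw = { true, 24 }, hw = { true, 18 };
        printf("match=%d\n", pipe_config_compare(&sw, &hw));
        return 0;
    }
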
7676void 7841void
7677intel_modeset_check_state(struct drm_device *dev) 7842intel_modeset_check_state(struct drm_device *dev)
7678{ 7843{
7844 drm_i915_private_t *dev_priv = dev->dev_private;
7679 struct intel_crtc *crtc; 7845 struct intel_crtc *crtc;
7680 struct intel_encoder *encoder; 7846 struct intel_encoder *encoder;
7681 struct intel_connector *connector; 7847 struct intel_connector *connector;
7848 struct intel_crtc_config pipe_config;
7682 7849
7683 list_for_each_entry(connector, &dev->mode_config.connector_list, 7850 list_for_each_entry(connector, &dev->mode_config.connector_list,
7684 base.head) { 7851 base.head) {
@@ -7767,17 +7934,27 @@ intel_modeset_check_state(struct drm_device *dev)
7767 "crtc's computed enabled state doesn't match tracked enabled state " 7934 "crtc's computed enabled state doesn't match tracked enabled state "
7768 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 7935 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7769 7936
7770 assert_pipe(dev->dev_private, crtc->pipe, crtc->active); 7937 memset(&pipe_config, 0, sizeof(pipe_config));
7938 active = dev_priv->display.get_pipe_config(crtc,
7939 &pipe_config);
7940 WARN(crtc->active != active,
7941 "crtc active state doesn't match with hw state "
7942 "(expected %i, found %i)\n", crtc->active, active);
7943
7944 WARN(active &&
7945 !intel_pipe_config_compare(&crtc->config, &pipe_config),
7946 "pipe state doesn't match!\n");
7771 } 7947 }
7772} 7948}
7773 7949
7774int intel_set_mode(struct drm_crtc *crtc, 7950static int __intel_set_mode(struct drm_crtc *crtc,
7775 struct drm_display_mode *mode, 7951 struct drm_display_mode *mode,
7776 int x, int y, struct drm_framebuffer *fb) 7952 int x, int y, struct drm_framebuffer *fb)
7777{ 7953{
7778 struct drm_device *dev = crtc->dev; 7954 struct drm_device *dev = crtc->dev;
7779 drm_i915_private_t *dev_priv = dev->dev_private; 7955 drm_i915_private_t *dev_priv = dev->dev_private;
7780 struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode; 7956 struct drm_display_mode *saved_mode, *saved_hwmode;
7957 struct intel_crtc_config *pipe_config = NULL;
7781 struct intel_crtc *intel_crtc; 7958 struct intel_crtc *intel_crtc;
7782 unsigned disable_pipes, prepare_pipes, modeset_pipes; 7959 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7783 int ret = 0; 7960 int ret = 0;
@@ -7790,12 +7967,6 @@ int intel_set_mode(struct drm_crtc *crtc,
7790 intel_modeset_affected_pipes(crtc, &modeset_pipes, 7967 intel_modeset_affected_pipes(crtc, &modeset_pipes,
7791 &prepare_pipes, &disable_pipes); 7968 &prepare_pipes, &disable_pipes);
7792 7969
7793 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7794 modeset_pipes, prepare_pipes, disable_pipes);
7795
7796 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7797 intel_crtc_disable(&intel_crtc->base);
7798
7799 *saved_hwmode = crtc->hwmode; 7970 *saved_hwmode = crtc->hwmode;
7800 *saved_mode = crtc->mode; 7971 *saved_mode = crtc->mode;
7801 7972
@@ -7804,15 +7975,22 @@ int intel_set_mode(struct drm_crtc *crtc,
7804 * Hence simply check whether any bit is set in modeset_pipes in all the 7975 * Hence simply check whether any bit is set in modeset_pipes in all the
7805 * pieces of code that are not yet converted to deal with multiple crtcs 7976 * pieces of code that are not yet converted to deal with multiple crtcs
7806 * changing their mode at the same time. */ 7977 * changing their mode at the same time. */
7807 adjusted_mode = NULL;
7808 if (modeset_pipes) { 7978 if (modeset_pipes) {
7809 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); 7979 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
7810 if (IS_ERR(adjusted_mode)) { 7980 if (IS_ERR(pipe_config)) {
7811 ret = PTR_ERR(adjusted_mode); 7981 ret = PTR_ERR(pipe_config);
7982 pipe_config = NULL;
7983
7812 goto out; 7984 goto out;
7813 } 7985 }
7814 } 7986 }
7815 7987
7988 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7989 modeset_pipes, prepare_pipes, disable_pipes);
7990
7991 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7992 intel_crtc_disable(&intel_crtc->base);
7993
7816 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 7994 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7817 if (intel_crtc->base.enabled) 7995 if (intel_crtc->base.enabled)
7818 dev_priv->display.crtc_disable(&intel_crtc->base); 7996 dev_priv->display.crtc_disable(&intel_crtc->base);
@@ -7821,8 +7999,14 @@ int intel_set_mode(struct drm_crtc *crtc,
7821 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 7999 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7822 * to set it here already despite that we pass it down the callchain. 8000 * to set it here already despite that we pass it down the callchain.
7823 */ 8001 */
7824 if (modeset_pipes) 8002 if (modeset_pipes) {
8003 enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
7825 crtc->mode = *mode; 8004 crtc->mode = *mode;
8005 /* mode_set/enable/disable functions rely on a correct pipe
8006 * config. */
8007 to_intel_crtc(crtc)->config = *pipe_config;
8008 to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
8009 }
7826 8010
7827 /* Only after disabling all output pipelines that will be changed can we 8011 /* Only after disabling all output pipelines that will be changed can we
7828 * update the output configuration. */ 8012 * update the output configuration. */
@@ -7836,7 +8020,6 @@ int intel_set_mode(struct drm_crtc *crtc,
7836 */ 8020 */
7837 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 8021 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7838 ret = intel_crtc_mode_set(&intel_crtc->base, 8022 ret = intel_crtc_mode_set(&intel_crtc->base,
7839 mode, adjusted_mode,
7840 x, y, fb); 8023 x, y, fb);
7841 if (ret) 8024 if (ret)
7842 goto done; 8025 goto done;
@@ -7848,7 +8031,7 @@ int intel_set_mode(struct drm_crtc *crtc,
7848 8031
7849 if (modeset_pipes) { 8032 if (modeset_pipes) {
7850 /* Store real post-adjustment hardware mode. */ 8033 /* Store real post-adjustment hardware mode. */
7851 crtc->hwmode = *adjusted_mode; 8034 crtc->hwmode = pipe_config->adjusted_mode;
7852 8035
7853 /* Calculate and store various constants which 8036 /* Calculate and store various constants which
7854 * are later needed by vblank and swap-completion 8037 * are later needed by vblank and swap-completion
@@ -7859,19 +8042,31 @@ int intel_set_mode(struct drm_crtc *crtc,
7859 8042
7860 /* FIXME: add subpixel order */ 8043 /* FIXME: add subpixel order */
7861done: 8044done:
7862 drm_mode_destroy(dev, adjusted_mode);
7863 if (ret && crtc->enabled) { 8045 if (ret && crtc->enabled) {
7864 crtc->hwmode = *saved_hwmode; 8046 crtc->hwmode = *saved_hwmode;
7865 crtc->mode = *saved_mode; 8047 crtc->mode = *saved_mode;
7866 } else {
7867 intel_modeset_check_state(dev);
7868 } 8048 }
7869 8049
7870out: 8050out:
8051 kfree(pipe_config);
7871 kfree(saved_mode); 8052 kfree(saved_mode);
7872 return ret; 8053 return ret;
7873} 8054}
7874 8055
8056int intel_set_mode(struct drm_crtc *crtc,
8057 struct drm_display_mode *mode,
8058 int x, int y, struct drm_framebuffer *fb)
8059{
8060 int ret;
8061
8062 ret = __intel_set_mode(crtc, mode, x, y, fb);
8063
8064 if (ret == 0)
8065 intel_modeset_check_state(crtc->dev);
8066
8067 return ret;
8068}
8069
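
The split produces the classic raw-worker/checked-wrapper pair: ordinary callers go through intel_set_mode and get the state cross-check for free, while the hardware-state restore path calls __intel_set_mode directly because its intermediate states would (legitimately) fail the check. The pattern in miniature:

    #include <stdio.h>

    static int __set_mode(int mode) { printf("modeset -> %d\n", mode); return 0; }
    static void check_state(void)   { puts("verify sw state against hw"); }

    /* Checked wrapper for normal callers. */
    static int set_mode(int mode)
    {
        int ret = __set_mode(mode);
        if (ret == 0)
            check_state();
        return ret;
    }

    int main(void)
    {
        set_mode(1);    /* userspace path: modeset, then check */
        __set_mode(2);  /* restore path: raw, no check         */
        return 0;
    }
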
7875void intel_crtc_restore_mode(struct drm_crtc *crtc) 8070void intel_crtc_restore_mode(struct drm_crtc *crtc)
7876{ 8071{
7877 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); 8072 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
@@ -7959,10 +8154,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7959 config->mode_changed = true; 8154 config->mode_changed = true;
7960 } else if (set->fb == NULL) { 8155 } else if (set->fb == NULL) {
7961 config->mode_changed = true; 8156 config->mode_changed = true;
7962 } else if (set->fb->depth != set->crtc->fb->depth) { 8157 } else if (set->fb->pixel_format !=
7963 config->mode_changed = true; 8158 set->crtc->fb->pixel_format) {
7964 } else if (set->fb->bits_per_pixel !=
7965 set->crtc->fb->bits_per_pixel) {
7966 config->mode_changed = true; 8159 config->mode_changed = true;
7967 } else 8160 } else
7968 config->fb_changed = true; 8161 config->fb_changed = true;
@@ -8145,6 +8338,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8145 goto fail; 8338 goto fail;
8146 } 8339 }
8147 } else if (config->fb_changed) { 8340 } else if (config->fb_changed) {
8341 intel_crtc_wait_for_pending_flips(set->crtc);
8342
8148 ret = intel_pipe_set_base(set->crtc, 8343 ret = intel_pipe_set_base(set->crtc,
8149 set->x, set->y, set->fb); 8344 set->x, set->y, set->fb);
8150 } 8345 }
@@ -8221,7 +8416,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8221 /* Swap pipes & planes for FBC on pre-965 */ 8416 /* Swap pipes & planes for FBC on pre-965 */
8222 intel_crtc->pipe = pipe; 8417 intel_crtc->pipe = pipe;
8223 intel_crtc->plane = pipe; 8418 intel_crtc->plane = pipe;
8224 intel_crtc->cpu_transcoder = pipe; 8419 intel_crtc->config.cpu_transcoder = pipe;
8225 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 8420 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8226 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 8421 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8227 intel_crtc->plane = !pipe; 8422 intel_crtc->plane = !pipe;
@@ -8232,8 +8427,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8232 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 8427 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
8233 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 8428 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
8234 8429
8235 intel_crtc->bpp = 24; /* default for pre-Ironlake */
8236
8237 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 8430 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
8238} 8431}
8239 8432
@@ -8314,7 +8507,7 @@ static void intel_setup_outputs(struct drm_device *dev)
8314 I915_WRITE(PFIT_CONTROL, 0); 8507 I915_WRITE(PFIT_CONTROL, 0);
8315 } 8508 }
8316 8509
8317 if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) 8510 if (!IS_ULT(dev))
8318 intel_crt_init(dev); 8511 intel_crt_init(dev);
8319 8512
8320 if (HAS_DDI(dev)) { 8513 if (HAS_DDI(dev)) {
@@ -8343,20 +8536,20 @@ static void intel_setup_outputs(struct drm_device *dev)
8343 if (has_edp_a(dev)) 8536 if (has_edp_a(dev))
8344 intel_dp_init(dev, DP_A, PORT_A); 8537 intel_dp_init(dev, DP_A, PORT_A);
8345 8538
8346 if (I915_READ(HDMIB) & PORT_DETECTED) { 8539 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
8347 /* PCH SDVOB multiplex with HDMIB */ 8540 /* PCH SDVOB multiplex with HDMIB */
8348 found = intel_sdvo_init(dev, PCH_SDVOB, true); 8541 found = intel_sdvo_init(dev, PCH_SDVOB, true);
8349 if (!found) 8542 if (!found)
8350 intel_hdmi_init(dev, HDMIB, PORT_B); 8543 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
8351 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 8544 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
8352 intel_dp_init(dev, PCH_DP_B, PORT_B); 8545 intel_dp_init(dev, PCH_DP_B, PORT_B);
8353 } 8546 }
8354 8547
8355 if (I915_READ(HDMIC) & PORT_DETECTED) 8548 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
8356 intel_hdmi_init(dev, HDMIC, PORT_C); 8549 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
8357 8550
8358 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) 8551 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
8359 intel_hdmi_init(dev, HDMID, PORT_D); 8552 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
8360 8553
8361 if (I915_READ(PCH_DP_C) & DP_DETECTED) 8554 if (I915_READ(PCH_DP_C) & DP_DETECTED)
8362 intel_dp_init(dev, PCH_DP_C, PORT_C); 8555 intel_dp_init(dev, PCH_DP_C, PORT_C);
@@ -8368,24 +8561,21 @@ static void intel_setup_outputs(struct drm_device *dev)
8368 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 8561 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
8369 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 8562 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
8370 8563
8371 if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { 8564 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
8372 intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); 8565 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
8566 PORT_B);
8373 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 8567 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
8374 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 8568 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
8375 } 8569 }
8376
8377 if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
8378 intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
8379
8380 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8570 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
8381 bool found = false; 8571 bool found = false;
8382 8572
8383 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8573 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
8384 DRM_DEBUG_KMS("probing SDVOB\n"); 8574 DRM_DEBUG_KMS("probing SDVOB\n");
8385 found = intel_sdvo_init(dev, SDVOB, true); 8575 found = intel_sdvo_init(dev, GEN3_SDVOB, true);
8386 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 8576 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8387 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 8577 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
8388 intel_hdmi_init(dev, SDVOB, PORT_B); 8578 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
8389 } 8579 }
8390 8580
8391 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 8581 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
@@ -8396,16 +8586,16 @@ static void intel_setup_outputs(struct drm_device *dev)
8396 8586
8397 /* Before G4X SDVOC doesn't have its own detect register */ 8587 /* Before G4X SDVOC doesn't have its own detect register */
8398 8588
8399 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8589 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
8400 DRM_DEBUG_KMS("probing SDVOC\n"); 8590 DRM_DEBUG_KMS("probing SDVOC\n");
8401 found = intel_sdvo_init(dev, SDVOC, false); 8591 found = intel_sdvo_init(dev, GEN3_SDVOC, false);
8402 } 8592 }
8403 8593
8404 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 8594 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
8405 8595
8406 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 8596 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8407 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 8597 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
8408 intel_hdmi_init(dev, SDVOC, PORT_C); 8598 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
8409 } 8599 }
8410 if (SUPPORTS_INTEGRATED_DP(dev)) { 8600 if (SUPPORTS_INTEGRATED_DP(dev)) {
8411 DRM_DEBUG_KMS("probing DP_C\n"); 8601 DRM_DEBUG_KMS("probing DP_C\n");
@@ -8572,20 +8762,22 @@ static void intel_init_display(struct drm_device *dev)
8572{ 8762{
8573 struct drm_i915_private *dev_priv = dev->dev_private; 8763 struct drm_i915_private *dev_priv = dev->dev_private;
8574 8764
8575 /* We always want a DPMS function */
8576 if (HAS_DDI(dev)) { 8765 if (HAS_DDI(dev)) {
8766 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
8577 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8767 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8578 dev_priv->display.crtc_enable = haswell_crtc_enable; 8768 dev_priv->display.crtc_enable = haswell_crtc_enable;
8579 dev_priv->display.crtc_disable = haswell_crtc_disable; 8769 dev_priv->display.crtc_disable = haswell_crtc_disable;
8580 dev_priv->display.off = haswell_crtc_off; 8770 dev_priv->display.off = haswell_crtc_off;
8581 dev_priv->display.update_plane = ironlake_update_plane; 8771 dev_priv->display.update_plane = ironlake_update_plane;
8582 } else if (HAS_PCH_SPLIT(dev)) { 8772 } else if (HAS_PCH_SPLIT(dev)) {
8773 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
8583 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 8774 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8584 dev_priv->display.crtc_enable = ironlake_crtc_enable; 8775 dev_priv->display.crtc_enable = ironlake_crtc_enable;
8585 dev_priv->display.crtc_disable = ironlake_crtc_disable; 8776 dev_priv->display.crtc_disable = ironlake_crtc_disable;
8586 dev_priv->display.off = ironlake_crtc_off; 8777 dev_priv->display.off = ironlake_crtc_off;
8587 dev_priv->display.update_plane = ironlake_update_plane; 8778 dev_priv->display.update_plane = ironlake_update_plane;
8588 } else { 8779 } else {
8780 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
8589 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 8781 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8590 dev_priv->display.crtc_enable = i9xx_crtc_enable; 8782 dev_priv->display.crtc_enable = i9xx_crtc_enable;
8591 dev_priv->display.crtc_disable = i9xx_crtc_disable; 8783 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -8828,7 +9020,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
8828void intel_modeset_init(struct drm_device *dev) 9020void intel_modeset_init(struct drm_device *dev)
8829{ 9021{
8830 struct drm_i915_private *dev_priv = dev->dev_private; 9022 struct drm_i915_private *dev_priv = dev->dev_private;
8831 int i, ret; 9023 int i, j, ret;
8832 9024
8833 drm_mode_config_init(dev); 9025 drm_mode_config_init(dev);
8834 9026
@@ -8844,6 +9036,9 @@ void intel_modeset_init(struct drm_device *dev)
8844 9036
8845 intel_init_pm(dev); 9037 intel_init_pm(dev);
8846 9038
9039 if (INTEL_INFO(dev)->num_pipes == 0)
9040 return;
9041
8847 intel_init_display(dev); 9042 intel_init_display(dev);
8848 9043
8849 if (IS_GEN2(dev)) { 9044 if (IS_GEN2(dev)) {
@@ -8859,13 +9054,17 @@ void intel_modeset_init(struct drm_device *dev)
8859 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 9054 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
8860 9055
8861 DRM_DEBUG_KMS("%d display pipe%s available.\n", 9056 DRM_DEBUG_KMS("%d display pipe%s available.\n",
8862 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 9057 INTEL_INFO(dev)->num_pipes,
9058 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
8863 9059
8864 for (i = 0; i < dev_priv->num_pipe; i++) { 9060 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
8865 intel_crtc_init(dev, i); 9061 intel_crtc_init(dev, i);
8866 ret = intel_plane_init(dev, i); 9062 for (j = 0; j < dev_priv->num_plane; j++) {
8867 if (ret) 9063 ret = intel_plane_init(dev, i, j);
8868 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 9064 if (ret)
9065 DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
9066 i, j, ret);
9067 }
8869 } 9068 }
8870 9069
8871 intel_cpu_pll_init(dev); 9070 intel_cpu_pll_init(dev);
@@ -8918,10 +9117,11 @@ static void intel_enable_pipe_a(struct drm_device *dev)
8918static bool 9117static bool
8919intel_check_plane_mapping(struct intel_crtc *crtc) 9118intel_check_plane_mapping(struct intel_crtc *crtc)
8920{ 9119{
8921 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 9120 struct drm_device *dev = crtc->base.dev;
9121 struct drm_i915_private *dev_priv = dev->dev_private;
8922 u32 reg, val; 9122 u32 reg, val;
8923 9123
8924 if (dev_priv->num_pipe == 1) 9124 if (INTEL_INFO(dev)->num_pipes == 1)
8925 return true; 9125 return true;
8926 9126
8927 reg = DSPCNTR(!crtc->plane); 9127 reg = DSPCNTR(!crtc->plane);
@@ -8941,7 +9141,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
8941 u32 reg; 9141 u32 reg;
8942 9142
8943 /* Clear any frame start delays used for debugging left by the BIOS */ 9143 /* Clear any frame start delays used for debugging left by the BIOS */
8944 reg = PIPECONF(crtc->cpu_transcoder); 9144 reg = PIPECONF(crtc->config.cpu_transcoder);
8945 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 9145 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8946 9146
8947 /* We need to sanitize the plane -> pipe mapping first because this will 9147 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -9077,6 +9277,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9077 struct drm_i915_private *dev_priv = dev->dev_private; 9277 struct drm_i915_private *dev_priv = dev->dev_private;
9078 enum pipe pipe; 9278 enum pipe pipe;
9079 u32 tmp; 9279 u32 tmp;
9280 struct drm_plane *plane;
9080 struct intel_crtc *crtc; 9281 struct intel_crtc *crtc;
9081 struct intel_encoder *encoder; 9282 struct intel_encoder *encoder;
9082 struct intel_connector *connector; 9283 struct intel_connector *connector;
@@ -9096,24 +9297,32 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9096 case TRANS_DDI_EDP_INPUT_C_ONOFF: 9297 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9097 pipe = PIPE_C; 9298 pipe = PIPE_C;
9098 break; 9299 break;
9300 default:
9301 /* A bogus value has been programmed, disable
9302 * the transcoder */
9303 WARN(1, "Bogus eDP source %08x\n", tmp);
9304 intel_ddi_disable_transcoder_func(dev_priv,
9305 TRANSCODER_EDP);
9306 goto setup_pipes;
9099 } 9307 }
9100 9308
9101 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9309 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9102 crtc->cpu_transcoder = TRANSCODER_EDP; 9310 crtc->config.cpu_transcoder = TRANSCODER_EDP;
9103 9311
9104 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", 9312 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9105 pipe_name(pipe)); 9313 pipe_name(pipe));
9106 } 9314 }
9107 } 9315 }
9108 9316
9109 for_each_pipe(pipe) { 9317setup_pipes:
9110 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9318 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9319 base.head) {
9320 enum transcoder tmp = crtc->config.cpu_transcoder;
9321 memset(&crtc->config, 0, sizeof(crtc->config));
9322 crtc->config.cpu_transcoder = tmp;
9111 9323
9112 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); 9324 crtc->active = dev_priv->display.get_pipe_config(crtc,
9113 if (tmp & PIPECONF_ENABLE) 9325 &crtc->config);
9114 crtc->active = true;
9115 else
9116 crtc->active = false;
9117 9326
9118 crtc->base.enabled = crtc->active; 9327 crtc->base.enabled = crtc->active;
9119 9328
@@ -9172,9 +9381,19 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9172 } 9381 }
9173 9382
9174 if (force_restore) { 9383 if (force_restore) {
9384 /*
9385 * We need to use raw interfaces for restoring state to avoid
9386 * checking (bogus) intermediate states.
9387 */
9175 for_each_pipe(pipe) { 9388 for_each_pipe(pipe) {
9176 intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]); 9389 struct drm_crtc *crtc =
9390 dev_priv->pipe_to_crtc_mapping[pipe];
9391
9392 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
9393 crtc->fb);
9177 } 9394 }
9395 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
9396 intel_plane_restore(plane);
9178 9397
9179 i915_redisable_vga(dev); 9398 i915_redisable_vga(dev);
9180 } else { 9399 } else {
@@ -9236,6 +9455,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
9236 /* flush any delayed tasks or pending work */ 9455 /* flush any delayed tasks or pending work */
9237 flush_scheduled_work(); 9456 flush_scheduled_work();
9238 9457
9458 /* destroy backlight, if any, before the connectors */
9459 intel_panel_destroy_backlight(dev);
9460
9239 drm_mode_config_cleanup(dev); 9461 drm_mode_config_cleanup(dev);
9240 9462
9241 intel_cleanup_overlay(dev); 9463 intel_cleanup_overlay(dev);
@@ -9323,15 +9545,24 @@ intel_display_capture_error_state(struct drm_device *dev)
9323 for_each_pipe(i) { 9545 for_each_pipe(i) {
9324 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); 9546 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
9325 9547
9326 error->cursor[i].control = I915_READ(CURCNTR(i)); 9548 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
9327 error->cursor[i].position = I915_READ(CURPOS(i)); 9549 error->cursor[i].control = I915_READ(CURCNTR(i));
9328 error->cursor[i].base = I915_READ(CURBASE(i)); 9550 error->cursor[i].position = I915_READ(CURPOS(i));
9551 error->cursor[i].base = I915_READ(CURBASE(i));
9552 } else {
9553 error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
9554 error->cursor[i].position = I915_READ(CURPOS_IVB(i));
9555 error->cursor[i].base = I915_READ(CURBASE_IVB(i));
9556 }
9329 9557
9330 error->plane[i].control = I915_READ(DSPCNTR(i)); 9558 error->plane[i].control = I915_READ(DSPCNTR(i));
9331 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 9559 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9332 error->plane[i].size = I915_READ(DSPSIZE(i)); 9560 if (INTEL_INFO(dev)->gen <= 3) {
9333 error->plane[i].pos = I915_READ(DSPPOS(i)); 9561 error->plane[i].size = I915_READ(DSPSIZE(i));
9334 error->plane[i].addr = I915_READ(DSPADDR(i)); 9562 error->plane[i].pos = I915_READ(DSPPOS(i));
9563 }
9564 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
9565 error->plane[i].addr = I915_READ(DSPADDR(i));
9335 if (INTEL_INFO(dev)->gen >= 4) { 9566 if (INTEL_INFO(dev)->gen >= 4) {
9336 error->plane[i].surface = I915_READ(DSPSURF(i)); 9567 error->plane[i].surface = I915_READ(DSPSURF(i));
9337 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 9568 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
@@ -9355,10 +9586,9 @@ intel_display_print_error_state(struct seq_file *m,
9355 struct drm_device *dev, 9586 struct drm_device *dev,
9356 struct intel_display_error_state *error) 9587 struct intel_display_error_state *error)
9357{ 9588{
9358 drm_i915_private_t *dev_priv = dev->dev_private;
9359 int i; 9589 int i;
9360 9590
9361 seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); 9591 seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
9362 for_each_pipe(i) { 9592 for_each_pipe(i) {
9363 seq_printf(m, "Pipe [%d]:\n", i); 9593 seq_printf(m, "Pipe [%d]:\n", i);
9364 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); 9594 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
@@ -9373,9 +9603,12 @@ intel_display_print_error_state(struct seq_file *m,
9373 seq_printf(m, "Plane [%d]:\n", i); 9603 seq_printf(m, "Plane [%d]:\n", i);
9374 seq_printf(m, " CNTR: %08x\n", error->plane[i].control); 9604 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
9375 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 9605 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
9376 seq_printf(m, " SIZE: %08x\n", error->plane[i].size); 9606 if (INTEL_INFO(dev)->gen <= 3) {
9377 seq_printf(m, " POS: %08x\n", error->plane[i].pos); 9607 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
9378 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); 9608 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
9609 }
9610 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
9611 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
9379 if (INTEL_INFO(dev)->gen >= 4) { 9612 if (INTEL_INFO(dev)->gen >= 4) {
9380 seq_printf(m, " SURF: %08x\n", error->plane[i].surface); 9613 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
9381 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 9614 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c3f5bd8a5077..fb2fbc1e08b9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -109,29 +109,6 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
109 109
110static void intel_dp_link_down(struct intel_dp *intel_dp); 110static void intel_dp_link_down(struct intel_dp *intel_dp);
111 111
112void
113intel_edp_link_config(struct intel_encoder *intel_encoder,
114 int *lane_num, int *link_bw)
115{
116 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
117
118 *lane_num = intel_dp->lane_count;
119 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
120}
121
122int
123intel_edp_target_clock(struct intel_encoder *intel_encoder,
124 struct drm_display_mode *mode)
125{
126 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
127 struct intel_connector *intel_connector = intel_dp->attached_connector;
128
129 if (intel_connector->panel.fixed_mode)
130 return intel_connector->panel.fixed_mode->clock;
131 else
132 return mode->clock;
133}
134
135static int 112static int
136intel_dp_max_link_bw(struct intel_dp *intel_dp) 113intel_dp_max_link_bw(struct intel_dp *intel_dp)
137{ 114{
@@ -177,34 +154,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
177 return (max_link_clock * max_lanes * 8) / 10; 154 return (max_link_clock * max_lanes * 8) / 10;
178} 155}
179 156
180static bool
181intel_dp_adjust_dithering(struct intel_dp *intel_dp,
182 struct drm_display_mode *mode,
183 bool adjust_mode)
184{
185 int max_link_clock =
186 drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
187 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
188 int max_rate, mode_rate;
189
190 mode_rate = intel_dp_link_required(mode->clock, 24);
191 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
192
193 if (mode_rate > max_rate) {
194 mode_rate = intel_dp_link_required(mode->clock, 18);
195 if (mode_rate > max_rate)
196 return false;
197
198 if (adjust_mode)
199 mode->private_flags
200 |= INTEL_MODE_DP_FORCE_6BPC;
201
202 return true;
203 }
204
205 return true;
206}
207
208static int 157static int
209intel_dp_mode_valid(struct drm_connector *connector, 158intel_dp_mode_valid(struct drm_connector *connector,
210 struct drm_display_mode *mode) 159 struct drm_display_mode *mode)
@@ -212,6 +161,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
212 struct intel_dp *intel_dp = intel_attached_dp(connector); 161 struct intel_dp *intel_dp = intel_attached_dp(connector);
213 struct intel_connector *intel_connector = to_intel_connector(connector); 162 struct intel_connector *intel_connector = to_intel_connector(connector);
214 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 163 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
164 int target_clock = mode->clock;
165 int max_rate, mode_rate, max_lanes, max_link_clock;
215 166
216 if (is_edp(intel_dp) && fixed_mode) { 167 if (is_edp(intel_dp) && fixed_mode) {
217 if (mode->hdisplay > fixed_mode->hdisplay) 168 if (mode->hdisplay > fixed_mode->hdisplay)
@@ -219,9 +170,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
219 170
220 if (mode->vdisplay > fixed_mode->vdisplay) 171 if (mode->vdisplay > fixed_mode->vdisplay)
221 return MODE_PANEL; 172 return MODE_PANEL;
173
174 target_clock = fixed_mode->clock;
222 } 175 }
223 176
224 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 177 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
178 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
179
180 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
181 mode_rate = intel_dp_link_required(target_clock, 18);
182
183 if (mode_rate > max_rate)
225 return MODE_CLOCK_HIGH; 184 return MODE_CLOCK_HIGH;
226 185
227 if (mode->clock < 10000) 186 if (mode->clock < 10000)
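
Two formulas carry the whole check. Required bandwidth is pixel_clock x bpp / 8, evaluated at 18 bpp because the sink can always be driven at 6 bpc with dithering; available bandwidth is link_clock x lanes x 8 / 10, reflecting the 8b/10b coding overhead. As a self-contained sketch (clocks in kHz, values illustrative):

    #include <stdio.h>

    static int link_required(int pixel_clock_khz, int bpp)
    {
        return (pixel_clock_khz * bpp + 7) / 8;   /* round up */
    }

    /* 8b/10b leaves 80% of the raw link rate for payload. */
    static int max_data_rate(int link_clock_khz, int lanes)
    {
        return (link_clock_khz * lanes * 8) / 10;
    }

    int main(void)
    {
        int mode_rate = link_required(148500, 18);  /* 1080p at 6 bpc */
        int max_rate  = max_data_rate(270000, 4);   /* 2.7 GHz x 4    */
        printf("%s\n", mode_rate > max_rate ? "MODE_CLOCK_HIGH" : "ok");
        return 0;
    }
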
@@ -294,16 +253,20 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
294{ 253{
295 struct drm_device *dev = intel_dp_to_dev(intel_dp); 254 struct drm_device *dev = intel_dp_to_dev(intel_dp);
296 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = dev->dev_private;
256 u32 pp_stat_reg;
297 257
298 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 258 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
259 return (I915_READ(pp_stat_reg) & PP_ON) != 0;
299} 260}
300 261
301static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 262static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
302{ 263{
303 struct drm_device *dev = intel_dp_to_dev(intel_dp); 264 struct drm_device *dev = intel_dp_to_dev(intel_dp);
304 struct drm_i915_private *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = dev->dev_private;
266 u32 pp_ctrl_reg;
305 267
306 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 268 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
269 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
307} 270}
308 271
309static void 272static void
@@ -311,14 +274,19 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
311{ 274{
312 struct drm_device *dev = intel_dp_to_dev(intel_dp); 275 struct drm_device *dev = intel_dp_to_dev(intel_dp);
313 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
277 u32 pp_stat_reg, pp_ctrl_reg;
314 278
315 if (!is_edp(intel_dp)) 279 if (!is_edp(intel_dp))
316 return; 280 return;
281
282 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
283 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
284
317 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 285 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
318 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 286 WARN(1, "eDP powered off while attempting aux channel communication.\n");
319 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 287 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
320 I915_READ(PCH_PP_STATUS), 288 I915_READ(pp_stat_reg),
321 I915_READ(PCH_PP_CONTROL)); 289 I915_READ(pp_ctrl_reg));
322 } 290 }
323} 291}
324 292
@@ -328,29 +296,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 296 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
329 struct drm_device *dev = intel_dig_port->base.base.dev; 297 struct drm_device *dev = intel_dig_port->base.base.dev;
330 struct drm_i915_private *dev_priv = dev->dev_private; 298 struct drm_i915_private *dev_priv = dev->dev_private;
331 uint32_t ch_ctl = intel_dp->output_reg + 0x10; 299 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
332 uint32_t status; 300 uint32_t status;
333 bool done; 301 bool done;
334 302
335 if (IS_HASWELL(dev)) {
336 switch (intel_dig_port->port) {
337 case PORT_A:
338 ch_ctl = DPA_AUX_CH_CTL;
339 break;
340 case PORT_B:
341 ch_ctl = PCH_DPB_AUX_CH_CTL;
342 break;
343 case PORT_C:
344 ch_ctl = PCH_DPC_AUX_CH_CTL;
345 break;
346 case PORT_D:
347 ch_ctl = PCH_DPD_AUX_CH_CTL;
348 break;
349 default:
350 BUG();
351 }
352 }
353
354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 303#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
355 if (has_aux_irq) 304 if (has_aux_irq)
356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 305 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
@@ -370,11 +319,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
370 uint8_t *send, int send_bytes, 319 uint8_t *send, int send_bytes,
371 uint8_t *recv, int recv_size) 320 uint8_t *recv, int recv_size)
372{ 321{
373 uint32_t output_reg = intel_dp->output_reg;
374 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 322 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
375 struct drm_device *dev = intel_dig_port->base.base.dev; 323 struct drm_device *dev = intel_dig_port->base.base.dev;
376 struct drm_i915_private *dev_priv = dev->dev_private; 324 struct drm_i915_private *dev_priv = dev->dev_private;
377 uint32_t ch_ctl = output_reg + 0x10; 325 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
378 uint32_t ch_data = ch_ctl + 4; 326 uint32_t ch_data = ch_ctl + 4;
379 int i, ret, recv_bytes; 327 int i, ret, recv_bytes;
380 uint32_t status; 328 uint32_t status;
@@ -388,29 +336,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
388 */ 336 */
389 pm_qos_update_request(&dev_priv->pm_qos, 0); 337 pm_qos_update_request(&dev_priv->pm_qos, 0);
390 338
391 if (IS_HASWELL(dev)) {
392 switch (intel_dig_port->port) {
393 case PORT_A:
394 ch_ctl = DPA_AUX_CH_CTL;
395 ch_data = DPA_AUX_CH_DATA1;
396 break;
397 case PORT_B:
398 ch_ctl = PCH_DPB_AUX_CH_CTL;
399 ch_data = PCH_DPB_AUX_CH_DATA1;
400 break;
401 case PORT_C:
402 ch_ctl = PCH_DPC_AUX_CH_CTL;
403 ch_data = PCH_DPC_AUX_CH_DATA1;
404 break;
405 case PORT_D:
406 ch_ctl = PCH_DPD_AUX_CH_CTL;
407 ch_data = PCH_DPD_AUX_CH_DATA1;
408 break;
409 default:
410 BUG();
411 }
412 }
413
414 intel_dp_check_edp(intel_dp); 339 intel_dp_check_edp(intel_dp);
415 /* The clock divider is based off the hrawclk, 340 /* The clock divider is based off the hrawclk,
416 * and would like to run at 2MHz. So, take the 341 * and would like to run at 2MHz. So, take the
@@ -428,10 +353,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
428 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 353 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
429 else 354 else
430 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 355 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
431 } else if (HAS_PCH_SPLIT(dev)) 356 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
357 /* Workaround for non-ULT HSW */
358 aux_clock_divider = 74;
359 } else if (HAS_PCH_SPLIT(dev)) {
432 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 360 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
433 else 361 } else {
434 aux_clock_divider = intel_hrawclk(dev) / 2; 362 aux_clock_divider = intel_hrawclk(dev) / 2;
363 }
435 364
436 if (IS_GEN6(dev)) 365 if (IS_GEN6(dev))
437 precharge = 3; 366 precharge = 3;
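
Every branch of the divider selection chases the same target: scale the block's reference clock down to the roughly 2 MHz the AUX state machine wants, and the non-ULT HSW value of 74 is a hard-coded workaround rather than anything derived. A sketch of the generic arithmetic (the reference clock value is illustrative, and the 2 MHz target is taken from the comment above):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int rawclk_mhz = 125;                   /* illustrative PCH rawclk */
        int div = DIV_ROUND_UP(rawclk_mhz, 2);  /* aim for a ~2 MHz AUX clock */
        printf("divider=%d -> aux clock ~%d kHz\n",
               div, rawclk_mhz * 1000 / div);
        return 0;
    }
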
@@ -732,18 +661,26 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
732} 661}
733 662
734bool 663bool
735intel_dp_mode_fixup(struct drm_encoder *encoder, 664intel_dp_compute_config(struct intel_encoder *encoder,
736 const struct drm_display_mode *mode, 665 struct intel_crtc_config *pipe_config)
737 struct drm_display_mode *adjusted_mode)
738{ 666{
739 struct drm_device *dev = encoder->dev; 667 struct drm_device *dev = encoder->base.dev;
740 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 668 struct drm_i915_private *dev_priv = dev->dev_private;
669 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
670 struct drm_display_mode *mode = &pipe_config->requested_mode;
671 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
741 struct intel_connector *intel_connector = intel_dp->attached_connector; 672 struct intel_connector *intel_connector = intel_dp->attached_connector;
742 int lane_count, clock; 673 int lane_count, clock;
743 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 674 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
744 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 675 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
745 int bpp, mode_rate; 676 int bpp, mode_rate;
746 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 677 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
678 int target_clock, link_avail, link_clock;
679
680 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
681 pipe_config->has_pch_encoder = true;
682
683 pipe_config->has_dp_encoder = true;
747 684
748 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 685 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
749 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 686 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -752,6 +689,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
752 intel_connector->panel.fitting_mode, 689 intel_connector->panel.fitting_mode,
753 mode, adjusted_mode); 690 mode, adjusted_mode);
754 } 691 }
692 /* We need to take the panel's fixed mode into account. */
693 target_clock = adjusted_mode->clock;
755 694
756 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 695 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
757 return false; 696 return false;
@@ -760,11 +699,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
760 "max bw %02x pixel clock %iKHz\n", 699 "max bw %02x pixel clock %iKHz\n",
761 max_lane_count, bws[max_clock], adjusted_mode->clock); 700 max_lane_count, bws[max_clock], adjusted_mode->clock);
762 701
763 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) 702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
764 return false; 703 * bpc in between. */
704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
705 for (; bpp >= 6*3; bpp -= 2*3) {
706 mode_rate = intel_dp_link_required(target_clock, bpp);
707
708 for (clock = 0; clock <= max_clock; clock++) {
709 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
710 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
711 link_avail = intel_dp_max_data_rate(link_clock,
712 lane_count);
713
714 if (mode_rate <= link_avail) {
715 goto found;
716 }
717 }
718 }
719 }
765 720
766 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 721 return false;
767 722
723found:
768 if (intel_dp->color_range_auto) { 724 if (intel_dp->color_range_auto) {
769 /* 725 /*
770 * See: 726 * See:
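
The replacement for the old force-6bpc flag is this explicit three-level walk: start at the highest bpp the pipe config allows, and for each bpp try every link rate and lane count, taking the first combination whose payload capacity covers the mode. The same loop as a standalone sketch, reusing the rate helpers from the mode_valid sketch above:

    #include <stdio.h>

    static int link_required(int clock_khz, int bpp)
    {
        return (clock_khz * bpp + 7) / 8;
    }

    static int max_data_rate(int link_khz, int lanes)
    {
        return (link_khz * lanes * 8) / 10;
    }

    int main(void)
    {
        static const int rates[] = { 162000, 270000 };  /* 1.62 / 2.7 GHz */
        int target_clock = 148500, max_bpp = 24, max_lanes = 4;

        for (int bpp = max_bpp; bpp >= 18; bpp -= 6) {  /* 2 bpc steps */
            int mode_rate = link_required(target_clock, bpp);

            for (unsigned r = 0; r < 2; r++)
                for (int lanes = 1; lanes <= max_lanes; lanes <<= 1)
                    if (mode_rate <= max_data_rate(rates[r], lanes)) {
                        printf("bpp %d, link %d kHz, %d lanes\n",
                               bpp, rates[r], lanes);
                        return 0;
                    }
        }
        puts("no link configuration fits");
        return 1;
    }

Ordering matters: bpp is the outer loop, so depth is reduced only after every rate/lane combination has failed at the current bpp, and within a bpp the cheapest link configuration wins.
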
@@ -778,104 +734,38 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
         }
 
         if (intel_dp->color_range)
-                adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
-
-        mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
-
-        for (clock = 0; clock <= max_clock; clock++) {
-                for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                        int link_bw_clock =
-                                drm_dp_bw_code_to_link_rate(bws[clock]);
-                        int link_avail = intel_dp_max_data_rate(link_bw_clock,
-                                                                lane_count);
-
-                        if (mode_rate <= link_avail) {
-                                intel_dp->link_bw = bws[clock];
-                                intel_dp->lane_count = lane_count;
-                                adjusted_mode->clock = link_bw_clock;
-                                DRM_DEBUG_KMS("DP link bw %02x lane "
-                                              "count %d clock %d bpp %d\n",
-                                              intel_dp->link_bw, intel_dp->lane_count,
-                                              adjusted_mode->clock, bpp);
-                                DRM_DEBUG_KMS("DP link bw required %i available %i\n",
-                                              mode_rate, link_avail);
-                                return true;
-                        }
-                }
-        }
-
-        return false;
-}
+                pipe_config->limited_color_range = true;
 
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
-{
-        struct drm_device *dev = crtc->dev;
-        struct intel_encoder *intel_encoder;
-        struct intel_dp *intel_dp;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        int lane_count = 4;
-        struct intel_link_m_n m_n;
-        int pipe = intel_crtc->pipe;
-        enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-        int target_clock;
+        intel_dp->link_bw = bws[clock];
+        intel_dp->lane_count = lane_count;
+        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+        pipe_config->pixel_target_clock = target_clock;
 
-        /*
-         * Find the lane count in the intel_encoder private
-         */
-        for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-                intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
-                if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-                    intel_encoder->type == INTEL_OUTPUT_EDP)
-                {
-                        lane_count = intel_dp->lane_count;
-                        break;
-                }
-        }
-
-        target_clock = mode->clock;
-        for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-                if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-                        target_clock = intel_edp_target_clock(intel_encoder,
-                                                              mode);
-                        break;
-                }
-        }
+        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+                      intel_dp->link_bw, intel_dp->lane_count,
+                      adjusted_mode->clock, bpp);
+        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+                      mode_rate, link_avail);
+
+        intel_link_compute_m_n(bpp, lane_count,
+                               target_clock, adjusted_mode->clock,
+                               &pipe_config->dp_m_n);
 
         /*
-         * Compute the GMCH and Link ratios. The '3' here is
-         * the number of bytes_per_pixel post-LUT, which we always
-         * set up for 8-bits of R/G/B, or 3 bytes total.
+         * XXX: We have a strange regression where using the vbt edp bpp value
+         * for the link bw computation results in black screens, the panel only
+         * works when we do the computation at the usual 24bpp (but still
+         * requires us to use 18bpp). Until that's fully debugged, stay
+         * bug-for-bug compatible with the old code.
          */
-        intel_link_compute_m_n(intel_crtc->bpp, lane_count,
-                               target_clock, adjusted_mode->clock, &m_n);
-
-        if (IS_HASWELL(dev)) {
-                I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
-                           TU_SIZE(m_n.tu) | m_n.gmch_m);
-                I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
-                I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
-                I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
-        } else if (HAS_PCH_SPLIT(dev)) {
-                I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-                I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
-                I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
-                I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
-        } else if (IS_VALLEYVIEW(dev)) {
-                I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-                I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-                I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-                I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
-        } else {
-                I915_WRITE(PIPE_GMCH_DATA_M(pipe),
-                           TU_SIZE(m_n.tu) | m_n.gmch_m);
-                I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
-                I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
-                I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+        if (is_edp(intel_dp) && dev_priv->edp.bpp) {
+                DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
+                              bpp, dev_priv->edp.bpp);
+                bpp = min_t(int, bpp, dev_priv->edp.bpp);
         }
+        pipe_config->pipe_bpp = bpp;
+
+        return true;
 }
 
 void intel_dp_init_link_config(struct intel_dp *intel_dp)
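intel_link_compute_m_n() (in intel_display.c, outside this hunk) boils the two bandwidth ratios down to the M/N register pairs. A standalone sketch of the idea, assuming data M/N = (pixel_clock * bpp) : (link_clock * lanes * 8) and link M/N = pixel_clock : link_clock — simplified here with a plain gcd reduction rather than the hardware's fixed-point rounding:

#include <stdio.h>

static unsigned gcd(unsigned a, unsigned b)
{
        while (b) {
                unsigned t = a % b;
                a = b;
                b = t;
        }
        return a;
}

static void reduce(unsigned m, unsigned n, unsigned *m_out, unsigned *n_out)
{
        unsigned g = gcd(m, n);
        *m_out = m / g;
        *n_out = n / g;
}

int main(void)
{
        unsigned bpp = 24, lanes = 4;
        unsigned pixel_clock = 148500;  /* kHz */
        unsigned link_clock = 162000;   /* kHz */
        unsigned gmch_m, gmch_n, link_m, link_n;

        /* Data M/N: pixel bandwidth vs. total link bandwidth, in bits. */
        reduce(pixel_clock * bpp, link_clock * lanes * 8, &gmch_m, &gmch_n);
        /* Link M/N: pixel clock vs. link clock. */
        reduce(pixel_clock, link_clock, &link_m, &link_n);

        printf("data M/N = %u/%u, link M/N = %u/%u\n",
               gmch_m, gmch_n, link_m, link_n);
        return 0;
}

The patch's point is that these values now land in pipe_config->dp_m_n instead of being written straight to PIPE_DATA_M1 and friends from the encoder; the crtc code programs them via the new intel_cpu_transcoder_set_m_n()/intel_pch_transcoder_set_m_n() helpers declared further down.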
@@ -994,7 +884,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                 else
                         intel_dp->DP |= DP_PLL_FREQ_270MHZ;
         } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
-                if (!HAS_PCH_SPLIT(dev))
+                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                         intel_dp->DP |= intel_dp->color_range;
 
                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1009,7 +899,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
         if (intel_crtc->pipe == 1)
                 intel_dp->DP |= DP_PIPEB_SELECT;
 
-        if (is_cpu_edp(intel_dp)) {
+        if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                 /* don't miss out required setting for eDP */
                 if (adjusted_mode->clock < 200000)
                         intel_dp->DP |= DP_PLL_FREQ_160MHZ;
@@ -1020,7 +910,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
         }
 
-        if (is_cpu_edp(intel_dp))
+        if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 }
 
@@ -1039,16 +929,20 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 {
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 pp_stat_reg, pp_ctrl_reg;
+
+        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
 
         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                       mask, value,
-                      I915_READ(PCH_PP_STATUS),
-                      I915_READ(PCH_PP_CONTROL));
+                      I915_READ(pp_stat_reg),
+                      I915_READ(pp_ctrl_reg));
 
-        if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
-                          I915_READ(PCH_PP_STATUS),
-                          I915_READ(PCH_PP_CONTROL));
+                          I915_READ(pp_stat_reg),
+                          I915_READ(pp_ctrl_reg));
         }
 }
 
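The same IS_VALLEYVIEW() ternary recurs in every panel-power function below. One way it could be factored — a sketch only, since at this point in the series i915 deliberately keeps the open-coded ternaries, and the offsets/names here are illustrative stand-ins for the i915_reg.h definitions:

#include <stdio.h>

/* Hypothetical register offsets, standing in for PCH_PP_* / PIPEA_PP_*. */
enum {
        PCH_PP_STATUS    = 0xc7200, PCH_PP_CONTROL   = 0xc7204,
        PIPEA_PP_STATUS  = 0x61200, PIPEA_PP_CONTROL = 0x61204,
};

struct pp_regs {
        unsigned int status;
        unsigned int control;
};

/* One lookup instead of repeating the ternary at every call site. */
static struct pp_regs pp_regs_for(int is_valleyview)
{
        struct pp_regs r = {
                .status  = is_valleyview ? PIPEA_PP_STATUS  : PCH_PP_STATUS,
                .control = is_valleyview ? PIPEA_PP_CONTROL : PCH_PP_CONTROL,
        };
        return r;
}

int main(void)
{
        struct pp_regs r = pp_regs_for(1);
        printf("status 0x%x control 0x%x\n", r.status, r.control);
        return 0;
}

Later i915 releases do eventually grow small helpers along these lines; here the duplicated ternaries keep the per-function diffs minimal.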
@@ -1075,9 +969,15 @@ static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
  * is locked
  */
 
-static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
-        u32 control = I915_READ(PCH_PP_CONTROL);
+        struct drm_device *dev = intel_dp_to_dev(intel_dp);
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 control;
+        u32 pp_ctrl_reg;
+
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+        control = I915_READ(pp_ctrl_reg);
 
         control &= ~PANEL_UNLOCK_MASK;
         control |= PANEL_UNLOCK_REGS;
@@ -1089,6 +989,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 pp;
+        u32 pp_stat_reg, pp_ctrl_reg;
 
         if (!is_edp(intel_dp))
                 return;
@@ -1107,13 +1008,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
         if (!ironlake_edp_have_panel_power(intel_dp))
                 ironlake_wait_panel_power_cycle(intel_dp);
 
-        pp = ironlake_get_pp_control(dev_priv);
+        pp = ironlake_get_pp_control(intel_dp);
         pp |= EDP_FORCE_VDD;
-        I915_WRITE(PCH_PP_CONTROL, pp);
-        POSTING_READ(PCH_PP_CONTROL);
-        DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
-                      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
 
+        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+        I915_WRITE(pp_ctrl_reg, pp);
+        POSTING_READ(pp_ctrl_reg);
+        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+                      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
         /*
          * If the panel wasn't on, delay before accessing aux channel
          */
@@ -1128,19 +1032,23 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 pp;
+        u32 pp_stat_reg, pp_ctrl_reg;
 
         WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
         if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-                pp = ironlake_get_pp_control(dev_priv);
+                pp = ironlake_get_pp_control(intel_dp);
                 pp &= ~EDP_FORCE_VDD;
-                I915_WRITE(PCH_PP_CONTROL, pp);
-                POSTING_READ(PCH_PP_CONTROL);
 
-                /* Make sure sequencer is idle before allowing subsequent activity */
-                DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
-                              I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+                pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+                pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+                I915_WRITE(pp_ctrl_reg, pp);
+                POSTING_READ(pp_ctrl_reg);
 
+                /* Make sure sequencer is idle before allowing subsequent activity */
+                DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+                              I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
                 msleep(intel_dp->panel_power_down_delay);
         }
 }
@@ -1184,6 +1092,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 pp;
+        u32 pp_ctrl_reg;
 
         if (!is_edp(intel_dp))
                 return;
@@ -1197,7 +1106,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
         ironlake_wait_panel_power_cycle(intel_dp);
 
-        pp = ironlake_get_pp_control(dev_priv);
+        pp = ironlake_get_pp_control(intel_dp);
         if (IS_GEN5(dev)) {
                 /* ILK workaround: disable reset around power sequence */
                 pp &= ~PANEL_POWER_RESET;
@@ -1209,8 +1118,10 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
         if (!IS_GEN5(dev))
                 pp |= PANEL_POWER_RESET;
 
-        I915_WRITE(PCH_PP_CONTROL, pp);
-        POSTING_READ(PCH_PP_CONTROL);
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+        I915_WRITE(pp_ctrl_reg, pp);
+        POSTING_READ(pp_ctrl_reg);
 
         ironlake_wait_panel_on(intel_dp);
 
@@ -1226,6 +1137,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 pp;
+        u32 pp_ctrl_reg;
 
         if (!is_edp(intel_dp))
                 return;
@@ -1234,12 +1146,15 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
         WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
-        pp = ironlake_get_pp_control(dev_priv);
+        pp = ironlake_get_pp_control(intel_dp);
         /* We need to switch off panel power _and_ force vdd, for otherwise some
          * panels get very unhappy and cease to work. */
         pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
-        I915_WRITE(PCH_PP_CONTROL, pp);
-        POSTING_READ(PCH_PP_CONTROL);
+
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+        I915_WRITE(pp_ctrl_reg, pp);
+        POSTING_READ(pp_ctrl_reg);
 
         intel_dp->want_panel_vdd = false;
 
@@ -1253,6 +1168,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
         struct drm_i915_private *dev_priv = dev->dev_private;
         int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
         u32 pp;
+        u32 pp_ctrl_reg;
 
         if (!is_edp(intel_dp))
                 return;
@@ -1265,10 +1181,13 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
          * allowing it to appear.
          */
         msleep(intel_dp->backlight_on_delay);
-        pp = ironlake_get_pp_control(dev_priv);
+        pp = ironlake_get_pp_control(intel_dp);
         pp |= EDP_BLC_ENABLE;
-        I915_WRITE(PCH_PP_CONTROL, pp);
-        POSTING_READ(PCH_PP_CONTROL);
+
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+        I915_WRITE(pp_ctrl_reg, pp);
+        POSTING_READ(pp_ctrl_reg);
 
         intel_panel_enable_backlight(dev, pipe);
 }
@@ -1278,6 +1197,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 pp;
+        u32 pp_ctrl_reg;
 
         if (!is_edp(intel_dp))
                 return;
@@ -1285,10 +1205,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
         intel_panel_disable_backlight(dev);
 
         DRM_DEBUG_KMS("\n");
-        pp = ironlake_get_pp_control(dev_priv);
+        pp = ironlake_get_pp_control(intel_dp);
         pp &= ~EDP_BLC_ENABLE;
-        I915_WRITE(PCH_PP_CONTROL, pp);
-        POSTING_READ(PCH_PP_CONTROL);
+
+        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+        I915_WRITE(pp_ctrl_reg, pp);
+        POSTING_READ(pp_ctrl_reg);
         msleep(intel_dp->backlight_off_delay);
 }
 
@@ -1384,7 +1307,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
         if (!(tmp & DP_PORT_EN))
                 return false;
 
-        if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+        if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                 *pipe = PORT_TO_PIPE_CPT(tmp);
         } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                 *pipe = PORT_TO_PIPE(tmp);
@@ -1441,10 +1364,12 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct drm_device *dev = encoder->base.dev;
 
         if (is_cpu_edp(intel_dp)) {
                 intel_dp_link_down(intel_dp);
-                ironlake_edp_pll_off(intel_dp);
+                if (!IS_VALLEYVIEW(dev))
+                        ironlake_edp_pll_off(intel_dp);
         }
 }
 
@@ -1470,8 +1395,9 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct drm_device *dev = encoder->base.dev;
 
-        if (is_cpu_edp(intel_dp))
+        if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                 ironlake_edp_pll_on(intel_dp);
 }
 
@@ -1548,7 +1474,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
         struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-        if (IS_HASWELL(dev)) {
+        if (HAS_DDI(dev)) {
                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                 case DP_TRAIN_VOLTAGE_SWING_400:
                         return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1756,7 +1682,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
         uint32_t signal_levels, mask;
         uint8_t train_set = intel_dp->train_set[0];
 
-        if (IS_HASWELL(dev)) {
+        if (HAS_DDI(dev)) {
                 signal_levels = intel_hsw_signal_levels(train_set);
                 mask = DDI_BUF_EMP_MASK;
         } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
@@ -1787,7 +1713,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
         int ret;
         uint32_t temp;
 
-        if (IS_HASWELL(dev)) {
+        if (HAS_DDI(dev)) {
                 temp = I915_READ(DP_TP_CTL(port));
 
                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2311,6 +2237,16 @@ g4x_dp_detect(struct intel_dp *intel_dp)
         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
         uint32_t bit;
 
+        /* Can't disconnect eDP, but you can close the lid... */
+        if (is_edp(intel_dp)) {
+                enum drm_connector_status status;
+
+                status = intel_panel_detect(dev);
+                if (status == connector_status_unknown)
+                        status = connector_status_connected;
+                return status;
+        }
+
         switch (intel_dig_port->port) {
         case PORT_B:
                 bit = PORTB_HOTPLUG_LIVE_STATUS;
@@ -2492,6 +2428,9 @@ intel_dp_set_property(struct drm_connector *connector,
         }
 
         if (property == dev_priv->broadcast_rgb_property) {
+                bool old_auto = intel_dp->color_range_auto;
+                uint32_t old_range = intel_dp->color_range;
+
                 switch (val) {
                 case INTEL_BROADCAST_RGB_AUTO:
                         intel_dp->color_range_auto = true;
@@ -2507,6 +2446,11 @@ intel_dp_set_property(struct drm_connector *connector,
                 default:
                         return -EINVAL;
                 }
+
+                if (old_auto == intel_dp->color_range_auto &&
+                    old_range == intel_dp->color_range)
+                        return 0;
+
                 goto done;
         }
 
@@ -2538,17 +2482,14 @@ done:
 static void
 intel_dp_destroy(struct drm_connector *connector)
 {
-        struct drm_device *dev = connector->dev;
         struct intel_dp *intel_dp = intel_attached_dp(connector);
         struct intel_connector *intel_connector = to_intel_connector(connector);
 
         if (!IS_ERR_OR_NULL(intel_connector->edid))
                 kfree(intel_connector->edid);
 
-        if (is_edp(intel_dp)) {
-                intel_panel_destroy_backlight(dev);
+        if (is_edp(intel_dp))
                 intel_panel_fini(&intel_connector->panel);
-        }
 
         drm_sysfs_connector_remove(connector);
         drm_connector_cleanup(connector);
@@ -2573,7 +2514,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
-        .mode_fixup = intel_dp_mode_fixup,
         .mode_set = intel_dp_mode_set,
 };
 
@@ -2669,15 +2609,28 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct edp_power_seq cur, vbt, spec, final;
         u32 pp_on, pp_off, pp_div, pp;
+        int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+        if (HAS_PCH_SPLIT(dev)) {
+                pp_control_reg = PCH_PP_CONTROL;
+                pp_on_reg = PCH_PP_ON_DELAYS;
+                pp_off_reg = PCH_PP_OFF_DELAYS;
+                pp_div_reg = PCH_PP_DIVISOR;
+        } else {
+                pp_control_reg = PIPEA_PP_CONTROL;
+                pp_on_reg = PIPEA_PP_ON_DELAYS;
+                pp_off_reg = PIPEA_PP_OFF_DELAYS;
+                pp_div_reg = PIPEA_PP_DIVISOR;
+        }
 
         /* Workaround: Need to write PP_CONTROL with the unlock key as
          * the very first thing. */
-        pp = ironlake_get_pp_control(dev_priv);
-        I915_WRITE(PCH_PP_CONTROL, pp);
+        pp = ironlake_get_pp_control(intel_dp);
+        I915_WRITE(pp_control_reg, pp);
 
-        pp_on = I915_READ(PCH_PP_ON_DELAYS);
-        pp_off = I915_READ(PCH_PP_OFF_DELAYS);
-        pp_div = I915_READ(PCH_PP_DIVISOR);
+        pp_on = I915_READ(pp_on_reg);
+        pp_off = I915_READ(pp_off_reg);
+        pp_div = I915_READ(pp_div_reg);
 
         /* Pull timing values out of registers */
         cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -2752,7 +2705,22 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                               struct edp_power_seq *seq)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 pp_on, pp_off, pp_div;
+        u32 pp_on, pp_off, pp_div, port_sel = 0;
+        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+        int pp_on_reg, pp_off_reg, pp_div_reg;
+
+        if (HAS_PCH_SPLIT(dev)) {
+                pp_on_reg = PCH_PP_ON_DELAYS;
+                pp_off_reg = PCH_PP_OFF_DELAYS;
+                pp_div_reg = PCH_PP_DIVISOR;
+        } else {
+                pp_on_reg = PIPEA_PP_ON_DELAYS;
+                pp_off_reg = PIPEA_PP_OFF_DELAYS;
+                pp_div_reg = PIPEA_PP_DIVISOR;
+        }
+
+        if (IS_VALLEYVIEW(dev))
+                port_sel = I915_READ(pp_on_reg) & 0xc0000000;
 
         /* And finally store the new values in the power sequencer. */
         pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
@@ -2761,8 +2729,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
         /* Compute the divisor for the pp clock, simply match the Bspec
          * formula. */
-        pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
-                << PP_REFERENCE_DIVIDER_SHIFT;
+        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
         pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                    << PANEL_POWER_CYCLE_DELAY_SHIFT);
 
@@ -2770,19 +2737,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
          * power sequencer any more. */
         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                 if (is_cpu_edp(intel_dp))
-                        pp_on |= PANEL_POWER_PORT_DP_A;
+                        port_sel = PANEL_POWER_PORT_DP_A;
                 else
-                        pp_on |= PANEL_POWER_PORT_DP_D;
+                        port_sel = PANEL_POWER_PORT_DP_D;
         }
 
-        I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
-        I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
-        I915_WRITE(PCH_PP_DIVISOR, pp_div);
+        pp_on |= port_sel;
+
+        I915_WRITE(pp_on_reg, pp_on);
+        I915_WRITE(pp_off_reg, pp_off);
+        I915_WRITE(pp_div_reg, pp_div);
 
         DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                       I915_READ(pp_on_reg),
                       I915_READ(pp_off_reg),
                       I915_READ(pp_div_reg));
 }
 
 void
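To make the Bspec divisor formula concrete: assuming the raw clock helpers report MHz (125 MHz is a typical PCH value) and the shift constants mirror PP_REFERENCE_DIVIDER_SHIFT / PANEL_POWER_CYCLE_DELAY_SHIFT from i915_reg.h, the packing works out as below. All numbers here are illustrative, not taken from any specific machine:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Assumed shift positions (stand-ins for the i915_reg.h values). */
        const int ref_div_shift = 8, cycle_shift = 0;
        int div = 125;      /* raw clock in MHz, hypothetical board */
        int t11_t12 = 500;  /* power-cycle delay as stored in edp_power_seq */
        unsigned int pp_div;

        /* ((100 * 125) / 2 - 1) = 6249 goes into the reference divider. */
        pp_div = (unsigned int)((100 * div) / 2 - 1) << ref_div_shift;
        /* DIV_ROUND_UP(..., 1000) converts the delay to the register's
         * coarser unit. */
        pp_div |= (unsigned int)DIV_ROUND_UP(t11_t12, 1000) << cycle_shift;

        printf("pp_div = 0x%08x (ref divider %d, cycle field %d)\n",
               pp_div, (100 * div) / 2 - 1, DIV_ROUND_UP(t11_t12, 1000));
        return 0;
}

The hoisted `div` variable is the actual change here: on PCH systems the divisor still comes from intel_pch_rawclk(), while Valleyview feeds intel_hrawclk() through the identical formula.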
@@ -2829,7 +2798,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-        connector->polled = DRM_CONNECTOR_POLL_HPD;
         connector->interlace_allowed = true;
         connector->doublescan_allowed = 0;
 
@@ -2844,27 +2812,46 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
         else
                 intel_connector->get_hw_state = intel_connector_get_hw_state;
 
+        intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+        if (HAS_DDI(dev)) {
+                switch (intel_dig_port->port) {
+                case PORT_A:
+                        intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+                        break;
+                case PORT_B:
+                        intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+                        break;
+                case PORT_C:
+                        intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+                        break;
+                case PORT_D:
+                        intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+                        break;
+                default:
+                        BUG();
+                }
+        }
 
         /* Set up the DDC bus. */
         switch (port) {
         case PORT_A:
+                intel_encoder->hpd_pin = HPD_PORT_A;
                 name = "DPDDC-A";
                 break;
         case PORT_B:
-                dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
+                intel_encoder->hpd_pin = HPD_PORT_B;
                 name = "DPDDC-B";
                 break;
         case PORT_C:
-                dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
+                intel_encoder->hpd_pin = HPD_PORT_C;
                 name = "DPDDC-C";
                 break;
         case PORT_D:
-                dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
+                intel_encoder->hpd_pin = HPD_PORT_D;
                 name = "DPDDC-D";
                 break;
         default:
-                WARN(1, "Invalid port %c\n", port_name(port));
-                break;
+                BUG();
         }
 
         if (is_edp(intel_dp))
@@ -2974,6 +2961,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                                  DRM_MODE_ENCODER_TMDS);
         drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
+        intel_encoder->compute_config = intel_dp_compute_config;
         intel_encoder->enable = intel_enable_dp;
         intel_encoder->pre_enable = intel_pre_enable_dp;
         intel_encoder->disable = intel_disable_dp;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ebac6fe8ca..b5b6d19e6dd3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,12 +33,21 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_helper.h>
 
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
 #define _wait_for(COND, MS, W) ({ \
-        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
         int ret__ = 0; \
         while (!(COND)) { \
                 if (time_after(jiffies, timeout__)) { \
-                        ret__ = -ETIMEDOUT; \
+                        if (!(COND)) \
+                                ret__ = -ETIMEDOUT; \
                         break; \
                 } \
                 if (W && drm_can_sleep()) { \
@@ -50,21 +59,10 @@
         ret__; \
 })
 
-#define wait_for_atomic_us(COND, US) ({ \
-        unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
-        int ret__ = 0; \
-        while (!(COND)) { \
-                if (time_after(jiffies, timeout__)) { \
-                        ret__ = -ETIMEDOUT; \
-                        break; \
-                } \
-                cpu_relax(); \
-        } \
-        ret__; \
-})
-
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+                                               DIV_ROUND_UP((US), 1000), 0)
 
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
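The re-check-after-deadline trick documented above guards against a waiter that gets preempted between polls and only wakes up after the deadline has passed. The same pattern in portable C — a sketch using CLOCK_MONOTONIC in place of jiffies, with a boolean flag standing in for the register condition:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Wait up to timeout_ms for *flag; re-check after the deadline so a
 * late-but-successful condition is not misreported as a timeout. */
static int wait_for_flag(volatile bool *flag, long long timeout_ms)
{
        long long deadline = now_ms() + timeout_ms + 1;

        while (!*flag) {
                if (now_ms() > deadline) {
                        if (!*flag)         /* the crucial second look */
                                return -1;  /* -ETIMEDOUT in the kernel */
                        break;
                }
                /* the kernel sleeps or cpu_relax()es between polls here */
        }
        return 0;
}

int main(void)
{
        volatile bool cond = true;
        printf("wait returned %d\n", wait_for_flag(&cond, 5));
        return 0;
}

Note also what the wait_for_atomic_us() rewrite implies: DIV_ROUND_UP(US, 1000) rounds microseconds up to whole milliseconds, so a 10 us wait now polls for up to a full millisecond before giving up, rather than relying on usecs_to_jiffies() resolution.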
@@ -101,34 +99,6 @@
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
-/* drm_display_mode->private_flags */
-#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-#define INTEL_MODE_DP_FORCE_6BPC (0x10)
-/* This flag must be set by the encoder's mode_fixup if it changes the crtc
- * timings in the mode to prevent the crtc fixup from overwriting them.
- * Currently only lvds needs that. */
-#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
-/*
- * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
- * to be used.
- */
-#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
-
-static inline void
-intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
-                                int multiplier)
-{
-        mode->clock *= multiplier;
-        mode->private_flags |= multiplier;
-}
-
-static inline int
-intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-{
-        return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-}
-
 struct intel_framebuffer {
         struct drm_framebuffer base;
         struct drm_i915_gem_object *obj;
@@ -158,9 +128,12 @@ struct intel_encoder {
         bool cloneable;
         bool connectors_active;
         void (*hot_plug)(struct intel_encoder *);
+        bool (*compute_config)(struct intel_encoder *,
+                               struct intel_crtc_config *);
         void (*pre_pll_enable)(struct intel_encoder *);
         void (*pre_enable)(struct intel_encoder *);
         void (*enable)(struct intel_encoder *);
+        void (*mode_set)(struct intel_encoder *intel_encoder);
         void (*disable)(struct intel_encoder *);
         void (*post_disable)(struct intel_encoder *);
         /* Read out the current hw state of this connector, returning true if
@@ -168,6 +141,7 @@ struct intel_encoder {
          * it is connected to in the pipe parameter. */
         bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
         int crtc_mask;
+        enum hpd_pin hpd_pin;
 };
 
 struct intel_panel {
@@ -197,13 +171,65 @@ struct intel_connector {
 
         /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
         struct edid *edid;
+
+        /* Since POLL and HPD connectors may use the same HPD line, keep the
+         * native state of connector->polled in case hotplug storm detection
+         * changes it. */
+        u8 polled;
+};
+
+struct intel_crtc_config {
+        struct drm_display_mode requested_mode;
+        struct drm_display_mode adjusted_mode;
+        /* This flag must be set by the encoder's compute_config callback if it
+         * changes the crtc timings in the mode to prevent the crtc fixup from
+         * overwriting them. Currently only lvds needs that. */
+        bool timings_set;
+        /* Whether to set up the PCH/FDI. Note that we never allow sharing
+         * between pch encoders and cpu encoders. */
+        bool has_pch_encoder;
+
+        /* CPU Transcoder for the pipe. Currently this can only differ from the
+         * pipe on Haswell (where we have a special eDP transcoder). */
+        enum transcoder cpu_transcoder;
+
+        /*
+         * Use reduced/limited/broadcast rgb range, compressing from the full
+         * range fed into the crtcs.
+         */
+        bool limited_color_range;
+
+        /* DP has a bunch of special cases unfortunately, so mark the pipe
+         * accordingly. */
+        bool has_dp_encoder;
+        bool dither;
+
+        /* Controls for the clock computation, to override various stages. */
+        bool clock_set;
+
+        /* Settings for the intel dpll used on pretty much everything but
+         * haswell. */
+        struct dpll {
+                unsigned n;
+                unsigned m1, m2;
+                unsigned p1, p2;
+        } dpll;
+
+        int pipe_bpp;
+        struct intel_link_m_n dp_m_n;
+        /**
+         * This is currently used by DP and HDMI encoders since those can have
+         * a target pixel clock != the port link clock (which is currently
+         * stored in adjusted_mode->clock).
+         */
+        int pixel_target_clock;
+        /* Used by SDVO (and if we ever fix it, HDMI). */
+        unsigned pixel_multiplier;
 };
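The new struct is the heart of the pipe-config rework: everything an encoder may influence about a pipe now lives in one place instead of being smuggled through drm_display_mode->private_flags (the flags deleted in the hunk above). A minimal sketch of how an encoder's compute_config hook interacts with it, using simplified standalone types rather than the driver's own (the real wiring is per encoder, e.g. intel_dp_compute_config earlier in this diff):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for intel_crtc_config / intel_encoder. */
struct crtc_config {
        bool has_pch_encoder;
        bool has_dp_encoder;
        bool limited_color_range;
        int  pipe_bpp;
};

struct encoder {
        const char *name;
        bool (*compute_config)(struct encoder *, struct crtc_config *);
};

/* The hook constrains the shared pipe state; returning false rejects
 * the configuration, as the old mode_fixup callbacks did. */
static bool dp_compute_config(struct encoder *e, struct crtc_config *cfg)
{
        cfg->has_dp_encoder = true;
        if (cfg->pipe_bpp > 24)
                cfg->pipe_bpp = 24;    /* cap bpp, a DP-style constraint */
        return true;
}

int main(void)
{
        struct encoder dp = { "DP", dp_compute_config };
        struct crtc_config cfg = { .pipe_bpp = 30 };

        if (!dp.compute_config(&dp, &cfg))
                return 1;
        printf("%s: pipe_bpp=%d dp=%d\n", dp.name, cfg.pipe_bpp,
               (int)cfg.has_dp_encoder);
        return 0;
}

Because the struct is owned by the crtc (embedded below as intel_crtc.config), the crtc code can compare computed state against hardware state wholesale, which is what intel_modeset_check_state builds on.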
 
 struct intel_crtc {
         struct drm_crtc base;
         enum pipe pipe;
         enum plane plane;
-        enum transcoder cpu_transcoder;
         u8 lut_r[256], lut_g[256], lut_b[256];
         /*
          * Whether the crtc and the connected output pipeline is active. Implies
@@ -230,7 +256,8 @@ struct intel_crtc {
         int16_t cursor_x, cursor_y;
         int16_t cursor_width, cursor_height;
         bool cursor_visible;
-        unsigned int bpp;
+
+        struct intel_crtc_config config;
 
         /* We can share PLLs across outputs if the timings match */
         struct intel_pch_pll *pch_pll;
@@ -242,11 +269,16 @@ struct intel_crtc {
 
 struct intel_plane {
         struct drm_plane base;
+        int plane;
         enum pipe pipe;
         struct drm_i915_gem_object *obj;
         bool can_scale;
         int max_downscale;
         u32 lut_r[1024], lut_g[1024], lut_b[1024];
+        int crtc_x, crtc_y;
+        unsigned int crtc_w, crtc_h;
+        uint32_t src_x, src_y;
+        uint32_t src_w, src_h;
         void (*update_plane)(struct drm_plane *plane,
                              struct drm_framebuffer *fb,
                              struct drm_i915_gem_object *obj,
@@ -347,7 +379,7 @@ struct dip_infoframe {
 } __attribute__((packed));
 
 struct intel_hdmi {
-        u32 sdvox_reg;
+        u32 hdmi_reg;
         int ddc_bus;
         uint32_t color_range;
         bool color_range_auto;
@@ -366,6 +398,7 @@ struct intel_hdmi {
 
 struct intel_dp {
         uint32_t output_reg;
+        uint32_t aux_ch_ctl_reg;
         uint32_t DP;
         uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
         bool has_audio;
@@ -443,13 +476,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
-                            int sdvox_reg, enum port port);
+                            int hdmi_reg, enum port port);
 extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                                       struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-                                  const struct drm_display_mode *mode,
-                                  struct drm_display_mode *adjusted_mode);
+extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+                                      struct intel_crtc_config *pipe_config);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
                             bool is_sdvob);
@@ -464,18 +496,14 @@ extern void intel_dp_init(struct drm_device *dev, int output_reg,
                           enum port port);
 extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                                     struct intel_connector *intel_connector);
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode);
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
-                                const struct drm_display_mode *mode,
-                                struct drm_display_mode *adjusted_mode);
+extern bool intel_dp_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_config *pipe_config);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
@@ -483,11 +511,8 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
-extern int intel_edp_target_clock(struct intel_encoder *,
-                                  struct drm_display_mode *mode);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                       enum plane plane);
 
@@ -531,6 +556,7 @@ extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
 extern void intel_connector_dpms(struct drm_connector *, int mode);
 extern bool intel_connector_get_hw_state(struct intel_connector *connector);
 extern void intel_modeset_check_state(struct drm_device *dev);
+extern void intel_plane_restore(struct drm_plane *plane);
 
 
 static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -636,6 +662,10 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
                             struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+                                         struct intel_link_m_n *m_n);
+extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+                                         struct intel_link_m_n *m_n);
 extern void intel_prepare_ddi(struct drm_device *dev);
 extern void hsw_fdi_link_train(struct drm_crtc *crtc);
 extern void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -670,6 +700,7 @@ extern void intel_update_fbc(struct drm_device *dev);
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 extern void intel_gpu_ips_teardown(void);
 
+extern bool intel_using_power_well(struct drm_device *dev);
 extern void intel_init_power_well(struct drm_device *dev);
 extern void intel_set_power_well(struct drm_device *dev, bool enable);
 extern void intel_enable_gt_powersave(struct drm_device *dev);
@@ -681,7 +712,7 @@ extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe);
 extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
 extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
 extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
                                               enum transcoder cpu_transcoder);
 extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
@@ -695,4 +726,6 @@ extern bool
 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
+extern void intel_display_handle_reset(struct drm_device *dev);
+
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 00e70dbe82da..cc70b16d5d42 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -448,6 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
                 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
                 struct i2c_adapter *i2c;
                 int gpio;
+                bool dvoinit;
 
                 /* Allow the I2C driver info to specify the GPIO to be used in
                  * special cases, but otherwise default to what's defined
@@ -467,7 +468,17 @@ void intel_dvo_init(struct drm_device *dev)
                 i2c = intel_gmbus_get_adapter(dev_priv, gpio);
 
                 intel_dvo->dev = *dvo;
-                if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+                /* GMBUS NAK handling seems to be unstable, hence let the
+                 * transmitter detection run in bit banging mode for now.
+                 */
+                intel_gmbus_force_bit(i2c, true);
+
+                dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+                intel_gmbus_force_bit(i2c, false);
+
+                if (!dvoinit)
                         continue;
 
                 intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 981bdce3634e..0e19e575a1b4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -150,7 +150,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
         }
         info->screen_size = size;
 
-//      memset(info->screen_base, 0, size);
+        /* This driver doesn't need a VT switch to restore the mode on resume */
+        info->skip_vt_switch = true;
 
         drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
         drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -227,7 +228,7 @@ int intel_fbdev_init(struct drm_device *dev)
         ifbdev->helper.funcs = &intel_fb_helper_funcs;
 
         ret = drm_fb_helper_init(dev, &ifbdev->helper,
-                                 dev_priv->num_pipe,
+                                 INTEL_INFO(dev)->num_pipes,
                                  INTELFB_CONN_LIMIT);
         if (ret) {
                 kfree(ifbdev);
@@ -282,6 +283,9 @@ void intel_fb_restore_mode(struct drm_device *dev)
         struct drm_mode_config *config = &dev->mode_config;
         struct drm_plane *plane;
 
+        if (INTEL_INFO(dev)->num_pipes == 0)
+                return;
+
         drm_modeset_lock_all(dev);
 
         ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa8ec4a26041..a9057930f2b2 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
50 50
51 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 51 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
52 52
53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, 53 WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
54 "HDMI port enabled, expecting disabled\n"); 54 "HDMI port enabled, expecting disabled\n");
55} 55}
56 56
@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
120 } 120 }
121} 121}
122 122
123static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe) 123static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
124 enum transcoder cpu_transcoder)
124{ 125{
125 switch (frame->type) { 126 switch (frame->type) {
126 case DIP_TYPE_AVI: 127 case DIP_TYPE_AVI:
127 return HSW_TVIDEO_DIP_AVI_DATA(pipe); 128 return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
128 case DIP_TYPE_SPD: 129 case DIP_TYPE_SPD:
129 return HSW_TVIDEO_DIP_SPD_DATA(pipe); 130 return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
130 default: 131 default:
131 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 132 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
132 return 0; 133 return 0;
@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
293 struct drm_device *dev = encoder->dev; 294 struct drm_device *dev = encoder->dev;
294 struct drm_i915_private *dev_priv = dev->dev_private; 295 struct drm_i915_private *dev_priv = dev->dev_private;
295 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 296 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
296 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); 297 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
297 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe); 298 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
298 unsigned int i, len = DIP_HEADER_SIZE + frame->len; 299 unsigned int i, len = DIP_HEADER_SIZE + frame->len;
299 u32 val = I915_READ(ctl_reg); 300 u32 val = I915_READ(ctl_reg);
300 301
@@ -332,6 +333,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
332 struct drm_display_mode *adjusted_mode) 333 struct drm_display_mode *adjusted_mode)
333{ 334{
334 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 335 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
336 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
335 struct dip_infoframe avi_if = { 337 struct dip_infoframe avi_if = {
336 .type = DIP_TYPE_AVI, 338 .type = DIP_TYPE_AVI,
337 .ver = DIP_VERSION_AVI, 339 .ver = DIP_VERSION_AVI,
@@ -342,7 +344,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
342 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 344 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
343 345
344 if (intel_hdmi->rgb_quant_range_selectable) { 346 if (intel_hdmi->rgb_quant_range_selectable) {
345 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 347 if (intel_crtc->config.limited_color_range)
346 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 348 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
347 else 349 else
348 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 350 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
@@ -568,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
568 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 570 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
569 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 571 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
570 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 572 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
571 u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); 573 u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
572 u32 val = I915_READ(reg); 574 u32 val = I915_READ(reg);
573 575
574 assert_hdmi_port_disabled(intel_hdmi); 576 assert_hdmi_port_disabled(intel_hdmi);
@@ -597,40 +599,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
597 struct drm_i915_private *dev_priv = dev->dev_private; 599 struct drm_i915_private *dev_priv = dev->dev_private;
598 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 600 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
599 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 601 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
600 u32 sdvox; 602 u32 hdmi_val;
601 603
602 sdvox = SDVO_ENCODING_HDMI; 604 hdmi_val = SDVO_ENCODING_HDMI;
603 if (!HAS_PCH_SPLIT(dev)) 605 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
604 sdvox |= intel_hdmi->color_range; 606 hdmi_val |= intel_hdmi->color_range;
605 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
606 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
607 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 609 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
608 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 610 hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
609 611
610 if (intel_crtc->bpp > 24) 612 if (intel_crtc->config.pipe_bpp > 24)
611 sdvox |= COLOR_FORMAT_12bpc; 613 hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
612 else 614 else
613 sdvox |= COLOR_FORMAT_8bpc; 615 hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
614 616
615 /* Required on CPT */ 617 /* Required on CPT */
616 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) 618 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
617 sdvox |= HDMI_MODE_SELECT; 619 hdmi_val |= HDMI_MODE_SELECT_HDMI;
618 620
619 if (intel_hdmi->has_audio) { 621 if (intel_hdmi->has_audio) {
620 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", 622 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
621 pipe_name(intel_crtc->pipe)); 623 pipe_name(intel_crtc->pipe));
622 sdvox |= SDVO_AUDIO_ENABLE; 624 hdmi_val |= SDVO_AUDIO_ENABLE;
623 sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; 625 hdmi_val |= HDMI_MODE_SELECT_HDMI;
624 intel_write_eld(encoder, adjusted_mode); 626 intel_write_eld(encoder, adjusted_mode);
625 } 627 }
626 628
627 if (HAS_PCH_CPT(dev)) 629 if (HAS_PCH_CPT(dev))
628 sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 630 hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
629 else if (intel_crtc->pipe == PIPE_B) 631 else
630 sdvox |= SDVO_PIPE_B_SELECT; 632 hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
631 633
632 I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 634 I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
633 POSTING_READ(intel_hdmi->sdvox_reg); 635 POSTING_READ(intel_hdmi->hdmi_reg);
634 636
635 intel_hdmi->set_infoframes(encoder, adjusted_mode); 637 intel_hdmi->set_infoframes(encoder, adjusted_mode);
636} 638}
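
[Editor's note] Besides the sdvox -> hdmi_val rename, this hunk replaces the old pipe-B-only special case with an explicit pipe-select field write (SDVO_PIPE_SEL / SDVO_PIPE_SEL_CPT) and sources the 12bpc decision from the shared pipe config. A toy rendering of the pipe-select cleanup; the field layout below is invented for illustration and is not the real register encoding:

    #include <stdint.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };

    #define PIPE_SEL_SHIFT 30                 /* placeholder layout */
    #define PIPE_SEL(p)    ((uint32_t)(p) << PIPE_SEL_SHIFT)

    static uint32_t port_ctl_for_pipe(uint32_t val, enum pipe pipe)
    {
            /* Old style: set a lone PIPE_B bit only when pipe == PIPE_B,
             * implicitly relying on 0 meaning pipe A. New style: always
             * encode the pipe, which also covers pipe C hardware. */
            return val | PIPE_SEL(pipe);
    }
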
@@ -643,7 +645,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
643 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 645 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
644 u32 tmp; 646 u32 tmp;
645 647
646 tmp = I915_READ(intel_hdmi->sdvox_reg); 648 tmp = I915_READ(intel_hdmi->hdmi_reg);
647 649
648 if (!(tmp & SDVO_ENABLE)) 650 if (!(tmp & SDVO_ENABLE))
649 return false; 651 return false;
@@ -660,6 +662,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
660{ 662{
661 struct drm_device *dev = encoder->base.dev; 663 struct drm_device *dev = encoder->base.dev;
662 struct drm_i915_private *dev_priv = dev->dev_private; 664 struct drm_i915_private *dev_priv = dev->dev_private;
665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
663 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 666 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
664 u32 temp; 667 u32 temp;
665 u32 enable_bits = SDVO_ENABLE; 668 u32 enable_bits = SDVO_ENABLE;
@@ -667,38 +670,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
667 if (intel_hdmi->has_audio) 670 if (intel_hdmi->has_audio)
668 enable_bits |= SDVO_AUDIO_ENABLE; 671 enable_bits |= SDVO_AUDIO_ENABLE;
669 672
670 temp = I915_READ(intel_hdmi->sdvox_reg); 673 temp = I915_READ(intel_hdmi->hdmi_reg);
671 674
672 /* HW workaround for IBX, we need to move the port to transcoder A 675 /* HW workaround for IBX, we need to move the port to transcoder A
673 * before disabling it. */ 676 * before disabling it, so restore the transcoder select bit here. */
674 if (HAS_PCH_IBX(dev)) { 677 if (HAS_PCH_IBX(dev))
675 struct drm_crtc *crtc = encoder->base.crtc; 678 enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
676 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
677
678 /* Restore the transcoder select bit. */
679 if (pipe == PIPE_B)
680 enable_bits |= SDVO_PIPE_B_SELECT;
681 }
682 679
683 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 680 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
684 * we do it anyway, since that proves more stable in testing. 681
685 */ 682 */
686 if (HAS_PCH_SPLIT(dev)) { 683 if (HAS_PCH_SPLIT(dev)) {
687 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); 684 I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
688 POSTING_READ(intel_hdmi->sdvox_reg); 685 POSTING_READ(intel_hdmi->hdmi_reg);
689 } 686 }
690 687
691 temp |= enable_bits; 688 temp |= enable_bits;
692 689
693 I915_WRITE(intel_hdmi->sdvox_reg, temp); 690 I915_WRITE(intel_hdmi->hdmi_reg, temp);
694 POSTING_READ(intel_hdmi->sdvox_reg); 691 POSTING_READ(intel_hdmi->hdmi_reg);
695 692
696 /* HW workaround, need to write this twice for an issue that may result 693
697 * in the first write getting masked. 694
698 */ 695 */
699 if (HAS_PCH_SPLIT(dev)) { 696 if (HAS_PCH_SPLIT(dev)) {
700 I915_WRITE(intel_hdmi->sdvox_reg, temp); 697 I915_WRITE(intel_hdmi->hdmi_reg, temp);
701 POSTING_READ(intel_hdmi->sdvox_reg); 698 POSTING_READ(intel_hdmi->hdmi_reg);
702 } 699 }
703} 700}
704 701
@@ -710,7 +707,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
710 u32 temp; 707 u32 temp;
711 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; 708 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
712 709
713 temp = I915_READ(intel_hdmi->sdvox_reg); 710 temp = I915_READ(intel_hdmi->hdmi_reg);
714 711
715 /* HW workaround for IBX, we need to move the port to transcoder A 712 /* HW workaround for IBX, we need to move the port to transcoder A
716 * before disabling it. */ 713 * before disabling it. */
@@ -720,12 +717,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
720 717
721 if (temp & SDVO_PIPE_B_SELECT) { 718 if (temp & SDVO_PIPE_B_SELECT) {
722 temp &= ~SDVO_PIPE_B_SELECT; 719 temp &= ~SDVO_PIPE_B_SELECT;
723 I915_WRITE(intel_hdmi->sdvox_reg, temp); 720 I915_WRITE(intel_hdmi->hdmi_reg, temp);
724 POSTING_READ(intel_hdmi->sdvox_reg); 721 POSTING_READ(intel_hdmi->hdmi_reg);
725 722
726 /* Again we need to write this twice. */ 723 /* Again we need to write this twice. */
727 I915_WRITE(intel_hdmi->sdvox_reg, temp); 724 I915_WRITE(intel_hdmi->hdmi_reg, temp);
728 POSTING_READ(intel_hdmi->sdvox_reg); 725 POSTING_READ(intel_hdmi->hdmi_reg);
729 726
730 /* Transcoder selection bits only update 727 /* Transcoder selection bits only update
731 * effectively on vblank. */ 728 * effectively on vblank. */
@@ -740,21 +737,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
740 * we do it anyway, since that proves more stable in testing. 737
741 */ 738 */
742 if (HAS_PCH_SPLIT(dev)) { 739 if (HAS_PCH_SPLIT(dev)) {
743 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); 740 I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
744 POSTING_READ(intel_hdmi->sdvox_reg); 741 POSTING_READ(intel_hdmi->hdmi_reg);
745 } 742 }
746 743
747 temp &= ~enable_bits; 744 temp &= ~enable_bits;
748 745
749 I915_WRITE(intel_hdmi->sdvox_reg, temp); 746 I915_WRITE(intel_hdmi->hdmi_reg, temp);
750 POSTING_READ(intel_hdmi->sdvox_reg); 747 POSTING_READ(intel_hdmi->hdmi_reg);
751 748
752 /* HW workaround, need to write this twice for an issue that may result 749
753 * in the first write getting masked. 750
754 */ 751 */
755 if (HAS_PCH_SPLIT(dev)) { 752 if (HAS_PCH_SPLIT(dev)) {
756 I915_WRITE(intel_hdmi->sdvox_reg, temp); 753 I915_WRITE(intel_hdmi->hdmi_reg, temp);
757 POSTING_READ(intel_hdmi->sdvox_reg); 754 POSTING_READ(intel_hdmi->hdmi_reg);
758 } 755 }
759} 756}
760 757
@@ -772,23 +769,40 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
772 return MODE_OK; 769 return MODE_OK;
773} 770}
774 771
775bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, 772bool intel_hdmi_compute_config(struct intel_encoder *encoder,
776 const struct drm_display_mode *mode, 773 struct intel_crtc_config *pipe_config)
777 struct drm_display_mode *adjusted_mode)
778{ 774{
779 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 775 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
776 struct drm_device *dev = encoder->base.dev;
777 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
780 778
781 if (intel_hdmi->color_range_auto) { 779 if (intel_hdmi->color_range_auto) {
782 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 780 /* See CEA-861-E - 5.1 Default Encoding Parameters */
783 if (intel_hdmi->has_hdmi_sink && 781 if (intel_hdmi->has_hdmi_sink &&
784 drm_match_cea_mode(adjusted_mode) > 1) 782 drm_match_cea_mode(adjusted_mode) > 1)
785 intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 783 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
786 else 784 else
787 intel_hdmi->color_range = 0; 785 intel_hdmi->color_range = 0;
788 } 786 }
789 787
790 if (intel_hdmi->color_range) 788 if (intel_hdmi->color_range)
791 adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; 789 pipe_config->limited_color_range = true;
790
791 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
792 pipe_config->has_pch_encoder = true;
793
794 /*
795 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
796 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
797 * outputs.
798 */
799 if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
800 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
801 pipe_config->pipe_bpp = 12*3;
802 } else {
803 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
804 pipe_config->pipe_bpp = 8*3;
805 }
792 806
793 return true; 807 return true;
794} 808}
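
[Editor's note] The bpp clamp added to intel_hdmi_compute_config is easy to state on its own: HDMI carries 8 or 12 bits per channel, never 10, and pipe_bpp counts all three channels. A minimal standalone sketch of the same decision; the has_12bpc guard stands in for the HAS_PCH_SPLIT check above, since g4x/vlv lack 12bpc:

    /* Clamp a requested pipe bpp to what the HDMI port can carry. */
    static int hdmi_clamp_pipe_bpp(int pipe_bpp, int has_12bpc)
    {
            if (pipe_bpp > 8 * 3 && has_12bpc)
                    return 12 * 3;          /* 36 = 12 bpc x 3 channels */
            return 8 * 3;                   /* 24 = 8 bpc x 3 channels */
    }
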
@@ -906,6 +920,9 @@ intel_hdmi_set_property(struct drm_connector *connector,
906 } 920 }
907 921
908 if (property == dev_priv->broadcast_rgb_property) { 922 if (property == dev_priv->broadcast_rgb_property) {
923 bool old_auto = intel_hdmi->color_range_auto;
924 uint32_t old_range = intel_hdmi->color_range;
925
909 switch (val) { 926 switch (val) {
910 case INTEL_BROADCAST_RGB_AUTO: 927 case INTEL_BROADCAST_RGB_AUTO:
911 intel_hdmi->color_range_auto = true; 928 intel_hdmi->color_range_auto = true;
@@ -916,11 +933,16 @@ intel_hdmi_set_property(struct drm_connector *connector,
916 break; 933 break;
917 case INTEL_BROADCAST_RGB_LIMITED: 934 case INTEL_BROADCAST_RGB_LIMITED:
918 intel_hdmi->color_range_auto = false; 935 intel_hdmi->color_range_auto = false;
919 intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 936 intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
920 break; 937 break;
921 default: 938 default:
922 return -EINVAL; 939 return -EINVAL;
923 } 940 }
941
942 if (old_auto == intel_hdmi->color_range_auto &&
943 old_range == intel_hdmi->color_range)
944 return 0;
945
924 goto done; 946 goto done;
925 } 947 }
926 948
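
[Editor's note] The early return added to the broadcast-RGB property handler is a standard no-op guard: snapshot the old values, apply the requested change, and skip the modeset (the goto done path) when nothing actually changed. The shape of the idiom, reduced to a sketch with hypothetical names:

    #include <stdint.h>

    struct rgb_prop { int range_auto; uint32_t range; };

    /* Returns 1 if anything changed and the expensive update is needed. */
    static int set_rgb_prop(struct rgb_prop *p, int range_auto, uint32_t range)
    {
            struct rgb_prop old = *p;

            p->range_auto = range_auto;
            p->range = range;
            return old.range_auto != p->range_auto || old.range != p->range;
    }
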
@@ -941,7 +963,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
941} 963}
942 964
943static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 965static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
944 .mode_fixup = intel_hdmi_mode_fixup,
945 .mode_set = intel_hdmi_mode_set, 966 .mode_set = intel_hdmi_mode_set,
946}; 967};
947 968
@@ -985,36 +1006,36 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
985 DRM_MODE_CONNECTOR_HDMIA); 1006 DRM_MODE_CONNECTOR_HDMIA);
986 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 1007 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
987 1008
988 connector->polled = DRM_CONNECTOR_POLL_HPD;
989 connector->interlace_allowed = 1; 1009 connector->interlace_allowed = 1;
990 connector->doublescan_allowed = 0; 1010 connector->doublescan_allowed = 0;
991 1011
992 switch (port) { 1012 switch (port) {
993 case PORT_B: 1013 case PORT_B:
994 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 1014 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
995 dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; 1015 intel_encoder->hpd_pin = HPD_PORT_B;
996 break; 1016 break;
997 case PORT_C: 1017 case PORT_C:
998 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 1018 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
999 dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; 1019 intel_encoder->hpd_pin = HPD_PORT_C;
1000 break; 1020 break;
1001 case PORT_D: 1021 case PORT_D:
1002 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1022 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
1003 dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; 1023 intel_encoder->hpd_pin = HPD_PORT_D;
1004 break; 1024 break;
1005 case PORT_A: 1025 case PORT_A:
1026 intel_encoder->hpd_pin = HPD_PORT_A;
1006 /* Internal port only for eDP. */ 1027 /* Internal port only for eDP. */
1007 default: 1028 default:
1008 BUG(); 1029 BUG();
1009 } 1030 }
1010 1031
1011 if (!HAS_PCH_SPLIT(dev)) { 1032 if (IS_VALLEYVIEW(dev)) {
1012 intel_hdmi->write_infoframe = g4x_write_infoframe;
1013 intel_hdmi->set_infoframes = g4x_set_infoframes;
1014 } else if (IS_VALLEYVIEW(dev)) {
1015 intel_hdmi->write_infoframe = vlv_write_infoframe; 1033 intel_hdmi->write_infoframe = vlv_write_infoframe;
1016 intel_hdmi->set_infoframes = vlv_set_infoframes; 1034 intel_hdmi->set_infoframes = vlv_set_infoframes;
1017 } else if (IS_HASWELL(dev)) { 1035 } else if (!HAS_PCH_SPLIT(dev)) {
1036 intel_hdmi->write_infoframe = g4x_write_infoframe;
1037 intel_hdmi->set_infoframes = g4x_set_infoframes;
1038 } else if (HAS_DDI(dev)) {
1018 intel_hdmi->write_infoframe = hsw_write_infoframe; 1039 intel_hdmi->write_infoframe = hsw_write_infoframe;
1019 intel_hdmi->set_infoframes = hsw_set_infoframes; 1040 intel_hdmi->set_infoframes = hsw_set_infoframes;
1020 } else if (HAS_PCH_IBX(dev)) { 1041 } else if (HAS_PCH_IBX(dev)) {
@@ -1045,7 +1066,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1045 } 1066 }
1046} 1067}
1047 1068
1048void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) 1069void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1049{ 1070{
1050 struct intel_digital_port *intel_dig_port; 1071 struct intel_digital_port *intel_dig_port;
1051 struct intel_encoder *intel_encoder; 1072 struct intel_encoder *intel_encoder;
@@ -1069,6 +1090,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1069 DRM_MODE_ENCODER_TMDS); 1090 DRM_MODE_ENCODER_TMDS);
1070 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 1091 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1071 1092
1093 intel_encoder->compute_config = intel_hdmi_compute_config;
1072 intel_encoder->enable = intel_enable_hdmi; 1094 intel_encoder->enable = intel_enable_hdmi;
1073 intel_encoder->disable = intel_disable_hdmi; 1095 intel_encoder->disable = intel_disable_hdmi;
1074 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1096 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
@@ -1078,7 +1100,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1078 intel_encoder->cloneable = false; 1100 intel_encoder->cloneable = false;
1079 1101
1080 intel_dig_port->port = port; 1102 intel_dig_port->port = port;
1081 intel_dig_port->hdmi.sdvox_reg = sdvox_reg; 1103 intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
1082 intel_dig_port->dp.output_reg = 0; 1104 intel_dig_port->dp.output_reg = 0;
1083 1105
1084 intel_hdmi_init_connector(intel_dig_port, intel_connector); 1106 intel_hdmi_init_connector(intel_dig_port, intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index ef4744e1bf0b..5d245031e391 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -522,7 +522,9 @@ int intel_setup_gmbus(struct drm_device *dev)
522 struct drm_i915_private *dev_priv = dev->dev_private; 522 struct drm_i915_private *dev_priv = dev->dev_private;
523 int ret, i; 523 int ret, i;
524 524
525 if (HAS_PCH_SPLIT(dev)) 525 if (HAS_PCH_NOP(dev))
526 return 0;
527 else if (HAS_PCH_SPLIT(dev))
526 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; 528 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
527 else if (IS_VALLEYVIEW(dev)) 529 else if (IS_VALLEYVIEW(dev))
528 dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; 530 dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3d1d97488cc9..f36f1baabd5a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,8 +261,6 @@ centre_horizontally(struct drm_display_mode *mode,
261 261
262 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; 262 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
263 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 263 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
264
265 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
266} 264}
267 265
268static void 266static void
@@ -284,8 +282,6 @@ centre_vertically(struct drm_display_mode *mode,
284 282
285 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 283 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
286 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 284 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
287
288 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
289} 285}
290 286
291static inline u32 panel_fitter_scaling(u32 source, u32 target) 287static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -301,17 +297,20 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
301 return (FACTOR * ratio + FACTOR/2) / FACTOR; 297 return (FACTOR * ratio + FACTOR/2) / FACTOR;
302} 298}
303 299
304static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, 300static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
305 const struct drm_display_mode *mode, 301 struct intel_crtc_config *pipe_config)
306 struct drm_display_mode *adjusted_mode)
307{ 302{
308 struct drm_device *dev = encoder->dev; 303 struct drm_device *dev = intel_encoder->base.dev;
309 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
310 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 305 struct intel_lvds_encoder *lvds_encoder =
306 to_lvds_encoder(&intel_encoder->base);
311 struct intel_connector *intel_connector = 307 struct intel_connector *intel_connector =
312 &lvds_encoder->attached_connector->base; 308 &lvds_encoder->attached_connector->base;
309 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
310 struct drm_display_mode *mode = &pipe_config->requested_mode;
313 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; 311 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
314 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 312 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
313 unsigned int lvds_bpp;
315 int pipe; 314 int pipe;
316 315
317 /* Should never happen!! */ 316 /* Should never happen!! */
@@ -323,6 +322,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
323 if (intel_encoder_check_is_cloned(&lvds_encoder->base)) 322 if (intel_encoder_check_is_cloned(&lvds_encoder->base))
324 return false; 323 return false;
325 324
325 if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
326 LVDS_A3_POWER_UP)
327 lvds_bpp = 8*3;
328 else
329 lvds_bpp = 6*3;
330
331 if (lvds_bpp != pipe_config->pipe_bpp) {
332 DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
333 pipe_config->pipe_bpp, lvds_bpp);
334 pipe_config->pipe_bpp = lvds_bpp;
335 }
326 /* 336 /*
327 * We have timings from the BIOS for the panel, put them in 337 * We have timings from the BIOS for the panel, put them in
328 * to the adjusted mode. The CRTC will be set up for this mode, 338 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -333,6 +343,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
333 adjusted_mode); 343 adjusted_mode);
334 344
335 if (HAS_PCH_SPLIT(dev)) { 345 if (HAS_PCH_SPLIT(dev)) {
346 pipe_config->has_pch_encoder = true;
347
336 intel_pch_panel_fitting(dev, 348 intel_pch_panel_fitting(dev,
337 intel_connector->panel.fitting_mode, 349 intel_connector->panel.fitting_mode,
338 mode, adjusted_mode); 350 mode, adjusted_mode);
@@ -359,6 +371,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
359 I915_WRITE(BCLRPAT(pipe), 0); 371 I915_WRITE(BCLRPAT(pipe), 0);
360 372
361 drm_mode_set_crtcinfo(adjusted_mode, 0); 373 drm_mode_set_crtcinfo(adjusted_mode, 0);
374 pipe_config->timings_set = true;
362 375
363 switch (intel_connector->panel.fitting_mode) { 376 switch (intel_connector->panel.fitting_mode) {
364 case DRM_MODE_SCALE_CENTER: 377 case DRM_MODE_SCALE_CENTER:
@@ -618,7 +631,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
618 if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) 631 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
619 kfree(lvds_connector->base.edid); 632 kfree(lvds_connector->base.edid);
620 633
621 intel_panel_destroy_backlight(connector->dev);
622 intel_panel_fini(&lvds_connector->base.panel); 634 intel_panel_fini(&lvds_connector->base.panel);
623 635
624 drm_sysfs_connector_remove(connector); 636 drm_sysfs_connector_remove(connector);
@@ -661,7 +673,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
661} 673}
662 674
663static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 675static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
664 .mode_fixup = intel_lvds_mode_fixup,
665 .mode_set = intel_lvds_mode_set, 676 .mode_set = intel_lvds_mode_set,
666}; 677};
667 678
@@ -850,6 +861,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
850 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), 861 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
851 }, 862 },
852 }, 863 },
864 {
865 .callback = intel_no_lvds_dmi_callback,
866 .ident = "Fujitsu Esprimo Q900",
867 .matches = {
868 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
869 DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
870 },
871 },
853 872
854 { } /* terminating entry */ 873 { } /* terminating entry */
855}; 874};
@@ -1019,12 +1038,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
1019{ 1038{
1020 /* With the introduction of the PCH we gained a dedicated 1039 /* With the introduction of the PCH we gained a dedicated
1021 * LVDS presence pin, use it. */ 1040 * LVDS presence pin, use it. */
1022 if (HAS_PCH_SPLIT(dev)) 1041 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
1023 return true; 1042 return true;
1024 1043
1025 /* Otherwise LVDS was only attached to mobile products, 1044 /* Otherwise LVDS was only attached to mobile products,
1026 * except for the inglorious 830gm */ 1045 * except for the inglorious 830gm */
1027 return IS_MOBILE(dev) && !IS_I830(dev); 1046 if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
1047 return true;
1048
1049 return false;
1028} 1050}
1029 1051
1030/** 1052/**
@@ -1102,6 +1124,7 @@ bool intel_lvds_init(struct drm_device *dev)
1102 intel_encoder->enable = intel_enable_lvds; 1124 intel_encoder->enable = intel_enable_lvds;
1103 intel_encoder->pre_enable = intel_pre_enable_lvds; 1125 intel_encoder->pre_enable = intel_pre_enable_lvds;
1104 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 1126 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
1127 intel_encoder->compute_config = intel_lvds_compute_config;
1105 intel_encoder->disable = intel_disable_lvds; 1128 intel_encoder->disable = intel_disable_lvds;
1106 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 1129 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
1107 intel_connector->get_hw_state = intel_connector_get_hw_state; 1130 intel_connector->get_hw_state = intel_connector_get_hw_state;
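
[Editor's note] The LVDS conversion follows the same pattern as the HDMI and SDVO ones in this series: the deleted mode_fixup hook used to smuggle state through drm_display_mode.private_flags, while compute_config records each encoder's constraints (pipe_bpp, has_pch_encoder, timings_set) in a per-CRTC config that the CRTC code later consumes. A structural sketch of the new flow, with a deliberately reduced config struct:

    struct crtc_config_sketch {
            int pipe_bpp;               /* total bits across all 3 channels */
            int has_pch_encoder;
            int timings_set;
    };

    /* Encoder hook: impose constraints, never touch global mode flags. */
    static int lvds_compute_config_sketch(struct crtc_config_sketch *cfg,
                                          int panel_bpp, int has_pch)
    {
            if (cfg->pipe_bpp != panel_bpp)
                    cfg->pipe_bpp = panel_bpp;  /* the panel dictates bpp */
            if (has_pch)
                    cfg->has_pch_encoder = 1;   /* route through FDI/PCH */
            cfg->timings_set = 1;               /* BIOS panel timings used */
            return 1;                           /* config accepted */
    }
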
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a7..eb5e6e95f3c7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -286,8 +286,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
286{ 286{
287 struct drm_i915_private *dev_priv = dev->dev_private; 287 struct drm_i915_private *dev_priv = dev->dev_private;
288 288
289 dev_priv->backlight_level = level; 289 dev_priv->backlight.level = level;
290 if (dev_priv->backlight_enabled) 290 if (dev_priv->backlight.device)
291 dev_priv->backlight.device->props.brightness = level;
292
293 if (dev_priv->backlight.enabled)
291 intel_panel_actually_set_backlight(dev, level); 294 intel_panel_actually_set_backlight(dev, level);
292} 295}
293 296
@@ -295,7 +298,7 @@ void intel_panel_disable_backlight(struct drm_device *dev)
295{ 298{
296 struct drm_i915_private *dev_priv = dev->dev_private; 299 struct drm_i915_private *dev_priv = dev->dev_private;
297 300
298 dev_priv->backlight_enabled = false; 301 dev_priv->backlight.enabled = false;
299 intel_panel_actually_set_backlight(dev, 0); 302 intel_panel_actually_set_backlight(dev, 0);
300 303
301 if (INTEL_INFO(dev)->gen >= 4) { 304 if (INTEL_INFO(dev)->gen >= 4) {
@@ -318,8 +321,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
318{ 321{
319 struct drm_i915_private *dev_priv = dev->dev_private; 322 struct drm_i915_private *dev_priv = dev->dev_private;
320 323
321 if (dev_priv->backlight_level == 0) 324 if (dev_priv->backlight.level == 0) {
322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 325 dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
326 if (dev_priv->backlight.device)
327 dev_priv->backlight.device->props.brightness =
328 dev_priv->backlight.level;
329 }
323 330
324 if (INTEL_INFO(dev)->gen >= 4) { 331 if (INTEL_INFO(dev)->gen >= 4) {
325 uint32_t reg, tmp; 332 uint32_t reg, tmp;
@@ -335,7 +342,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
335 if (tmp & BLM_PWM_ENABLE) 342 if (tmp & BLM_PWM_ENABLE)
336 goto set_level; 343 goto set_level;
337 344
338 if (dev_priv->num_pipe == 3) 345 if (INTEL_INFO(dev)->num_pipes == 3)
339 tmp &= ~BLM_PIPE_SELECT_IVB; 346 tmp &= ~BLM_PIPE_SELECT_IVB;
340 else 347 else
341 tmp &= ~BLM_PIPE_SELECT; 348 tmp &= ~BLM_PIPE_SELECT;
@@ -360,16 +367,16 @@ set_level:
360 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these 367 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
361 * registers are set. 368 * registers are set.
362 */ 369 */
363 dev_priv->backlight_enabled = true; 370 dev_priv->backlight.enabled = true;
364 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 371 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
365} 372}
366 373
367static void intel_panel_init_backlight(struct drm_device *dev) 374static void intel_panel_init_backlight(struct drm_device *dev)
368{ 375{
369 struct drm_i915_private *dev_priv = dev->dev_private; 376 struct drm_i915_private *dev_priv = dev->dev_private;
370 377
371 dev_priv->backlight_level = intel_panel_get_backlight(dev); 378 dev_priv->backlight.level = intel_panel_get_backlight(dev);
372 dev_priv->backlight_enabled = dev_priv->backlight_level != 0; 379 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
373} 380}
374 381
375enum drm_connector_status 382enum drm_connector_status
@@ -405,8 +412,7 @@ static int intel_panel_update_status(struct backlight_device *bd)
405static int intel_panel_get_brightness(struct backlight_device *bd) 412static int intel_panel_get_brightness(struct backlight_device *bd)
406{ 413{
407 struct drm_device *dev = bl_get_data(bd); 414 struct drm_device *dev = bl_get_data(bd);
408 struct drm_i915_private *dev_priv = dev->dev_private; 415 return intel_panel_get_backlight(dev);
409 return dev_priv->backlight_level;
410} 416}
411 417
412static const struct backlight_ops intel_panel_bl_ops = { 418static const struct backlight_ops intel_panel_bl_ops = {
@@ -422,33 +428,38 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
422 428
423 intel_panel_init_backlight(dev); 429 intel_panel_init_backlight(dev);
424 430
431 if (WARN_ON(dev_priv->backlight.device))
432 return -ENODEV;
433
425 memset(&props, 0, sizeof(props)); 434 memset(&props, 0, sizeof(props));
426 props.type = BACKLIGHT_RAW; 435 props.type = BACKLIGHT_RAW;
436 props.brightness = dev_priv->backlight.level;
427 props.max_brightness = _intel_panel_get_max_backlight(dev); 437 props.max_brightness = _intel_panel_get_max_backlight(dev);
428 if (props.max_brightness == 0) { 438 if (props.max_brightness == 0) {
429 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); 439 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
430 return -ENODEV; 440 return -ENODEV;
431 } 441 }
432 dev_priv->backlight = 442 dev_priv->backlight.device =
433 backlight_device_register("intel_backlight", 443 backlight_device_register("intel_backlight",
434 &connector->kdev, dev, 444 &connector->kdev, dev,
435 &intel_panel_bl_ops, &props); 445 &intel_panel_bl_ops, &props);
436 446
437 if (IS_ERR(dev_priv->backlight)) { 447 if (IS_ERR(dev_priv->backlight.device)) {
438 DRM_ERROR("Failed to register backlight: %ld\n", 448 DRM_ERROR("Failed to register backlight: %ld\n",
439 PTR_ERR(dev_priv->backlight)); 449 PTR_ERR(dev_priv->backlight.device));
440 dev_priv->backlight = NULL; 450 dev_priv->backlight.device = NULL;
441 return -ENODEV; 451 return -ENODEV;
442 } 452 }
443 dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
444 return 0; 453 return 0;
445} 454}
446 455
447void intel_panel_destroy_backlight(struct drm_device *dev) 456void intel_panel_destroy_backlight(struct drm_device *dev)
448{ 457{
449 struct drm_i915_private *dev_priv = dev->dev_private; 458 struct drm_i915_private *dev_priv = dev->dev_private;
450 if (dev_priv->backlight) 459 if (dev_priv->backlight.device) {
451 backlight_device_unregister(dev_priv->backlight); 460 backlight_device_unregister(dev_priv->backlight.device);
461 dev_priv->backlight.device = NULL;
462 }
452} 463}
453#else 464#else
454int intel_panel_setup_backlight(struct drm_connector *connector) 465int intel_panel_setup_backlight(struct drm_connector *connector)
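
[Editor's note] The panel changes collapse the loose backlight_level/backlight_enabled/backlight fields into one dev_priv->backlight struct and keep the backlight class device's props.brightness mirrored whenever the driver adjusts the level itself, so the sysfs view can no longer drift. A small model of that invariant, with types reduced to the essentials and hypothetical names:

    struct bl_class_dev { int brightness; };   /* stands in for backlight_device */

    struct backlight_state {
            int level;
            int enabled;
            struct bl_class_dev *device;
    };

    static void panel_set_backlight(struct backlight_state *bl, int level)
    {
            bl->level = level;
            if (bl->device)
                    bl->device->brightness = level;  /* keep sysfs in sync */
            if (bl->enabled) {
                    /* the real hardware write happens here */
            }
    }
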
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index adca00783e61..de3b0dc5658b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2460,10 +2460,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2460 if (val == dev_priv->rps.cur_delay) 2460 if (val == dev_priv->rps.cur_delay)
2461 return; 2461 return;
2462 2462
2463 I915_WRITE(GEN6_RPNSWREQ, 2463 if (IS_HASWELL(dev))
2464 GEN6_FREQUENCY(val) | 2464 I915_WRITE(GEN6_RPNSWREQ,
2465 GEN6_OFFSET(0) | 2465 HSW_FREQUENCY(val));
2466 GEN6_AGGRESSIVE_TURBO); 2466 else
2467 I915_WRITE(GEN6_RPNSWREQ,
2468 GEN6_FREQUENCY(val) |
2469 GEN6_OFFSET(0) |
2470 GEN6_AGGRESSIVE_TURBO);
2467 2471
2468 /* Make sure we continue to get interrupts 2472 /* Make sure we continue to get interrupts
2469 * until we hit the minimum or maximum frequencies. 2473 * until we hit the minimum or maximum frequencies.
@@ -2554,8 +2558,8 @@ static void gen6_enable_rps(struct drm_device *dev)
2554 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 2558 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2555 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2559 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2556 2560
2557 /* In units of 100MHz */ 2561 /* In units of 50MHz */
2558 dev_priv->rps.max_delay = rp_state_cap & 0xff; 2562 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
2559 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; 2563 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2560 dev_priv->rps.cur_delay = 0; 2564 dev_priv->rps.cur_delay = 0;
2561 2565
@@ -2601,12 +2605,19 @@ static void gen6_enable_rps(struct drm_device *dev)
2601 GEN6_RC_CTL_EI_MODE(1) | 2605 GEN6_RC_CTL_EI_MODE(1) |
2602 GEN6_RC_CTL_HW_ENABLE); 2606 GEN6_RC_CTL_HW_ENABLE);
2603 2607
2604 I915_WRITE(GEN6_RPNSWREQ, 2608 if (IS_HASWELL(dev)) {
2605 GEN6_FREQUENCY(10) | 2609 I915_WRITE(GEN6_RPNSWREQ,
2606 GEN6_OFFSET(0) | 2610 HSW_FREQUENCY(10));
2607 GEN6_AGGRESSIVE_TURBO); 2611 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2608 I915_WRITE(GEN6_RC_VIDEO_FREQ, 2612 HSW_FREQUENCY(12));
2609 GEN6_FREQUENCY(12)); 2613 } else {
2614 I915_WRITE(GEN6_RPNSWREQ,
2615 GEN6_FREQUENCY(10) |
2616 GEN6_OFFSET(0) |
2617 GEN6_AGGRESSIVE_TURBO);
2618 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2619 GEN6_FREQUENCY(12));
2620 }
2610 2621
2611 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 2622 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2612 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 2623 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -2631,9 +2642,11 @@ static void gen6_enable_rps(struct drm_device *dev)
2631 if (!ret) { 2642 if (!ret) {
2632 pcu_mbox = 0; 2643 pcu_mbox = 0;
2633 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 2644 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2634 if (ret && pcu_mbox & (1<<31)) { /* OC supported */ 2645 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
2635 dev_priv->rps.max_delay = pcu_mbox & 0xff; 2646 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
2636 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2647 (dev_priv->rps.max_delay & 0xff) * 50,
2648 (pcu_mbox & 0xff) * 50);
2649 dev_priv->rps.hw_max = pcu_mbox & 0xff;
2637 } 2650 }
2638 } else { 2651 } else {
2639 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 2652 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
@@ -2671,8 +2684,8 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2671{ 2684{
2672 struct drm_i915_private *dev_priv = dev->dev_private; 2685 struct drm_i915_private *dev_priv = dev->dev_private;
2673 int min_freq = 15; 2686 int min_freq = 15;
2674 int gpu_freq; 2687 unsigned int gpu_freq;
2675 unsigned int ia_freq, max_ia_freq; 2688 unsigned int max_ia_freq, min_ring_freq;
2676 int scaling_factor = 180; 2689 int scaling_factor = 180;
2677 2690
2678 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 2691 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -2688,6 +2701,10 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2688 /* Convert from kHz to MHz */ 2701 /* Convert from kHz to MHz */
2689 max_ia_freq /= 1000; 2702 max_ia_freq /= 1000;
2690 2703
2704 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
2705 /* convert DDR frequency from units of 133.3MHz to bandwidth */
2706 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
2707
2691 /* 2708 /*
2692 * For each potential GPU frequency, load a ring frequency we'd like 2709 * For each potential GPU frequency, load a ring frequency we'd like
2693 * to use for memory access. We do this by specifying the IA frequency 2710 * to use for memory access. We do this by specifying the IA frequency
@@ -2696,21 +2713,32 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2696 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; 2713 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2697 gpu_freq--) { 2714 gpu_freq--) {
2698 int diff = dev_priv->rps.max_delay - gpu_freq; 2715 int diff = dev_priv->rps.max_delay - gpu_freq;
2699 2716 unsigned int ia_freq = 0, ring_freq = 0;
2700 /* 2717
2701 * For GPU frequencies less than 750MHz, just use the lowest 2718 if (IS_HASWELL(dev)) {
2702 * ring freq. 2719 ring_freq = (gpu_freq * 5 + 3) / 4;
2703 */ 2720 ring_freq = max(min_ring_freq, ring_freq);
2704 if (gpu_freq < min_freq) 2721 /* leave ia_freq as the default, chosen by cpufreq */
2705 ia_freq = 800; 2722 } else {
2706 else 2723 /* On older processors, there is no separate ring
2707 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 2724 * clock domain, so in order to boost the bandwidth
2708 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 2725 * of the ring, we need to upclock the CPU (ia_freq).
2709 ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; 2726 *
2727 * For GPU frequencies less than 750MHz,
2728 * just use the lowest ring freq.
2729 */
2730 if (gpu_freq < min_freq)
2731 ia_freq = 800;
2732 else
2733 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2734 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2735 }
2710 2736
2711 sandybridge_pcode_write(dev_priv, 2737 sandybridge_pcode_write(dev_priv,
2712 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 2738 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
2713 ia_freq | gpu_freq); 2739 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
2740 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
2741 gpu_freq);
2714 } 2742 }
2715} 2743}
2716 2744
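
[Editor's note] Two facts drive the reworked loop: Haswell gives the ring its own clock domain, so the table pairs each GPU frequency with roughly 1.25x that value (rounded up) and floors it at a minimum derived from the DDR clock, while older parts keep the old trick of upclocking the CPU instead. The arithmetic alone, compiled into a throwaway table printer; the DCLK value is illustrative:

    #include <stdio.h>

    int main(void)
    {
            /* DCLK reads in units of 133.3 MHz; (2*4*f + 2)/3 converts it
             * into the ratio units the frequency table uses. */
            unsigned int dclk = 3;
            unsigned int min_ring_freq = (2 * 4 * dclk + 2) / 3;

            for (unsigned int gpu_freq = 22; gpu_freq >= 7; gpu_freq--) {
                    /* ~1.25x the GPU frequency, rounding up */
                    unsigned int ring_freq = (gpu_freq * 5 + 3) / 4;

                    if (ring_freq < min_ring_freq)
                            ring_freq = min_ring_freq;
                    printf("gpu %2u -> ring %2u\n", gpu_freq, ring_freq);
            }
            return 0;
    }
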
@@ -2821,7 +2849,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2821 ret = intel_ring_idle(ring); 2849 ret = intel_ring_idle(ring);
2822 dev_priv->mm.interruptible = was_interruptible; 2850 dev_priv->mm.interruptible = was_interruptible;
2823 if (ret) { 2851 if (ret) {
2824 DRM_ERROR("failed to enable ironlake power power savings\n"); 2852 DRM_ERROR("failed to enable ironlake power savings\n");
2825 ironlake_teardown_rc6(dev); 2853 ironlake_teardown_rc6(dev);
2826 return; 2854 return;
2827 } 2855 }
@@ -3562,6 +3590,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
3562{ 3590{
3563 struct drm_i915_private *dev_priv = dev->dev_private; 3591 struct drm_i915_private *dev_priv = dev->dev_private;
3564 int pipe; 3592 int pipe;
3593 uint32_t val;
3565 3594
3566 /* 3595 /*
3567 * On Ibex Peak and Cougar Point, we need to disable clock 3596 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -3574,8 +3603,17 @@ static void cpt_init_clock_gating(struct drm_device *dev)
3574 /* The below fixes the weird display corruption, a few pixels shifted 3603 /* The below fixes the weird display corruption, a few pixels shifted
3575 * downward, on (only) LVDS of some HP laptops with IVY. 3604 * downward, on (only) LVDS of some HP laptops with IVY.
3576 */ 3605 */
3577 for_each_pipe(pipe) 3606 for_each_pipe(pipe) {
3578 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); 3607 val = I915_READ(TRANS_CHICKEN2(pipe));
3608 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
3609 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
3610 if (dev_priv->fdi_rx_polarity_inverted)
3611 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
3612 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
3613 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
3614 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
3615 I915_WRITE(TRANS_CHICKEN2(pipe), val);
3616 }
3579 /* WADP0ClockGatingDisable */ 3617 /* WADP0ClockGatingDisable */
3580 for_each_pipe(pipe) { 3618 for_each_pipe(pipe) {
3581 I915_WRITE(TRANS_CHICKEN1(pipe), 3619 I915_WRITE(TRANS_CHICKEN1(pipe),
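
[Editor's note] The TRANS_CHICKEN2 change converts a blind write into a read-modify-write so unrelated bits survive, then re-applies exactly one bit from a platform quirk flag. Stripped of the register specifics (the bit positions below are placeholders), the idiom is:

    #include <stdint.h>

    #define TIMING_OVERRIDE        (1u << 31)   /* placeholder bit layout */
    #define FDI_POLARITY_REVERSED  (1u << 29)

    static uint32_t chicken2_rmw(uint32_t old, int fdi_polarity_inverted)
    {
            uint32_t val = old;

            val |= TIMING_OVERRIDE;               /* always wanted */
            val &= ~FDI_POLARITY_REVERSED;        /* known state first... */
            if (fdi_polarity_inverted)
                    val |= FDI_POLARITY_REVERSED; /* ...then re-apply quirk */
            return val;
    }
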
@@ -3768,6 +3806,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3768 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3806 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3769 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3807 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3770 3808
3809 /* WaSwitchSolVfFArbitrationPriority */
3810 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
3811
3771 /* XXX: This is a workaround for early silicon revisions and should be 3812 /* XXX: This is a workaround for early silicon revisions and should be
3772 * removed later. 3813 * removed later.
3773 */ 3814 */
@@ -3874,7 +3915,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3874 snpcr |= GEN6_MBC_SNPCR_MED; 3915 snpcr |= GEN6_MBC_SNPCR_MED;
3875 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3916 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3876 3917
3877 cpt_init_clock_gating(dev); 3918 if (!HAS_PCH_NOP(dev))
3919 cpt_init_clock_gating(dev);
3878 3920
3879 gen6_check_mch_setup(dev); 3921 gen6_check_mch_setup(dev);
3880} 3922}
@@ -3899,8 +3941,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
3899 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3941 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3900 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3942 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3901 3943
3944 /* WaDisablePSDDualDispatchEnable */
3902 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 3945 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3903 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 3946 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
3947 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3904 3948
3905 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3949 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3906 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3950 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -3968,24 +4012,20 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
3968 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4012 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3969 4013
3970 /* 4014 /*
3971 * On ValleyView, the GUnit needs to signal the GT
3972 * when flip and other events complete. So enable
3973 * all the GUnit->GT interrupts here
3974 */
3975 I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
3976 PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
3977 SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
3978 PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
3979 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3980 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3981 PLANEA_FLIPDONE_INT_EN);
3982
3983 /*
3984 * WaDisableVLVClockGating_VBIIssue 4015 * WaDisableVLVClockGating_VBIIssue
3985 * Disable clock gating on the GCFG unit to prevent a delay 4016
3986 * in the reporting of vblank events. 4017 * in the reporting of vblank events.
3987 */ 4018 */
3988 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 4019 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
4020
4021 /* Conservative clock gating settings for now */
4022 I915_WRITE(0x9400, 0xffffffff);
4023 I915_WRITE(0x9404, 0xffffffff);
4024 I915_WRITE(0x9408, 0xffffffff);
4025 I915_WRITE(0x940c, 0xffffffff);
4026 I915_WRITE(0x9410, 0xffffffff);
4027 I915_WRITE(0x9414, 0xffffffff);
4028 I915_WRITE(0x9418, 0xffffffff);
3989} 4029}
3990 4030
3991static void g4x_init_clock_gating(struct drm_device *dev) 4031static void g4x_init_clock_gating(struct drm_device *dev)
@@ -4070,13 +4110,29 @@ void intel_init_clock_gating(struct drm_device *dev)
4070 dev_priv->display.init_clock_gating(dev); 4110 dev_priv->display.init_clock_gating(dev);
4071} 4111}
4072 4112
4113/**
4114 * We should only use the power well if we explicitly asked the hardware to
4115 * enable it, so check if it's enabled and also check if we've requested it to
4116 * be enabled.
4117 */
4118bool intel_using_power_well(struct drm_device *dev)
4119{
4120 struct drm_i915_private *dev_priv = dev->dev_private;
4121
4122 if (IS_HASWELL(dev))
4123 return I915_READ(HSW_PWR_WELL_DRIVER) ==
4124 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
4125 else
4126 return true;
4127}
4128
4073void intel_set_power_well(struct drm_device *dev, bool enable) 4129void intel_set_power_well(struct drm_device *dev, bool enable)
4074{ 4130{
4075 struct drm_i915_private *dev_priv = dev->dev_private; 4131 struct drm_i915_private *dev_priv = dev->dev_private;
4076 bool is_enabled, enable_requested; 4132 bool is_enabled, enable_requested;
4077 uint32_t tmp; 4133 uint32_t tmp;
4078 4134
4079 if (!IS_HASWELL(dev)) 4135 if (!HAS_POWER_WELL(dev))
4080 return; 4136 return;
4081 4137
4082 if (!i915_disable_power_well && !enable) 4138 if (!i915_disable_power_well && !enable)
@@ -4114,7 +4170,7 @@ void intel_init_power_well(struct drm_device *dev)
4114{ 4170{
4115 struct drm_i915_private *dev_priv = dev->dev_private; 4171 struct drm_i915_private *dev_priv = dev->dev_private;
4116 4172
4117 if (!IS_HASWELL(dev)) 4173 if (!HAS_POWER_WELL(dev))
4118 return; 4174 return;
4119 4175
4120 /* For now, we need the power well to be always enabled. */ 4176 /* For now, we need the power well to be always enabled. */
@@ -4176,7 +4232,6 @@ void intel_init_pm(struct drm_device *dev)
4176 } 4232 }
4177 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 4233 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
4178 } else if (IS_IVYBRIDGE(dev)) { 4234 } else if (IS_IVYBRIDGE(dev)) {
4179 /* FIXME: detect B0+ stepping and use auto training */
4180 if (SNB_READ_WM0_LATENCY()) { 4235 if (SNB_READ_WM0_LATENCY()) {
4181 dev_priv->display.update_wm = ivybridge_update_wm; 4236 dev_priv->display.update_wm = ivybridge_update_wm;
4182 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 4237 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
@@ -4274,21 +4329,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
4274 4329
4275static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 4330static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4276{ 4331{
4277 u32 forcewake_ack; 4332 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
4278
4279 if (IS_HASWELL(dev_priv->dev))
4280 forcewake_ack = FORCEWAKE_ACK_HSW;
4281 else
4282 forcewake_ack = FORCEWAKE_ACK;
4283
4284 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
4285 FORCEWAKE_ACK_TIMEOUT_MS)) 4333 FORCEWAKE_ACK_TIMEOUT_MS))
4286 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4334 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4287 4335
4288 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); 4336 I915_WRITE_NOTRACE(FORCEWAKE, 1);
4289 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4337 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4290 4338
4291 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4339 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
4292 FORCEWAKE_ACK_TIMEOUT_MS)) 4340 FORCEWAKE_ACK_TIMEOUT_MS))
4293 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4341 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4294 4342
@@ -4311,7 +4359,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4311 else 4359 else
4312 forcewake_ack = FORCEWAKE_MT_ACK; 4360 forcewake_ack = FORCEWAKE_MT_ACK;
4313 4361
4314 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 4362 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
4315 FORCEWAKE_ACK_TIMEOUT_MS)) 4363 FORCEWAKE_ACK_TIMEOUT_MS))
4316 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4364 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4317 4365
@@ -4319,7 +4367,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4319 /* something from same cacheline, but !FORCEWAKE_MT */ 4367 /* something from same cacheline, but !FORCEWAKE_MT */
4320 POSTING_READ(ECOBUS); 4368 POSTING_READ(ECOBUS);
4321 4369
4322 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4370 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
4323 FORCEWAKE_ACK_TIMEOUT_MS)) 4371 FORCEWAKE_ACK_TIMEOUT_MS))
4324 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4372 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4325 4373
@@ -4409,15 +4457,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4409 4457
4410static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4458static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4411{ 4459{
4412 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, 4460 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
4413 FORCEWAKE_ACK_TIMEOUT_MS)) 4461 FORCEWAKE_ACK_TIMEOUT_MS))
4414 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4462 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4415 4463
4416 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4464 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4465 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
4466 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4417 4467
4418 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 4468 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
4419 FORCEWAKE_ACK_TIMEOUT_MS)) 4469 FORCEWAKE_ACK_TIMEOUT_MS))
4420 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4470 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
4471
4472 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
4473 FORCEWAKE_KERNEL),
4474 FORCEWAKE_ACK_TIMEOUT_MS))
4475 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
4421 4476
4422 __gen6_gt_wait_for_thread_c0(dev_priv); 4477 __gen6_gt_wait_for_thread_c0(dev_priv);
4423} 4478}
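
[Editor's note] The _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE wrappers used on FORCEWAKE_VLV and FORCEWAKE_MEDIA_VLV exist because these are masked registers: the high 16 bits of a write select which of the low 16 bits take effect, so no read-modify-write is needed. The macros expand to roughly the following, a sketch consistent with the usage above:

    /* Writing (bit << 16) | bit sets the bit; (bit << 16) alone clears it;
     * low bits whose mask half is 0 are ignored by the hardware. */
    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)

    #define FORCEWAKE_KERNEL        (1 << 0)    /* the bit polled for above */
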
@@ -4425,8 +4480,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4425static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4480static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4426{ 4481{
4427 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4482 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4428 /* something from same cacheline, but !FORCEWAKE_VLV */ 4483 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
4429 POSTING_READ(FORCEWAKE_ACK_VLV); 4484 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4485 /* The below doubles as a POSTING_READ */
4430 gen6_gt_check_fifodbg(dev_priv); 4486 gen6_gt_check_fifodbg(dev_priv);
4431} 4487}
4432 4488
@@ -4511,3 +4567,56 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4511 4567
4512 return 0; 4568 return 0;
4513} 4569}
4570
4571static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
4572 u8 addr, u32 *val)
4573{
4574 u32 cmd, devfn, port, be, bar;
4575
4576 bar = 0;
4577 be = 0xf;
4578 port = IOSF_PORT_PUNIT;
4579 devfn = PCI_DEVFN(2, 0);
4580
4581 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
4582 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
4583 (bar << IOSF_BAR_SHIFT);
4584
4585 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4586
4587 if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
4588 DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
4589 opcode == PUNIT_OPCODE_REG_READ ?
4590 "read" : "write");
4591 return -EAGAIN;
4592 }
4593
4594 I915_WRITE(VLV_IOSF_ADDR, addr);
4595 if (opcode == PUNIT_OPCODE_REG_WRITE)
4596 I915_WRITE(VLV_IOSF_DATA, *val);
4597 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
4598
4599 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
4600 500)) {
4601 DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
4602 opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
4603 addr);
4604 return -ETIMEDOUT;
4605 }
4606
4607 if (opcode == PUNIT_OPCODE_REG_READ)
4608 *val = I915_READ(VLV_IOSF_DATA);
4609 I915_WRITE(VLV_IOSF_DATA, 0);
4610
4611 return 0;
4612}
4613
4614int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
4615{
4616 return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
4617}
4618
4619int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
4620{
4621 return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
4622}
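
[Editor's note] vlv_punit_rw is a textbook doorbell mailbox: bail out if the sideband is already busy, load the address (and data, for writes), ring the doorbell with an encoded command, poll the busy bit back down, then fetch the result for reads. A userspace-testable model of that sequence, with fake MMIO and an invented command encoding; a real device would clear SB_BUSY when the transaction completes:

    #include <stdint.h>

    enum { OP_READ, OP_WRITE };

    struct mailbox { uint32_t doorbell, addr, data; };
    #define SB_BUSY (1u << 0)

    static int punit_rw(struct mailbox *mb, int op, uint8_t addr, uint32_t *val)
    {
            if (mb->doorbell & SB_BUSY)
                    return -1;                    /* -EAGAIN in the driver */

            mb->addr = addr;
            if (op == OP_WRITE)
                    mb->data = *val;
            mb->doorbell = SB_BUSY | ((uint32_t)op << 8); /* encoding invented */

            for (int tries = 0; mb->doorbell & SB_BUSY; tries++)
                    if (tries > 500)
                            return -2;            /* -ETIMEDOUT in the driver */

            if (op == OP_READ)
                    *val = mb->data;
            mb->data = 0;                         /* scrub, as the driver does */
            return 0;
    }
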
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 78413ec623c9..d15428404b9a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
246 return; 246 return;
247 } 247 }
248 248
249 if (intel_sdvo->sdvo_reg == SDVOB) { 249 if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
250 cval = I915_READ(SDVOC); 250 cval = I915_READ(GEN3_SDVOC);
251 } else { 251 else
252 bval = I915_READ(SDVOB); 252 bval = I915_READ(GEN3_SDVOB);
253 } 253
254 /* 254 /*
255 * Write the registers twice for luck. Sometimes, 255 * Write the registers twice for luck. Sometimes,
256 * writing them only once doesn't appear to 'stick'. 256 * writing them only once doesn't appear to 'stick'.
@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
258 */ 258 */
259 for (i = 0; i < 2; i++) 259 for (i = 0; i < 2; i++)
260 { 260 {
261 I915_WRITE(SDVOB, bval); 261 I915_WRITE(GEN3_SDVOB, bval);
262 I915_READ(SDVOB); 262 I915_READ(GEN3_SDVOB);
263 I915_WRITE(SDVOC, cval); 263 I915_WRITE(GEN3_SDVOC, cval);
264 I915_READ(SDVOC); 264 I915_READ(GEN3_SDVOC);
265 } 265 }
266} 266}
267 267
@@ -788,7 +788,6 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
788 v_sync_offset = mode->vsync_start - mode->vdisplay; 788 v_sync_offset = mode->vsync_start - mode->vdisplay;
789 789
790 mode_clock = mode->clock; 790 mode_clock = mode->clock;
791 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
792 mode_clock /= 10; 791 mode_clock /= 10;
793 dtd->part1.clock = mode_clock; 792 dtd->part1.clock = mode_clock;
794 793
@@ -957,14 +956,17 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
957 .len = DIP_LEN_AVI, 956 .len = DIP_LEN_AVI,
958 }; 957 };
959 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 958 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
959 struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
960 960
961 if (intel_sdvo->rgb_quant_range_selectable) { 961 if (intel_sdvo->rgb_quant_range_selectable) {
962 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 962 if (intel_crtc->config.limited_color_range)
963 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 963 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
964 else 964 else
965 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 965 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
966 } 966 }
967 967
968 avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
969
968 intel_dip_infoframe_csum(&avi_if); 970 intel_dip_infoframe_csum(&avi_if);
969 971
970 /* sdvo spec says that the ecc is handled by the hw, and it looks like 972 /* sdvo spec says that the ecc is handled by the hw, and it looks like
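
[Editor's note] The new line storing drm_match_cea_mode() into avi_if.body.avi.VIC records the CEA-861 Video Identification Code before the frame is checksummed and handed to the SDVO device. HDMI infoframe checksums are chosen so that header plus payload plus checksum sum to zero mod 256; a sketch of that computation:

    #include <stddef.h>
    #include <stdint.h>

    /* Checksum byte that makes header + payload sum to zero mod 256. */
    static uint8_t infoframe_csum(const uint8_t *bytes, size_t len)
    {
            unsigned int sum = 0;

            for (size_t i = 0; i < len; i++)
                    sum += bytes[i];
            return (uint8_t)(0x100 - (sum & 0xff));
    }
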
@@ -1039,12 +1041,18 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1039 return true; 1041 return true;
1040} 1042}
1041 1043
1042static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, 1044static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1043 const struct drm_display_mode *mode, 1045 struct intel_crtc_config *pipe_config)
1044 struct drm_display_mode *adjusted_mode)
1045{ 1046{
1046 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1047 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1047 int multiplier; 1048 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
1049 struct drm_display_mode *mode = &pipe_config->requested_mode;
1050
1051 DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
1052 pipe_config->pipe_bpp = 8*3;
1053
1054 if (HAS_PCH_SPLIT(encoder->base.dev))
1055 pipe_config->has_pch_encoder = true;
1048 1056
1049 /* We need to construct preferred input timings based on our 1057 /* We need to construct preferred input timings based on our
1050 * output timings. To do that, we have to set the output 1058 * output timings. To do that, we have to set the output
@@ -1071,37 +1079,40 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1071 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The
1072 * SDVO device will factor out the multiplier during mode_set. 1080 * SDVO device will factor out the multiplier during mode_set.
1073 */ 1081 */
1074 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); 1082 pipe_config->pixel_multiplier =
1075 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); 1083 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1084 adjusted_mode->clock *= pipe_config->pixel_multiplier;
1076 1085
1077 if (intel_sdvo->color_range_auto) { 1086 if (intel_sdvo->color_range_auto) {
1078 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1087 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1088 /* FIXME: This bit is only valid when using TMDS encoding and 8
1089 * bit per color mode. */
1079 if (intel_sdvo->has_hdmi_monitor && 1090 if (intel_sdvo->has_hdmi_monitor &&
1080 drm_match_cea_mode(adjusted_mode) > 1) 1091 drm_match_cea_mode(adjusted_mode) > 1)
1081 intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1092 intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
1082 else 1093 else
1083 intel_sdvo->color_range = 0; 1094 intel_sdvo->color_range = 0;
1084 } 1095 }
1085 1096
1086 if (intel_sdvo->color_range) 1097 if (intel_sdvo->color_range)
1087 adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; 1098 pipe_config->limited_color_range = true;
1088 1099
1089 return true; 1100 return true;
1090} 1101}
1091 1102
1092static void intel_sdvo_mode_set(struct drm_encoder *encoder, 1103static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1093 struct drm_display_mode *mode,
1094 struct drm_display_mode *adjusted_mode)
1095{ 1104{
1096 struct drm_device *dev = encoder->dev; 1105 struct drm_device *dev = intel_encoder->base.dev;
1097 struct drm_i915_private *dev_priv = dev->dev_private; 1106 struct drm_i915_private *dev_priv = dev->dev_private;
1098 struct drm_crtc *crtc = encoder->crtc; 1107 struct drm_crtc *crtc = intel_encoder->base.crtc;
1099 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1108 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1100 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1109 struct drm_display_mode *adjusted_mode =
1110 &intel_crtc->config.adjusted_mode;
1111 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
1112 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
1101 u32 sdvox; 1113 u32 sdvox;
1102 struct intel_sdvo_in_out_map in_out; 1114 struct intel_sdvo_in_out_map in_out;
1103 struct intel_sdvo_dtd input_dtd, output_dtd; 1115 struct intel_sdvo_dtd input_dtd, output_dtd;
1104 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1105 int rate; 1116 int rate;
1106 1117
1107 if (!mode) 1118 if (!mode)
@@ -1161,7 +1172,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1161 DRM_INFO("Setting input timings on %s failed\n", 1172 DRM_INFO("Setting input timings on %s failed\n",
1162 SDVO_NAME(intel_sdvo)); 1173 SDVO_NAME(intel_sdvo));
1163 1174
1164 switch (pixel_multiplier) { 1175 switch (intel_crtc->config.pixel_multiplier) {
1165 default: 1176 default:
1166 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 1177 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1167 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; 1178 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1182,10 +1193,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1182 } else { 1193 } else {
1183 sdvox = I915_READ(intel_sdvo->sdvo_reg); 1194 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1184 switch (intel_sdvo->sdvo_reg) { 1195 switch (intel_sdvo->sdvo_reg) {
1185 case SDVOB: 1196 case GEN3_SDVOB:
1186 sdvox &= SDVOB_PRESERVE_MASK; 1197 sdvox &= SDVOB_PRESERVE_MASK;
1187 break; 1198 break;
1188 case SDVOC: 1199 case GEN3_SDVOC:
1189 sdvox &= SDVOC_PRESERVE_MASK; 1200 sdvox &= SDVOC_PRESERVE_MASK;
1190 break; 1201 break;
1191 } 1202 }
@@ -1193,9 +1204,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1193 } 1204 }
1194 1205
1195 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1206 if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
1196 sdvox |= TRANSCODER_CPT(intel_crtc->pipe); 1207 sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
1197 else 1208 else
1198 sdvox |= TRANSCODER(intel_crtc->pipe); 1209 sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
1199 1210
1200 if (intel_sdvo->has_hdmi_audio) 1211 if (intel_sdvo->has_hdmi_audio)
1201 sdvox |= SDVO_AUDIO_ENABLE; 1212 sdvox |= SDVO_AUDIO_ENABLE;
@@ -1205,7 +1216,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1205 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1216 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
1206 /* done in crtc_mode_set as it lives inside the dpll register */ 1217 /* done in crtc_mode_set as it lives inside the dpll register */
1207 } else { 1218 } else {
1208 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1219 sdvox |= (intel_crtc->config.pixel_multiplier - 1)
1220 << SDVO_PORT_MULTIPLY_SHIFT;
1209 } 1221 }
1210 1222
1211 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && 1223 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
@@ -1235,11 +1247,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1235 struct drm_device *dev = encoder->base.dev; 1247 struct drm_device *dev = encoder->base.dev;
1236 struct drm_i915_private *dev_priv = dev->dev_private; 1248 struct drm_i915_private *dev_priv = dev->dev_private;
1237 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1249 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1250 u16 active_outputs;
1238 u32 tmp; 1251 u32 tmp;
1239 1252
1240 tmp = I915_READ(intel_sdvo->sdvo_reg); 1253 tmp = I915_READ(intel_sdvo->sdvo_reg);
1254 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1241 1255
1242 if (!(tmp & SDVO_ENABLE)) 1256 if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
1243 return false; 1257 return false;
1244 1258
1245 if (HAS_PCH_CPT(dev)) 1259 if (HAS_PCH_CPT(dev))
@@ -1305,15 +1319,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1305 temp = I915_READ(intel_sdvo->sdvo_reg); 1319 temp = I915_READ(intel_sdvo->sdvo_reg);
1306 if ((temp & SDVO_ENABLE) == 0) { 1320 if ((temp & SDVO_ENABLE) == 0) {
1307 /* HW workaround for IBX, we need to move the port 1321 /* HW workaround for IBX, we need to move the port
1308 * to transcoder A before disabling it. */ 1322 * to transcoder A before disabling it, so restore it here. */
1309 if (HAS_PCH_IBX(dev)) { 1323 if (HAS_PCH_IBX(dev))
1310 struct drm_crtc *crtc = encoder->base.crtc; 1324 temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
1311 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1312
1313 /* Restore the transcoder select bit. */
1314 if (pipe == PIPE_B)
1315 temp |= SDVO_PIPE_B_SELECT;
1316 }
1317 1325
1318 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); 1326 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1319 } 1327 }
@@ -1922,6 +1930,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
1922 } 1930 }
1923 1931
1924 if (property == dev_priv->broadcast_rgb_property) { 1932 if (property == dev_priv->broadcast_rgb_property) {
1933 bool old_auto = intel_sdvo->color_range_auto;
1934 uint32_t old_range = intel_sdvo->color_range;
1935
1925 switch (val) { 1936 switch (val) {
1926 case INTEL_BROADCAST_RGB_AUTO: 1937 case INTEL_BROADCAST_RGB_AUTO:
1927 intel_sdvo->color_range_auto = true; 1938 intel_sdvo->color_range_auto = true;
@@ -1932,11 +1943,18 @@ intel_sdvo_set_property(struct drm_connector *connector,
1932 break; 1943 break;
1933 case INTEL_BROADCAST_RGB_LIMITED: 1944 case INTEL_BROADCAST_RGB_LIMITED:
1934 intel_sdvo->color_range_auto = false; 1945 intel_sdvo->color_range_auto = false;
1935 intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1946 /* FIXME: this bit is only valid when using TMDS
1947 * encoding and 8 bit per color mode. */
1948 intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
1936 break; 1949 break;
1937 default: 1950 default:
1938 return -EINVAL; 1951 return -EINVAL;
1939 } 1952 }
1953
1954 if (old_auto == intel_sdvo->color_range_auto &&
1955 old_range == intel_sdvo->color_range)
1956 return 0;
1957
1940 goto done; 1958 goto done;
1941 } 1959 }
1942 1960
@@ -2040,11 +2058,6 @@ done:
2040#undef CHECK_PROPERTY 2058#undef CHECK_PROPERTY
2041} 2059}
2042 2060
2043static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
2044 .mode_fixup = intel_sdvo_mode_fixup,
2045 .mode_set = intel_sdvo_mode_set,
2046};
2047
2048static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 2061static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2049 .dpms = intel_sdvo_dpms, 2062 .dpms = intel_sdvo_dpms,
2050 .detect = intel_sdvo_detect, 2063 .detect = intel_sdvo_detect,
@@ -2269,7 +2282,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2269 connector = &intel_connector->base; 2282 connector = &intel_connector->base;
2270 if (intel_sdvo_get_hotplug_support(intel_sdvo) & 2283 if (intel_sdvo_get_hotplug_support(intel_sdvo) &
2271 intel_sdvo_connector->output_flag) { 2284 intel_sdvo_connector->output_flag) {
2272 connector->polled = DRM_CONNECTOR_POLL_HPD;
2273 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; 2285 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
2274 /* Some SDVO devices have one-shot hotplug interrupts. 2286 /* Some SDVO devices have one-shot hotplug interrupts.
2275 * Ensure that they get re-enabled when an interrupt happens. 2287 * Ensure that they get re-enabled when an interrupt happens.
@@ -2277,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2277 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2289 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2278 intel_sdvo_enable_hotplug(intel_encoder); 2290 intel_sdvo_enable_hotplug(intel_encoder);
2279 } else { 2291 } else {
2280 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2292 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2281 } 2293 }
2282 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2294 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2283 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2295 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2346,7 +2358,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2346 2358
2347 intel_connector = &intel_sdvo_connector->base; 2359 intel_connector = &intel_sdvo_connector->base;
2348 connector = &intel_connector->base; 2360 connector = &intel_connector->base;
2349 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 2361 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2350 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2362 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2351 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2363 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2352 2364
@@ -2739,7 +2751,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2739 struct intel_sdvo *intel_sdvo; 2751 struct intel_sdvo *intel_sdvo;
2740 u32 hotplug_mask; 2752 u32 hotplug_mask;
2741 int i; 2753 int i;
2742
2743 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2754 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
2744 if (!intel_sdvo) 2755 if (!intel_sdvo)
2745 return false; 2756 return false;
@@ -2779,9 +2790,15 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2779 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; 2790 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2780 } 2791 }
2781 2792
2782 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); 2793 /* Only enable the hotplug irq if we need it, to work around noisy
2794 * hotplug lines.
2795 */
2796 if (intel_sdvo->hotplug_active)
 2797		intel_encoder->hpd_pin = intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
2783 2798
2799 intel_encoder->compute_config = intel_sdvo_compute_config;
2784 intel_encoder->disable = intel_disable_sdvo; 2800 intel_encoder->disable = intel_disable_sdvo;
2801 intel_encoder->mode_set = intel_sdvo_mode_set;
2785 intel_encoder->enable = intel_enable_sdvo; 2802 intel_encoder->enable = intel_enable_sdvo;
2786 intel_encoder->get_hw_state = intel_sdvo_get_hw_state; 2803 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2787 2804
@@ -2807,12 +2824,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2807 */ 2824 */
2808 intel_sdvo->base.cloneable = false; 2825 intel_sdvo->base.cloneable = false;
2809 2826
2810 /* Only enable the hotplug irq if we need it, to work around noisy
2811 * hotplug lines.
2812 */
2813 if (intel_sdvo->hotplug_active)
2814 dev_priv->hotplug_supported_mask |= hotplug_mask;
2815
2816 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 2827 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
2817 2828
2818 /* Set the input timing to the screen. Assume always input 0. */ 2829 /* Set the input timing to the screen. Assume always input 0. */
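
The compute_config conversion in this file folds the old mode_fixup duties into the encoder's compute_config hook: the pixel multiplier now lives in pipe_config and the adjusted clock is pre-multiplied, so the CRTC code programs the DPLL for the real bus rate. As a rough sketch of the multiplier selection (modeled on the intel_sdvo_get_pixel_multiplier() helper called above; the thresholds reflect the SDVO requirement that the multiplied dot clock sit in roughly the 100-200 MHz window, so take this as an illustration rather than a verbatim copy):

    /* Illustrative sketch: pick a multiplier so clock_khz * mult lands
     * in the SDVO bus clock window (~100-200 MHz; kHz units, as in
     * struct drm_display_mode).
     */
    static int sdvo_pick_pixel_multiplier(int clock_khz)
    {
            if (clock_khz >= 100000)
                    return 1;       /* already fast enough */
            else if (clock_khz >= 50000)
                    return 2;       /* 50-100 MHz: double it */
            else
                    return 4;       /* 25-50 MHz: quadruple it */
    }
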
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1b6eb76beb7c..c7d25c5dd4e6 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,174 @@
37#include "i915_drv.h" 37#include "i915_drv.h"
38 38
39static void 39static void
40vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
41 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
42 unsigned int crtc_w, unsigned int crtc_h,
43 uint32_t x, uint32_t y,
44 uint32_t src_w, uint32_t src_h)
45{
46 struct drm_device *dev = dplane->dev;
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct intel_plane *intel_plane = to_intel_plane(dplane);
49 int pipe = intel_plane->pipe;
50 int plane = intel_plane->plane;
51 u32 sprctl;
52 unsigned long sprsurf_offset, linear_offset;
53 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
54
55 sprctl = I915_READ(SPCNTR(pipe, plane));
56
57 /* Mask out pixel format bits in case we change it */
58 sprctl &= ~SP_PIXFORMAT_MASK;
59 sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
60 sprctl &= ~SP_TILED;
61
62 switch (fb->pixel_format) {
63 case DRM_FORMAT_YUYV:
64 sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
65 break;
66 case DRM_FORMAT_YVYU:
67 sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
68 break;
69 case DRM_FORMAT_UYVY:
70 sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
71 break;
72 case DRM_FORMAT_VYUY:
73 sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
74 break;
75 case DRM_FORMAT_RGB565:
76 sprctl |= SP_FORMAT_BGR565;
77 break;
78 case DRM_FORMAT_XRGB8888:
79 sprctl |= SP_FORMAT_BGRX8888;
80 break;
81 case DRM_FORMAT_ARGB8888:
82 sprctl |= SP_FORMAT_BGRA8888;
83 break;
84 case DRM_FORMAT_XBGR2101010:
85 sprctl |= SP_FORMAT_RGBX1010102;
86 break;
87 case DRM_FORMAT_ABGR2101010:
88 sprctl |= SP_FORMAT_RGBA1010102;
89 break;
90 case DRM_FORMAT_XBGR8888:
91 sprctl |= SP_FORMAT_RGBX8888;
92 break;
93 case DRM_FORMAT_ABGR8888:
94 sprctl |= SP_FORMAT_RGBA8888;
95 break;
96 default:
97 /*
98 * If we get here one of the upper layers failed to filter
99 * out the unsupported plane formats
100 */
101 BUG();
102 break;
103 }
104
105 if (obj->tiling_mode != I915_TILING_NONE)
106 sprctl |= SP_TILED;
107
108 sprctl |= SP_ENABLE;
109
110 /* Sizes are 0 based */
111 src_w--;
112 src_h--;
113 crtc_w--;
114 crtc_h--;
115
116 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
117
118 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
119 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
120
121 linear_offset = y * fb->pitches[0] + x * pixel_size;
122 sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
123 obj->tiling_mode,
124 pixel_size,
125 fb->pitches[0]);
126 linear_offset -= sprsurf_offset;
127
128 if (obj->tiling_mode != I915_TILING_NONE)
129 I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
130 else
131 I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
132
133 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
134 I915_WRITE(SPCNTR(pipe, plane), sprctl);
135 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
136 sprsurf_offset);
137 POSTING_READ(SPSURF(pipe, plane));
138}
139
140static void
141vlv_disable_plane(struct drm_plane *dplane)
142{
143 struct drm_device *dev = dplane->dev;
144 struct drm_i915_private *dev_priv = dev->dev_private;
145 struct intel_plane *intel_plane = to_intel_plane(dplane);
146 int pipe = intel_plane->pipe;
147 int plane = intel_plane->plane;
148
149 I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
150 ~SP_ENABLE);
151 /* Activate double buffered register update */
152 I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
153 POSTING_READ(SPSURF(pipe, plane));
154}
155
156static int
157vlv_update_colorkey(struct drm_plane *dplane,
158 struct drm_intel_sprite_colorkey *key)
159{
160 struct drm_device *dev = dplane->dev;
161 struct drm_i915_private *dev_priv = dev->dev_private;
162 struct intel_plane *intel_plane = to_intel_plane(dplane);
163 int pipe = intel_plane->pipe;
164 int plane = intel_plane->plane;
165 u32 sprctl;
166
167 if (key->flags & I915_SET_COLORKEY_DESTINATION)
168 return -EINVAL;
169
170 I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
171 I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
172 I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
173
174 sprctl = I915_READ(SPCNTR(pipe, plane));
175 sprctl &= ~SP_SOURCE_KEY;
176 if (key->flags & I915_SET_COLORKEY_SOURCE)
177 sprctl |= SP_SOURCE_KEY;
178 I915_WRITE(SPCNTR(pipe, plane), sprctl);
179
180 POSTING_READ(SPKEYMSK(pipe, plane));
181
182 return 0;
183}
184
185static void
186vlv_get_colorkey(struct drm_plane *dplane,
187 struct drm_intel_sprite_colorkey *key)
188{
189 struct drm_device *dev = dplane->dev;
190 struct drm_i915_private *dev_priv = dev->dev_private;
191 struct intel_plane *intel_plane = to_intel_plane(dplane);
192 int pipe = intel_plane->pipe;
193 int plane = intel_plane->plane;
194 u32 sprctl;
195
196 key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
197 key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
198 key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
199
200 sprctl = I915_READ(SPCNTR(pipe, plane));
201 if (sprctl & SP_SOURCE_KEY)
202 key->flags = I915_SET_COLORKEY_SOURCE;
203 else
204 key->flags = I915_SET_COLORKEY_NONE;
205}
206
207static void
40ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 208ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
41 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 209 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
42 unsigned int crtc_w, unsigned int crtc_h, 210 unsigned int crtc_w, unsigned int crtc_h,
@@ -441,6 +609,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
441 609
442 old_obj = intel_plane->obj; 610 old_obj = intel_plane->obj;
443 611
612 intel_plane->crtc_x = crtc_x;
613 intel_plane->crtc_y = crtc_y;
614 intel_plane->crtc_w = crtc_w;
615 intel_plane->crtc_h = crtc_h;
616 intel_plane->src_x = src_x;
617 intel_plane->src_y = src_y;
618 intel_plane->src_w = src_w;
619 intel_plane->src_h = src_h;
620
444 src_w = src_w >> 16; 621 src_w = src_w >> 16;
445 src_h = src_h >> 16; 622 src_h = src_h >> 16;
446 623
@@ -513,6 +690,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
513 690
514 mutex_lock(&dev->struct_mutex); 691 mutex_lock(&dev->struct_mutex);
515 692
693 /* Note that this will apply the VT-d workaround for scanouts,
694 * which is more restrictive than required for sprites. (The
695 * primary plane requires 256KiB alignment with 64 PTE padding,
 696	 * the sprite planes only require 128KiB alignment and 32 PTE padding.)
697 */
516 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 698 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
517 if (ret) 699 if (ret)
518 goto out_unlock; 700 goto out_unlock;
@@ -568,6 +750,8 @@ intel_disable_plane(struct drm_plane *plane)
568 if (!intel_plane->obj) 750 if (!intel_plane->obj)
569 goto out; 751 goto out;
570 752
753 intel_wait_for_vblank(dev, intel_plane->pipe);
754
571 mutex_lock(&dev->struct_mutex); 755 mutex_lock(&dev->struct_mutex);
572 intel_unpin_fb_obj(intel_plane->obj); 756 intel_unpin_fb_obj(intel_plane->obj);
573 intel_plane->obj = NULL; 757 intel_plane->obj = NULL;
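
The intel_wait_for_vblank() added here matters because sprite registers are double buffered: a disable written to the control register only latches at the next vblank, so the old framebuffer must stay pinned until scanout has actually stopped. A condensed sketch of the ordering, using the calls visible in this hunk:

    intel_plane->disable_plane(plane);              /* latches at vblank */
    intel_wait_for_vblank(dev, intel_plane->pipe);  /* scanout now done */
    mutex_lock(&dev->struct_mutex);
    intel_unpin_fb_obj(intel_plane->obj);           /* safe to unpin */
    intel_plane->obj = NULL;
    mutex_unlock(&dev->struct_mutex);
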
@@ -647,6 +831,20 @@ out_unlock:
647 return ret; 831 return ret;
648} 832}
649 833
834void intel_plane_restore(struct drm_plane *plane)
835{
836 struct intel_plane *intel_plane = to_intel_plane(plane);
837
838 if (!plane->crtc || !plane->fb)
839 return;
840
841 intel_update_plane(plane, plane->crtc, plane->fb,
842 intel_plane->crtc_x, intel_plane->crtc_y,
843 intel_plane->crtc_w, intel_plane->crtc_h,
844 intel_plane->src_x, intel_plane->src_y,
845 intel_plane->src_w, intel_plane->src_h);
846}
847
650static const struct drm_plane_funcs intel_plane_funcs = { 848static const struct drm_plane_funcs intel_plane_funcs = {
651 .update_plane = intel_update_plane, 849 .update_plane = intel_update_plane,
652 .disable_plane = intel_disable_plane, 850 .disable_plane = intel_disable_plane,
@@ -670,8 +868,22 @@ static uint32_t snb_plane_formats[] = {
670 DRM_FORMAT_VYUY, 868 DRM_FORMAT_VYUY,
671}; 869};
672 870
871static uint32_t vlv_plane_formats[] = {
872 DRM_FORMAT_RGB565,
873 DRM_FORMAT_ABGR8888,
874 DRM_FORMAT_ARGB8888,
875 DRM_FORMAT_XBGR8888,
876 DRM_FORMAT_XRGB8888,
877 DRM_FORMAT_XBGR2101010,
878 DRM_FORMAT_ABGR2101010,
879 DRM_FORMAT_YUYV,
880 DRM_FORMAT_YVYU,
881 DRM_FORMAT_UYVY,
882 DRM_FORMAT_VYUY,
883};
884
673int 885int
674intel_plane_init(struct drm_device *dev, enum pipe pipe) 886intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
675{ 887{
676 struct intel_plane *intel_plane; 888 struct intel_plane *intel_plane;
677 unsigned long possible_crtcs; 889 unsigned long possible_crtcs;
@@ -710,14 +922,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
710 intel_plane->can_scale = false; 922 intel_plane->can_scale = false;
711 else 923 else
712 intel_plane->can_scale = true; 924 intel_plane->can_scale = true;
713 intel_plane->max_downscale = 2; 925
714 intel_plane->update_plane = ivb_update_plane; 926 if (IS_VALLEYVIEW(dev)) {
715 intel_plane->disable_plane = ivb_disable_plane; 927 intel_plane->max_downscale = 1;
716 intel_plane->update_colorkey = ivb_update_colorkey; 928 intel_plane->update_plane = vlv_update_plane;
717 intel_plane->get_colorkey = ivb_get_colorkey; 929 intel_plane->disable_plane = vlv_disable_plane;
718 930 intel_plane->update_colorkey = vlv_update_colorkey;
719 plane_formats = snb_plane_formats; 931 intel_plane->get_colorkey = vlv_get_colorkey;
720 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 932
933 plane_formats = vlv_plane_formats;
934 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
935 } else {
936 intel_plane->max_downscale = 2;
937 intel_plane->update_plane = ivb_update_plane;
938 intel_plane->disable_plane = ivb_disable_plane;
939 intel_plane->update_colorkey = ivb_update_colorkey;
940 intel_plane->get_colorkey = ivb_get_colorkey;
941
942 plane_formats = snb_plane_formats;
943 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
944 }
721 break; 945 break;
722 946
723 default: 947 default:
@@ -726,6 +950,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
726 } 950 }
727 951
728 intel_plane->pipe = pipe; 952 intel_plane->pipe = pipe;
953 intel_plane->plane = plane;
729 possible_crtcs = (1 << pipe); 954 possible_crtcs = (1 << pipe);
730 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, 955 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
731 &intel_plane_funcs, 956 &intel_plane_funcs,
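
Two conventions in the plane paths above are easy to trip over: source coordinates arrive from userspace in 16.16 fixed point (hence the >> 16 before use), and the hardware size registers are zero based, so width and height are written as value minus one. A small illustrative sketch (SPSIZE packing as in vlv_update_plane() above; variable names are just for exposition):

    uint32_t src_w_px = src_w >> 16;   /* 16.16 fixed point -> pixels */
    uint32_t src_h_px = src_h >> 16;

    /* zero-based sizes: a 1920x1080 plane is programmed as 1079/1919 */
    uint32_t spsize = ((crtc_h - 1) << 16) | (crtc_w - 1);
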
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d808421c1c80..b945bc54207a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -905,11 +905,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
905 905
906 906
907static bool 907static bool
908intel_tv_mode_fixup(struct drm_encoder *encoder, 908intel_tv_compute_config(struct intel_encoder *encoder,
909 const struct drm_display_mode *mode, 909 struct intel_crtc_config *pipe_config)
910 struct drm_display_mode *adjusted_mode)
911{ 910{
912 struct intel_tv *intel_tv = enc_to_intel_tv(encoder); 911 struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
913 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 912 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
914 913
915 if (!tv_mode) 914 if (!tv_mode)
@@ -918,7 +917,10 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
918 if (intel_encoder_check_is_cloned(&intel_tv->base)) 917 if (intel_encoder_check_is_cloned(&intel_tv->base))
919 return false; 918 return false;
920 919
921 adjusted_mode->clock = tv_mode->clock; 920 pipe_config->adjusted_mode.clock = tv_mode->clock;
921 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
922 pipe_config->pipe_bpp = 8*3;
923
922 return true; 924 return true;
923} 925}
924 926
@@ -1485,7 +1487,6 @@ out:
1485} 1487}
1486 1488
1487static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { 1489static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1488 .mode_fixup = intel_tv_mode_fixup,
1489 .mode_set = intel_tv_mode_set, 1490 .mode_set = intel_tv_mode_set,
1490}; 1491};
1491 1492
@@ -1612,7 +1613,7 @@ intel_tv_init(struct drm_device *dev)
1612 * 1613 *
1613 * More recent chipsets favour HDMI rather than integrated S-Video. 1614 * More recent chipsets favour HDMI rather than integrated S-Video.
1614 */ 1615 */
1615 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1616 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1616 1617
1617 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1618 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1618 DRM_MODE_CONNECTOR_SVIDEO); 1619 DRM_MODE_CONNECTOR_SVIDEO);
@@ -1620,6 +1621,7 @@ intel_tv_init(struct drm_device *dev)
1620 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1621 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1621 DRM_MODE_ENCODER_TVDAC); 1622 DRM_MODE_ENCODER_TVDAC);
1622 1623
1624 intel_encoder->compute_config = intel_tv_compute_config;
1623 intel_encoder->enable = intel_enable_tv; 1625 intel_encoder->enable = intel_enable_tv;
1624 intel_encoder->disable = intel_disable_tv; 1626 intel_encoder->disable = intel_disable_tv;
1625 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1627 intel_encoder->get_hw_state = intel_tv_get_hw_state;
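
intel_tv_compute_config() is the same conversion applied to SDVO earlier in this diff: the encoder no longer mutates the drm_display_mode it was handed, it records its constraints in intel_crtc_config instead. The shape of the hook is simple; a schematic example (the clock value is illustrative, not from this driver):

    static bool example_compute_config(struct intel_encoder *encoder,
                                       struct intel_crtc_config *pipe_config)
    {
            /* this output can only do 8 bits per channel */
            pipe_config->pipe_bpp = 8 * 3;

            /* pin the pixel clock to what the output supports */
            pipe_config->adjusted_mode.clock = 108000; /* kHz, illustrative */

            return true;    /* false would reject the mode outright */
    }
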
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4d932c46725d..bf29b2f4d68d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -115,6 +115,8 @@ struct mga_fbdev {
115 void *sysram; 115 void *sysram;
116 int size; 116 int size;
117 struct ttm_bo_kmap_obj mapping; 117 struct ttm_bo_kmap_obj mapping;
118 int x1, y1, x2, y2; /* dirty rect */
119 spinlock_t dirty_lock;
118}; 120};
119 121
120struct mga_crtc { 122struct mga_crtc {
@@ -215,7 +217,7 @@ mgag200_bo(struct ttm_buffer_object *bo)
215{ 217{
216 return container_of(bo, struct mgag200_bo, bo); 218 return container_of(bo, struct mgag200_bo, bo);
217} 219}
218 /* mga_crtc.c */ 220 /* mgag200_crtc.c */
219void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 221void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
220 u16 blue, int regno); 222 u16 blue, int regno);
221void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 223void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -225,7 +227,7 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
225int mgag200_modeset_init(struct mga_device *mdev); 227int mgag200_modeset_init(struct mga_device *mdev);
226void mgag200_modeset_fini(struct mga_device *mdev); 228void mgag200_modeset_fini(struct mga_device *mdev);
227 229
228 /* mga_fbdev.c */ 230 /* mgag200_fb.c */
229int mgag200_fbdev_init(struct mga_device *mdev); 231int mgag200_fbdev_init(struct mga_device *mdev);
230void mgag200_fbdev_fini(struct mga_device *mdev); 232void mgag200_fbdev_fini(struct mga_device *mdev);
231 233
@@ -254,7 +256,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
254 struct drm_device *dev, 256 struct drm_device *dev,
255 uint32_t handle, 257 uint32_t handle,
256 uint64_t *offset); 258 uint64_t *offset);
257 /* mga_i2c.c */ 259 /* mgag200_i2c.c */
258struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev); 260struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
259void mgag200_i2c_destroy(struct mga_i2c_chan *i2c); 261void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
260 262
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d2253f639481..5da824ce9ba1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8; 29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
30 int ret; 30 int ret;
31 bool unmap = false; 31 bool unmap = false;
32 bool store_for_later = false;
33 int x2, y2;
34 unsigned long flags;
32 35
33 obj = mfbdev->mfb.obj; 36 obj = mfbdev->mfb.obj;
34 bo = gem_to_mga_bo(obj); 37 bo = gem_to_mga_bo(obj);
35 38
39 /*
 40	 * try to reserve the BO; if we fail with -EBUSY
41 * then the BO is being moved and we should
42 * store up the damage until later.
43 */
36 ret = mgag200_bo_reserve(bo, true); 44 ret = mgag200_bo_reserve(bo, true);
37 if (ret) { 45 if (ret) {
38 DRM_ERROR("failed to reserve fb bo\n"); 46 if (ret != -EBUSY)
47 return;
48
49 store_for_later = true;
50 }
51
52 x2 = x + width - 1;
53 y2 = y + height - 1;
54 spin_lock_irqsave(&mfbdev->dirty_lock, flags);
55
56 if (mfbdev->y1 < y)
57 y = mfbdev->y1;
58 if (mfbdev->y2 > y2)
59 y2 = mfbdev->y2;
60 if (mfbdev->x1 < x)
61 x = mfbdev->x1;
62 if (mfbdev->x2 > x2)
63 x2 = mfbdev->x2;
64
65 if (store_for_later) {
66 mfbdev->x1 = x;
67 mfbdev->x2 = x2;
68 mfbdev->y1 = y;
69 mfbdev->y2 = y2;
70 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
39 return; 71 return;
40 } 72 }
41 73
74 mfbdev->x1 = mfbdev->y1 = INT_MAX;
75 mfbdev->x2 = mfbdev->y2 = 0;
76 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
77
42 if (!bo->kmap.virtual) { 78 if (!bo->kmap.virtual) {
43 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); 79 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
44 if (ret) { 80 if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
48 } 84 }
49 unmap = true; 85 unmap = true;
50 } 86 }
51 for (i = y; i < y + height; i++) { 87 for (i = y; i <= y2; i++) {
52 /* assume equal stride for now */ 88 /* assume equal stride for now */
53 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp); 89 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
54 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp); 90 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
55 91
56 } 92 }
57 if (unmap) 93 if (unmap)
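
The reworked mga_dirty_update() turns a busy BO reserve from a hard error into deferred damage: the new rectangle is merged into a running bounding box under dirty_lock, and the accumulated box is flushed (and reset to the empty INT_MAX/0 state) by the next update that does get the reservation. The merge step is just a min/max over the two rectangles; condensed into a standalone sketch (kernel min()/max() helpers, same mga_fbdev fields as above):

    static void merge_damage(struct mga_fbdev *m, int x, int y,
                             int width, int height)
    {
            unsigned long flags;

            spin_lock_irqsave(&m->dirty_lock, flags);
            m->x1 = min(m->x1, x);
            m->y1 = min(m->y1, y);
            m->x2 = max(m->x2, x + width - 1);
            m->y2 = max(m->y2, y + height - 1);
            spin_unlock_irqrestore(&m->dirty_lock, flags);
    }
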
@@ -105,12 +141,9 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
105 struct drm_gem_object **gobj_p) 141 struct drm_gem_object **gobj_p)
106{ 142{
107 struct drm_device *dev = afbdev->helper.dev; 143 struct drm_device *dev = afbdev->helper.dev;
108 u32 bpp, depth;
109 u32 size; 144 u32 size;
110 struct drm_gem_object *gobj; 145 struct drm_gem_object *gobj;
111
112 int ret = 0; 146 int ret = 0;
113 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
114 147
115 size = mode_cmd->pitches[0] * mode_cmd->height; 148 size = mode_cmd->pitches[0] * mode_cmd->height;
116 ret = mgag200_gem_create(dev, size, true, &gobj); 149 ret = mgag200_gem_create(dev, size, true, &gobj);
@@ -249,19 +282,19 @@ int mgag200_fbdev_init(struct mga_device *mdev)
249 struct mga_fbdev *mfbdev; 282 struct mga_fbdev *mfbdev;
250 int ret; 283 int ret;
251 284
252 mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL); 285 mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
253 if (!mfbdev) 286 if (!mfbdev)
254 return -ENOMEM; 287 return -ENOMEM;
255 288
256 mdev->mfbdev = mfbdev; 289 mdev->mfbdev = mfbdev;
257 mfbdev->helper.funcs = &mga_fb_helper_funcs; 290 mfbdev->helper.funcs = &mga_fb_helper_funcs;
291 spin_lock_init(&mfbdev->dirty_lock);
258 292
259 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, 293 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
260 mdev->num_crtc, MGAG200FB_CONN_LIMIT); 294 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
261 if (ret) { 295 if (ret)
262 kfree(mfbdev);
263 return ret; 296 return ret;
264 } 297
265 drm_fb_helper_single_add_all_connectors(&mfbdev->helper); 298 drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
266 299
267 /* disable all the possible outputs/crtcs before entering KMS mode */ 300 /* disable all the possible outputs/crtcs before entering KMS mode */
@@ -278,6 +311,4 @@ void mgag200_fbdev_fini(struct mga_device *mdev)
278 return; 311 return;
279 312
280 mga_fbdev_destroy(mdev->dev, mdev->mfbdev); 313 mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
281 kfree(mdev->mfbdev);
282 mdev->mfbdev = NULL;
283} 314}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 64297c72464f..99059237da38 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -76,15 +76,6 @@ static const struct drm_mode_config_funcs mga_mode_funcs = {
76 .fb_create = mgag200_user_framebuffer_create, 76 .fb_create = mgag200_user_framebuffer_create,
77}; 77};
78 78
79/* Unmap the framebuffer from the core and release the memory */
80static void mga_vram_fini(struct mga_device *mdev)
81{
82 pci_iounmap(mdev->dev->pdev, mdev->rmmio);
83 mdev->rmmio = NULL;
84 if (mdev->mc.vram_base)
85 release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
86}
87
88static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem) 79static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
89{ 80{
90 int offset; 81 int offset;
@@ -140,7 +131,7 @@ static int mga_vram_init(struct mga_device *mdev)
140 remove_conflicting_framebuffers(aper, "mgafb", true); 131 remove_conflicting_framebuffers(aper, "mgafb", true);
141 kfree(aper); 132 kfree(aper);
142 133
143 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, 134 if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
144 "mgadrmfb_vram")) { 135 "mgadrmfb_vram")) {
145 DRM_ERROR("can't reserve VRAM\n"); 136 DRM_ERROR("can't reserve VRAM\n");
146 return -ENXIO; 137 return -ENXIO;
@@ -173,13 +164,13 @@ static int mgag200_device_init(struct drm_device *dev,
173 mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1); 164 mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
174 mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1); 165 mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
175 166
176 if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size, 167 if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
177 "mgadrmfb_mmio")) { 168 "mgadrmfb_mmio")) {
178 DRM_ERROR("can't reserve mmio registers\n"); 169 DRM_ERROR("can't reserve mmio registers\n");
179 return -ENOMEM; 170 return -ENOMEM;
180 } 171 }
181 172
182 mdev->rmmio = pci_iomap(dev->pdev, 1, 0); 173 mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
183 if (mdev->rmmio == NULL) 174 if (mdev->rmmio == NULL)
184 return -ENOMEM; 175 return -ENOMEM;
185 176
@@ -188,10 +179,8 @@ static int mgag200_device_init(struct drm_device *dev,
188 mdev->reg_1e24 = RREG32(0x1e24); 179 mdev->reg_1e24 = RREG32(0x1e24);
189 180
190 ret = mga_vram_init(mdev); 181 ret = mga_vram_init(mdev);
191 if (ret) { 182 if (ret)
192 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
193 return ret; 183 return ret;
194 }
195 184
196 mdev->bpp_shifts[0] = 0; 185 mdev->bpp_shifts[0] = 0;
197 mdev->bpp_shifts[1] = 1; 186 mdev->bpp_shifts[1] = 1;
@@ -200,12 +189,6 @@ static int mgag200_device_init(struct drm_device *dev,
200 return 0; 189 return 0;
201} 190}
202 191
203void mgag200_device_fini(struct mga_device *mdev)
204{
205 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
206 mga_vram_fini(mdev);
207}
208
209/* 192/*
210 * Functions here will be called by the core once it's bound the driver to 193 * Functions here will be called by the core once it's bound the driver to
211 * a PCI device 194 * a PCI device
@@ -217,7 +200,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
217 struct mga_device *mdev; 200 struct mga_device *mdev;
218 int r; 201 int r;
219 202
220 mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL); 203 mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
221 if (mdev == NULL) 204 if (mdev == NULL)
222 return -ENOMEM; 205 return -ENOMEM;
223 dev->dev_private = (void *)mdev; 206 dev->dev_private = (void *)mdev;
@@ -234,8 +217,6 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
234 217
235 drm_mode_config_init(dev); 218 drm_mode_config_init(dev);
236 dev->mode_config.funcs = (void *)&mga_mode_funcs; 219 dev->mode_config.funcs = (void *)&mga_mode_funcs;
237 dev->mode_config.min_width = 0;
238 dev->mode_config.min_height = 0;
239 dev->mode_config.preferred_depth = 24; 220 dev->mode_config.preferred_depth = 24;
240 dev->mode_config.prefer_shadow = 1; 221 dev->mode_config.prefer_shadow = 1;
241 222
@@ -258,8 +239,6 @@ int mgag200_driver_unload(struct drm_device *dev)
258 mgag200_fbdev_fini(mdev); 239 mgag200_fbdev_fini(mdev);
259 drm_mode_config_cleanup(dev); 240 drm_mode_config_cleanup(dev);
260 mgag200_mm_fini(mdev); 241 mgag200_mm_fini(mdev);
261 mgag200_device_fini(mdev);
262 kfree(mdev);
263 dev->dev_private = NULL; 242 dev->dev_private = NULL;
264 return 0; 243 return 0;
265} 244}
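
The devm_kzalloc()/devm_request_mem_region()/pcim_iomap() conversions in this file all lean on the managed-resource API: everything is tied to the lifetime of the underlying struct device, so the hand-written unwind paths (release_mem_region(), pci_iounmap(), kfree(), and mgag200_device_fini() itself) can simply be deleted and error paths just return. In outline (a sketch, assuming pcim_enable_device() ran earlier in probe):

    static int example_probe(struct pci_dev *pdev)
    {
            struct mga_device *mdev;
            void __iomem *mmio;

            mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
            if (!mdev)
                    return -ENOMEM;

            if (!devm_request_mem_region(&pdev->dev,
                                         pci_resource_start(pdev, 1),
                                         pci_resource_len(pdev, 1),
                                         "mgadrmfb_mmio"))
                    return -ENOMEM;         /* nothing to unwind by hand */

            mmio = pcim_iomap(pdev, 1, 0);  /* unmapped automatically */
            if (!mmio)
                    return -ENOMEM;

            return 0;
    }
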
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 78d8e919509f..f9889658329b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1254,9 +1254,8 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
1254}; 1254};
1255 1255
1256/* CRTC setup */ 1256/* CRTC setup */
1257static void mga_crtc_init(struct drm_device *dev) 1257static void mga_crtc_init(struct mga_device *mdev)
1258{ 1258{
1259 struct mga_device *mdev = dev->dev_private;
1260 struct mga_crtc *mga_crtc; 1259 struct mga_crtc *mga_crtc;
1261 int i; 1260 int i;
1262 1261
@@ -1267,7 +1266,7 @@ static void mga_crtc_init(struct drm_device *dev)
1267 if (mga_crtc == NULL) 1266 if (mga_crtc == NULL)
1268 return; 1267 return;
1269 1268
1270 drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs); 1269 drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
1271 1270
1272 drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE); 1271 drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
1273 mdev->mode_info.crtc = mga_crtc; 1272 mdev->mode_info.crtc = mga_crtc;
@@ -1522,7 +1521,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
1522 1521
1523 mdev->dev->mode_config.fb_base = mdev->mc.vram_base; 1522 mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
1524 1523
1525 mga_crtc_init(mdev->dev); 1524 mga_crtc_init(mdev);
1526 1525
1527 encoder = mga_encoder_init(mdev->dev); 1526 encoder = mga_encoder_init(mdev->dev);
1528 if (!encoder) { 1527 if (!encoder) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d9201945..401c9891d3a8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
315 315
316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); 316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
317 if (ret) { 317 if (ret) {
318 if (ret != -ERESTARTSYS) 318 if (ret != -ERESTARTSYS && ret != -EBUSY)
319 DRM_ERROR("reserve failed %p\n", bo); 319 DRM_ERROR("reserve failed %p %d\n", bo, ret);
320 return ret; 320 return ret;
321 } 321 }
322 return 0; 322 return 0;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 90f9140eeefd..998e8b4444f3 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -53,15 +53,6 @@ nouveau-y += core/subdev/clock/nva3.o
53nouveau-y += core/subdev/clock/nvc0.o 53nouveau-y += core/subdev/clock/nvc0.o
54nouveau-y += core/subdev/clock/pllnv04.o 54nouveau-y += core/subdev/clock/pllnv04.o
55nouveau-y += core/subdev/clock/pllnva3.o 55nouveau-y += core/subdev/clock/pllnva3.o
56nouveau-y += core/subdev/device/base.o
57nouveau-y += core/subdev/device/nv04.o
58nouveau-y += core/subdev/device/nv10.o
59nouveau-y += core/subdev/device/nv20.o
60nouveau-y += core/subdev/device/nv30.o
61nouveau-y += core/subdev/device/nv40.o
62nouveau-y += core/subdev/device/nv50.o
63nouveau-y += core/subdev/device/nvc0.o
64nouveau-y += core/subdev/device/nve0.o
65nouveau-y += core/subdev/devinit/base.o 56nouveau-y += core/subdev/devinit/base.o
66nouveau-y += core/subdev/devinit/nv04.o 57nouveau-y += core/subdev/devinit/nv04.o
67nouveau-y += core/subdev/devinit/nv05.o 58nouveau-y += core/subdev/devinit/nv05.o
@@ -126,6 +117,7 @@ nouveau-y += core/subdev/therm/ic.o
126nouveau-y += core/subdev/therm/temp.o 117nouveau-y += core/subdev/therm/temp.o
127nouveau-y += core/subdev/therm/nv40.o 118nouveau-y += core/subdev/therm/nv40.o
128nouveau-y += core/subdev/therm/nv50.o 119nouveau-y += core/subdev/therm/nv50.o
120nouveau-y += core/subdev/therm/nv84.o
129nouveau-y += core/subdev/therm/nva3.o 121nouveau-y += core/subdev/therm/nva3.o
130nouveau-y += core/subdev/therm/nvd0.o 122nouveau-y += core/subdev/therm/nvd0.o
131nouveau-y += core/subdev/timer/base.o 123nouveau-y += core/subdev/timer/base.o
@@ -150,6 +142,15 @@ nouveau-y += core/engine/copy/nvc0.o
150nouveau-y += core/engine/copy/nve0.o 142nouveau-y += core/engine/copy/nve0.o
151nouveau-y += core/engine/crypt/nv84.o 143nouveau-y += core/engine/crypt/nv84.o
152nouveau-y += core/engine/crypt/nv98.o 144nouveau-y += core/engine/crypt/nv98.o
145nouveau-y += core/engine/device/base.o
146nouveau-y += core/engine/device/nv04.o
147nouveau-y += core/engine/device/nv10.o
148nouveau-y += core/engine/device/nv20.o
149nouveau-y += core/engine/device/nv30.o
150nouveau-y += core/engine/device/nv40.o
151nouveau-y += core/engine/device/nv50.o
152nouveau-y += core/engine/device/nvc0.o
153nouveau-y += core/engine/device/nve0.o
153nouveau-y += core/engine/disp/base.o 154nouveau-y += core/engine/disp/base.o
154nouveau-y += core/engine/disp/nv04.o 155nouveau-y += core/engine/disp/nv04.o
155nouveau-y += core/engine/disp/nv50.o 156nouveau-y += core/engine/disp/nv50.o
@@ -159,6 +160,7 @@ nouveau-y += core/engine/disp/nva0.o
159nouveau-y += core/engine/disp/nva3.o 160nouveau-y += core/engine/disp/nva3.o
160nouveau-y += core/engine/disp/nvd0.o 161nouveau-y += core/engine/disp/nvd0.o
161nouveau-y += core/engine/disp/nve0.o 162nouveau-y += core/engine/disp/nve0.o
163nouveau-y += core/engine/disp/nvf0.o
162nouveau-y += core/engine/disp/dacnv50.o 164nouveau-y += core/engine/disp/dacnv50.o
163nouveau-y += core/engine/disp/dport.o 165nouveau-y += core/engine/disp/dport.o
164nouveau-y += core/engine/disp/hdanva3.o 166nouveau-y += core/engine/disp/hdanva3.o
@@ -212,7 +214,7 @@ nouveau-y += core/engine/vp/nve0.o
212 214
213# drm/core 215# drm/core
214nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o 216nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
215nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o 217nouveau-y += nouveau_vga.o nouveau_agp.o
216nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o 218nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
217nouveau-y += nouveau_prime.o nouveau_abi16.o 219nouveau-y += nouveau_prime.o nouveau_abi16.o
218nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o 220nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
@@ -224,9 +226,7 @@ nouveau-y += nouveau_connector.o nouveau_dp.o
224nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o 226nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
225 227
226# drm/kms/nv04:nv50 228# drm/kms/nv04:nv50
227nouveau-y += nouveau_hw.o nouveau_calc.o 229include $(src)/dispnv04/Makefile
228nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
229nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
230 230
231# drm/kms/nv50- 231# drm/kms/nv50-
232nouveau-y += nv50_display.o 232nouveau-y += nv50_display.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 295c22165eac..9079c0ac58e6 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -27,7 +27,7 @@
27#include <core/handle.h> 27#include <core/handle.h>
28#include <core/option.h> 28#include <core/option.h>
29 29
30#include <subdev/device.h> 30#include <engine/device.h>
31 31
32static void 32static void
33nouveau_client_dtor(struct nouveau_object *object) 33nouveau_client_dtor(struct nouveau_object *object)
@@ -58,8 +58,9 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
58 return -ENODEV; 58 return -ENODEV;
59 59
60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass, 60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
61 NV_CLIENT_CLASS, nouveau_device_sclass, 61 NV_CLIENT_CLASS, NULL,
62 0, length, pobject); 62 (1ULL << NVDEV_ENGINE_DEVICE),
63 length, pobject);
63 client = *pobject; 64 client = *pobject;
64 if (ret) 65 if (ret)
65 return ret; 66 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index 09b3bd502fd0..c8bed4a26833 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -33,7 +33,6 @@ nouveau_engine_create_(struct nouveau_object *parent,
33 const char *iname, const char *fname, 33 const char *iname, const char *fname,
34 int length, void **pobject) 34 int length, void **pobject)
35{ 35{
36 struct nouveau_device *device = nv_device(parent);
37 struct nouveau_engine *engine; 36 struct nouveau_engine *engine;
38 int ret; 37 int ret;
39 38
@@ -43,7 +42,8 @@ nouveau_engine_create_(struct nouveau_object *parent,
43 if (ret) 42 if (ret)
44 return ret; 43 return ret;
45 44
 46	if (!nouveau_boolopt(device->cfgopt, iname, enable)) {		 45	if (parent &&
46 !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
47 if (!enable) 47 if (!enable)
48 nv_warn(engine, "disabled, %s=1 to enable\n", iname); 48 nv_warn(engine, "disabled, %s=1 to enable\n", iname);
49 return -ENODEV; 49 return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 6d01e0f0fc8a..7eb81c1b6fab 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -27,8 +27,10 @@ static void
27nouveau_event_put_locked(struct nouveau_event *event, int index, 27nouveau_event_put_locked(struct nouveau_event *event, int index,
28 struct nouveau_eventh *handler) 28 struct nouveau_eventh *handler)
29{ 29{
30 if (!--event->index[index].refs) 30 if (!--event->index[index].refs) {
31 event->disable(event, index); 31 if (event->disable)
32 event->disable(event, index);
33 }
32 list_del(&handler->head); 34 list_del(&handler->head);
33} 35}
34 36
@@ -53,8 +55,10 @@ nouveau_event_get(struct nouveau_event *event, int index,
53 spin_lock_irqsave(&event->lock, flags); 55 spin_lock_irqsave(&event->lock, flags);
54 if (index < event->index_nr) { 56 if (index < event->index_nr) {
55 list_add(&handler->head, &event->index[index].list); 57 list_add(&handler->head, &event->index[index].list);
56 if (!event->index[index].refs++) 58 if (!event->index[index].refs++) {
57 event->enable(event, index); 59 if (event->enable)
60 event->enable(event, index);
61 }
58 } 62 }
59 spin_unlock_irqrestore(&event->lock, flags); 63 spin_unlock_irqrestore(&event->lock, flags);
60} 64}
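
This event change makes the enable/disable hooks optional while keeping the existing refcount discipline: the hardware interrupt source is switched on only for the first handler (0 -> 1) and off only when the last one leaves (1 -> 0). The pattern in isolation, with illustrative names and the locking elided:

    struct src {
            int refs;
            void (*enable)(struct src *);   /* both hooks may be NULL */
            void (*disable)(struct src *);
    };

    static void src_get(struct src *s)
    {
            if (!s->refs++ && s->enable)
                    s->enable(s);           /* first user: power it on */
    }

    static void src_put(struct src *s)
    {
            if (!--s->refs && s->disable)
                    s->disable(s);          /* last user: power it off */
    }
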
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 3b2e7b6304d3..7f48e288215f 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -136,26 +136,30 @@ nouveau_object_ctor(struct nouveau_object *parent,
136 struct nouveau_object **pobject) 136 struct nouveau_object **pobject)
137{ 137{
138 struct nouveau_ofuncs *ofuncs = oclass->ofuncs; 138 struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
139 struct nouveau_object *object = NULL;
139 int ret; 140 int ret;
140 141
141 *pobject = NULL; 142 ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
142 143 *pobject = object;
143 ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
144 if (ret < 0) { 144 if (ret < 0) {
145 if (ret != -ENODEV) { 145 if (ret != -ENODEV) {
146 nv_error(parent, "failed to create 0x%08x, %d\n", 146 nv_error(parent, "failed to create 0x%08x, %d\n",
147 oclass->handle, ret); 147 oclass->handle, ret);
148 } 148 }
149 149
150 if (*pobject) { 150 if (object) {
151 ofuncs->dtor(*pobject); 151 ofuncs->dtor(object);
152 *pobject = NULL; 152 *pobject = NULL;
153 } 153 }
154 154
155 return ret; 155 return ret;
156 } 156 }
157 157
158 nv_debug(*pobject, "created\n"); 158 if (ret == 0) {
159 nv_debug(object, "created\n");
160 atomic_set(&object->refcount, 1);
161 }
162
159 return 0; 163 return 0;
160} 164}
161 165
@@ -327,6 +331,7 @@ nouveau_object_inc(struct nouveau_object *object)
327 } 331 }
328 332
329 ret = nv_ofuncs(object)->init(object); 333 ret = nv_ofuncs(object)->init(object);
334 atomic_set(&object->usecount, 1);
330 if (ret) { 335 if (ret) {
331 nv_error(object, "init failed, %d\n", ret); 336 nv_error(object, "init failed, %d\n", ret);
332 goto fail_self; 337 goto fail_self;
@@ -357,6 +362,7 @@ nouveau_object_decf(struct nouveau_object *object)
357 nv_trace(object, "stopping...\n"); 362 nv_trace(object, "stopping...\n");
358 363
359 ret = nv_ofuncs(object)->fini(object, false); 364 ret = nv_ofuncs(object)->fini(object, false);
365 atomic_set(&object->usecount, 0);
360 if (ret) 366 if (ret)
361 nv_warn(object, "failed fini, %d\n", ret); 367 nv_warn(object, "failed fini, %d\n", ret);
362 368
@@ -381,6 +387,7 @@ nouveau_object_decs(struct nouveau_object *object)
381 nv_trace(object, "suspending...\n"); 387 nv_trace(object, "suspending...\n");
382 388
383 ret = nv_ofuncs(object)->fini(object, true); 389 ret = nv_ofuncs(object)->fini(object, true);
390 atomic_set(&object->usecount, 0);
384 if (ret) { 391 if (ret) {
385 nv_error(object, "failed suspend, %d\n", ret); 392 nv_error(object, "failed suspend, %d\n", ret);
386 return ret; 393 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index db7c54943102..313380ce632d 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -24,6 +24,7 @@
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/parent.h> 26#include <core/parent.h>
27#include <core/client.h>
27 28
28int 29int
29nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, 30nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
@@ -50,7 +51,12 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
50 while (mask) { 51 while (mask) {
51 int i = ffsll(mask) - 1; 52 int i = ffsll(mask) - 1;
52 53
53 if ((engine = nouveau_engine(parent, i))) { 54 if (nv_iclass(parent, NV_CLIENT_CLASS))
55 engine = nv_engine(nv_client(parent)->device);
56 else
57 engine = nouveau_engine(parent, i);
58
59 if (engine) {
54 oclass = engine->sclass; 60 oclass = engine->sclass;
55 while (oclass->ofuncs) { 61 while (oclass->ofuncs) {
56 if ((oclass->handle & 0xffff) == handle) { 62 if ((oclass->handle & 0xffff) == handle) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 3937ced5c753..4c72571655ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
29 29
30#include <core/class.h> 30#include <core/class.h>
31 31
32#include <subdev/device.h> 32#include <engine/device.h>
33 33
34static DEFINE_MUTEX(nv_devices_mutex); 34static DEFINE_MUTEX(nv_devices_mutex);
35static LIST_HEAD(nv_devices); 35static LIST_HEAD(nv_devices);
@@ -55,7 +55,6 @@ nouveau_device_find(u64 name)
55struct nouveau_devobj { 55struct nouveau_devobj {
56 struct nouveau_parent base; 56 struct nouveau_parent base;
57 struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; 57 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
58 bool created;
59}; 58};
60 59
61static const u64 disable_map[] = { 60static const u64 disable_map[] = {
@@ -173,7 +172,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
173 case 0xa0: device->card_type = NV_50; break; 172 case 0xa0: device->card_type = NV_50; break;
174 case 0xc0: device->card_type = NV_C0; break; 173 case 0xc0: device->card_type = NV_C0; break;
175 case 0xd0: device->card_type = NV_D0; break; 174 case 0xd0: device->card_type = NV_D0; break;
176 case 0xe0: device->card_type = NV_E0; break; 175 case 0xe0:
176 case 0xf0: device->card_type = NV_E0; break;
177 default: 177 default:
178 break; 178 break;
179 } 179 }
@@ -238,26 +238,24 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
238 } 238 }
239 239
240 /* ensure requested subsystems are available for use */ 240 /* ensure requested subsystems are available for use */
241 for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) { 241 for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
242 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i))) 242 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
243 continue; 243 continue;
244 244
245 if (!device->subdev[i]) { 245 if (device->subdev[i]) {
246 ret = nouveau_object_ctor(nv_object(device), NULL,
247 oclass, NULL, i,
248 &devobj->subdev[i]);
249 if (ret == -ENODEV)
250 continue;
251 if (ret)
252 return ret;
253
254 if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
255 nouveau_subdev_reset(devobj->subdev[i]);
256 } else {
257 nouveau_object_ref(device->subdev[i], 246 nouveau_object_ref(device->subdev[i],
258 &devobj->subdev[i]); 247 &devobj->subdev[i]);
248 continue;
259 } 249 }
260 250
251 ret = nouveau_object_ctor(nv_object(device), NULL,
252 oclass, NULL, i,
253 &devobj->subdev[i]);
254 if (ret == -ENODEV)
255 continue;
256 if (ret)
257 return ret;
258
261 /* note: can't init *any* subdevs until devinit has been run 259 /* note: can't init *any* subdevs until devinit has been run
262 * due to not knowing exactly what the vbios init tables will 260 * due to not knowing exactly what the vbios init tables will
263 * mess with. devinit also can't be run until all of its 261 * mess with. devinit also can't be run until all of its
@@ -273,6 +271,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
273 ret = nouveau_object_inc(subdev); 271 ret = nouveau_object_inc(subdev);
274 if (ret) 272 if (ret)
275 return ret; 273 return ret;
274 atomic_dec(&nv_object(device)->usecount);
275 } else
276 if (subdev) {
277 nouveau_subdev_reset(subdev);
276 } 278 }
277 } 279 }
278 } 280 }
@@ -292,74 +294,6 @@ nouveau_devobj_dtor(struct nouveau_object *object)
292 nouveau_parent_destroy(&devobj->base); 294 nouveau_parent_destroy(&devobj->base);
293} 295}
294 296
295static int
296nouveau_devobj_init(struct nouveau_object *object)
297{
298 struct nouveau_devobj *devobj = (void *)object;
299 struct nouveau_object *subdev;
300 int ret, i;
301
302 ret = nouveau_parent_init(&devobj->base);
303 if (ret)
304 return ret;
305
306 for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
307 if ((subdev = devobj->subdev[i])) {
308 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
309 ret = nouveau_object_inc(subdev);
310 if (ret)
311 goto fail;
312 }
313 }
314 }
315
316 devobj->created = true;
317 return 0;
318
319fail:
320 for (--i; i >= 0; i--) {
321 if ((subdev = devobj->subdev[i])) {
322 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
323 nouveau_object_dec(subdev, false);
324 }
325 }
326
327 return ret;
328}
329
330static int
331nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
332{
333 struct nouveau_devobj *devobj = (void *)object;
334 struct nouveau_object *subdev;
335 int ret, i;
336
337 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
338 if ((subdev = devobj->subdev[i])) {
339 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
340 ret = nouveau_object_dec(subdev, suspend);
341 if (ret && suspend)
342 goto fail;
343 }
344 }
345 }
346
347 ret = nouveau_parent_fini(&devobj->base, suspend);
348fail:
349 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
350 if ((subdev = devobj->subdev[i])) {
351 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
352 ret = nouveau_object_inc(subdev);
353 if (ret) {
354 /* XXX */
355 }
356 }
357 }
358 }
359
360 return ret;
361}
362
363static u8 297static u8
364nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) 298nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
365{ 299{
@@ -400,8 +334,8 @@ static struct nouveau_ofuncs
400nouveau_devobj_ofuncs = { 334nouveau_devobj_ofuncs = {
401 .ctor = nouveau_devobj_ctor, 335 .ctor = nouveau_devobj_ctor,
402 .dtor = nouveau_devobj_dtor, 336 .dtor = nouveau_devobj_dtor,
403 .init = nouveau_devobj_init, 337 .init = _nouveau_parent_init,
404 .fini = nouveau_devobj_fini, 338 .fini = _nouveau_parent_fini,
405 .rd08 = nouveau_devobj_rd08, 339 .rd08 = nouveau_devobj_rd08,
406 .rd16 = nouveau_devobj_rd16, 340 .rd16 = nouveau_devobj_rd16,
407 .rd32 = nouveau_devobj_rd32, 341 .rd32 = nouveau_devobj_rd32,
@@ -413,12 +347,76 @@ nouveau_devobj_ofuncs = {
413/****************************************************************************** 347/******************************************************************************
414 * nouveau_device: engine functions 348 * nouveau_device: engine functions
415 *****************************************************************************/ 349 *****************************************************************************/
416struct nouveau_oclass 350static struct nouveau_oclass
417nouveau_device_sclass[] = { 351nouveau_device_sclass[] = {
418 { 0x0080, &nouveau_devobj_ofuncs }, 352 { 0x0080, &nouveau_devobj_ofuncs },
419 {} 353 {}
420}; 354};
421 355
356static int
357nouveau_device_fini(struct nouveau_object *object, bool suspend)
358{
359 struct nouveau_device *device = (void *)object;
360 struct nouveau_object *subdev;
361 int ret, i;
362
363 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
364 if ((subdev = device->subdev[i])) {
365 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
366 ret = nouveau_object_dec(subdev, suspend);
367 if (ret && suspend)
368 goto fail;
369 }
370 }
371 }
372
373 ret = 0;
374fail:
375 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
376 if ((subdev = device->subdev[i])) {
377 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
378 ret = nouveau_object_inc(subdev);
379 if (ret) {
380 /* XXX */
381 }
382 }
383 }
384 }
385
386 return ret;
387}
388
389static int
390nouveau_device_init(struct nouveau_object *object)
391{
392 struct nouveau_device *device = (void *)object;
393 struct nouveau_object *subdev;
394 int ret, i;
395
396 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
397 if ((subdev = device->subdev[i])) {
398 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
399 ret = nouveau_object_inc(subdev);
400 if (ret)
401 goto fail;
402 } else {
403 nouveau_subdev_reset(subdev);
404 }
405 }
406 }
407
408 ret = 0;
409fail:
410 for (--i; ret && i >= 0; i--) {
411 if ((subdev = device->subdev[i])) {
412 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
413 nouveau_object_dec(subdev, false);
414 }
415 }
416
417 return ret;
418}
419
422static void 420static void
423nouveau_device_dtor(struct nouveau_object *object) 421nouveau_device_dtor(struct nouveau_object *object)
424{ 422{
@@ -428,17 +426,19 @@ nouveau_device_dtor(struct nouveau_object *object)
428 list_del(&device->head); 426 list_del(&device->head);
429 mutex_unlock(&nv_devices_mutex); 427 mutex_unlock(&nv_devices_mutex);
430 428
431 if (device->base.mmio) 429 if (nv_subdev(device)->mmio)
432 iounmap(device->base.mmio); 430 iounmap(nv_subdev(device)->mmio);
433 431
434 nouveau_subdev_destroy(&device->base); 432 nouveau_engine_destroy(&device->base);
435} 433}
436 434
437static struct nouveau_oclass 435static struct nouveau_oclass
438nouveau_device_oclass = { 436nouveau_device_oclass = {
439 .handle = NV_SUBDEV(DEVICE, 0x00), 437 .handle = NV_ENGINE(DEVICE, 0x00),
440 .ofuncs = &(struct nouveau_ofuncs) { 438 .ofuncs = &(struct nouveau_ofuncs) {
441 .dtor = nouveau_device_dtor, 439 .dtor = nouveau_device_dtor,
440 .init = nouveau_device_init,
441 .fini = nouveau_device_fini,
442 }, 442 },
443}; 443};
444 444
@@ -456,13 +456,12 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
456 goto done; 456 goto done;
457 } 457 }
458 458
459 ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0, 459 ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
460 "DEVICE", "device", length, pobject); 460 "DEVICE", "device", length, pobject);
461 device = *pobject; 461 device = *pobject;
462 if (ret) 462 if (ret)
463 goto done; 463 goto done;
464 464
465 atomic_set(&nv_object(device)->usecount, 2);
466 device->pdev = pdev; 465 device->pdev = pdev;
467 device->handle = name; 466 device->handle = name;
468 device->cfgopt = cfg; 467 device->cfgopt = cfg;
@@ -470,6 +469,7 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
470 device->name = sname; 469 device->name = sname;
471 470
472 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); 471 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
472 nv_engine(device)->sclass = nouveau_device_sclass;
473 list_add(&device->head, &nv_devices); 473 list_add(&device->head, &nv_devices);
474done: 474done:
475 mutex_unlock(&nv_devices_mutex); 475 mutex_unlock(&nv_devices_mutex);
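
The hunks above move per-subdev start/stop out of the short-lived devobj and onto the device engine itself, keeping the usual kernel ordering discipline: bring subdevs up front to back, roll back the ones already started if one fails, and tear down back to front (with a forward re-inc pass when a suspend-time fini fails). A minimal self-contained sketch of that ordering contract, with hypothetical unit_start()/unit_stop() standing in for nouveau_object_inc()/nouveau_object_dec():

    /* Editor's sketch, not from the patch: the init/fini ordering the new
     * nouveau_device_init()/nouveau_device_fini() follow. Names here are
     * hypothetical stand-ins for the nouveau object refcount helpers. */
    #include <stdio.h>

    #define NR_UNITS 4

    static int unit_start(int i) { printf("start %d\n", i); return 0; }
    static void unit_stop(int i) { printf("stop %d\n", i); }

    static int device_init(void)
    {
        int ret = 0, i;

        for (i = 0; i < NR_UNITS; i++) {
            ret = unit_start(i);
            if (ret)
                break;
        }
        /* on failure, undo only the units already started, newest first */
        for (--i; ret && i >= 0; i--)
            unit_stop(i);
        return ret;
    }

    static void device_fini(void)
    {
        int i;

        /* teardown mirrors init in reverse */
        for (i = NR_UNITS - 1; i >= 0; i--)
            unit_stop(i);
    }

    int main(void)
    {
        if (device_init() == 0)
            device_fini();
        return 0;
    }
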
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 473c5c03d3c9..a0284cf09c0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/i2c.h> 27#include <subdev/i2c.h>
@@ -34,6 +33,7 @@
34#include <subdev/instmem.h> 33#include <subdev/instmem.h>
35#include <subdev/vm.h> 34#include <subdev/vm.h>
36 35
36#include <engine/device.h>
37#include <engine/dmaobj.h> 37#include <engine/dmaobj.h>
38#include <engine/fifo.h> 38#include <engine/fifo.h>
39#include <engine/software.h> 39#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index d0774f5bebe1..1b7809a095c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
35#include <subdev/instmem.h> 34#include <subdev/instmem.h>
36#include <subdev/vm.h> 35#include <subdev/vm.h>
37 36
37#include <engine/device.h>
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40#include <engine/software.h> 40#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index ab920e0dc45b..12a4005fa619 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -36,6 +35,7 @@
36#include <subdev/instmem.h> 35#include <subdev/instmem.h>
37#include <subdev/vm.h> 36#include <subdev/vm.h>
38 37
38#include <engine/device.h>
39#include <engine/dmaobj.h> 39#include <engine/dmaobj.h>
40#include <engine/fifo.h> 40#include <engine/fifo.h>
41#include <engine/software.h> 41#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 5f2110261b04..cef0f1ea4c21 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
35#include <subdev/instmem.h> 34#include <subdev/instmem.h>
36#include <subdev/vm.h> 35#include <subdev/vm.h>
37 36
37#include <engine/device.h>
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40#include <engine/software.h> 40#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index f3d55efe9ac9..1719cb0ee595 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/vm.h> 27#include <subdev/vm.h>
@@ -37,6 +36,7 @@
37#include <subdev/instmem.h> 36#include <subdev/instmem.h>
38#include <subdev/vm.h> 37#include <subdev/vm.h>
39 38
39#include <engine/device.h>
40#include <engine/dmaobj.h> 40#include <engine/dmaobj.h>
41#include <engine/fifo.h> 41#include <engine/fifo.h>
42#include <engine/software.h> 42#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 5ed2fa51ddc2..5e8c3de75593 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -38,6 +37,7 @@
38#include <subdev/vm.h> 37#include <subdev/vm.h>
39#include <subdev/bar.h> 38#include <subdev/bar.h>
40 39
40#include <engine/device.h>
41#include <engine/dmaobj.h> 41#include <engine/dmaobj.h>
42#include <engine/fifo.h> 42#include <engine/fifo.h>
43#include <engine/software.h> 43#include <engine/software.h>
@@ -83,7 +83,7 @@ nv50_identify(struct nouveau_device *device)
83 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 83 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
84 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 84 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
85 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 85 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
86 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 86 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
87 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 87 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
88 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 88 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
89 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 89 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -109,7 +109,7 @@ nv50_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 109 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
112 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 112 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
113 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 113 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
114 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 114 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 115 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
135 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 135 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
136 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 136 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
137 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 137 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
138 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 138 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
139 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 139 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
140 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 140 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
141 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 141 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -161,7 +161,7 @@ nv50_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 161 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
162 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 162 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
163 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 163 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
164 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 164 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
165 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 165 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 167 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -187,7 +187,7 @@ nv50_identify(struct nouveau_device *device)
187 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 187 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
188 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 188 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
190 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 190 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
192 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 192 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
193 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 193 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -213,7 +213,7 @@ nv50_identify(struct nouveau_device *device)
213 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 213 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
214 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 214 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
215 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 215 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -265,7 +265,7 @@ nv50_identify(struct nouveau_device *device)
265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -291,7 +291,7 @@ nv50_identify(struct nouveau_device *device)
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 4393eb4d6564..955af122c3a6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
40#include <subdev/vm.h> 39#include <subdev/vm.h>
41#include <subdev/bar.h> 40#include <subdev/bar.h>
42 41
42#include <engine/device.h>
43#include <engine/dmaobj.h> 43#include <engine/dmaobj.h>
44#include <engine/fifo.h> 44#include <engine/fifo.h>
45#include <engine/software.h> 45#include <engine/software.h>
@@ -285,6 +285,34 @@ nvc0_identify(struct nouveau_device *device)
285 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 285 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
286 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 286 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
287 break; 287 break;
288 case 0xd7:
289 device->cname = "GF117";
290 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
299 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
300 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
301 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
302 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
303 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
304 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
305 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
306 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
307 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
308 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
309 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
310 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
311 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
312 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
313 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
314 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
315 break;
288 default: 316 default:
289 nv_fatal(device, "unknown Fermi chipset\n"); 317 nv_fatal(device, "unknown Fermi chipset\n");
290 return -EINVAL; 318 return -EINVAL;
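
GF117 enters the Fermi identify switch with a mix of GF119-era (nvd0) and GF100-era (nvc0) blocks. Schematically, identify is just a switch on the chipset byte that fills a table of per-subdev class pointers; a reduced sketch with hypothetical names:

    /* Editor's sketch of the identify pattern; names are made-up stand-ins
     * for the real oclass symbols. */
    #include <stdio.h>

    enum { SUBDEV_THERM, SUBDEV_MC, SUBDEV_NR };

    struct oclass { const char *name; };

    static const struct oclass nvd0_therm = { "nvd0_therm" };
    static const struct oclass nvc0_mc    = { "nvc0_mc" };

    static int identify(int chipset, const struct oclass *tbl[SUBDEV_NR])
    {
        switch (chipset) {
        case 0xd7: /* GF117: GF119-era blocks on a GF100-generation core */
            tbl[SUBDEV_THERM] = &nvd0_therm;
            tbl[SUBDEV_MC]    = &nvc0_mc;
            return 0;
        default:
            return -1; /* unknown Fermi chipset */
        }
    }

    int main(void)
    {
        const struct oclass *tbl[SUBDEV_NR] = { 0 };
        if (identify(0xd7, tbl) == 0)
            printf("THERM=%s MC=%s\n", tbl[SUBDEV_THERM]->name,
                   tbl[SUBDEV_MC]->name);
        return 0;
    }
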
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 5c12391619fd..a354e409cdff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
40#include <subdev/vm.h> 39#include <subdev/vm.h>
41#include <subdev/bar.h> 40#include <subdev/bar.h>
42 41
42#include <engine/device.h>
43#include <engine/dmaobj.h> 43#include <engine/dmaobj.h>
44#include <engine/fifo.h> 44#include <engine/fifo.h>
45#include <engine/software.h> 45#include <engine/software.h>
@@ -141,6 +141,40 @@ nve0_identify(struct nouveau_device *device)
141 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 141 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
142 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 142 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
143 break; 143 break;
144 case 0xf0:
145 device->cname = "GK110";
146 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
147 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
148 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
149 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
150 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
151 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
152 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
153 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
154 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
155 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
156 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
157 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
158 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
159 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
160 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
161 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
163#if 0
164 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
165 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
166 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
167#endif
168 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
169#if 0
170 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
171 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
172 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
173 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
174 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
175 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
176#endif
177 break;
144 default: 178 default:
145 nv_fatal(device, "unknown Kepler chipset\n"); 179 nv_fatal(device, "unknown Kepler chipset\n");
146 return -EINVAL; 180 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index fa27b02ff829..31cc8fe8e7f0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -191,7 +191,7 @@ dp_link_train_cr(struct dp_state *dp)
191static int 191static int
192dp_link_train_eq(struct dp_state *dp) 192dp_link_train_eq(struct dp_state *dp)
193{ 193{
194 bool eq_done, cr_done = true; 194 bool eq_done = false, cr_done = true;
195 int tries = 0, i; 195 int tries = 0, i;
196 196
197 dp_set_training_pattern(dp, 2); 197 dp_set_training_pattern(dp, 2);
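
The one-character dport.c change is an uninitialized-variable fix: eq_done is tested after the training loop, so any early exit that skips the assignment would read an indeterminate bool, which is undefined behaviour in C. A self-contained sketch of the hazard, with a hypothetical read_status() in place of the real DPCD status read:

    /* Editor's sketch of the fixed hazard, simplified. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool read_status(int try) { return try >= 2; /* pretend link EQ */ }

    static int link_train_eq(void)
    {
        bool eq_done = false;   /* the fix: a defined starting value */
        int tries = 0;

        do {
            /* an early 'break' here would otherwise leave eq_done unset */
            eq_done = read_status(tries);
        } while (!eq_done && ++tries <= 5);

        return eq_done ? 0 : -1;
    }

    int main(void)
    {
        printf("eq %s\n", link_train_eq() ? "failed" : "done");
        return 0;
    }
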
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 02e369f80449..6a38402fa56c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -572,7 +572,8 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
572 priv->base.vblank->priv = priv; 572 priv->base.vblank->priv = priv;
573 priv->base.vblank->enable = nv50_disp_base_vblank_enable; 573 priv->base.vblank->enable = nv50_disp_base_vblank_enable;
574 priv->base.vblank->disable = nv50_disp_base_vblank_disable; 574 priv->base.vblank->disable = nv50_disp_base_vblank_disable;
575 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); 575 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
576 &base->ramht);
576} 577}
577 578
578static void 579static void
@@ -719,7 +720,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
719 if (nv_mclass(parent) != NV_DEVICE_CLASS) { 720 if (nv_mclass(parent) != NV_DEVICE_CLASS) {
720 atomic_inc(&parent->refcount); 721 atomic_inc(&parent->refcount);
721 *pobject = parent; 722 *pobject = parent;
722 return 0; 723 return 1;
723 } 724 }
724 725
725 /* allocate display hardware to client */ 726 /* allocate display hardware to client */
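
The return-0 to return-1 change in nv50_disp_data_ctor sits on the path that hands back the already-existing display data object (refcount bumped, nothing allocated). Our reading, hedged, is that a positive constructor return tells the object core the instance already exists and must not be initialised a second time; a toy sketch of that share-or-create shape with hypothetical types:

    /* Editor's sketch; types and the return-1 convention as we read them,
     * not a statement of the core's full contract. */
    #include <stdio.h>

    struct obj { int refcount; };

    static struct obj shared = { .refcount = 1 };

    static int data_ctor(int parent_is_device, struct obj **pobject)
    {
        if (!parent_is_device) {
            shared.refcount++;      /* reuse the already-built object */
            *pobject = &shared;
            return 1;               /* positive: exists, skip init */
        }
        /* ... would allocate a fresh object and return 0 on success;
         * the demo reuses the static object for brevity ... */
        *pobject = &shared;
        return 0;
    }

    int main(void)
    {
        struct obj *o;
        printf("ctor=%d refs=%d\n", data_ctor(0, &o), o->refcount);
        return 0;
    }
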
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 788dd34ccb54..019eacd8a68f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -473,7 +473,8 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
473 priv->base.vblank->enable = nvd0_disp_base_vblank_enable; 473 priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
474 priv->base.vblank->disable = nvd0_disp_base_vblank_disable; 474 priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
475 475
476 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); 476 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
477 &base->ramht);
477} 478}
478 479
479static void 480static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 000000000000..a488c36e40f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nvf0_disp_sclass[] = {
34 { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
35 { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
36 { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
37 { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
38 { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nvf0_disp_base_oclass[] = {
44 { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
45 {}
46};
47
48static int
49nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int heads = nv_rd32(parent, 0x022448);
55 int ret;
56
57 ret = nouveau_disp_create(parent, engine, oclass, heads,
58 "PDISP", "display", &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 nv_engine(priv)->sclass = nvf0_disp_base_oclass;
64 nv_engine(priv)->cclass = &nv50_disp_cclass;
65 nv_subdev(priv)->intr = nvd0_disp_intr;
66 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
67 priv->sclass = nvf0_disp_sclass;
68 priv->head.nr = heads;
69 priv->dac.nr = 3;
70 priv->sor.nr = 4;
71 priv->dac.power = nv50_dac_power;
72 priv->dac.sense = nv50_dac_sense;
73 priv->sor.power = nv50_sor_power;
74 priv->sor.hda_eld = nvd0_hda_eld;
75 priv->sor.hdmi = nvd0_hdmi_ctrl;
76 priv->sor.dp = &nvd0_sor_dp_func;
77 return 0;
78}
79
80struct nouveau_oclass
81nvf0_disp_oclass = {
82 .handle = NV_ENGINE(DISP, 0x92),
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nvf0_disp_ctor,
85 .dtor = _nouveau_disp_dtor,
86 .init = _nouveau_disp_init,
87 .fini = _nouveau_disp_fini,
88 },
89};
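
The new nvf0.c is GK110 modesetting support built almost entirely from the GF119 (nvd0) implementation, re-exposed under NVF0_* class ids; nvf0_disp_sclass[] is the usual null-terminated (handle, ofuncs) table the core scans to resolve a user-visible class. A toy lookup over such a table, with made-up handles:

    /* Editor's sketch; handles and types are hypothetical stand-ins. */
    #include <stdio.h>

    struct oclass { unsigned handle; const char *ofuncs; };

    static const struct oclass sclass[] = {
        { 0x9270, "mast" },
        { 0x9271, "sync" },
        { 0 }                 /* terminator, like the trailing {} */
    };

    static const struct oclass *lookup(unsigned handle)
    {
        for (const struct oclass *c = sclass; c->handle; c++)
            if (c->handle == handle)
                return c;
        return NULL;
    }

    int main(void)
    {
        const struct oclass *c = lookup(0x9271);
        printf("%s\n", c ? c->ofuncs : "unknown");
        return 0;
    }
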
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index d1528752980c..944e73ac485c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -50,6 +50,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
50 case NVE0_DISP_MAST_CLASS: 50 case NVE0_DISP_MAST_CLASS:
51 case NVE0_DISP_SYNC_CLASS: 51 case NVE0_DISP_SYNC_CLASS:
52 case NVE0_DISP_OVLY_CLASS: 52 case NVE0_DISP_OVLY_CLASS:
53 case NVF0_DISP_MAST_CLASS:
54 case NVF0_DISP_SYNC_CLASS:
55 case NVF0_DISP_OVLY_CLASS:
53 break; 56 break;
54 default: 57 default:
55 return -EINVAL; 58 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 7341ebe131fa..d3ec436d9cb5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -91,6 +91,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
91 if (!chan->user) 91 if (!chan->user)
92 return -EFAULT; 92 return -EFAULT;
93 93
94 nouveau_event_trigger(priv->cevent, 0);
95
94 chan->size = size; 96 chan->size = size;
95 return 0; 97 return 0;
96} 98}
@@ -167,6 +169,7 @@ nouveau_fifo_destroy(struct nouveau_fifo *priv)
167{ 169{
168 kfree(priv->channel); 170 kfree(priv->channel);
169 nouveau_event_destroy(&priv->uevent); 171 nouveau_event_destroy(&priv->uevent);
172 nouveau_event_destroy(&priv->cevent);
170 nouveau_engine_destroy(&priv->base); 173 nouveau_engine_destroy(&priv->base);
171} 174}
172 175
@@ -191,6 +194,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
191 if (!priv->channel) 194 if (!priv->channel)
192 return -ENOMEM; 195 return -ENOMEM;
193 196
197 ret = nouveau_event_create(1, &priv->cevent);
198 if (ret)
199 return ret;
200
194 ret = nouveau_event_create(1, &priv->uevent); 201 ret = nouveau_event_create(1, &priv->uevent);
195 if (ret) 202 if (ret)
196 return ret; 203 return ret;
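
fifo/base.c grows a second event source, cevent, created next to the existing uevent, fired once per successful channel creation, and destroyed in the reverse of creation order. A minimal sketch of the trigger shape, with a hypothetical mini event API in place of nouveau_event_create()/nouveau_event_trigger():

    /* Editor's sketch; the event API here is a made-up reduction. */
    #include <stdio.h>

    struct event { void (*handler)(int index); };

    static void event_trigger(struct event *ev, int index)
    {
        if (ev->handler)
            ev->handler(index);
    }

    static void on_channel_created(int index)
    {
        printf("channel event %d\n", index);
    }

    int main(void)
    {
        struct event cevent = { .handler = on_channel_created };

        /* ...channel_create_() succeeds, then: */
        event_trigger(&cevent, 0);
        return 0;
    }
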
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 840af6172788..ddaeb5572903 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -210,7 +210,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
210 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 210 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
211 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 211 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
212 212
213 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 213 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
214 &chan->ramht);
214 if (ret) 215 if (ret)
215 return ret; 216 return ret;
216 217
@@ -263,7 +264,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
263 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 264 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
264 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 265 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
265 266
266 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 267 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
268 &chan->ramht);
267 if (ret) 269 if (ret)
268 return ret; 270 return ret;
269 271
@@ -373,17 +375,17 @@ nv50_fifo_context_ctor(struct nouveau_object *parent,
373 if (ret) 375 if (ret)
374 return ret; 376 return ret;
375 377
376 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000, 378 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
377 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); 379 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
378 if (ret) 380 if (ret)
379 return ret; 381 return ret;
380 382
381 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0, 383 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
382 NVOBJ_FLAG_ZERO_ALLOC, &base->eng); 384 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
383 if (ret) 385 if (ret)
384 return ret; 386 return ret;
385 387
386 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0, 388 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
387 &base->pgd); 389 &base->pgd);
388 if (ret) 390 if (ret)
389 return ret; 391 return ret;
@@ -437,12 +439,12 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
437 if (ret) 439 if (ret)
438 return ret; 440 return ret;
439 441
440 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 442 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
441 &priv->playlist[0]); 443 &priv->playlist[0]);
442 if (ret) 444 if (ret)
443 return ret; 445 return ret;
444 446
445 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 447 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
446 &priv->playlist[1]); 448 &priv->playlist[1]);
447 if (ret) 449 if (ret)
448 return ret; 450 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 094000e87871..35b94bd18808 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -180,7 +180,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
180 if (ret) 180 if (ret)
181 return ret; 181 return ret;
182 182
183 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 183 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
184 &chan->ramht);
184 if (ret) 185 if (ret)
185 return ret; 186 return ret;
186 187
@@ -242,7 +243,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
242 if (ret) 243 if (ret)
243 return ret; 244 return ret;
244 245
245 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 246 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
247 &chan->ramht);
246 if (ret) 248 if (ret)
247 return ret; 249 return ret;
248 250
@@ -336,12 +338,12 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
336 if (ret) 338 if (ret)
337 return ret; 339 return ret;
338 340
339 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0, 341 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
340 NVOBJ_FLAG_ZERO_ALLOC, &base->eng); 342 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
341 if (ret) 343 if (ret)
342 return ret; 344 return ret;
343 345
344 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 346 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
345 0, &base->pgd); 347 0, &base->pgd);
346 if (ret) 348 if (ret)
347 return ret; 349 return ret;
@@ -350,13 +352,13 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
350 if (ret) 352 if (ret)
351 return ret; 353 return ret;
352 354
353 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400, 355 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
354 NVOBJ_FLAG_ZERO_ALLOC, &base->cache); 356 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
355 if (ret) 357 if (ret)
356 return ret; 358 return ret;
357 359
358 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100, 360 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
359 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); 361 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
360 if (ret) 362 if (ret)
361 return ret; 363 return ret;
362 364
@@ -407,12 +409,12 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
407 if (ret) 409 if (ret)
408 return ret; 410 return ret;
409 411
410 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 412 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
411 &priv->playlist[0]); 413 &priv->playlist[0]);
412 if (ret) 414 if (ret)
413 return ret; 415 return ret;
414 416
415 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 417 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
416 &priv->playlist[1]); 418 &priv->playlist[1]);
417 if (ret) 419 if (ret)
418 return ret; 420 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 4f226afb5591..4d4a6b905370 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -292,7 +292,8 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
292 if (ret) 292 if (ret)
293 return ret; 293 return ret;
294 294
295 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd); 295 ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
296 &base->pgd);
296 if (ret) 297 if (ret)
297 return ret; 298 return ret;
298 299
@@ -623,17 +624,17 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
623 if (ret) 624 if (ret)
624 return ret; 625 return ret;
625 626
626 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0, 627 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
627 &priv->playlist[0]); 628 &priv->playlist[0]);
628 if (ret) 629 if (ret)
629 return ret; 630 return ret;
630 631
631 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0, 632 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
632 &priv->playlist[1]); 633 &priv->playlist[1]);
633 if (ret) 634 if (ret)
634 return ret; 635 return ret;
635 636
636 ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0, 637 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
637 &priv->user.mem); 638 &priv->user.mem);
638 if (ret) 639 if (ret)
639 return ret; 640 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 4419e40d88e9..9151919fb831 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -96,7 +96,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
96 96
97 cur = engn->playlist[engn->cur_playlist]; 97 cur = engn->playlist[engn->cur_playlist];
98 if (unlikely(cur == NULL)) { 98 if (unlikely(cur == NULL)) {
99 int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL, 99 int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
100 0x8000, 0x1000, 0, &cur); 100 0x8000, 0x1000, 0, &cur);
101 if (ret) { 101 if (ret) {
102 nv_error(priv, "playlist alloc failed\n"); 102 nv_error(priv, "playlist alloc failed\n");
@@ -333,7 +333,8 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
333 if (ret) 333 if (ret)
334 return ret; 334 return ret;
335 335
336 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd); 336 ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
337 &base->pgd);
337 if (ret) 338 if (ret)
338 return ret; 339 return ret;
339 340
@@ -595,7 +596,7 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
595 if (ret) 596 if (ret)
596 return ret; 597 return ret;
597 598
598 ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000, 599 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
599 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); 600 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
600 if (ret) 601 if (ret)
601 return ret; 602 return ret;
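
The signature churn across the fifo (and, below, graph) constructors is one recurring substitution: gpuobj allocations now name the allocating object itself, nv_object(priv/base/chan), instead of the generic parent. Presumably this ties each buffer's ownership to the object that actually uses it; a reduced sketch with hypothetical types:

    /* Editor's sketch of the ownership idea; alloc API is made up. */
    #include <stdio.h>

    struct obj { const char *name; };

    static int gpuobj_new(struct obj *owner, int size, const char *what)
    {
        /* real code allocates VRAM/instmem; here we just record ownership */
        printf("%s: %d bytes for %s\n", owner->name, size, what);
        return 0;
    }

    int main(void)
    {
        struct obj fifo = { "PFIFO" };

        /* before: allocations were charged to the generic parent;
         * after:  they are charged to the engine/context itself.   */
        gpuobj_new(&fifo, 128 * 4, "playlist[0]");
        gpuobj_new(&fifo, 128 * 4, "playlist[1]");
        return 0;
    }
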
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 0b7951a85943..4cc6269d4077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -36,7 +36,6 @@ int
36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
37{ 37{
38 struct nouveau_bar *bar = nouveau_bar(priv); 38 struct nouveau_bar *bar = nouveau_bar(priv);
39 struct nouveau_object *parent = nv_object(priv);
40 struct nouveau_gpuobj *chan; 39 struct nouveau_gpuobj *chan;
41 u32 size = (0x80000 + priv->size + 4095) & ~4095; 40 u32 size = (0x80000 + priv->size + 4095) & ~4095;
42 int ret, i; 41 int ret, i;
@@ -44,7 +43,7 @@ nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
44 /* allocate memory to for a "channel", which we'll use to generate 43 /* allocate memory to for a "channel", which we'll use to generate
45 * the default context values 44 * the default context values
46 */ 45 */
47 ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000, 46 ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000,
48 NVOBJ_FLAG_ZERO_ALLOC, &info->chan); 47 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
49 chan = info->chan; 48 chan = info->chan;
50 if (ret) { 49 if (ret) {
@@ -1399,7 +1398,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1399{ 1398{
1400 int i; 1399 int i;
1401 1400
1402 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) { 1401 for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
1403 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000); 1402 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1404 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000); 1403 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1405 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000); 1404 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1415,7 +1414,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1415 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000); 1414 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
1416 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000); 1415 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
1417 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000); 1416 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
1418 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) { 1417 for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
1419 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000); 1418 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1420 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000); 1419 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1421 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040); 1420 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1615,7 +1614,7 @@ static void
1615nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv) 1614nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
1616{ 1615{
1617 1616
1618 if (nv_device(priv)->chipset == 0xd9) { 1617 if (nv_device(priv)->chipset >= 0xd0) {
1619 nv_wr32(priv, 0x405800, 0x0f8000bf); 1618 nv_wr32(priv, 0x405800, 0x0f8000bf);
1620 nv_wr32(priv, 0x405830, 0x02180218); 1619 nv_wr32(priv, 0x405830, 0x02180218);
1621 nv_wr32(priv, 0x405834, 0x08000000); 1620 nv_wr32(priv, 0x405834, 0x08000000);
@@ -1658,10 +1657,10 @@ nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
1658 nv_wr32(priv, 0x4064ac, 0x00003fff); 1657 nv_wr32(priv, 0x4064ac, 0x00003fff);
1659 nv_wr32(priv, 0x4064b4, 0x00000000); 1658 nv_wr32(priv, 0x4064b4, 0x00000000);
1660 nv_wr32(priv, 0x4064b8, 0x00000000); 1659 nv_wr32(priv, 0x4064b8, 0x00000000);
1661 if (nv_device(priv)->chipset == 0xd9) 1660 if (nv_device(priv)->chipset >= 0xd0)
1662 nv_wr32(priv, 0x4064bc, 0x00000000); 1661 nv_wr32(priv, 0x4064bc, 0x00000000);
1663 if (nv_device(priv)->chipset == 0xc1 || 1662 if (nv_device(priv)->chipset == 0xc1 ||
1664 nv_device(priv)->chipset == 0xd9) { 1663 nv_device(priv)->chipset >= 0xd0) {
1665 nv_wr32(priv, 0x4064c0, 0x80140078); 1664 nv_wr32(priv, 0x4064c0, 0x80140078);
1666 nv_wr32(priv, 0x4064c4, 0x0086ffff); 1665 nv_wr32(priv, 0x4064c4, 0x0086ffff);
1667 } 1666 }
@@ -1701,7 +1700,7 @@ nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
1701 /* ROPC_BROADCAST */ 1700 /* ROPC_BROADCAST */
1702 nv_wr32(priv, 0x408800, 0x02802a3c); 1701 nv_wr32(priv, 0x408800, 0x02802a3c);
1703 nv_wr32(priv, 0x408804, 0x00000040); 1702 nv_wr32(priv, 0x408804, 0x00000040);
1704 if (chipset == 0xd9) { 1703 if (chipset >= 0xd0) {
1705 nv_wr32(priv, 0x408808, 0x1043e005); 1704 nv_wr32(priv, 0x408808, 0x1043e005);
1706 nv_wr32(priv, 0x408900, 0x3080b801); 1705 nv_wr32(priv, 0x408900, 0x3080b801);
1707 nv_wr32(priv, 0x408904, 0x1043e005); 1706 nv_wr32(priv, 0x408904, 0x1043e005);
@@ -1735,7 +1734,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1735 nv_wr32(priv, 0x418408, 0x00000000); 1734 nv_wr32(priv, 0x418408, 0x00000000);
1736 nv_wr32(priv, 0x41840c, 0x00001008); 1735 nv_wr32(priv, 0x41840c, 0x00001008);
1737 nv_wr32(priv, 0x418410, 0x0fff0fff); 1736 nv_wr32(priv, 0x418410, 0x0fff0fff);
1738 nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff); 1737 nv_wr32(priv, 0x418414, chipset < 0xd0 ? 0x00200fff : 0x02200fff);
1739 nv_wr32(priv, 0x418450, 0x00000000); 1738 nv_wr32(priv, 0x418450, 0x00000000);
1740 nv_wr32(priv, 0x418454, 0x00000000); 1739 nv_wr32(priv, 0x418454, 0x00000000);
1741 nv_wr32(priv, 0x418458, 0x00000000); 1740 nv_wr32(priv, 0x418458, 0x00000000);
@@ -1750,14 +1749,14 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1750 nv_wr32(priv, 0x418700, 0x00000002); 1749 nv_wr32(priv, 0x418700, 0x00000002);
1751 nv_wr32(priv, 0x418704, 0x00000080); 1750 nv_wr32(priv, 0x418704, 0x00000080);
1752 nv_wr32(priv, 0x418708, 0x00000000); 1751 nv_wr32(priv, 0x418708, 0x00000000);
1753 nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000); 1752 nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000);
1754 nv_wr32(priv, 0x418710, 0x00000000); 1753 nv_wr32(priv, 0x418710, 0x00000000);
1755 nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a); 1754 nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a);
1756 nv_wr32(priv, 0x418808, 0x00000000); 1755 nv_wr32(priv, 0x418808, 0x00000000);
1757 nv_wr32(priv, 0x41880c, 0x00000000); 1756 nv_wr32(priv, 0x41880c, 0x00000000);
1758 nv_wr32(priv, 0x418810, 0x00000000); 1757 nv_wr32(priv, 0x418810, 0x00000000);
1759 nv_wr32(priv, 0x418828, 0x00008442); 1758 nv_wr32(priv, 0x418828, 0x00008442);
1760 if (chipset == 0xc1 || chipset == 0xd9) 1759 if (chipset == 0xc1 || chipset >= 0xd0)
1761 nv_wr32(priv, 0x418830, 0x10000001); 1760 nv_wr32(priv, 0x418830, 0x10000001);
1762 else 1761 else
1763 nv_wr32(priv, 0x418830, 0x00000001); 1762 nv_wr32(priv, 0x418830, 0x00000001);
@@ -1768,7 +1767,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1768 nv_wr32(priv, 0x4188f0, 0x00000000); 1767 nv_wr32(priv, 0x4188f0, 0x00000000);
1769 nv_wr32(priv, 0x4188f4, 0x00000000); 1768 nv_wr32(priv, 0x4188f4, 0x00000000);
1770 nv_wr32(priv, 0x4188f8, 0x00000000); 1769 nv_wr32(priv, 0x4188f8, 0x00000000);
1771 if (chipset == 0xd9) 1770 if (chipset >= 0xd0)
1772 nv_wr32(priv, 0x4188fc, 0x20100008); 1771 nv_wr32(priv, 0x4188fc, 0x20100008);
1773 else if (chipset == 0xc1) 1772 else if (chipset == 0xc1)
1774 nv_wr32(priv, 0x4188fc, 0x00100018); 1773 nv_wr32(priv, 0x4188fc, 0x00100018);
@@ -1787,7 +1786,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1787 nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000); 1786 nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
1788 nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000); 1787 nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
1789 } 1788 }
1790 nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006); 1789 nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 0x00000000 : 0x00000006);
1791 nv_wr32(priv, 0x418b08, 0x0a418820); 1790 nv_wr32(priv, 0x418b08, 0x0a418820);
1792 nv_wr32(priv, 0x418b0c, 0x062080e6); 1791 nv_wr32(priv, 0x418b0c, 0x062080e6);
1793 nv_wr32(priv, 0x418b10, 0x020398a4); 1792 nv_wr32(priv, 0x418b10, 0x020398a4);
@@ -1804,7 +1803,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1804 nv_wr32(priv, 0x418c24, 0x00000000); 1803 nv_wr32(priv, 0x418c24, 0x00000000);
1805 nv_wr32(priv, 0x418c28, 0x00000000); 1804 nv_wr32(priv, 0x418c28, 0x00000000);
1806 nv_wr32(priv, 0x418c2c, 0x00000000); 1805 nv_wr32(priv, 0x418c2c, 0x00000000);
1807 if (chipset == 0xc1 || chipset == 0xd9) 1806 if (chipset == 0xc1 || chipset >= 0xd0)
1808 nv_wr32(priv, 0x418c6c, 0x00000001); 1807 nv_wr32(priv, 0x418c6c, 0x00000001);
1809 nv_wr32(priv, 0x418c80, 0x20200004); 1808 nv_wr32(priv, 0x418c80, 0x20200004);
1810 nv_wr32(priv, 0x418c8c, 0x00000001); 1809 nv_wr32(priv, 0x418c8c, 0x00000001);
@@ -1823,7 +1822,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1823 nv_wr32(priv, 0x419818, 0x00000000); 1822 nv_wr32(priv, 0x419818, 0x00000000);
1824 nv_wr32(priv, 0x41983c, 0x00038bc7); 1823 nv_wr32(priv, 0x41983c, 0x00038bc7);
1825 nv_wr32(priv, 0x419848, 0x00000000); 1824 nv_wr32(priv, 0x419848, 0x00000000);
1826 if (chipset == 0xc1 || chipset == 0xd9) 1825 if (chipset == 0xc1 || chipset >= 0xd0)
1827 nv_wr32(priv, 0x419864, 0x00000129); 1826 nv_wr32(priv, 0x419864, 0x00000129);
1828 else 1827 else
1829 nv_wr32(priv, 0x419864, 0x0000012a); 1828 nv_wr32(priv, 0x419864, 0x0000012a);
@@ -1836,7 +1835,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1836 nv_wr32(priv, 0x419a14, 0x00000200); 1835 nv_wr32(priv, 0x419a14, 0x00000200);
1837 nv_wr32(priv, 0x419a1c, 0x00000000); 1836 nv_wr32(priv, 0x419a1c, 0x00000000);
1838 nv_wr32(priv, 0x419a20, 0x00000800); 1837 nv_wr32(priv, 0x419a20, 0x00000800);
1839 if (chipset == 0xd9) 1838 if (chipset >= 0xd0)
1840 nv_wr32(priv, 0x00419ac4, 0x0017f440); 1839 nv_wr32(priv, 0x00419ac4, 0x0017f440);
1841 else if (chipset != 0xc0 && chipset != 0xc8) 1840 else if (chipset != 0xc0 && chipset != 0xc8)
1842 nv_wr32(priv, 0x00419ac4, 0x0007f440); 1841 nv_wr32(priv, 0x00419ac4, 0x0007f440);
@@ -1847,16 +1846,16 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1847 nv_wr32(priv, 0x419b10, 0x0a418820); 1846 nv_wr32(priv, 0x419b10, 0x0a418820);
1848 nv_wr32(priv, 0x419b14, 0x000000e6); 1847 nv_wr32(priv, 0x419b14, 0x000000e6);
1849 nv_wr32(priv, 0x419bd0, 0x00900103); 1848 nv_wr32(priv, 0x419bd0, 0x00900103);
1850 if (chipset == 0xc1 || chipset == 0xd9) 1849 if (chipset == 0xc1 || chipset >= 0xd0)
1851 nv_wr32(priv, 0x419be0, 0x00400001); 1850 nv_wr32(priv, 0x419be0, 0x00400001);
1852 else 1851 else
1853 nv_wr32(priv, 0x419be0, 0x00000001); 1852 nv_wr32(priv, 0x419be0, 0x00000001);
1854 nv_wr32(priv, 0x419be4, 0x00000000); 1853 nv_wr32(priv, 0x419be4, 0x00000000);
1855 nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a); 1854 nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 0x00000002 : 0x0000000a);
1856 nv_wr32(priv, 0x419c04, 0x00000006); 1855 nv_wr32(priv, 0x419c04, 0x00000006);
1857 nv_wr32(priv, 0x419c08, 0x00000002); 1856 nv_wr32(priv, 0x419c08, 0x00000002);
1858 nv_wr32(priv, 0x419c20, 0x00000000); 1857 nv_wr32(priv, 0x419c20, 0x00000000);
1859 if (nv_device(priv)->chipset == 0xd9) { 1858 if (nv_device(priv)->chipset >= 0xd0) {
1860 nv_wr32(priv, 0x419c24, 0x00084210); 1859 nv_wr32(priv, 0x419c24, 0x00084210);
1861 nv_wr32(priv, 0x419c28, 0x3cf3cf3c); 1860 nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
1862 nv_wr32(priv, 0x419cb0, 0x00020048); 1861 nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1868,12 +1867,12 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1868 } 1867 }
1869 nv_wr32(priv, 0x419ce8, 0x00000000); 1868 nv_wr32(priv, 0x419ce8, 0x00000000);
1870 nv_wr32(priv, 0x419cf4, 0x00000183); 1869 nv_wr32(priv, 0x419cf4, 0x00000183);
1871 if (chipset == 0xc1 || chipset == 0xd9) 1870 if (chipset == 0xc1 || chipset >= 0xd0)
1872 nv_wr32(priv, 0x419d20, 0x12180000); 1871 nv_wr32(priv, 0x419d20, 0x12180000);
1873 else 1872 else
1874 nv_wr32(priv, 0x419d20, 0x02180000); 1873 nv_wr32(priv, 0x419d20, 0x02180000);
1875 nv_wr32(priv, 0x419d24, 0x00001fff); 1874 nv_wr32(priv, 0x419d24, 0x00001fff);
1876 if (chipset == 0xc1 || chipset == 0xd9) 1875 if (chipset == 0xc1 || chipset >= 0xd0)
1877 nv_wr32(priv, 0x419d44, 0x02180218); 1876 nv_wr32(priv, 0x419d44, 0x02180218);
1878 nv_wr32(priv, 0x419e04, 0x00000000); 1877 nv_wr32(priv, 0x419e04, 0x00000000);
1879 nv_wr32(priv, 0x419e08, 0x00000000); 1878 nv_wr32(priv, 0x419e08, 0x00000000);
@@ -2210,7 +2209,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2210 nv_icmd(priv, 0x00000215, 0x00000040); 2209 nv_icmd(priv, 0x00000215, 0x00000040);
2211 nv_icmd(priv, 0x00000216, 0x00000040); 2210 nv_icmd(priv, 0x00000216, 0x00000040);
2212 nv_icmd(priv, 0x00000217, 0x00000040); 2211 nv_icmd(priv, 0x00000217, 0x00000040);
2213 if (nv_device(priv)->chipset == 0xd9) { 2212 if (nv_device(priv)->chipset >= 0xd0) {
2214 for (i = 0x0400; i <= 0x0417; i++) 2213 for (i = 0x0400; i <= 0x0417; i++)
2215 nv_icmd(priv, i, 0x00000040); 2214 nv_icmd(priv, i, 0x00000040);
2216 } 2215 }
@@ -2222,7 +2221,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2222 nv_icmd(priv, 0x0000021d, 0x0000c080); 2221 nv_icmd(priv, 0x0000021d, 0x0000c080);
2223 nv_icmd(priv, 0x0000021e, 0x0000c080); 2222 nv_icmd(priv, 0x0000021e, 0x0000c080);
2224 nv_icmd(priv, 0x0000021f, 0x0000c080); 2223 nv_icmd(priv, 0x0000021f, 0x0000c080);
2225 if (nv_device(priv)->chipset == 0xd9) { 2224 if (nv_device(priv)->chipset >= 0xd0) {
2226 for (i = 0x0440; i <= 0x0457; i++) 2225 for (i = 0x0440; i <= 0x0457; i++)
2227 nv_icmd(priv, i, 0x0000c080); 2226 nv_icmd(priv, i, 0x0000c080);
2228 } 2227 }
@@ -2789,7 +2788,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2789 nv_icmd(priv, 0x00000585, 0x0000003f); 2788 nv_icmd(priv, 0x00000585, 0x0000003f);
2790 nv_icmd(priv, 0x00000576, 0x00000003); 2789 nv_icmd(priv, 0x00000576, 0x00000003);
2791 if (nv_device(priv)->chipset == 0xc1 || 2790 if (nv_device(priv)->chipset == 0xc1 ||
2792 nv_device(priv)->chipset == 0xd9) 2791 nv_device(priv)->chipset >= 0xd0)
2793 nv_icmd(priv, 0x0000057b, 0x00000059); 2792 nv_icmd(priv, 0x0000057b, 0x00000059);
2794 nv_icmd(priv, 0x00000586, 0x00000040); 2793 nv_icmd(priv, 0x00000586, 0x00000040);
2795 nv_icmd(priv, 0x00000582, 0x00000080); 2794 nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2891,7 +2890,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2891 nv_icmd(priv, 0x00000957, 0x00000003); 2890 nv_icmd(priv, 0x00000957, 0x00000003);
2892 nv_icmd(priv, 0x0000095e, 0x20164010); 2891 nv_icmd(priv, 0x0000095e, 0x20164010);
2893 nv_icmd(priv, 0x0000095f, 0x00000020); 2892 nv_icmd(priv, 0x0000095f, 0x00000020);
2894 if (nv_device(priv)->chipset == 0xd9) 2893 if (nv_device(priv)->chipset >= 0xd0)
2895 nv_icmd(priv, 0x0000097d, 0x00000020); 2894 nv_icmd(priv, 0x0000097d, 0x00000020);
2896 nv_icmd(priv, 0x00000683, 0x00000006); 2895 nv_icmd(priv, 0x00000683, 0x00000006);
2897 nv_icmd(priv, 0x00000685, 0x003fffff); 2896 nv_icmd(priv, 0x00000685, 0x003fffff);
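
Every '== 0xd9' test in ctxnvc0.c becomes '>= 0xd0', widening the GF119-only branches to the whole 0xd0 family so the newly added GF117 (chipset 0xd7) takes the same paths. The predicate change in isolation:

    /* Editor's sketch: GF117 is 0xd7, GF119 is 0xd9; both satisfy the new
     * test, which is why one blanket rewrite picks up the new chip. */
    #include <stdio.h>

    static int is_d0_family(int chipset) { return chipset >= 0xd0; }

    int main(void)
    {
        int chips[] = { 0xc1, 0xd7, 0xd9 };
        for (int i = 0; i < 3; i++)
            printf("0x%02x: old(==0xd9)=%d new(>=0xd0)=%d\n", chips[i],
                   chips[i] == 0xd9, is_d0_family(chips[i]));
        return 0;
    }
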
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
index 6d8c63931ee6..ae27dae3fe38 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -2772,10 +2772,15 @@ nve0_grctx_generate(struct nvc0_graph_priv *priv)
2772 for (i = 0; i < 8; i++) 2772 for (i = 0; i < 8; i++)
2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
2774 2774
2775 nv_wr32(priv, 0x405b00, 0x201); 2775 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
2776 nv_wr32(priv, 0x408850, 0x2); 2776 if (priv->gpc_nr == 1) {
2777 nv_wr32(priv, 0x408958, 0x2); 2777 nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
2778 nv_wr32(priv, 0x419f78, 0xa); 2778 nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
2779 } else {
2780 nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
2781 nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
2782 }
2783 nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
2779 2784
2780 nve0_grctx_generate_icmd(priv); 2785 nve0_grctx_generate_icmd(priv);
2781 nve0_grctx_generate_a097(priv); 2786 nve0_grctx_generate_a097(priv);
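
The ctxnve0.c hunk replaces magic constants with computed values: the old 0x405b00 write of 0x201 decodes exactly as (tpc_total << 8) | gpc_nr for the 2-TPC, 1-GPC board it was captured on, which is why hardcoding it was wrong on other Kepler configurations. The arithmetic:

    /* Editor's sketch: why 0x201 was only right for one configuration.
     *   value = (tpc_total << 8) | gpc_nr
     *   tpc_total = 2, gpc_nr = 1  ->  (2 << 8) | 1 = 0x201  (old constant)
     *   tpc_total = 8, gpc_nr = 4  ->  (8 << 8) | 4 = 0x804  (bigger Kepler) */
    #include <stdio.h>

    int main(void)
    {
        unsigned tpc_total = 2, gpc_nr = 1;
        printf("0x%03x\n", (tpc_total << 8) | gpc_nr); /* prints 0x201 */
        return 0;
    }
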
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index b86cc60dcd56..f7055af0f2a6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -87,6 +87,11 @@ chipsets:
87.b16 #nvd9_gpc_mmio_tail 87.b16 #nvd9_gpc_mmio_tail
88.b16 #nvd9_tpc_mmio_head 88.b16 #nvd9_tpc_mmio_head
89.b16 #nvd9_tpc_mmio_tail 89.b16 #nvd9_tpc_mmio_tail
90.b8 0xd7 0 0 0
91.b16 #nvd9_gpc_mmio_head
92.b16 #nvd9_gpc_mmio_tail
93.b16 #nvd9_tpc_mmio_head
94.b16 #nvd9_tpc_mmio_tail
90.b8 0 0 0 0 95.b8 0 0 0 0
91 96
92// GPC mmio lists 97// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 0bcfa4d447e5..7fbdebb2bafb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -62,6 +62,9 @@ chipsets:
62.b8 0xd9 0 0 0 62.b8 0xd9 0 0 0
63.b16 #nvd9_hub_mmio_head 63.b16 #nvd9_hub_mmio_head
64.b16 #nvd9_hub_mmio_tail 64.b16 #nvd9_hub_mmio_tail
65.b8 0xd7 0 0 0
66.b16 #nvd9_hub_mmio_head
67.b16 #nvd9_hub_mmio_tail
65.b8 0 0 0 0 68.b8 0 0 0 0
66 69
67nvc0_hub_mmio_head: 70nvc0_hub_mmio_head:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 0607b9801748..b24559315903 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -254,7 +254,7 @@ nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
254 if (ret) 254 if (ret)
255 return ret; 255 return ret;
256 256
257 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 257 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
258 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 258 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
259 if (ret) 259 if (ret)
260 return ret; 260 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index b2b650dd8b28..7a80d005a974 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -142,7 +142,7 @@ nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
142 if (ret) 142 if (ret)
143 return ret; 143 return ret;
144 144
145 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 145 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
147 if (ret) 147 if (ret)
148 return ret; 148 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 700462fa0ae0..3e1f32ee43d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -109,7 +109,7 @@ nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
109 if (ret) 109 if (ret)
110 return ret; 110 return ret;
111 111
112 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 112 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
114 if (ret) 114 if (ret)
115 return ret; 115 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index cedadaa92d3f..e451db32e92a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -143,7 +143,7 @@ nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 146 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret) 148 if (ret)
149 return ret; 149 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 273f6320027b..9385ac7b44a4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -143,7 +143,7 @@ nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 146 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret) 148 if (ret)
149 return ret; 149 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index f40ee2116ee1..9ce84b73f86a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -141,7 +141,7 @@ nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
141 if (ret) 141 if (ret)
142 return ret; 142 return ret;
143 143
144 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 144 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
146 if (ret) 146 if (ret)
147 return ret; 147 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 17049d5c723d..193a5de1b482 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -46,6 +46,14 @@ struct nv40_graph_chan {
46 struct nouveau_graph_chan base; 46 struct nouveau_graph_chan base;
47}; 47};
48 48
49static u64
50nv40_graph_units(struct nouveau_graph *graph)
51{
52 struct nv40_graph_priv *priv = (void *)graph;
53
54 return nv_rd32(priv, 0x1540);
55}
56
49/******************************************************************************* 57/*******************************************************************************
50 * Graphics object classes 58 * Graphics object classes
51 ******************************************************************************/ 59 ******************************************************************************/
@@ -359,6 +367,8 @@ nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
359 else 367 else
360 nv_engine(priv)->sclass = nv40_graph_sclass; 368 nv_engine(priv)->sclass = nv40_graph_sclass;
361 nv_engine(priv)->tile_prog = nv40_graph_tile_prog; 369 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
370
371 priv->base.units = nv40_graph_units;
362 return 0; 372 return 0;
363} 373}
364 374
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index f2b1a7a124f2..1ac36110ca19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -48,6 +48,14 @@ struct nv50_graph_chan {
48 struct nouveau_graph_chan base; 48 struct nouveau_graph_chan base;
49}; 49};
50 50
51static u64
52nv50_graph_units(struct nouveau_graph *graph)
53{
54 struct nv50_graph_priv *priv = (void *)graph;
55
56 return nv_rd32(priv, 0x1540);
57}
58
51/******************************************************************************* 59/*******************************************************************************
52 * Graphics object classes 60 * Graphics object classes
53 ******************************************************************************/ 61 ******************************************************************************/
@@ -819,6 +827,8 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
819 nv_subdev(priv)->intr = nv50_graph_intr; 827 nv_subdev(priv)->intr = nv50_graph_intr;
820 nv_engine(priv)->cclass = &nv50_graph_cclass; 828 nv_engine(priv)->cclass = &nv50_graph_cclass;
821 829
830 priv->base.units = nv50_graph_units;
831
822 switch (nv_device(priv)->chipset) { 832 switch (nv_device(priv)->chipset) {
823 case 0x50: 833 case 0x50:
824 nv_engine(priv)->sclass = nv50_graph_sclass; 834 nv_engine(priv)->sclass = nv50_graph_sclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 0de0dd724aff..f9b9d82c287f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -60,6 +60,19 @@ nvc8_graph_sclass[] = {
60 {} 60 {}
61}; 61};
62 62
63u64
64nvc0_graph_units(struct nouveau_graph *graph)
65{
66 struct nvc0_graph_priv *priv = (void *)graph;
67 u64 cfg;
68
69 cfg = (u32)priv->gpc_nr;
70 cfg |= (u32)priv->tpc_total << 8;
71 cfg |= (u64)priv->rop_nr << 32;
72
73 return cfg;
74}
75
63/******************************************************************************* 76/*******************************************************************************
64 * PGRAPH context 77 * PGRAPH context
65 ******************************************************************************/ 78 ******************************************************************************/
@@ -89,7 +102,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
89 * fuc to modify some per-context register settings on first load 102 * fuc to modify some per-context register settings on first load
90 * of the context. 103 * of the context.
91 */ 104 */
92 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio); 105 ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
106 &chan->mmio);
93 if (ret) 107 if (ret)
94 return ret; 108 return ret;
95 109
@@ -101,8 +115,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
101 115
102 /* allocate buffers referenced by mmio list */ 116 /* allocate buffers referenced by mmio list */
103 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) { 117 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
104 ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align, 118 ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
105 0, &chan->data[i].mem); 119 data->align, 0, &chan->data[i].mem);
106 if (ret) 120 if (ret)
107 return ret; 121 return ret;
108 122
@@ -518,9 +532,10 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
518{ 532{
519 struct nouveau_device *device = nv_device(parent); 533 struct nouveau_device *device = nv_device(parent);
520 struct nvc0_graph_priv *priv; 534 struct nvc0_graph_priv *priv;
535 bool enable = device->chipset != 0xd7;
521 int ret, i; 536 int ret, i;
522 537
523 ret = nouveau_graph_create(parent, engine, oclass, true, &priv); 538 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
524 *pobject = nv_object(priv); 539 *pobject = nv_object(priv);
525 if (ret) 540 if (ret)
526 return ret; 541 return ret;
@@ -529,6 +544,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
529 nv_subdev(priv)->intr = nvc0_graph_intr; 544 nv_subdev(priv)->intr = nvc0_graph_intr;
530 nv_engine(priv)->cclass = &nvc0_graph_cclass; 545 nv_engine(priv)->cclass = &nvc0_graph_cclass;
531 546
547 priv->base.units = nvc0_graph_units;
548
532 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { 549 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
533 nv_info(priv, "using external firmware\n"); 550 nv_info(priv, "using external firmware\n");
534 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || 551 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -551,11 +568,13 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
551 break; 568 break;
552 } 569 }
553 570
554 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4); 571 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
572 &priv->unk4188b4);
555 if (ret) 573 if (ret)
556 return ret; 574 return ret;
557 575
558 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8); 576 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
577 &priv->unk4188b8);
559 if (ret) 578 if (ret)
560 return ret; 579 return ret;
561 580
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index a1e78de46456..c870dad0f670 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -118,6 +118,7 @@ nvc0_graph_class(void *obj)
118 return 0x9197; 118 return 0x9197;
119 case 0xc8: 119 case 0xc8:
120 case 0xd9: 120 case 0xd9:
121 case 0xd7:
121 return 0x9297; 122 return 0x9297;
122 case 0xe4: 123 case 0xe4:
123 case 0xe7: 124 case 0xe7:
@@ -169,4 +170,6 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
169 struct nouveau_object **); 170 struct nouveau_object **);
170void nvc0_graph_context_dtor(struct nouveau_object *); 171void nvc0_graph_context_dtor(struct nouveau_object *);
171 172
173u64 nvc0_graph_units(struct nouveau_graph *);
174
172#endif 175#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 4857f913efdd..678c16f63055 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -77,11 +77,207 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
77 nv_wr32(priv, 0x409c20, ustat); 77 nv_wr32(priv, 0x409c20, ustat);
78} 78}
79 79
80static const struct nouveau_enum nve0_mp_warp_error[] = {
81 { 0x00, "NO_ERROR" },
82 { 0x01, "STACK_MISMATCH" },
83 { 0x05, "MISALIGNED_PC" },
84 { 0x08, "MISALIGNED_GPR" },
85 { 0x09, "INVALID_OPCODE" },
86 { 0x0d, "GPR_OUT_OF_BOUNDS" },
87 { 0x0e, "MEM_OUT_OF_BOUNDS" },
88 { 0x0f, "UNALIGNED_MEM_ACCESS" },
89 { 0x11, "INVALID_PARAM" },
90 {}
91};
92
93static const struct nouveau_enum nve0_mp_global_error[] = {
94 { 2, "MULTIPLE_WARP_ERRORS" },
95 { 3, "OUT_OF_STACK_SPACE" },
96 {}
97};
98
99static const struct nouveau_enum nve0_gpc_rop_error[] = {
100 { 1, "RT_PITCH_OVERRUN" },
101 { 4, "RT_WIDTH_OVERRUN" },
102 { 5, "RT_HEIGHT_OVERRUN" },
103 { 7, "ZETA_STORAGE_TYPE_MISMATCH" },
104 { 8, "RT_STORAGE_TYPE_MISMATCH" },
105 { 10, "RT_LINEAR_MISMATCH" },
106 {}
107};
108
109static const struct nouveau_enum nve0_sked_error[] = {
110 { 7, "CONSTANT_BUFFER_SIZE" },
111 { 9, "LOCAL_MEMORY_SIZE_POS" },
112 { 10, "LOCAL_MEMORY_SIZE_NEG" },
113 { 11, "WARP_CSTACK_SIZE" },
114 { 12, "TOTAL_TEMP_SIZE" },
115 { 13, "REGISTER_COUNT" },
116 { 18, "TOTAL_THREADS" },
117 { 20, "PROGRAM_OFFSET" },
118 { 21, "SHARED_MEMORY_SIZE" },
119 { 25, "SHARED_CONFIG_TOO_SMALL" },
120 { 26, "TOTAL_REGISTER_COUNT" },
121 {}
122};
123
124static void
125nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
126{
127 int i;
128 u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648));
129 u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650));
130
131 nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp);
132
133 for (i = 0; i <= 31; ++i) {
134 if (!(gerr & (1 << i)))
135 continue;
136 pr_cont(" ");
137 nouveau_enum_print(nve0_mp_global_error, i);
138 }
139 if (werr) {
140 pr_cont(" ");
141 nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);
142 }
143 pr_cont("\n");
144
145 /* disable MP trap to avoid spam */
146 nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0);
147
148 /* TODO: figure out how to resume after an MP trap */
149}
150
151static void
152nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
153{
154 u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508));
155
156 if (stat & 0x1) {
157 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224));
158 nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n",
159 gpc, tp, trap);
160
161 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
162 stat &= ~0x1;
163 }
164
165 if (stat & 0x2) {
166 nve0_graph_mp_trap(priv, gpc, tp);
167 stat &= ~0x2;
168 }
169
170 if (stat & 0x4) {
171 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084));
172 nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n",
173 gpc, tp, trap);
174
175 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
176 stat &= ~0x4;
177 }
178
179 if (stat & 0x8) {
180 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c));
181 nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n",
182 gpc, tp, trap);
183
184 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
185 stat &= ~0x8;
186 }
187
188 if (stat) {
189 nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n",
190 gpc, tp, stat);
191 }
192}
193
194static void
195nve0_graph_gpc_trap(struct nvc0_graph_priv *priv)
196{
197 const u32 mask = nv_rd32(priv, 0x400118);
198 int gpc;
199
200 for (gpc = 0; gpc < 4; ++gpc) {
201 u32 stat;
202 int tp;
203
204 if (!(mask & (1 << gpc)))
205 continue;
206 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
207
208 if (stat & 0x0001) {
209 u32 trap[4];
210 int i;
211
212 trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
213 trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
214 trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
215 trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
216
217 nv_error(priv, "GPC%i/PROP trap:", gpc);
218 for (i = 0; i <= 29; ++i) {
219 if (!(trap[0] & (1 << i)))
220 continue;
221 pr_cont(" ");
222 nouveau_enum_print(nve0_gpc_rop_error, i);
223 }
224 pr_cont("\n");
225
226 nv_error(priv, "x = %u, y = %u, "
227 "format = %x, storage type = %x\n",
228 trap[1] & 0xffff,
229 trap[1] >> 16,
230 (trap[2] >> 8) & 0x3f,
231 trap[3] & 0xff);
232
233 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
234 stat &= ~0x0001;
235 }
236
237 if (stat & 0x0002) {
238 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
239 nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc,
240 trap);
241 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
242 stat &= ~0x0002;
243 }
244
245 if (stat & 0x0004) {
246 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
247 nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc,
248 trap);
249 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
250 stat &= ~0x0004;
251 }
252
253 if (stat & 0x0008) {
254 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
255 nv_error(priv, "GPC%i/ESETUP trap: %08x\n", gpc,
256 trap);
257 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
258 stat &= ~0x0008;
259 }
260
261 for (tp = 0; tp < 8; ++tp) {
262 if (stat & (1 << (16 + tp)))
263 nve0_graph_tp_trap(priv, gpc, tp);
264 }
265 stat &= ~0xff0000;
266
267 if (stat) {
268 nv_error(priv, "GPC%i: unknown stat %08x\n",
269 gpc, stat);
270 }
271 }
272}
273
274
80static void 275static void
81nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst, 276nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
82 struct nouveau_object *engctx) 277 struct nouveau_object *engctx)
83{ 278{
84 u32 trap = nv_rd32(priv, 0x400108); 279 u32 trap = nv_rd32(priv, 0x400108);
280 int i;
85 int rop; 281 int rop;
86 282
87 if (trap & 0x00000001) { 283 if (trap & 0x00000001) {
@@ -102,6 +298,32 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
102 trap &= ~0x00000010; 298 trap &= ~0x00000010;
103 } 299 }
104 300
301 if (trap & 0x00000100) {
302 u32 stat = nv_rd32(priv, 0x407020);
303 nv_error(priv, "SKED ch %d [0x%010llx %s]:",
304 chid, inst, nouveau_client_name(engctx));
305
306 for (i = 0; i <= 29; ++i) {
307 if (!(stat & (1 << i)))
308 continue;
309 pr_cont(" ");
310 nouveau_enum_print(nve0_sked_error, i);
311 }
312 pr_cont("\n");
313
314 if (stat & 0x3fffffff)
315 nv_wr32(priv, 0x407020, 0x40000000);
316 nv_wr32(priv, 0x400108, 0x00000100);
317 trap &= ~0x00000100;
318 }
319
320 if (trap & 0x01000000) {
321 nv_error(priv, "GPC ch %d [0x%010llx %s]:\n",
322 chid, inst, nouveau_client_name(engctx));
323 nve0_graph_gpc_trap(priv);
324 trap &= ~0x01000000;
325 }
326
105 if (trap & 0x02000000) { 327 if (trap & 0x02000000) {
106 for (rop = 0; rop < priv->rop_nr; rop++) { 328 for (rop = 0; rop < priv->rop_nr; rop++) {
107 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); 329 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
@@ -217,6 +439,8 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
217 nv_engine(priv)->cclass = &nve0_graph_cclass; 439 nv_engine(priv)->cclass = &nve0_graph_cclass;
218 nv_engine(priv)->sclass = nve0_graph_sclass; 440 nv_engine(priv)->sclass = nve0_graph_sclass;
219 441
442 priv->base.units = nvc0_graph_units;
443
220 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { 444 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
221 nv_info(priv, "using external firmware\n"); 445 nv_info(priv, "using external firmware\n");
222 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || 446 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -227,11 +451,13 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
227 priv->firmware = true; 451 priv->firmware = true;
228 } 452 }
229 453
230 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4); 454 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
455 &priv->unk4188b4);
231 if (ret) 456 if (ret)
232 return ret; 457 return ret;
233 458
234 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8); 459 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
460 &priv->unk4188b8);
235 if (ret) 461 if (ret)
236 return ret; 462 return ret;
237 463
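The new nve0 trap handlers above all follow one decode-report-acknowledge shape: read a status word, name each set bit via nouveau_enum_print(), write the acknowledge value (0xc0000000, or the bit itself) back to the hardware register, and clear the bit from the local copy so anything left over can be reported as unknown. A minimal sketch of the bit-naming half, reusing the nouveau_enum convention from the tables above (the helper name is illustrative):

/* Illustrative helper: print a name for every set bit in 'stat'
 * from a nouveau_enum table, as the handlers above do inline. */
static void report_error_bits(const struct nouveau_enum *table, u32 stat)
{
        int i;

        for (i = 0; i < 32; i++) {
                if (!(stat & (1 << i)))
                        continue;
                pr_cont(" ");
                nouveau_enum_print(table, i);
        }
        pr_cont("\n");
}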
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index a523eaad47e3..d698e710ddd4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -94,6 +94,32 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
94 return -EINVAL; 94 return -EINVAL;
95} 95}
96 96
97static int
98nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
99 void *args, u32 size)
100{
101 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
102 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
103 u32 data = *(u32 *)args;
104
105 switch (mthd) {
106 case 0x600:
107 nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
108 break;
109 case 0x644:
110 if (data & ~0x1ffffe)
111 return -EINVAL;
112 nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
113 break;
114 case 0x6ac:
115 nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
116 break;
117 default:
118 return -EINVAL;
119 }
120 return 0;
121}
122
97static struct nouveau_omthds 123static struct nouveau_omthds
98nvc0_software_omthds[] = { 124nvc0_software_omthds[] = {
99 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, 125 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
@@ -101,6 +127,9 @@ nvc0_software_omthds[] = {
101 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, 127 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
102 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, 128 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
103 { 0x0500, 0x0500, nvc0_software_mthd_flip }, 129 { 0x0500, 0x0500, nvc0_software_mthd_flip },
130 { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
131 { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
132 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
104 {} 133 {}
105}; 134};
106 135
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 92d3ab11d962..0a393f7f055f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -169,6 +169,7 @@ struct nv04_display_class {
169 * 8570: NVA3_DISP 169 * 8570: NVA3_DISP
170 * 9070: NVD0_DISP 170 * 9070: NVD0_DISP
171 * 9170: NVE0_DISP 171 * 9170: NVE0_DISP
172 * 9270: NVF0_DISP
172 */ 173 */
173 174
174#define NV50_DISP_CLASS 0x00005070 175#define NV50_DISP_CLASS 0x00005070
@@ -178,6 +179,7 @@ struct nv04_display_class {
178#define NVA3_DISP_CLASS 0x00008570 179#define NVA3_DISP_CLASS 0x00008570
179#define NVD0_DISP_CLASS 0x00009070 180#define NVD0_DISP_CLASS 0x00009070
180#define NVE0_DISP_CLASS 0x00009170 181#define NVE0_DISP_CLASS 0x00009170
182#define NVF0_DISP_CLASS 0x00009270
181 183
182#define NV50_DISP_SOR_MTHD 0x00010000 184#define NV50_DISP_SOR_MTHD 0x00010000
183#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 185#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
@@ -246,6 +248,7 @@ struct nv50_display_class {
246 * 857a: NVA3_DISP_CURS 248 * 857a: NVA3_DISP_CURS
247 * 907a: NVD0_DISP_CURS 249 * 907a: NVD0_DISP_CURS
248 * 917a: NVE0_DISP_CURS 250 * 917a: NVE0_DISP_CURS
251 * 927a: NVF0_DISP_CURS
249 */ 252 */
250 253
251#define NV50_DISP_CURS_CLASS 0x0000507a 254#define NV50_DISP_CURS_CLASS 0x0000507a
@@ -255,6 +258,7 @@ struct nv50_display_class {
255#define NVA3_DISP_CURS_CLASS 0x0000857a 258#define NVA3_DISP_CURS_CLASS 0x0000857a
256#define NVD0_DISP_CURS_CLASS 0x0000907a 259#define NVD0_DISP_CURS_CLASS 0x0000907a
257#define NVE0_DISP_CURS_CLASS 0x0000917a 260#define NVE0_DISP_CURS_CLASS 0x0000917a
261#define NVF0_DISP_CURS_CLASS 0x0000927a
258 262
259struct nv50_display_curs_class { 263struct nv50_display_curs_class {
260 u32 head; 264 u32 head;
@@ -267,6 +271,7 @@ struct nv50_display_curs_class {
267 * 857b: NVA3_DISP_OIMM 271 * 857b: NVA3_DISP_OIMM
268 * 907b: NVD0_DISP_OIMM 272 * 907b: NVD0_DISP_OIMM
269 * 917b: NVE0_DISP_OIMM 273 * 917b: NVE0_DISP_OIMM
274 * 927b: NVF0_DISP_OIMM
270 */ 275 */
271 276
272#define NV50_DISP_OIMM_CLASS 0x0000507b 277#define NV50_DISP_OIMM_CLASS 0x0000507b
@@ -276,6 +281,7 @@ struct nv50_display_curs_class {
276#define NVA3_DISP_OIMM_CLASS 0x0000857b 281#define NVA3_DISP_OIMM_CLASS 0x0000857b
277#define NVD0_DISP_OIMM_CLASS 0x0000907b 282#define NVD0_DISP_OIMM_CLASS 0x0000907b
278#define NVE0_DISP_OIMM_CLASS 0x0000917b 283#define NVE0_DISP_OIMM_CLASS 0x0000917b
284#define NVF0_DISP_OIMM_CLASS 0x0000927b
279 285
280struct nv50_display_oimm_class { 286struct nv50_display_oimm_class {
281 u32 head; 287 u32 head;
@@ -288,6 +294,7 @@ struct nv50_display_oimm_class {
288 * 857c: NVA3_DISP_SYNC 294 * 857c: NVA3_DISP_SYNC
289 * 907c: NVD0_DISP_SYNC 295 * 907c: NVD0_DISP_SYNC
290 * 917c: NVE0_DISP_SYNC 296 * 917c: NVE0_DISP_SYNC
297 * 927c: NVF0_DISP_SYNC
291 */ 298 */
292 299
293#define NV50_DISP_SYNC_CLASS 0x0000507c 300#define NV50_DISP_SYNC_CLASS 0x0000507c
@@ -297,6 +304,7 @@ struct nv50_display_oimm_class {
297#define NVA3_DISP_SYNC_CLASS 0x0000857c 304#define NVA3_DISP_SYNC_CLASS 0x0000857c
298#define NVD0_DISP_SYNC_CLASS 0x0000907c 305#define NVD0_DISP_SYNC_CLASS 0x0000907c
299#define NVE0_DISP_SYNC_CLASS 0x0000917c 306#define NVE0_DISP_SYNC_CLASS 0x0000917c
307#define NVF0_DISP_SYNC_CLASS 0x0000927c
300 308
301struct nv50_display_sync_class { 309struct nv50_display_sync_class {
302 u32 pushbuf; 310 u32 pushbuf;
@@ -310,6 +318,7 @@ struct nv50_display_sync_class {
310 * 857d: NVA3_DISP_MAST 318 * 857d: NVA3_DISP_MAST
311 * 907d: NVD0_DISP_MAST 319 * 907d: NVD0_DISP_MAST
312 * 917d: NVE0_DISP_MAST 320 * 917d: NVE0_DISP_MAST
321 * 927d: NVF0_DISP_MAST
313 */ 322 */
314 323
315#define NV50_DISP_MAST_CLASS 0x0000507d 324#define NV50_DISP_MAST_CLASS 0x0000507d
@@ -319,6 +328,7 @@ struct nv50_display_sync_class {
319#define NVA3_DISP_MAST_CLASS 0x0000857d 328#define NVA3_DISP_MAST_CLASS 0x0000857d
320#define NVD0_DISP_MAST_CLASS 0x0000907d 329#define NVD0_DISP_MAST_CLASS 0x0000907d
321#define NVE0_DISP_MAST_CLASS 0x0000917d 330#define NVE0_DISP_MAST_CLASS 0x0000917d
331#define NVF0_DISP_MAST_CLASS 0x0000927d
322 332
323struct nv50_display_mast_class { 333struct nv50_display_mast_class {
324 u32 pushbuf; 334 u32 pushbuf;
@@ -331,6 +341,7 @@ struct nv50_display_mast_class {
331 * 857e: NVA3_DISP_OVLY 341 * 857e: NVA3_DISP_OVLY
332 * 907e: NVD0_DISP_OVLY 342 * 907e: NVD0_DISP_OVLY
333 * 917e: NVE0_DISP_OVLY 343 * 917e: NVE0_DISP_OVLY
344 * 927e: NVF0_DISP_OVLY
334 */ 345 */
335 346
336#define NV50_DISP_OVLY_CLASS 0x0000507e 347#define NV50_DISP_OVLY_CLASS 0x0000507e
@@ -340,6 +351,7 @@ struct nv50_display_mast_class {
340#define NVA3_DISP_OVLY_CLASS 0x0000857e 351#define NVA3_DISP_OVLY_CLASS 0x0000857e
341#define NVD0_DISP_OVLY_CLASS 0x0000907e 352#define NVD0_DISP_OVLY_CLASS 0x0000907e
342#define NVE0_DISP_OVLY_CLASS 0x0000917e 353#define NVE0_DISP_OVLY_CLASS 0x0000917e
354#define NVF0_DISP_OVLY_CLASS 0x0000927e
343 355
344struct nv50_display_ovly_class { 356struct nv50_display_ovly_class {
345 u32 pushbuf; 357 u32 pushbuf;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index d351a4e5819c..05840f3eee98 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -6,7 +6,7 @@
6#include <core/engine.h> 6#include <core/engine.h>
7 7
8enum nv_subdev_type { 8enum nv_subdev_type {
9 NVDEV_SUBDEV_DEVICE, 9 NVDEV_ENGINE_DEVICE,
10 NVDEV_SUBDEV_VBIOS, 10 NVDEV_SUBDEV_VBIOS,
11 11
12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before 12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
@@ -57,7 +57,7 @@ enum nv_subdev_type {
57}; 57};
58 58
59struct nouveau_device { 59struct nouveau_device {
60 struct nouveau_subdev base; 60 struct nouveau_engine base;
61 struct list_head head; 61 struct list_head head;
62 62
63 struct pci_dev *pdev; 63 struct pci_dev *pdev;
@@ -99,7 +99,7 @@ nv_device(void *obj)
99 99
100#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA 100#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
101 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) || 101 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
102 (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) { 102 (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
103 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x", 103 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
104 nv_hclass(object), nv_hclass(device)); 104 nv_hclass(object), nv_hclass(device));
105 } 105 }
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 31cd852c96df..9f5ea900ff00 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -51,8 +51,8 @@ int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
51void nouveau_parent_destroy(struct nouveau_parent *); 51void nouveau_parent_destroy(struct nouveau_parent *);
52 52
53void _nouveau_parent_dtor(struct nouveau_object *); 53void _nouveau_parent_dtor(struct nouveau_object *);
54#define _nouveau_parent_init _nouveau_object_init 54#define _nouveau_parent_init nouveau_object_init
55#define _nouveau_parent_fini _nouveau_object_fini 55#define _nouveau_parent_fini nouveau_object_fini
56 56
57int nouveau_parent_sclass(struct nouveau_object *, u16 handle, 57int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
58 struct nouveau_object **pengine, 58 struct nouveau_object **pengine,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h
index c9e4c4afa50e..b3dd2c4c2f1e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -18,7 +18,6 @@ int nv50_identify(struct nouveau_device *);
18int nvc0_identify(struct nouveau_device *); 18int nvc0_identify(struct nouveau_device *);
19int nve0_identify(struct nouveau_device *); 19int nve0_identify(struct nouveau_device *);
20 20
21extern struct nouveau_oclass nouveau_device_sclass[];
22struct nouveau_device *nouveau_device_find(u64 name); 21struct nouveau_device *nouveau_device_find(u64 name);
23 22
24#endif 23#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 28da6772c095..4b21fabfbddb 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -44,5 +44,6 @@ extern struct nouveau_oclass nv94_disp_oclass;
44extern struct nouveau_oclass nva3_disp_oclass; 44extern struct nouveau_oclass nva3_disp_oclass;
45extern struct nouveau_oclass nvd0_disp_oclass; 45extern struct nouveau_oclass nvd0_disp_oclass;
46extern struct nouveau_oclass nve0_disp_oclass; 46extern struct nouveau_oclass nve0_disp_oclass;
47extern struct nouveau_oclass nvf0_disp_oclass;
47 48
48#endif 49#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b46c197709f3..633c2f806482 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,7 +65,8 @@ struct nouveau_fifo_base {
65struct nouveau_fifo { 65struct nouveau_fifo {
66 struct nouveau_engine base; 66 struct nouveau_engine base;
67 67
68 struct nouveau_event *uevent; 68 struct nouveau_event *cevent; /* channel creation event */
69 struct nouveau_event *uevent; /* async user trigger */
69 70
70 struct nouveau_object **channel; 71 struct nouveau_object **channel;
71 spinlock_t lock; 72 spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 6943b40d0817..5d392439f2ac 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -26,6 +26,10 @@ struct nouveau_graph_chan {
26 26
27struct nouveau_graph { 27struct nouveau_graph {
28 struct nouveau_engine base; 28 struct nouveau_engine base;
29
30 /* Returns chipset-specific counts of units packed into a u64.
31 */
32 u64 (*units)(struct nouveau_graph *);
29}; 33};
30 34
31static inline struct nouveau_graph * 35static inline struct nouveau_graph *
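The packing behind the new hook is defined earlier in this diff: nvc0_graph_units() puts the GPC count in bits 0-7, the total TPC count from bit 8, and the ROP count from bit 32, while nv40/nv50 simply return register 0x1540 raw. A hedged decode sketch for nvc0-style consumers (field widths beyond what the encoder writes are assumptions):

/* Illustrative decode of the nvc0-style packed value. */
static void graph_units_decode(u64 units, u32 *gpc, u32 *tpc, u32 *rop)
{
        *gpc = units & 0xff;            /* (u32)priv->gpc_nr         */
        *tpc = (units >> 8) & 0xffffff; /* (u32)priv->tpc_total << 8 */
        *rop = (units >> 32) & 0xff;    /* (u64)priv->rop_nr << 32   */
}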
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
index f351f63bc654..a1985ed3d58d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -4,8 +4,15 @@
4#include <core/subdev.h> 4#include <core/subdev.h>
5#include <core/device.h> 5#include <core/device.h>
6 6
7struct nouveau_mm_node;
8
7struct nouveau_ltcg { 9struct nouveau_ltcg {
8 struct nouveau_subdev base; 10 struct nouveau_subdev base;
11
12 int (*tags_alloc)(struct nouveau_ltcg *, u32 count,
13 struct nouveau_mm_node **);
14 void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
15 void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
9}; 16};
10 17
11static inline struct nouveau_ltcg * 18static inline struct nouveau_ltcg *
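These hooks let the fb layer reserve, clear, and release compression tag lines without knowing LTCG internals. A hedged usage sketch, mirroring the nvc0 fb and ltcg code later in this diff; the offset/length field names come from struct nouveau_mm_node:

/* Illustrative call sequence for the new tag hooks. */
struct nouveau_mm_node *tag = NULL;

ltcg->tags_alloc(ltcg, count, &tag);   /* reserve 'count' tag lines */
if (tag)
        ltcg->tags_clear(ltcg, tag->offset, tag->length);
/* ... memory is used with compression enabled ... */
if (tag)
        ltcg->tags_free(ltcg, &tag);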
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index fded97cea500..d5502267c30f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -21,18 +21,22 @@ nouveau_mc(void *obj)
21} 21}
22 22
23#define nouveau_mc_create(p,e,o,d) \ 23#define nouveau_mc_create(p,e,o,d) \
24 nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master", \ 24 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
25 sizeof(**d), (void **)d) 25#define nouveau_mc_destroy(p) ({ \
26#define nouveau_mc_destroy(p) \ 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
27 nouveau_subdev_destroy(&(p)->base) 27})
28#define nouveau_mc_init(p) \ 28#define nouveau_mc_init(p) ({ \
29 nouveau_subdev_init(&(p)->base) 29 struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
30#define nouveau_mc_fini(p,s) \ 30})
31 nouveau_subdev_fini(&(p)->base, (s)) 31#define nouveau_mc_fini(p,s) ({ \
32 32 struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
33#define _nouveau_mc_dtor _nouveau_subdev_dtor 33})
34#define _nouveau_mc_init _nouveau_subdev_init 34
35#define _nouveau_mc_fini _nouveau_subdev_fini 35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
36 struct nouveau_oclass *, int, void **);
37void _nouveau_mc_dtor(struct nouveau_object *);
38int _nouveau_mc_init(struct nouveau_object *);
39int _nouveau_mc_fini(struct nouveau_object *, bool);
36 40
37extern struct nouveau_oclass nv04_mc_oclass; 41extern struct nouveau_oclass nv04_mc_oclass;
38extern struct nouveau_oclass nv44_mc_oclass; 42extern struct nouveau_oclass nv44_mc_oclass;
@@ -40,8 +44,6 @@ extern struct nouveau_oclass nv50_mc_oclass;
40extern struct nouveau_oclass nv98_mc_oclass; 44extern struct nouveau_oclass nv98_mc_oclass;
41extern struct nouveau_oclass nvc0_mc_oclass; 45extern struct nouveau_oclass nvc0_mc_oclass;
42 46
43void nouveau_mc_intr(struct nouveau_subdev *);
44
45extern const struct nouveau_mc_intr nv04_mc_intr[]; 47extern const struct nouveau_mc_intr nv04_mc_intr[];
46int nv04_mc_init(struct nouveau_object *); 48int nv04_mc_init(struct nouveau_object *);
47int nv50_mc_init(struct nouveau_object *); 49int nv50_mc_init(struct nouveau_object *);
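The rebuilt wrappers use GNU statement expressions so the argument is evaluated exactly once and type-checked against struct nouveau_mc before being forwarded; the actual work moves into real _nouveau_mc_*() functions that chipset subclasses can reuse directly. The pattern in isolation:

/* Statement-expression wrapper, as used by the macros above:
 * bind once, type-check, forward to the class implementation. */
#define nouveau_mc_init(p) ({                                        \
        struct nouveau_mc *pmc = (p);                                \
        _nouveau_mc_init(nv_object(pmc));                            \
})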
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 0b20fc0d19c1..c075998d82e6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -73,6 +73,7 @@ int _nouveau_therm_fini(struct nouveau_object *, bool);
73 73
74extern struct nouveau_oclass nv40_therm_oclass; 74extern struct nouveau_oclass nv40_therm_oclass;
75extern struct nouveau_oclass nv50_therm_oclass; 75extern struct nouveau_oclass nv50_therm_oclass;
76extern struct nouveau_oclass nv84_therm_oclass;
76extern struct nouveau_oclass nva3_therm_oclass; 77extern struct nouveau_oclass nva3_therm_oclass;
77extern struct nouveau_oclass nvd0_therm_oclass; 78extern struct nouveau_oclass nvd0_therm_oclass;
78 79
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index eb496033b55c..3bd9be2ab37f 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -17,6 +17,7 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/dmi.h> 18#include <linux/dmi.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/interrupt.h>
20 21
21#include <asm/unaligned.h> 22#include <asm/unaligned.h>
22 23
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index c3acf5b70d9e..649f1ced1fe0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -122,18 +122,20 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
122 if (ret) 122 if (ret)
123 return ret; 123 return ret;
124 124
125 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP, 125 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
126 &priv->mem); 126 NVOBJ_FLAG_HEAP, &priv->mem);
127 heap = nv_object(priv->mem); 127 heap = nv_object(priv->mem);
128 if (ret) 128 if (ret)
129 return ret; 129 return ret;
130 130
131 ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ? 131 ret = nouveau_gpuobj_new(nv_object(priv), heap,
132 0x1400 : 0x0200, 0, 0, &priv->pad); 132 (device->chipset == 0x50) ? 0x1400 : 0x0200,
133 0, 0, &priv->pad);
133 if (ret) 134 if (ret)
134 return ret; 135 return ret;
135 136
136 ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd); 137 ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
138 0, &priv->pgd);
137 if (ret) 139 if (ret)
138 return ret; 140 return ret;
139 141
@@ -145,9 +147,9 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
145 if (ret) 147 if (ret)
146 return ret; 148 return ret;
147 149
148 ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8, 150 ret = nouveau_gpuobj_new(nv_object(priv), heap,
149 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 151 ((limit-- - start) >> 12) * 8, 0x1000,
150 &vm->pgt[0].obj[0]); 152 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
151 vm->pgt[0].refcount[0] = 1; 153 vm->pgt[0].refcount[0] = 1;
152 if (ret) 154 if (ret)
153 return ret; 155 return ret;
@@ -157,7 +159,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
157 if (ret) 159 if (ret)
158 return ret; 160 return ret;
159 161
160 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3); 162 ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
161 if (ret) 163 if (ret)
162 return ret; 164 return ret;
163 165
@@ -182,7 +184,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
182 if (ret) 184 if (ret)
183 return ret; 185 return ret;
184 186
185 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1); 187 ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
186 if (ret) 188 if (ret)
187 return ret; 189 return ret;
188 190
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 77a6fb725d3f..f8a44956dec1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -101,12 +101,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
101 return ret; 101 return ret;
102 102
103 /* BAR3 */ 103 /* BAR3 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem); 104 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
105 &priv->bar[0].mem);
105 mem = priv->bar[0].mem; 106 mem = priv->bar[0].mem;
106 if (ret) 107 if (ret)
107 return ret; 108 return ret;
108 109
109 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd); 110 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
111 &priv->bar[0].pgd);
110 if (ret) 112 if (ret)
111 return ret; 113 return ret;
112 114
@@ -114,7 +116,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
114 if (ret) 116 if (ret)
115 return ret; 117 return ret;
116 118
117 ret = nouveau_gpuobj_new(parent, NULL, 119 ret = nouveau_gpuobj_new(nv_object(priv), NULL,
118 (pci_resource_len(pdev, 3) >> 12) * 8, 120 (pci_resource_len(pdev, 3) >> 12) * 8,
119 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 121 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
120 &vm->pgt[0].obj[0]); 122 &vm->pgt[0].obj[0]);
@@ -133,12 +135,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
133 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1)); 135 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
134 136
135 /* BAR1 */ 137 /* BAR1 */
136 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem); 138 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
139 &priv->bar[1].mem);
137 mem = priv->bar[1].mem; 140 mem = priv->bar[1].mem;
138 if (ret) 141 if (ret)
139 return ret; 142 return ret;
140 143
141 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd); 144 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
145 &priv->bar[1].pgd);
142 if (ret) 146 if (ret)
143 return ret; 147 return ret;
144 148
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9c41b58d57e2..c300b5e7b670 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -64,27 +64,33 @@ init_exec_force(struct nvbios_init *init, bool exec)
64static inline int 64static inline int
65init_or(struct nvbios_init *init) 65init_or(struct nvbios_init *init)
66{ 66{
67 if (init->outp) 67 if (init_exec(init)) {
68 return ffs(init->outp->or) - 1; 68 if (init->outp)
69 error("script needs OR!!\n"); 69 return ffs(init->outp->or) - 1;
70 error("script needs OR!!\n");
71 }
70 return 0; 72 return 0;
71} 73}
72 74
73static inline int 75static inline int
74init_link(struct nvbios_init *init) 76init_link(struct nvbios_init *init)
75{ 77{
76 if (init->outp) 78 if (init_exec(init)) {
77 return !(init->outp->sorconf.link & 1); 79 if (init->outp)
78 error("script needs OR link\n"); 80 return !(init->outp->sorconf.link & 1);
81 error("script needs OR link\n");
82 }
79 return 0; 83 return 0;
80} 84}
81 85
82static inline int 86static inline int
83init_crtc(struct nvbios_init *init) 87init_crtc(struct nvbios_init *init)
84{ 88{
85 if (init->crtc >= 0) 89 if (init_exec(init)) {
86 return init->crtc; 90 if (init->crtc >= 0)
87 error("script needs crtc\n"); 91 return init->crtc;
92 error("script needs crtc\n");
93 }
88 return 0; 94 return 0;
89} 95}
90 96
@@ -92,16 +98,21 @@ static u8
92init_conn(struct nvbios_init *init) 98init_conn(struct nvbios_init *init)
93{ 99{
94 struct nouveau_bios *bios = init->bios; 100 struct nouveau_bios *bios = init->bios;
101 u8 ver, len;
102 u16 conn;
95 103
96 if (init->outp) { 104 if (init_exec(init)) {
97 u8 ver, len; 105 if (init->outp) {
98 u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len); 106 conn = init->outp->connector;
99 if (conn) 107 conn = dcb_conn(bios, conn, &ver, &len);
100 return nv_ro08(bios, conn); 108 if (conn)
109 return nv_ro08(bios, conn);
110 }
111
112 error("script needs connector type\n");
101 } 113 }
102 114
103 error("script needs connector type\n"); 115 return 0xff;
104 return 0x00;
105} 116}
106 117
107static inline u32 118static inline u32
@@ -227,7 +238,8 @@ init_i2c(struct nvbios_init *init, int index)
227 } else 238 } else
228 if (index < 0) { 239 if (index < 0) {
229 if (!init->outp) { 240 if (!init->outp) {
230 error("script needs output for i2c\n"); 241 if (init_exec(init))
242 error("script needs output for i2c\n");
231 return NULL; 243 return NULL;
232 } 244 }
233 245
@@ -544,7 +556,8 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
544 return 0x6808b0 + dacoffset; 556 return 0x6808b0 + dacoffset;
545 } 557 }
546 558
547 error("tmds opcodes need dcb\n"); 559 if (init_exec(init))
560 error("tmds opcodes need dcb\n");
548 } else { 561 } else {
549 if (tmds < ARRAY_SIZE(pramdac_table)) 562 if (tmds < ARRAY_SIZE(pramdac_table))
550 return pramdac_table[tmds]; 563 return pramdac_table[tmds];
@@ -792,7 +805,8 @@ init_dp_condition(struct nvbios_init *init)
792 break; 805 break;
793 } 806 }
794 807
795 warn("script needs dp output table data\n"); 808 if (init_exec(init))
809 warn("script needs dp output table data\n");
796 break; 810 break;
797 case 5: 811 case 5:
798 if (!(init_rdauxr(init, 0x0d) & 1)) 812 if (!(init_rdauxr(init, 0x0d) & 1))
@@ -816,7 +830,7 @@ init_io_mask_or(struct nvbios_init *init)
816 u8 or = init_or(init); 830 u8 or = init_or(init);
817 u8 data; 831 u8 data;
818 832
819 trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or); 833 trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
820 init->offset += 2; 834 init->offset += 2;
821 835
822 data = init_rdvgai(init, 0x03d4, index); 836 data = init_rdvgai(init, 0x03d4, index);
@@ -835,7 +849,7 @@ init_io_or(struct nvbios_init *init)
835 u8 or = init_or(init); 849 u8 or = init_or(init);
836 u8 data; 850 u8 data;
837 851
838 trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or); 852 trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
839 init->offset += 2; 853 init->offset += 2;
840 854
841 data = init_rdvgai(init, 0x03d4, index); 855 data = init_rdvgai(init, 0x03d4, index);
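All of the bios/init.c hunks above apply one idea: a VBIOS init script can be parsed without being executed, and in that mode complaints about missing OR/link/crtc/connector context are pure noise, so every such error()/warn() is now gated on init_exec(). Note also that init_conn() now returns 0xff instead of 0x00 when no connector type can be resolved. The guard idiom in isolation, using init_or() as the example:

/* Complain about missing script context only while actually
 * executing opcodes; a parse-only pass stays quiet. */
if (init_exec(init)) {
        if (init->outp)
                return ffs(init->outp->or) - 1;
        error("script needs OR!!\n");
}
return 0; /* harmless default for the non-executing pass */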
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 7606ed15b6fa..86ad59203c8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <subdev/fb.h> 25#include <subdev/fb.h>
26#include <subdev/ltcg.h>
26#include <subdev/bios.h> 27#include <subdev/bios.h>
27 28
28struct nvc0_fb_priv { 29struct nvc0_fb_priv {
@@ -31,34 +32,14 @@ struct nvc0_fb_priv {
31 dma_addr_t r100c10; 32 dma_addr_t r100c10;
32}; 33};
33 34
34/* 0 = unsupported 35extern const u8 nvc0_pte_storage_type_map[256];
35 * 1 = non-compressed 36
36 * 3 = compressed
37 */
38static const u8 types[256] = {
39 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
40 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
41 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
42 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
43 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
46 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
47 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
48 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
49 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
50 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
51 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
52 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
53 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
54 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
55};
56 37
57static bool 38static bool
58nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) 39nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
59{ 40{
60 u8 memtype = (tile_flags & 0x0000ff00) >> 8; 41 u8 memtype = (tile_flags & 0x0000ff00) >> 8;
61 return likely((types[memtype] == 1)); 42 return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
62} 43}
63 44
64static int 45static int
@@ -130,6 +111,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
130 int type = (memtype & 0x0ff); 111 int type = (memtype & 0x0ff);
131 int back = (memtype & 0x800); 112 int back = (memtype & 0x800);
132 int ret; 113 int ret;
114 const bool comp = nvc0_pte_storage_type_map[type] != type;
133 115
134 size >>= 12; 116 size >>= 12;
135 align >>= 12; 117 align >>= 12;
@@ -142,10 +124,22 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
142 return -ENOMEM; 124 return -ENOMEM;
143 125
144 INIT_LIST_HEAD(&mem->regions); 126 INIT_LIST_HEAD(&mem->regions);
145 mem->memtype = type;
146 mem->size = size; 127 mem->size = size;
147 128
148 mutex_lock(&pfb->base.mutex); 129 mutex_lock(&pfb->base.mutex);
130 if (comp) {
131 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
132
133 /* compression only works with lpages */
134 if (align == (1 << (17 - 12))) {
135 int n = size >> 5;
136 ltcg->tags_alloc(ltcg, n, &mem->tag);
137 }
138 if (unlikely(!mem->tag))
139 type = nvc0_pte_storage_type_map[type];
140 }
141 mem->memtype = type;
142
149 do { 143 do {
150 if (back) 144 if (back)
151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 145 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
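The constants in the compression branch above are easier to follow with the units spelled out; a worked example under the assumptions visible in the code:

/* size and align are in 4 KiB units after the >>= 12 shifts.
 * A large page is 1 << 17 bytes = 128 KiB = 32 small pages, so
 * align == (1 << (17 - 12)) == 32 tests for large-page alignment,
 * and n = size >> 5 requests one tag line per 128 KiB chunk.
 * Example: a 1 MiB allocation gives size = 256, n = 256 >> 5 = 8.
 * When no tags can be had, the type silently falls back to its
 * uncompressed equivalent via nvc0_pte_storage_type_map[]. */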
@@ -168,6 +162,17 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
168 return 0; 162 return 0;
169} 163}
170 164
165static void
166nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
167{
168 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
169
170 if ((*pmem)->tag)
171 ltcg->tags_free(ltcg, &(*pmem)->tag);
172
173 nv50_fb_vram_del(pfb, pmem);
174}
175
171static int 176static int
172nvc0_fb_init(struct nouveau_object *object) 177nvc0_fb_init(struct nouveau_object *object)
173{ 178{
@@ -178,7 +183,8 @@ nvc0_fb_init(struct nouveau_object *object)
178 if (ret) 183 if (ret)
179 return ret; 184 return ret;
180 185
181 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); 186 if (priv->r100c10_page)
187 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
182 return 0; 188 return 0;
183} 189}
184 190
@@ -214,16 +220,16 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
214 priv->base.memtype_valid = nvc0_fb_memtype_valid; 220 priv->base.memtype_valid = nvc0_fb_memtype_valid;
215 priv->base.ram.init = nvc0_fb_vram_init; 221 priv->base.ram.init = nvc0_fb_vram_init;
216 priv->base.ram.get = nvc0_fb_vram_new; 222 priv->base.ram.get = nvc0_fb_vram_new;
217 priv->base.ram.put = nv50_fb_vram_del; 223 priv->base.ram.put = nvc0_fb_vram_del;
218 224
219 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 225 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
220 if (!priv->r100c10_page) 226 if (priv->r100c10_page) {
221 return -ENOMEM; 227 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
222 228 0, PAGE_SIZE,
223 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0, 229 PCI_DMA_BIDIRECTIONAL);
224 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 230 if (pci_dma_mapping_error(device->pdev, priv->r100c10))
225 if (pci_dma_mapping_error(device->pdev, priv->r100c10)) 231 return -EFAULT;
226 return -EFAULT; 232 }
227 233
228 return nouveau_fb_preinit(&priv->base); 234 return nouveau_fb_preinit(&priv->base);
229} 235}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2e98e8a3f1aa..8ae2625415e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -140,12 +140,8 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
140 } 140 }
141 141
142 /* drop port's i2c subdev refcount, i2c handles this itself */ 142 /* drop port's i2c subdev refcount, i2c handles this itself */
143 if (ret == 0) { 143 if (ret == 0)
144 list_add_tail(&port->head, &i2c->ports); 144 list_add_tail(&port->head, &i2c->ports);
145 atomic_dec(&parent->refcount);
146 atomic_dec(&engine->refcount);
147 }
148
149 return ret; 145 return ret;
150} 146}
151 147
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index f5bbd3834116..795393d7b2f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -93,7 +93,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
93 u32 size, u32 align, struct nouveau_object **pobject) 93 u32 size, u32 align, struct nouveau_object **pobject)
94{ 94{
95 struct nouveau_object *engine = nv_object(imem); 95 struct nouveau_object *engine = nv_object(imem);
96 struct nv04_instmem_priv *priv = (void *)(imem);
97 int ret; 96 int ret;
98 97
99 ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass, 98 ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
@@ -101,14 +100,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
101 if (ret) 100 if (ret)
102 return ret; 101 return ret;
103 102
104 /* INSTMEM itself creates objects to reserve (and preserve across
105 * suspend/resume) various fixed data locations, each one of these
106 * takes a reference on INSTMEM itself, causing it to never be
107 * freed. We drop all the self-references here to avoid this.
108 */
109 if (unlikely(!priv->created))
110 atomic_dec(&engine->refcount);
111
112 return 0; 103 return 0;
113} 104}
114 105
@@ -134,27 +125,28 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
134 return ret; 125 return ret;
135 126
136 /* 0x00000-0x10000: reserve for probable vbios image */ 127 /* 0x00000-0x10000: reserve for probable vbios image */
137 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios); 128 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
129 &priv->vbios);
138 if (ret) 130 if (ret)
139 return ret; 131 return ret;
140 132
141 /* 0x10000-0x18000: reserve for RAMHT */ 133 /* 0x10000-0x18000: reserve for RAMHT */
142 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht); 134 ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
143 if (ret) 135 if (ret)
144 return ret; 136 return ret;
145 137
146 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */ 138 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
147 ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0, 139 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
148 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); 140 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
149 if (ret) 141 if (ret)
150 return ret; 142 return ret;
151 143
152 /* 0x18800-0x18a00: reserve for RAMRO */ 144 /* 0x18800-0x18a00: reserve for RAMRO */
153 ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro); 145 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
146 &priv->ramro);
154 if (ret) 147 if (ret)
155 return ret; 148 return ret;
156 149
157 priv->created = true;
158 return 0; 150 return 0;
159} 151}
160 152
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index 7983d8d9b358..b15b61310236 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -9,7 +9,6 @@
9 9
10struct nv04_instmem_priv { 10struct nv04_instmem_priv {
11 struct nouveau_instmem base; 11 struct nouveau_instmem base;
12 bool created;
13 12
14 void __iomem *iomem; 13 void __iomem *iomem;
15 struct nouveau_mm heap; 14 struct nouveau_mm heap;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index da64253201ef..716bf41bc3c1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -82,31 +82,33 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
82 return ret; 82 return ret;
83 83
84 /* 0x00000-0x10000: reserve for probable vbios image */ 84 /* 0x00000-0x10000: reserve for probable vbios image */
85 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios); 85 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
86 &priv->vbios);
86 if (ret) 87 if (ret)
87 return ret; 88 return ret;
88 89
89 /* 0x10000-0x18000: reserve for RAMHT */ 90 /* 0x10000-0x18000: reserve for RAMHT */
90 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht); 91 ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
92 &priv->ramht);
91 if (ret) 93 if (ret)
92 return ret; 94 return ret;
93 95
94 /* 0x18000-0x18200: reserve for RAMRO 96 /* 0x18000-0x18200: reserve for RAMRO
95 * 0x18200-0x20000: padding 97 * 0x18200-0x20000: padding
96 */ 98 */
97 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro); 99 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
100 &priv->ramro);
98 if (ret) 101 if (ret)
99 return ret; 102 return ret;
100 103
101 /* 0x20000-0x21000: reserve for RAMFC 104 /* 0x20000-0x21000: reserve for RAMFC
102 * 0x21000-0x40000: padding and some unknown crap 105 * 0x21000-0x40000: padding and some unknown crap
103 */ 106 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, 107 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
105 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); 108 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
106 if (ret) 109 if (ret)
107 return ret; 110 return ret;
108 111
109 priv->created = true;
110 return 0; 112 return 0;
111} 113}
112 114
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index 078a2b9d6bd6..e4940fb166e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -23,10 +23,17 @@
23 */ 23 */
24 24
25#include <subdev/ltcg.h> 25#include <subdev/ltcg.h>
26#include <subdev/fb.h>
27#include <subdev/timer.h>
26 28
27struct nvc0_ltcg_priv { 29struct nvc0_ltcg_priv {
28 struct nouveau_ltcg base; 30 struct nouveau_ltcg base;
31 u32 part_nr;
32 u32 part_mask;
29 u32 subp_nr; 33 u32 subp_nr;
34 struct nouveau_mm tags;
35 u32 num_tags;
36 struct nouveau_mm_node *tag_ram;
30}; 37};
31 38
32static void 39static void
@@ -62,11 +69,104 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
62} 69}
63 70
64static int 71static int
72nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
73 struct nouveau_mm_node **pnode)
74{
75 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
76 int ret;
77
78 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
79 if (ret)
80 *pnode = NULL;
81
82 return ret;
83}
84
85static void
86nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
87{
88 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
89
90 nouveau_mm_free(&priv->tags, pnode);
91}
92
93static void
94nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
95{
96 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
97 u32 last = first + count - 1;
98 int p, i;
99
100 BUG_ON((first > last) || (last >= priv->num_tags));
101
102 nv_wr32(priv, 0x17e8cc, first);
103 nv_wr32(priv, 0x17e8d0, last);
104 nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
105
106 /* wait until it's finished with clearing */
107 for (p = 0; p < priv->part_nr; ++p) {
108 if (!(priv->part_mask & (1 << p)))
109 continue;
110 for (i = 0; i < priv->subp_nr; ++i)
111 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
112 }
113}
114
115/* TODO: Figure out tag memory details and drop the over-cautious allocation.
116 */
117static int
118nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
119{
120 u32 tag_size, tag_margin, tag_align;
121 int ret;
122
123 nv_wr32(priv, 0x17e8d8, priv->part_nr);
124
125 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
126 priv->num_tags = (pfb->ram.size >> 17) / 4;
127 if (priv->num_tags > (1 << 17))
128 priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
129 priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
130
131 tag_align = priv->part_nr * 0x800;
132 tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
133
134 /* 4 part 4 sub: 0x2000 bytes for 56 tags */
135 /* 3 part 4 sub: 0x6000 bytes for 168 tags */
136 /*
137 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
138 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
139 *
140 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
141 */
142 tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin;
143 tag_size += tag_align;
144 tag_size = (tag_size + 0xfff) >> 12; /* round up */
145
146 ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
147 &priv->tag_ram);
148 if (ret) {
149 priv->num_tags = 0;
150 } else {
151 u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
152
153 tag_base += tag_align - 1;
154 ret = do_div(tag_base, tag_align);
155
156 nv_wr32(priv, 0x17e8d4, tag_base);
157 }
158 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
159
160 return ret;
161}
162
163static int
65nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 164nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size, 165 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject) 166 struct nouveau_object **pobject)
68{ 167{
69 struct nvc0_ltcg_priv *priv; 168 struct nvc0_ltcg_priv *priv;
169 struct nouveau_fb *pfb = nouveau_fb(parent);
70 int ret; 170 int ret;
71 171
72 ret = nouveau_ltcg_create(parent, engine, oclass, &priv); 172 ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
@@ -74,19 +174,44 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 if (ret) 174 if (ret)
75 return ret; 175 return ret;
76 176
77 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24; 177 priv->part_nr = nv_rd32(priv, 0x022438);
178 priv->part_mask = nv_rd32(priv, 0x022554);
179
180 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
181
78 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 182 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
79 183
184 ret = nvc0_ltcg_init_tag_ram(pfb, priv);
185 if (ret)
186 return ret;
187
188 priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
189 priv->base.tags_free = nvc0_ltcg_tags_free;
190 priv->base.tags_clear = nvc0_ltcg_tags_clear;
191
80 nv_subdev(priv)->intr = nvc0_ltcg_intr; 192 nv_subdev(priv)->intr = nvc0_ltcg_intr;
81 return 0; 193 return 0;
82} 194}
83 195
196static void
197nvc0_ltcg_dtor(struct nouveau_object *object)
198{
199 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
200 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
201 struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
202
203 nouveau_mm_fini(&priv->tags);
204 nouveau_mm_free(&pfb->vram, &priv->tag_ram);
205
206 nouveau_ltcg_destroy(ltcg);
207}
208
84struct nouveau_oclass 209struct nouveau_oclass
85nvc0_ltcg_oclass = { 210nvc0_ltcg_oclass = {
86 .handle = NV_SUBDEV(LTCG, 0xc0), 211 .handle = NV_SUBDEV(LTCG, 0xc0),
87 .ofuncs = &(struct nouveau_ofuncs) { 212 .ofuncs = &(struct nouveau_ofuncs) {
88 .ctor = nvc0_ltcg_ctor, 213 .ctor = nvc0_ltcg_ctor,
89 .dtor = _nouveau_ltcg_dtor, 214 .dtor = nvc0_ltcg_dtor,
90 .init = _nouveau_ltcg_init, 215 .init = _nouveau_ltcg_init,
91 .fini = _nouveau_ltcg_fini, 216 .fini = _nouveau_ltcg_fini,
92 }, 217 },
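
For reference, the tag-RAM sizing in nvc0_ltcg_init_tag_ram() above can be reproduced stand-alone. The sketch below is a user-space toy assuming a 4 GiB board with 4 active partitions; both values, and main() itself, are illustrative rather than driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ram_size = 4ULL << 30;         /* assumed: 4 GiB of VRAM */
	uint32_t part_nr = 4;                   /* assumed: 4 partitions */
	uint32_t num_tags, tag_align, tag_margin, tag_size;

	/* tags for 1/4 of VRAM, one tag per 128 KiB (1 << 17) chunk */
	num_tags = (uint32_t)((ram_size >> 17) / 4);
	if (num_tags > (1 << 17))
		num_tags = 1 << 17;             /* only 17 tag bits in the PTE */
	num_tags = (num_tags + 63) & ~63;       /* round up to 64 */

	tag_align = part_nr * 0x800;
	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;

	/* 0x6000 bytes per 64 tags (the "x2 to be safe" figure above) */
	tag_size = (num_tags / 64) * 0x6000 + tag_margin;
	tag_size += tag_align;
	tag_size = (tag_size + 0xfff) >> 12;    /* in 4 KiB pages */

	/* prints num_tags=8192 tag_size=776 pages for these inputs */
	printf("num_tags=%u tag_size=%u pages\n", num_tags, tag_size);
	return 0;
}

With those inputs the reservation comes to 776 pages, about 3 MiB, matching the "< 0.1 %" figure in the comment above.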
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8379aafa6e1b..1c0330b8c9a4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -24,10 +24,10 @@
24 24
25#include <subdev/mc.h> 25#include <subdev/mc.h>
26 26
27void 27static irqreturn_t
28nouveau_mc_intr(struct nouveau_subdev *subdev) 28nouveau_mc_intr(int irq, void *arg)
29{ 29{
30 struct nouveau_mc *pmc = nouveau_mc(subdev); 30 struct nouveau_mc *pmc = arg;
31 const struct nouveau_mc_intr *map = pmc->intr_map; 31 const struct nouveau_mc_intr *map = pmc->intr_map;
32 struct nouveau_subdev *unit; 32 struct nouveau_subdev *unit;
33 u32 stat, intr; 33 u32 stat, intr;
@@ -35,7 +35,7 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
35 intr = stat = nv_rd32(pmc, 0x000100); 35 intr = stat = nv_rd32(pmc, 0x000100);
36 while (stat && map->stat) { 36 while (stat && map->stat) {
37 if (stat & map->stat) { 37 if (stat & map->stat) {
38 unit = nouveau_subdev(subdev, map->unit); 38 unit = nouveau_subdev(pmc, map->unit);
39 if (unit && unit->intr) 39 if (unit && unit->intr)
40 unit->intr(unit); 40 unit->intr(unit);
41 intr &= ~map->stat; 41 intr &= ~map->stat;
@@ -46,4 +46,56 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
46 if (intr) { 46 if (intr) {
47 nv_error(pmc, "unknown intr 0x%08x\n", stat); 47 nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 } 48 }
49
50 return stat ? IRQ_HANDLED : IRQ_NONE;
51}
52
53int
54_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
55{
56 struct nouveau_mc *pmc = (void *)object;
57 nv_wr32(pmc, 0x000140, 0x00000000);
58 return nouveau_subdev_fini(&pmc->base, suspend);
59}
60
61int
62_nouveau_mc_init(struct nouveau_object *object)
63{
64 struct nouveau_mc *pmc = (void *)object;
65 int ret = nouveau_subdev_init(&pmc->base);
66 if (ret)
67 return ret;
68 nv_wr32(pmc, 0x000140, 0x00000001);
69 return 0;
70}
71
72void
73_nouveau_mc_dtor(struct nouveau_object *object)
74{
75 struct nouveau_device *device = nv_device(object);
76 struct nouveau_mc *pmc = (void *)object;
77 free_irq(device->pdev->irq, pmc);
78 nouveau_subdev_destroy(&pmc->base);
79}
80
81int
82nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
83 struct nouveau_oclass *oclass, int length, void **pobject)
84{
85 struct nouveau_device *device = nv_device(parent);
86 struct nouveau_mc *pmc;
87 int ret;
88
89 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
90 "master", length, pobject);
91 pmc = *pobject;
92 if (ret)
93 return ret;
94
95 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
96 IRQF_SHARED, "nouveau", pmc);
97 if (ret < 0)
98 return ret;
99
100 return 0;
49} 101}
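
The mc/base.c change above moves nouveau onto the standard shared-IRQ pattern: request_irq() with IRQF_SHARED, and a handler that returns IRQ_NONE when the interrupt belongs to another device on the line. A minimal sketch of that pattern follows; struct mydev and its register layout are hypothetical stand-ins for the real subdev, not nouveau API:

#include <linux/interrupt.h>
#include <linux/io.h>

struct mydev {
	void __iomem *mmio;
};

static irqreturn_t mydev_intr(int irq, void *arg)
{
	struct mydev *dev = arg;                /* cookie from request_irq() */
	u32 stat = readl(dev->mmio + 0x100);    /* pending-interrupt bits */

	if (!stat)
		return IRQ_NONE;                /* not ours: the line is shared */

	/* ... dispatch to sub-units based on stat ... */
	return IRQ_HANDLED;
}

/* at probe time; the same cookie must later be passed to free_irq() */
static int mydev_setup_irq(struct mydev *dev, int irq)
{
	return request_irq(irq, mydev_intr, IRQF_SHARED, "mydev", dev);
}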
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 89da8fa7ea0f..8c769715227b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -55,7 +55,6 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
55 if (ret) 55 if (ret)
56 return ret; 56 return ret;
57 57
58 nv_subdev(priv)->intr = nouveau_mc_intr;
59 priv->base.intr_map = nv04_mc_intr; 58 priv->base.intr_map = nv04_mc_intr;
60 return 0; 59 return 0;
61} 60}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 397d868359ad..51919371810f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -41,7 +41,6 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 nv_subdev(priv)->intr = nouveau_mc_intr;
45 priv->base.intr_map = nv04_mc_intr; 44 priv->base.intr_map = nv04_mc_intr;
46 return 0; 45 return 0;
47} 46}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 5965add6daee..d796924f9930 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -57,7 +57,6 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
57 if (ret) 57 if (ret)
58 return ret; 58 return ret;
59 59
60 nv_subdev(priv)->intr = nouveau_mc_intr;
61 priv->base.intr_map = nv50_mc_intr; 60 priv->base.intr_map = nv50_mc_intr;
62 return 0; 61 return 0;
63} 62}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 3a80b29dce0f..e82fd21b5041 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -59,7 +59,6 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
59 if (ret) 59 if (ret)
60 return ret; 60 return ret;
61 61
62 nv_subdev(priv)->intr = nouveau_mc_intr;
63 priv->base.intr_map = nv98_mc_intr; 62 priv->base.intr_map = nv98_mc_intr;
64 return 0; 63 return 0;
65} 64}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 42bbf72023a8..737bd4b682e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -61,7 +61,6 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
61 if (ret) 61 if (ret)
62 return ret; 62 return ret;
63 63
64 nv_subdev(priv)->intr = nouveau_mc_intr;
65 priv->base.intr_map = nvc0_mc_intr; 64 priv->base.intr_map = nvc0_mc_intr;
66 return 0; 65 return 0;
67} 66}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index a70d1b7e397b..002e51b3af93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -165,7 +165,7 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
165 return 0; 165 return 0;
166} 166}
167 167
168static void 168void
169nv40_therm_intr(struct nouveau_subdev *subdev) 169nv40_therm_intr(struct nouveau_subdev *subdev)
170{ 170{
171 struct nouveau_therm *therm = nouveau_therm(subdev); 171 struct nouveau_therm *therm = nouveau_therm(subdev);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 86632cbd65ce..8cf7597a2182 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -118,145 +118,36 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm)
118 return pwm_clock; 118 return pwm_clock;
119} 119}
120 120
121int
122nv50_temp_get(struct nouveau_therm *therm)
123{
124 return nv_rd32(therm, 0x20400);
125}
126
127static void
128nv50_therm_program_alarms(struct nouveau_therm *therm)
129{
130 struct nouveau_therm_priv *priv = (void *)therm;
131 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
132 unsigned long flags;
133
134 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
135
136 /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
137 nv_wr32(therm, 0x20000, 0x000003ff);
138
139 /* shutdown: The computer should be shutdown when reached */
140 nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
141 nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
142
143 /* THRS_1 : fan boost*/
144 nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
145
146 /* THRS_2 : critical */
147 nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
148
149 /* THRS_4 : down clock */
150 nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
151 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
152
153 nv_info(therm,
154 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
155 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
156 sensor->thrs_down_clock.temp,
157 sensor->thrs_down_clock.hysteresis,
158 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
159 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
160
161}
162
163/* must be called with alarm_program_lock taken ! */
164static void 121static void
165nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm, 122nv50_sensor_setup(struct nouveau_therm *therm)
166 uint32_t thrs_reg, u8 status_bit,
167 const struct nvbios_therm_threshold *thrs,
168 enum nouveau_therm_thrs thrs_name)
169{ 123{
170 enum nouveau_therm_thrs_direction direction; 124 nv_mask(therm, 0x20010, 0x40000000, 0x0);
171 enum nouveau_therm_thrs_state prev_state, new_state; 125 mdelay(20); /* wait for the temperature to stabilize */
172 int temp, cur;
173
174 prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
175 temp = nv_rd32(therm, thrs_reg);
176
177 /* program the next threshold */
178 if (temp == thrs->temp) {
179 nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
180 new_state = NOUVEAU_THERM_THRS_HIGHER;
181 } else {
182 nv_wr32(therm, thrs_reg, thrs->temp);
183 new_state = NOUVEAU_THERM_THRS_LOWER;
184 }
185
186 /* fix the state (in case someone reprogrammed the alarms) */
187 cur = therm->temp_get(therm);
188 if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
189 new_state = NOUVEAU_THERM_THRS_HIGHER;
190 else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
191 cur < thrs->temp - thrs->hysteresis)
192 new_state = NOUVEAU_THERM_THRS_LOWER;
193 nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
194
195 /* find the direction */
196 if (prev_state < new_state)
197 direction = NOUVEAU_THERM_THRS_RISING;
198 else if (prev_state > new_state)
199 direction = NOUVEAU_THERM_THRS_FALLING;
200 else
201 return;
202
203 /* advertise a change in direction */
204 nouveau_therm_sensor_event(therm, thrs_name, direction);
205} 126}
206 127
207static void 128static int
208nv50_therm_intr(struct nouveau_subdev *subdev) 129nv50_temp_get(struct nouveau_therm *therm)
209{ 130{
210 struct nouveau_therm *therm = nouveau_therm(subdev);
211 struct nouveau_therm_priv *priv = (void *)therm; 131 struct nouveau_therm_priv *priv = (void *)therm;
212 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 132 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
213 unsigned long flags; 133 int core_temp;
214 uint32_t intr;
215
216 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
217
218 intr = nv_rd32(therm, 0x20100);
219
220 /* THRS_4: downclock */
221 if (intr & 0x002) {
222 nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
223 &sensor->thrs_down_clock,
224 NOUVEAU_THERM_THRS_DOWNCLOCK);
225 intr &= ~0x002;
226 }
227 134
228 /* shutdown */ 135 core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
229 if (intr & 0x004) {
230 nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
231 &sensor->thrs_shutdown,
232 NOUVEAU_THERM_THRS_SHUTDOWN);
233 intr &= ~0x004;
234 }
235
236 /* THRS_1 : fan boost */
237 if (intr & 0x008) {
238 nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
239 &sensor->thrs_fan_boost,
240 NOUVEAU_THERM_THRS_FANBOOST);
241 intr &= ~0x008;
242 }
243 136
 244 /* THRS_2 : critical */ 137 /* if the slope or the offset is unset, do not use the sensor */
245 if (intr & 0x010) { 138 if (!sensor->slope_div || !sensor->slope_mult ||
246 nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22, 139 !sensor->offset_num || !sensor->offset_den)
247 &sensor->thrs_critical, 140 return -ENODEV;
248 NOUVEAU_THERM_THRS_CRITICAL);
249 intr &= ~0x010;
250 }
251 141
252 if (intr) 142 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
253 nv_error(therm, "unhandled intr 0x%08x\n", intr); 143 core_temp = core_temp + sensor->offset_num / sensor->offset_den;
144 core_temp = core_temp + sensor->offset_constant - 8;
254 145
255 /* ACK everything */ 146 /* reserve negative temperatures for errors */
256 nv_wr32(therm, 0x20100, 0xffffffff); 147 if (core_temp < 0)
257 nv_wr32(therm, 0x1100, 0x10000); /* PBUS */ 148 core_temp = 0;
258 149
259 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); 150 return core_temp;
260} 151}
261 152
262static int 153static int
@@ -278,33 +169,29 @@ nv50_therm_ctor(struct nouveau_object *parent,
278 priv->base.base.pwm_set = nv50_fan_pwm_set; 169 priv->base.base.pwm_set = nv50_fan_pwm_set;
279 priv->base.base.pwm_clock = nv50_fan_pwm_clock; 170 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
280 priv->base.base.temp_get = nv50_temp_get; 171 priv->base.base.temp_get = nv50_temp_get;
281 priv->base.sensor.program_alarms = nv50_therm_program_alarms; 172 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
282 nv_subdev(priv)->intr = nv50_therm_intr; 173 nv_subdev(priv)->intr = nv40_therm_intr;
283
284 /* init the thresholds */
285 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
286 NOUVEAU_THERM_THRS_SHUTDOWN,
287 NOUVEAU_THERM_THRS_LOWER);
288 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
289 NOUVEAU_THERM_THRS_FANBOOST,
290 NOUVEAU_THERM_THRS_LOWER);
291 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
292 NOUVEAU_THERM_THRS_CRITICAL,
293 NOUVEAU_THERM_THRS_LOWER);
294 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
295 NOUVEAU_THERM_THRS_DOWNCLOCK,
296 NOUVEAU_THERM_THRS_LOWER);
297 174
298 return nouveau_therm_preinit(&priv->base.base); 175 return nouveau_therm_preinit(&priv->base.base);
299} 176}
300 177
178static int
179nv50_therm_init(struct nouveau_object *object)
180{
181 struct nouveau_therm *therm = (void *)object;
182
183 nv50_sensor_setup(therm);
184
185 return _nouveau_therm_init(object);
186}
187
301struct nouveau_oclass 188struct nouveau_oclass
302nv50_therm_oclass = { 189nv50_therm_oclass = {
303 .handle = NV_SUBDEV(THERM, 0x50), 190 .handle = NV_SUBDEV(THERM, 0x50),
304 .ofuncs = &(struct nouveau_ofuncs) { 191 .ofuncs = &(struct nouveau_ofuncs) {
305 .ctor = nv50_therm_ctor, 192 .ctor = nv50_therm_ctor,
306 .dtor = _nouveau_therm_dtor, 193 .dtor = _nouveau_therm_dtor,
307 .init = _nouveau_therm_init, 194 .init = nv50_therm_init,
308 .fini = _nouveau_therm_fini, 195 .fini = _nouveau_therm_fini,
309 }, 196 },
310}; 197};
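
The new nv50_temp_get() above is a linear calibration of the raw 14-bit reading from register 0x20014, using slope and offset values taken from the VBIOS. A user-space toy of the same arithmetic; every constant below is a made-up assumption, not real calibration data:

#include <stdio.h>

int main(void)
{
	int raw = 236;                          /* assumed low 14 bits of 0x20014 */
	int slope_mult = 1, slope_div = 2;      /* hypothetical VBIOS values */
	int offset_num = -6000, offset_den = 100;
	int offset_constant = 0;
	int core_temp;

	core_temp = raw * slope_mult / slope_div;   /* scale */
	core_temp += offset_num / offset_den;       /* offset */
	core_temp += offset_constant - 8;

	if (core_temp < 0)      /* negative values are reserved for errors */
		core_temp = 0;

	printf("core temp = %d C\n", core_temp);    /* 50 C for these values */
	return 0;
}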
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
new file mode 100644
index 000000000000..42ba633ccff7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28struct nv84_therm_priv {
29 struct nouveau_therm_priv base;
30};
31
32int
33nv84_temp_get(struct nouveau_therm *therm)
34{
35 return nv_rd32(therm, 0x20400);
36}
37
38static void
39nv84_therm_program_alarms(struct nouveau_therm *therm)
40{
41 struct nouveau_therm_priv *priv = (void *)therm;
42 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
43 unsigned long flags;
44
45 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
46
47 /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
48 nv_wr32(therm, 0x20000, 0x000003ff);
49
 50 /* shutdown: The computer should be shut down when reached */
51 nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
52 nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
53
54 /* THRS_1 : fan boost*/
55 nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
56
57 /* THRS_2 : critical */
58 nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
59
60 /* THRS_4 : down clock */
61 nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
62 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
63
64 nv_debug(therm,
65 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
66 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
67 sensor->thrs_down_clock.temp,
68 sensor->thrs_down_clock.hysteresis,
69 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
70 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
71
72}
73
74/* must be called with alarm_program_lock taken ! */
75static void
76nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
77 uint32_t thrs_reg, u8 status_bit,
78 const struct nvbios_therm_threshold *thrs,
79 enum nouveau_therm_thrs thrs_name)
80{
81 enum nouveau_therm_thrs_direction direction;
82 enum nouveau_therm_thrs_state prev_state, new_state;
83 int temp, cur;
84
85 prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
86 temp = nv_rd32(therm, thrs_reg);
87
88 /* program the next threshold */
89 if (temp == thrs->temp) {
90 nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
91 new_state = NOUVEAU_THERM_THRS_HIGHER;
92 } else {
93 nv_wr32(therm, thrs_reg, thrs->temp);
94 new_state = NOUVEAU_THERM_THRS_LOWER;
95 }
96
97 /* fix the state (in case someone reprogrammed the alarms) */
98 cur = therm->temp_get(therm);
99 if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
100 new_state = NOUVEAU_THERM_THRS_HIGHER;
101 else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
102 cur < thrs->temp - thrs->hysteresis)
103 new_state = NOUVEAU_THERM_THRS_LOWER;
104 nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
105
106 /* find the direction */
107 if (prev_state < new_state)
108 direction = NOUVEAU_THERM_THRS_RISING;
109 else if (prev_state > new_state)
110 direction = NOUVEAU_THERM_THRS_FALLING;
111 else
112 return;
113
114 /* advertise a change in direction */
115 nouveau_therm_sensor_event(therm, thrs_name, direction);
116}
117
118static void
119nv84_therm_intr(struct nouveau_subdev *subdev)
120{
121 struct nouveau_therm *therm = nouveau_therm(subdev);
122 struct nouveau_therm_priv *priv = (void *)therm;
123 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
124 unsigned long flags;
125 uint32_t intr;
126
127 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
128
129 intr = nv_rd32(therm, 0x20100);
130
131 /* THRS_4: downclock */
132 if (intr & 0x002) {
133 nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
134 &sensor->thrs_down_clock,
135 NOUVEAU_THERM_THRS_DOWNCLOCK);
136 intr &= ~0x002;
137 }
138
139 /* shutdown */
140 if (intr & 0x004) {
141 nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
142 &sensor->thrs_shutdown,
143 NOUVEAU_THERM_THRS_SHUTDOWN);
144 intr &= ~0x004;
145 }
146
147 /* THRS_1 : fan boost */
148 if (intr & 0x008) {
149 nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
150 &sensor->thrs_fan_boost,
151 NOUVEAU_THERM_THRS_FANBOOST);
152 intr &= ~0x008;
153 }
154
155 /* THRS_2 : critical */
156 if (intr & 0x010) {
157 nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
158 &sensor->thrs_critical,
159 NOUVEAU_THERM_THRS_CRITICAL);
160 intr &= ~0x010;
161 }
162
163 if (intr)
164 nv_error(therm, "unhandled intr 0x%08x\n", intr);
165
166 /* ACK everything */
167 nv_wr32(therm, 0x20100, 0xffffffff);
168 nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
169
170 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
171}
172
173static int
174nv84_therm_ctor(struct nouveau_object *parent,
175 struct nouveau_object *engine,
176 struct nouveau_oclass *oclass, void *data, u32 size,
177 struct nouveau_object **pobject)
178{
179 struct nv84_therm_priv *priv;
180 int ret;
181
182 ret = nouveau_therm_create(parent, engine, oclass, &priv);
183 *pobject = nv_object(priv);
184 if (ret)
185 return ret;
186
187 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
188 priv->base.base.pwm_get = nv50_fan_pwm_get;
189 priv->base.base.pwm_set = nv50_fan_pwm_set;
190 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
191 priv->base.base.temp_get = nv84_temp_get;
192 priv->base.sensor.program_alarms = nv84_therm_program_alarms;
193 nv_subdev(priv)->intr = nv84_therm_intr;
194
195 /* init the thresholds */
196 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
197 NOUVEAU_THERM_THRS_SHUTDOWN,
198 NOUVEAU_THERM_THRS_LOWER);
199 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
200 NOUVEAU_THERM_THRS_FANBOOST,
201 NOUVEAU_THERM_THRS_LOWER);
202 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
203 NOUVEAU_THERM_THRS_CRITICAL,
204 NOUVEAU_THERM_THRS_LOWER);
205 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
206 NOUVEAU_THERM_THRS_DOWNCLOCK,
207 NOUVEAU_THERM_THRS_LOWER);
208
209 return nouveau_therm_preinit(&priv->base.base);
210}
211
212struct nouveau_oclass
213nv84_therm_oclass = {
214 .handle = NV_SUBDEV(THERM, 0x84),
215 .ofuncs = &(struct nouveau_ofuncs) {
216 .ctor = nv84_therm_ctor,
217 .dtor = _nouveau_therm_dtor,
218 .init = _nouveau_therm_init,
219 .fini = _nouveau_therm_fini,
220 },
221};
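
nv84_therm_threshold_hyst_emulation() above fakes hysteresis with a single comparator by reprogramming the threshold register between T and T - hysteresis and remembering which side of the threshold the temperature is on. A stand-alone toy model of that state machine, with simplified state handling and illustrative values:

#include <stdio.h>

enum thrs_state { THRS_LOWER, THRS_HIGHER };

/* reprogram the "register" and return the new state, as the driver does */
static enum thrs_state hyst_step(int *reg, int temp, int hyst, int cur)
{
	enum thrs_state state;

	if (*reg == temp) {             /* we just crossed T going up */
		*reg = temp - hyst;     /* now watch for T - hyst going down */
		state = THRS_HIGHER;
	} else {                        /* we just crossed T - hyst going down */
		*reg = temp;
		state = THRS_LOWER;
	}

	/* fix up in case the alarms were reprogrammed behind our back */
	if (state == THRS_LOWER && cur > temp)
		state = THRS_HIGHER;
	else if (state == THRS_HIGHER && cur < temp - hyst)
		state = THRS_LOWER;
	return state;
}

int main(void)
{
	int reg = 90, cur = 91;         /* threshold 90 C, hysteresis 3 C */

	enum thrs_state s = hyst_step(&reg, 90, 3, cur);
	printf("state=%s next threshold=%d\n",
	       s == THRS_HIGHER ? "HIGHER" : "LOWER", reg);
	return 0;
}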
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 2dcc5437116a..d11a7c400813 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -81,7 +81,7 @@ nva3_therm_ctor(struct nouveau_object *parent,
81 priv->base.base.pwm_get = nv50_fan_pwm_get; 81 priv->base.base.pwm_get = nv50_fan_pwm_get;
82 priv->base.base.pwm_set = nv50_fan_pwm_set; 82 priv->base.base.pwm_set = nv50_fan_pwm_set;
83 priv->base.base.pwm_clock = nv50_fan_pwm_clock; 83 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
84 priv->base.base.temp_get = nv50_temp_get; 84 priv->base.base.temp_get = nv84_temp_get;
85 priv->base.base.fan_sense = nva3_therm_fan_sense; 85 priv->base.base.fan_sense = nva3_therm_fan_sense;
86 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; 86 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
87 return nouveau_therm_preinit(&priv->base.base); 87 return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index d7d30ee8332e..54c28bdc4204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -135,7 +135,7 @@ nvd0_therm_ctor(struct nouveau_object *parent,
135 priv->base.base.pwm_get = nvd0_fan_pwm_get; 135 priv->base.base.pwm_get = nvd0_fan_pwm_get;
136 priv->base.base.pwm_set = nvd0_fan_pwm_set; 136 priv->base.base.pwm_set = nvd0_fan_pwm_set;
137 priv->base.base.pwm_clock = nvd0_fan_pwm_clock; 137 priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
138 priv->base.base.temp_get = nv50_temp_get; 138 priv->base.base.temp_get = nv84_temp_get;
139 priv->base.base.fan_sense = nva3_therm_fan_sense; 139 priv->base.base.fan_sense = nva3_therm_fan_sense;
140 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; 140 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
141 return nouveau_therm_preinit(&priv->base.base); 141 return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 438d9824b774..15ca64e481f1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -134,11 +134,12 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
134 enum nouveau_therm_thrs_direction dir); 134 enum nouveau_therm_thrs_direction dir);
135void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm); 135void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
136 136
137void nv40_therm_intr(struct nouveau_subdev *);
137int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool); 138int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
138int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *); 139int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
139int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); 140int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
140int nv50_fan_pwm_clock(struct nouveau_therm *); 141int nv50_fan_pwm_clock(struct nouveau_therm *);
141int nv50_temp_get(struct nouveau_therm *therm); 142int nv84_temp_get(struct nouveau_therm *therm);
142 143
143int nva3_therm_fan_sense(struct nouveau_therm *); 144int nva3_therm_fan_sense(struct nouveau_therm *);
144 145
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 470f6a47b656..dde746c78c8a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -205,13 +205,13 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
205 struct nouveau_therm_priv *priv = (void *)therm; 205 struct nouveau_therm_priv *priv = (void *)therm;
206 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 206 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
207 207
208 nv_info(therm, 208 nv_debug(therm,
209 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n", 209 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
210 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis, 210 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
211 sensor->thrs_down_clock.temp, 211 sensor->thrs_down_clock.temp,
212 sensor->thrs_down_clock.hysteresis, 212 sensor->thrs_down_clock.hysteresis,
213 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis, 213 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
214 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis); 214 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
215 215
216 alarm_timer_callback(&priv->sensor.therm_poll_alarm); 216 alarm_timer_callback(&priv->sensor.therm_poll_alarm);
217} 217}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 8e1bae4f12e8..9469b8275675 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -96,11 +96,16 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
96 96
97 /* append new alarm to list, in soonest-alarm-first order */ 97 /* append new alarm to list, in soonest-alarm-first order */
98 spin_lock_irqsave(&priv->lock, flags); 98 spin_lock_irqsave(&priv->lock, flags);
99 list_for_each_entry(list, &priv->alarms, head) { 99 if (!time) {
100 if (list->timestamp > alarm->timestamp) 100 if (!list_empty(&alarm->head))
101 break; 101 list_del(&alarm->head);
102 } else {
103 list_for_each_entry(list, &priv->alarms, head) {
104 if (list->timestamp > alarm->timestamp)
105 break;
106 }
107 list_add_tail(&alarm->head, &list->head);
102 } 108 }
103 list_add_tail(&alarm->head, &list->head);
104 spin_unlock_irqrestore(&priv->lock, flags); 109 spin_unlock_irqrestore(&priv->lock, flags);
105 110
 106 /* process pending alarms */ 111 /* process pending alarms */
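
The timer/nv04.c change above gives nv04_timer_alarm() cancel semantics: a zero timeout now unlinks a pending alarm instead of queueing it. A stand-alone sketch of a soonest-first insert with the same zero-means-cancel convention, using a plain singly-linked list in place of the kernel list API:

#include <stdio.h>

struct alarm {
	unsigned long long timestamp;
	struct alarm *next;
};

/* insert soonest-first; timestamp == 0 means "cancel if pending" */
static void alarm_set(struct alarm **head, struct alarm *a,
		      unsigned long long timestamp)
{
	struct alarm **p = head;

	/* unlink first so re-arming an armed alarm cannot double-insert */
	while (*p && *p != a)
		p = &(*p)->next;
	if (*p == a)
		*p = a->next;

	if (!timestamp)
		return;                 /* cancelled */

	a->timestamp = timestamp;
	for (p = head; *p && (*p)->timestamp <= timestamp; p = &(*p)->next)
		;
	a->next = *p;
	*p = a;
}

int main(void)
{
	struct alarm a = {0}, b = {0}, *head = NULL;

	alarm_set(&head, &a, 100);
	alarm_set(&head, &b, 50);
	alarm_set(&head, &a, 0);        /* cancel a */
	printf("head timestamp = %llu\n", head ? head->timestamp : 0);
	return 0;
}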
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
index 6adbbc9cc361..ed45437167f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -110,7 +110,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 if (ret) 110 if (ret)
111 return ret; 111 return ret;
112 112
113 ret = nouveau_gpuobj_new(parent, NULL, 113 ret = nouveau_gpuobj_new(nv_object(priv), NULL,
114 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 114 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
115 8, 16, NVOBJ_FLAG_ZERO_ALLOC, 115 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
116 &priv->vm->pgt[0].obj[0]); 116 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 9474cfca6e4c..064c76262876 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -119,7 +119,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
119 if (ret) 119 if (ret)
120 return ret; 120 return ret;
121 121
122 ret = nouveau_gpuobj_new(parent, NULL, 122 ret = nouveau_gpuobj_new(nv_object(priv), NULL,
123 (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 123 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
124 16, NVOBJ_FLAG_ZERO_ALLOC, 124 16, NVOBJ_FLAG_ZERO_ALLOC,
125 &priv->vm->pgt[0].obj[0]); 125 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index aa8131436e3d..fae1f67d5948 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -196,7 +196,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
196 if (ret) 196 if (ret)
197 return ret; 197 return ret;
198 198
199 ret = nouveau_gpuobj_new(parent, NULL, 199 ret = nouveau_gpuobj_new(nv_object(priv), NULL,
200 (NV44_GART_SIZE / NV44_GART_PAGE) * 4, 200 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
201 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, 201 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
202 &priv->vm->pgt[0].obj[0]); 202 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 30c61e6c2017..4c3b0a23b9d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,12 +28,54 @@
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30#include <subdev/vm.h> 30#include <subdev/vm.h>
31#include <subdev/ltcg.h>
31 32
32struct nvc0_vmmgr_priv { 33struct nvc0_vmmgr_priv {
33 struct nouveau_vmmgr base; 34 struct nouveau_vmmgr base;
34 spinlock_t lock; 35 spinlock_t lock;
35}; 36};
36 37
38
39/* Map from compressed to corresponding uncompressed storage type.
40 * The value 0xff represents an invalid storage type.
41 */
42const u8 nvc0_pte_storage_type_map[256] =
43{
44 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
45 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
46 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
47 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
48 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
49 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
50 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
51 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
52 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
53 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
54 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
55 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
56 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
57 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
58 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
59 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
60 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
61 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
62 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
63 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
64 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
65 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
66 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
67 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
68 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
69 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
70 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
71 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
72 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
73 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
74 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
75 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
76};
77
78
37static void 79static void
38nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, 80nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
39 struct nouveau_gpuobj *pgt[2]) 81 struct nouveau_gpuobj *pgt[2])
@@ -68,10 +110,20 @@ static void
68nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 110nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
69 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 111 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
70{ 112{
71 u32 next = 1 << (vma->node->type - 8); 113 u64 next = 1 << (vma->node->type - 8);
72 114
73 phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); 115 phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
74 pte <<= 3; 116 pte <<= 3;
117
118 if (mem->tag) {
119 struct nouveau_ltcg *ltcg =
120 nouveau_ltcg(vma->vm->vmm->base.base.parent);
121 u32 tag = mem->tag->offset + (delta >> 17);
122 phys |= (u64)tag << (32 + 12);
123 next |= (u64)1 << (32 + 12);
124 ltcg->tags_clear(ltcg, tag, cnt);
125 }
126
75 while (cnt--) { 127 while (cnt--) {
76 nv_wo32(pgt, pte + 0, lower_32_bits(phys)); 128 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
77 nv_wo32(pgt, pte + 4, upper_32_bits(phys)); 129 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
@@ -85,10 +137,12 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
85 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 137 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
86{ 138{
87 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; 139 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
140 /* compressed storage types are invalid for system memory */
141 u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
88 142
89 pte <<= 3; 143 pte <<= 3;
90 while (cnt--) { 144 while (cnt--) {
91 u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target); 145 u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
92 nv_wo32(pgt, pte + 0, lower_32_bits(phys)); 146 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
93 nv_wo32(pgt, pte + 4, upper_32_bits(phys)); 147 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
94 pte += 8; 148 pte += 8;
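
nvc0_vm_map() above folds the compression tag into each PTE: the tag index is placed at bit 44 (32 + 12), and the same bit is set in the per-PTE increment "next" so consecutive entries advance the tag. A stand-alone sketch of that bit packing; all values below are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x40000000ULL >> 8;     /* assumed PTE address field */
	uint64_t next = 1ULL << 4;              /* per-PTE address increment */
	uint32_t tag_offset = 128;              /* hypothetical tag allocation */
	uint64_t delta = 3ULL << 17;            /* offset into the mapping */

	uint32_t tag = tag_offset + (uint32_t)(delta >> 17); /* 1 tag / 128 KiB */

	phys |= (uint64_t)tag << (32 + 12);     /* tag field at bit 44 */
	next |= 1ULL << (32 + 12);              /* bump the tag once per PTE,
						 * mirroring the driver's next */

	printf("pte[0] lo=0x%08x hi=0x%08x\n",
	       (uint32_t)phys, (uint32_t)(phys >> 32));
	printf("pte[1] lo=0x%08x hi=0x%08x\n",
	       (uint32_t)(phys + next), (uint32_t)((phys + next) >> 32));
	return 0;
}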
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
new file mode 100644
index 000000000000..ea3f5b8a0f95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -0,0 +1,10 @@
1nouveau-y += dispnv04/arb.o
2nouveau-y += dispnv04/crtc.o
3nouveau-y += dispnv04/cursor.o
4nouveau-y += dispnv04/dac.o
5nouveau-y += dispnv04/dfp.o
6nouveau-y += dispnv04/disp.o
7nouveau-y += dispnv04/hw.o
8nouveau-y += dispnv04/tvmodesnv17.o
9nouveau-y += dispnv04/tvnv04.o
10nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 6da576445b3d..2e70462883e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -25,7 +25,7 @@
25 25
26#include "nouveau_drm.h" 26#include "nouveau_drm.h"
27#include "nouveau_reg.h" 27#include "nouveau_reg.h"
28#include "nouveau_hw.h" 28#include "hw.h"
29 29
30/****************************************************************************\ 30/****************************************************************************\
31* * 31* *
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6578cd28c556..0782bd2f1e04 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -33,10 +33,10 @@
33#include "nouveau_encoder.h" 33#include "nouveau_encoder.h"
34#include "nouveau_connector.h" 34#include "nouveau_connector.h"
35#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
36#include "nouveau_hw.h" 36#include "hw.h"
37#include "nvreg.h" 37#include "nvreg.h"
38#include "nouveau_fbcon.h" 38#include "nouveau_fbcon.h"
39#include "nv04_display.h" 39#include "disp.h"
40 40
41#include <subdev/bios/pll.h> 41#include <subdev/bios/pll.h>
42#include <subdev/clock.h> 42#include <subdev/clock.h>
@@ -1070,4 +1070,3 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1070 1070
1071 return 0; 1071 return 0;
1072} 1072}
1073
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index fe86f0de348f..a810303169de 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -3,7 +3,7 @@
3#include "nouveau_drm.h" 3#include "nouveau_drm.h"
4#include "nouveau_reg.h" 4#include "nouveau_reg.h"
5#include "nouveau_crtc.h" 5#include "nouveau_crtc.h"
6#include "nouveau_hw.h" 6#include "hw.h"
7 7
8static void 8static void
9nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update) 9nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
@@ -68,4 +68,3 @@ nv04_cursor_init(struct nouveau_crtc *crtc)
68 crtc->cursor.show = nv04_cursor_show; 68 crtc->cursor.show = nv04_cursor_show;
69 return 0; 69 return 0;
70} 70}
71
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 64f7020fb605..434b920f6bd4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -31,7 +31,7 @@
31#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 32#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_hw.h" 34#include "hw.h"
35#include "nvreg.h" 35#include "nvreg.h"
36 36
37#include <subdev/bios/gpio.h> 37#include <subdev/bios/gpio.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7e24cdf1cb39..93dd23ff0093 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -32,7 +32,7 @@
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34#include "nouveau_crtc.h" 34#include "nouveau_crtc.h"
35#include "nouveau_hw.h" 35#include "hw.h"
36#include "nvreg.h" 36#include "nvreg.h"
37 37
38#include <drm/i2c/sil164.h> 38#include <drm/i2c/sil164.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index ad48444c385c..4908d3fd0486 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,7 +30,7 @@
30 30
31#include "nouveau_drm.h" 31#include "nouveau_drm.h"
32#include "nouveau_reg.h" 32#include "nouveau_reg.h"
33#include "nouveau_hw.h" 33#include "hw.h"
34#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36 36
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..a0a031dad13f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 617a06ffdb46..973056b86207 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -24,7 +24,7 @@
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "nouveau_drm.h" 26#include "nouveau_drm.h"
27#include "nouveau_hw.h" 27#include "hw.h"
28 28
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30#include <subdev/clock.h> 30#include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 7dff1021fab4..eeb70d912d99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -24,7 +24,8 @@
24#define __NOUVEAU_HW_H__ 24#define __NOUVEAU_HW_H__
25 25
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include "nv04_display.h" 27#include "disp.h"
28#include "nvreg.h"
28 29
29#include <subdev/bios/pll.h> 30#include <subdev/bios/pll.h>
30 31
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
index bbfb1a68fb11..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 1cdfe2a5875d..08c6f5e50610 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -29,8 +29,8 @@
29#include "nouveau_drm.h" 29#include "nouveau_drm.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_hw.h" 32#include "hw.h"
33#include "nv17_tv.h" 33#include "tvnv17.h"
34 34
35char *nv17_tv_norm_names[NUM_TV_NORMS] = { 35char *nv17_tv_norm_names[NUM_TV_NORMS] = {
36 [TV_NORM_PAL] = "PAL", 36 [TV_NORM_PAL] = "PAL",
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 4a69ccdef9b4..bf13db4e8631 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -30,7 +30,7 @@
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 31#include "nouveau_connector.h"
32#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
33#include "nouveau_hw.h" 33#include "hw.h"
34#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
35 35
36#include <drm/i2c/ch7006.h> 36#include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 977e42be2050..acef48f4a4ea 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -31,8 +31,8 @@
31#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 32#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_hw.h" 34#include "hw.h"
35#include "nv17_tv.h" 35#include "tvnv17.h"
36 36
37#include <core/device.h> 37#include <core/device.h>
38 38
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b331543a41b..7b331543a41b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 5eb3e0da7c6e..1c4c6c9161ac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -30,6 +30,7 @@
30#include <subdev/fb.h> 30#include <subdev/fb.h>
31#include <subdev/timer.h> 31#include <subdev/timer.h>
32#include <subdev/instmem.h> 32#include <subdev/instmem.h>
33#include <engine/graph.h>
33 34
34#include "nouveau_drm.h" 35#include "nouveau_drm.h"
35#include "nouveau_dma.h" 36#include "nouveau_dma.h"
@@ -168,6 +169,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
168 struct nouveau_drm *drm = nouveau_drm(dev); 169 struct nouveau_drm *drm = nouveau_drm(dev);
169 struct nouveau_device *device = nv_device(drm->device); 170 struct nouveau_device *device = nv_device(drm->device);
170 struct nouveau_timer *ptimer = nouveau_timer(device); 171 struct nouveau_timer *ptimer = nouveau_timer(device);
172 struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
171 struct drm_nouveau_getparam *getparam = data; 173 struct drm_nouveau_getparam *getparam = data;
172 174
173 switch (getparam->param) { 175 switch (getparam->param) {
@@ -208,14 +210,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
208 getparam->value = 1; 210 getparam->value = 1;
209 break; 211 break;
210 case NOUVEAU_GETPARAM_GRAPH_UNITS: 212 case NOUVEAU_GETPARAM_GRAPH_UNITS:
211 /* NV40 and NV50 versions are quite different, but register 213 getparam->value = graph->units ? graph->units(graph) : 0;
212 * address is the same. User is supposed to know the card 214 break;
213 * family anyway... */
214 if (device->chipset >= 0x40) {
215 getparam->value = nv_rd32(device, 0x001540);
216 break;
217 }
218 /* FALLTHRU */
219 default: 215 default:
220 nv_debug(device, "unknown parameter %lld\n", getparam->param); 216 nv_debug(device, "unknown parameter %lld\n", getparam->param);
221 return -EINVAL; 217 return -EINVAL;
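
The abi16 change above replaces a chipset-conditional register read with a per-engine units() callback, keeping family-specific knowledge inside the graph engine. A minimal sketch of that delegation; the types, names, and return value are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct graph_engine {
	uint64_t (*units)(struct graph_engine *);
};

static uint64_t nv40_units(struct graph_engine *gr)
{
	return 0x12345678;      /* stand-in for a family-specific register read */
}

/* the ioctl path no longer needs chipset checks */
static uint64_t getparam_graph_units(struct graph_engine *gr)
{
	return (gr && gr->units) ? gr->units(gr) : 0;
}

int main(void)
{
	struct graph_engine gr = { .units = nv40_units };

	printf("units = 0x%llx\n",
	       (unsigned long long)getparam_graph_units(&gr));
	return 0;
}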
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5d940302d2aa..2ffad2176b7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -239,6 +239,9 @@ nouveau_backlight_init(struct drm_device *dev)
239 case NV_40: 239 case NV_40:
240 return nv40_backlight_init(connector); 240 return nv40_backlight_init(connector);
241 case NV_50: 241 case NV_50:
242 case NV_C0:
243 case NV_D0:
244 case NV_E0:
242 return nv50_backlight_init(connector); 245 return nv50_backlight_init(connector);
243 default: 246 default:
244 break; 247 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 50a6dd02f7c5..6aa2137e093a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -28,7 +28,7 @@
28 28
29#include "nouveau_drm.h" 29#include "nouveau_drm.h"
30#include "nouveau_reg.h" 30#include "nouveau_reg.h"
31#include "nouveau_hw.h" 31#include "dispnv04/hw.h"
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33 33
34#include <linux/io-mapping.h> 34#include <linux/io-mapping.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 7ccd28f11adf..0067586eb015 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -24,8 +24,6 @@
24#ifndef __NOUVEAU_DISPBIOS_H__ 24#ifndef __NOUVEAU_DISPBIOS_H__
25#define __NOUVEAU_DISPBIOS_H__ 25#define __NOUVEAU_DISPBIOS_H__
26 26
27#include "nvreg.h"
28
29#define DCB_MAX_NUM_ENTRIES 16 27#define DCB_MAX_NUM_ENTRIES 16
30#define DCB_MAX_NUM_I2C_ENTRIES 16 28#define DCB_MAX_NUM_I2C_ENTRIES 16
31#define DCB_MAX_NUM_GPIO_ENTRIES 32 29#define DCB_MAX_NUM_GPIO_ENTRIES 32
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4dd7ae2ac6c6..4da776f344d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -32,7 +32,7 @@
32 32
33#include "nouveau_reg.h" 33#include "nouveau_reg.h"
34#include "nouveau_drm.h" 34#include "nouveau_drm.h"
35#include "nouveau_hw.h" 35#include "dispnv04/hw.h"
36#include "nouveau_acpi.h" 36#include "nouveau_acpi.h"
37 37
38#include "nouveau_display.h" 38#include "nouveau_display.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 4610c3a29bbe..7bf22d4a3d96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -28,7 +28,7 @@
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29 29
30#include "nouveau_fbcon.h" 30#include "nouveau_fbcon.h"
31#include "nouveau_hw.h" 31#include "dispnv04/hw.h"
32#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_gem.h" 34#include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c95decf543e9..46c152ff0a80 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -31,13 +31,13 @@
31#include <core/gpuobj.h> 31#include <core/gpuobj.h>
32#include <core/class.h> 32#include <core/class.h>
33 33
34#include <subdev/device.h> 34#include <engine/device.h>
35#include <subdev/vm.h>
36
37#include <engine/disp.h> 35#include <engine/disp.h>
36#include <engine/fifo.h>
37
38#include <subdev/vm.h>
38 39
39#include "nouveau_drm.h" 40#include "nouveau_drm.h"
40#include "nouveau_irq.h"
41#include "nouveau_dma.h" 41#include "nouveau_dma.h"
42#include "nouveau_ttm.h" 42#include "nouveau_ttm.h"
43#include "nouveau_gem.h" 43#include "nouveau_gem.h"
@@ -165,7 +165,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
165 u32 arg0, arg1; 165 u32 arg0, arg1;
166 int ret; 166 int ret;
167 167
168 if (nouveau_noaccel) 168 if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
169 return; 169 return;
170 170
171 /* initialise synchronisation routines */ 171 /* initialise synchronisation routines */
@@ -365,10 +365,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
365 if (ret) 365 if (ret)
366 goto fail_bios; 366 goto fail_bios;
367 367
368 ret = nouveau_irq_init(dev);
369 if (ret)
370 goto fail_irq;
371
372 ret = nouveau_display_create(dev); 368 ret = nouveau_display_create(dev);
373 if (ret) 369 if (ret)
374 goto fail_dispctor; 370 goto fail_dispctor;
@@ -388,8 +384,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
388fail_dispinit: 384fail_dispinit:
389 nouveau_display_destroy(dev); 385 nouveau_display_destroy(dev);
390fail_dispctor: 386fail_dispctor:
391 nouveau_irq_fini(dev);
392fail_irq:
393 nouveau_bios_takedown(dev); 387 nouveau_bios_takedown(dev);
394fail_bios: 388fail_bios:
395 nouveau_ttm_fini(drm); 389 nouveau_ttm_fini(drm);
@@ -415,7 +409,6 @@ nouveau_drm_unload(struct drm_device *dev)
415 nouveau_display_fini(dev); 409 nouveau_display_fini(dev);
416 nouveau_display_destroy(dev); 410 nouveau_display_destroy(dev);
417 411
418 nouveau_irq_fini(dev);
419 nouveau_bios_takedown(dev); 412 nouveau_bios_takedown(dev);
420 413
421 nouveau_ttm_fini(drm); 414 nouveau_ttm_fini(drm);
@@ -533,7 +526,6 @@ nouveau_do_resume(struct drm_device *dev)
533 nouveau_fence(drm)->resume(drm); 526 nouveau_fence(drm)->resume(drm);
534 527
535 nouveau_run_vbios_init(dev); 528 nouveau_run_vbios_init(dev);
536 nouveau_irq_postinstall(dev);
537 nouveau_pm_resume(dev); 529 nouveau_pm_resume(dev);
538 530
539 if (dev->mode_config.num_crtc) { 531 if (dev->mode_config.num_crtc) {
@@ -669,8 +661,7 @@ static struct drm_driver
669driver = { 661driver = {
670 .driver_features = 662 .driver_features =
671 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | 663 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
672 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 664 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
673 DRIVER_MODESET | DRIVER_PRIME,
674 665
675 .load = nouveau_drm_load, 666 .load = nouveau_drm_load,
676 .unload = nouveau_drm_unload, 667 .unload = nouveau_drm_unload,
@@ -684,11 +675,6 @@ driver = {
684 .debugfs_cleanup = nouveau_debugfs_takedown, 675 .debugfs_cleanup = nouveau_debugfs_takedown,
685#endif 676#endif
686 677
687 .irq_preinstall = nouveau_irq_preinstall,
688 .irq_postinstall = nouveau_irq_postinstall,
689 .irq_uninstall = nouveau_irq_uninstall,
690 .irq_handler = nouveau_irq_handler,
691
692 .get_vblank_counter = drm_vblank_count, 678 .get_vblank_counter = drm_vblank_count,
693 .enable_vblank = nouveau_drm_vblank_enable, 679 .enable_vblank = nouveau_drm_vblank_enable,
694 .disable_vblank = nouveau_drm_vblank_disable, 680 .disable_vblank = nouveau_drm_vblank_disable,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 9c39bafbef2c..f2b30f89dee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,18 @@
10 10
11#define DRIVER_MAJOR 1 11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 1 12#define DRIVER_MINOR 1
13#define DRIVER_PATCHLEVEL 0 13#define DRIVER_PATCHLEVEL 1
14
15/*
16 * 1.1.1:
17 * - added support for tiled system memory buffer objects
18 * - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0].
19 * - added support for compressed memory storage types on [nvc0,nve0].
20 * - added support for software methods 0x600,0x644,0x6ac on nvc0
21 * to control registers on the MPs to enable performance counters,
22 * and to control the warp error enable mask (OpenGL requires out of
23 * bounds access to local memory to be silently ignored / return 0).
24 */
14 25
15#include <core/client.h> 26#include <core/client.h>
16#include <core/event.h> 27#include <core/event.h>
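
The version bump above (1.1.0 -> 1.1.1) is what userspace can key off before relying on the features listed in the new comment. A minimal sketch, assuming libdrm's drmGetVersion() and that /dev/dri/card0 is the nouveau device:

	/* Gate use of the 1.1.1 features on the reported driver version.
	 * Assumes libdrm (xf86drm.h); error handling mostly elided. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <xf86drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		drmVersionPtr v = fd >= 0 ? drmGetVersion(fd) : NULL;

		if (v && v->version_major == 1 &&
		    (v->version_minor > 1 ||
		     (v->version_minor == 1 && v->version_patchlevel >= 1)))
			printf("nouveau 1.1.1+: tiled sysmem objects available\n");
		if (v)
			drmFreeVersion(v);
		return 0;
	}
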
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e24341229d5e..24660c0f713d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,7 +30,7 @@
30#include <subdev/bios/dcb.h> 30#include <subdev/bios/dcb.h>
31 31
32#include <drm/drm_encoder_slave.h> 32#include <drm/drm_encoder_slave.h>
33#include "nv04_display.h" 33#include "dispnv04/disp.h"
34 34
35#define NV_DPMS_CLEARED 0x80 35#define NV_DPMS_CLEARED 0x80
36 36
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
deleted file mode 100644
index 1303680affd3..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_irq.h"
29#include "nv50_display.h"
30
31void
32nouveau_irq_preinstall(struct drm_device *dev)
33{
34 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
35}
36
37int
38nouveau_irq_postinstall(struct drm_device *dev)
39{
40 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
41 return 0;
42}
43
44void
45nouveau_irq_uninstall(struct drm_device *dev)
46{
47 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
48}
49
50irqreturn_t
51nouveau_irq_handler(DRM_IRQ_ARGS)
52{
53 struct drm_device *dev = arg;
54 struct nouveau_device *device = nouveau_dev(dev);
55 struct nouveau_mc *pmc = nouveau_mc(device);
56 u32 stat;
57
58 stat = nv_rd32(device, 0x000100);
59 if (stat == 0 || stat == ~0)
60 return IRQ_NONE;
61
62 nv_subdev(pmc)->intr(nv_subdev(pmc));
63 return IRQ_HANDLED;
64}
65
66int
67nouveau_irq_init(struct drm_device *dev)
68{
69 return drm_irq_install(dev);
70}
71
72void
73nouveau_irq_fini(struct drm_device *dev)
74{
75 drm_irq_uninstall(dev);
76}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
deleted file mode 100644
index 06714ad857bb..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __NOUVEAU_IRQ_H__
2#define __NOUVEAU_IRQ_H__
3
4extern int nouveau_irq_init(struct drm_device *);
5extern void nouveau_irq_fini(struct drm_device *);
6extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
7extern void nouveau_irq_preinstall(struct drm_device *);
8extern int nouveau_irq_postinstall(struct drm_device *);
9extern void nouveau_irq_uninstall(struct drm_device *);
10
11#endif
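
For context: the two files deleted above were a thin shim - mask or unmask the master interrupt enable at register 0x000140 and forward every interrupt to the PMC subdev - and with DRIVER_HAVE_IRQ and the irq_* hooks dropped from the driver structure in nouveau_drm.c above, nothing calls it anymore; presumably the nouveau core now requests the interrupt line itself.
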
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 9be9cb58e19b..f19a15a3bc03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -35,14 +35,16 @@
35static int 35static int
36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
37{ 37{
38 /* nothing to do */ 38 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
39 struct nouveau_fb *pfb = nouveau_fb(drm->device);
40 man->priv = pfb;
39 return 0; 41 return 0;
40} 42}
41 43
42static int 44static int
43nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) 45nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
44{ 46{
45 /* nothing to do */ 47 man->priv = NULL;
46 return 0; 48 return 0;
47} 49}
48 50
@@ -104,7 +106,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
104static void 106static void
105nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) 107nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
106{ 108{
107 struct nouveau_mm *mm = man->priv; 109 struct nouveau_fb *pfb = man->priv;
110 struct nouveau_mm *mm = &pfb->vram;
108 struct nouveau_mm_node *r; 111 struct nouveau_mm_node *r;
109 u32 total = 0, free = 0; 112 u32 total = 0, free = 0;
110 113
@@ -161,6 +164,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
161 struct ttm_placement *placement, 164 struct ttm_placement *placement,
162 struct ttm_mem_reg *mem) 165 struct ttm_mem_reg *mem)
163{ 166{
167 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
168 struct nouveau_bo *nvbo = nouveau_bo(bo);
164 struct nouveau_mem *node; 169 struct nouveau_mem *node;
165 170
166 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024)) 171 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
@@ -171,6 +176,20 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
171 return -ENOMEM; 176 return -ENOMEM;
172 node->page_shift = 12; 177 node->page_shift = 12;
173 178
179 switch (nv_device(drm->device)->card_type) {
180 case NV_50:
181 if (nv_device(drm->device)->chipset != 0x50)
182 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
183 break;
184 case NV_C0:
185 case NV_D0:
186 case NV_E0:
187 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
188 break;
189 default:
190 break;
191 }
192
174 mem->mm_node = node; 193 mem->mm_node = node;
175 mem->start = 0; 194 mem->start = 0;
176 return 0; 195 return 0;
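
The switch added above pulls the storage type out of the buffer's tile_flags so the GART path keeps tiling/compression information: bits 8..14 on nv50 (plain chipset 0x50 excepted), bits 8..15 on nvc0 and later. A toy illustration with a made-up tile_flags value:

	/* Hypothetical value, not taken from real hardware. */
	u32 tile_flags = 0x0000fe00;

	u8 memtype_nv50 = (tile_flags & 0x7f00) >> 8;	/* == 0x7e */
	u8 memtype_nvc0 = (tile_flags & 0xff00) >> 8;	/* == 0xfe */
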
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 2a0cc9d0614a..27afc0ea28b0 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -25,7 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "nouveau_drm.h" 26#include "nouveau_drm.h"
27#include "nouveau_reg.h" 27#include "nouveau_reg.h"
28#include "nouveau_hw.h" 28#include "dispnv04/hw.h"
29#include "nouveau_pm.h" 29#include "nouveau_pm.h"
30 30
31#include <subdev/bios/pll.h> 31#include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3382064c7f33..3af5bcd0b203 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -26,7 +26,7 @@
26#include "nouveau_drm.h" 26#include "nouveau_drm.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_hw.h" 29#include "dispnv04/hw.h"
30 30
31#include <subdev/bios/pll.h> 31#include <subdev/bios/pll.h>
32#include <subdev/clock.h> 32#include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 1ddc03e51bf4..ebf0a683305e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2174,6 +2174,7 @@ int
2174nv50_display_create(struct drm_device *dev) 2174nv50_display_create(struct drm_device *dev)
2175{ 2175{
2176 static const u16 oclass[] = { 2176 static const u16 oclass[] = {
2177 NVF0_DISP_CLASS,
2177 NVE0_DISP_CLASS, 2178 NVE0_DISP_CLASS,
2178 NVD0_DISP_CLASS, 2179 NVD0_DISP_CLASS,
2179 NVA3_DISP_CLASS, 2180 NVA3_DISP_CLASS,
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8bd5d2781baf..69620e39c90c 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,7 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "nouveau_drm.h" 26#include "nouveau_drm.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_hw.h" 28#include "dispnv04/hw.h"
29#include "nouveau_pm.h" 29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h" 30#include "nouveau_hwsq.h"
31 31
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index c451c41a7a7d..912759daf562 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -110,6 +110,11 @@ static enum drm_connector_status omap_connector_detect(
110 ret = connector_status_connected; 110 ret = connector_status_connected;
111 else 111 else
112 ret = connector_status_disconnected; 112 ret = connector_status_disconnected;
113 } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
114 dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
115 dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
116 dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
117 ret = connector_status_connected;
113 } else { 118 } else {
114 ret = connector_status_unknown; 119 ret = connector_status_unknown;
115 } 120 }
@@ -189,12 +194,30 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
189 struct omap_video_timings timings = {0}; 194 struct omap_video_timings timings = {0};
190 struct drm_device *dev = connector->dev; 195 struct drm_device *dev = connector->dev;
191 struct drm_display_mode *new_mode; 196 struct drm_display_mode *new_mode;
192 int ret = MODE_BAD; 197 int r, ret = MODE_BAD;
193 198
194 copy_timings_drm_to_omap(&timings, mode); 199 copy_timings_drm_to_omap(&timings, mode);
195 mode->vrefresh = drm_mode_vrefresh(mode); 200 mode->vrefresh = drm_mode_vrefresh(mode);
196 201
197 if (!dssdrv->check_timings(dssdev, &timings)) { 202 /*
203 * if the panel driver doesn't have a check_timings, it's most likely
 204 * a fixed resolution panel; check whether the timings match the
 205 * panel's own timings
206 */
207 if (dssdrv->check_timings) {
208 r = dssdrv->check_timings(dssdev, &timings);
209 } else {
210 struct omap_video_timings t = {0};
211
212 dssdrv->get_timings(dssdev, &t);
213
214 if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
215 r = -EINVAL;
216 else
217 r = 0;
218 }
219
220 if (!r) {
198 /* check if vrefresh is still valid */ 221 /* check if vrefresh is still valid */
199 new_mode = drm_mode_duplicate(dev, mode); 222 new_mode = drm_mode_duplicate(dev, mode);
200 new_mode->clock = timings.pixel_clock; 223 new_mode->clock = timings.pixel_clock;
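
The same check-or-compare logic is open-coded again in omap_encoder_update() further down; a hypothetical helper showing the pattern in one place (names taken from the diff, the helper itself does not exist in the tree):

	static int check_or_match_timings(struct omap_dss_device *dssdev,
					  struct omap_video_timings *timings)
	{
		struct omap_dss_driver *dssdrv = dssdev->driver;
		struct omap_video_timings t = {0};

		if (dssdrv->check_timings)
			return dssdrv->check_timings(dssdev, timings);

		/* fixed-resolution panel: accept only its native timings */
		dssdrv->get_timings(dssdev, &t);
		return memcmp(timings, &t, sizeof(t)) ? -EINVAL : 0;
	}
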
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index bec66a490b8f..79b200aee18a 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -74,6 +74,13 @@ struct omap_crtc {
74 struct work_struct page_flip_work; 74 struct work_struct page_flip_work;
75}; 75};
76 76
77uint32_t pipe2vbl(struct drm_crtc *crtc)
78{
79 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
80
81 return dispc_mgr_get_vsync_irq(omap_crtc->channel);
82}
83
77/* 84/*
78 * Manager-ops, callbacks from output when they need to configure 85 * Manager-ops, callbacks from output when they need to configure
79 * the upstream part of the video pipe. 86 * the upstream part of the video pipe.
@@ -613,7 +620,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
613 omap_crtc->apply.pre_apply = omap_crtc_pre_apply; 620 omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
614 omap_crtc->apply.post_apply = omap_crtc_post_apply; 621 omap_crtc->apply.post_apply = omap_crtc_post_apply;
615 622
616 omap_crtc->apply_irq.irqmask = pipe2vbl(id); 623 omap_crtc->channel = channel;
624 omap_crtc->plane = plane;
625 omap_crtc->plane->crtc = crtc;
626 omap_crtc->name = channel_names[channel];
627 omap_crtc->pipe = id;
628
629 omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
617 omap_crtc->apply_irq.irq = omap_crtc_apply_irq; 630 omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
618 631
619 omap_crtc->error_irq.irqmask = 632 omap_crtc->error_irq.irqmask =
@@ -621,12 +634,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
621 omap_crtc->error_irq.irq = omap_crtc_error_irq; 634 omap_crtc->error_irq.irq = omap_crtc_error_irq;
622 omap_irq_register(dev, &omap_crtc->error_irq); 635 omap_irq_register(dev, &omap_crtc->error_irq);
623 636
624 omap_crtc->channel = channel;
625 omap_crtc->plane = plane;
626 omap_crtc->plane->crtc = crtc;
627 omap_crtc->name = channel_names[channel];
628 omap_crtc->pipe = id;
629
630 /* temporary: */ 637 /* temporary: */
631 omap_crtc->mgr.id = channel; 638 omap_crtc->mgr.id = channel;
632 639
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 079c54c6f94c..9c53c25e5201 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -74,54 +74,53 @@ static int get_connector_type(struct omap_dss_device *dssdev)
74 } 74 }
75} 75}
76 76
77static bool channel_used(struct drm_device *dev, enum omap_channel channel)
78{
79 struct omap_drm_private *priv = dev->dev_private;
80 int i;
81
82 for (i = 0; i < priv->num_crtcs; i++) {
83 struct drm_crtc *crtc = priv->crtcs[i];
84
85 if (omap_crtc_channel(crtc) == channel)
86 return true;
87 }
88
89 return false;
90}
91
77static int omap_modeset_init(struct drm_device *dev) 92static int omap_modeset_init(struct drm_device *dev)
78{ 93{
79 struct omap_drm_private *priv = dev->dev_private; 94 struct omap_drm_private *priv = dev->dev_private;
80 struct omap_dss_device *dssdev = NULL; 95 struct omap_dss_device *dssdev = NULL;
81 int num_ovls = dss_feat_get_num_ovls(); 96 int num_ovls = dss_feat_get_num_ovls();
82 int id; 97 int num_mgrs = dss_feat_get_num_mgrs();
98 int num_crtcs;
99 int i, id = 0;
83 100
84 drm_mode_config_init(dev); 101 drm_mode_config_init(dev);
85 102
86 omap_drm_irq_install(dev); 103 omap_drm_irq_install(dev);
87 104
88 /* 105 /*
89 * Create private planes and CRTCs for the last NUM_CRTCs overlay 106 * We usually don't want to create a CRTC for each manager, at least
90 * plus manager: 107 * not until we have a way to expose private planes to userspace.
108 * Otherwise there would not be enough video pipes left for drm planes.
109 * We use the num_crtc argument to limit the number of crtcs we create.
91 */ 110 */
92 for (id = 0; id < min(num_crtc, num_ovls); id++) { 111 num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
93 struct drm_plane *plane;
94 struct drm_crtc *crtc;
95
96 plane = omap_plane_init(dev, id, true);
97 crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
98 112
99 BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs)); 113 dssdev = NULL;
100 priv->crtcs[id] = crtc;
101 priv->num_crtcs++;
102
103 priv->planes[id] = plane;
104 priv->num_planes++;
105 }
106
107 /*
108 * Create normal planes for the remaining overlays:
109 */
110 for (; id < num_ovls; id++) {
111 struct drm_plane *plane = omap_plane_init(dev, id, false);
112
113 BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
114 priv->planes[priv->num_planes++] = plane;
115 }
116 114
117 for_each_dss_dev(dssdev) { 115 for_each_dss_dev(dssdev) {
118 struct drm_connector *connector; 116 struct drm_connector *connector;
119 struct drm_encoder *encoder; 117 struct drm_encoder *encoder;
118 enum omap_channel channel;
120 119
121 if (!dssdev->driver) { 120 if (!dssdev->driver) {
122 dev_warn(dev->dev, "%s has no driver.. skipping it\n", 121 dev_warn(dev->dev, "%s has no driver.. skipping it\n",
123 dssdev->name); 122 dssdev->name);
124 return 0; 123 continue;
125 } 124 }
126 125
127 if (!(dssdev->driver->get_timings || 126 if (!(dssdev->driver->get_timings ||
@@ -129,7 +128,7 @@ static int omap_modeset_init(struct drm_device *dev)
129 dev_warn(dev->dev, "%s driver does not support " 128 dev_warn(dev->dev, "%s driver does not support "
130 "get_timings or read_edid.. skipping it!\n", 129 "get_timings or read_edid.. skipping it!\n",
131 dssdev->name); 130 dssdev->name);
132 return 0; 131 continue;
133 } 132 }
134 133
135 encoder = omap_encoder_init(dev, dssdev); 134 encoder = omap_encoder_init(dev, dssdev);
@@ -157,16 +156,118 @@ static int omap_modeset_init(struct drm_device *dev)
157 156
158 drm_mode_connector_attach_encoder(connector, encoder); 157 drm_mode_connector_attach_encoder(connector, encoder);
159 158
159 /*
160 * if we have reached the limit of the crtcs we are allowed to
161 * create, let's not try to look for a crtc for this
 162 * panel/encoder and onwards; we will, of course, populate the
 163 * possible_crtcs field for all the encoders with the final
164 * set of crtcs we create
165 */
166 if (id == num_crtcs)
167 continue;
168
169 /*
170 * get the recommended DISPC channel for this encoder. For now,
 171 * we only try to create a crtc out of the recommended one; the
172 * other possible channels to which the encoder can connect are
173 * not considered.
174 */
175 channel = dssdev->output->dispc_channel;
176
177 /*
178 * if this channel hasn't already been taken by a previously
179 * allocated crtc, we create a new crtc for it
180 */
181 if (!channel_used(dev, channel)) {
182 struct drm_plane *plane;
183 struct drm_crtc *crtc;
184
185 plane = omap_plane_init(dev, id, true);
186 crtc = omap_crtc_init(dev, plane, channel, id);
187
188 BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
189 priv->crtcs[id] = crtc;
190 priv->num_crtcs++;
191
192 priv->planes[id] = plane;
193 priv->num_planes++;
194
195 id++;
196 }
197 }
198
199 /*
 200 * we have allocated crtcs according to the needs of the panels/encoders;
 201 * add more crtcs here if needed
202 */
203 for (; id < num_crtcs; id++) {
204
205 /* find a free manager for this crtc */
206 for (i = 0; i < num_mgrs; i++) {
207 if (!channel_used(dev, i)) {
208 struct drm_plane *plane;
209 struct drm_crtc *crtc;
210
211 plane = omap_plane_init(dev, id, true);
212 crtc = omap_crtc_init(dev, plane, i, id);
213
214 BUG_ON(priv->num_crtcs >=
215 ARRAY_SIZE(priv->crtcs));
216
217 priv->crtcs[id] = crtc;
218 priv->num_crtcs++;
219
220 priv->planes[id] = plane;
221 priv->num_planes++;
222
223 break;
224 } else {
225 continue;
226 }
227 }
228
229 if (i == num_mgrs) {
230 /* this shouldn't really happen */
231 dev_err(dev->dev, "no managers left for crtc\n");
232 return -ENOMEM;
233 }
234 }
235
236 /*
237 * Create normal planes for the remaining overlays:
238 */
239 for (; id < num_ovls; id++) {
240 struct drm_plane *plane = omap_plane_init(dev, id, false);
241
242 BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
243 priv->planes[priv->num_planes++] = plane;
244 }
245
246 for (i = 0; i < priv->num_encoders; i++) {
247 struct drm_encoder *encoder = priv->encoders[i];
248 struct omap_dss_device *dssdev =
249 omap_encoder_get_dssdev(encoder);
250
160 /* figure out which crtc's we can connect the encoder to: */ 251 /* figure out which crtc's we can connect the encoder to: */
161 encoder->possible_crtcs = 0; 252 encoder->possible_crtcs = 0;
162 for (id = 0; id < priv->num_crtcs; id++) { 253 for (id = 0; id < priv->num_crtcs; id++) {
163 enum omap_dss_output_id supported_outputs = 254 struct drm_crtc *crtc = priv->crtcs[id];
164 dss_feat_get_supported_outputs(pipe2chan(id)); 255 enum omap_channel crtc_channel;
256 enum omap_dss_output_id supported_outputs;
257
258 crtc_channel = omap_crtc_channel(crtc);
259 supported_outputs =
260 dss_feat_get_supported_outputs(crtc_channel);
261
165 if (supported_outputs & dssdev->output->id) 262 if (supported_outputs & dssdev->output->id)
166 encoder->possible_crtcs |= (1 << id); 263 encoder->possible_crtcs |= (1 << id);
167 } 264 }
168 } 265 }
169 266
267 DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
268 priv->num_planes, priv->num_crtcs, priv->num_encoders,
269 priv->num_connectors);
270
170 dev->mode_config.min_width = 32; 271 dev->mode_config.min_width = 32;
171 dev->mode_config.min_height = 32; 272 dev->mode_config.min_height = 32;
172 273
@@ -303,7 +404,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
303 return ret; 404 return ret;
304} 405}
305 406
306struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { 407static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
307 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 408 DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
308 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 409 DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
309 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 410 DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -567,7 +668,7 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
567}; 668};
568#endif 669#endif
569 670
570struct platform_driver pdev = { 671static struct platform_driver pdev = {
571 .driver = { 672 .driver = {
572 .name = DRIVER_NAME, 673 .name = DRIVER_NAME,
573 .owner = THIS_MODULE, 674 .owner = THIS_MODULE,
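
possible_crtcs, as rebuilt in the omap_modeset_init() hunk above, is a bitmask indexed by crtc id; a worked example with made-up feature masks:

	/* Made-up values: two crtcs, and an encoder whose output id is 0x4.
	 * Only crtc 1 sits on a channel that supports that output. */
	uint32_t supported[2] = { 0x1, 0x6 };
	uint32_t output_id = 0x4, possible_crtcs = 0;
	int id;

	for (id = 0; id < 2; id++)
		if (supported[id] & output_id)
			possible_crtcs |= 1u << id;
	/* possible_crtcs == 0x2: the encoder can only drive crtc 1 */
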
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index d4f997bb4ac0..215a20dd340c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -139,8 +139,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
139int omap_gem_resume(struct device *dev); 139int omap_gem_resume(struct device *dev);
140#endif 140#endif
141 141
142int omap_irq_enable_vblank(struct drm_device *dev, int crtc); 142int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
143void omap_irq_disable_vblank(struct drm_device *dev, int crtc); 143void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
144irqreturn_t omap_irq_handler(DRM_IRQ_ARGS); 144irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
145void omap_irq_preinstall(struct drm_device *dev); 145void omap_irq_preinstall(struct drm_device *dev);
146int omap_irq_postinstall(struct drm_device *dev); 146int omap_irq_postinstall(struct drm_device *dev);
@@ -271,39 +271,9 @@ static inline int align_pitch(int pitch, int width, int bpp)
271 return ALIGN(pitch, 8 * bytespp); 271 return ALIGN(pitch, 8 * bytespp);
272} 272}
273 273
274static inline enum omap_channel pipe2chan(int pipe)
275{
276 int num_mgrs = dss_feat_get_num_mgrs();
277
278 /*
279 * We usually don't want to create a CRTC for each manager,
280 * at least not until we have a way to expose private planes
281 * to userspace. Otherwise there would not be enough video
282 * pipes left for drm planes. The higher #'d managers tend
283 * to have more features so start in reverse order.
284 */
285 return num_mgrs - pipe - 1;
286}
287
288/* map crtc to vblank mask */ 274/* map crtc to vblank mask */
289static inline uint32_t pipe2vbl(int crtc) 275uint32_t pipe2vbl(struct drm_crtc *crtc);
290{ 276struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
291 enum omap_channel channel = pipe2chan(crtc);
292 return dispc_mgr_get_vsync_irq(channel);
293}
294
295static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
296{
297 struct omap_drm_private *priv = dev->dev_private;
298 int i;
299
300 for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
301 if (priv->crtcs[i] == crtc)
302 return i;
303
304 BUG(); /* bogus CRTC ptr */
305 return -1;
306}
307 277
308/* should these be made into common util helpers? 278/* should these be made into common util helpers?
309 */ 279 */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 21d126d0317e..c29451ba65da 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -41,6 +41,13 @@ struct omap_encoder {
41 struct omap_dss_device *dssdev; 41 struct omap_dss_device *dssdev;
42}; 42};
43 43
44struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
45{
46 struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
47
48 return omap_encoder->dssdev;
49}
50
44static void omap_encoder_destroy(struct drm_encoder *encoder) 51static void omap_encoder_destroy(struct drm_encoder *encoder)
45{ 52{
46 struct omap_encoder *omap_encoder = to_omap_encoder(encoder); 53 struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -128,13 +135,26 @@ int omap_encoder_update(struct drm_encoder *encoder,
128 135
129 dssdev->output->manager = mgr; 136 dssdev->output->manager = mgr;
130 137
131 ret = dssdrv->check_timings(dssdev, timings); 138 if (dssdrv->check_timings) {
139 ret = dssdrv->check_timings(dssdev, timings);
140 } else {
141 struct omap_video_timings t = {0};
142
143 dssdrv->get_timings(dssdev, &t);
144
145 if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
146 ret = -EINVAL;
147 else
148 ret = 0;
149 }
150
132 if (ret) { 151 if (ret) {
133 dev_err(dev->dev, "could not set timings: %d\n", ret); 152 dev_err(dev->dev, "could not set timings: %d\n", ret);
134 return ret; 153 return ret;
135 } 154 }
136 155
137 dssdrv->set_timings(dssdev, timings); 156 if (dssdrv->set_timings)
157 dssdrv->set_timings(dssdev, timings);
138 158
139 return 0; 159 return 0;
140} 160}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ac74d1bc67bf..be7cd97a0db0 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -178,7 +178,7 @@ out_unlock:
178 return omap_gem_mmap_obj(obj, vma); 178 return omap_gem_mmap_obj(obj, vma);
179} 179}
180 180
181struct dma_buf_ops omap_dmabuf_ops = { 181static struct dma_buf_ops omap_dmabuf_ops = {
182 .map_dma_buf = omap_gem_map_dma_buf, 182 .map_dma_buf = omap_gem_map_dma_buf,
183 .unmap_dma_buf = omap_gem_unmap_dma_buf, 183 .unmap_dma_buf = omap_gem_unmap_dma_buf,
184 .release = omap_gem_dmabuf_release, 184 .release = omap_gem_dmabuf_release,
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
212 * refcount on gem itself instead of f_count of dmabuf. 212 * refcount on gem itself instead of f_count of dmabuf.
213 */ 213 */
214 drm_gem_object_reference(obj); 214 drm_gem_object_reference(obj);
215 dma_buf_put(buffer);
216 return obj; 215 return obj;
217 } 216 }
218 } 217 }
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index e01303ee00c3..9263db117ff8 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -130,12 +130,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
130 * Zero on success, appropriate errno if the given @crtc's vblank 130 * Zero on success, appropriate errno if the given @crtc's vblank
131 * interrupt cannot be enabled. 131 * interrupt cannot be enabled.
132 */ 132 */
133int omap_irq_enable_vblank(struct drm_device *dev, int crtc) 133int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
134{ 134{
135 struct omap_drm_private *priv = dev->dev_private; 135 struct omap_drm_private *priv = dev->dev_private;
136 struct drm_crtc *crtc = priv->crtcs[crtc_id];
136 unsigned long flags; 137 unsigned long flags;
137 138
138 DBG("dev=%p, crtc=%d", dev, crtc); 139 DBG("dev=%p, crtc=%d", dev, crtc_id);
139 140
140 dispc_runtime_get(); 141 dispc_runtime_get();
141 spin_lock_irqsave(&list_lock, flags); 142 spin_lock_irqsave(&list_lock, flags);
@@ -156,12 +157,13 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
156 * a hardware vblank counter, this routine should be a no-op, since 157 * a hardware vblank counter, this routine should be a no-op, since
157 * interrupts will have to stay on to keep the count accurate. 158 * interrupts will have to stay on to keep the count accurate.
158 */ 159 */
159void omap_irq_disable_vblank(struct drm_device *dev, int crtc) 160void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
160{ 161{
161 struct omap_drm_private *priv = dev->dev_private; 162 struct omap_drm_private *priv = dev->dev_private;
163 struct drm_crtc *crtc = priv->crtcs[crtc_id];
162 unsigned long flags; 164 unsigned long flags;
163 165
164 DBG("dev=%p, crtc=%d", dev, crtc); 166 DBG("dev=%p, crtc=%d", dev, crtc_id);
165 167
166 dispc_runtime_get(); 168 dispc_runtime_get();
167 spin_lock_irqsave(&list_lock, flags); 169 spin_lock_irqsave(&list_lock, flags);
@@ -186,9 +188,12 @@ irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
186 188
187 VERB("irqs: %08x", irqstatus); 189 VERB("irqs: %08x", irqstatus);
188 190
189 for (id = 0; id < priv->num_crtcs; id++) 191 for (id = 0; id < priv->num_crtcs; id++) {
190 if (irqstatus & pipe2vbl(id)) 192 struct drm_crtc *crtc = priv->crtcs[id];
193
194 if (irqstatus & pipe2vbl(crtc))
191 drm_handle_vblank(dev, id); 195 drm_handle_vblank(dev, id);
196 }
192 197
193 spin_lock_irqsave(&list_lock, flags); 198 spin_lock_irqsave(&list_lock, flags);
194 list_for_each_entry_safe(handler, n, &priv->irq_list, node) { 199 list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2882cda6ea19..8d225d7ff4e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -247,6 +247,12 @@ static int omap_plane_update(struct drm_plane *plane,
247{ 247{
248 struct omap_plane *omap_plane = to_omap_plane(plane); 248 struct omap_plane *omap_plane = to_omap_plane(plane);
249 omap_plane->enabled = true; 249 omap_plane->enabled = true;
250
251 if (plane->fb)
252 drm_framebuffer_unreference(plane->fb);
253
254 drm_framebuffer_reference(fb);
255
250 return omap_plane_mode_set(plane, crtc, fb, 256 return omap_plane_mode_set(plane, crtc, fb,
251 crtc_x, crtc_y, crtc_w, crtc_h, 257 crtc_x, crtc_y, crtc_w, crtc_h,
252 src_x, src_y, src_w, src_h, 258 src_x, src_y, src_w, src_h,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 000000000000..2f1a57e11140
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,10 @@
1config DRM_QXL
2 tristate "QXL virtual GPU"
3 depends on DRM && PCI
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
10 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 000000000000..ea046ba691d2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6
7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
8
9obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 000000000000..08b0823c93d5
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,685 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26/* QXL cmd/ring handling */
27
28#include "qxl_drv.h"
29#include "qxl_object.h"
30
31static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
32
33struct ring {
34 struct qxl_ring_header header;
35 uint8_t elements[0];
36};
37
38struct qxl_ring {
39 struct ring *ring;
40 int element_size;
41 int n_elements;
42 int prod_notify;
43 wait_queue_head_t *push_event;
44 spinlock_t lock;
45};
46
47void qxl_ring_free(struct qxl_ring *ring)
48{
49 kfree(ring);
50}
51
52struct qxl_ring *
53qxl_ring_create(struct qxl_ring_header *header,
54 int element_size,
55 int n_elements,
56 int prod_notify,
57 bool set_prod_notify,
58 wait_queue_head_t *push_event)
59{
60 struct qxl_ring *ring;
61
62 ring = kmalloc(sizeof(*ring), GFP_KERNEL);
63 if (!ring)
64 return NULL;
65
66 ring->ring = (struct ring *)header;
67 ring->element_size = element_size;
68 ring->n_elements = n_elements;
69 ring->prod_notify = prod_notify;
70 ring->push_event = push_event;
71 if (set_prod_notify)
72 header->notify_on_prod = ring->n_elements;
73 spin_lock_init(&ring->lock);
74 return ring;
75}
76
77static int qxl_check_header(struct qxl_ring *ring)
78{
79 int ret;
80 struct qxl_ring_header *header = &(ring->ring->header);
81 unsigned long flags;
82 spin_lock_irqsave(&ring->lock, flags);
83 ret = header->prod - header->cons < header->num_items;
84 if (ret == 0)
85 header->notify_on_cons = header->cons + 1;
86 spin_unlock_irqrestore(&ring->lock, flags);
87 return ret;
88}
89
90static int qxl_check_idle(struct qxl_ring *ring)
91{
92 int ret;
93 struct qxl_ring_header *header = &(ring->ring->header);
94 unsigned long flags;
95 spin_lock_irqsave(&ring->lock, flags);
96 ret = header->prod == header->cons;
97 spin_unlock_irqrestore(&ring->lock, flags);
98 return ret;
99}
100
101int qxl_ring_push(struct qxl_ring *ring,
102 const void *new_elt, bool interruptible)
103{
104 struct qxl_ring_header *header = &(ring->ring->header);
105 uint8_t *elt;
106 int idx, ret;
107 unsigned long flags;
108 spin_lock_irqsave(&ring->lock, flags);
109 if (header->prod - header->cons == header->num_items) {
110 header->notify_on_cons = header->cons + 1;
111 mb();
112 spin_unlock_irqrestore(&ring->lock, flags);
113 if (!drm_can_sleep()) {
114 while (!qxl_check_header(ring))
115 udelay(1);
116 } else {
117 if (interruptible) {
118 ret = wait_event_interruptible(*ring->push_event,
119 qxl_check_header(ring));
120 if (ret)
121 return ret;
122 } else {
123 wait_event(*ring->push_event,
124 qxl_check_header(ring));
125 }
126
127 }
128 spin_lock_irqsave(&ring->lock, flags);
129 }
130
131 idx = header->prod & (ring->n_elements - 1);
132 elt = ring->ring->elements + idx * ring->element_size;
133
134 memcpy((void *)elt, new_elt, ring->element_size);
135
136 header->prod++;
137
138 mb();
139
140 if (header->prod == header->notify_on_prod)
141 outb(0, ring->prod_notify);
142
143 spin_unlock_irqrestore(&ring->lock, flags);
144 return 0;
145}
146
147static bool qxl_ring_pop(struct qxl_ring *ring,
148 void *element)
149{
150 volatile struct qxl_ring_header *header = &(ring->ring->header);
151 volatile uint8_t *ring_elt;
152 int idx;
153 unsigned long flags;
154 spin_lock_irqsave(&ring->lock, flags);
155 if (header->cons == header->prod) {
156 header->notify_on_prod = header->cons + 1;
157 spin_unlock_irqrestore(&ring->lock, flags);
158 return false;
159 }
160
161 idx = header->cons & (ring->n_elements - 1);
162 ring_elt = ring->ring->elements + idx * ring->element_size;
163
164 memcpy(element, (void *)ring_elt, ring->element_size);
165
166 header->cons++;
167
168 spin_unlock_irqrestore(&ring->lock, flags);
169 return true;
170}
171
172int
173qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
174 uint32_t type, bool interruptible)
175{
176 struct qxl_command cmd;
177
178 cmd.type = type;
179 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
180
181 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
182}
183
184int
185qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
186 uint32_t type, bool interruptible)
187{
188 struct qxl_command cmd;
189
190 cmd.type = type;
191 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
192
193 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
194}
195
196bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
197{
198 if (!qxl_check_idle(qdev->release_ring)) {
199 queue_work(qdev->gc_queue, &qdev->gc_work);
200 if (flush)
201 flush_work(&qdev->gc_work);
202 return true;
203 }
204 return false;
205}
206
207int qxl_garbage_collect(struct qxl_device *qdev)
208{
209 struct qxl_release *release;
210 uint64_t id, next_id;
211 int i = 0;
212 int ret;
213 union qxl_release_info *info;
214
215 while (qxl_ring_pop(qdev->release_ring, &id)) {
216 QXL_INFO(qdev, "popped %lld\n", id);
217 while (id) {
218 release = qxl_release_from_id_locked(qdev, id);
219 if (release == NULL)
220 break;
221
222 ret = qxl_release_reserve(qdev, release, false);
223 if (ret) {
224 qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
225 DRM_ERROR("failed to reserve release %lld\n", id);
226 }
227
228 info = qxl_release_map(qdev, release);
229 next_id = info->next;
230 qxl_release_unmap(qdev, release, info);
231
232 qxl_release_unreserve(qdev, release);
233 QXL_INFO(qdev, "popped %lld, next %lld\n", id,
234 next_id);
235
236 switch (release->type) {
237 case QXL_RELEASE_DRAWABLE:
238 case QXL_RELEASE_SURFACE_CMD:
239 case QXL_RELEASE_CURSOR_CMD:
240 break;
241 default:
242 DRM_ERROR("unexpected release type\n");
243 break;
244 }
245 id = next_id;
246
247 qxl_release_free(qdev, release);
248 ++i;
249 }
250 }
251
252 QXL_INFO(qdev, "%s: %lld\n", __func__, i);
253
254 return i;
255}
256
257int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
258 struct qxl_bo **_bo)
259{
260 struct qxl_bo *bo;
261 int ret;
262
263 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
264 QXL_GEM_DOMAIN_VRAM, NULL, &bo);
265 if (ret) {
266 DRM_ERROR("failed to allocate VRAM BO\n");
267 return ret;
268 }
269 ret = qxl_bo_reserve(bo, false);
270 if (unlikely(ret != 0))
271 goto out_unref;
272
273 *_bo = bo;
274 return 0;
275out_unref:
276 qxl_bo_unref(&bo);
277 return 0;
278}
279
280static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
281{
282 int irq_num;
283 long addr = qdev->io_base + port;
284 int ret;
285
286 mutex_lock(&qdev->async_io_mutex);
287 irq_num = atomic_read(&qdev->irq_received_io_cmd);
288
289
290 if (qdev->last_sent_io_cmd > irq_num) {
291 ret = wait_event_interruptible(qdev->io_cmd_event,
292 atomic_read(&qdev->irq_received_io_cmd) > irq_num);
293 if (ret)
294 goto out;
295 irq_num = atomic_read(&qdev->irq_received_io_cmd);
296 }
297 outb(val, addr);
298 qdev->last_sent_io_cmd = irq_num + 1;
299 ret = wait_event_interruptible(qdev->io_cmd_event,
300 atomic_read(&qdev->irq_received_io_cmd) > irq_num);
301out:
302 mutex_unlock(&qdev->async_io_mutex);
303 return ret;
304}
305
306static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
307{
308 int ret;
309
310restart:
311 ret = wait_for_io_cmd_user(qdev, val, port);
312 if (ret == -ERESTARTSYS)
313 goto restart;
314}
315
316int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
317 const struct qxl_rect *area)
318{
319 int surface_id;
320 uint32_t surface_width, surface_height;
321 int ret;
322
323 if (!surf->hw_surf_alloc)
324 DRM_ERROR("got io update area with no hw surface\n");
325
326 if (surf->is_primary)
327 surface_id = 0;
328 else
329 surface_id = surf->surface_id;
330 surface_width = surf->surf.width;
331 surface_height = surf->surf.height;
332
333 if (area->left < 0 || area->top < 0 ||
334 area->right > surface_width || area->bottom > surface_height) {
335 qxl_io_log(qdev, "%s: not doing area update for "
336 "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
337 area->top, area->right, area->bottom, surface_width, surface_height);
338 return -EINVAL;
339 }
340 mutex_lock(&qdev->update_area_mutex);
341 qdev->ram_header->update_area = *area;
342 qdev->ram_header->update_surface = surface_id;
343 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
344 mutex_unlock(&qdev->update_area_mutex);
345 return ret;
346}
347
348void qxl_io_notify_oom(struct qxl_device *qdev)
349{
350 outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
351}
352
353void qxl_io_flush_release(struct qxl_device *qdev)
354{
355 outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
356}
357
358void qxl_io_flush_surfaces(struct qxl_device *qdev)
359{
360 wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
361}
362
363
364void qxl_io_destroy_primary(struct qxl_device *qdev)
365{
366 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
367}
368
369void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
370 unsigned height, unsigned offset, struct qxl_bo *bo)
371{
372 struct qxl_surface_create *create;
373
374 QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
375 qdev->ram_header);
376 create = &qdev->ram_header->create_surface;
377 create->format = bo->surf.format;
378 create->width = width;
379 create->height = height;
380 create->stride = bo->surf.stride;
381 create->mem = qxl_bo_physical_address(qdev, bo, offset);
382
383 QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
384 bo->kptr);
385
386 create->flags = QXL_SURF_FLAG_KEEP_DATA;
387 create->type = QXL_SURF_TYPE_PRIMARY;
388
389 wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
390}
391
392void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
393{
394 QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
395 wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
396}
397
398void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
399{
400 va_list args;
401
402 va_start(args, fmt);
403 vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
404 va_end(args);
405 /*
 406 * Do not do a DRM output here - this will call printk, which will
407 * call back into qxl for rendering (qxl_fb)
408 */
409 outb(0, qdev->io_base + QXL_IO_LOG);
410}
411
412void qxl_io_reset(struct qxl_device *qdev)
413{
414 outb(0, qdev->io_base + QXL_IO_RESET);
415}
416
417void qxl_io_monitors_config(struct qxl_device *qdev)
418{
419 qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
420 qdev->monitors_config ?
421 qdev->monitors_config->count : -1,
422 qdev->monitors_config && qdev->monitors_config->count ?
423 qdev->monitors_config->heads[0].width : -1,
424 qdev->monitors_config && qdev->monitors_config->count ?
425 qdev->monitors_config->heads[0].height : -1,
426 qdev->monitors_config && qdev->monitors_config->count ?
427 qdev->monitors_config->heads[0].x : -1,
428 qdev->monitors_config && qdev->monitors_config->count ?
429 qdev->monitors_config->heads[0].y : -1
430 );
431
432 wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
433}
434
435int qxl_surface_id_alloc(struct qxl_device *qdev,
436 struct qxl_bo *surf)
437{
438 uint32_t handle;
439 int idr_ret;
440 int count = 0;
441again:
442 idr_preload(GFP_ATOMIC);
443 spin_lock(&qdev->surf_id_idr_lock);
444 idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
445 spin_unlock(&qdev->surf_id_idr_lock);
446 idr_preload_end();
447 if (idr_ret < 0)
448 return idr_ret;
449 handle = idr_ret;
450
451 if (handle >= qdev->rom->n_surfaces) {
452 count++;
453 spin_lock(&qdev->surf_id_idr_lock);
454 idr_remove(&qdev->surf_id_idr, handle);
455 spin_unlock(&qdev->surf_id_idr_lock);
456 qxl_reap_surface_id(qdev, 2);
457 goto again;
458 }
459 surf->surface_id = handle;
460
461 spin_lock(&qdev->surf_id_idr_lock);
462 qdev->last_alloced_surf_id = handle;
463 spin_unlock(&qdev->surf_id_idr_lock);
464 return 0;
465}
466
467void qxl_surface_id_dealloc(struct qxl_device *qdev,
468 uint32_t surface_id)
469{
470 spin_lock(&qdev->surf_id_idr_lock);
471 idr_remove(&qdev->surf_id_idr, surface_id);
472 spin_unlock(&qdev->surf_id_idr_lock);
473}
474
475int qxl_hw_surface_alloc(struct qxl_device *qdev,
476 struct qxl_bo *surf,
477 struct ttm_mem_reg *new_mem)
478{
479 struct qxl_surface_cmd *cmd;
480 struct qxl_release *release;
481 int ret;
482
483 if (surf->hw_surf_alloc)
484 return 0;
485
486 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
487 NULL,
488 &release);
489 if (ret)
490 return ret;
491
492 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
493 cmd->type = QXL_SURFACE_CMD_CREATE;
494 cmd->u.surface_create.format = surf->surf.format;
495 cmd->u.surface_create.width = surf->surf.width;
496 cmd->u.surface_create.height = surf->surf.height;
497 cmd->u.surface_create.stride = surf->surf.stride;
498 if (new_mem) {
499 int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
500 struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
501
502 /* TODO - need to hold one of the locks to read tbo.offset */
503 cmd->u.surface_create.data = slot->high_bits;
504
505 cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
506 } else
507 cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
508 cmd->surface_id = surf->surface_id;
509 qxl_release_unmap(qdev, release, &cmd->release_info);
510
511 surf->surf_create = release;
512
513 /* no need to add a release to the fence for this bo,
514 since it is only released when we ask to destroy the surface
515 and it would never signal otherwise */
516 qxl_fence_releaseable(qdev, release);
517
518 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
519
520 qxl_release_unreserve(qdev, release);
521
522 surf->hw_surf_alloc = true;
523 spin_lock(&qdev->surf_id_idr_lock);
524 idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
525 spin_unlock(&qdev->surf_id_idr_lock);
526 return 0;
527}
528
529int qxl_hw_surface_dealloc(struct qxl_device *qdev,
530 struct qxl_bo *surf)
531{
532 struct qxl_surface_cmd *cmd;
533 struct qxl_release *release;
534 int ret;
535 int id;
536
537 if (!surf->hw_surf_alloc)
538 return 0;
539
540 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
541 surf->surf_create,
542 &release);
543 if (ret)
544 return ret;
545
546 surf->surf_create = NULL;
547 /* remove the surface from the idr, but not the surface id yet */
548 spin_lock(&qdev->surf_id_idr_lock);
549 idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
550 spin_unlock(&qdev->surf_id_idr_lock);
551 surf->hw_surf_alloc = false;
552
553 id = surf->surface_id;
554 surf->surface_id = 0;
555
556 release->surface_release_id = id;
557 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
558 cmd->type = QXL_SURFACE_CMD_DESTROY;
559 cmd->surface_id = id;
560 qxl_release_unmap(qdev, release, &cmd->release_info);
561
562 qxl_fence_releaseable(qdev, release);
563
564 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
565
566 qxl_release_unreserve(qdev, release);
567
568
569 return 0;
570}
571
572int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
573{
574 struct qxl_rect rect;
575 int ret;
576
577 /* if we are evicting, we need to make sure the surface is up
578 to date */
579 rect.left = 0;
580 rect.right = surf->surf.width;
581 rect.top = 0;
582 rect.bottom = surf->surf.height;
583retry:
584 ret = qxl_io_update_area(qdev, surf, &rect);
585 if (ret == -ERESTARTSYS)
586 goto retry;
587 return ret;
588}
589
590static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
591{
592 /* no need to update area if we are just freeing the surface normally */
593 if (do_update_area)
594 qxl_update_surface(qdev, surf);
595
596 /* nuke the surface id at the hw */
597 qxl_hw_surface_dealloc(qdev, surf);
598}
599
600void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
601{
602 mutex_lock(&qdev->surf_evict_mutex);
603 qxl_surface_evict_locked(qdev, surf, do_update_area);
604 mutex_unlock(&qdev->surf_evict_mutex);
605}
606
607static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
608{
609 int ret;
610
611 ret = qxl_bo_reserve(surf, false);
612 if (ret == -EBUSY)
613 return -EBUSY;
614
615 if (surf->fence.num_active_releases > 0 && stall == false) {
616 qxl_bo_unreserve(surf);
617 return -EBUSY;
618 }
619
620 if (stall)
621 mutex_unlock(&qdev->surf_evict_mutex);
622
623 spin_lock(&surf->tbo.bdev->fence_lock);
624 ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
625 spin_unlock(&surf->tbo.bdev->fence_lock);
626
627 if (stall)
628 mutex_lock(&qdev->surf_evict_mutex);
629 if (ret == -EBUSY) {
630 qxl_bo_unreserve(surf);
631 return -EBUSY;
632 }
633
634 qxl_surface_evict_locked(qdev, surf, true);
635 qxl_bo_unreserve(surf);
636 return 0;
637}
638
639static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
640{
641 int num_reaped = 0;
642 int i, ret;
643 bool stall = false;
644 int start = 0;
645
646 mutex_lock(&qdev->surf_evict_mutex);
647again:
648
649 spin_lock(&qdev->surf_id_idr_lock);
650 start = qdev->last_alloced_surf_id + 1;
651 spin_unlock(&qdev->surf_id_idr_lock);
652
653 for (i = start; i < start + qdev->rom->n_surfaces; i++) {
654 void *objptr;
655 int surfid = i % qdev->rom->n_surfaces;
656
 657 /* this avoids the case where the object is in the
 658 idr but has been evicted halfway - it makes
659 the idr lookup atomic with the eviction */
660 spin_lock(&qdev->surf_id_idr_lock);
661 objptr = idr_find(&qdev->surf_id_idr, surfid);
662 spin_unlock(&qdev->surf_id_idr_lock);
663
664 if (!objptr)
665 continue;
666
667 ret = qxl_reap_surf(qdev, objptr, stall);
668 if (ret == 0)
669 num_reaped++;
670 if (num_reaped >= max_to_reap)
671 break;
672 }
673 if (num_reaped == 0 && stall == false) {
674 stall = true;
675 goto again;
676 }
677
678 mutex_unlock(&qdev->surf_evict_mutex);
679 if (num_reaped) {
680 usleep_range(500, 1000);
681 qxl_queue_garbage_collect(qdev, true);
682 }
683
684 return 0;
685}
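
qxl_ring_push() and qxl_ring_pop() above index the ring by masking free-running prod/cons counters, which only works when n_elements is a power of two. A tiny model of the arithmetic, with made-up counter values:

	/* prod/cons never wrap back into the slot range themselves; the
	 * slot is just the low bits, and fullness is the difference. */
	unsigned n_elements = 8;			/* power of two */
	unsigned prod = 9, cons = 3;

	unsigned slot = prod & (n_elements - 1);	/* 9 & 7 == 1 */
	unsigned used = prod - cons;			/* 6 entries queued */
	int full = (used == n_elements);		/* 0: room left */
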
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 000000000000..c3c2bbdc6674
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Alon Levy <alevy@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "qxl_drv.h"
35#include "qxl_object.h"
36
37
38#if defined(CONFIG_DEBUG_FS)
39static int
40qxl_debugfs_irq_received(struct seq_file *m, void *data)
41{
42 struct drm_info_node *node = (struct drm_info_node *) m->private;
43 struct qxl_device *qdev = node->minor->dev->dev_private;
44
45 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
46 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
47 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
48 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
49 seq_printf(m, "%d\n", qdev->irq_received_error);
50 return 0;
51}
52
53static int
54qxl_debugfs_buffers_info(struct seq_file *m, void *data)
55{
56 struct drm_info_node *node = (struct drm_info_node *) m->private;
57 struct qxl_device *qdev = node->minor->dev->dev_private;
58 struct qxl_bo *bo;
59
60 list_for_each_entry(bo, &qdev->gem.objects, list) {
61 seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
62 (unsigned long)bo->gem_base.size, bo->pin_count,
63 bo->tbo.sync_obj, bo->fence.num_active_releases);
64 }
65 return 0;
66}
67
68static struct drm_info_list qxl_debugfs_list[] = {
69 { "irq_received", qxl_debugfs_irq_received, 0, NULL },
70 { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
71};
72#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
73#endif
74
75int
76qxl_debugfs_init(struct drm_minor *minor)
77{
78#if defined(CONFIG_DEBUG_FS)
79 drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
80 minor->debugfs_root, minor);
81#endif
82 return 0;
83}
84
85void
86qxl_debugfs_takedown(struct drm_minor *minor)
87{
88#if defined(CONFIG_DEBUG_FS)
89 drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
90 minor);
91#endif
92}
93
94int qxl_debugfs_add_files(struct qxl_device *qdev,
95 struct drm_info_list *files,
96 unsigned nfiles)
97{
98 unsigned i;
99
100 for (i = 0; i < qdev->debugfs_count; i++) {
101 if (qdev->debugfs[i].files == files) {
102 /* Already registered */
103 return 0;
104 }
105 }
106
107 i = qdev->debugfs_count + 1;
108 if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
109 DRM_ERROR("Reached maximum number of debugfs components.\n");
110 DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
111 return -EINVAL;
112 }
113 qdev->debugfs[qdev->debugfs_count].files = files;
114 qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
115 qdev->debugfs_count = i;
116#if defined(CONFIG_DEBUG_FS)
117 drm_debugfs_create_files(files, nfiles,
118 qdev->ddev->control->debugfs_root,
119 qdev->ddev->control);
120 drm_debugfs_create_files(files, nfiles,
121 qdev->ddev->primary->debugfs_root,
122 qdev->ddev->primary);
123#endif
124 return 0;
125}
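
/* Usage sketch (not part of the original file): a driver component would
 * declare its own drm_info_list once and register it through
 * qxl_debugfs_add_files(); the duplicate check above makes the call
 * idempotent.  example_info_cb and example_list are hypothetical names. */
static int example_info_cb(struct seq_file *m, void *data)
{
	seq_puts(m, "example\n");
	return 0;
}

static struct drm_info_list example_list[] = {
	{ "example", example_info_cb, 0, NULL },
};

/* at init time:
 *	qxl_debugfs_add_files(qdev, example_list, ARRAY_SIZE(example_list));
 */
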
126
127void qxl_debugfs_remove_files(struct qxl_device *qdev)
128{
129#if defined(CONFIG_DEBUG_FS)
130 unsigned i;
131
132 for (i = 0; i < qdev->debugfs_count; i++) {
133 drm_debugfs_remove_files(qdev->debugfs[i].files,
134 qdev->debugfs[i].num_files,
135 qdev->ddev->control);
136 drm_debugfs_remove_files(qdev->debugfs[i].files,
137 qdev->debugfs[i].num_files,
138 qdev->ddev->primary);
139 }
140#endif
141}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 000000000000..94c5aec71920
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,879 @@
1/*
2 Copyright (C) 2009 Red Hat, Inc.
3
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are
6 met:
7
8 * Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10 * Redistributions in binary form must reproduce the above copyright
11 notice, this list of conditions and the following disclaimer in
12 the documentation and/or other materials provided with the
13 distribution.
14 * Neither the name of the copyright holder nor the names of its
15 contributors may be used to endorse or promote products derived
16 from this software without specific prior written permission.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
19 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29*/
30
31
32#ifndef H_QXL_DEV
33#define H_QXL_DEV
34
35#include <linux/types.h>
36
37/*
38 * from spice-protocol
39 * Release 0.10.0
40 */
41
42/* enums.h */
43
44enum SpiceImageType {
45 SPICE_IMAGE_TYPE_BITMAP,
46 SPICE_IMAGE_TYPE_QUIC,
47 SPICE_IMAGE_TYPE_RESERVED,
48 SPICE_IMAGE_TYPE_LZ_PLT = 100,
49 SPICE_IMAGE_TYPE_LZ_RGB,
50 SPICE_IMAGE_TYPE_GLZ_RGB,
51 SPICE_IMAGE_TYPE_FROM_CACHE,
52 SPICE_IMAGE_TYPE_SURFACE,
53 SPICE_IMAGE_TYPE_JPEG,
54 SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
55 SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
56 SPICE_IMAGE_TYPE_JPEG_ALPHA,
57
58 SPICE_IMAGE_TYPE_ENUM_END
59};
60
61enum SpiceBitmapFmt {
62 SPICE_BITMAP_FMT_INVALID,
63 SPICE_BITMAP_FMT_1BIT_LE,
64 SPICE_BITMAP_FMT_1BIT_BE,
65 SPICE_BITMAP_FMT_4BIT_LE,
66 SPICE_BITMAP_FMT_4BIT_BE,
67 SPICE_BITMAP_FMT_8BIT,
68 SPICE_BITMAP_FMT_16BIT,
69 SPICE_BITMAP_FMT_24BIT,
70 SPICE_BITMAP_FMT_32BIT,
71 SPICE_BITMAP_FMT_RGBA,
72
73 SPICE_BITMAP_FMT_ENUM_END
74};
75
76enum SpiceSurfaceFmt {
77 SPICE_SURFACE_FMT_INVALID,
78 SPICE_SURFACE_FMT_1_A,
79 SPICE_SURFACE_FMT_8_A = 8,
80 SPICE_SURFACE_FMT_16_555 = 16,
81 SPICE_SURFACE_FMT_32_xRGB = 32,
82 SPICE_SURFACE_FMT_16_565 = 80,
83 SPICE_SURFACE_FMT_32_ARGB = 96,
84
85 SPICE_SURFACE_FMT_ENUM_END
86};
87
88enum SpiceClipType {
89 SPICE_CLIP_TYPE_NONE,
90 SPICE_CLIP_TYPE_RECTS,
91
92 SPICE_CLIP_TYPE_ENUM_END
93};
94
95enum SpiceRopd {
96 SPICE_ROPD_INVERS_SRC = (1 << 0),
97 SPICE_ROPD_INVERS_BRUSH = (1 << 1),
98 SPICE_ROPD_INVERS_DEST = (1 << 2),
99 SPICE_ROPD_OP_PUT = (1 << 3),
100 SPICE_ROPD_OP_OR = (1 << 4),
101 SPICE_ROPD_OP_AND = (1 << 5),
102 SPICE_ROPD_OP_XOR = (1 << 6),
103 SPICE_ROPD_OP_BLACKNESS = (1 << 7),
104 SPICE_ROPD_OP_WHITENESS = (1 << 8),
105 SPICE_ROPD_OP_INVERS = (1 << 9),
106 SPICE_ROPD_INVERS_RES = (1 << 10),
107
108 SPICE_ROPD_MASK = 0x7ff
109};
110
111enum SpiceBrushType {
112 SPICE_BRUSH_TYPE_NONE,
113 SPICE_BRUSH_TYPE_SOLID,
114 SPICE_BRUSH_TYPE_PATTERN,
115
116 SPICE_BRUSH_TYPE_ENUM_END
117};
118
119enum SpiceCursorType {
120 SPICE_CURSOR_TYPE_ALPHA,
121 SPICE_CURSOR_TYPE_MONO,
122 SPICE_CURSOR_TYPE_COLOR4,
123 SPICE_CURSOR_TYPE_COLOR8,
124 SPICE_CURSOR_TYPE_COLOR16,
125 SPICE_CURSOR_TYPE_COLOR24,
126 SPICE_CURSOR_TYPE_COLOR32,
127
128 SPICE_CURSOR_TYPE_ENUM_END
129};
130
131/* qxl_dev.h */
132
133#pragma pack(push, 1)
134
135#define REDHAT_PCI_VENDOR_ID 0x1b36
136
137/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
138#define QXL_DEVICE_ID_STABLE 0x0100
139
140enum {
141 QXL_REVISION_STABLE_V04 = 0x01,
142 QXL_REVISION_STABLE_V06 = 0x02,
143 QXL_REVISION_STABLE_V10 = 0x03,
144 QXL_REVISION_STABLE_V12 = 0x04,
145};
146
147#define QXL_DEVICE_ID_DEVEL 0x01ff
148#define QXL_REVISION_DEVEL 0x01
149
150#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
151#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
152
153enum {
154 QXL_RAM_RANGE_INDEX,
155 QXL_VRAM_RANGE_INDEX,
156 QXL_ROM_RANGE_INDEX,
157 QXL_IO_RANGE_INDEX,
158
159 QXL_PCI_RANGES
160};
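
/* A hedged probe-time sketch, not the driver's actual code: each value in
 * the enum above is a PCI BAR index, so the ranges can be located with the
 * usual pci_resource_*() helpers.  The out-parameters stand in for
 * whatever fields the driver actually keeps. */
static int example_map_ranges(struct pci_dev *pdev,
			      struct qxl_rom **rom, unsigned long *io_base)
{
	*rom = ioremap(pci_resource_start(pdev, QXL_ROM_RANGE_INDEX),
		       pci_resource_len(pdev, QXL_ROM_RANGE_INDEX));
	if (!*rom)
		return -ENOMEM;
	*io_base = pci_resource_start(pdev, QXL_IO_RANGE_INDEX);
	return 0;
}
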
161
162/* qxl-1 compat: append only */
163enum {
164 QXL_IO_NOTIFY_CMD,
165 QXL_IO_NOTIFY_CURSOR,
166 QXL_IO_UPDATE_AREA,
167 QXL_IO_UPDATE_IRQ,
168 QXL_IO_NOTIFY_OOM,
169 QXL_IO_RESET,
170 QXL_IO_SET_MODE, /* qxl-1 */
171 QXL_IO_LOG,
172 /* appended for qxl-2 */
173 QXL_IO_MEMSLOT_ADD,
174 QXL_IO_MEMSLOT_DEL,
175 QXL_IO_DETACH_PRIMARY,
176 QXL_IO_ATTACH_PRIMARY,
177 QXL_IO_CREATE_PRIMARY,
178 QXL_IO_DESTROY_PRIMARY,
179 QXL_IO_DESTROY_SURFACE_WAIT,
180 QXL_IO_DESTROY_ALL_SURFACES,
181 /* appended for qxl-3 */
182 QXL_IO_UPDATE_AREA_ASYNC,
183 QXL_IO_MEMSLOT_ADD_ASYNC,
184 QXL_IO_CREATE_PRIMARY_ASYNC,
185 QXL_IO_DESTROY_PRIMARY_ASYNC,
186 QXL_IO_DESTROY_SURFACE_ASYNC,
187 QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
188 QXL_IO_FLUSH_SURFACES_ASYNC,
189 QXL_IO_FLUSH_RELEASE,
190 /* appended for qxl-4 */
191 QXL_IO_MONITORS_CONFIG_ASYNC,
192
193 QXL_IO_RANGE_SIZE
194};
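
/* A minimal sketch, assuming the QXL_IO range is decoded as x86 port I/O
 * at the BAR selected by QXL_IO_RANGE_INDEX; io_base is the hypothetical
 * value from the probe sketch above. */
static inline void example_qxl_io(unsigned long io_base, int port,
				  uint8_t val)
{
	outb(val, io_base + port);
}

/* e.g. kick the command ring: example_qxl_io(io_base, QXL_IO_NOTIFY_CMD, 0); */
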
195
196typedef uint64_t QXLPHYSICAL;
197typedef int32_t QXLFIXED; /* fixed 28.4 */
198
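/* Illustrative helpers: "fixed 28.4" means 4 fractional bits, so integer
 * conversion is a shift by four. */
static inline QXLFIXED example_fixed_from_int(int32_t v)
{
	return (QXLFIXED)(v << 4);
}

static inline int32_t example_int_from_fixed(QXLFIXED f)
{
	return f >> 4;
}
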
199struct qxl_point_fix {
200 QXLFIXED x;
201 QXLFIXED y;
202};
203
204struct qxl_point {
205 int32_t x;
206 int32_t y;
207};
208
209struct qxl_point_1_6 {
210 int16_t x;
211 int16_t y;
212};
213
214struct qxl_rect {
215 int32_t top;
216 int32_t left;
217 int32_t bottom;
218 int32_t right;
219};
220
221struct qxl_urect {
222 uint32_t top;
223 uint32_t left;
224 uint32_t bottom;
225 uint32_t right;
226};
227
228/* qxl-1 compat: append only */
229struct qxl_rom {
230 uint32_t magic;
231 uint32_t id;
232 uint32_t update_id;
233 uint32_t compression_level;
234 uint32_t log_level;
235 uint32_t mode; /* qxl-1 */
236 uint32_t modes_offset;
237 uint32_t num_io_pages;
238 uint32_t pages_offset; /* qxl-1 */
239 uint32_t draw_area_offset; /* qxl-1 */
240 uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */
241 uint32_t ram_header_offset;
242 uint32_t mm_clock;
243 /* appended for qxl-2 */
244 uint32_t n_surfaces;
245 uint64_t flags;
246 uint8_t slots_start;
247 uint8_t slots_end;
248 uint8_t slot_gen_bits;
249 uint8_t slot_id_bits;
250 uint8_t slot_generation;
251 /* appended for qxl-4 */
252 uint8_t client_present;
253 uint8_t client_capabilities[58];
254 uint32_t client_monitors_config_crc;
255 struct {
256 uint16_t count;
257 uint16_t padding;
258 struct qxl_urect heads[64];
259 } client_monitors_config;
260};
261
262/* qxl-1 compat: fixed */
263struct qxl_mode {
264 uint32_t id;
265 uint32_t x_res;
266 uint32_t y_res;
267 uint32_t bits;
268 uint32_t stride;
269 uint32_t x_mili;
270 uint32_t y_mili;
271 uint32_t orientation;
272};
273
274/* qxl-1 compat: fixed */
275struct qxl_modes {
276 uint32_t n_modes;
277 struct qxl_mode modes[0];
278};
279
280/* qxl-1 compat: append only */
281enum qxl_cmd_type {
282 QXL_CMD_NOP,
283 QXL_CMD_DRAW,
284 QXL_CMD_UPDATE,
285 QXL_CMD_CURSOR,
286 QXL_CMD_MESSAGE,
287 QXL_CMD_SURFACE,
288};
289
290/* qxl-1 compat: fixed */
291struct qxl_command {
292 QXLPHYSICAL data;
293 uint32_t type;
294 uint32_t padding;
295};
296
297#define QXL_COMMAND_FLAG_COMPAT (1<<0)
298#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0)
299
300struct qxl_command_ext {
301 struct qxl_command cmd;
302 uint32_t group_id;
303 uint32_t flags;
304};
305
306struct qxl_mem_slot {
307 uint64_t mem_start;
308 uint64_t mem_end;
309};
310
311#define QXL_SURF_TYPE_PRIMARY 0
312
313#define QXL_SURF_FLAG_KEEP_DATA (1 << 0)
314
315struct qxl_surface_create {
316 uint32_t width;
317 uint32_t height;
318 int32_t stride;
319 uint32_t format;
320 uint32_t position;
321 uint32_t mouse_mode;
322 uint32_t flags;
323 uint32_t type;
324 QXLPHYSICAL mem;
325};
326
327#define QXL_COMMAND_RING_SIZE 32
328#define QXL_CURSOR_RING_SIZE 32
329#define QXL_RELEASE_RING_SIZE 8
330
331#define QXL_LOG_BUF_SIZE 4096
332
333#define QXL_INTERRUPT_DISPLAY (1 << 0)
334#define QXL_INTERRUPT_CURSOR (1 << 1)
335#define QXL_INTERRUPT_IO_CMD (1 << 2)
336#define QXL_INTERRUPT_ERROR (1 << 3)
337#define QXL_INTERRUPT_CLIENT (1 << 4)
338#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5)
339
340struct qxl_ring_header {
341 uint32_t num_items;
342 uint32_t prod;
343 uint32_t notify_on_prod;
344 uint32_t cons;
345 uint32_t notify_on_cons;
346};
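
/* An illustrative producer-side sketch (not the driver's actual ring
 * helper): the rings below are fixed-size arrays that follow their header,
 * prod/cons increase monotonically, and the slot is prod % num_items.
 * After publishing, the device expects a kick (a QXL_IO_NOTIFY_* port
 * write) once prod reaches notify_on_prod. */
static void example_ring_push(struct qxl_ring_header *hdr,
			      struct qxl_command *ring,
			      const struct qxl_command *cmd)
{
	ring[hdr->prod % hdr->num_items] = *cmd;
	wmb();	/* the entry must be visible before prod moves */
	hdr->prod++;
}
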
347
348/* qxl-1 compat: append only */
349struct qxl_ram_header {
350 uint32_t magic;
351 uint32_t int_pending;
352 uint32_t int_mask;
353 uint8_t log_buf[QXL_LOG_BUF_SIZE];
354 struct qxl_ring_header cmd_ring_hdr;
355 struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE];
356 struct qxl_ring_header cursor_ring_hdr;
357 struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE];
358 struct qxl_ring_header release_ring_hdr;
359 uint64_t release_ring[QXL_RELEASE_RING_SIZE];
360 struct qxl_rect update_area;
361 /* appended for qxl-2 */
362 uint32_t update_surface;
363 struct qxl_mem_slot mem_slot;
364 struct qxl_surface_create create_surface;
365 uint64_t flags;
366
367 /* appended for qxl-4 */
368
369 /* used by QXL_IO_MONITORS_CONFIG_ASYNC */
370 QXLPHYSICAL monitors_config;
371 uint8_t guest_capabilities[64];
372};
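
/* Illustrative: the ram header lives inside the RAM BAR at the offset the
 * ROM advertises (ram_header_offset above); ram_base stands in for
 * whatever kernel mapping of that BAR the driver keeps. */
static struct qxl_ram_header *example_ram_header(void *ram_base,
						 const struct qxl_rom *rom)
{
	return (struct qxl_ram_header *)((uint8_t *)ram_base +
					 rom->ram_header_offset);
}
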
373
374union qxl_release_info {
375 uint64_t id; /* in */
376 uint64_t next; /* out */
377};
378
379struct qxl_release_info_ext {
380 union qxl_release_info *info;
381 uint32_t group_id;
382};
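
/* A heavily hedged sketch of walking a release-ring entry: each 64-bit id
 * popped from release_ring names a release whose release_info.next chains
 * to further ids.  Both helpers prototyped here are hypothetical stand-ins
 * for the driver's real lookup and teardown code. */
union qxl_release_info *example_release_from_id(struct qxl_device *qdev,
						uint64_t id);
void example_release_done(struct qxl_device *qdev, uint64_t id);

static void example_process_release(struct qxl_device *qdev, uint64_t id)
{
	while (id) {
		union qxl_release_info *info =
			example_release_from_id(qdev, id);
		uint64_t next = info->next;

		example_release_done(qdev, id);
		id = next;
	}
}
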
383
384struct qxl_data_chunk {
385 uint32_t data_size;
386 QXLPHYSICAL prev_chunk;
387 QXLPHYSICAL next_chunk;
388 uint8_t data[0];
389};
390
391struct qxl_message {
392 union qxl_release_info release_info;
393 uint8_t data[0];
394};
395
396struct qxl_compat_update_cmd {
397 union qxl_release_info release_info;
398 struct qxl_rect area;
399 uint32_t update_id;
400};
401
402struct qxl_update_cmd {
403 union qxl_release_info release_info;
404 struct qxl_rect area;
405 uint32_t update_id;
406 uint32_t surface_id;
407};
408
409struct qxl_cursor_header {
410 uint64_t unique;
411 uint16_t type;
412 uint16_t width;
413 uint16_t height;
414 uint16_t hot_spot_x;
415 uint16_t hot_spot_y;
416};
417
418struct qxl_cursor {
419 struct qxl_cursor_header header;
420 uint32_t data_size;
421 struct qxl_data_chunk chunk;
422};
423
424enum {
425 QXL_CURSOR_SET,
426 QXL_CURSOR_MOVE,
427 QXL_CURSOR_HIDE,
428 QXL_CURSOR_TRAIL,
429};
430
431#define QXL_CURSOR_DEVICE_DATA_SIZE 128
432
433struct qxl_cursor_cmd {
434 union qxl_release_info release_info;
435 uint8_t type;
436 union {
437 struct {
438 struct qxl_point_1_6 position;
439 uint8_t visible;
440 QXLPHYSICAL shape;
441 } set;
442 struct {
443 uint16_t length;
444 uint16_t frequency;
445 } trail;
446 struct qxl_point_1_6 position;
447 } u;
448 /* todo: dynamic size from rom */
449 uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
450};
451
452enum {
453 QXL_DRAW_NOP,
454 QXL_DRAW_FILL,
455 QXL_DRAW_OPAQUE,
456 QXL_DRAW_COPY,
457 QXL_COPY_BITS,
458 QXL_DRAW_BLEND,
459 QXL_DRAW_BLACKNESS,
460 QXL_DRAW_WHITENESS,
461 QXL_DRAW_INVERS,
462 QXL_DRAW_ROP3,
463 QXL_DRAW_STROKE,
464 QXL_DRAW_TEXT,
465 QXL_DRAW_TRANSPARENT,
466 QXL_DRAW_ALPHA_BLEND,
467 QXL_DRAW_COMPOSITE
468};
469
470struct qxl_raster_glyph {
471 struct qxl_point render_pos;
472 struct qxl_point glyph_origin;
473 uint16_t width;
474 uint16_t height;
475 uint8_t data[0];
476};
477
478struct qxl_string {
479 uint32_t data_size;
480 uint16_t length;
481 uint16_t flags;
482 struct qxl_data_chunk chunk;
483};
484
485struct qxl_copy_bits {
486 struct qxl_point src_pos;
487};
488
489enum qxl_effect_type {
490 QXL_EFFECT_BLEND = 0,
491 QXL_EFFECT_OPAQUE = 1,
492 QXL_EFFECT_REVERT_ON_DUP = 2,
493 QXL_EFFECT_BLACKNESS_ON_DUP = 3,
494 QXL_EFFECT_WHITENESS_ON_DUP = 4,
495 QXL_EFFECT_NOP_ON_DUP = 5,
496 QXL_EFFECT_NOP = 6,
497 QXL_EFFECT_OPAQUE_BRUSH = 7
498};
499
500struct qxl_pattern {
501 QXLPHYSICAL pat;
502 struct qxl_point pos;
503};
504
505struct qxl_brush {
506 uint32_t type;
507 union {
508 uint32_t color;
509 struct qxl_pattern pattern;
510 } u;
511};
512
513struct qxl_q_mask {
514 uint8_t flags;
515 struct qxl_point pos;
516 QXLPHYSICAL bitmap;
517};
518
519struct qxl_fill {
520 struct qxl_brush brush;
521 uint16_t rop_descriptor;
522 struct qxl_q_mask mask;
523};
524
525struct qxl_opaque {
526 QXLPHYSICAL src_bitmap;
527 struct qxl_rect src_area;
528 struct qxl_brush brush;
529 uint16_t rop_descriptor;
530 uint8_t scale_mode;
531 struct qxl_q_mask mask;
532};
533
534struct qxl_copy {
535 QXLPHYSICAL src_bitmap;
536 struct qxl_rect src_area;
537 uint16_t rop_descriptor;
538 uint8_t scale_mode;
539 struct qxl_q_mask mask;
540};
541
542struct qxl_transparent {
543 QXLPHYSICAL src_bitmap;
544 struct qxl_rect src_area;
545 uint32_t src_color;
546 uint32_t true_color;
547};
548
549struct qxl_alpha_blend {
550 uint16_t alpha_flags;
551 uint8_t alpha;
552 QXLPHYSICAL src_bitmap;
553 struct qxl_rect src_area;
554};
555
556struct qxl_compat_alpha_blend {
557 uint8_t alpha;
558 QXLPHYSICAL src_bitmap;
559 struct qxl_rect src_area;
560};
561
562struct qxl_rop_3 {
563 QXLPHYSICAL src_bitmap;
564 struct qxl_rect src_area;
565 struct qxl_brush brush;
566 uint8_t rop3;
567 uint8_t scale_mode;
568 struct qxl_q_mask mask;
569};
570
571struct qxl_line_attr {
572 uint8_t flags;
573 uint8_t join_style;
574 uint8_t end_style;
575 uint8_t style_nseg;
576 QXLFIXED width;
577 QXLFIXED miter_limit;
578 QXLPHYSICAL style;
579};
580
581struct qxl_stroke {
582 QXLPHYSICAL path;
583 struct qxl_line_attr attr;
584 struct qxl_brush brush;
585 uint16_t fore_mode;
586 uint16_t back_mode;
587};
588
589struct qxl_text {
590 QXLPHYSICAL str;
591 struct qxl_rect back_area;
592 struct qxl_brush fore_brush;
593 struct qxl_brush back_brush;
594 uint16_t fore_mode;
595 uint16_t back_mode;
596};
597
598struct qxl_mask {
599 struct qxl_q_mask mask;
600};
601
602struct qxl_clip {
603 uint32_t type;
604 QXLPHYSICAL data;
605};
606
607enum qxl_operator {
608 QXL_OP_CLEAR = 0x00,
609 QXL_OP_SOURCE = 0x01,
610 QXL_OP_DST = 0x02,
611 QXL_OP_OVER = 0x03,
612 QXL_OP_OVER_REVERSE = 0x04,
613 QXL_OP_IN = 0x05,
614 QXL_OP_IN_REVERSE = 0x06,
615 QXL_OP_OUT = 0x07,
616 QXL_OP_OUT_REVERSE = 0x08,
617 QXL_OP_ATOP = 0x09,
618 QXL_OP_ATOP_REVERSE = 0x0a,
619 QXL_OP_XOR = 0x0b,
620 QXL_OP_ADD = 0x0c,
621 QXL_OP_SATURATE = 0x0d,
622 /* Note the jump here from 0x0d to 0x30 */
623 QXL_OP_MULTIPLY = 0x30,
624 QXL_OP_SCREEN = 0x31,
625 QXL_OP_OVERLAY = 0x32,
626 QXL_OP_DARKEN = 0x33,
627 QXL_OP_LIGHTEN = 0x34,
628 QXL_OP_COLOR_DODGE = 0x35,
629 QXL_OP_COLOR_BURN = 0x36,
630 QXL_OP_HARD_LIGHT = 0x37,
631 QXL_OP_SOFT_LIGHT = 0x38,
632 QXL_OP_DIFFERENCE = 0x39,
633 QXL_OP_EXCLUSION = 0x3a,
634 QXL_OP_HSL_HUE = 0x3b,
635 QXL_OP_HSL_SATURATION = 0x3c,
636 QXL_OP_HSL_COLOR = 0x3d,
637 QXL_OP_HSL_LUMINOSITY = 0x3e
638};
639
640struct qxl_transform {
641 uint32_t t00;
642 uint32_t t01;
643 uint32_t t02;
644 uint32_t t10;
645 uint32_t t11;
646 uint32_t t12;
647};
648
649/* The flags field has the following bit fields:
650 *
651 * operator: [ 0 - 7 ]
652 * src_filter: [ 8 - 10 ]
653 * mask_filter: [ 11 - 13 ]
654 * src_repeat: [ 14 - 15 ]
655 * mask_repeat: [ 16 - 17 ]
656 * component_alpha: [ 18 - 18 ]
657 * reserved: [ 19 - 31 ]
658 *
659 * The repeat and filter values are those of pixman:
660 * REPEAT_NONE = 0
661 * REPEAT_NORMAL = 1
662 * REPEAT_PAD = 2
663 * REPEAT_REFLECT = 3
664 *
665 * The filter values are:
666 * FILTER_NEAREST = 0
667 * FILTER_BILINEAR = 1
668 */
669struct qxl_composite {
670 uint32_t flags;
671
672 QXLPHYSICAL src;
673 QXLPHYSICAL src_transform; /* May be NULL */
674 QXLPHYSICAL mask; /* May be NULL */
675 QXLPHYSICAL mask_transform; /* May be NULL */
676 struct qxl_point_1_6 src_origin;
677 struct qxl_point_1_6 mask_origin;
678};
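
/* A helper sketched directly from the bit layout documented above; the
 * value ranges are those of the pixman repeat/filter constants listed in
 * the comment. */
static inline uint32_t example_composite_flags(uint32_t op,
					       uint32_t src_filter,
					       uint32_t mask_filter,
					       uint32_t src_repeat,
					       uint32_t mask_repeat,
					       bool component_alpha)
{
	return (op & 0xff) |			/* operator:     bits 0-7   */
	       ((src_filter & 0x7) << 8) |	/* src_filter:   bits 8-10  */
	       ((mask_filter & 0x7) << 11) |	/* mask_filter:  bits 11-13 */
	       ((src_repeat & 0x3) << 14) |	/* src_repeat:   bits 14-15 */
	       ((mask_repeat & 0x3) << 16) |	/* mask_repeat:  bits 16-17 */
	       ((component_alpha ? 1u : 0u) << 18);	/* bit 18 */
}
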
679
680struct qxl_compat_drawable {
681 union qxl_release_info release_info;
682 uint8_t effect;
683 uint8_t type;
684 uint16_t bitmap_offset;
685 struct qxl_rect bitmap_area;
686 struct qxl_rect bbox;
687 struct qxl_clip clip;
688 uint32_t mm_time;
689 union {
690 struct qxl_fill fill;
691 struct qxl_opaque opaque;
692 struct qxl_copy copy;
693 struct qxl_transparent transparent;
694 struct qxl_compat_alpha_blend alpha_blend;
695 struct qxl_copy_bits copy_bits;
696 struct qxl_copy blend;
697 struct qxl_rop_3 rop3;
698 struct qxl_stroke stroke;
699 struct qxl_text text;
700 struct qxl_mask blackness;
701 struct qxl_mask invers;
702 struct qxl_mask whiteness;
703 } u;
704};
705
706struct qxl_drawable {
707 union qxl_release_info release_info;
708 uint32_t surface_id;
709 uint8_t effect;
710 uint8_t type;
711 uint8_t self_bitmap;
712 struct qxl_rect self_bitmap_area;
713 struct qxl_rect bbox;
714 struct qxl_clip clip;
715 uint32_t mm_time;
716 int32_t surfaces_dest[3];
717 struct qxl_rect surfaces_rects[3];
718 union {
719 struct qxl_fill fill;
720 struct qxl_opaque opaque;
721 struct qxl_copy copy;
722 struct qxl_transparent transparent;
723 struct qxl_alpha_blend alpha_blend;
724 struct qxl_copy_bits copy_bits;
725 struct qxl_copy blend;
726 struct qxl_rop_3 rop3;
727 struct qxl_stroke stroke;
728 struct qxl_text text;
729 struct qxl_mask blackness;
730 struct qxl_mask invers;
731 struct qxl_mask whiteness;
732 struct qxl_composite composite;
733 } u;
734};
735
736enum qxl_surface_cmd_type {
737 QXL_SURFACE_CMD_CREATE,
738 QXL_SURFACE_CMD_DESTROY,
739};
740
741struct qxl_surface {
742 uint32_t format;
743 uint32_t width;
744 uint32_t height;
745 int32_t stride;
746 QXLPHYSICAL data;
747};
748
749struct qxl_surface_cmd {
750 union qxl_release_info release_info;
751 uint32_t surface_id;
752 uint8_t type;
753 uint32_t flags;
754 union {
755 struct qxl_surface surface_create;
756 } u;
757};
758
759struct qxl_clip_rects {
760 uint32_t num_rects;
761 struct qxl_data_chunk chunk;
762};
763
764enum {
765 QXL_PATH_BEGIN = (1 << 0),
766 QXL_PATH_END = (1 << 1),
767 QXL_PATH_CLOSE = (1 << 3),
768 QXL_PATH_BEZIER = (1 << 4),
769};
770
771struct qxl_path_seg {
772 uint32_t flags;
773 uint32_t count;
774 struct qxl_point_fix points[0];
775};
776
777struct qxl_path {
778 uint32_t data_size;
779 struct qxl_data_chunk chunk;
780};
781
782enum {
783 QXL_IMAGE_GROUP_DRIVER,
784 QXL_IMAGE_GROUP_DEVICE,
785 QXL_IMAGE_GROUP_RED,
786 QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
787};
788
789struct qxl_image_id {
790 uint32_t group;
791 uint32_t unique;
792};
793
794union qxl_image_id_union {
795 struct qxl_image_id id;
796 uint64_t value;
797};
798
799enum qxl_image_flags {
800 QXL_IMAGE_CACHE = (1 << 0),
801 QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
802};
803
804enum qxl_bitmap_flags {
805 QXL_BITMAP_DIRECT = (1 << 0),
806 QXL_BITMAP_UNSTABLE = (1 << 1),
807 QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
808};
809
810#define QXL_SET_IMAGE_ID(image, _group, _unique) { \
811 (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \
812}
813
814struct qxl_image_descriptor {
815 uint64_t id;
816 uint8_t type;
817 uint8_t flags;
818 uint32_t width;
819 uint32_t height;
820};
821
822struct qxl_palette {
823 uint64_t unique;
824 uint16_t num_ents;
825 uint32_t ents[0];
826};
827
828struct qxl_bitmap {
829 uint8_t format;
830 uint8_t flags;
831 uint32_t x;
832 uint32_t y;
833 uint32_t stride;
834 QXLPHYSICAL palette;
835 QXLPHYSICAL data; /* data[0] ? */
836};
837
838struct qxl_surface_id {
839 uint32_t surface_id;
840};
841
842struct qxl_encoder_data {
843 uint32_t data_size;
844 uint8_t data[0];
845};
846
847struct qxl_image {
848 struct qxl_image_descriptor descriptor;
849 union { /* variable length */
850 struct qxl_bitmap bitmap;
851 struct qxl_encoder_data quic;
852 struct qxl_surface_id surface_image;
853 } u;
854};
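
/* Usage sketch for QXL_SET_IMAGE_ID above: pack a (group, unique) pair
 * into the 64-bit descriptor id; on a little-endian host the union
 * defined earlier reads the same fields back. */
static inline void example_tag_image(struct qxl_image *image)
{
	union qxl_image_id_union u;

	QXL_SET_IMAGE_ID(image, QXL_IMAGE_GROUP_DRIVER, 42);
	u.value = image->descriptor.id;
	/* now u.id.group == QXL_IMAGE_GROUP_DRIVER and u.id.unique == 42 */
}
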
855
856/* A QXLHead is a single monitor output backed by a QXLSurface.
857 * x and y offsets are unsigned since they are used in relation to
858 * the given surface, not the same as the x, y coordinates in the guest
859 * screen reference frame. */
860struct qxl_head {
861 uint32_t id;
862 uint32_t surface_id;
863 uint32_t width;
864 uint32_t height;
865 uint32_t x;
866 uint32_t y;
867 uint32_t flags;
868};
869
870struct qxl_monitors_config {
871 uint16_t count;
872 uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
873 driver */
874 struct qxl_head heads[0];
875};
876
877#pragma pack(pop)
878
879#endif /* H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 000000000000..fcfd4436ceed
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,982 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include <linux/crc32.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31#include "drm_crtc_helper.h"
32
33static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
34 struct drm_connector *connector,
35 struct qxl_head *head)
36{
37 struct drm_device *dev = connector->dev;
38 struct drm_display_mode *mode, *t;
39 int width = head->width;
40 int height = head->height;
41
42 if (width < 320 || height < 240) {
43		qxl_io_log(qdev, "%s: bad head: %dx%d\n", __func__, width, height);
44 width = 1024;
45 height = 768;
46 }
47 if (width * height * 4 > 16*1024*1024) {
48 width = 1024;
49 height = 768;
50 }
51	/* TODO: go over regular modes and remove preferred? */
52 list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
53 drm_mode_remove(connector, mode);
54 mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
55 mode->type |= DRM_MODE_TYPE_PREFERRED;
56 mode->status = MODE_OK;
57 drm_mode_probed_add(connector, mode);
58 qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
59}
60
61void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
62{
63 struct drm_connector *connector;
64 int i;
65 struct drm_device *dev = qdev->ddev;
66
67 i = 0;
68 qxl_io_log(qdev, "%s: %d, %d\n", __func__,
69 dev->mode_config.num_connector,
70 qdev->monitors_config->count);
71 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
72		if (i >= qdev->monitors_config->count) {
73 /* crtc will be reported as disabled */
74 continue;
75 }
76 qxl_crtc_set_to_mode(qdev, connector,
77 &qdev->monitors_config->heads[i]);
78 ++i;
79 }
80}
81
82void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
83{
84 if (qdev->client_monitors_config &&
85 count > qdev->client_monitors_config->count) {
86 kfree(qdev->client_monitors_config);
87 qdev->client_monitors_config = NULL;
88 }
89 if (!qdev->client_monitors_config) {
90 qdev->client_monitors_config = kzalloc(
91 sizeof(struct qxl_monitors_config) +
92 sizeof(struct qxl_head) * count, GFP_KERNEL);
93 if (!qdev->client_monitors_config) {
94 qxl_io_log(qdev,
95 "%s: allocation failure for %u heads\n",
96 __func__, count);
97 return;
98 }
99 }
100 qdev->client_monitors_config->count = count;
101}
102
103static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
104{
105 int i;
106 int num_monitors;
107 uint32_t crc;
108
109 BUG_ON(!qdev->monitors_config);
110 num_monitors = qdev->rom->client_monitors_config.count;
111 crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
112 sizeof(qdev->rom->client_monitors_config));
113 if (crc != qdev->rom->client_monitors_config_crc) {
114		qxl_io_log(qdev, "crc mismatch: have %X (%zu) != %X\n", crc,
115 sizeof(qdev->rom->client_monitors_config),
116 qdev->rom->client_monitors_config_crc);
117 return 1;
118 }
119 if (num_monitors > qdev->monitors_config->max_allowed) {
120 DRM_INFO("client monitors list will be truncated: %d < %d\n",
121 qdev->monitors_config->max_allowed, num_monitors);
122 num_monitors = qdev->monitors_config->max_allowed;
123 } else {
124 num_monitors = qdev->rom->client_monitors_config.count;
125 }
126 qxl_alloc_client_monitors_config(qdev, num_monitors);
127	/* copy the device's max_allowed into the client config; it isn't used */
128 qdev->client_monitors_config->max_allowed =
129 qdev->monitors_config->max_allowed;
130 for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
131 struct qxl_urect *c_rect =
132 &qdev->rom->client_monitors_config.heads[i];
133 struct qxl_head *client_head =
134 &qdev->client_monitors_config->heads[i];
135 struct qxl_head *head = &qdev->monitors_config->heads[i];
136 client_head->x = head->x = c_rect->left;
137 client_head->y = head->y = c_rect->top;
138 client_head->width = head->width =
139 c_rect->right - c_rect->left;
140 client_head->height = head->height =
141 c_rect->bottom - c_rect->top;
142 client_head->surface_id = head->surface_id = 0;
143 client_head->id = head->id = i;
144 client_head->flags = head->flags = 0;
145 QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
146 head->x, head->y);
147 }
148 return 0;
149}
150
151void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
152{
153
154 while (qxl_display_copy_rom_client_monitors_config(qdev)) {
155 qxl_io_log(qdev, "failed crc check for client_monitors_config,"
156 " retrying\n");
157 }
158 qxl_crtc_set_from_monitors_config(qdev);
159 /* fire off a uevent and let userspace tell us what to do */
160 qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
161 drm_sysfs_hotplug_event(qdev->ddev);
162}
163
164static int qxl_add_monitors_config_modes(struct drm_connector *connector)
165{
166 struct drm_device *dev = connector->dev;
167 struct qxl_device *qdev = dev->dev_private;
168 struct qxl_output *output = drm_connector_to_qxl_output(connector);
169 int h = output->index;
170 struct drm_display_mode *mode = NULL;
171 struct qxl_head *head;
172
173 if (!qdev->monitors_config)
174 return 0;
175 head = &qdev->monitors_config->heads[h];
176
177 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
178 false);
179 mode->type |= DRM_MODE_TYPE_PREFERRED;
180 drm_mode_probed_add(connector, mode);
181 return 1;
182}
183
184static int qxl_add_common_modes(struct drm_connector *connector)
185{
186 struct drm_device *dev = connector->dev;
187 struct drm_display_mode *mode = NULL;
188 int i;
189 struct mode_size {
190 int w;
191 int h;
192 } common_modes[] = {
193 { 640, 480},
194 { 720, 480},
195 { 800, 600},
196 { 848, 480},
197 {1024, 768},
198 {1152, 768},
199 {1280, 720},
200 {1280, 800},
201 {1280, 854},
202 {1280, 960},
203 {1280, 1024},
204 {1440, 900},
205 {1400, 1050},
206 {1680, 1050},
207 {1600, 1200},
208 {1920, 1080},
209 {1920, 1200}
210 };
211
212 for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
213 if (common_modes[i].w < 320 || common_modes[i].h < 200)
214 continue;
215
216 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
217 60, false, false, false);
218 if (common_modes[i].w == 1024 && common_modes[i].h == 768)
219 mode->type |= DRM_MODE_TYPE_PREFERRED;
220 drm_mode_probed_add(connector, mode);
221 }
222	return i;
223}
224
225static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
226 u16 *blue, uint32_t start, uint32_t size)
227{
228 /* TODO */
229}
230
231static void qxl_crtc_destroy(struct drm_crtc *crtc)
232{
233 struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
234
235 drm_crtc_cleanup(crtc);
236 kfree(qxl_crtc);
237}
238
239static void
240qxl_hide_cursor(struct qxl_device *qdev)
241{
242 struct qxl_release *release;
243 struct qxl_cursor_cmd *cmd;
244 int ret;
245
246 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
247 &release, NULL);
248	if (ret) return;
249 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
250 cmd->type = QXL_CURSOR_HIDE;
251 qxl_release_unmap(qdev, release, &cmd->release_info);
252
253 qxl_fence_releaseable(qdev, release);
254 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
255 qxl_release_unreserve(qdev, release);
256}
257
258static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
259 struct drm_file *file_priv,
260 uint32_t handle,
261 uint32_t width,
262 uint32_t height)
263{
264 struct drm_device *dev = crtc->dev;
265 struct qxl_device *qdev = dev->dev_private;
266 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
267 struct drm_gem_object *obj;
268 struct qxl_cursor *cursor;
269 struct qxl_cursor_cmd *cmd;
270 struct qxl_bo *cursor_bo, *user_bo;
271 struct qxl_release *release;
272 void *user_ptr;
273
274 int size = 64*64*4;
275 int ret = 0;
276 if (!handle) {
277 qxl_hide_cursor(qdev);
278 return 0;
279 }
280
281 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
282 if (!obj) {
283 DRM_ERROR("cannot find cursor object\n");
284 return -ENOENT;
285 }
286
287 user_bo = gem_to_qxl_bo(obj);
288
289 ret = qxl_bo_reserve(user_bo, false);
290 if (ret)
291 goto out_unref;
292
293 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
294 if (ret)
295 goto out_unreserve;
296
297 ret = qxl_bo_kmap(user_bo, &user_ptr);
298 if (ret)
299 goto out_unpin;
300
301 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
302 QXL_RELEASE_CURSOR_CMD,
303 &release, NULL);
304 if (ret)
305 goto out_kunmap;
306 ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
307 &cursor_bo);
308 if (ret)
309 goto out_free_release;
310 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
311 if (ret)
312 goto out_free_bo;
313
314 cursor->header.unique = 0;
315 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
316 cursor->header.width = 64;
317 cursor->header.height = 64;
318 cursor->header.hot_spot_x = 0;
319 cursor->header.hot_spot_y = 0;
320 cursor->data_size = size;
321 cursor->chunk.next_chunk = 0;
322 cursor->chunk.prev_chunk = 0;
323 cursor->chunk.data_size = size;
324
325 memcpy(cursor->chunk.data, user_ptr, size);
326
327 qxl_bo_kunmap(cursor_bo);
328
329 /* finish with the userspace bo */
330 qxl_bo_kunmap(user_bo);
331 qxl_bo_unpin(user_bo);
332 qxl_bo_unreserve(user_bo);
333 drm_gem_object_unreference_unlocked(obj);
334
335 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
336 cmd->type = QXL_CURSOR_SET;
337 cmd->u.set.position.x = qcrtc->cur_x;
338 cmd->u.set.position.y = qcrtc->cur_y;
339
340 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
341 qxl_release_add_res(qdev, release, cursor_bo);
342
343 cmd->u.set.visible = 1;
344 qxl_release_unmap(qdev, release, &cmd->release_info);
345
346 qxl_fence_releaseable(qdev, release);
347 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
348 qxl_release_unreserve(qdev, release);
349
350 qxl_bo_unreserve(cursor_bo);
351 qxl_bo_unref(&cursor_bo);
352
353 return ret;
354out_free_bo:
355 qxl_bo_unref(&cursor_bo);
356out_free_release:
357 qxl_release_unreserve(qdev, release);
358 qxl_release_free(qdev, release);
359out_kunmap:
360 qxl_bo_kunmap(user_bo);
361out_unpin:
362 qxl_bo_unpin(user_bo);
363out_unreserve:
364 qxl_bo_unreserve(user_bo);
365out_unref:
366 drm_gem_object_unreference_unlocked(obj);
367 return ret;
368}
369
370static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
371 int x, int y)
372{
373 struct drm_device *dev = crtc->dev;
374 struct qxl_device *qdev = dev->dev_private;
375 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
376 struct qxl_release *release;
377 struct qxl_cursor_cmd *cmd;
378 int ret;
379
380 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
381 &release, NULL);
382	if (ret) return ret;
383 qcrtc->cur_x = x;
384 qcrtc->cur_y = y;
385
386 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
387 cmd->type = QXL_CURSOR_MOVE;
388 cmd->u.position.x = qcrtc->cur_x;
389 cmd->u.position.y = qcrtc->cur_y;
390 qxl_release_unmap(qdev, release, &cmd->release_info);
391
392 qxl_fence_releaseable(qdev, release);
393 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
394 qxl_release_unreserve(qdev, release);
395 return 0;
396}
397
398
399static const struct drm_crtc_funcs qxl_crtc_funcs = {
400 .cursor_set = qxl_crtc_cursor_set,
401 .cursor_move = qxl_crtc_cursor_move,
402 .gamma_set = qxl_crtc_gamma_set,
403 .set_config = drm_crtc_helper_set_config,
404 .destroy = qxl_crtc_destroy,
405};
406
407static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
408{
409 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
410
411 if (qxl_fb->obj)
412 drm_gem_object_unreference_unlocked(qxl_fb->obj);
413 drm_framebuffer_cleanup(fb);
414 kfree(qxl_fb);
415}
416
417static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
418 struct drm_file *file_priv,
419 unsigned flags, unsigned color,
420 struct drm_clip_rect *clips,
421 unsigned num_clips)
422{
423 /* TODO: vmwgfx where this was cribbed from had locking. Why? */
424 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
425 struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
426 struct drm_clip_rect norect;
427 struct qxl_bo *qobj;
428 int inc = 1;
429
430 qobj = gem_to_qxl_bo(qxl_fb->obj);
431 if (qxl_fb != qdev->active_user_framebuffer) {
432 DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
433 __func__, qxl_fb, qdev->active_user_framebuffer);
434 }
435 if (!num_clips) {
436 num_clips = 1;
437 clips = &norect;
438 norect.x1 = norect.y1 = 0;
439 norect.x2 = fb->width;
440 norect.y2 = fb->height;
441 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
442 num_clips /= 2;
443 inc = 2; /* skip source rects */
444 }
445
446 qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
447 clips, num_clips, inc);
448 return 0;
449}
450
451static const struct drm_framebuffer_funcs qxl_fb_funcs = {
452 .destroy = qxl_user_framebuffer_destroy,
453 .dirty = qxl_framebuffer_surface_dirty,
454/* TODO?
455 * .create_handle = qxl_user_framebuffer_create_handle, */
456};
457
458int
459qxl_framebuffer_init(struct drm_device *dev,
460 struct qxl_framebuffer *qfb,
461 struct drm_mode_fb_cmd2 *mode_cmd,
462 struct drm_gem_object *obj)
463{
464 int ret;
465
466 qfb->obj = obj;
467 ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
468 if (ret) {
469 qfb->obj = NULL;
470 return ret;
471 }
472 drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
473 return 0;
474}
475
476static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
477{
478}
479
480static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
481 const struct drm_display_mode *mode,
482 struct drm_display_mode *adjusted_mode)
483{
484 struct drm_device *dev = crtc->dev;
485 struct qxl_device *qdev = dev->dev_private;
486
487 qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
488 __func__,
489 mode->hdisplay, mode->vdisplay,
490 adjusted_mode->hdisplay,
491 adjusted_mode->vdisplay);
492 return true;
493}
494
495void
496qxl_send_monitors_config(struct qxl_device *qdev)
497{
498 int i;
499
500 BUG_ON(!qdev->ram_header->monitors_config);
501
502 if (qdev->monitors_config->count == 0) {
503 qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
504 return;
505 }
506 for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
507 struct qxl_head *head = &qdev->monitors_config->heads[i];
508
509		if (head->y > 8192 || head->x > 8192 ||
510 head->width > 8192 || head->height > 8192) {
511 DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
512 i, head->width, head->height,
513 head->x, head->y);
514 return;
515 }
516 }
517 qxl_io_monitors_config(qdev);
518}
519
520static void qxl_monitors_config_set_single(struct qxl_device *qdev,
521 unsigned x, unsigned y,
522 unsigned width, unsigned height)
523{
524 DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
525 qdev->monitors_config->count = 1;
526 qdev->monitors_config->heads[0].x = x;
527 qdev->monitors_config->heads[0].y = y;
528 qdev->monitors_config->heads[0].width = width;
529 qdev->monitors_config->heads[0].height = height;
530}
531
532static int qxl_crtc_mode_set(struct drm_crtc *crtc,
533 struct drm_display_mode *mode,
534 struct drm_display_mode *adjusted_mode,
535 int x, int y,
536 struct drm_framebuffer *old_fb)
537{
538 struct drm_device *dev = crtc->dev;
539 struct qxl_device *qdev = dev->dev_private;
540 struct qxl_mode *m = (void *)mode->private;
541 struct qxl_framebuffer *qfb;
542 struct qxl_bo *bo, *old_bo = NULL;
543 uint32_t width, height, base_offset;
544 bool recreate_primary = false;
545 int ret;
546
547 if (!crtc->fb) {
548 DRM_DEBUG_KMS("No FB bound\n");
549 return 0;
550 }
551
552 if (old_fb) {
553 qfb = to_qxl_framebuffer(old_fb);
554 old_bo = gem_to_qxl_bo(qfb->obj);
555 }
556 qfb = to_qxl_framebuffer(crtc->fb);
557 bo = gem_to_qxl_bo(qfb->obj);
558 if (!m)
559 /* and do we care? */
560 DRM_DEBUG("%dx%d: not a native mode\n", x, y);
561 else
562 DRM_DEBUG("%dx%d: qxl id %d\n",
563 mode->hdisplay, mode->vdisplay, m->id);
564 DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
565 x, y,
566 mode->hdisplay, mode->vdisplay,
567 adjusted_mode->hdisplay,
568 adjusted_mode->vdisplay);
569
570 recreate_primary = true;
571
572 width = mode->hdisplay;
573 height = mode->vdisplay;
574 base_offset = 0;
575
576 ret = qxl_bo_reserve(bo, false);
577 if (ret != 0)
578 return ret;
579 ret = qxl_bo_pin(bo, bo->type, NULL);
580 if (ret != 0) {
581 qxl_bo_unreserve(bo);
582 return -EINVAL;
583 }
584 qxl_bo_unreserve(bo);
585 if (recreate_primary) {
586 qxl_io_destroy_primary(qdev);
587 qxl_io_log(qdev,
588 "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
589 width, height, bo->surf.width,
590 bo->surf.height, bo->surf.stride, bo->surf.format);
591 qxl_io_create_primary(qdev, width, height, base_offset, bo);
592 bo->is_primary = true;
593 }
594
595 if (old_bo && old_bo != bo) {
596 old_bo->is_primary = false;
597 ret = qxl_bo_reserve(old_bo, false);
598 qxl_bo_unpin(old_bo);
599 qxl_bo_unreserve(old_bo);
600 }
601
602 if (qdev->monitors_config->count == 0) {
603 qxl_monitors_config_set_single(qdev, x, y,
604 mode->hdisplay,
605 mode->vdisplay);
606 }
607 qdev->mode_set = true;
608 return 0;
609}
610
611static void qxl_crtc_prepare(struct drm_crtc *crtc)
612{
613 DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
614 crtc->mode.hdisplay, crtc->mode.vdisplay,
615 crtc->x, crtc->y, crtc->enabled);
616}
617
618static void qxl_crtc_commit(struct drm_crtc *crtc)
619{
620 DRM_DEBUG("\n");
621}
622
623static void qxl_crtc_load_lut(struct drm_crtc *crtc)
624{
625 DRM_DEBUG("\n");
626}
627
628static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
629 .dpms = qxl_crtc_dpms,
630 .mode_fixup = qxl_crtc_mode_fixup,
631 .mode_set = qxl_crtc_mode_set,
632 .prepare = qxl_crtc_prepare,
633 .commit = qxl_crtc_commit,
634 .load_lut = qxl_crtc_load_lut,
635};
636
637static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
638{
639 struct qxl_crtc *qxl_crtc;
640
641 qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
642 if (!qxl_crtc)
643 return -ENOMEM;
644
645 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
646
647 drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
648 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
649 return 0;
650}
651
652static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
653{
654 DRM_DEBUG("\n");
655}
656
657static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
658 const struct drm_display_mode *mode,
659 struct drm_display_mode *adjusted_mode)
660{
661 DRM_DEBUG("\n");
662 return true;
663}
664
665static void qxl_enc_prepare(struct drm_encoder *encoder)
666{
667 DRM_DEBUG("\n");
668}
669
670static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
671 struct drm_encoder *encoder)
672{
673 int i;
674 struct qxl_head *head;
675 struct drm_display_mode *mode;
676
677 BUG_ON(!encoder);
678 /* TODO: ugly, do better */
679	for (i = 0; i < 32 && encoder->possible_crtcs != (1 << i); ++i)
680		;
681	if (i == 32) {
682 DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
683 encoder->possible_crtcs);
684 return;
685 }
686 if (!qdev->monitors_config ||
687 qdev->monitors_config->max_allowed <= i) {
688 DRM_ERROR(
689 "head number too large or missing monitors config: %p, %d",
690 qdev->monitors_config,
691 qdev->monitors_config ?
692 qdev->monitors_config->max_allowed : -1);
693 return;
694 }
695 if (!encoder->crtc) {
696 DRM_ERROR("missing crtc on encoder %p\n", encoder);
697 return;
698 }
699 if (i != 0)
700 DRM_DEBUG("missing for multiple monitors: no head holes\n");
701 head = &qdev->monitors_config->heads[i];
702 head->id = i;
703 head->surface_id = 0;
704 if (encoder->crtc->enabled) {
705 mode = &encoder->crtc->mode;
706 head->width = mode->hdisplay;
707 head->height = mode->vdisplay;
708 head->x = encoder->crtc->x;
709 head->y = encoder->crtc->y;
710 if (qdev->monitors_config->count < i + 1)
711 qdev->monitors_config->count = i + 1;
712 } else {
713 head->width = 0;
714 head->height = 0;
715 head->x = 0;
716 head->y = 0;
717 }
718 DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
719 i, head->x, head->y, head->width, head->height);
720 head->flags = 0;
721 /* TODO - somewhere else to call this for multiple monitors
722 * (config_commit?) */
723 qxl_send_monitors_config(qdev);
724}
725
726static void qxl_enc_commit(struct drm_encoder *encoder)
727{
728 struct qxl_device *qdev = encoder->dev->dev_private;
729
730 qxl_write_monitors_config_for_encoder(qdev, encoder);
731 DRM_DEBUG("\n");
732}
733
734static void qxl_enc_mode_set(struct drm_encoder *encoder,
735 struct drm_display_mode *mode,
736 struct drm_display_mode *adjusted_mode)
737{
738 DRM_DEBUG("\n");
739}
740
741static int qxl_conn_get_modes(struct drm_connector *connector)
742{
743 int ret = 0;
744 struct qxl_device *qdev = connector->dev->dev_private;
745
746 DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
747 /* TODO: what should we do here? only show the configured modes for the
748 * device, or allow the full list, or both? */
749 if (qdev->monitors_config && qdev->monitors_config->count) {
750 ret = qxl_add_monitors_config_modes(connector);
751 if (ret < 0)
752 return ret;
753 }
754 ret += qxl_add_common_modes(connector);
755 return ret;
756}
757
758static int qxl_conn_mode_valid(struct drm_connector *connector,
759 struct drm_display_mode *mode)
760{
761 /* TODO: is this called for user defined modes? (xrandr --add-mode)
762 * TODO: check that the mode fits in the framebuffer */
763 DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
764 mode->vdisplay, mode->status);
765 return MODE_OK;
766}
767
768static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
769{
770 struct qxl_output *qxl_output =
771 drm_connector_to_qxl_output(connector);
772
773 DRM_DEBUG("\n");
774 return &qxl_output->enc;
775}
776
777
778static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
779 .dpms = qxl_enc_dpms,
780 .mode_fixup = qxl_enc_mode_fixup,
781 .prepare = qxl_enc_prepare,
782 .mode_set = qxl_enc_mode_set,
783 .commit = qxl_enc_commit,
784};
785
786static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
787 .get_modes = qxl_conn_get_modes,
788 .mode_valid = qxl_conn_mode_valid,
789 .best_encoder = qxl_best_encoder,
790};
791
792static void qxl_conn_save(struct drm_connector *connector)
793{
794 DRM_DEBUG("\n");
795}
796
797static void qxl_conn_restore(struct drm_connector *connector)
798{
799 DRM_DEBUG("\n");
800}
801
802static enum drm_connector_status qxl_conn_detect(
803 struct drm_connector *connector,
804 bool force)
805{
806 struct qxl_output *output =
807 drm_connector_to_qxl_output(connector);
808 struct drm_device *ddev = connector->dev;
809 struct qxl_device *qdev = ddev->dev_private;
810 int connected;
811
812 /* The first monitor is always connected */
813 connected = (output->index == 0) ||
814 (qdev->monitors_config &&
815 qdev->monitors_config->count > output->index);
816
817 DRM_DEBUG("\n");
818 return connected ? connector_status_connected
819 : connector_status_disconnected;
820}
821
822static int qxl_conn_set_property(struct drm_connector *connector,
823 struct drm_property *property,
824 uint64_t value)
825{
826 DRM_DEBUG("\n");
827 return 0;
828}
829
830static void qxl_conn_destroy(struct drm_connector *connector)
831{
832 struct qxl_output *qxl_output =
833 drm_connector_to_qxl_output(connector);
834
835 drm_sysfs_connector_remove(connector);
836 drm_connector_cleanup(connector);
837 kfree(qxl_output);
838}
839
840static const struct drm_connector_funcs qxl_connector_funcs = {
841 .dpms = drm_helper_connector_dpms,
842 .save = qxl_conn_save,
843 .restore = qxl_conn_restore,
844 .detect = qxl_conn_detect,
845 .fill_modes = drm_helper_probe_single_connector_modes,
846 .set_property = qxl_conn_set_property,
847 .destroy = qxl_conn_destroy,
848};
849
850static void qxl_enc_destroy(struct drm_encoder *encoder)
851{
852 drm_encoder_cleanup(encoder);
853}
854
855static const struct drm_encoder_funcs qxl_enc_funcs = {
856 .destroy = qxl_enc_destroy,
857};
858
859static int qdev_output_init(struct drm_device *dev, int num_output)
860{
861 struct qxl_output *qxl_output;
862 struct drm_connector *connector;
863 struct drm_encoder *encoder;
864
865 qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
866 if (!qxl_output)
867 return -ENOMEM;
868
869 qxl_output->index = num_output;
870
871 connector = &qxl_output->base;
872 encoder = &qxl_output->enc;
873 drm_connector_init(dev, &qxl_output->base,
874 &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
875
876 drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
877 DRM_MODE_ENCODER_VIRTUAL);
878
879 encoder->possible_crtcs = 1 << num_output;
880 drm_mode_connector_attach_encoder(&qxl_output->base,
881 &qxl_output->enc);
882 drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
883 drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
884
885 drm_sysfs_connector_add(connector);
886 return 0;
887}
888
889static struct drm_framebuffer *
890qxl_user_framebuffer_create(struct drm_device *dev,
891 struct drm_file *file_priv,
892 struct drm_mode_fb_cmd2 *mode_cmd)
893{
894 struct drm_gem_object *obj;
895 struct qxl_framebuffer *qxl_fb;
896 struct qxl_device *qdev = dev->dev_private;
897 int ret;
898
899	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
900	if (!obj)
901		return NULL;
902	qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
903	if (qxl_fb == NULL)
904		return NULL;
905 ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
906 if (ret) {
907 kfree(qxl_fb);
908 drm_gem_object_unreference_unlocked(obj);
909 return NULL;
910 }
911
912 if (qdev->active_user_framebuffer) {
913 DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
914 __func__,
915 qdev->active_user_framebuffer, qxl_fb);
916 }
917 qdev->active_user_framebuffer = qxl_fb;
918
919 return &qxl_fb->base;
920}
921
922static const struct drm_mode_config_funcs qxl_mode_funcs = {
923 .fb_create = qxl_user_framebuffer_create,
924};
925
926int qxl_modeset_init(struct qxl_device *qdev)
927{
928 int i;
929 int ret;
930 struct drm_gem_object *gobj;
931 int max_allowed = QXL_NUM_OUTPUTS;
932 int monitors_config_size = sizeof(struct qxl_monitors_config) +
933 max_allowed * sizeof(struct qxl_head);
934
935 drm_mode_config_init(qdev->ddev);
936 ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
937 QXL_GEM_DOMAIN_VRAM,
938 false, false, NULL, &gobj);
939 if (ret) {
940 DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
941 return -ENOMEM;
942 }
943 qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
944 qxl_bo_kmap(qdev->monitors_config_bo, NULL);
945 qdev->monitors_config = qdev->monitors_config_bo->kptr;
946 qdev->ram_header->monitors_config =
947 qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
948
949 memset(qdev->monitors_config, 0, monitors_config_size);
950 qdev->monitors_config->max_allowed = max_allowed;
951
952 qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
953
954 /* modes will be validated against the framebuffer size */
955 qdev->ddev->mode_config.min_width = 320;
956 qdev->ddev->mode_config.min_height = 200;
957 qdev->ddev->mode_config.max_width = 8192;
958 qdev->ddev->mode_config.max_height = 8192;
959
960 qdev->ddev->mode_config.fb_base = qdev->vram_base;
961 for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
962 qdev_crtc_init(qdev->ddev, i);
963 qdev_output_init(qdev->ddev, i);
964 }
965
966 qdev->mode_info.mode_config_initialized = true;
967
968	/* primary surface must be created by this point, to allow
969	 * issuing command queue commands and having them read by the
970	 * spice server. */
971 qxl_fbdev_init(qdev);
972 return 0;
973}
974
975void qxl_modeset_fini(struct qxl_device *qdev)
976{
977 qxl_fbdev_fini(qdev);
978 if (qdev->mode_info.mode_config_initialized) {
979 drm_mode_config_cleanup(qdev->ddev);
980 qdev->mode_info.mode_config_initialized = false;
981 }
982}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 000000000000..3c8c3dbf9378
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,390 @@
1/*
2 * Copyright 2011 Red Hat, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "qxl_drv.h"
24#include "qxl_object.h"
25
26/* Returns a pointer to the qxl_rect array already allocated inside
27 * the qxl_clip_rects.  This is *not* the device-physical address: it is
28 * a CPU pointer into the kmapped bo, at the qxl_clip_rects.chunk.data
29 * offset. */
29static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
30 struct qxl_drawable *drawable,
31 unsigned num_clips,
32 struct qxl_bo **clips_bo,
33 struct qxl_release *release)
34{
35 struct qxl_clip_rects *dev_clips;
36 int ret;
37 int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
38 ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
39 if (ret)
40 return NULL;
41
42 ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
43 if (ret) {
44 qxl_bo_unref(clips_bo);
45 return NULL;
46 }
47 dev_clips->num_rects = num_clips;
48 dev_clips->chunk.next_chunk = 0;
49 dev_clips->chunk.prev_chunk = 0;
50 dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
51 return (struct qxl_rect *)dev_clips->chunk.data;
52}
53
54static int
55make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
56 const struct qxl_rect *rect,
57 struct qxl_release **release)
58{
59 struct qxl_drawable *drawable;
60 int i, ret;
61
62 ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
63 QXL_RELEASE_DRAWABLE, release,
64 NULL);
65 if (ret)
66 return ret;
67
68 drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
69 drawable->type = type;
70
71 drawable->surface_id = surface; /* Only primary for now */
72 drawable->effect = QXL_EFFECT_OPAQUE;
73 drawable->self_bitmap = 0;
74 drawable->self_bitmap_area.top = 0;
75 drawable->self_bitmap_area.left = 0;
76 drawable->self_bitmap_area.bottom = 0;
77 drawable->self_bitmap_area.right = 0;
78 /* FIXME: add clipping */
79 drawable->clip.type = SPICE_CLIP_TYPE_NONE;
80
81 /*
82 * surfaces_dest[i] should apparently be filled out with the
83 * surfaces that we depend on, and surface_rects should be
84 * filled with the rectangles of those surfaces that we
85 * are going to use.
86 */
87 for (i = 0; i < 3; ++i)
88 drawable->surfaces_dest[i] = -1;
89
90 if (rect)
91 drawable->bbox = *rect;
92
93 drawable->mm_time = qdev->rom->mm_clock;
94 qxl_release_unmap(qdev, *release, &drawable->release_info);
95 return 0;
96}
97
98static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
99 const struct qxl_fb_image *qxl_fb_image)
100{
101 struct qxl_device *qdev = qxl_fb_image->qdev;
102 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
103 uint32_t visual = qxl_fb_image->visual;
104 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
105 struct qxl_palette *pal;
106 int ret;
107 uint32_t fgcolor, bgcolor;
108	static uint64_t unique; /* we make no attempt to actually set this
109				 * correctly globally, since that would require
110				 * tracking all of our palettes. */
111
112 ret = qxl_alloc_bo_reserved(qdev,
113 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
114 palette_bo);
115	if (ret) return ret;
116 ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
117 pal->num_ents = 2;
118 pal->unique = unique++;
119 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
120 /* NB: this is the only used branch currently. */
121 fgcolor = pseudo_palette[fb_image->fg_color];
122 bgcolor = pseudo_palette[fb_image->bg_color];
123 } else {
124 fgcolor = fb_image->fg_color;
125 bgcolor = fb_image->bg_color;
126 }
127 pal->ents[0] = bgcolor;
128 pal->ents[1] = fgcolor;
129 qxl_bo_kunmap(*palette_bo);
130 return 0;
131}
132
133void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
134 int stride /* filled in if 0 */)
135{
136 struct qxl_device *qdev = qxl_fb_image->qdev;
137 struct qxl_drawable *drawable;
138 struct qxl_rect rect;
139 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
140 int x = fb_image->dx;
141 int y = fb_image->dy;
142 int width = fb_image->width;
143 int height = fb_image->height;
144 const char *src = fb_image->data;
145 int depth = fb_image->depth;
146 struct qxl_release *release;
147 struct qxl_bo *image_bo;
148 struct qxl_image *image;
149 int ret;
150
151 if (stride == 0)
152 stride = depth * width / 8;
153
154 rect.left = x;
155 rect.right = x + width;
156 rect.top = y;
157 rect.bottom = y + height;
158
159 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
160 if (ret)
161 return;
162
163 ret = qxl_image_create(qdev, release, &image_bo,
164 (const uint8_t *)src, 0, 0,
165 width, height, depth, stride);
166 if (ret) {
167 qxl_release_unreserve(qdev, release);
168 qxl_release_free(qdev, release);
169 return;
170 }
171
172 if (depth == 1) {
173 struct qxl_bo *palette_bo;
174 void *ptr;
175 ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
176 qxl_release_add_res(qdev, release, palette_bo);
177
178 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
179 image = ptr;
180 image->u.bitmap.palette =
181 qxl_bo_physical_address(qdev, palette_bo, 0);
182 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
183 qxl_bo_unreserve(palette_bo);
184 qxl_bo_unref(&palette_bo);
185 }
186
187 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
188
189 drawable->u.copy.src_area.top = 0;
190 drawable->u.copy.src_area.bottom = height;
191 drawable->u.copy.src_area.left = 0;
192 drawable->u.copy.src_area.right = width;
193
194 drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
195 drawable->u.copy.scale_mode = 0;
196 drawable->u.copy.mask.flags = 0;
197 drawable->u.copy.mask.pos.x = 0;
198 drawable->u.copy.mask.pos.y = 0;
199 drawable->u.copy.mask.bitmap = 0;
200
201 drawable->u.copy.src_bitmap =
202 qxl_bo_physical_address(qdev, image_bo, 0);
203 qxl_release_unmap(qdev, release, &drawable->release_info);
204
205 qxl_release_add_res(qdev, release, image_bo);
206 qxl_bo_unreserve(image_bo);
207 qxl_bo_unref(&image_bo);
208
209 qxl_fence_releaseable(qdev, release);
210 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
211 qxl_release_unreserve(qdev, release);
212}
213
 214/* Push a draw command using the given clipping rectangles as
 215 * the sources from the shadow framebuffer.
 216 *
 217 * Right now this is implemented with a single draw and a clip list.
 218 * Clip lists are known to be a problem performance-wise; this can be
 219 * solved by treating them differently in the server.
 220 */
221void qxl_draw_dirty_fb(struct qxl_device *qdev,
222 struct qxl_framebuffer *qxl_fb,
223 struct qxl_bo *bo,
224 unsigned flags, unsigned color,
225 struct drm_clip_rect *clips,
226 unsigned num_clips, int inc)
227{
228 /*
229 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
230 * send a fill command instead, much cheaper.
231 *
232 * See include/drm/drm_mode.h
233 */
234 struct drm_clip_rect *clips_ptr;
235 int i;
236 int left, right, top, bottom;
237 int width, height;
238 struct qxl_drawable *drawable;
239 struct qxl_rect drawable_rect;
240 struct qxl_rect *rects;
241 int stride = qxl_fb->base.pitches[0];
242 /* depth is not actually interesting, we don't mask with it */
243 int depth = qxl_fb->base.bits_per_pixel;
244 uint8_t *surface_base;
245 struct qxl_release *release;
246 struct qxl_bo *image_bo;
247 struct qxl_bo *clips_bo;
248 int ret;
249
250 left = clips->x1;
251 right = clips->x2;
252 top = clips->y1;
253 bottom = clips->y2;
254
 255	/* the first clip rect seeded the bounding box; union in the rest */
256 for (i = 1, clips_ptr = clips + inc;
257 i < num_clips; i++, clips_ptr += inc) {
258 left = min_t(int, left, (int)clips_ptr->x1);
259 right = max_t(int, right, (int)clips_ptr->x2);
260 top = min_t(int, top, (int)clips_ptr->y1);
261 bottom = max_t(int, bottom, (int)clips_ptr->y2);
262 }
263
264 width = right - left;
265 height = bottom - top;
266 drawable_rect.left = left;
267 drawable_rect.right = right;
268 drawable_rect.top = top;
269 drawable_rect.bottom = bottom;
270 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
271 &release);
272 if (ret)
273 return;
274
275 ret = qxl_bo_kmap(bo, (void **)&surface_base);
276 if (ret)
277 goto out_unref;
278
279 ret = qxl_image_create(qdev, release, &image_bo, surface_base,
280 left, top, width, height, depth, stride);
281 qxl_bo_kunmap(bo);
282 if (ret)
283 goto out_unref;
284
285 rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
286 if (!rects) {
287 qxl_bo_unref(&image_bo);
288 goto out_unref;
289 }
290 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
291
292 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
293 drawable->clip.data = qxl_bo_physical_address(qdev,
294 clips_bo, 0);
295 qxl_release_add_res(qdev, release, clips_bo);
296
297 drawable->u.copy.src_area.top = 0;
298 drawable->u.copy.src_area.bottom = height;
299 drawable->u.copy.src_area.left = 0;
300 drawable->u.copy.src_area.right = width;
301
302 drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
303 drawable->u.copy.scale_mode = 0;
304 drawable->u.copy.mask.flags = 0;
305 drawable->u.copy.mask.pos.x = 0;
306 drawable->u.copy.mask.pos.y = 0;
307 drawable->u.copy.mask.bitmap = 0;
308
309 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
310 qxl_release_unmap(qdev, release, &drawable->release_info);
311 qxl_release_add_res(qdev, release, image_bo);
312 qxl_bo_unreserve(image_bo);
313 qxl_bo_unref(&image_bo);
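	/* fill in the actual clip rectangles in the bo that
	 * drawable_set_clipping() returned above */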
314 clips_ptr = clips;
315 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
316 rects[i].left = clips_ptr->x1;
317 rects[i].right = clips_ptr->x2;
318 rects[i].top = clips_ptr->y1;
319 rects[i].bottom = clips_ptr->y2;
320 }
321 qxl_bo_kunmap(clips_bo);
322 qxl_bo_unreserve(clips_bo);
323 qxl_bo_unref(&clips_bo);
324
325 qxl_fence_releaseable(qdev, release);
326 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
327 qxl_release_unreserve(qdev, release);
328 return;
329
330out_unref:
331 qxl_release_unreserve(qdev, release);
332 qxl_release_free(qdev, release);
333}
334
335void qxl_draw_copyarea(struct qxl_device *qdev,
336 u32 width, u32 height,
337 u32 sx, u32 sy,
338 u32 dx, u32 dy)
339{
340 struct qxl_drawable *drawable;
341 struct qxl_rect rect;
342 struct qxl_release *release;
343 int ret;
344
345 rect.left = dx;
346 rect.top = dy;
347 rect.right = dx + width;
348 rect.bottom = dy + height;
349 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
350 if (ret)
351 return;
352
353 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
354 drawable->u.copy_bits.src_pos.x = sx;
355 drawable->u.copy_bits.src_pos.y = sy;
356
357 qxl_release_unmap(qdev, release, &drawable->release_info);
358 qxl_fence_releaseable(qdev, release);
359 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
360 qxl_release_unreserve(qdev, release);
361}
362
363void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
364{
365 struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
366 struct qxl_rect rect = qxl_draw_fill_rec->rect;
367 uint32_t color = qxl_draw_fill_rec->color;
368 uint16_t rop = qxl_draw_fill_rec->rop;
369 struct qxl_drawable *drawable;
370 struct qxl_release *release;
371 int ret;
372
373 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
374 if (ret)
375 return;
376
377 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
378 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
379 drawable->u.fill.brush.u.color = color;
380 drawable->u.fill.rop_descriptor = rop;
381 drawable->u.fill.mask.flags = 0;
382 drawable->u.fill.mask.pos.x = 0;
383 drawable->u.fill.mask.pos.y = 0;
384 drawable->u.fill.mask.bitmap = 0;
385
386 qxl_release_unmap(qdev, release, &drawable->release_info);
387 qxl_fence_releaseable(qdev, release);
388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
389 qxl_release_unreserve(qdev, release);
390}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 000000000000..aa291d8a98a2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,145 @@
1/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
2/* qxl_drv.c -- QXL driver -*- linux-c -*-
3 *
4 * Copyright 2011 Red Hat, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Dave Airlie <airlie@redhat.com>
28 * Alon Levy <alevy@redhat.com>
29 */
30
31#include <linux/module.h>
32#include <linux/console.h>
33
34#include "drmP.h"
35#include "drm/drm.h"
36
37#include "qxl_drv.h"
38
39extern int qxl_max_ioctls;
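/* match both the VGA-class and the secondary (non-VGA) QXL PCI devices */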
40static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
41 { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
42 0xffff00, 0 },
43 { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
44 0xffff00, 0 },
45 { 0, 0, 0 },
46};
47MODULE_DEVICE_TABLE(pci, pciidlist);
48
49static int qxl_modeset = -1;
50
51MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
52module_param_named(modeset, qxl_modeset, int, 0400);
53
54static struct drm_driver qxl_driver;
55static struct pci_driver qxl_pci_driver;
56
57static int
58qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
59{
60 if (pdev->revision < 4) {
61 DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
62 " use xf86-video-qxl in user mode");
63 return -EINVAL; /* TODO: ENODEV ? */
64 }
65 return drm_get_pci_dev(pdev, ent, &qxl_driver);
66}
67
68static void
69qxl_pci_remove(struct pci_dev *pdev)
70{
71 struct drm_device *dev = pci_get_drvdata(pdev);
72
73 drm_put_dev(dev);
74}
75
76static struct pci_driver qxl_pci_driver = {
77 .name = DRIVER_NAME,
78 .id_table = pciidlist,
79 .probe = qxl_pci_probe,
80 .remove = qxl_pci_remove,
81};
82
83static const struct file_operations qxl_fops = {
84 .owner = THIS_MODULE,
85 .open = drm_open,
86 .release = drm_release,
87 .unlocked_ioctl = drm_ioctl,
88 .poll = drm_poll,
89 .fasync = drm_fasync,
90 .mmap = qxl_mmap,
91};
92
93static struct drm_driver qxl_driver = {
94 .driver_features = DRIVER_GEM | DRIVER_MODESET |
95 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
96 .dev_priv_size = 0,
97 .load = qxl_driver_load,
98 .unload = qxl_driver_unload,
99
100 .dumb_create = qxl_mode_dumb_create,
101 .dumb_map_offset = qxl_mode_dumb_mmap,
102 .dumb_destroy = qxl_mode_dumb_destroy,
103#if defined(CONFIG_DEBUG_FS)
104 .debugfs_init = qxl_debugfs_init,
105 .debugfs_cleanup = qxl_debugfs_takedown,
106#endif
107 .gem_init_object = qxl_gem_object_init,
108 .gem_free_object = qxl_gem_object_free,
109 .gem_open_object = qxl_gem_object_open,
110 .gem_close_object = qxl_gem_object_close,
111 .fops = &qxl_fops,
112 .ioctls = qxl_ioctls,
113 .irq_handler = qxl_irq_handler,
114 .name = DRIVER_NAME,
115 .desc = DRIVER_DESC,
116 .date = DRIVER_DATE,
 117	.major = DRIVER_MAJOR,
 118	.minor = DRIVER_MINOR,
 119	.patchlevel = DRIVER_PATCHLEVEL,
120};
121
122static int __init qxl_init(void)
123{
124#ifdef CONFIG_VGA_CONSOLE
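	/* honor "nomodeset" on the kernel command line unless the user
	 * explicitly set the modeset module parameter */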
125 if (vgacon_text_force() && qxl_modeset == -1)
126 return -EINVAL;
127#endif
128
129 if (qxl_modeset == 0)
130 return -EINVAL;
131 qxl_driver.num_ioctls = qxl_max_ioctls;
132 return drm_pci_init(&qxl_driver, &qxl_pci_driver);
133}
134
135static void __exit qxl_exit(void)
136{
137 drm_pci_exit(&qxl_driver, &qxl_pci_driver);
138}
139
140module_init(qxl_init);
141module_exit(qxl_exit);
142
143MODULE_AUTHOR(DRIVER_AUTHOR);
144MODULE_DESCRIPTION(DRIVER_DESC);
145MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 000000000000..52b582c211da
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,566 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#ifndef QXL_DRV_H
28#define QXL_DRV_H
29
30/*
31 * Definitions taken from spice-protocol, plus kernel driver specific bits.
32 */
33
34#include <linux/workqueue.h>
35#include <linux/firmware.h>
36#include <linux/platform_device.h>
37
38#include "drmP.h"
39#include "drm_crtc.h"
40#include <ttm/ttm_bo_api.h>
41#include <ttm/ttm_bo_driver.h>
42#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h>
44
45#include <drm/qxl_drm.h>
46#include "qxl_dev.h"
47
48#define DRIVER_AUTHOR "Dave Airlie"
49
50#define DRIVER_NAME "qxl"
51#define DRIVER_DESC "RH QXL"
52#define DRIVER_DATE "20120117"
53
54#define DRIVER_MAJOR 0
55#define DRIVER_MINOR 1
56#define DRIVER_PATCHLEVEL 0
57
58#define QXL_NUM_OUTPUTS 1
59
60#define QXL_DEBUGFS_MAX_COMPONENTS 32
61
62extern int qxl_log_level;
63
64enum {
65 QXL_INFO_LEVEL = 1,
66 QXL_DEBUG_LEVEL = 2,
67};
68
69#define QXL_INFO(qdev, fmt, ...) do { \
70 if (qxl_log_level >= QXL_INFO_LEVEL) { \
71 qxl_io_log(qdev, fmt, __VA_ARGS__); \
72 } \
73 } while (0)
74#define QXL_DEBUG(qdev, fmt, ...) do { \
75 if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
76 qxl_io_log(qdev, fmt, __VA_ARGS__); \
77 } \
78 } while (0)
79#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
80 static int done; \
81 if (!done) { \
82 done = 1; \
83 QXL_INFO(qdev, fmt, __VA_ARGS__); \
84 } \
85 } while (0)
86
87#define DRM_FILE_OFFSET 0x100000000ULL
88#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
89
90#define QXL_INTERRUPT_MASK (\
91 QXL_INTERRUPT_DISPLAY |\
92 QXL_INTERRUPT_CURSOR |\
93 QXL_INTERRUPT_IO_CMD |\
94 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
95
96struct qxl_fence {
97 struct qxl_device *qdev;
98 uint32_t num_active_releases;
99 uint32_t *release_ids;
100 struct radix_tree_root tree;
101};
102
103struct qxl_bo {
104 /* Protected by gem.mutex */
105 struct list_head list;
106 /* Protected by tbo.reserved */
107 u32 placements[3];
108 struct ttm_placement placement;
109 struct ttm_buffer_object tbo;
110 struct ttm_bo_kmap_obj kmap;
111 unsigned pin_count;
112 void *kptr;
113 int type;
114 /* Constant after initialization */
115 struct drm_gem_object gem_base;
116 bool is_primary; /* is this now a primary surface */
117 bool hw_surf_alloc;
118 struct qxl_surface surf;
119 uint32_t surface_id;
120 struct qxl_fence fence; /* per bo fence - list of releases */
121 struct qxl_release *surf_create;
122 atomic_t reserve_count;
123};
124#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
125
126struct qxl_gem {
127 struct mutex mutex;
128 struct list_head objects;
129};
130
131struct qxl_bo_list {
132 struct list_head lhead;
133 struct qxl_bo *bo;
134};
135
136struct qxl_reloc_list {
137 struct list_head bos;
138};
139
140struct qxl_crtc {
141 struct drm_crtc base;
142 int cur_x;
143 int cur_y;
144};
145
146struct qxl_output {
147 int index;
148 struct drm_connector base;
149 struct drm_encoder enc;
150};
151
152struct qxl_framebuffer {
153 struct drm_framebuffer base;
154 struct drm_gem_object *obj;
155};
156
157#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
158#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
159#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
160#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
161
162struct qxl_mman {
163 struct ttm_bo_global_ref bo_global_ref;
164 struct drm_global_reference mem_global_ref;
165 bool mem_global_referenced;
166 struct ttm_bo_device bdev;
167};
168
169struct qxl_mode_info {
170 int num_modes;
171 struct qxl_mode *modes;
172 bool mode_config_initialized;
173
174 /* pointer to fbdev info structure */
175 struct qxl_fbdev *qfbdev;
176};
177
178
179struct qxl_memslot {
180 uint8_t generation;
181 uint64_t start_phys_addr;
182 uint64_t end_phys_addr;
183 uint64_t high_bits;
184};
185
186enum {
187 QXL_RELEASE_DRAWABLE,
188 QXL_RELEASE_SURFACE_CMD,
189 QXL_RELEASE_CURSOR_CMD,
190};
191
 192/* the kernel driver's own release bookkeeping, distinct from
 193 * qxl_release_info in spice-protocol/qxl_dev.h */
194#define QXL_MAX_RES 96
195struct qxl_release {
196 int id;
197 int type;
198 int bo_count;
199 uint32_t release_offset;
200 uint32_t surface_release_id;
201 struct qxl_bo *bos[QXL_MAX_RES];
202};
203
204struct qxl_fb_image {
205 struct qxl_device *qdev;
206 uint32_t pseudo_palette[16];
207 struct fb_image fb_image;
208 uint32_t visual;
209};
210
211struct qxl_draw_fill {
212 struct qxl_device *qdev;
213 struct qxl_rect rect;
214 uint32_t color;
215 uint16_t rop;
216};
217
218/*
219 * Debugfs
220 */
221struct qxl_debugfs {
222 struct drm_info_list *files;
223 unsigned num_files;
224};
225
226int qxl_debugfs_add_files(struct qxl_device *rdev,
227 struct drm_info_list *files,
228 unsigned nfiles);
229int qxl_debugfs_fence_init(struct qxl_device *rdev);
230void qxl_debugfs_remove_files(struct qxl_device *qdev);
231
232struct qxl_device;
233
234struct qxl_device {
235 struct device *dev;
236 struct drm_device *ddev;
237 struct pci_dev *pdev;
238 unsigned long flags;
239
240 resource_size_t vram_base, vram_size;
241 resource_size_t surfaceram_base, surfaceram_size;
242 resource_size_t rom_base, rom_size;
243 struct qxl_rom *rom;
244
245 struct qxl_mode *modes;
246 struct qxl_bo *monitors_config_bo;
247 struct qxl_monitors_config *monitors_config;
248
249 /* last received client_monitors_config */
250 struct qxl_monitors_config *client_monitors_config;
251
252 int io_base;
253 void *ram;
254 struct qxl_mman mman;
255 struct qxl_gem gem;
256 struct qxl_mode_info mode_info;
257
258 /*
259 * last created framebuffer with fb_create
260 * only used by debugfs dumbppm
261 */
262 struct qxl_framebuffer *active_user_framebuffer;
263
264 struct fb_info *fbdev_info;
265 struct qxl_framebuffer *fbdev_qfb;
266 void *ram_physical;
267
268 struct qxl_ring *release_ring;
269 struct qxl_ring *command_ring;
270 struct qxl_ring *cursor_ring;
271
272 struct qxl_ram_header *ram_header;
273 bool mode_set;
274
275 bool primary_created;
276
277 struct qxl_memslot *mem_slots;
278 uint8_t n_mem_slots;
279
280 uint8_t main_mem_slot;
281 uint8_t surfaces_mem_slot;
282 uint8_t slot_id_bits;
283 uint8_t slot_gen_bits;
284 uint64_t va_slot_mask;
285
286 struct idr release_idr;
287 spinlock_t release_idr_lock;
288 struct mutex async_io_mutex;
289 unsigned int last_sent_io_cmd;
290
291 /* interrupt handling */
292 atomic_t irq_received;
293 atomic_t irq_received_display;
294 atomic_t irq_received_cursor;
295 atomic_t irq_received_io_cmd;
296 unsigned irq_received_error;
297 wait_queue_head_t display_event;
298 wait_queue_head_t cursor_event;
299 wait_queue_head_t io_cmd_event;
300 struct work_struct client_monitors_config_work;
301
302 /* debugfs */
303 struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
304 unsigned debugfs_count;
305
306 struct mutex update_area_mutex;
307
308 struct idr surf_id_idr;
309 spinlock_t surf_id_idr_lock;
310 int last_alloced_surf_id;
311
312 struct mutex surf_evict_mutex;
313 struct io_mapping *vram_mapping;
314 struct io_mapping *surface_mapping;
315
 316	/* release allocation state */
317 struct mutex release_mutex;
318 struct qxl_bo *current_release_bo[3];
319 int current_release_bo_offset[3];
320
321 struct workqueue_struct *gc_queue;
322 struct work_struct gc_work;
323
324};
325
326/* forward declaration for QXL_INFO_IO */
327void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
328
329extern struct drm_ioctl_desc qxl_ioctls[];
 330extern int qxl_max_ioctls;
331
332int qxl_driver_load(struct drm_device *dev, unsigned long flags);
333int qxl_driver_unload(struct drm_device *dev);
334
335int qxl_modeset_init(struct qxl_device *qdev);
336void qxl_modeset_fini(struct qxl_device *qdev);
337
338int qxl_bo_init(struct qxl_device *qdev);
339void qxl_bo_fini(struct qxl_device *qdev);
340
341struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
342 int element_size,
343 int n_elements,
344 int prod_notify,
345 bool set_prod_notify,
346 wait_queue_head_t *push_event);
347void qxl_ring_free(struct qxl_ring *ring);
348
349static inline void *
350qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
351{
352 QXL_INFO(qdev, "not implemented (%lu)\n", physical);
 353	return NULL;
354}
355
356static inline uint64_t
357qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
358 unsigned long offset)
359{
360 int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
361 struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
362
363 /* TODO - need to hold one of the locks to read tbo.offset */
364 return slot->high_bits | (bo->tbo.offset + offset);
365}
366
367/* qxl_fb.c */
368#define QXLFB_CONN_LIMIT 1
369
370int qxl_fbdev_init(struct qxl_device *qdev);
371void qxl_fbdev_fini(struct qxl_device *qdev);
372int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
373 struct drm_file *file_priv,
374 uint32_t *handle);
375
376/* qxl_display.c */
377int
378qxl_framebuffer_init(struct drm_device *dev,
379 struct qxl_framebuffer *rfb,
380 struct drm_mode_fb_cmd2 *mode_cmd,
381 struct drm_gem_object *obj);
382void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
383void qxl_send_monitors_config(struct qxl_device *qdev);
384
385/* used by qxl_debugfs only */
386void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
387void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
388
389/* qxl_gem.c */
390int qxl_gem_init(struct qxl_device *qdev);
391void qxl_gem_fini(struct qxl_device *qdev);
392int qxl_gem_object_create(struct qxl_device *qdev, int size,
393 int alignment, int initial_domain,
394 bool discardable, bool kernel,
395 struct qxl_surface *surf,
396 struct drm_gem_object **obj);
397int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
398 uint64_t *gpu_addr);
399void qxl_gem_object_unpin(struct drm_gem_object *obj);
400int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
401 struct drm_file *file_priv,
402 u32 domain,
403 size_t size,
404 struct qxl_surface *surf,
405 struct qxl_bo **qobj,
406 uint32_t *handle);
407int qxl_gem_object_init(struct drm_gem_object *obj);
408void qxl_gem_object_free(struct drm_gem_object *gobj);
409int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
410void qxl_gem_object_close(struct drm_gem_object *obj,
411 struct drm_file *file_priv);
412void qxl_bo_force_delete(struct qxl_device *qdev);
413int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
414
415/* qxl_dumb.c */
416int qxl_mode_dumb_create(struct drm_file *file_priv,
417 struct drm_device *dev,
418 struct drm_mode_create_dumb *args);
419int qxl_mode_dumb_destroy(struct drm_file *file_priv,
420 struct drm_device *dev,
421 uint32_t handle);
422int qxl_mode_dumb_mmap(struct drm_file *filp,
423 struct drm_device *dev,
424 uint32_t handle, uint64_t *offset_p);
425
426
427/* qxl ttm */
428int qxl_ttm_init(struct qxl_device *qdev);
429void qxl_ttm_fini(struct qxl_device *qdev);
430int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
431
432/* qxl image */
433
434int qxl_image_create(struct qxl_device *qdev,
435 struct qxl_release *release,
436 struct qxl_bo **image_bo,
437 const uint8_t *data,
438 int x, int y, int width, int height,
439 int depth, int stride);
440void qxl_update_screen(struct qxl_device *qxl);
441
442/* qxl io operations (qxl_cmd.c) */
443
444void qxl_io_create_primary(struct qxl_device *qdev,
445 unsigned width, unsigned height, unsigned offset,
446 struct qxl_bo *bo);
447void qxl_io_destroy_primary(struct qxl_device *qdev);
448void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
449void qxl_io_notify_oom(struct qxl_device *qdev);
450
451int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
452 const struct qxl_rect *area);
453
454void qxl_io_reset(struct qxl_device *qdev);
455void qxl_io_monitors_config(struct qxl_device *qdev);
456int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
457void qxl_io_flush_release(struct qxl_device *qdev);
458void qxl_io_flush_surfaces(struct qxl_device *qdev);
459
460int qxl_release_reserve(struct qxl_device *qdev,
461 struct qxl_release *release, bool no_wait);
462void qxl_release_unreserve(struct qxl_device *qdev,
463 struct qxl_release *release);
464union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
465 struct qxl_release *release);
466void qxl_release_unmap(struct qxl_device *qdev,
467 struct qxl_release *release,
468 union qxl_release_info *info);
469/*
470 * qxl_bo_add_resource.
471 *
472 */
473void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
474
475int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
476 enum qxl_surface_cmd_type surface_cmd_type,
477 struct qxl_release *create_rel,
478 struct qxl_release **release);
479int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
480 int type, struct qxl_release **release,
481 struct qxl_bo **rbo);
482int qxl_fence_releaseable(struct qxl_device *qdev,
483 struct qxl_release *release);
484int
485qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
486 uint32_t type, bool interruptible);
487int
488qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
489 uint32_t type, bool interruptible);
490int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
491 struct qxl_bo **_bo);
492/* qxl drawing commands */
493
494void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
495 int stride /* filled in if 0 */);
496
497void qxl_draw_dirty_fb(struct qxl_device *qdev,
498 struct qxl_framebuffer *qxl_fb,
499 struct qxl_bo *bo,
500 unsigned flags, unsigned color,
501 struct drm_clip_rect *clips,
502 unsigned num_clips, int inc);
503
504void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
505
506void qxl_draw_copyarea(struct qxl_device *qdev,
507 u32 width, u32 height,
508 u32 sx, u32 sy,
509 u32 dx, u32 dy);
510
511uint64_t
512qxl_release_alloc(struct qxl_device *qdev, int type,
513 struct qxl_release **ret);
514
515void qxl_release_free(struct qxl_device *qdev,
516 struct qxl_release *release);
517void qxl_release_add_res(struct qxl_device *qdev,
518 struct qxl_release *release,
519 struct qxl_bo *bo);
520/* used by qxl_debugfs_release */
521struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
522 uint64_t id);
523
524bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
525int qxl_garbage_collect(struct qxl_device *qdev);
526
527/* debugfs */
528
529int qxl_debugfs_init(struct drm_minor *minor);
530void qxl_debugfs_takedown(struct drm_minor *minor);
531
532/* qxl_irq.c */
533int qxl_irq_init(struct qxl_device *qdev);
534irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
535
536/* qxl_fb.c */
537int qxl_fb_init(struct qxl_device *qdev);
538
539int qxl_debugfs_add_files(struct qxl_device *qdev,
540 struct drm_info_list *files,
541 unsigned nfiles);
542
543int qxl_surface_id_alloc(struct qxl_device *qdev,
544 struct qxl_bo *surf);
545void qxl_surface_id_dealloc(struct qxl_device *qdev,
546 uint32_t surface_id);
547int qxl_hw_surface_alloc(struct qxl_device *qdev,
548 struct qxl_bo *surf,
549 struct ttm_mem_reg *mem);
550int qxl_hw_surface_dealloc(struct qxl_device *qdev,
551 struct qxl_bo *surf);
552
553int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
554
555struct qxl_drv_surface *
556qxl_surface_lookup(struct drm_device *dev, int surface_id);
557void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
558int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
559
560/* qxl_fence.c */
561int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
562int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
563int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
564void qxl_fence_fini(struct qxl_fence *qfence);
565
566#endif
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 000000000000..847c4ee798f7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/* dumb ioctls implementation */
30
31int qxl_mode_dumb_create(struct drm_file *file_priv,
32 struct drm_device *dev,
33 struct drm_mode_create_dumb *args)
34{
35 struct qxl_device *qdev = dev->dev_private;
36 struct qxl_bo *qobj;
37 uint32_t handle;
38 int r;
39 struct qxl_surface surf;
40 uint32_t pitch, format;
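	/* (bpp + 1) / 8 rounds bits up to whole bytes; only 16 and 32 bpp
	 * survive the switch below anyway */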
41 pitch = args->width * ((args->bpp + 1) / 8);
42 args->size = pitch * args->height;
43 args->size = ALIGN(args->size, PAGE_SIZE);
44
45 switch (args->bpp) {
46 case 16:
47 format = SPICE_SURFACE_FMT_16_565;
48 break;
49 case 32:
50 format = SPICE_SURFACE_FMT_32_xRGB;
51 break;
52 default:
53 return -EINVAL;
54 }
55
56 surf.width = args->width;
57 surf.height = args->height;
58 surf.stride = pitch;
59 surf.format = format;
60 r = qxl_gem_object_create_with_handle(qdev, file_priv,
61 QXL_GEM_DOMAIN_VRAM,
62 args->size, &surf, &qobj,
63 &handle);
64 if (r)
65 return r;
66 args->pitch = pitch;
67 args->handle = handle;
68 return 0;
69}
70
71int qxl_mode_dumb_destroy(struct drm_file *file_priv,
72 struct drm_device *dev,
73 uint32_t handle)
74{
75 return drm_gem_handle_delete(file_priv, handle);
76}
77
78int qxl_mode_dumb_mmap(struct drm_file *file_priv,
79 struct drm_device *dev,
80 uint32_t handle, uint64_t *offset_p)
81{
82 struct drm_gem_object *gobj;
83 struct qxl_bo *qobj;
84
85 BUG_ON(!offset_p);
86 gobj = drm_gem_object_lookup(dev, file_priv, handle);
87 if (gobj == NULL)
88 return -ENOENT;
89 qobj = gem_to_qxl_bo(gobj);
90 *offset_p = qxl_bo_mmap_offset(qobj);
91 drm_gem_object_unreference_unlocked(gobj);
92 return 0;
93}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644
index 000000000000..b3c51275df5c
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -0,0 +1,567 @@
1/*
2 * Copyright © 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26#include <linux/module.h>
27#include <linux/fb.h>
28
29#include "drmP.h"
30#include "drm/drm.h"
31#include "drm/drm_crtc.h"
32#include "drm/drm_crtc_helper.h"
33#include "qxl_drv.h"
34
35#include "qxl_object.h"
36#include "drm_fb_helper.h"
37
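/* flush dirty regions at most ~30 times per second (HZ / 30 jiffies) */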
38#define QXL_DIRTY_DELAY (HZ / 30)
39
40struct qxl_fbdev {
41 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb;
43 struct list_head fbdev_list;
44 struct qxl_device *qdev;
45
46 void *shadow;
47 int size;
48
49 /* dirty memory logging */
50 struct {
51 spinlock_t lock;
52 bool active;
53 unsigned x1;
54 unsigned y1;
55 unsigned x2;
56 unsigned y2;
57 } dirty;
58};
59
60static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
61 struct qxl_device *qdev, struct fb_info *info,
62 const struct fb_image *image)
63{
64 qxl_fb_image->qdev = qdev;
65 if (info) {
66 qxl_fb_image->visual = info->fix.visual;
67 if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
68 qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
69 memcpy(&qxl_fb_image->pseudo_palette,
70 info->pseudo_palette,
71 sizeof(qxl_fb_image->pseudo_palette));
72 } else {
73 /* fallback */
74 if (image->depth == 1)
75 qxl_fb_image->visual = FB_VISUAL_MONO10;
76 else
77 qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
78 }
79 if (image) {
80 memcpy(&qxl_fb_image->fb_image, image,
81 sizeof(qxl_fb_image->fb_image));
82 }
83}
84
85static void qxl_fb_dirty_flush(struct fb_info *info)
86{
87 struct qxl_fbdev *qfbdev = info->par;
88 struct qxl_device *qdev = qfbdev->qdev;
89 struct qxl_fb_image qxl_fb_image;
90 struct fb_image *image = &qxl_fb_image.fb_image;
91 u32 x1, x2, y1, y2;
92
93 /* TODO: hard coding 32 bpp */
94 int stride = qfbdev->qfb.base.pitches[0] * 4;
95
96 x1 = qfbdev->dirty.x1;
97 x2 = qfbdev->dirty.x2;
98 y1 = qfbdev->dirty.y1;
99 y2 = qfbdev->dirty.y2;
 100	/*
 101	 * we draw from a shadow buffer, qfbdev->shadow
 102	 */
103 qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
104 image->dx = x1;
105 image->dy = y1;
106 image->width = x2 - x1;
107 image->height = y2 - y1;
108 image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
109 warnings */
110 image->bg_color = 0;
111 image->depth = 32; /* TODO: take from somewhere? */
112 image->cmap.start = 0;
113 image->cmap.len = 0;
114 image->cmap.red = NULL;
115 image->cmap.green = NULL;
116 image->cmap.blue = NULL;
117 image->cmap.transp = NULL;
118 image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
119
120 qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
121 qxl_draw_opaque_fb(&qxl_fb_image, stride);
122 qfbdev->dirty.x1 = 0;
123 qfbdev->dirty.x2 = 0;
124 qfbdev->dirty.y1 = 0;
125 qfbdev->dirty.y2 = 0;
126}
127
128static void qxl_deferred_io(struct fb_info *info,
129 struct list_head *pagelist)
130{
131 struct qxl_fbdev *qfbdev = info->par;
132 unsigned long start, end, min, max;
133 struct page *page;
134 int y1, y2;
135
136 min = ULONG_MAX;
137 max = 0;
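	/* collapse the dirtied pages into one contiguous byte range */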
138 list_for_each_entry(page, pagelist, lru) {
139 start = page->index << PAGE_SHIFT;
140 end = start + PAGE_SIZE - 1;
141 min = min(min, start);
142 max = max(max, end);
143 }
144
145 if (min < max) {
146 y1 = min / info->fix.line_length;
147 y2 = (max / info->fix.line_length) + 1;
148
149 /* TODO: add spin lock? */
150 /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
151 qfbdev->dirty.x1 = 0;
152 qfbdev->dirty.y1 = y1;
153 qfbdev->dirty.x2 = info->var.xres;
154 qfbdev->dirty.y2 = y2;
155 /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
156 }
157
158 qxl_fb_dirty_flush(info);
 159}
160
161
162static struct fb_deferred_io qxl_defio = {
163 .delay = QXL_DIRTY_DELAY,
164 .deferred_io = qxl_deferred_io,
165};
166
167static void qxl_fb_fillrect(struct fb_info *info,
168 const struct fb_fillrect *fb_rect)
169{
170 struct qxl_fbdev *qfbdev = info->par;
171 struct qxl_device *qdev = qfbdev->qdev;
172 struct qxl_rect rect;
173 uint32_t color;
174 int x = fb_rect->dx;
175 int y = fb_rect->dy;
176 int width = fb_rect->width;
177 int height = fb_rect->height;
178 uint16_t rop;
179 struct qxl_draw_fill qxl_draw_fill_rec;
180
181 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
182 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
183 color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
184 else
185 color = fb_rect->color;
186 rect.left = x;
187 rect.right = x + width;
188 rect.top = y;
189 rect.bottom = y + height;
190 switch (fb_rect->rop) {
191 case ROP_XOR:
192 rop = SPICE_ROPD_OP_XOR;
193 break;
194 case ROP_COPY:
195 rop = SPICE_ROPD_OP_PUT;
196 break;
197 default:
198 pr_err("qxl_fb_fillrect(): unknown rop, "
199 "defaulting to SPICE_ROPD_OP_PUT\n");
200 rop = SPICE_ROPD_OP_PUT;
201 }
202 qxl_draw_fill_rec.qdev = qdev;
203 qxl_draw_fill_rec.rect = rect;
204 qxl_draw_fill_rec.color = color;
205 qxl_draw_fill_rec.rop = rop;
206 if (!drm_can_sleep()) {
207 qxl_io_log(qdev,
208 "%s: TODO use RCU, mysterious locks with spin_lock\n",
209 __func__);
210 return;
211 }
212 qxl_draw_fill(&qxl_draw_fill_rec);
213}
214
215static void qxl_fb_copyarea(struct fb_info *info,
216 const struct fb_copyarea *region)
217{
218 struct qxl_fbdev *qfbdev = info->par;
219
220 qxl_draw_copyarea(qfbdev->qdev,
221 region->width, region->height,
222 region->sx, region->sy,
223 region->dx, region->dy);
224}
225
226static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
227{
228 qxl_draw_opaque_fb(qxl_fb_image, 0);
229}
230
231static void qxl_fb_imageblit(struct fb_info *info,
232 const struct fb_image *image)
233{
234 struct qxl_fbdev *qfbdev = info->par;
235 struct qxl_device *qdev = qfbdev->qdev;
236 struct qxl_fb_image qxl_fb_image;
237
238 if (!drm_can_sleep()) {
 239		/* we cannot do any ttm_bo allocation since that will fail in
 240		 * ioremap_wc..__get_vm_area_node, so queue the work item
 241		 * instead. This can happen from a printk inside an interrupt
 242		 * context, e.g. smp_apic_timer_interrupt..check_cpu_stall */
243 qxl_io_log(qdev,
244 "%s: TODO use RCU, mysterious locks with spin_lock\n",
245 __func__);
246 return;
247 }
248
249 /* ensure proper order of rendering operations - TODO: must do this
250 * for everything. */
251 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
252 qxl_fb_imageblit_safe(&qxl_fb_image);
253}
254
255int qxl_fb_init(struct qxl_device *qdev)
256{
257 return 0;
258}
259
260static struct fb_ops qxlfb_ops = {
261 .owner = THIS_MODULE,
262 .fb_check_var = drm_fb_helper_check_var,
263 .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
264 .fb_fillrect = qxl_fb_fillrect,
265 .fb_copyarea = qxl_fb_copyarea,
266 .fb_imageblit = qxl_fb_imageblit,
267 .fb_pan_display = drm_fb_helper_pan_display,
268 .fb_blank = drm_fb_helper_blank,
269 .fb_setcmap = drm_fb_helper_setcmap,
270 .fb_debug_enter = drm_fb_helper_debug_enter,
271 .fb_debug_leave = drm_fb_helper_debug_leave,
272};
273
274static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
275{
276 struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
277 int ret;
278
279 ret = qxl_bo_reserve(qbo, false);
280 if (likely(ret == 0)) {
281 qxl_bo_kunmap(qbo);
282 qxl_bo_unpin(qbo);
283 qxl_bo_unreserve(qbo);
284 }
285 drm_gem_object_unreference_unlocked(gobj);
286}
287
288int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
289 struct drm_file *file_priv,
290 uint32_t *handle)
291{
292 int r;
293 struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
294
295 BUG_ON(!gobj);
 296	/* drm_gem_handle_create adds a reference - good */
297 r = drm_gem_handle_create(file_priv, gobj, handle);
298 if (r)
299 return r;
300 return 0;
301}
302
303static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
304 struct drm_mode_fb_cmd2 *mode_cmd,
305 struct drm_gem_object **gobj_p)
306{
307 struct qxl_device *qdev = qfbdev->qdev;
308 struct drm_gem_object *gobj = NULL;
309 struct qxl_bo *qbo = NULL;
310 int ret;
311 int aligned_size, size;
312 int height = mode_cmd->height;
313 int bpp;
314 int depth;
315
316 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
317
318 size = mode_cmd->pitches[0] * height;
319 aligned_size = ALIGN(size, PAGE_SIZE);
 320	/* TODO: deallocate and reallocate surface0 for real. For now, hack
 321	 * up a surface0 large enough for a 1024x768 Xorg 32bpp mode */
322 ret = qxl_gem_object_create(qdev, aligned_size, 0,
323 QXL_GEM_DOMAIN_SURFACE,
324 false, /* is discardable */
325 false, /* is kernel (false means device) */
326 NULL,
327 &gobj);
328 if (ret) {
329 pr_err("failed to allocate framebuffer (%d)\n",
330 aligned_size);
331 return -ENOMEM;
332 }
333 qbo = gem_to_qxl_bo(gobj);
334
335 qbo->surf.width = mode_cmd->width;
336 qbo->surf.height = mode_cmd->height;
337 qbo->surf.stride = mode_cmd->pitches[0];
338 qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
339 ret = qxl_bo_reserve(qbo, false);
340 if (unlikely(ret != 0))
341 goto out_unref;
342 ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
343 if (ret) {
344 qxl_bo_unreserve(qbo);
345 goto out_unref;
346 }
347 ret = qxl_bo_kmap(qbo, NULL);
348 qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
349 if (ret)
350 goto out_unref;
351
352 *gobj_p = gobj;
353 return 0;
354out_unref:
355 qxlfb_destroy_pinned_object(gobj);
356 *gobj_p = NULL;
357 return ret;
358}
359
360static int qxlfb_create(struct qxl_fbdev *qfbdev,
361 struct drm_fb_helper_surface_size *sizes)
362{
363 struct qxl_device *qdev = qfbdev->qdev;
364 struct fb_info *info;
365 struct drm_framebuffer *fb = NULL;
366 struct drm_mode_fb_cmd2 mode_cmd;
367 struct drm_gem_object *gobj = NULL;
368 struct qxl_bo *qbo = NULL;
369 struct device *device = &qdev->pdev->dev;
370 int ret;
371 int size;
372 int bpp = sizes->surface_bpp;
373 int depth = sizes->surface_depth;
374 void *shadow;
375
376 mode_cmd.width = sizes->surface_width;
377 mode_cmd.height = sizes->surface_height;
378
379 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
380 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
381
 382	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
	if (ret)
		return ret;
 383	qbo = gem_to_qxl_bo(gobj);
384 QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
385 mode_cmd.height, mode_cmd.pitches[0]);
386
 387	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
 388	if (!shadow) {
 389		ret = -ENOMEM;
		goto out_unref;
	}
390 QXL_INFO(qdev,
391 "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
392 qxl_bo_gpu_offset(qbo),
393 qxl_bo_mmap_offset(qbo),
394 qbo->kptr,
395 shadow);
396 size = mode_cmd.pitches[0] * mode_cmd.height;
397
398 info = framebuffer_alloc(0, device);
399 if (info == NULL) {
400 ret = -ENOMEM;
401 goto out_unref;
402 }
403
404 info->par = qfbdev;
405
406 qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
407
408 fb = &qfbdev->qfb.base;
409
410 /* setup helper with fb data */
411 qfbdev->helper.fb = fb;
412 qfbdev->helper.fbdev = info;
413 qfbdev->shadow = shadow;
414 strcpy(info->fix.id, "qxldrmfb");
415
416 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
417
418 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
419 info->fbops = &qxlfb_ops;
420
421 /*
422 * TODO: using gobj->size in various places in this function. Not sure
423 * what the difference between the different sizes is.
424 */
425 info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
426 info->fix.smem_len = gobj->size;
427 info->screen_base = qfbdev->shadow;
428 info->screen_size = gobj->size;
429
430 drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
431 sizes->fb_height);
432
433 /* setup aperture base/size for vesafb takeover */
434 info->apertures = alloc_apertures(1);
435 if (!info->apertures) {
436 ret = -ENOMEM;
437 goto out_unref;
438 }
439 info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
440 info->apertures->ranges[0].size = qdev->vram_size;
441
442 info->fix.mmio_start = 0;
443 info->fix.mmio_len = 0;
444
445 if (info->screen_base == NULL) {
446 ret = -ENOSPC;
447 goto out_unref;
448 }
449
450 ret = fb_alloc_cmap(&info->cmap, 256, 0);
451 if (ret) {
452 ret = -ENOMEM;
453 goto out_unref;
454 }
455
456 info->fbdefio = &qxl_defio;
457 fb_deferred_io_init(info);
458
459 qdev->fbdev_info = info;
460 qdev->fbdev_qfb = &qfbdev->qfb;
461 DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
462 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
463 return 0;
464
465out_unref:
466 if (qbo) {
467 ret = qxl_bo_reserve(qbo, false);
468 if (likely(ret == 0)) {
469 qxl_bo_kunmap(qbo);
470 qxl_bo_unpin(qbo);
471 qxl_bo_unreserve(qbo);
472 }
473 }
 474	if (fb && ret)
 475		/* fb is embedded in qfbdev, so clean it up but don't free it */
 476		drm_framebuffer_cleanup(fb);
 477	drm_gem_object_unreference(gobj);
480 return ret;
481}
482
483static int qxl_fb_find_or_create_single(
484 struct drm_fb_helper *helper,
485 struct drm_fb_helper_surface_size *sizes)
486{
487 struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
488 int new_fb = 0;
489 int ret;
490
491 if (!helper->fb) {
492 ret = qxlfb_create(qfbdev, sizes);
493 if (ret)
494 return ret;
495 new_fb = 1;
496 }
497 return new_fb;
498}
499
500static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
501{
502 struct fb_info *info;
503 struct qxl_framebuffer *qfb = &qfbdev->qfb;
504
505 if (qfbdev->helper.fbdev) {
506 info = qfbdev->helper.fbdev;
507
508 unregister_framebuffer(info);
509 framebuffer_release(info);
510 }
511 if (qfb->obj) {
512 qxlfb_destroy_pinned_object(qfb->obj);
513 qfb->obj = NULL;
514 }
515 drm_fb_helper_fini(&qfbdev->helper);
516 vfree(qfbdev->shadow);
517 drm_framebuffer_cleanup(&qfb->base);
518
519 return 0;
520}
521
522static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
523 /* TODO
524 .gamma_set = qxl_crtc_fb_gamma_set,
525 .gamma_get = qxl_crtc_fb_gamma_get,
526 */
527 .fb_probe = qxl_fb_find_or_create_single,
528};
529
530int qxl_fbdev_init(struct qxl_device *qdev)
531{
532 struct qxl_fbdev *qfbdev;
533 int bpp_sel = 32; /* TODO: parameter from somewhere? */
534 int ret;
535
536 qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
537 if (!qfbdev)
538 return -ENOMEM;
539
540 qfbdev->qdev = qdev;
541 qdev->mode_info.qfbdev = qfbdev;
542 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
543
544 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
545 1 /* num_crtc - QXL supports just 1 */,
546 QXLFB_CONN_LIMIT);
547 if (ret) {
548 kfree(qfbdev);
549 return ret;
550 }
551
552 drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
553 drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
554 return 0;
555}
556
557void qxl_fbdev_fini(struct qxl_device *qdev)
558{
559 if (!qdev->mode_info.qfbdev)
560 return;
561
562 qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
563 kfree(qdev->mode_info.qfbdev);
564 qdev->mode_info.qfbdev = NULL;
565}
566
567
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644
index 000000000000..63c6715ad385
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include "qxl_drv.h"
28
 29/* QXL fencing
 30
 31 When we submit operations to the GPU we pass a release reference to the GPU
 32 with them; the release reference is then added to the release ring when
 33 the GPU is finished with that particular operation and has removed it from
 34 its tree.
 35
 36 So we can have multiple outstanding non-linear fences per object.
 37
 38 From a TTM POV we only care whether the object has any outstanding releases
 39 on it.
 40
 41 We wait until all outstanding releases are processed.
 42
 43 The sync object is just a list of release ids that represent the fences on
 44 that buffer.
 45
 46 We just add new releases onto the sync object attached to the object.
 47
 48 This currently uses a radix tree to store the list of release ids.
 49
 50 For some reason, every so often the qxl hw fails to release and things go
 51 wrong. */
52
53
54int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
55{
56 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
57
58 spin_lock(&bo->tbo.bdev->fence_lock);
59 radix_tree_insert(&qfence->tree, rel_id, qfence);
60 qfence->num_active_releases++;
61 spin_unlock(&bo->tbo.bdev->fence_lock);
62 return 0;
63}
64
65int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
66{
67 void *ret;
68 int retval = 0;
69 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
70
71 spin_lock(&bo->tbo.bdev->fence_lock);
72
73 ret = radix_tree_delete(&qfence->tree, rel_id);
74 if (ret == qfence)
75 qfence->num_active_releases--;
76 else {
77 DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
78 retval = -ENOENT;
79 }
80 spin_unlock(&bo->tbo.bdev->fence_lock);
81 return retval;
82}
83
84
85int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
86{
87 qfence->qdev = qdev;
88 qfence->num_active_releases = 0;
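	/* GFP_ATOMIC: release ids are inserted while holding the TTM
	 * fence spinlock, so allocations must not sleep */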
89 INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
90 return 0;
91}
92
93void qxl_fence_fini(struct qxl_fence *qfence)
94{
95 kfree(qfence->release_ids);
96 qfence->num_active_releases = 0;
97}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 000000000000..a235693aabba
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "drmP.h"
27#include "drm/drm.h"
28#include "qxl_drv.h"
29#include "qxl_object.h"
30
31int qxl_gem_object_init(struct drm_gem_object *obj)
32{
 33	/* we do nothing here */
34 return 0;
35}
36
37void qxl_gem_object_free(struct drm_gem_object *gobj)
38{
39 struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
40
41 if (qobj)
42 qxl_bo_unref(&qobj);
43}
44
45int qxl_gem_object_create(struct qxl_device *qdev, int size,
46 int alignment, int initial_domain,
47 bool discardable, bool kernel,
48 struct qxl_surface *surf,
49 struct drm_gem_object **obj)
50{
51 struct qxl_bo *qbo;
52 int r;
53
54 *obj = NULL;
55 /* At least align on page size */
56 if (alignment < PAGE_SIZE)
57 alignment = PAGE_SIZE;
58 r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
59 if (r) {
60 if (r != -ERESTARTSYS)
61 DRM_ERROR(
62 "Failed to allocate GEM object (%d, %d, %u, %d)\n",
63 size, initial_domain, alignment, r);
64 return r;
65 }
66 *obj = &qbo->gem_base;
67
68 mutex_lock(&qdev->gem.mutex);
69 list_add_tail(&qbo->list, &qdev->gem.objects);
70 mutex_unlock(&qdev->gem.mutex);
71
72 return 0;
73}
74
75int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
76 struct drm_file *file_priv,
77 u32 domain,
78 size_t size,
79 struct qxl_surface *surf,
80 struct qxl_bo **qobj,
81 uint32_t *handle)
82{
83 struct drm_gem_object *gobj;
84 int r;
85
86 BUG_ON(!qobj);
87 BUG_ON(!handle);
88
89 r = qxl_gem_object_create(qdev, size, 0,
90 domain,
91 false, false, surf,
92 &gobj);
 93	if (r)
 94		return r;
95 r = drm_gem_handle_create(file_priv, gobj, handle);
96 if (r)
97 return r;
98 /* drop reference from allocate - handle holds it now */
99 *qobj = gem_to_qxl_bo(gobj);
100 drm_gem_object_unreference_unlocked(gobj);
101 return 0;
102}
103
104int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
105 uint64_t *gpu_addr)
106{
107 struct qxl_bo *qobj = obj->driver_private;
108 int r;
109
110 r = qxl_bo_reserve(qobj, false);
111 if (unlikely(r != 0))
112 return r;
113 r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
114 qxl_bo_unreserve(qobj);
115 return r;
116}
117
118void qxl_gem_object_unpin(struct drm_gem_object *obj)
119{
120 struct qxl_bo *qobj = obj->driver_private;
121 int r;
122
123 r = qxl_bo_reserve(qobj, false);
124 if (likely(r == 0)) {
125 qxl_bo_unpin(qobj);
126 qxl_bo_unreserve(qobj);
127 }
128}
129
130int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
131{
132 return 0;
133}
134
135void qxl_gem_object_close(struct drm_gem_object *obj,
136 struct drm_file *file_priv)
137{
138}
139
140int qxl_gem_init(struct qxl_device *qdev)
141{
142 INIT_LIST_HEAD(&qdev->gem.objects);
143 return 0;
144}
145
146void qxl_gem_fini(struct qxl_device *qdev)
147{
148 qxl_bo_force_delete(qdev);
149}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 000000000000..cf856206996b
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <linux/gfp.h>
27#include <linux/slab.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
32static int
33qxl_image_create_helper(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_bo **image_bo,
36 const uint8_t *data,
37 int width, int height,
38 int depth, unsigned int hash,
39 int stride)
40{
41 struct qxl_image *image;
42 struct qxl_data_chunk *chunk;
43 int i;
44 int chunk_stride;
45 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo;
47 int ret;
48 void *ptr;
49 /* Chunk */
50 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */
52 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly
54 first) */
	ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
				    &chunk_bo);
	if (ret)
		return ret;

58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr;
60 chunk->data_size = height * chunk_stride;
61 chunk->prev_chunk = 0;
62 chunk->next_chunk = 0;
63 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
64
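	/*
	 * Two copy strategies below: when the source stride equals the
	 * packed line size (and the chunk stride), the whole bitmap is
	 * streamed page by page; otherwise each scanline is copied on
	 * its own, split across page boundaries as needed.
	 */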
65 {
66 void *k_data, *i_data;
67 int remain;
68 int page;
69 int size;
70 if (stride == linesize && chunk_stride == stride) {
71 remain = linesize * height;
72 page = 0;
73 i_data = (void *)data;
74
75 while (remain > 0) {
76 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
77
78 if (page == 0) {
79 chunk = ptr;
80 k_data = chunk->data;
81 size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
82 } else {
83 k_data = ptr;
84 size = PAGE_SIZE;
85 }
86 size = min(size, remain);
87
88 memcpy(k_data, i_data, size);
89
90 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
91 i_data += size;
92 remain -= size;
93 page++;
94 }
95 } else {
96 unsigned page_base, page_offset, out_offset;
97 for (i = 0 ; i < height ; ++i) {
98 i_data = (void *)data + i * stride;
99 remain = linesize;
100 out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
101
102 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain);
107
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
109 k_data = ptr + page_offset;
110 memcpy(k_data, i_data, size);
111 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
112 remain -= size;
113 i_data += size;
114 out_offset += size;
115 }
116 }
117 }
118 }
121 qxl_bo_kunmap(chunk_bo);
122
123 /* Image */
	ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
	if (ret) {
		qxl_bo_unreserve(chunk_bo);
		qxl_bo_unref(&chunk_bo);
		return ret;
	}

126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr;
128
129 image->descriptor.id = 0;
130 image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
131
132 image->descriptor.flags = 0;
133 image->descriptor.width = width;
134 image->descriptor.height = height;
135
136 switch (depth) {
137 case 1:
138 /* TODO: BE? check by arch? */
139 image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
140 break;
141 case 24:
142 image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
143 break;
144 case 32:
145 image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
146 break;
	default:
		DRM_ERROR("unsupported image bit depth %d\n", depth);
		qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
		return -EINVAL; /* TODO: also unref chunk_bo and *image_bo */
150 }
151 image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
152 image->u.bitmap.x = width;
153 image->u.bitmap.y = height;
154 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
162
163 return 0;
164}
165
166int qxl_image_create(struct qxl_device *qdev,
167 struct qxl_release *release,
168 struct qxl_bo **image_bo,
169 const uint8_t *data,
170 int x, int y, int width, int height,
171 int depth, int stride)
172{
173 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data,
175 width, height, depth, 0, stride);
176}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 000000000000..04b64f9cbfdb
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,411 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/*
30 * TODO: allocating a new gem(in qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
32 */
33static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 struct drm_file *file_priv)
35{
36 struct qxl_device *qdev = dev->dev_private;
37 struct drm_qxl_alloc *qxl_alloc = data;
38 int ret;
39 struct qxl_bo *qobj;
40 uint32_t handle;
41 u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43 if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %u\n", qxl_alloc->size);
45 return -EINVAL;
46 }
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 domain,
49 qxl_alloc->size,
50 NULL,
51 &qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return ret;
56 }
57 qxl_alloc->handle = handle;
58 return 0;
59}
60
61static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file_priv)
63{
64 struct qxl_device *qdev = dev->dev_private;
65 struct drm_qxl_map *qxl_map = data;
66
67 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 &qxl_map->offset);
69}
70
71/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */
76static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
78 struct qxl_bo *src, uint64_t src_off)
79{
80 void *reloc_page;
81
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
84 src, src_off);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
86}
87
88static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
90 struct qxl_bo *src)
91{
92 uint32_t id = 0;
93 void *reloc_page;
94
95 if (src && !src->is_primary)
96 id = src->surface_id;
97
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
101}
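/*
 * Two reloc flavours, driven from qxl_execbuffer_ioctl() below:
 * QXL_RELOC_TYPE_BO patches a 64-bit qxl physical address into the
 * destination bo, while QXL_RELOC_TYPE_SURF patches a 32-bit surface
 * id (0 when the source is the primary surface).
 */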
102
103/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list)
107{
108 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj;
110 int ret;
111
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) {
		DRM_ERROR("bad bo handle %llu\n", handle);
115 return NULL;
116 }
117 qobj = gem_to_qxl_bo(gobj);
118
119 ret = qxl_bo_list_add(reloc_list, qobj);
120 if (ret)
121 return NULL;
122
123 return qobj;
124}
125
126/*
127 * Usage of execbuffer:
128 * Relocations need to take into account the full QXLDrawable size.
129 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes)
131 */
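/*
 * Rough layout of one command bo as built below (a sketch derived from
 * this code, not from a spec):
 *
 *   release_offset + 0                              : union qxl_release_info
 *   release_offset + sizeof(union qxl_release_info) : user command bytes,
 *       copied in with __copy_from_user_inatomic_nocache()
 */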
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv)
134{
135 struct qxl_device *qdev = dev->dev_private;
136 struct drm_qxl_execbuffer *execbuffer = data;
137 struct drm_qxl_command user_cmd;
138 int cmd_num;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd;
143 int i, ret;
144 struct qxl_reloc_list reloc_list;
145 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
150 struct qxl_release *release;
151 struct qxl_bo *cmd_bo;
152 int release_type;
153 struct drm_qxl_command *commands =
154 (struct drm_qxl_command *)execbuffer->commands;
155
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
157 sizeof(user_cmd)))
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
169 }
170
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
172 return -EINVAL;
173
174 ret = qxl_alloc_release_reserved(qdev,
175 sizeof(union qxl_release_info) +
176 user_cmd.command_size,
177 release_type,
178 &release,
179 &cmd_bo);
180 if (ret)
181 return ret;
182
183 /* TODO copy slow path code from i915 */
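		/*
		 * Note on the masking below: release bos are one page, so
		 * release_offset stays below PAGE_SIZE; "& PAGE_SIZE" is
		 * then always 0 (page 0) and "& ~PAGE_SIZE" leaves the
		 * offset intact.  This is a reading of the code, not a
		 * documented invariant.
		 */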
		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo,
						 (release->release_offset & PAGE_SIZE));
		unwritten = __copy_from_user_inatomic_nocache(fb_cmd +
				sizeof(union qxl_release_info) +
				(release->release_offset & ~PAGE_SIZE),
				(void *)(unsigned long)user_cmd.command,
				user_cmd.command_size);
186 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
187 if (unwritten) {
188 DRM_ERROR("got unwritten %d\n", unwritten);
189 qxl_release_unreserve(qdev, release);
190 qxl_release_free(qdev, release);
191 return -EFAULT;
192 }
193
194 for (i = 0 ; i < user_cmd.relocs_num; ++i) {
195 if (DRM_COPY_FROM_USER(&reloc,
196 &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
197 sizeof(reloc))) {
198 qxl_bo_list_unreserve(&reloc_list, true);
199 qxl_release_unreserve(qdev, release);
200 qxl_release_free(qdev, release);
201 return -EFAULT;
202 }
203
204 /* add the bos to the list of bos to validate -
205 need to validate first then process relocs? */
206 if (reloc.dst_handle) {
207 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
208 reloc.dst_handle, &reloc_list);
209 if (!reloc_dst_bo) {
210 qxl_bo_list_unreserve(&reloc_list, true);
211 qxl_release_unreserve(qdev, release);
212 qxl_release_free(qdev, release);
213 return -EINVAL;
214 }
215 reloc_dst_offset = 0;
216 } else {
217 reloc_dst_bo = cmd_bo;
218 reloc_dst_offset = release->release_offset;
219 }
220
221 /* reserve and validate the reloc dst bo */
222 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
223 reloc_src_bo =
224 qxlhw_handle_to_bo(qdev, file_priv,
225 reloc.src_handle, &reloc_list);
226 if (!reloc_src_bo) {
227 if (reloc_dst_bo != cmd_bo)
228 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
229 qxl_bo_list_unreserve(&reloc_list, true);
230 qxl_release_unreserve(qdev, release);
231 qxl_release_free(qdev, release);
232 return -EINVAL;
233 }
234 } else
235 reloc_src_bo = NULL;
236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
237 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
238 reloc_src_bo, reloc.src_offset);
239 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
240 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
			} else {
				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
				qxl_bo_list_unreserve(&reloc_list, true);
				qxl_release_unreserve(qdev, release);
				qxl_release_free(qdev, release);
				return -EINVAL;
			}
245
246 if (reloc_src_bo && reloc_src_bo != cmd_bo) {
247 qxl_release_add_res(qdev, release, reloc_src_bo);
248 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
249 }
250
251 if (reloc_dst_bo != cmd_bo)
252 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
253 }
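		/* make each bo backing this release fenceable, then hand the
		 * command to the device ring; the release is what tells us
		 * when the device is done with those bos */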
254 qxl_fence_releaseable(qdev, release);
255
256 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
257 if (ret == -ERESTARTSYS) {
258 qxl_release_unreserve(qdev, release);
259 qxl_release_free(qdev, release);
260 qxl_bo_list_unreserve(&reloc_list, true);
261 return ret;
262 }
263 qxl_release_unreserve(qdev, release);
264 }
	qxl_bo_list_unreserve(&reloc_list, false);
266 return 0;
267}
268
269static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *file)
271{
272 struct qxl_device *qdev = dev->dev_private;
273 struct drm_qxl_update_area *update_area = data;
274 struct qxl_rect area = {.left = update_area->left,
275 .top = update_area->top,
276 .right = update_area->right,
277 .bottom = update_area->bottom};
278 int ret;
279 struct drm_gem_object *gobj = NULL;
280 struct qxl_bo *qobj = NULL;
281
282 if (update_area->left >= update_area->right ||
283 update_area->top >= update_area->bottom)
284 return -EINVAL;
285
286 gobj = drm_gem_object_lookup(dev, file, update_area->handle);
287 if (gobj == NULL)
288 return -ENOENT;
289
290 qobj = gem_to_qxl_bo(gobj);
291
292 ret = qxl_bo_reserve(qobj, false);
293 if (ret)
294 goto out;
295
296 if (!qobj->pin_count) {
297 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
298 true, false);
299 if (unlikely(ret))
300 goto out;
301 }
302
303 ret = qxl_bo_check_id(qdev, qobj);
304 if (ret)
305 goto out2;
306 if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id, handle %d\n",
			  update_area->handle);
308 ret = qxl_io_update_area(qdev, qobj, &area);
309
310out2:
311 qxl_bo_unreserve(qobj);
312
313out:
314 drm_gem_object_unreference_unlocked(gobj);
315 return ret;
316}
317
318static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
319 struct drm_file *file_priv)
320{
321 struct qxl_device *qdev = dev->dev_private;
322 struct drm_qxl_getparam *param = data;
323
324 switch (param->param) {
325 case QXL_PARAM_NUM_SURFACES:
326 param->value = qdev->rom->n_surfaces;
327 break;
328 case QXL_PARAM_MAX_RELOCS:
329 param->value = QXL_MAX_RES;
330 break;
331 default:
332 return -EINVAL;
333 }
334 return 0;
335}
336
337static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
338 struct drm_file *file_priv)
339{
340 struct qxl_device *qdev = dev->dev_private;
341 struct drm_qxl_clientcap *param = data;
342 int byte, idx;
343
344 byte = param->index / 8;
345 idx = param->index % 8;
346
347 if (qdev->pdev->revision < 4)
348 return -ENOSYS;
349
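	/* 58 matches the width of rom->client_capabilities[] in the qxl
	 * device ABI (assumption; the array size is not re-derived here) */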
350 if (byte >= 58)
351 return -ENOSYS;
352
353 if (qdev->rom->client_capabilities[byte] & (1 << idx))
354 return 0;
355 return -ENOSYS;
356}
357
358static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
359 struct drm_file *file)
360{
361 struct qxl_device *qdev = dev->dev_private;
362 struct drm_qxl_alloc_surf *param = data;
363 struct qxl_bo *qobj;
	uint32_t handle;
365 int ret;
366 int size, actual_stride;
367 struct qxl_surface surf;
368
369 /* work out size allocate bo with handle */
370 actual_stride = param->stride < 0 ? -param->stride : param->stride;
371 size = actual_stride * param->height + actual_stride;
372
373 surf.format = param->format;
374 surf.width = param->width;
375 surf.height = param->height;
376 surf.stride = param->stride;
377 surf.data = 0;
378
379 ret = qxl_gem_object_create_with_handle(qdev, file,
380 QXL_GEM_DOMAIN_SURFACE,
381 size,
382 &surf,
383 &qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return ret;
	}
	param->handle = handle;
	return 0;
391}
392
393struct drm_ioctl_desc qxl_ioctls[] = {
394 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
395
396 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
397
398 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
399 DRM_AUTH|DRM_UNLOCKED),
400 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
401 DRM_AUTH|DRM_UNLOCKED),
402 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
403 DRM_AUTH|DRM_UNLOCKED),
404 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
405 DRM_AUTH|DRM_UNLOCKED),
406
407 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
408 DRM_AUTH|DRM_UNLOCKED),
409};
410
411int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 000000000000..21393dc4700a
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27
28irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
29{
30 struct drm_device *dev = (struct drm_device *) arg;
31 struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
32 uint32_t pending;
33
34 pending = xchg(&qdev->ram_header->int_pending, 0);
35
36 atomic_inc(&qdev->irq_received);
37
38 if (pending & QXL_INTERRUPT_DISPLAY) {
39 atomic_inc(&qdev->irq_received_display);
40 wake_up_all(&qdev->display_event);
41 qxl_queue_garbage_collect(qdev, false);
42 }
43 if (pending & QXL_INTERRUPT_CURSOR) {
44 atomic_inc(&qdev->irq_received_cursor);
45 wake_up_all(&qdev->cursor_event);
46 }
47 if (pending & QXL_INTERRUPT_IO_CMD) {
48 atomic_inc(&qdev->irq_received_io_cmd);
49 wake_up_all(&qdev->io_cmd_event);
50 }
51 if (pending & QXL_INTERRUPT_ERROR) {
52 /* TODO: log it, reset device (only way to exit this condition)
53 * (do it a certain number of times, afterwards admit defeat,
54 * to avoid endless loops).
55 */
56 qdev->irq_received_error++;
57 qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
58 }
59 if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
60 qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
61 schedule_work(&qdev->client_monitors_config_work);
62 }
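	/* re-arm: restore the interrupt mask, then ack the device through
	 * the UPDATE_IRQ io port */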
63 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
64 outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
65 return IRQ_HANDLED;
66}
67
68static void qxl_client_monitors_config_work_func(struct work_struct *work)
69{
70 struct qxl_device *qdev = container_of(work, struct qxl_device,
71 client_monitors_config_work);
72
73 qxl_display_read_client_monitors_config(qdev);
74}
75
76int qxl_irq_init(struct qxl_device *qdev)
77{
78 int ret;
79
80 init_waitqueue_head(&qdev->display_event);
81 init_waitqueue_head(&qdev->cursor_event);
82 init_waitqueue_head(&qdev->io_cmd_event);
83 INIT_WORK(&qdev->client_monitors_config_work,
84 qxl_client_monitors_config_work_func);
85 atomic_set(&qdev->irq_received, 0);
86 atomic_set(&qdev->irq_received_display, 0);
87 atomic_set(&qdev->irq_received_cursor, 0);
88 atomic_set(&qdev->irq_received_io_cmd, 0);
89 qdev->irq_received_error = 0;
90 ret = drm_irq_install(qdev->ddev);
91 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed installing irq: %d\n", ret);
		return ret;
	}
96 return 0;
97}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 000000000000..85127ed24cfd
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,302 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29#include <linux/io-mapping.h>
30
31int qxl_log_level;
32
33static void qxl_dump_mode(struct qxl_device *qdev, void *p)
34{
35 struct qxl_mode *m = p;
36 DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
37 m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
38 m->y_mili, m->orientation);
39}
40
41static bool qxl_check_device(struct qxl_device *qdev)
42{
43 struct qxl_rom *rom = qdev->rom;
44 int mode_offset;
45 int i;
46
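	/* the rom magic 0x4f525851 reads as "QXRO" in little-endian byte
	 * order (assumption from the byte values, not from a spec) */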
47 if (rom->magic != 0x4f525851) {
48 DRM_ERROR("bad rom signature %x\n", rom->magic);
49 return false;
50 }
51
52 DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
53 DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
54 rom->log_level);
55 DRM_INFO("Currently using mode #%d, list at 0x%x\n",
56 rom->mode, rom->modes_offset);
57 DRM_INFO("%d io pages at offset 0x%x\n",
58 rom->num_io_pages, rom->pages_offset);
59 DRM_INFO("%d byte draw area at offset 0x%x\n",
60 rom->surface0_area_size, rom->draw_area_offset);
61
62 qdev->vram_size = rom->surface0_area_size;
63 DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
64
65 mode_offset = rom->modes_offset / 4;
66 qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
67 DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
68 qdev->mode_info.num_modes);
69 qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
70 for (i = 0; i < qdev->mode_info.num_modes; i++)
71 qxl_dump_mode(qdev, qdev->mode_info.modes + i);
72 return true;
73}
74
75static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
76 unsigned long start_phys_addr, unsigned long end_phys_addr)
77{
78 uint64_t high_bits;
79 struct qxl_memslot *slot;
80 uint8_t slot_index;
81 struct qxl_ram_header *ram_header = qdev->ram_header;
82
83 slot_index = qdev->rom->slots_start + slot_index_offset;
84 slot = &qdev->mem_slots[slot_index];
85 slot->start_phys_addr = start_phys_addr;
86 slot->end_phys_addr = end_phys_addr;
87 ram_header->mem_slot.mem_start = slot->start_phys_addr;
88 ram_header->mem_slot.mem_end = slot->end_phys_addr;
89 qxl_io_memslot_add(qdev, slot_index);
90 slot->generation = qdev->rom->slot_generation;
91 high_bits = slot_index << qdev->slot_gen_bits;
92 high_bits |= slot->generation;
93 high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
94 slot->high_bits = high_bits;
95 return slot_index;
96}
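/*
 * high_bits packs the slot index and generation into the top bits of a
 * 64-bit qxl physical address; qxl_bo_physical_address() presumably ORs
 * them with a bo offset masked by va_slot_mask (an assumption based on
 * the bit layout computed above).
 */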
97
98static void qxl_gc_work(struct work_struct *work)
99{
100 struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
101 qxl_garbage_collect(qdev);
102}
103
104int qxl_device_init(struct qxl_device *qdev,
105 struct drm_device *ddev,
106 struct pci_dev *pdev,
107 unsigned long flags)
108{
109 int r;
110
111 qdev->dev = &pdev->dev;
112 qdev->ddev = ddev;
113 qdev->pdev = pdev;
114 qdev->flags = flags;
115
116 mutex_init(&qdev->gem.mutex);
117 mutex_init(&qdev->update_area_mutex);
118 mutex_init(&qdev->release_mutex);
119 mutex_init(&qdev->surf_evict_mutex);
120 INIT_LIST_HEAD(&qdev->gem.objects);
121
122 qdev->rom_base = pci_resource_start(pdev, 2);
123 qdev->rom_size = pci_resource_len(pdev, 2);
124 qdev->vram_base = pci_resource_start(pdev, 0);
125 qdev->surfaceram_base = pci_resource_start(pdev, 1);
126 qdev->surfaceram_size = pci_resource_len(pdev, 1);
127 qdev->io_base = pci_resource_start(pdev, 3);
128
129 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
130 qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
131 DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
132 (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
133 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
134 (int)pci_resource_len(pdev, 0) / 1024,
135 (void *)qdev->surfaceram_base,
136 (void *)pci_resource_end(pdev, 1),
137 (int)qdev->surfaceram_size / 1024 / 1024,
138 (int)qdev->surfaceram_size / 1024);
139
140 qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
141 if (!qdev->rom) {
142 pr_err("Unable to ioremap ROM\n");
143 return -ENOMEM;
144 }
145
	if (!qxl_check_device(qdev))
		return -EINVAL;
147
148 r = qxl_bo_init(qdev);
149 if (r) {
150 DRM_ERROR("bo init failed %d\n", r);
151 return r;
152 }
153
154 qdev->ram_header = ioremap(qdev->vram_base +
155 qdev->rom->ram_header_offset,
156 sizeof(*qdev->ram_header));
157
158 qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
159 sizeof(struct qxl_command),
160 QXL_COMMAND_RING_SIZE,
161 qdev->io_base + QXL_IO_NOTIFY_CMD,
162 false,
163 &qdev->display_event);
164
165 qdev->cursor_ring = qxl_ring_create(
166 &(qdev->ram_header->cursor_ring_hdr),
167 sizeof(struct qxl_command),
168 QXL_CURSOR_RING_SIZE,
169 qdev->io_base + QXL_IO_NOTIFY_CMD,
170 false,
171 &qdev->cursor_event);
172
173 qdev->release_ring = qxl_ring_create(
174 &(qdev->ram_header->release_ring_hdr),
175 sizeof(uint64_t),
176 QXL_RELEASE_RING_SIZE, 0, true,
177 NULL);
178
179 /* TODO - slot initialization should happen on reset. where is our
180 * reset handler? */
181 qdev->n_mem_slots = qdev->rom->slots_end;
182 qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
183 qdev->slot_id_bits = qdev->rom->slot_id_bits;
184 qdev->va_slot_mask =
185 (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
186
	qdev->mem_slots =
		kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
			      GFP_KERNEL);
	if (!qdev->mem_slots)
		return -ENOMEM;
190
191 idr_init(&qdev->release_idr);
192 spin_lock_init(&qdev->release_idr_lock);
193
194 idr_init(&qdev->surf_id_idr);
195 spin_lock_init(&qdev->surf_id_idr_lock);
196
197 mutex_init(&qdev->async_io_mutex);
198
199 /* reset the device into a known state - no memslots, no primary
200 * created, no surfaces. */
201 qxl_io_reset(qdev);
202
203 /* must initialize irq before first async io - slot creation */
204 r = qxl_irq_init(qdev);
205 if (r)
206 return r;
207
208 /*
209 * Note that virtual is surface0. We rely on the single ioremap done
210 * before.
211 */
212 qdev->main_mem_slot = setup_slot(qdev, 0,
213 (unsigned long)qdev->vram_base,
214 (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
215 qdev->surfaces_mem_slot = setup_slot(qdev, 1,
216 (unsigned long)qdev->surfaceram_base,
217 (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
218 DRM_INFO("main mem slot %d [%lx,%x)\n",
219 qdev->main_mem_slot,
220 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
223 qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
224 INIT_WORK(&qdev->gc_work, qxl_gc_work);
225
226 r = qxl_fb_init(qdev);
227 if (r)
228 return r;
229
230 return 0;
231}
232
233static void qxl_device_fini(struct qxl_device *qdev)
234{
235 if (qdev->current_release_bo[0])
236 qxl_bo_unref(&qdev->current_release_bo[0]);
237 if (qdev->current_release_bo[1])
238 qxl_bo_unref(&qdev->current_release_bo[1]);
239 flush_workqueue(qdev->gc_queue);
240 destroy_workqueue(qdev->gc_queue);
241 qdev->gc_queue = NULL;
242
243 qxl_ring_free(qdev->command_ring);
244 qxl_ring_free(qdev->cursor_ring);
245 qxl_ring_free(qdev->release_ring);
246 qxl_bo_fini(qdev);
247 io_mapping_free(qdev->surface_mapping);
248 io_mapping_free(qdev->vram_mapping);
249 iounmap(qdev->ram_header);
250 iounmap(qdev->rom);
251 qdev->rom = NULL;
252 qdev->mode_info.modes = NULL;
253 qdev->mode_info.num_modes = 0;
254 qxl_debugfs_remove_files(qdev);
255}
256
257int qxl_driver_unload(struct drm_device *dev)
258{
259 struct qxl_device *qdev = dev->dev_private;
260
261 if (qdev == NULL)
262 return 0;
263 qxl_modeset_fini(qdev);
264 qxl_device_fini(qdev);
265
266 kfree(qdev);
267 dev->dev_private = NULL;
268 return 0;
269}
270
271int qxl_driver_load(struct drm_device *dev, unsigned long flags)
272{
273 struct qxl_device *qdev;
274 int r;
275
276 /* require kms */
277 if (!drm_core_check_feature(dev, DRIVER_MODESET))
278 return -ENODEV;
279
280 qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
281 if (qdev == NULL)
282 return -ENOMEM;
283
284 dev->dev_private = qdev;
285
286 r = qxl_device_init(qdev, dev, dev->pdev, flags);
287 if (r)
288 goto out;
289
290 r = qxl_modeset_init(qdev);
291 if (r) {
292 qxl_driver_unload(dev);
293 goto out;
294 }
295
296 return 0;
297out:
298 kfree(qdev);
299 return r;
300}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 000000000000..d9b12e7bc6e1
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29#include <linux/io-mapping.h>
30static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
31{
32 struct qxl_bo *bo;
33 struct qxl_device *qdev;
34
35 bo = container_of(tbo, struct qxl_bo, tbo);
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
37
38 qxl_surface_evict(qdev, bo, false);
39 qxl_fence_fini(&bo->fence);
40 mutex_lock(&qdev->gem.mutex);
41 list_del_init(&bo->list);
42 mutex_unlock(&qdev->gem.mutex);
43 drm_gem_object_release(&bo->gem_base);
44 kfree(bo);
45}
46
47bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
48{
49 if (bo->destroy == &qxl_ttm_bo_destroy)
50 return true;
51 return false;
52}
53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
55{
56 u32 c = 0;
57
58 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
64 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
66 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
68 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c;
71 qbo->placement.num_busy_placement = c;
72}
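/*
 * Domain to TTM placement map used above: VRAM -> TTM_PL_FLAG_VRAM,
 * SURFACE -> TTM_PL_FLAG_PRIV0 (the surface aperture), CPU and anything
 * unrecognised -> cached system memory.
 */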
75int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain,
77 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr)
79{
80 struct qxl_bo *bo;
81 enum ttm_bo_type type;
82 int r;
83
84 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
85 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
86 if (kernel)
87 type = ttm_bo_type_kernel;
88 else
89 type = ttm_bo_type_device;
90 *bo_ptr = NULL;
91 bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
92 if (bo == NULL)
93 return -ENOMEM;
94 size = roundup(size, PAGE_SIZE);
95 r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
96 if (unlikely(r)) {
97 kfree(bo);
98 return r;
99 }
100 bo->gem_base.driver_private = NULL;
101 bo->type = domain;
102 bo->pin_count = 0;
103 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0);
107 if (surf)
108 bo->surf = *surf;
109
110 qxl_ttm_placement_from_domain(bo, domain);
111
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size,
114 NULL, &qxl_ttm_bo_destroy);
115 if (unlikely(r != 0)) {
116 if (r != -ERESTARTSYS)
117 dev_err(qdev->dev,
118 "object_init failed for (%lu, 0x%08X)\n",
119 size, domain);
120 return r;
121 }
122 *bo_ptr = bo;
123 return 0;
124}
125
126int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
127{
128 bool is_iomem;
129 int r;
130
131 if (bo->kptr) {
132 if (ptr)
133 *ptr = bo->kptr;
134 return 0;
135 }
136 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
137 if (r)
138 return r;
139 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
140 if (ptr)
141 *ptr = bo->kptr;
142 return 0;
143}
144
145void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
146 struct qxl_bo *bo, int page_offset)
147{
148 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
149 void *rptr;
150 int ret;
151 struct io_mapping *map;
152
153 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
154 map = qdev->vram_mapping;
155 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
156 map = qdev->surface_mapping;
157 else
158 goto fallback;
159
160 (void) ttm_mem_io_lock(man, false);
161 ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
162 ttm_mem_io_unlock(man);
163
164 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
165fallback:
	if (bo->kptr) {
		/* call sites pass a byte offset (e.g. page << PAGE_SHIFT);
		 * do not scale it by PAGE_SIZE again */
		rptr = bo->kptr + page_offset;
		return rptr;
	}
170
171 ret = qxl_bo_kmap(bo, &rptr);
172 if (ret)
173 return NULL;
174
	rptr += page_offset;
176 return rptr;
177}
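/*
 * Callers must pair the above with qxl_bo_kunmap_atomic_page(); note the
 * fallback path hands out the persistent kmap rather than an atomic
 * mapping, and the kunmap side mirrors that choice.
 */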
178
179void qxl_bo_kunmap(struct qxl_bo *bo)
180{
181 if (bo->kptr == NULL)
182 return;
183 bo->kptr = NULL;
184 ttm_bo_kunmap(&bo->kmap);
185}
186
187void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
188 struct qxl_bo *bo, void *pmap)
189{
190 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
191 struct io_mapping *map;
192
193 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
194 map = qdev->vram_mapping;
195 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
196 map = qdev->surface_mapping;
197 else
198 goto fallback;
199
200 io_mapping_unmap_atomic(pmap);
201
202 (void) ttm_mem_io_lock(man, false);
203 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
204 ttm_mem_io_unlock(man);
	return;
fallback:
207 qxl_bo_kunmap(bo);
208}
209
210void qxl_bo_unref(struct qxl_bo **bo)
211{
212 struct ttm_buffer_object *tbo;
213
214 if ((*bo) == NULL)
215 return;
216 tbo = &((*bo)->tbo);
217 ttm_bo_unref(&tbo);
218 if (tbo == NULL)
219 *bo = NULL;
220}
221
222struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
223{
224 ttm_bo_reference(&bo->tbo);
225 return bo;
226}
227
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i;
232
233 if (bo->pin_count) {
234 bo->pin_count++;
235 if (gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0;
238 }
239 qxl_ttm_placement_from_domain(bo, domain);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) {
244 bo->pin_count = 1;
245 if (gpu_addr != NULL)
246 *gpu_addr = qxl_bo_gpu_offset(bo);
247 }
248 if (unlikely(r != 0))
249 dev_err(qdev->dev, "%p pin failed\n", bo);
250 return r;
251}
252
253int qxl_bo_unpin(struct qxl_bo *bo)
254{
255 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
256 int r, i;
257
258 if (!bo->pin_count) {
259 dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
260 return 0;
261 }
262 bo->pin_count--;
263 if (bo->pin_count)
264 return 0;
265 for (i = 0; i < bo->placement.num_placement; i++)
266 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
267 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
268 if (unlikely(r != 0))
269 dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
270 return r;
271}
272
273void qxl_bo_force_delete(struct qxl_device *qdev)
274{
275 struct qxl_bo *bo, *n;
276
277 if (list_empty(&qdev->gem.objects))
278 return;
	dev_err(qdev->dev, "Userspace still has active objects!\n");
280 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
281 mutex_lock(&qdev->ddev->struct_mutex);
282 dev_err(qdev->dev, "%p %p %lu %lu force free\n",
283 &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
284 *((unsigned long *)&bo->gem_base.refcount));
285 mutex_lock(&qdev->gem.mutex);
286 list_del_init(&bo->list);
287 mutex_unlock(&qdev->gem.mutex);
288 /* this should unref the ttm bo */
289 drm_gem_object_unreference(&bo->gem_base);
290 mutex_unlock(&qdev->ddev->struct_mutex);
291 }
292}
293
294int qxl_bo_init(struct qxl_device *qdev)
295{
296 return qxl_ttm_init(qdev);
297}
298
299void qxl_bo_fini(struct qxl_device *qdev)
300{
301 qxl_ttm_fini(qdev);
302}
303
304int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
305{
306 int ret;
307 if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
308 /* allocate a surface id for this surface now */
309 ret = qxl_surface_id_alloc(qdev, bo);
310 if (ret)
311 return ret;
312
313 ret = qxl_hw_surface_alloc(qdev, bo, NULL);
314 if (ret)
315 return ret;
316 }
317 return 0;
318}
319
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 000000000000..b4fd89fbd8b7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25#ifndef QXL_OBJECT_H
26#define QXL_OBJECT_H
27
28#include "qxl_drv.h"
29
30static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
31{
32 int r;
33
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
35 if (unlikely(r != 0)) {
36 if (r != -ERESTARTSYS) {
37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
38 dev_err(qdev->dev, "%p reserve failed\n", bo);
39 }
40 return r;
41 }
42 return 0;
43}
44
45static inline void qxl_bo_unreserve(struct qxl_bo *bo)
46{
47 ttm_bo_unreserve(&bo->tbo);
48}
49
50static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
51{
52 return bo->tbo.offset;
53}
54
55static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
56{
57 return bo->tbo.num_pages << PAGE_SHIFT;
58}
59
60static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
61{
62 return !!atomic_read(&bo->tbo.reserved);
63}
64
65static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
66{
67 return bo->tbo.addr_space_offset;
68}
69
70static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
71 bool no_wait)
72{
73 int r;
74
75 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
76 if (unlikely(r != 0)) {
77 if (r != -ERESTARTSYS) {
78 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
79 dev_err(qdev->dev, "%p reserve failed for wait\n",
80 bo);
81 }
82 return r;
83 }
84 spin_lock(&bo->tbo.bdev->fence_lock);
85 if (mem_type)
86 *mem_type = bo->tbo.mem.mem_type;
87 if (bo->tbo.sync_obj)
88 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
89 spin_unlock(&bo->tbo.bdev->fence_lock);
90 ttm_bo_unreserve(&bo->tbo);
91 return r;
92}
93
94extern int qxl_bo_create(struct qxl_device *qdev,
95 unsigned long size,
96 bool kernel, u32 domain,
97 struct qxl_surface *surf,
98 struct qxl_bo **bo_ptr);
99extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
100extern void qxl_bo_kunmap(struct qxl_bo *bo);
101void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
102void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
103extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
104extern void qxl_bo_unref(struct qxl_bo **bo);
105extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
106extern int qxl_bo_unpin(struct qxl_bo *bo);
107extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
108extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
109
110extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
111extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
112#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 000000000000..b443d6751d5f
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,304 @@
1/*
2 * Copyright 2011 Red Hat, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "qxl_drv.h"
23#include "qxl_object.h"
24
25/*
26 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
27 * into 256 byte chunks for now - gives 16 cmds per page.
28 *
29 * use an ida to index into the chunks?
30 */
31/* manage releaseables */
32/* stack them 16 high for now -drawable object is 191 */
33#define RELEASE_SIZE 256
34#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
35/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
36#define SURFACE_RELEASE_SIZE 128
37#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
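/*
 * Sketch of the suballocation the two tables above drive: for a release
 * of kind cur_idx,
 *     release_offset = current_release_bo_offset[cur_idx]
 *                      * release_size_per_bo[cur_idx];
 * within current_release_bo[cur_idx]; once releases_per_bo[cur_idx]
 * slots are used up, a fresh page-sized bo is allocated.
 */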
41uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret)
44{
45 struct qxl_release *release;
46 int handle;
47 size_t size = sizeof(*release);
48 int idr_ret;
49
50 release = kmalloc(size, GFP_KERNEL);
51 if (!release) {
52 DRM_ERROR("Out of memory\n");
53 return 0;
54 }
55 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0;
58 release->surface_release_id = 0;
59
60 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock);
62 idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
63 spin_unlock(&qdev->release_idr_lock);
64 idr_preload_end();
	if (idr_ret < 0) {
		kfree(release);
		return 0;
	}
	handle = idr_ret;
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;

	return handle;
74}
75
76void
77qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release)
79{
80 int i;
81
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
83 release->type, release->bo_count);
84
85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87
88 for (i = 0 ; i < release->bo_count; ++i) {
89 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset
91 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id);
93 qxl_bo_unref(&release->bos[i]);
94 }
95 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id);
97 spin_unlock(&qdev->release_idr_lock);
98 kfree(release);
99}
100
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo)
119{
120 int ret;
121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
122 bo);
123 return ret;
124}
125
126int qxl_release_reserve(struct qxl_device *qdev,
127 struct qxl_release *release, bool no_wait)
128{
129 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
131 ret = qxl_bo_reserve(release->bos[0], no_wait);
132 if (ret)
133 return ret;
134 }
135 return 0;
136}
137
138void qxl_release_unreserve(struct qxl_device *qdev,
139 struct qxl_release *release)
140{
141 if (atomic_dec_and_test(&release->bos[0]->reserve_count))
142 qxl_bo_unreserve(release->bos[0]);
143}
144
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel,
148 struct qxl_release **release)
149{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret;
154 struct qxl_bo *bo;
155 union qxl_release_info *info;
156
157 /* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (!idr_ret)
			return -ENOMEM;
159 bo = qxl_bo_ref(create_rel->bos[0]);
160
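		/* the +64 below looks like the room reserved for the create
		 * command ahead of this release (assumption; it matches the
		 * "stash the release after the create command" note above) */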
161 (*release)->release_offset = create_rel->release_offset + 64;
162
163 qxl_release_add_res(qdev, *release, bo);
164
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info);
175out_unref:
176 qxl_bo_unref(&bo);
177 return ret;
178 }
179
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
181 QXL_RELEASE_SURFACE_CMD, release, NULL);
182}
183
184int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
185 int type, struct qxl_release **release,
186 struct qxl_bo **rbo)
187{
188 struct qxl_bo *bo;
189 int idr_ret;
190 int ret;
191 union qxl_release_info *info;
192 int cur_idx;
193
194 if (type == QXL_RELEASE_DRAWABLE)
195 cur_idx = 0;
196 else if (type == QXL_RELEASE_SURFACE_CMD)
197 cur_idx = 1;
198 else if (type == QXL_RELEASE_CURSOR_CMD)
199 cur_idx = 2;
200 else {
201 DRM_ERROR("got illegal type: %d\n", type);
202 return -EINVAL;
203 }
204
	idr_ret = qxl_release_alloc(qdev, type, release);
	if (!idr_ret)
		return -ENOMEM;

207 mutex_lock(&qdev->release_mutex);
208 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
209 qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
210 qdev->current_release_bo_offset[cur_idx] = 0;
211 qdev->current_release_bo[cur_idx] = NULL;
212 }
213 if (!qdev->current_release_bo[cur_idx]) {
214 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
215 if (ret) {
216 mutex_unlock(&qdev->release_mutex);
217 return ret;
218 }
219
220 /* pin releases bo's they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 }
225
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
227
228 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
229 qdev->current_release_bo_offset[cur_idx]++;
230
231 if (rbo)
232 *rbo = bo;
233
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex);
238 if (ret)
239 goto out_unref;
240
241 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info);
244
245out_unref:
246 qxl_bo_unref(&bo);
247 return ret;
248}
249
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id)
266{
267 struct qxl_release *release;
268
269 spin_lock(&qdev->release_idr_lock);
270 release = idr_find(&qdev->release_idr, id);
271 spin_unlock(&qdev->release_idr_lock);
272 if (!release) {
273 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL;
275 }
276 if (release->bo_count < 1) {
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release;
281}
282
283union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
284 struct qxl_release *release)
285{
286 void *ptr;
287 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0];
289
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
291 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info;
293}
294
295void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release,
297 union qxl_release_info *info)
298{
299 struct qxl_bo *bo = release->bos[0];
300 void *ptr;
301
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 000000000000..489cb8cece4d
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,581 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <ttm/ttm_bo_api.h>
27#include <ttm/ttm_bo_driver.h>
28#include <ttm/ttm_placement.h>
29#include <ttm/ttm_page_alloc.h>
30#include <ttm/ttm_module.h>
31#include <drm/drmP.h>
32#include <drm/drm.h>
33#include <drm/qxl_drm.h>
34#include "qxl_drv.h"
35#include "qxl_object.h"
36
37#include <linux/delay.h>
38static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
39
40static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
41{
42 struct qxl_mman *mman;
43 struct qxl_device *qdev;
44
45 mman = container_of(bdev, struct qxl_mman, bdev);
46 qdev = container_of(mman, struct qxl_device, mman);
47 return qdev;
48}
49
50static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
51{
52 return ttm_mem_global_init(ref->object);
53}
54
55static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
56{
57 ttm_mem_global_release(ref->object);
58}
59
60static int qxl_ttm_global_init(struct qxl_device *qdev)
61{
62 struct drm_global_reference *global_ref;
63 int r;
64
65 qdev->mman.mem_global_referenced = false;
66 global_ref = &qdev->mman.mem_global_ref;
67 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
68 global_ref->size = sizeof(struct ttm_mem_global);
69 global_ref->init = &qxl_ttm_mem_global_init;
70 global_ref->release = &qxl_ttm_mem_global_release;
71
72 r = drm_global_item_ref(global_ref);
73 if (r != 0) {
74 DRM_ERROR("Failed setting up TTM memory accounting "
75 "subsystem.\n");
76 return r;
77 }
78
79 qdev->mman.bo_global_ref.mem_glob =
80 qdev->mman.mem_global_ref.object;
81 global_ref = &qdev->mman.bo_global_ref.ref;
82 global_ref->global_type = DRM_GLOBAL_TTM_BO;
83 global_ref->size = sizeof(struct ttm_bo_global);
84 global_ref->init = &ttm_bo_global_init;
85 global_ref->release = &ttm_bo_global_release;
86 r = drm_global_item_ref(global_ref);
87 if (r != 0) {
88 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
89 drm_global_item_unref(&qdev->mman.mem_global_ref);
90 return r;
91 }
92
93 qdev->mman.mem_global_referenced = true;
94 return 0;
95}
96
97static void qxl_ttm_global_fini(struct qxl_device *qdev)
98{
99 if (qdev->mman.mem_global_referenced) {
100 drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
101 drm_global_item_unref(&qdev->mman.mem_global_ref);
102 qdev->mman.mem_global_referenced = false;
103 }
104}
105
106static struct vm_operations_struct qxl_ttm_vm_ops;
107static const struct vm_operations_struct *ttm_vm_ops;
108
109static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
110{
111 struct ttm_buffer_object *bo;
112 struct qxl_device *qdev;
113 int r;
114
115 bo = (struct ttm_buffer_object *)vma->vm_private_data;
116 if (bo == NULL)
117 return VM_FAULT_NOPAGE;
118 qdev = qxl_get_qdev(bo->bdev);
119 r = ttm_vm_ops->fault(vma, vmf);
120 return r;
121}
122
123int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
124{
125 struct drm_file *file_priv;
126 struct qxl_device *qdev;
127 int r;
128
129 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
130 pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
131 __func__, vma->vm_pgoff);
132 return drm_mmap(filp, vma);
133 }
134
135 file_priv = filp->private_data;
136 qdev = file_priv->minor->dev->dev_private;
137 if (qdev == NULL) {
138 DRM_ERROR(
139 "filp->private_data->minor->dev->dev_private == NULL\n");
140 return -EINVAL;
141 }
142 QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
143 __func__, filp->private_data, vma->vm_pgoff);
144
145 r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
146 if (unlikely(r != 0))
147 return r;
148 if (unlikely(ttm_vm_ops == NULL)) {
149 ttm_vm_ops = vma->vm_ops;
150 qxl_ttm_vm_ops = *ttm_vm_ops;
151 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
152 }
153 vma->vm_ops = &qxl_ttm_vm_ops;
154 return 0;
155}
156
157static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
158{
159 return 0;
160}
161
162static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
163 struct ttm_mem_type_manager *man)
164{
165 struct qxl_device *qdev;
166
167 qdev = qxl_get_qdev(bdev);
168
169 switch (type) {
170 case TTM_PL_SYSTEM:
171 /* System memory */
172 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
173 man->available_caching = TTM_PL_MASK_CACHING;
174 man->default_caching = TTM_PL_FLAG_CACHED;
175 break;
176 case TTM_PL_VRAM:
177 case TTM_PL_PRIV0:
178 /* "On-card" video ram */
179 man->func = &ttm_bo_manager_func;
180 man->gpu_offset = 0;
181 man->flags = TTM_MEMTYPE_FLAG_FIXED |
182 TTM_MEMTYPE_FLAG_MAPPABLE;
183 man->available_caching = TTM_PL_MASK_CACHING;
184 man->default_caching = TTM_PL_FLAG_CACHED;
185 break;
186 default:
187 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
188 return -EINVAL;
189 }
190 return 0;
191}
192
193static void qxl_evict_flags(struct ttm_buffer_object *bo,
194 struct ttm_placement *placement)
195{
196 struct qxl_bo *qbo;
197 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
198
199 if (!qxl_ttm_bo_is_qxl_bo(bo)) {
200 placement->fpfn = 0;
201 placement->lpfn = 0;
202 placement->placement = &placements;
203 placement->busy_placement = &placements;
204 placement->num_placement = 1;
205 placement->num_busy_placement = 1;
206 return;
207 }
208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
210 *placement = qbo->placement;
211}
212
213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
214{
215 return 0;
216}
217
218static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
219 struct ttm_mem_reg *mem)
220{
221 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
222 struct qxl_device *qdev = qxl_get_qdev(bdev);
223
224 mem->bus.addr = NULL;
225 mem->bus.offset = 0;
226 mem->bus.size = mem->num_pages << PAGE_SHIFT;
227 mem->bus.base = 0;
228 mem->bus.is_iomem = false;
229 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
230 return -EINVAL;
231 switch (mem->mem_type) {
232 case TTM_PL_SYSTEM:
233 /* system memory */
234 return 0;
235 case TTM_PL_VRAM:
236 mem->bus.is_iomem = true;
237 mem->bus.base = qdev->vram_base;
238 mem->bus.offset = mem->start << PAGE_SHIFT;
239 break;
240 case TTM_PL_PRIV0:
241 mem->bus.is_iomem = true;
242 mem->bus.base = qdev->surfaceram_base;
243 mem->bus.offset = mem->start << PAGE_SHIFT;
244 break;
245 default:
246 return -EINVAL;
247 }
248 return 0;
249}
250
251static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
252 struct ttm_mem_reg *mem)
253{
254}
255
256/*
257 * TTM backend functions.
258 */
259struct qxl_ttm_tt {
260 struct ttm_dma_tt ttm;
261 struct qxl_device *qdev;
262 u64 offset;
263};
264
265static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
266 struct ttm_mem_reg *bo_mem)
267{
268 struct qxl_ttm_tt *gtt = (void *)ttm;
269
270 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
271 if (!ttm->num_pages) {
272 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
273 ttm->num_pages, bo_mem, ttm);
274 }
275 /* Not implemented */
276 return -1;
277}
278
279static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
280{
281 /* Not implemented */
282 return -1;
283}
284
285static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
286{
287 struct qxl_ttm_tt *gtt = (void *)ttm;
288
289 ttm_dma_tt_fini(&gtt->ttm);
290 kfree(gtt);
291}
292
293static struct ttm_backend_func qxl_backend_func = {
294 .bind = &qxl_ttm_backend_bind,
295 .unbind = &qxl_ttm_backend_unbind,
296 .destroy = &qxl_ttm_backend_destroy,
297};
298
299static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
300{
301 int r;
302
303 if (ttm->state != tt_unpopulated)
304 return 0;
305
306 r = ttm_pool_populate(ttm);
307 if (r)
308 return r;
309
310 return 0;
311}
312
313static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
314{
315 ttm_pool_unpopulate(ttm);
316}
317
318static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
319 unsigned long size, uint32_t page_flags,
320 struct page *dummy_read_page)
321{
322 struct qxl_device *qdev;
323 struct qxl_ttm_tt *gtt;
324
325 qdev = qxl_get_qdev(bdev);
326 gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
327 if (gtt == NULL)
328 return NULL;
329 gtt->ttm.ttm.func = &qxl_backend_func;
330 gtt->qdev = qdev;
331 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
332 dummy_read_page)) {
333 kfree(gtt);
334 return NULL;
335 }
336 return &gtt->ttm.ttm;
337}
338
339static void qxl_move_null(struct ttm_buffer_object *bo,
340 struct ttm_mem_reg *new_mem)
341{
342 struct ttm_mem_reg *old_mem = &bo->mem;
343
344 BUG_ON(old_mem->mm_node != NULL);
345 *old_mem = *new_mem;
346 new_mem->mm_node = NULL;
347}
348
349static int qxl_bo_move(struct ttm_buffer_object *bo,
350 bool evict, bool interruptible,
351 bool no_wait_gpu,
352 struct ttm_mem_reg *new_mem)
353{
354 struct ttm_mem_reg *old_mem = &bo->mem;
355 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
356 qxl_move_null(bo, new_mem);
357 return 0;
358 }
359 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
360}
361
362
363static int qxl_sync_obj_wait(void *sync_obj,
364 bool lazy, bool interruptible)
365{
366 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
367 int count = 0, sc = 0;
368 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
369
370 if (qfence->num_active_releases == 0)
371 return 0;
372
373retry:
374 if (sc == 0) {
375 if (bo->type == QXL_GEM_DOMAIN_SURFACE)
376 qxl_update_surface(qfence->qdev, bo);
377 } else if (sc >= 1) {
378 qxl_io_notify_oom(qfence->qdev);
379 }
380
381 sc++;
382
383 for (count = 0; count < 10; count++) {
384 bool ret;
385 ret = qxl_queue_garbage_collect(qfence->qdev, true);
386 if (ret == false)
387 break;
388
389 if (qfence->num_active_releases == 0)
390 return 0;
391 }
392
393 if (qfence->num_active_releases) {
394 bool have_drawable_releases = false;
395 void **slot;
396 struct radix_tree_iter iter;
397 int release_id;
398
399 radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
400 struct qxl_release *release;
401
402 release_id = iter.index;
403 release = qxl_release_from_id_locked(qfence->qdev, release_id);
404 if (release == NULL)
405 continue;
406
407 if (release->type == QXL_RELEASE_DRAWABLE)
408 have_drawable_releases = true;
409 }
410
411 qxl_queue_garbage_collect(qfence->qdev, true);
412
413 if (have_drawable_releases || sc < 4) {
414 if (sc > 2)
415 /* back off */
416 usleep_range(500, 1000);
417 if (have_drawable_releases && sc > 300) {
418 WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
419 return -EBUSY;
420 }
421 goto retry;
422 }
423 }
424 return 0;
425}
426
427static int qxl_sync_obj_flush(void *sync_obj)
428{
429 return 0;
430}
431
432static void qxl_sync_obj_unref(void **sync_obj)
433{
434}
435
436static void *qxl_sync_obj_ref(void *sync_obj)
437{
438 return sync_obj;
439}
440
441static bool qxl_sync_obj_signaled(void *sync_obj)
442{
443 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
444 return (qfence->num_active_releases == 0);
445}
446
447static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
448 struct ttm_mem_reg *new_mem)
449{
450 struct qxl_bo *qbo;
451 struct qxl_device *qdev;
452
453 if (!qxl_ttm_bo_is_qxl_bo(bo))
454 return;
455 qbo = container_of(bo, struct qxl_bo, tbo);
456 qdev = qbo->gem_base.dev->dev_private;
457
458 if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
459 qxl_surface_evict(qdev, qbo, new_mem ? true : false);
460}
461
462static struct ttm_bo_driver qxl_bo_driver = {
463 .ttm_tt_create = &qxl_ttm_tt_create,
464 .ttm_tt_populate = &qxl_ttm_tt_populate,
465 .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
466 .invalidate_caches = &qxl_invalidate_caches,
467 .init_mem_type = &qxl_init_mem_type,
468 .evict_flags = &qxl_evict_flags,
469 .move = &qxl_bo_move,
470 .verify_access = &qxl_verify_access,
471 .io_mem_reserve = &qxl_ttm_io_mem_reserve,
472 .io_mem_free = &qxl_ttm_io_mem_free,
473 .sync_obj_signaled = &qxl_sync_obj_signaled,
474 .sync_obj_wait = &qxl_sync_obj_wait,
475 .sync_obj_flush = &qxl_sync_obj_flush,
476 .sync_obj_unref = &qxl_sync_obj_unref,
477 .sync_obj_ref = &qxl_sync_obj_ref,
478 .move_notify = &qxl_bo_move_notify,
479};
480
481
482
483int qxl_ttm_init(struct qxl_device *qdev)
484{
485 int r;
486 int num_io_pages; /* != rom->num_io_pages, we include surface0 */
487
488 r = qxl_ttm_global_init(qdev);
489 if (r)
490 return r;
491	/* No other users of the address space, so set it to 0 */
492 r = ttm_bo_device_init(&qdev->mman.bdev,
493 qdev->mman.bo_global_ref.ref.object,
494 &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
495 if (r) {
496 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
497 return r;
498 }
499 /* NOTE: this includes the framebuffer (aka surface 0) */
500 num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
501 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
502 num_io_pages);
503 if (r) {
504 DRM_ERROR("Failed initializing VRAM heap.\n");
505 return r;
506 }
507 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
508 qdev->surfaceram_size / PAGE_SIZE);
509 if (r) {
510 DRM_ERROR("Failed initializing Surfaces heap.\n");
511 return r;
512 }
513 DRM_INFO("qxl: %uM of VRAM memory size\n",
514 (unsigned)qdev->vram_size / (1024 * 1024));
515 DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
516 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
517 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
518 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
519 r = qxl_ttm_debugfs_init(qdev);
520 if (r) {
521 DRM_ERROR("Failed to init debugfs\n");
522 return r;
523 }
524 return 0;
525}
526
527void qxl_ttm_fini(struct qxl_device *qdev)
528{
529 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
530 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
531 ttm_bo_device_release(&qdev->mman.bdev);
532 qxl_ttm_global_fini(qdev);
533 DRM_INFO("qxl: ttm finalized\n");
534}
535
536
537#define QXL_DEBUGFS_MEM_TYPES 2
538
539#if defined(CONFIG_DEBUG_FS)
540static int qxl_mm_dump_table(struct seq_file *m, void *data)
541{
542 struct drm_info_node *node = (struct drm_info_node *)m->private;
543 struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
544 struct drm_device *dev = node->minor->dev;
545 struct qxl_device *rdev = dev->dev_private;
546 int ret;
547 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
548
549 spin_lock(&glob->lru_lock);
550 ret = drm_mm_dump_table(m, mm);
551 spin_unlock(&glob->lru_lock);
552 return ret;
553}
554#endif
555
556static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
557{
558#if defined(CONFIG_DEBUG_FS)
559 static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
560 static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
561 unsigned i;
562
563 for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
564 if (i == 0)
565 sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
566 else
567 sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
568 qxl_mem_types_list[i].name = qxl_mem_types_names[i];
569 qxl_mem_types_list[i].show = &qxl_mm_dump_table;
570 qxl_mem_types_list[i].driver_features = 0;
571 if (i == 0)
572 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
573 else
574 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
575
576 }
577 return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
578#else
579 return 0;
580#endif
581}
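
A note on the qxl_mmap()/qxl_ttm_fault() pair in the new file above: qxl lets ttm_bo_mmap() install TTM's vm_ops on the vma, then clones that table once, swaps in its own fault callback, and points the vma at the clone. Below is a minimal sketch of that interception pattern; it is kernel-flavoured C, with provider_mmap() standing in for ttm_bo_mmap() and the vm types assumed from <linux/mm.h>:

/* Sketch only: wrap a provider's vm_ops so a driver fault hook runs first. */
static struct vm_operations_struct wrapped_vm_ops;      /* mutable copy */
static const struct vm_operations_struct *orig_vm_ops;  /* provider's table */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* driver-specific bookkeeping would go here */
	return orig_vm_ops->fault(vma, vmf);            /* then delegate */
}

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r = provider_mmap(filp, vma);               /* fills vma->vm_ops */

	if (r)
		return r;
	if (orig_vm_ops == NULL) {                      /* clone the table once */
		orig_vm_ops = vma->vm_ops;
		wrapped_vm_ops = *orig_vm_ops;
		wrapped_vm_ops.fault = my_fault;
	}
	vma->vm_ops = &wrapped_vm_ops;                  /* vma now uses the clone */
	return 0;
}
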
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index bf172522ea68..86c5e3611892 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
 	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
 	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-	si_blit_shaders.o radeon_prime.o
+	si_blit_shaders.o radeon_prime.o radeon_uvd.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c3772850..fb441a790f3d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
 		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
 		DRM_DEBUG("atom firmware requested %08x %dkb\n",
-			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
 
-		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
 	}
 	ctx->scratch_size_bytes = 0;
 	if (usage_bytes == 0)
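
The atom.c hunk above is an endianness fix: ATOM BIOS tables are stored little-endian, so big-endian hosts must byte-swap each field before use, which is what the added le16_to_cpu()/le32_to_cpu() wrappers do. A small self-contained illustration of the same idea, using the glibc le16toh()/le32toh() helpers in place of the kernel's and a hypothetical table struct:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical on-disk layout: fields stored little-endian */
struct fw_reserve_info {
	uint32_t start_addr_le;
	uint16_t use_in_kb_le;
};

static uint32_t reserve_bytes(const struct fw_reserve_info *info)
{
	/* swap to host order first: a no-op on LE hosts, a byte swap on BE */
	return (uint32_t)le16toh(info->use_in_kb_le) * 1024;
}

int main(void)
{
	struct fw_reserve_info info = { htole32(0x1000), htole16(512) };

	printf("firmware reserves %u bytes at 0x%x\n",
	       reserve_bytes(&info), le32toh(info.start_addr_le));
	return 0;
}
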
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 4b04ba3828e8..0ee573743de9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
458	union
459	{
460		ATOM_COMPUTE_CLOCK_FREQ  ulClock;	//Input Parameter
461 ULONG ulClockParams; //ULONG access for BE
462		ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;	//Output Parameter
463	};
464	UCHAR   ucRefDiv;				//Output Parameter
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
491	union
492	{
493		ATOM_COMPUTE_CLOCK_FREQ  ulClock;	//Input Parameter
494 ULONG ulClockParams; //ULONG access for BE
495		ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;	//Output Parameter
496	};
497	UCHAR   ucRefDiv;				//Output Parameter
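
The ulClockParams member added to both unions above gives big-endian code a single 32-bit view of the packed clock parameters, so the whole word can be byte-swapped in one operation rather than per sub-field. Roughly the idea, assuming (as ATOM does for ATOM_COMPUTE_CLOCK_FREQ) a 24-bit clock value packed with an 8-bit flag; the helper names here are made up:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

union clock_params {
	/* the struct view would sit here; the u32 view aliases all of it */
	uint32_t word;                  /* little-endian in the BIOS table */
};

static uint32_t clock_10khz(union clock_params p)
{
	uint32_t w = le32toh(p.word);   /* one swap covers every sub-field */

	return w & 0x00ffffff;          /* low 24 bits: clock in 10 kHz units */
}

static uint8_t clock_flags(union clock_params p)
{
	return (uint8_t)(le32toh(p.word) >> 24);  /* high byte: flags */
}

int main(void)
{
	union clock_params p = { .word = htole32((3u << 24) | 40000) };

	printf("clock %u0 kHz, flags 0x%x\n", clock_10khz(p), clock_flags(p));
	return 0;
}
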
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c6ab9c..6d6fdb3ba0d0 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
557		/* use frac fb div on APUs */
558		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
559			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
560 /* use frac fb div on RS780/RS880 */
561 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
562 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
563		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
564			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
565	} else {
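
For context on why a fractional feedback divider helps here: the PLL output is roughly f_out = f_ref * fb_div / (ref_div * post_div), so with an integer fb_div the achievable frequencies are coarsely spaced. With, say, a 27 MHz reference, ref_div = 2 and post_div = 4, each integer step of fb_div moves f_out by 27/8 = 3.375 MHz, while tenth-steps of a fractional divider move it by only 0.3375 MHz, letting the PLL land much closer to the requested pixel clock. The example numbers are illustrative, not RS780-specific.
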
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4552d4aff317..44a7da66e081 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2150,13 +2150,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
-		r600_hdmi_enable(encoder);
-		if (ASIC_IS_DCE6(rdev))
-			; /* TODO (use pointers instead of if-s?) */
-		else if (ASIC_IS_DCE4(rdev))
-			evergreen_hdmi_setmode(encoder, adjusted_mode);
-		else
-			r600_hdmi_setmode(encoder, adjusted_mode);
+		if (rdev->asic->display.hdmi_enable)
+			radeon_hdmi_enable(rdev, encoder, true);
+		if (rdev->asic->display.hdmi_setmode)
+			radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
 	}
 }
 
@@ -2413,8 +2410,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 
 disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
-		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-			r600_hdmi_disable(encoder);
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+			if (rdev->asic->display.hdmi_enable)
+				radeon_hdmi_enable(rdev, encoder, false);
+		}
 		dig = radeon_encoder->enc_priv;
 		dig->dig_encoder = -1;
 	}
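
The two encoder hunks above replace chip-family if/else ladders with calls through per-ASIC function pointers (rdev->asic->display.hdmi_enable and .hdmi_setmode), with a NULL pointer meaning the generation has no HDMI support. A reduced sketch of the dispatch-table pattern, with invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct encoder;                     /* opaque in this sketch */

struct display_funcs {
	/* NULL means "not supported on this generation" */
	void (*hdmi_enable)(struct encoder *enc, bool enable);
};

static void r600_hdmi_enable_impl(struct encoder *enc, bool enable)
{
	(void)enc;
	printf("r600: hdmi %s\n", enable ? "on" : "off");
}

/* each ASIC generation fills in its own table once, at init time */
static const struct display_funcs r600_display = {
	.hdmi_enable = r600_hdmi_enable_impl,
};

static void hdmi_enable(const struct display_funcs *funcs,
			struct encoder *enc, bool enable)
{
	if (funcs->hdmi_enable)     /* guard, exactly as the patch does */
		funcs->hdmi_enable(enc, enable);
}

int main(void)
{
	hdmi_enable(&r600_display, NULL, true);
	return 0;
}
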
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657bf215..105bafb6c29d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -53,6 +53,864 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
53extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
54				       int ring, u32 cp_int_cntl);
55
56static const u32 evergreen_golden_registers[] =
57{
58 0x3f90, 0xffff0000, 0xff000000,
59 0x9148, 0xffff0000, 0xff000000,
60 0x3f94, 0xffff0000, 0xff000000,
61 0x914c, 0xffff0000, 0xff000000,
62 0x9b7c, 0xffffffff, 0x00000000,
63 0x8a14, 0xffffffff, 0x00000007,
64 0x8b10, 0xffffffff, 0x00000000,
65 0x960c, 0xffffffff, 0x54763210,
66 0x88c4, 0xffffffff, 0x000000c2,
67 0x88d4, 0xffffffff, 0x00000010,
68 0x8974, 0xffffffff, 0x00000000,
69 0xc78, 0x00000080, 0x00000080,
70 0x5eb4, 0xffffffff, 0x00000002,
71 0x5e78, 0xffffffff, 0x001000f0,
72 0x6104, 0x01000300, 0x00000000,
73 0x5bc0, 0x00300000, 0x00000000,
74 0x7030, 0xffffffff, 0x00000011,
75 0x7c30, 0xffffffff, 0x00000011,
76 0x10830, 0xffffffff, 0x00000011,
77 0x11430, 0xffffffff, 0x00000011,
78 0x12030, 0xffffffff, 0x00000011,
79 0x12c30, 0xffffffff, 0x00000011,
80 0xd02c, 0xffffffff, 0x08421000,
81 0x240c, 0xffffffff, 0x00000380,
82 0x8b24, 0xffffffff, 0x00ff0fff,
83 0x28a4c, 0x06000000, 0x06000000,
84 0x10c, 0x00000001, 0x00000001,
85 0x8d00, 0xffffffff, 0x100e4848,
86 0x8d04, 0xffffffff, 0x00164745,
87 0x8c00, 0xffffffff, 0xe4000003,
88 0x8c04, 0xffffffff, 0x40600060,
89 0x8c08, 0xffffffff, 0x001c001c,
90 0x8cf0, 0xffffffff, 0x08e00620,
91 0x8c20, 0xffffffff, 0x00800080,
92 0x8c24, 0xffffffff, 0x00800080,
93 0x8c18, 0xffffffff, 0x20202078,
94 0x8c1c, 0xffffffff, 0x00001010,
95 0x28350, 0xffffffff, 0x00000000,
96 0xa008, 0xffffffff, 0x00010000,
97 0x5cc, 0xffffffff, 0x00000001,
98 0x9508, 0xffffffff, 0x00000002,
99 0x913c, 0x0000000f, 0x0000000a
100};
101
102static const u32 evergreen_golden_registers2[] =
103{
104 0x2f4c, 0xffffffff, 0x00000000,
105 0x54f4, 0xffffffff, 0x00000000,
106 0x54f0, 0xffffffff, 0x00000000,
107 0x5498, 0xffffffff, 0x00000000,
108 0x549c, 0xffffffff, 0x00000000,
109 0x5494, 0xffffffff, 0x00000000,
110 0x53cc, 0xffffffff, 0x00000000,
111 0x53c8, 0xffffffff, 0x00000000,
112 0x53c4, 0xffffffff, 0x00000000,
113 0x53c0, 0xffffffff, 0x00000000,
114 0x53bc, 0xffffffff, 0x00000000,
115 0x53b8, 0xffffffff, 0x00000000,
116 0x53b4, 0xffffffff, 0x00000000,
117 0x53b0, 0xffffffff, 0x00000000
118};
119
120static const u32 cypress_mgcg_init[] =
121{
122 0x802c, 0xffffffff, 0xc0000000,
123 0x5448, 0xffffffff, 0x00000100,
124 0x55e4, 0xffffffff, 0x00000100,
125 0x160c, 0xffffffff, 0x00000100,
126 0x5644, 0xffffffff, 0x00000100,
127 0xc164, 0xffffffff, 0x00000100,
128 0x8a18, 0xffffffff, 0x00000100,
129 0x897c, 0xffffffff, 0x06000100,
130 0x8b28, 0xffffffff, 0x00000100,
131 0x9144, 0xffffffff, 0x00000100,
132 0x9a60, 0xffffffff, 0x00000100,
133 0x9868, 0xffffffff, 0x00000100,
134 0x8d58, 0xffffffff, 0x00000100,
135 0x9510, 0xffffffff, 0x00000100,
136 0x949c, 0xffffffff, 0x00000100,
137 0x9654, 0xffffffff, 0x00000100,
138 0x9030, 0xffffffff, 0x00000100,
139 0x9034, 0xffffffff, 0x00000100,
140 0x9038, 0xffffffff, 0x00000100,
141 0x903c, 0xffffffff, 0x00000100,
142 0x9040, 0xffffffff, 0x00000100,
143 0xa200, 0xffffffff, 0x00000100,
144 0xa204, 0xffffffff, 0x00000100,
145 0xa208, 0xffffffff, 0x00000100,
146 0xa20c, 0xffffffff, 0x00000100,
147 0x971c, 0xffffffff, 0x00000100,
148 0x977c, 0xffffffff, 0x00000100,
149 0x3f80, 0xffffffff, 0x00000100,
150 0xa210, 0xffffffff, 0x00000100,
151 0xa214, 0xffffffff, 0x00000100,
152 0x4d8, 0xffffffff, 0x00000100,
153 0x9784, 0xffffffff, 0x00000100,
154 0x9698, 0xffffffff, 0x00000100,
155 0x4d4, 0xffffffff, 0x00000200,
156 0x30cc, 0xffffffff, 0x00000100,
157 0xd0c0, 0xffffffff, 0xff000100,
158 0x802c, 0xffffffff, 0x40000000,
159 0x915c, 0xffffffff, 0x00010000,
160 0x9160, 0xffffffff, 0x00030002,
161 0x9178, 0xffffffff, 0x00070000,
162 0x917c, 0xffffffff, 0x00030002,
163 0x9180, 0xffffffff, 0x00050004,
164 0x918c, 0xffffffff, 0x00010006,
165 0x9190, 0xffffffff, 0x00090008,
166 0x9194, 0xffffffff, 0x00070000,
167 0x9198, 0xffffffff, 0x00030002,
168 0x919c, 0xffffffff, 0x00050004,
169 0x91a8, 0xffffffff, 0x00010006,
170 0x91ac, 0xffffffff, 0x00090008,
171 0x91b0, 0xffffffff, 0x00070000,
172 0x91b4, 0xffffffff, 0x00030002,
173 0x91b8, 0xffffffff, 0x00050004,
174 0x91c4, 0xffffffff, 0x00010006,
175 0x91c8, 0xffffffff, 0x00090008,
176 0x91cc, 0xffffffff, 0x00070000,
177 0x91d0, 0xffffffff, 0x00030002,
178 0x91d4, 0xffffffff, 0x00050004,
179 0x91e0, 0xffffffff, 0x00010006,
180 0x91e4, 0xffffffff, 0x00090008,
181 0x91e8, 0xffffffff, 0x00000000,
182 0x91ec, 0xffffffff, 0x00070000,
183 0x91f0, 0xffffffff, 0x00030002,
184 0x91f4, 0xffffffff, 0x00050004,
185 0x9200, 0xffffffff, 0x00010006,
186 0x9204, 0xffffffff, 0x00090008,
187 0x9208, 0xffffffff, 0x00070000,
188 0x920c, 0xffffffff, 0x00030002,
189 0x9210, 0xffffffff, 0x00050004,
190 0x921c, 0xffffffff, 0x00010006,
191 0x9220, 0xffffffff, 0x00090008,
192 0x9224, 0xffffffff, 0x00070000,
193 0x9228, 0xffffffff, 0x00030002,
194 0x922c, 0xffffffff, 0x00050004,
195 0x9238, 0xffffffff, 0x00010006,
196 0x923c, 0xffffffff, 0x00090008,
197 0x9240, 0xffffffff, 0x00070000,
198 0x9244, 0xffffffff, 0x00030002,
199 0x9248, 0xffffffff, 0x00050004,
200 0x9254, 0xffffffff, 0x00010006,
201 0x9258, 0xffffffff, 0x00090008,
202 0x925c, 0xffffffff, 0x00070000,
203 0x9260, 0xffffffff, 0x00030002,
204 0x9264, 0xffffffff, 0x00050004,
205 0x9270, 0xffffffff, 0x00010006,
206 0x9274, 0xffffffff, 0x00090008,
207 0x9278, 0xffffffff, 0x00070000,
208 0x927c, 0xffffffff, 0x00030002,
209 0x9280, 0xffffffff, 0x00050004,
210 0x928c, 0xffffffff, 0x00010006,
211 0x9290, 0xffffffff, 0x00090008,
212 0x9294, 0xffffffff, 0x00000000,
213 0x929c, 0xffffffff, 0x00000001,
214 0x802c, 0xffffffff, 0x40010000,
215 0x915c, 0xffffffff, 0x00010000,
216 0x9160, 0xffffffff, 0x00030002,
217 0x9178, 0xffffffff, 0x00070000,
218 0x917c, 0xffffffff, 0x00030002,
219 0x9180, 0xffffffff, 0x00050004,
220 0x918c, 0xffffffff, 0x00010006,
221 0x9190, 0xffffffff, 0x00090008,
222 0x9194, 0xffffffff, 0x00070000,
223 0x9198, 0xffffffff, 0x00030002,
224 0x919c, 0xffffffff, 0x00050004,
225 0x91a8, 0xffffffff, 0x00010006,
226 0x91ac, 0xffffffff, 0x00090008,
227 0x91b0, 0xffffffff, 0x00070000,
228 0x91b4, 0xffffffff, 0x00030002,
229 0x91b8, 0xffffffff, 0x00050004,
230 0x91c4, 0xffffffff, 0x00010006,
231 0x91c8, 0xffffffff, 0x00090008,
232 0x91cc, 0xffffffff, 0x00070000,
233 0x91d0, 0xffffffff, 0x00030002,
234 0x91d4, 0xffffffff, 0x00050004,
235 0x91e0, 0xffffffff, 0x00010006,
236 0x91e4, 0xffffffff, 0x00090008,
237 0x91e8, 0xffffffff, 0x00000000,
238 0x91ec, 0xffffffff, 0x00070000,
239 0x91f0, 0xffffffff, 0x00030002,
240 0x91f4, 0xffffffff, 0x00050004,
241 0x9200, 0xffffffff, 0x00010006,
242 0x9204, 0xffffffff, 0x00090008,
243 0x9208, 0xffffffff, 0x00070000,
244 0x920c, 0xffffffff, 0x00030002,
245 0x9210, 0xffffffff, 0x00050004,
246 0x921c, 0xffffffff, 0x00010006,
247 0x9220, 0xffffffff, 0x00090008,
248 0x9224, 0xffffffff, 0x00070000,
249 0x9228, 0xffffffff, 0x00030002,
250 0x922c, 0xffffffff, 0x00050004,
251 0x9238, 0xffffffff, 0x00010006,
252 0x923c, 0xffffffff, 0x00090008,
253 0x9240, 0xffffffff, 0x00070000,
254 0x9244, 0xffffffff, 0x00030002,
255 0x9248, 0xffffffff, 0x00050004,
256 0x9254, 0xffffffff, 0x00010006,
257 0x9258, 0xffffffff, 0x00090008,
258 0x925c, 0xffffffff, 0x00070000,
259 0x9260, 0xffffffff, 0x00030002,
260 0x9264, 0xffffffff, 0x00050004,
261 0x9270, 0xffffffff, 0x00010006,
262 0x9274, 0xffffffff, 0x00090008,
263 0x9278, 0xffffffff, 0x00070000,
264 0x927c, 0xffffffff, 0x00030002,
265 0x9280, 0xffffffff, 0x00050004,
266 0x928c, 0xffffffff, 0x00010006,
267 0x9290, 0xffffffff, 0x00090008,
268 0x9294, 0xffffffff, 0x00000000,
269 0x929c, 0xffffffff, 0x00000001,
270 0x802c, 0xffffffff, 0xc0000000
271};
272
273static const u32 redwood_mgcg_init[] =
274{
275 0x802c, 0xffffffff, 0xc0000000,
276 0x5448, 0xffffffff, 0x00000100,
277 0x55e4, 0xffffffff, 0x00000100,
278 0x160c, 0xffffffff, 0x00000100,
279 0x5644, 0xffffffff, 0x00000100,
280 0xc164, 0xffffffff, 0x00000100,
281 0x8a18, 0xffffffff, 0x00000100,
282 0x897c, 0xffffffff, 0x06000100,
283 0x8b28, 0xffffffff, 0x00000100,
284 0x9144, 0xffffffff, 0x00000100,
285 0x9a60, 0xffffffff, 0x00000100,
286 0x9868, 0xffffffff, 0x00000100,
287 0x8d58, 0xffffffff, 0x00000100,
288 0x9510, 0xffffffff, 0x00000100,
289 0x949c, 0xffffffff, 0x00000100,
290 0x9654, 0xffffffff, 0x00000100,
291 0x9030, 0xffffffff, 0x00000100,
292 0x9034, 0xffffffff, 0x00000100,
293 0x9038, 0xffffffff, 0x00000100,
294 0x903c, 0xffffffff, 0x00000100,
295 0x9040, 0xffffffff, 0x00000100,
296 0xa200, 0xffffffff, 0x00000100,
297 0xa204, 0xffffffff, 0x00000100,
298 0xa208, 0xffffffff, 0x00000100,
299 0xa20c, 0xffffffff, 0x00000100,
300 0x971c, 0xffffffff, 0x00000100,
301 0x977c, 0xffffffff, 0x00000100,
302 0x3f80, 0xffffffff, 0x00000100,
303 0xa210, 0xffffffff, 0x00000100,
304 0xa214, 0xffffffff, 0x00000100,
305 0x4d8, 0xffffffff, 0x00000100,
306 0x9784, 0xffffffff, 0x00000100,
307 0x9698, 0xffffffff, 0x00000100,
308 0x4d4, 0xffffffff, 0x00000200,
309 0x30cc, 0xffffffff, 0x00000100,
310 0xd0c0, 0xffffffff, 0xff000100,
311 0x802c, 0xffffffff, 0x40000000,
312 0x915c, 0xffffffff, 0x00010000,
313 0x9160, 0xffffffff, 0x00030002,
314 0x9178, 0xffffffff, 0x00070000,
315 0x917c, 0xffffffff, 0x00030002,
316 0x9180, 0xffffffff, 0x00050004,
317 0x918c, 0xffffffff, 0x00010006,
318 0x9190, 0xffffffff, 0x00090008,
319 0x9194, 0xffffffff, 0x00070000,
320 0x9198, 0xffffffff, 0x00030002,
321 0x919c, 0xffffffff, 0x00050004,
322 0x91a8, 0xffffffff, 0x00010006,
323 0x91ac, 0xffffffff, 0x00090008,
324 0x91b0, 0xffffffff, 0x00070000,
325 0x91b4, 0xffffffff, 0x00030002,
326 0x91b8, 0xffffffff, 0x00050004,
327 0x91c4, 0xffffffff, 0x00010006,
328 0x91c8, 0xffffffff, 0x00090008,
329 0x91cc, 0xffffffff, 0x00070000,
330 0x91d0, 0xffffffff, 0x00030002,
331 0x91d4, 0xffffffff, 0x00050004,
332 0x91e0, 0xffffffff, 0x00010006,
333 0x91e4, 0xffffffff, 0x00090008,
334 0x91e8, 0xffffffff, 0x00000000,
335 0x91ec, 0xffffffff, 0x00070000,
336 0x91f0, 0xffffffff, 0x00030002,
337 0x91f4, 0xffffffff, 0x00050004,
338 0x9200, 0xffffffff, 0x00010006,
339 0x9204, 0xffffffff, 0x00090008,
340 0x9294, 0xffffffff, 0x00000000,
341 0x929c, 0xffffffff, 0x00000001,
342 0x802c, 0xffffffff, 0xc0000000
343};
344
345static const u32 cedar_golden_registers[] =
346{
347 0x3f90, 0xffff0000, 0xff000000,
348 0x9148, 0xffff0000, 0xff000000,
349 0x3f94, 0xffff0000, 0xff000000,
350 0x914c, 0xffff0000, 0xff000000,
351 0x9b7c, 0xffffffff, 0x00000000,
352 0x8a14, 0xffffffff, 0x00000007,
353 0x8b10, 0xffffffff, 0x00000000,
354 0x960c, 0xffffffff, 0x54763210,
355 0x88c4, 0xffffffff, 0x000000c2,
356 0x88d4, 0xffffffff, 0x00000000,
357 0x8974, 0xffffffff, 0x00000000,
358 0xc78, 0x00000080, 0x00000080,
359 0x5eb4, 0xffffffff, 0x00000002,
360 0x5e78, 0xffffffff, 0x001000f0,
361 0x6104, 0x01000300, 0x00000000,
362 0x5bc0, 0x00300000, 0x00000000,
363 0x7030, 0xffffffff, 0x00000011,
364 0x7c30, 0xffffffff, 0x00000011,
365 0x10830, 0xffffffff, 0x00000011,
366 0x11430, 0xffffffff, 0x00000011,
367 0xd02c, 0xffffffff, 0x08421000,
368 0x240c, 0xffffffff, 0x00000380,
369 0x8b24, 0xffffffff, 0x00ff0fff,
370 0x28a4c, 0x06000000, 0x06000000,
371 0x10c, 0x00000001, 0x00000001,
372 0x8d00, 0xffffffff, 0x100e4848,
373 0x8d04, 0xffffffff, 0x00164745,
374 0x8c00, 0xffffffff, 0xe4000003,
375 0x8c04, 0xffffffff, 0x40600060,
376 0x8c08, 0xffffffff, 0x001c001c,
377 0x8cf0, 0xffffffff, 0x08e00410,
378 0x8c20, 0xffffffff, 0x00800080,
379 0x8c24, 0xffffffff, 0x00800080,
380 0x8c18, 0xffffffff, 0x20202078,
381 0x8c1c, 0xffffffff, 0x00001010,
382 0x28350, 0xffffffff, 0x00000000,
383 0xa008, 0xffffffff, 0x00010000,
384 0x5cc, 0xffffffff, 0x00000001,
385 0x9508, 0xffffffff, 0x00000002
386};
387
388static const u32 cedar_mgcg_init[] =
389{
390 0x802c, 0xffffffff, 0xc0000000,
391 0x5448, 0xffffffff, 0x00000100,
392 0x55e4, 0xffffffff, 0x00000100,
393 0x160c, 0xffffffff, 0x00000100,
394 0x5644, 0xffffffff, 0x00000100,
395 0xc164, 0xffffffff, 0x00000100,
396 0x8a18, 0xffffffff, 0x00000100,
397 0x897c, 0xffffffff, 0x06000100,
398 0x8b28, 0xffffffff, 0x00000100,
399 0x9144, 0xffffffff, 0x00000100,
400 0x9a60, 0xffffffff, 0x00000100,
401 0x9868, 0xffffffff, 0x00000100,
402 0x8d58, 0xffffffff, 0x00000100,
403 0x9510, 0xffffffff, 0x00000100,
404 0x949c, 0xffffffff, 0x00000100,
405 0x9654, 0xffffffff, 0x00000100,
406 0x9030, 0xffffffff, 0x00000100,
407 0x9034, 0xffffffff, 0x00000100,
408 0x9038, 0xffffffff, 0x00000100,
409 0x903c, 0xffffffff, 0x00000100,
410 0x9040, 0xffffffff, 0x00000100,
411 0xa200, 0xffffffff, 0x00000100,
412 0xa204, 0xffffffff, 0x00000100,
413 0xa208, 0xffffffff, 0x00000100,
414 0xa20c, 0xffffffff, 0x00000100,
415 0x971c, 0xffffffff, 0x00000100,
416 0x977c, 0xffffffff, 0x00000100,
417 0x3f80, 0xffffffff, 0x00000100,
418 0xa210, 0xffffffff, 0x00000100,
419 0xa214, 0xffffffff, 0x00000100,
420 0x4d8, 0xffffffff, 0x00000100,
421 0x9784, 0xffffffff, 0x00000100,
422 0x9698, 0xffffffff, 0x00000100,
423 0x4d4, 0xffffffff, 0x00000200,
424 0x30cc, 0xffffffff, 0x00000100,
425 0xd0c0, 0xffffffff, 0xff000100,
426 0x802c, 0xffffffff, 0x40000000,
427 0x915c, 0xffffffff, 0x00010000,
428 0x9178, 0xffffffff, 0x00050000,
429 0x917c, 0xffffffff, 0x00030002,
430 0x918c, 0xffffffff, 0x00010004,
431 0x9190, 0xffffffff, 0x00070006,
432 0x9194, 0xffffffff, 0x00050000,
433 0x9198, 0xffffffff, 0x00030002,
434 0x91a8, 0xffffffff, 0x00010004,
435 0x91ac, 0xffffffff, 0x00070006,
436 0x91e8, 0xffffffff, 0x00000000,
437 0x9294, 0xffffffff, 0x00000000,
438 0x929c, 0xffffffff, 0x00000001,
439 0x802c, 0xffffffff, 0xc0000000
440};
441
442static const u32 juniper_mgcg_init[] =
443{
444 0x802c, 0xffffffff, 0xc0000000,
445 0x5448, 0xffffffff, 0x00000100,
446 0x55e4, 0xffffffff, 0x00000100,
447 0x160c, 0xffffffff, 0x00000100,
448 0x5644, 0xffffffff, 0x00000100,
449 0xc164, 0xffffffff, 0x00000100,
450 0x8a18, 0xffffffff, 0x00000100,
451 0x897c, 0xffffffff, 0x06000100,
452 0x8b28, 0xffffffff, 0x00000100,
453 0x9144, 0xffffffff, 0x00000100,
454 0x9a60, 0xffffffff, 0x00000100,
455 0x9868, 0xffffffff, 0x00000100,
456 0x8d58, 0xffffffff, 0x00000100,
457 0x9510, 0xffffffff, 0x00000100,
458 0x949c, 0xffffffff, 0x00000100,
459 0x9654, 0xffffffff, 0x00000100,
460 0x9030, 0xffffffff, 0x00000100,
461 0x9034, 0xffffffff, 0x00000100,
462 0x9038, 0xffffffff, 0x00000100,
463 0x903c, 0xffffffff, 0x00000100,
464 0x9040, 0xffffffff, 0x00000100,
465 0xa200, 0xffffffff, 0x00000100,
466 0xa204, 0xffffffff, 0x00000100,
467 0xa208, 0xffffffff, 0x00000100,
468 0xa20c, 0xffffffff, 0x00000100,
469 0x971c, 0xffffffff, 0x00000100,
470 0xd0c0, 0xffffffff, 0xff000100,
471 0x802c, 0xffffffff, 0x40000000,
472 0x915c, 0xffffffff, 0x00010000,
473 0x9160, 0xffffffff, 0x00030002,
474 0x9178, 0xffffffff, 0x00070000,
475 0x917c, 0xffffffff, 0x00030002,
476 0x9180, 0xffffffff, 0x00050004,
477 0x918c, 0xffffffff, 0x00010006,
478 0x9190, 0xffffffff, 0x00090008,
479 0x9194, 0xffffffff, 0x00070000,
480 0x9198, 0xffffffff, 0x00030002,
481 0x919c, 0xffffffff, 0x00050004,
482 0x91a8, 0xffffffff, 0x00010006,
483 0x91ac, 0xffffffff, 0x00090008,
484 0x91b0, 0xffffffff, 0x00070000,
485 0x91b4, 0xffffffff, 0x00030002,
486 0x91b8, 0xffffffff, 0x00050004,
487 0x91c4, 0xffffffff, 0x00010006,
488 0x91c8, 0xffffffff, 0x00090008,
489 0x91cc, 0xffffffff, 0x00070000,
490 0x91d0, 0xffffffff, 0x00030002,
491 0x91d4, 0xffffffff, 0x00050004,
492 0x91e0, 0xffffffff, 0x00010006,
493 0x91e4, 0xffffffff, 0x00090008,
494 0x91e8, 0xffffffff, 0x00000000,
495 0x91ec, 0xffffffff, 0x00070000,
496 0x91f0, 0xffffffff, 0x00030002,
497 0x91f4, 0xffffffff, 0x00050004,
498 0x9200, 0xffffffff, 0x00010006,
499 0x9204, 0xffffffff, 0x00090008,
500 0x9208, 0xffffffff, 0x00070000,
501 0x920c, 0xffffffff, 0x00030002,
502 0x9210, 0xffffffff, 0x00050004,
503 0x921c, 0xffffffff, 0x00010006,
504 0x9220, 0xffffffff, 0x00090008,
505 0x9224, 0xffffffff, 0x00070000,
506 0x9228, 0xffffffff, 0x00030002,
507 0x922c, 0xffffffff, 0x00050004,
508 0x9238, 0xffffffff, 0x00010006,
509 0x923c, 0xffffffff, 0x00090008,
510 0x9240, 0xffffffff, 0x00070000,
511 0x9244, 0xffffffff, 0x00030002,
512 0x9248, 0xffffffff, 0x00050004,
513 0x9254, 0xffffffff, 0x00010006,
514 0x9258, 0xffffffff, 0x00090008,
515 0x925c, 0xffffffff, 0x00070000,
516 0x9260, 0xffffffff, 0x00030002,
517 0x9264, 0xffffffff, 0x00050004,
518 0x9270, 0xffffffff, 0x00010006,
519 0x9274, 0xffffffff, 0x00090008,
520 0x9278, 0xffffffff, 0x00070000,
521 0x927c, 0xffffffff, 0x00030002,
522 0x9280, 0xffffffff, 0x00050004,
523 0x928c, 0xffffffff, 0x00010006,
524 0x9290, 0xffffffff, 0x00090008,
525 0x9294, 0xffffffff, 0x00000000,
526 0x929c, 0xffffffff, 0x00000001,
527 0x802c, 0xffffffff, 0xc0000000,
528 0x977c, 0xffffffff, 0x00000100,
529 0x3f80, 0xffffffff, 0x00000100,
530 0xa210, 0xffffffff, 0x00000100,
531 0xa214, 0xffffffff, 0x00000100,
532 0x4d8, 0xffffffff, 0x00000100,
533 0x9784, 0xffffffff, 0x00000100,
534 0x9698, 0xffffffff, 0x00000100,
535 0x4d4, 0xffffffff, 0x00000200,
536 0x30cc, 0xffffffff, 0x00000100,
537 0x802c, 0xffffffff, 0xc0000000
538};
539
540static const u32 supersumo_golden_registers[] =
541{
542 0x5eb4, 0xffffffff, 0x00000002,
543 0x5cc, 0xffffffff, 0x00000001,
544 0x7030, 0xffffffff, 0x00000011,
545 0x7c30, 0xffffffff, 0x00000011,
546 0x6104, 0x01000300, 0x00000000,
547 0x5bc0, 0x00300000, 0x00000000,
548 0x8c04, 0xffffffff, 0x40600060,
549 0x8c08, 0xffffffff, 0x001c001c,
550 0x8c20, 0xffffffff, 0x00800080,
551 0x8c24, 0xffffffff, 0x00800080,
552 0x8c18, 0xffffffff, 0x20202078,
553 0x8c1c, 0xffffffff, 0x00001010,
554 0x918c, 0xffffffff, 0x00010006,
555 0x91a8, 0xffffffff, 0x00010006,
556 0x91c4, 0xffffffff, 0x00010006,
557 0x91e0, 0xffffffff, 0x00010006,
558 0x9200, 0xffffffff, 0x00010006,
559 0x9150, 0xffffffff, 0x6e944040,
560 0x917c, 0xffffffff, 0x00030002,
561 0x9180, 0xffffffff, 0x00050004,
562 0x9198, 0xffffffff, 0x00030002,
563 0x919c, 0xffffffff, 0x00050004,
564 0x91b4, 0xffffffff, 0x00030002,
565 0x91b8, 0xffffffff, 0x00050004,
566 0x91d0, 0xffffffff, 0x00030002,
567 0x91d4, 0xffffffff, 0x00050004,
568 0x91f0, 0xffffffff, 0x00030002,
569 0x91f4, 0xffffffff, 0x00050004,
570 0x915c, 0xffffffff, 0x00010000,
571 0x9160, 0xffffffff, 0x00030002,
572 0x3f90, 0xffff0000, 0xff000000,
573 0x9178, 0xffffffff, 0x00070000,
574 0x9194, 0xffffffff, 0x00070000,
575 0x91b0, 0xffffffff, 0x00070000,
576 0x91cc, 0xffffffff, 0x00070000,
577 0x91ec, 0xffffffff, 0x00070000,
578 0x9148, 0xffff0000, 0xff000000,
579 0x9190, 0xffffffff, 0x00090008,
580 0x91ac, 0xffffffff, 0x00090008,
581 0x91c8, 0xffffffff, 0x00090008,
582 0x91e4, 0xffffffff, 0x00090008,
583 0x9204, 0xffffffff, 0x00090008,
584 0x3f94, 0xffff0000, 0xff000000,
585 0x914c, 0xffff0000, 0xff000000,
586 0x929c, 0xffffffff, 0x00000001,
587 0x8a18, 0xffffffff, 0x00000100,
588 0x8b28, 0xffffffff, 0x00000100,
589 0x9144, 0xffffffff, 0x00000100,
590 0x5644, 0xffffffff, 0x00000100,
591 0x9b7c, 0xffffffff, 0x00000000,
592 0x8030, 0xffffffff, 0x0000100a,
593 0x8a14, 0xffffffff, 0x00000007,
594 0x8b24, 0xffffffff, 0x00ff0fff,
595 0x8b10, 0xffffffff, 0x00000000,
596 0x28a4c, 0x06000000, 0x06000000,
597 0x4d8, 0xffffffff, 0x00000100,
598 0x913c, 0xffff000f, 0x0100000a,
599 0x960c, 0xffffffff, 0x54763210,
600 0x88c4, 0xffffffff, 0x000000c2,
601 0x88d4, 0xffffffff, 0x00000010,
602 0x8974, 0xffffffff, 0x00000000,
603 0xc78, 0x00000080, 0x00000080,
604 0x5e78, 0xffffffff, 0x001000f0,
605 0xd02c, 0xffffffff, 0x08421000,
606 0xa008, 0xffffffff, 0x00010000,
607 0x8d00, 0xffffffff, 0x100e4848,
608 0x8d04, 0xffffffff, 0x00164745,
609 0x8c00, 0xffffffff, 0xe4000003,
610 0x8cf0, 0x1fffffff, 0x08e00620,
611 0x28350, 0xffffffff, 0x00000000,
612 0x9508, 0xffffffff, 0x00000002
613};
614
615static const u32 sumo_golden_registers[] =
616{
617 0x900c, 0x00ffffff, 0x0017071f,
618 0x8c18, 0xffffffff, 0x10101060,
619 0x8c1c, 0xffffffff, 0x00001010,
620 0x8c30, 0x0000000f, 0x00000005,
621 0x9688, 0x0000000f, 0x00000007
622};
623
624static const u32 wrestler_golden_registers[] =
625{
626 0x5eb4, 0xffffffff, 0x00000002,
627 0x5cc, 0xffffffff, 0x00000001,
628 0x7030, 0xffffffff, 0x00000011,
629 0x7c30, 0xffffffff, 0x00000011,
630 0x6104, 0x01000300, 0x00000000,
631 0x5bc0, 0x00300000, 0x00000000,
632 0x918c, 0xffffffff, 0x00010006,
633 0x91a8, 0xffffffff, 0x00010006,
634 0x9150, 0xffffffff, 0x6e944040,
635 0x917c, 0xffffffff, 0x00030002,
636 0x9198, 0xffffffff, 0x00030002,
637 0x915c, 0xffffffff, 0x00010000,
638 0x3f90, 0xffff0000, 0xff000000,
639 0x9178, 0xffffffff, 0x00070000,
640 0x9194, 0xffffffff, 0x00070000,
641 0x9148, 0xffff0000, 0xff000000,
642 0x9190, 0xffffffff, 0x00090008,
643 0x91ac, 0xffffffff, 0x00090008,
644 0x3f94, 0xffff0000, 0xff000000,
645 0x914c, 0xffff0000, 0xff000000,
646 0x929c, 0xffffffff, 0x00000001,
647 0x8a18, 0xffffffff, 0x00000100,
648 0x8b28, 0xffffffff, 0x00000100,
649 0x9144, 0xffffffff, 0x00000100,
650 0x9b7c, 0xffffffff, 0x00000000,
651 0x8030, 0xffffffff, 0x0000100a,
652 0x8a14, 0xffffffff, 0x00000001,
653 0x8b24, 0xffffffff, 0x00ff0fff,
654 0x8b10, 0xffffffff, 0x00000000,
655 0x28a4c, 0x06000000, 0x06000000,
656 0x4d8, 0xffffffff, 0x00000100,
657 0x913c, 0xffff000f, 0x0100000a,
658 0x960c, 0xffffffff, 0x54763210,
659 0x88c4, 0xffffffff, 0x000000c2,
660 0x88d4, 0xffffffff, 0x00000010,
661 0x8974, 0xffffffff, 0x00000000,
662 0xc78, 0x00000080, 0x00000080,
663 0x5e78, 0xffffffff, 0x001000f0,
664 0xd02c, 0xffffffff, 0x08421000,
665 0xa008, 0xffffffff, 0x00010000,
666 0x8d00, 0xffffffff, 0x100e4848,
667 0x8d04, 0xffffffff, 0x00164745,
668 0x8c00, 0xffffffff, 0xe4000003,
669 0x8cf0, 0x1fffffff, 0x08e00410,
670 0x28350, 0xffffffff, 0x00000000,
671 0x9508, 0xffffffff, 0x00000002,
672 0x900c, 0xffffffff, 0x0017071f,
673 0x8c18, 0xffffffff, 0x10101060,
674 0x8c1c, 0xffffffff, 0x00001010
675};
676
677static const u32 barts_golden_registers[] =
678{
679 0x5eb4, 0xffffffff, 0x00000002,
680 0x5e78, 0x8f311ff1, 0x001000f0,
681 0x3f90, 0xffff0000, 0xff000000,
682 0x9148, 0xffff0000, 0xff000000,
683 0x3f94, 0xffff0000, 0xff000000,
684 0x914c, 0xffff0000, 0xff000000,
685 0xc78, 0x00000080, 0x00000080,
686 0xbd4, 0x70073777, 0x00010001,
687 0xd02c, 0xbfffff1f, 0x08421000,
688 0xd0b8, 0x03773777, 0x02011003,
689 0x5bc0, 0x00200000, 0x50100000,
690 0x98f8, 0x33773777, 0x02011003,
691 0x98fc, 0xffffffff, 0x76543210,
692 0x7030, 0x31000311, 0x00000011,
693 0x2f48, 0x00000007, 0x02011003,
694 0x6b28, 0x00000010, 0x00000012,
695 0x7728, 0x00000010, 0x00000012,
696 0x10328, 0x00000010, 0x00000012,
697 0x10f28, 0x00000010, 0x00000012,
698 0x11b28, 0x00000010, 0x00000012,
699 0x12728, 0x00000010, 0x00000012,
700 0x240c, 0x000007ff, 0x00000380,
701 0x8a14, 0xf000001f, 0x00000007,
702 0x8b24, 0x3fff3fff, 0x00ff0fff,
703 0x8b10, 0x0000ff0f, 0x00000000,
704 0x28a4c, 0x07ffffff, 0x06000000,
705 0x10c, 0x00000001, 0x00010003,
706 0xa02c, 0xffffffff, 0x0000009b,
707 0x913c, 0x0000000f, 0x0100000a,
708 0x8d00, 0xffff7f7f, 0x100e4848,
709 0x8d04, 0x00ffffff, 0x00164745,
710 0x8c00, 0xfffc0003, 0xe4000003,
711 0x8c04, 0xf8ff00ff, 0x40600060,
712 0x8c08, 0x00ff00ff, 0x001c001c,
713 0x8cf0, 0x1fff1fff, 0x08e00620,
714 0x8c20, 0x0fff0fff, 0x00800080,
715 0x8c24, 0x0fff0fff, 0x00800080,
716 0x8c18, 0xffffffff, 0x20202078,
717 0x8c1c, 0x0000ffff, 0x00001010,
718 0x28350, 0x00000f01, 0x00000000,
719 0x9508, 0x3700001f, 0x00000002,
720 0x960c, 0xffffffff, 0x54763210,
721 0x88c4, 0x001f3ae3, 0x000000c2,
722 0x88d4, 0x0000001f, 0x00000010,
723 0x8974, 0xffffffff, 0x00000000
724};
725
726static const u32 turks_golden_registers[] =
727{
728 0x5eb4, 0xffffffff, 0x00000002,
729 0x5e78, 0x8f311ff1, 0x001000f0,
730 0x8c8, 0x00003000, 0x00001070,
731 0x8cc, 0x000fffff, 0x00040035,
732 0x3f90, 0xffff0000, 0xfff00000,
733 0x9148, 0xffff0000, 0xfff00000,
734 0x3f94, 0xffff0000, 0xfff00000,
735 0x914c, 0xffff0000, 0xfff00000,
736 0xc78, 0x00000080, 0x00000080,
737 0xbd4, 0x00073007, 0x00010002,
738 0xd02c, 0xbfffff1f, 0x08421000,
739 0xd0b8, 0x03773777, 0x02010002,
740 0x5bc0, 0x00200000, 0x50100000,
741 0x98f8, 0x33773777, 0x00010002,
742 0x98fc, 0xffffffff, 0x33221100,
743 0x7030, 0x31000311, 0x00000011,
744 0x2f48, 0x33773777, 0x00010002,
745 0x6b28, 0x00000010, 0x00000012,
746 0x7728, 0x00000010, 0x00000012,
747 0x10328, 0x00000010, 0x00000012,
748 0x10f28, 0x00000010, 0x00000012,
749 0x11b28, 0x00000010, 0x00000012,
750 0x12728, 0x00000010, 0x00000012,
751 0x240c, 0x000007ff, 0x00000380,
752 0x8a14, 0xf000001f, 0x00000007,
753 0x8b24, 0x3fff3fff, 0x00ff0fff,
754 0x8b10, 0x0000ff0f, 0x00000000,
755 0x28a4c, 0x07ffffff, 0x06000000,
756 0x10c, 0x00000001, 0x00010003,
757 0xa02c, 0xffffffff, 0x0000009b,
758 0x913c, 0x0000000f, 0x0100000a,
759 0x8d00, 0xffff7f7f, 0x100e4848,
760 0x8d04, 0x00ffffff, 0x00164745,
761 0x8c00, 0xfffc0003, 0xe4000003,
762 0x8c04, 0xf8ff00ff, 0x40600060,
763 0x8c08, 0x00ff00ff, 0x001c001c,
764 0x8cf0, 0x1fff1fff, 0x08e00410,
765 0x8c20, 0x0fff0fff, 0x00800080,
766 0x8c24, 0x0fff0fff, 0x00800080,
767 0x8c18, 0xffffffff, 0x20202078,
768 0x8c1c, 0x0000ffff, 0x00001010,
769 0x28350, 0x00000f01, 0x00000000,
770 0x9508, 0x3700001f, 0x00000002,
771 0x960c, 0xffffffff, 0x54763210,
772 0x88c4, 0x001f3ae3, 0x000000c2,
773 0x88d4, 0x0000001f, 0x00000010,
774 0x8974, 0xffffffff, 0x00000000
775};
776
777static const u32 caicos_golden_registers[] =
778{
779 0x5eb4, 0xffffffff, 0x00000002,
780 0x5e78, 0x8f311ff1, 0x001000f0,
781 0x8c8, 0x00003420, 0x00001450,
782 0x8cc, 0x000fffff, 0x00040035,
783 0x3f90, 0xffff0000, 0xfffc0000,
784 0x9148, 0xffff0000, 0xfffc0000,
785 0x3f94, 0xffff0000, 0xfffc0000,
786 0x914c, 0xffff0000, 0xfffc0000,
787 0xc78, 0x00000080, 0x00000080,
788 0xbd4, 0x00073007, 0x00010001,
789 0xd02c, 0xbfffff1f, 0x08421000,
790 0xd0b8, 0x03773777, 0x02010001,
791 0x5bc0, 0x00200000, 0x50100000,
792 0x98f8, 0x33773777, 0x02010001,
793 0x98fc, 0xffffffff, 0x33221100,
794 0x7030, 0x31000311, 0x00000011,
795 0x2f48, 0x33773777, 0x02010001,
796 0x6b28, 0x00000010, 0x00000012,
797 0x7728, 0x00000010, 0x00000012,
798 0x10328, 0x00000010, 0x00000012,
799 0x10f28, 0x00000010, 0x00000012,
800 0x11b28, 0x00000010, 0x00000012,
801 0x12728, 0x00000010, 0x00000012,
802 0x240c, 0x000007ff, 0x00000380,
803 0x8a14, 0xf000001f, 0x00000001,
804 0x8b24, 0x3fff3fff, 0x00ff0fff,
805 0x8b10, 0x0000ff0f, 0x00000000,
806 0x28a4c, 0x07ffffff, 0x06000000,
807 0x10c, 0x00000001, 0x00010003,
808 0xa02c, 0xffffffff, 0x0000009b,
809 0x913c, 0x0000000f, 0x0100000a,
810 0x8d00, 0xffff7f7f, 0x100e4848,
811 0x8d04, 0x00ffffff, 0x00164745,
812 0x8c00, 0xfffc0003, 0xe4000003,
813 0x8c04, 0xf8ff00ff, 0x40600060,
814 0x8c08, 0x00ff00ff, 0x001c001c,
815 0x8cf0, 0x1fff1fff, 0x08e00410,
816 0x8c20, 0x0fff0fff, 0x00800080,
817 0x8c24, 0x0fff0fff, 0x00800080,
818 0x8c18, 0xffffffff, 0x20202078,
819 0x8c1c, 0x0000ffff, 0x00001010,
820 0x28350, 0x00000f01, 0x00000000,
821 0x9508, 0x3700001f, 0x00000002,
822 0x960c, 0xffffffff, 0x54763210,
823 0x88c4, 0x001f3ae3, 0x000000c2,
824 0x88d4, 0x0000001f, 0x00000010,
825 0x8974, 0xffffffff, 0x00000000
826};
827
828static void evergreen_init_golden_registers(struct radeon_device *rdev)
829{
830 switch (rdev->family) {
831 case CHIP_CYPRESS:
832 case CHIP_HEMLOCK:
833 radeon_program_register_sequence(rdev,
834 evergreen_golden_registers,
835 (const u32)ARRAY_SIZE(evergreen_golden_registers));
836 radeon_program_register_sequence(rdev,
837 evergreen_golden_registers2,
838 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
839 radeon_program_register_sequence(rdev,
840 cypress_mgcg_init,
841 (const u32)ARRAY_SIZE(cypress_mgcg_init));
842 break;
843 case CHIP_JUNIPER:
844 radeon_program_register_sequence(rdev,
845 evergreen_golden_registers,
846 (const u32)ARRAY_SIZE(evergreen_golden_registers));
847 radeon_program_register_sequence(rdev,
848 evergreen_golden_registers2,
849 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
850 radeon_program_register_sequence(rdev,
851 juniper_mgcg_init,
852 (const u32)ARRAY_SIZE(juniper_mgcg_init));
853 break;
854 case CHIP_REDWOOD:
855 radeon_program_register_sequence(rdev,
856 evergreen_golden_registers,
857 (const u32)ARRAY_SIZE(evergreen_golden_registers));
858 radeon_program_register_sequence(rdev,
859 evergreen_golden_registers2,
860 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
861 radeon_program_register_sequence(rdev,
862 redwood_mgcg_init,
863 (const u32)ARRAY_SIZE(redwood_mgcg_init));
864 break;
865 case CHIP_CEDAR:
866 radeon_program_register_sequence(rdev,
867 cedar_golden_registers,
868 (const u32)ARRAY_SIZE(cedar_golden_registers));
869 radeon_program_register_sequence(rdev,
870 evergreen_golden_registers2,
871 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
872 radeon_program_register_sequence(rdev,
873 cedar_mgcg_init,
874 (const u32)ARRAY_SIZE(cedar_mgcg_init));
875 break;
876 case CHIP_PALM:
877 radeon_program_register_sequence(rdev,
878 wrestler_golden_registers,
879 (const u32)ARRAY_SIZE(wrestler_golden_registers));
880 break;
881 case CHIP_SUMO:
882 radeon_program_register_sequence(rdev,
883 supersumo_golden_registers,
884 (const u32)ARRAY_SIZE(supersumo_golden_registers));
885 break;
886 case CHIP_SUMO2:
887 radeon_program_register_sequence(rdev,
888 supersumo_golden_registers,
889 (const u32)ARRAY_SIZE(supersumo_golden_registers));
890 radeon_program_register_sequence(rdev,
891 sumo_golden_registers,
892 (const u32)ARRAY_SIZE(sumo_golden_registers));
893 break;
894 case CHIP_BARTS:
895 radeon_program_register_sequence(rdev,
896 barts_golden_registers,
897 (const u32)ARRAY_SIZE(barts_golden_registers));
898 break;
899 case CHIP_TURKS:
900 radeon_program_register_sequence(rdev,
901 turks_golden_registers,
902 (const u32)ARRAY_SIZE(turks_golden_registers));
903 break;
904 case CHIP_CAICOS:
905 radeon_program_register_sequence(rdev,
906 caicos_golden_registers,
907 (const u32)ARRAY_SIZE(caicos_golden_registers));
908 break;
909 default:
910 break;
911 }
912}
913
914void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
915			       unsigned *bankh, unsigned *mtaspect,
916			       unsigned *tile_split)
@@ -84,6 +942,142 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
942	}
943}
944
945static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
946 u32 cntl_reg, u32 status_reg)
947{
948 int r, i;
949 struct atom_clock_dividers dividers;
950
951 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
952 clock, false, &dividers);
953 if (r)
954 return r;
955
956 WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
957
958 for (i = 0; i < 100; i++) {
959 if (RREG32(status_reg) & DCLK_STATUS)
960 break;
961 mdelay(10);
962 }
963 if (i == 100)
964 return -ETIMEDOUT;
965
966 return 0;
967}
968
969int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
970{
971 int r = 0;
972 u32 cg_scratch = RREG32(CG_SCRATCH1);
973
974 r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
975 if (r)
976 goto done;
977 cg_scratch &= 0xffff0000;
978 cg_scratch |= vclk / 100; /* Mhz */
979
980 r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
981 if (r)
982 goto done;
983 cg_scratch &= 0x0000ffff;
984 cg_scratch |= (dclk / 100) << 16; /* Mhz */
985
986done:
987 WREG32(CG_SCRATCH1, cg_scratch);
988
989 return r;
990}
991
992int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
993{
994 /* start off with something large */
995 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
996 int r;
997
998 /* bypass vclk and dclk with bclk */
999 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1000 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
1001 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1002
1003 /* put PLL in bypass mode */
1004 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
1005
1006 if (!vclk || !dclk) {
1007 /* keep the Bypass mode, put PLL to sleep */
1008 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
1009 return 0;
1010 }
1011
1012 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
1013 16384, 0x03FFFFFF, 0, 128, 5,
1014 &fb_div, &vclk_div, &dclk_div);
1015 if (r)
1016 return r;
1017
1018 /* set VCO_MODE to 1 */
1019 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
1020
1021 /* toggle UPLL_SLEEP to 1 then back to 0 */
1022 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
1023 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
1024
1025 /* deassert UPLL_RESET */
1026 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1027
1028 mdelay(1);
1029
1030 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1031 if (r)
1032 return r;
1033
1034 /* assert UPLL_RESET again */
1035 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
1036
1037 /* disable spread spectrum. */
1038 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1039
1040 /* set feedback divider */
1041 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
1042
1043 /* set ref divider to 0 */
1044 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
1045
1046 if (fb_div < 307200)
1047 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
1048 else
1049 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
1050
1051 /* set PDIV_A and PDIV_B */
1052 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1053 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
1054 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
1055
1056 /* give the PLL some time to settle */
1057 mdelay(15);
1058
1059 /* deassert PLL_RESET */
1060 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1061
1062 mdelay(15);
1063
1064 /* switch from bypass mode to normal mode */
1065 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
1066
1067 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1068 if (r)
1069 return r;
1070
1071 /* switch VCLK and DCLK selection */
1072 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1073 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
1074 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1075
1076 mdelay(100);
1077
1078 return 0;
1079}
1080
1081void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
1082{
1083	u16 ctl, v;
@@ -105,6 +1099,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
1099	}
1100}
1101
1102static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
1103{
1104 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1105 return true;
1106 else
1107 return false;
1108}
1109
1110static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
1111{
1112 u32 pos1, pos2;
1113
1114 pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1115 pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1116
1117 if (pos1 != pos2)
1118 return true;
1119 else
1120 return false;
1121}
1122
1123/**
1124 * dce4_wait_for_vblank - vblank wait asic callback.
1125 *
@@ -115,21 +1130,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	int i;
+	unsigned i = 0;
 
 	if (crtc >= rdev->num_crtc)
 		return;
 
-	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
-		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
 				break;
-			udelay(1);
 		}
-		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+	}
+
+	while (!dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
 				break;
-			udelay(1);
 		}
 	}
 }
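
One detail worth calling out in the rewritten wait loop: dce4_is_counter_moving() decides whether the scanout position counter is advancing by reading the position register twice back to back, and the loop only gives up when two consecutive samples match, i.e. the CRTC appears stalled. The same progress-detection idiom in isolation, with a volatile pointer standing in for the MMIO register read:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: detect whether a free-running hardware counter is advancing. */
static bool counter_moving(volatile const uint32_t *pos_reg)
{
	uint32_t pos1 = *pos_reg;   /* two back-to-back samples */
	uint32_t pos2 = *pos_reg;

	return pos1 != pos2;        /* equal samples suggest a stalled counter */
}
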
@@ -608,6 +1630,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
1630
1631	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1632		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1633
1634 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
1635 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 1636			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
 1637			 * aux dp channel on iMacs; this helps (but does not completely fix)
 1638			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 1639			 * and also avoids interrupt storms during dpms.
1640 */
1641 continue;
1642 }
1643		switch (radeon_connector->hpd.hpd) {
1644		case RADEON_HPD_1:
1645			WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +2357,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1325 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 2357 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1326 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 2358 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1327 radeon_wait_for_vblank(rdev, i); 2359 radeon_wait_for_vblank(rdev, i);
1328 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1329 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2360 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2361 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1330 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 2362 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1331 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1332 } 2363 }
1333 } else { 2364 } else {
1334 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2365 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1335 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { 2366 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1336 radeon_wait_for_vblank(rdev, i); 2367 radeon_wait_for_vblank(rdev, i);
1337 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1338 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2368 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2369 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1339 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 2370 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1340 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 2371 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1341 } 2372 }
@@ -1347,6 +2378,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1347 break; 2378 break;
1348 udelay(1); 2379 udelay(1);
1349 } 2380 }
2381
2382 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2383 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2384 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2385 tmp &= ~EVERGREEN_CRTC_MASTER_EN;
2386 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2387 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2388 save->crtc_enabled[i] = false;
2389 /* ***** */
1350 } else { 2390 } else {
1351 save->crtc_enabled[i] = false; 2391 save->crtc_enabled[i] = false;
1352 } 2392 }
@@ -1364,6 +2404,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1364 } 2404 }
1365 /* wait for the MC to settle */ 2405 /* wait for the MC to settle */
1366 udelay(100); 2406 udelay(100);
2407
2408 /* lock double buffered regs */
2409 for (i = 0; i < rdev->num_crtc; i++) {
2410 if (save->crtc_enabled[i]) {
2411 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2412 if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
2413 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
2414 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2415 }
2416 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2417 if (!(tmp & 1)) {
2418 tmp |= 1;
2419 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2420 }
2421 }
2422 }
1367} 2423}
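A summary of how this pairs with evergreen_mc_resume() in the hunks below (our condensation, not patch text):

	/* mc_stop:   set EVERGREEN_GRPH_UPDATE_LOCK and bit 0 of
	 *            EVERGREEN_MASTER_UPDATE_LOCK on every live CRTC, so
	 *            the double-buffered surface registers stay latched
	 *            while the MC is blacked out.
	 * mc_resume: reprogram the primary/secondary surface addresses,
	 *            clear MASTER_UPDATE_MODE, drop both locks, then poll
	 *            EVERGREEN_GRPH_SURFACE_UPDATE_PENDING until the new
	 *            addresses have been taken over, before unblackout. */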
1368 2424
1369void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) 2425void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +2441,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1385 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); 2441 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1386 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 2442 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1387 2443
2444 /* unlock regs and wait for update */
2445 for (i = 0; i < rdev->num_crtc; i++) {
2446 if (save->crtc_enabled[i]) {
2447 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2448 if ((tmp & 0x3) != 0) {
2449 tmp &= ~0x3;
2450 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2451 }
2452 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2453 if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
2454 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
2455 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2456 }
2457 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2458 if (tmp & 1) {
2459 tmp &= ~1;
2460 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2461 }
2462 for (j = 0; j < rdev->usec_timeout; j++) {
2463 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2464 if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
2465 break;
2466 udelay(1);
2467 }
2468 }
2469 }
2470
1388 /* unblackout the MC */ 2471 /* unblackout the MC */
1389 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); 2472 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1390 tmp &= ~BLACKOUT_MODE_MASK; 2473 tmp &= ~BLACKOUT_MODE_MASK;
@@ -2050,6 +3133,14 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2050 } 3133 }
2051 /* enabled rbs are just the ones not disabled :) */ 3134
2052 disabled_rb_mask = tmp; 3135 disabled_rb_mask = tmp;
3136 tmp = 0;
3137 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3138 tmp |= (1 << i);
3139 /* if all the backends are disabled, fix it up here */
3140 if ((disabled_rb_mask & tmp) == tmp) {
3141 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3142 disabled_rb_mask &= ~(1 << i);
3143 }
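This fixup guards against the harvested-backend readback claiming every render backend is disabled: if all backend bits are set in disabled_rb_mask, they are simply cleared so the driver never configures zero RBs. Illustrative numbers (ours, not from the patch):

	u32 max_backends = 4;
	u32 tmp = (1 << max_backends) - 1;	/* 0xF */
	u32 disabled_rb_mask = 0xF;		/* bogus: everything disabled */

	if ((disabled_rb_mask & tmp) == tmp)
		disabled_rb_mask &= ~tmp;	/* -> 0x0, all four RBs back on */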
2053 3144
2054 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 3145 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2055 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 3146 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -2058,6 +3149,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2058 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 3149 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2059 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 3150 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2060 WREG32(DMA_TILING_CONFIG, gb_addr_config); 3151 WREG32(DMA_TILING_CONFIG, gb_addr_config);
3152 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3153 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3154 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
2061 3155
2062 if ((rdev->config.evergreen.max_backends == 1) && 3156 if ((rdev->config.evergreen.max_backends == 1) &&
2063 (rdev->flags & RADEON_IS_IGP)) { 3157 (rdev->flags & RADEON_IS_IGP)) {
@@ -3360,6 +4454,9 @@ restart_ih:
3360 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 4454 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3361 break; 4455 break;
3362 } 4456 }
4457 case 124: /* UVD */
4458 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
4459 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
3363 break; 4460 break;
3364 case 146: 4461 case 146:
3365 case 147: 4462 case 147:
@@ -3571,7 +4668,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
3571 4668
3572static int evergreen_startup(struct radeon_device *rdev) 4669static int evergreen_startup(struct radeon_device *rdev)
3573{ 4670{
3574 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 4671 struct radeon_ring *ring;
3575 int r; 4672 int r;
3576 4673
3577 /* enable pcie gen2 link */ 4674 /* enable pcie gen2 link */
@@ -3638,6 +4735,17 @@ static int evergreen_startup(struct radeon_device *rdev)
3638 return r; 4735 return r;
3639 } 4736 }
3640 4737
4738 r = rv770_uvd_resume(rdev);
4739 if (!r) {
4740 r = radeon_fence_driver_start_ring(rdev,
4741 R600_RING_TYPE_UVD_INDEX);
4742 if (r)
4743 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
4744 }
4745
4746 if (r)
4747 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
4748
3641 /* Enable IRQ */ 4749 /* Enable IRQ */
3642 r = r600_irq_init(rdev); 4750 r = r600_irq_init(rdev);
3643 if (r) { 4751 if (r) {
@@ -3647,6 +4755,7 @@ static int evergreen_startup(struct radeon_device *rdev)
3647 } 4755 }
3648 evergreen_irq_set(rdev); 4756 evergreen_irq_set(rdev);
3649 4757
4758 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3650 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 4759 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3651 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 4760 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3652 0, 0xfffff, RADEON_CP_PACKET2); 4761 0, 0xfffff, RADEON_CP_PACKET2);
@@ -3670,6 +4779,19 @@ static int evergreen_startup(struct radeon_device *rdev)
3670 if (r) 4779 if (r)
3671 return r; 4780 return r;
3672 4781
4782 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
4783 if (ring->ring_size) {
4784 r = radeon_ring_init(rdev, ring, ring->ring_size,
4785 R600_WB_UVD_RPTR_OFFSET,
4786 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
4787 0, 0xfffff, RADEON_CP_PACKET2);
4788 if (!r)
4789 r = r600_uvd_init(rdev);
4790
4791 if (r)
4792 DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
4793 }
4794
3673 r = radeon_ib_pool_init(rdev); 4795 r = radeon_ib_pool_init(rdev);
3674 if (r) { 4796 if (r) {
3675 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 4797 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3701,6 +4823,9 @@ int evergreen_resume(struct radeon_device *rdev)
3701 /* post card */ 4823 /* post card */
3702 atom_asic_init(rdev->mode_info.atom_context); 4824 atom_asic_init(rdev->mode_info.atom_context);
3703 4825
4826 /* init golden registers */
4827 evergreen_init_golden_registers(rdev);
4828
3704 rdev->accel_working = true; 4829 rdev->accel_working = true;
3705 r = evergreen_startup(rdev); 4830 r = evergreen_startup(rdev);
3706 if (r) { 4831 if (r) {
@@ -3716,8 +4841,10 @@ int evergreen_resume(struct radeon_device *rdev)
3716int evergreen_suspend(struct radeon_device *rdev) 4841int evergreen_suspend(struct radeon_device *rdev)
3717{ 4842{
3718 r600_audio_fini(rdev); 4843 r600_audio_fini(rdev);
4844 radeon_uvd_suspend(rdev);
3719 r700_cp_stop(rdev); 4845 r700_cp_stop(rdev);
3720 r600_dma_stop(rdev); 4846 r600_dma_stop(rdev);
4847 r600_uvd_rbc_stop(rdev);
3721 evergreen_irq_suspend(rdev); 4848 evergreen_irq_suspend(rdev);
3722 radeon_wb_disable(rdev); 4849 radeon_wb_disable(rdev);
3723 evergreen_pcie_gart_disable(rdev); 4850 evergreen_pcie_gart_disable(rdev);
@@ -3762,6 +4889,8 @@ int evergreen_init(struct radeon_device *rdev)
3762 DRM_INFO("GPU not posted. posting now...\n"); 4889 DRM_INFO("GPU not posted. posting now...\n");
3763 atom_asic_init(rdev->mode_info.atom_context); 4890 atom_asic_init(rdev->mode_info.atom_context);
3764 } 4891 }
4892 /* init golden registers */
4893 evergreen_init_golden_registers(rdev);
3765 /* Initialize scratch registers */ 4894 /* Initialize scratch registers */
3766 r600_scratch_init(rdev); 4895 r600_scratch_init(rdev);
3767 /* Initialize surface registers */ 4896 /* Initialize surface registers */
@@ -3797,6 +4926,13 @@ int evergreen_init(struct radeon_device *rdev)
3797 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; 4926 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3798 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); 4927 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3799 4928
4929 r = radeon_uvd_init(rdev);
4930 if (!r) {
4931 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
4932 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
4933 4096);
4934 }
4935
3800 rdev->ih.ring_obj = NULL; 4936 rdev->ih.ring_obj = NULL;
3801 r600_ih_ring_init(rdev, 64 * 1024); 4937 r600_ih_ring_init(rdev, 64 * 1024);
3802 4938
@@ -3843,6 +4979,7 @@ void evergreen_fini(struct radeon_device *rdev)
3843 radeon_ib_pool_fini(rdev); 4979 radeon_ib_pool_fini(rdev);
3844 radeon_irq_kms_fini(rdev); 4980 radeon_irq_kms_fini(rdev);
3845 evergreen_pcie_gart_fini(rdev); 4981 evergreen_pcie_gart_fini(rdev);
4982 radeon_uvd_fini(rdev);
3846 r600_vram_scratch_fini(rdev); 4983 r600_vram_scratch_fini(rdev);
3847 radeon_gem_fini(rdev); 4984 radeon_gem_fini(rdev);
3848 radeon_fence_driver_fini(rdev); 4985 radeon_fence_driver_fini(rdev);
@@ -3878,7 +5015,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3878 if (!(mask & DRM_PCIE_SPEED_50)) 5015 if (!(mask & DRM_PCIE_SPEED_50))
3879 return; 5016 return;
3880 5017
3881 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 5018 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3882 if (speed_cntl & LC_CURRENT_DATA_RATE) { 5019 if (speed_cntl & LC_CURRENT_DATA_RATE) {
3883 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 5020 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3884 return; 5021 return;
@@ -3889,33 +5026,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3889 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || 5026 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3890 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 5027 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3891 5028
3892 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 5029 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
3893 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 5030 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3894 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 5031 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3895 5032
3896 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 5033 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3897 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 5034 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3898 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 5035 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3899 5036
3900 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 5037 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3901 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; 5038 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3902 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 5039 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3903 5040
3904 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 5041 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3905 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; 5042 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3906 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 5043 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3907 5044
3908 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 5045 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3909 speed_cntl |= LC_GEN2_EN_STRAP; 5046 speed_cntl |= LC_GEN2_EN_STRAP;
3910 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 5047 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3911 5048
3912 } else { 5049 } else {
3913 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 5050 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
3914 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 5051 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3915 if (1) 5052 if (1)
3916 link_width_cntl |= LC_UPCONFIGURE_DIS; 5053 link_width_cntl |= LC_UPCONFIGURE_DIS;
3917 else 5054 else
3918 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 5055 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3919 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 5056 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3920 } 5057 }
3921} 5058}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 4fdecc2b4040..b4ab8ceb1654 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -54,6 +54,68 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
54 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz); 54 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
55} 55}
56 56
57static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
58{
59 struct radeon_device *rdev = encoder->dev->dev_private;
60 struct drm_connector *connector;
61 struct radeon_connector *radeon_connector = NULL;
62 struct cea_sad *sads;
63 int i, sad_count;
64
65 static const u16 eld_reg_to_type[][2] = {
66 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
67 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
68 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
69 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
70 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
71 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
72 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
73 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
74 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
75 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
76 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
77 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
78 };
79
80 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
81 if (connector->encoder == encoder)
82 radeon_connector = to_radeon_connector(connector);
83 }
84
85 if (!radeon_connector) {
86 DRM_ERROR("Couldn't find encoder's connector\n");
87 return;
88 }
89
90 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
91 if (sad_count < 0) {
92 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
93 return;
94 }
95 BUG_ON(!sads);
96
97 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
98 u32 value = 0;
99 int j;
100
101 for (j = 0; j < sad_count; j++) {
102 struct cea_sad *sad = &sads[j];
103
104 if (sad->format == eld_reg_to_type[i][1]) {
105 value = MAX_CHANNELS(sad->channels) |
106 DESCRIPTOR_BYTE_2(sad->byte2) |
107 SUPPORTED_FREQUENCIES(sad->freq);
108 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
109 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
110 break;
111 }
112 }
113 WREG32(eld_reg_to_type[i][0], value);
114 }
115
116 kfree(sads);
117}
118
57/* 119/*
58 * build a HDMI Video Info Frame 120 * build a HDMI Video Info Frame
59 */ 121 */
@@ -85,6 +147,30 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
85 frame[0xC] | (frame[0xD] << 8)); 147 frame[0xC] | (frame[0xD] << 8));
86} 148}
87 149
150static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
151{
152 struct drm_device *dev = encoder->dev;
153 struct radeon_device *rdev = dev->dev_private;
154 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
155 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
156 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
157 u32 base_rate = 48000;
158
159 if (!dig || !dig->afmt)
160 return;
161
162 /* XXX: properly calculate this */
163 /* XXX two dtos; generally use dto0 for hdmi */
164 /* Express [24MHz / target pixel clock] as an exact rational
165 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
166 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
167 */
168 WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
169 WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
170 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
171}
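Only the ratio of the two registers matters. A worked example with illustrative numbers (ours, not from the patch): for a 148.5 MHz pixel clock, mode->clock is 148500 (kHz), so

	PHASE  = base_rate * 50 =  48000 * 50  =  2,400,000
	MODULE = clock * 100    = 148500 * 100 = 14,850,000
	PHASE / MODULE = 24,000 / 148,500 = 24 MHz / 148.5 MHz

which is exactly the [24MHz / target pixel clock] ratio the comment asks for. Note the factor 50 only works out because base_rate is hard-coded to 48000 (48000 * 50 is 24 MHz / 10, while clock * 100 is the pixel clock in Hz / 10) -- presumably the reason for the XXX about calculating it properly.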
172
173
88/* 174/*
89 * update the info frames with the data from the current display mode 175 * update the info frames with the data from the current display mode
90 */ 176 */
@@ -104,33 +190,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
104 return; 190 return;
105 offset = dig->afmt->offset; 191 offset = dig->afmt->offset;
106 192
107 r600_audio_set_clock(encoder, mode->clock); 193 evergreen_audio_set_dto(encoder, mode->clock);
108 194
109 WREG32(HDMI_VBI_PACKET_CONTROL + offset, 195 WREG32(HDMI_VBI_PACKET_CONTROL + offset,
110 HDMI_NULL_SEND); /* send null packets when required */ 196 HDMI_NULL_SEND); /* send null packets when required */
111 197
112 WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000); 198 WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
113 199
114 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
115 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
116 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
117
118 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
119 AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
120 AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
121
122 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
123 HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
124 HDMI_ACR_SOURCE); /* select SW CTS value */
125
126 WREG32(HDMI_VBI_PACKET_CONTROL + offset, 200 WREG32(HDMI_VBI_PACKET_CONTROL + offset,
127 HDMI_NULL_SEND | /* send null packets when required */ 201 HDMI_NULL_SEND | /* send null packets when required */
128 HDMI_GC_SEND | /* send general control packets */ 202 HDMI_GC_SEND | /* send general control packets */
129 HDMI_GC_CONT); /* send general control packets every frame */ 203 HDMI_GC_CONT); /* send general control packets every frame */
130 204
131 WREG32(HDMI_INFOFRAME_CONTROL0 + offset, 205 WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
132 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
133 HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
134 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 206 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
135 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ 207 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
136 208
@@ -138,11 +210,47 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
138 AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ 210 AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
139 211
140 WREG32(HDMI_INFOFRAME_CONTROL1 + offset, 212 WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
141 HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
142 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ 213 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
143 214
144 WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */ 215 WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
145 216
217 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
218 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
219 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
220
221 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
222 AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
223
224 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
225
226 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
227 HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
228 HDMI_ACR_SOURCE); /* select SW CTS value */
229
230 evergreen_hdmi_update_ACR(encoder, mode->clock);
231
232 WREG32(AFMT_60958_0 + offset,
233 AFMT_60958_CS_CHANNEL_NUMBER_L(1));
234
235 WREG32(AFMT_60958_1 + offset,
236 AFMT_60958_CS_CHANNEL_NUMBER_R(2));
237
238 WREG32(AFMT_60958_2 + offset,
239 AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
240 AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
241 AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
242 AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
243 AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
244 AFMT_60958_CS_CHANNEL_NUMBER_7(8));
245
246 /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
247
248 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
249 AFMT_AUDIO_CHANNEL_ENABLE(0xff));
250
251 /* fglrx sets 0x40 in 0x5f80 here */
252 evergreen_hdmi_write_sad_regs(encoder);
253
146 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 254 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
147 if (err < 0) { 255 if (err < 0) {
148 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 256 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
@@ -156,7 +264,17 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
156 } 264 }
157 265
158 evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 266 evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
159 evergreen_hdmi_update_ACR(encoder, mode->clock); 267
268 WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
269 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
270 HDMI_AVI_INFO_CONT); /* send AVI info frames every frame/field */
271
272 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
273 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
274 ~HDMI_AVI_INFO_LINE_MASK);
275
276 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
277 AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
160 278
161 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */ 279
162 WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); 280 WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
@@ -164,3 +282,20 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
164 WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); 282 WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
165 WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); 283 WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
166} 284}
285
286void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
287{
288 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
289 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
290
291 /* Silent, r600_hdmi_enable will raise WARN for us */
292 if (enable && dig->afmt->enabled)
293 return;
294 if (!enable && !dig->afmt->enabled)
295 return;
296
297 dig->afmt->enabled = enable;
298
299 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
300 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
301}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f585be16e2d5..881aba23c477 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -226,6 +226,8 @@
226#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 226#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
227#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 227#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
228#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 228#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
229#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
230#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
229 231
230#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 232#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
231#define EVERGREEN_DC_GPIO_HPD_A 0x64b4 233#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 982d25ad9af3..75c05631146d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -53,6 +53,43 @@
53#define RCU_IND_INDEX 0x100 53#define RCU_IND_INDEX 0x100
54#define RCU_IND_DATA 0x104 54#define RCU_IND_DATA 0x104
55 55
56/* discrete uvd clocks */
57#define CG_UPLL_FUNC_CNTL 0x718
58# define UPLL_RESET_MASK 0x00000001
59# define UPLL_SLEEP_MASK 0x00000002
60# define UPLL_BYPASS_EN_MASK 0x00000004
61# define UPLL_CTLREQ_MASK 0x00000008
62# define UPLL_REF_DIV_MASK 0x003F0000
63# define UPLL_VCO_MODE_MASK 0x00000200
64# define UPLL_CTLACK_MASK 0x40000000
65# define UPLL_CTLACK2_MASK 0x80000000
66#define CG_UPLL_FUNC_CNTL_2 0x71c
67# define UPLL_PDIV_A(x) ((x) << 0)
68# define UPLL_PDIV_A_MASK 0x0000007F
69# define UPLL_PDIV_B(x) ((x) << 8)
70# define UPLL_PDIV_B_MASK 0x00007F00
71# define VCLK_SRC_SEL(x) ((x) << 20)
72# define VCLK_SRC_SEL_MASK 0x01F00000
73# define DCLK_SRC_SEL(x) ((x) << 25)
74# define DCLK_SRC_SEL_MASK 0x3E000000
75#define CG_UPLL_FUNC_CNTL_3 0x720
76# define UPLL_FB_DIV(x) ((x) << 0)
77# define UPLL_FB_DIV_MASK 0x01FFFFFF
78#define CG_UPLL_FUNC_CNTL_4 0x854
79# define UPLL_SPARE_ISPARE9 0x00020000
80#define CG_UPLL_SPREAD_SPECTRUM 0x79c
81# define SSEN_MASK 0x00000001
82
83/* fusion uvd clocks */
84#define CG_DCLK_CNTL 0x610
85# define DCLK_DIVIDER_MASK 0x7f
86# define DCLK_DIR_CNTL_EN (1 << 8)
87#define CG_DCLK_STATUS 0x614
88# define DCLK_STATUS (1 << 0)
89#define CG_VCLK_CNTL 0x618
90#define CG_VCLK_STATUS 0x61c
91#define CG_SCRATCH1 0x820
92
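Each field macro pairs a shifted setter with a companion mask, intended for read-modify-write updates like the VCLK/DCLK source selection visible at the top of this file's hunks. A hedged sketch of the idiom (the function name and source index are ours):

	static void upll_select_clock_sources(struct radeon_device *rdev,
					      unsigned vclk_src, unsigned dclk_src)
	{
		u32 tmp = RREG32(CG_UPLL_FUNC_CNTL_2);

		tmp &= ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK);	/* clear fields */
		tmp |= VCLK_SRC_SEL(vclk_src) | DCLK_SRC_SEL(dclk_src);
		WREG32(CG_UPLL_FUNC_CNTL_2, tmp);
	}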
56#define GRBM_GFX_INDEX 0x802C 93#define GRBM_GFX_INDEX 0x802C
57#define INSTANCE_INDEX(x) ((x) << 0) 94#define INSTANCE_INDEX(x) ((x) << 0)
58#define SE_INDEX(x) ((x) << 16) 95#define SE_INDEX(x) ((x) << 16)
@@ -197,6 +234,7 @@
197# define HDMI_MPEG_INFO_CONT (1 << 9) 234# define HDMI_MPEG_INFO_CONT (1 << 9)
198#define HDMI_INFOFRAME_CONTROL1 0x7048 235#define HDMI_INFOFRAME_CONTROL1 0x7048
199# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) 236# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
237# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
200# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) 238# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
201# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) 239# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
202#define HDMI_GENERIC_PACKET_CONTROL 0x704c 240#define HDMI_GENERIC_PACKET_CONTROL 0x704c
@@ -992,6 +1030,16 @@
992# define TARGET_LINK_SPEED_MASK (0xf << 0) 1030# define TARGET_LINK_SPEED_MASK (0xf << 0)
993# define SELECTABLE_DEEMPHASIS (1 << 6) 1031# define SELECTABLE_DEEMPHASIS (1 << 6)
994 1032
1033
1034/*
1035 * UVD
1036 */
1037#define UVD_UDEC_ADDR_CONFIG 0xef4c
1038#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
1039#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
1040#define UVD_RBC_RB_RPTR 0xf690
1041#define UVD_RBC_RB_WPTR 0xf694
1042
995/* 1043/*
996 * PM4 1044 * PM4
997 */ 1045 */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 27769e724b6d..7969c0c8ec20 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -78,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
78MODULE_FIRMWARE("radeon/ARUBA_me.bin"); 78MODULE_FIRMWARE("radeon/ARUBA_me.bin");
79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); 79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
80 80
81
82static const u32 cayman_golden_registers2[] =
83{
84 0x3e5c, 0xffffffff, 0x00000000,
85 0x3e48, 0xffffffff, 0x00000000,
86 0x3e4c, 0xffffffff, 0x00000000,
87 0x3e64, 0xffffffff, 0x00000000,
88 0x3e50, 0xffffffff, 0x00000000,
89 0x3e60, 0xffffffff, 0x00000000
90};
91
92static const u32 cayman_golden_registers[] =
93{
94 0x5eb4, 0xffffffff, 0x00000002,
95 0x5e78, 0x8f311ff1, 0x001000f0,
96 0x3f90, 0xffff0000, 0xff000000,
97 0x9148, 0xffff0000, 0xff000000,
98 0x3f94, 0xffff0000, 0xff000000,
99 0x914c, 0xffff0000, 0xff000000,
100 0xc78, 0x00000080, 0x00000080,
101 0xbd4, 0x70073777, 0x00011003,
102 0xd02c, 0xbfffff1f, 0x08421000,
103 0xd0b8, 0x73773777, 0x02011003,
104 0x5bc0, 0x00200000, 0x50100000,
105 0x98f8, 0x33773777, 0x02011003,
106 0x98fc, 0xffffffff, 0x76541032,
107 0x7030, 0x31000311, 0x00000011,
108 0x2f48, 0x33773777, 0x42010001,
109 0x6b28, 0x00000010, 0x00000012,
110 0x7728, 0x00000010, 0x00000012,
111 0x10328, 0x00000010, 0x00000012,
112 0x10f28, 0x00000010, 0x00000012,
113 0x11b28, 0x00000010, 0x00000012,
114 0x12728, 0x00000010, 0x00000012,
115 0x240c, 0x000007ff, 0x00000000,
116 0x8a14, 0xf000001f, 0x00000007,
117 0x8b24, 0x3fff3fff, 0x00ff0fff,
118 0x8b10, 0x0000ff0f, 0x00000000,
119 0x28a4c, 0x07ffffff, 0x06000000,
120 0x10c, 0x00000001, 0x00010003,
121 0xa02c, 0xffffffff, 0x0000009b,
122 0x913c, 0x0000010f, 0x01000100,
123 0x8c04, 0xf8ff00ff, 0x40600060,
124 0x28350, 0x00000f01, 0x00000000,
125 0x9508, 0x3700001f, 0x00000002,
126 0x960c, 0xffffffff, 0x54763210,
127 0x88c4, 0x001f3ae3, 0x00000082,
128 0x88d0, 0xffffffff, 0x0f40df40,
129 0x88d4, 0x0000001f, 0x00000010,
130 0x8974, 0xffffffff, 0x00000000
131};
132
133static const u32 dvst_golden_registers2[] =
134{
135 0x8f8, 0xffffffff, 0,
136 0x8fc, 0x00380000, 0,
137 0x8f8, 0xffffffff, 1,
138 0x8fc, 0x0e000000, 0
139};
140
141static const u32 dvst_golden_registers[] =
142{
143 0x690, 0x3fff3fff, 0x20c00033,
144 0x918c, 0x0fff0fff, 0x00010006,
145 0x91a8, 0x0fff0fff, 0x00010006,
146 0x9150, 0xffffdfff, 0x6e944040,
147 0x917c, 0x0fff0fff, 0x00030002,
148 0x9198, 0x0fff0fff, 0x00030002,
149 0x915c, 0x0fff0fff, 0x00010000,
150 0x3f90, 0xffff0001, 0xff000000,
151 0x9178, 0x0fff0fff, 0x00070000,
152 0x9194, 0x0fff0fff, 0x00070000,
153 0x9148, 0xffff0001, 0xff000000,
154 0x9190, 0x0fff0fff, 0x00090008,
155 0x91ac, 0x0fff0fff, 0x00090008,
156 0x3f94, 0xffff0000, 0xff000000,
157 0x914c, 0xffff0000, 0xff000000,
158 0x929c, 0x00000fff, 0x00000001,
159 0x55e4, 0xff607fff, 0xfc000100,
160 0x8a18, 0xff000fff, 0x00000100,
161 0x8b28, 0xff000fff, 0x00000100,
162 0x9144, 0xfffc0fff, 0x00000100,
163 0x6ed8, 0x00010101, 0x00010000,
164 0x9830, 0xffffffff, 0x00000000,
165 0x9834, 0xf00fffff, 0x00000400,
166 0x9838, 0xfffffffe, 0x00000000,
167 0xd0c0, 0xff000fff, 0x00000100,
168 0xd02c, 0xbfffff1f, 0x08421000,
169 0xd0b8, 0x73773777, 0x12010001,
170 0x5bb0, 0x000000f0, 0x00000070,
171 0x98f8, 0x73773777, 0x12010001,
172 0x98fc, 0xffffffff, 0x00000010,
173 0x9b7c, 0x00ff0000, 0x00fc0000,
174 0x8030, 0x00001f0f, 0x0000100a,
175 0x2f48, 0x73773777, 0x12010001,
176 0x2408, 0x00030000, 0x000c007f,
177 0x8a14, 0xf000003f, 0x00000007,
178 0x8b24, 0x3fff3fff, 0x00ff0fff,
179 0x8b10, 0x0000ff0f, 0x00000000,
180 0x28a4c, 0x07ffffff, 0x06000000,
181 0x4d8, 0x00000fff, 0x00000100,
182 0xa008, 0xffffffff, 0x00010000,
183 0x913c, 0xffff03ff, 0x01000100,
184 0x8c00, 0x000000ff, 0x00000003,
185 0x8c04, 0xf8ff00ff, 0x40600060,
186 0x8cf0, 0x1fff1fff, 0x08e00410,
187 0x28350, 0x00000f01, 0x00000000,
188 0x9508, 0xf700071f, 0x00000002,
189 0x960c, 0xffffffff, 0x54763210,
190 0x20ef8, 0x01ff01ff, 0x00000002,
191 0x20e98, 0xfffffbff, 0x00200000,
192 0x2015c, 0xffffffff, 0x00000f40,
193 0x88c4, 0x001f3ae3, 0x00000082,
194 0x8978, 0x3fffffff, 0x04050140,
195 0x88d4, 0x0000001f, 0x00000010,
196 0x8974, 0xffffffff, 0x00000000
197};
198
199static const u32 scrapper_golden_registers[] =
200{
201 0x690, 0x3fff3fff, 0x20c00033,
202 0x918c, 0x0fff0fff, 0x00010006,
203 0x918c, 0x0fff0fff, 0x00010006,
204 0x91a8, 0x0fff0fff, 0x00010006,
205 0x91a8, 0x0fff0fff, 0x00010006,
206 0x9150, 0xffffdfff, 0x6e944040,
207 0x9150, 0xffffdfff, 0x6e944040,
208 0x917c, 0x0fff0fff, 0x00030002,
209 0x917c, 0x0fff0fff, 0x00030002,
210 0x9198, 0x0fff0fff, 0x00030002,
211 0x9198, 0x0fff0fff, 0x00030002,
212 0x915c, 0x0fff0fff, 0x00010000,
213 0x915c, 0x0fff0fff, 0x00010000,
214 0x3f90, 0xffff0001, 0xff000000,
215 0x3f90, 0xffff0001, 0xff000000,
216 0x9178, 0x0fff0fff, 0x00070000,
217 0x9178, 0x0fff0fff, 0x00070000,
218 0x9194, 0x0fff0fff, 0x00070000,
219 0x9194, 0x0fff0fff, 0x00070000,
220 0x9148, 0xffff0001, 0xff000000,
221 0x9148, 0xffff0001, 0xff000000,
222 0x9190, 0x0fff0fff, 0x00090008,
223 0x9190, 0x0fff0fff, 0x00090008,
224 0x91ac, 0x0fff0fff, 0x00090008,
225 0x91ac, 0x0fff0fff, 0x00090008,
226 0x3f94, 0xffff0000, 0xff000000,
227 0x3f94, 0xffff0000, 0xff000000,
228 0x914c, 0xffff0000, 0xff000000,
229 0x914c, 0xffff0000, 0xff000000,
230 0x929c, 0x00000fff, 0x00000001,
231 0x929c, 0x00000fff, 0x00000001,
232 0x55e4, 0xff607fff, 0xfc000100,
233 0x8a18, 0xff000fff, 0x00000100,
234 0x8a18, 0xff000fff, 0x00000100,
235 0x8b28, 0xff000fff, 0x00000100,
236 0x8b28, 0xff000fff, 0x00000100,
237 0x9144, 0xfffc0fff, 0x00000100,
238 0x9144, 0xfffc0fff, 0x00000100,
239 0x6ed8, 0x00010101, 0x00010000,
240 0x9830, 0xffffffff, 0x00000000,
241 0x9830, 0xffffffff, 0x00000000,
242 0x9834, 0xf00fffff, 0x00000400,
243 0x9834, 0xf00fffff, 0x00000400,
244 0x9838, 0xfffffffe, 0x00000000,
245 0x9838, 0xfffffffe, 0x00000000,
246 0xd0c0, 0xff000fff, 0x00000100,
247 0xd02c, 0xbfffff1f, 0x08421000,
248 0xd02c, 0xbfffff1f, 0x08421000,
249 0xd0b8, 0x73773777, 0x12010001,
250 0xd0b8, 0x73773777, 0x12010001,
251 0x5bb0, 0x000000f0, 0x00000070,
252 0x98f8, 0x73773777, 0x12010001,
253 0x98f8, 0x73773777, 0x12010001,
254 0x98fc, 0xffffffff, 0x00000010,
255 0x98fc, 0xffffffff, 0x00000010,
256 0x9b7c, 0x00ff0000, 0x00fc0000,
257 0x9b7c, 0x00ff0000, 0x00fc0000,
258 0x8030, 0x00001f0f, 0x0000100a,
259 0x8030, 0x00001f0f, 0x0000100a,
260 0x2f48, 0x73773777, 0x12010001,
261 0x2f48, 0x73773777, 0x12010001,
262 0x2408, 0x00030000, 0x000c007f,
263 0x8a14, 0xf000003f, 0x00000007,
264 0x8a14, 0xf000003f, 0x00000007,
265 0x8b24, 0x3fff3fff, 0x00ff0fff,
266 0x8b24, 0x3fff3fff, 0x00ff0fff,
267 0x8b10, 0x0000ff0f, 0x00000000,
268 0x8b10, 0x0000ff0f, 0x00000000,
269 0x28a4c, 0x07ffffff, 0x06000000,
270 0x28a4c, 0x07ffffff, 0x06000000,
271 0x4d8, 0x00000fff, 0x00000100,
272 0x4d8, 0x00000fff, 0x00000100,
273 0xa008, 0xffffffff, 0x00010000,
274 0xa008, 0xffffffff, 0x00010000,
275 0x913c, 0xffff03ff, 0x01000100,
276 0x913c, 0xffff03ff, 0x01000100,
277 0x90e8, 0x001fffff, 0x010400c0,
278 0x8c00, 0x000000ff, 0x00000003,
279 0x8c00, 0x000000ff, 0x00000003,
280 0x8c04, 0xf8ff00ff, 0x40600060,
281 0x8c04, 0xf8ff00ff, 0x40600060,
282 0x8c30, 0x0000000f, 0x00040005,
283 0x8cf0, 0x1fff1fff, 0x08e00410,
284 0x8cf0, 0x1fff1fff, 0x08e00410,
285 0x900c, 0x00ffffff, 0x0017071f,
286 0x28350, 0x00000f01, 0x00000000,
287 0x28350, 0x00000f01, 0x00000000,
288 0x9508, 0xf700071f, 0x00000002,
289 0x9508, 0xf700071f, 0x00000002,
290 0x9688, 0x00300000, 0x0017000f,
291 0x960c, 0xffffffff, 0x54763210,
292 0x960c, 0xffffffff, 0x54763210,
293 0x20ef8, 0x01ff01ff, 0x00000002,
294 0x20e98, 0xfffffbff, 0x00200000,
295 0x2015c, 0xffffffff, 0x00000f40,
296 0x88c4, 0x001f3ae3, 0x00000082,
297 0x88c4, 0x001f3ae3, 0x00000082,
298 0x8978, 0x3fffffff, 0x04050140,
299 0x8978, 0x3fffffff, 0x04050140,
300 0x88d4, 0x0000001f, 0x00000010,
301 0x88d4, 0x0000001f, 0x00000010,
302 0x8974, 0xffffffff, 0x00000000,
303 0x8974, 0xffffffff, 0x00000000
304};
305
306static void ni_init_golden_registers(struct radeon_device *rdev)
307{
308 switch (rdev->family) {
309 case CHIP_CAYMAN:
310 radeon_program_register_sequence(rdev,
311 cayman_golden_registers,
312 (const u32)ARRAY_SIZE(cayman_golden_registers));
313 radeon_program_register_sequence(rdev,
314 cayman_golden_registers2,
315 (const u32)ARRAY_SIZE(cayman_golden_registers2));
316 break;
317 case CHIP_ARUBA:
318 if ((rdev->pdev->device == 0x9900) ||
319 (rdev->pdev->device == 0x9901) ||
320 (rdev->pdev->device == 0x9903) ||
321 (rdev->pdev->device == 0x9904) ||
322 (rdev->pdev->device == 0x9905) ||
323 (rdev->pdev->device == 0x9906) ||
324 (rdev->pdev->device == 0x9907) ||
325 (rdev->pdev->device == 0x9908) ||
326 (rdev->pdev->device == 0x9909) ||
327 (rdev->pdev->device == 0x990A) ||
328 (rdev->pdev->device == 0x990B) ||
329 (rdev->pdev->device == 0x990C) ||
330 (rdev->pdev->device == 0x990D) ||
331 (rdev->pdev->device == 0x990E) ||
332 (rdev->pdev->device == 0x990F) ||
333 (rdev->pdev->device == 0x9910) ||
334 (rdev->pdev->device == 0x9913) ||
335 (rdev->pdev->device == 0x9917) ||
336 (rdev->pdev->device == 0x9918)) {
337 radeon_program_register_sequence(rdev,
338 dvst_golden_registers,
339 (const u32)ARRAY_SIZE(dvst_golden_registers));
340 radeon_program_register_sequence(rdev,
341 dvst_golden_registers2,
342 (const u32)ARRAY_SIZE(dvst_golden_registers2));
343 } else {
344 radeon_program_register_sequence(rdev,
345 scrapper_golden_registers,
346 (const u32)ARRAY_SIZE(scrapper_golden_registers));
347 radeon_program_register_sequence(rdev,
348 dvst_golden_registers2,
349 (const u32)ARRAY_SIZE(dvst_golden_registers2));
350 }
351 break;
352 default:
353 break;
354 }
355}
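The golden tables are flat triplets: register offset, and-mask, value. radeon_program_register_sequence() (added elsewhere in this series) applies each row as a masked read-modify-write, with an all-ones mask short-circuiting to a plain write. A sketch of that contract, reconstructed rather than quoted from the patch:

	static void program_register_sequence(struct radeon_device *rdev,
					      const u32 *regs, u32 array_size)
	{
		u32 i, reg, and_mask, or_mask, tmp;

		if (array_size % 3)
			return;			/* table must be whole triplets */

		for (i = 0; i < array_size; i += 3) {
			reg = regs[i + 0];
			and_mask = regs[i + 1];
			or_mask = regs[i + 2];

			if (and_mask == 0xffffffff) {
				tmp = or_mask;		/* full overwrite */
			} else {
				tmp = RREG32(reg);
				tmp &= ~and_mask;	/* clear the field */
				tmp |= or_mask;		/* set golden value */
			}
			WREG32(reg, tmp);
		}
	}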
356
81#define BTC_IO_MC_REGS_SIZE 29 357#define BTC_IO_MC_REGS_SIZE 29
82 358
83static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 359static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -473,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
473 (rdev->pdev->device == 0x990F) || 749 (rdev->pdev->device == 0x990F) ||
474 (rdev->pdev->device == 0x9910) || 750 (rdev->pdev->device == 0x9910) ||
475 (rdev->pdev->device == 0x9917) || 751 (rdev->pdev->device == 0x9917) ||
476 (rdev->pdev->device == 0x9999)) { 752 (rdev->pdev->device == 0x9999) ||
753 (rdev->pdev->device == 0x999C)) {
477 rdev->config.cayman.max_simds_per_se = 6; 754 rdev->config.cayman.max_simds_per_se = 6;
478 rdev->config.cayman.max_backends_per_se = 2; 755 rdev->config.cayman.max_backends_per_se = 2;
479 } else if ((rdev->pdev->device == 0x9903) || 756 } else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
482 (rdev->pdev->device == 0x990D) || 759 (rdev->pdev->device == 0x990D) ||
483 (rdev->pdev->device == 0x990E) || 760 (rdev->pdev->device == 0x990E) ||
484 (rdev->pdev->device == 0x9913) || 761 (rdev->pdev->device == 0x9913) ||
485 (rdev->pdev->device == 0x9918)) { 762 (rdev->pdev->device == 0x9918) ||
763 (rdev->pdev->device == 0x999D)) {
486 rdev->config.cayman.max_simds_per_se = 4; 764 rdev->config.cayman.max_simds_per_se = 4;
487 rdev->config.cayman.max_backends_per_se = 2; 765 rdev->config.cayman.max_backends_per_se = 2;
488 } else if ((rdev->pdev->device == 0x9919) || 766 } else if ((rdev->pdev->device == 0x9919) ||
@@ -615,15 +893,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
615 } 893 }
616 /* enabled rbs are just the ones not disabled :) */ 894
617 disabled_rb_mask = tmp; 895 disabled_rb_mask = tmp;
896 tmp = 0;
897 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
898 tmp |= (1 << i);
899 /* if all the backends are disabled, fix it up here */
900 if ((disabled_rb_mask & tmp) == tmp) {
901 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
902 disabled_rb_mask &= ~(1 << i);
903 }
618 904
619 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 905 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
620 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 906 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
621 907
622 WREG32(GB_ADDR_CONFIG, gb_addr_config); 908 WREG32(GB_ADDR_CONFIG, gb_addr_config);
623 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 909 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
910 if (ASIC_IS_DCE6(rdev))
911 WREG32(DMIF_ADDR_CALC, gb_addr_config);
624 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 912 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
625 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 913 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
626 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 914 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
915 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
916 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
917 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
627 918
628 if ((rdev->config.cayman.max_backends_per_se == 1) && 919 if ((rdev->config.cayman.max_backends_per_se == 1) &&
629 (rdev->flags & RADEON_IS_IGP)) { 920 (rdev->flags & RADEON_IS_IGP)) {
@@ -931,6 +1222,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
931 radeon_ring_write(ring, 10); /* poll interval */ 1222 radeon_ring_write(ring, 10); /* poll interval */
932} 1223}
933 1224
1225void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
1226 struct radeon_ring *ring,
1227 struct radeon_semaphore *semaphore,
1228 bool emit_wait)
1229{
1230 uint64_t addr = semaphore->gpu_addr;
1231
1232 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
1233 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
1234
1235 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
1236 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
1237
1238 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
1239 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
1240}
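The 64-bit semaphore address is packed into two 20-bit register fields: ADDR_LOW carries bits 3-22 (the low 3 bits are implied zero, since the semaphore is 8-byte aligned) and ADDR_HIGH carries bits 23-42, giving a 43-bit address space. A standalone round-trip check (the address value is arbitrary):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr = 0x1234567890ULL & ~7ULL;	/* 8-byte aligned */
		uint32_t lo = (addr >> 3) & 0x000FFFFF;		/* bits  3..22 */
		uint32_t hi = (addr >> 23) & 0x000FFFFF;	/* bits 23..42 */

		/* holds for any 8-byte-aligned address below 2^43 */
		assert(addr == (((uint64_t)hi << 23) | ((uint64_t)lo << 3)));
		return 0;
	}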
1241
934static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1242static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
935{ 1243{
936 if (enable) 1244 if (enable)
@@ -1682,6 +1990,16 @@ static int cayman_startup(struct radeon_device *rdev)
1682 return r; 1990 return r;
1683 } 1991 }
1684 1992
1993 r = rv770_uvd_resume(rdev);
1994 if (!r) {
1995 r = radeon_fence_driver_start_ring(rdev,
1996 R600_RING_TYPE_UVD_INDEX);
1997 if (r)
1998 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
1999 }
2000 if (r)
2001 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
2002
1685 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); 2003 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1686 if (r) { 2004 if (r) {
1687 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 2005 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1748,6 +2066,18 @@ static int cayman_startup(struct radeon_device *rdev)
1748 if (r) 2066 if (r)
1749 return r; 2067 return r;
1750 2068
2069 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2070 if (ring->ring_size) {
2071 r = radeon_ring_init(rdev, ring, ring->ring_size,
2072 R600_WB_UVD_RPTR_OFFSET,
2073 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
2074 0, 0xfffff, RADEON_CP_PACKET2);
2075 if (!r)
2076 r = r600_uvd_init(rdev);
2077 if (r)
2078 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2079 }
2080
1751 r = radeon_ib_pool_init(rdev); 2081 r = radeon_ib_pool_init(rdev);
1752 if (r) { 2082 if (r) {
1753 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2083 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1778,6 +2108,9 @@ int cayman_resume(struct radeon_device *rdev)
1778 /* post card */ 2108 /* post card */
1779 atom_asic_init(rdev->mode_info.atom_context); 2109 atom_asic_init(rdev->mode_info.atom_context);
1780 2110
2111 /* init golden registers */
2112 ni_init_golden_registers(rdev);
2113
1781 rdev->accel_working = true; 2114 rdev->accel_working = true;
1782 r = cayman_startup(rdev); 2115 r = cayman_startup(rdev);
1783 if (r) { 2116 if (r) {
@@ -1794,6 +2127,8 @@ int cayman_suspend(struct radeon_device *rdev)
1794 radeon_vm_manager_fini(rdev); 2127 radeon_vm_manager_fini(rdev);
1795 cayman_cp_enable(rdev, false); 2128 cayman_cp_enable(rdev, false);
1796 cayman_dma_stop(rdev); 2129 cayman_dma_stop(rdev);
2130 r600_uvd_rbc_stop(rdev);
2131 radeon_uvd_suspend(rdev);
1797 evergreen_irq_suspend(rdev); 2132 evergreen_irq_suspend(rdev);
1798 radeon_wb_disable(rdev); 2133 radeon_wb_disable(rdev);
1799 cayman_pcie_gart_disable(rdev); 2134 cayman_pcie_gart_disable(rdev);
@@ -1834,6 +2169,8 @@ int cayman_init(struct radeon_device *rdev)
1834 DRM_INFO("GPU not posted. posting now...\n"); 2169 DRM_INFO("GPU not posted. posting now...\n");
1835 atom_asic_init(rdev->mode_info.atom_context); 2170 atom_asic_init(rdev->mode_info.atom_context);
1836 } 2171 }
2172 /* init golden registers */
2173 ni_init_golden_registers(rdev);
1837 /* Initialize scratch registers */ 2174 /* Initialize scratch registers */
1838 r600_scratch_init(rdev); 2175 r600_scratch_init(rdev);
1839 /* Initialize surface registers */ 2176 /* Initialize surface registers */
@@ -1868,6 +2205,13 @@ int cayman_init(struct radeon_device *rdev)
1868 ring->ring_obj = NULL; 2205 ring->ring_obj = NULL;
1869 r600_ring_init(rdev, ring, 64 * 1024); 2206 r600_ring_init(rdev, ring, 64 * 1024);
1870 2207
2208 r = radeon_uvd_init(rdev);
2209 if (!r) {
2210 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2211 ring->ring_obj = NULL;
2212 r600_ring_init(rdev, ring, 4096);
2213 }
2214
1871 rdev->ih.ring_obj = NULL; 2215 rdev->ih.ring_obj = NULL;
1872 r600_ih_ring_init(rdev, 64 * 1024); 2216 r600_ih_ring_init(rdev, 64 * 1024);
1873 2217
@@ -1919,6 +2263,7 @@ void cayman_fini(struct radeon_device *rdev)
1919 radeon_vm_manager_fini(rdev); 2263 radeon_vm_manager_fini(rdev);
1920 radeon_ib_pool_fini(rdev); 2264 radeon_ib_pool_fini(rdev);
1921 radeon_irq_kms_fini(rdev); 2265 radeon_irq_kms_fini(rdev);
2266 radeon_uvd_fini(rdev);
1922 cayman_pcie_gart_fini(rdev); 2267 cayman_pcie_gart_fini(rdev);
1923 r600_vram_scratch_fini(rdev); 2268 r600_vram_scratch_fini(rdev);
1924 radeon_gem_fini(rdev); 2269 radeon_gem_fini(rdev);
@@ -2017,28 +2362,57 @@ void cayman_vm_set_page(struct radeon_device *rdev,
2017 } 2362 }
2018 } 2363 }
2019 } else { 2364 } else {
2020 while (count) { 2365 if ((flags & RADEON_VM_PAGE_SYSTEM) ||
2021 ndw = count * 2; 2366 (count == 1)) {
2022 if (ndw > 0xFFFFE) 2367 while (count) {
2023 ndw = 0xFFFFE; 2368 ndw = count * 2;
2369 if (ndw > 0xFFFFE)
2370 ndw = 0xFFFFE;
2371
2372 /* for non-physically contiguous pages (system) */
2373 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2374 ib->ptr[ib->length_dw++] = pe;
2375 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2376 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2377 if (flags & RADEON_VM_PAGE_SYSTEM) {
2378 value = radeon_vm_map_gart(rdev, addr);
2379 value &= 0xFFFFFFFFFFFFF000ULL;
2380 } else if (flags & RADEON_VM_PAGE_VALID) {
2381 value = addr;
2382 } else {
2383 value = 0;
2384 }
2385 addr += incr;
2386 value |= r600_flags;
2387 ib->ptr[ib->length_dw++] = value;
2388 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2389 }
2390 }
2391 while (ib->length_dw & 0x7)
2392 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2393 } else {
2394 while (count) {
2395 ndw = count * 2;
2396 if (ndw > 0xFFFFE)
2397 ndw = 0xFFFFE;
2024 2398
2025 /* for non-physically contiguous pages (system) */ 2399 if (flags & RADEON_VM_PAGE_VALID)
2026 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2027 ib->ptr[ib->length_dw++] = pe;
2028 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2029 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2030 if (flags & RADEON_VM_PAGE_SYSTEM) {
2031 value = radeon_vm_map_gart(rdev, addr);
2032 value &= 0xFFFFFFFFFFFFF000ULL;
2033 } else if (flags & RADEON_VM_PAGE_VALID) {
2034 value = addr; 2400 value = addr;
2035 } else { 2401 else
2036 value = 0; 2402 value = 0;
2037 } 2403 /* for physically contiguous pages (vram) */
2038 addr += incr; 2404 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
2039 value |= r600_flags; 2405 ib->ptr[ib->length_dw++] = pe; /* dst addr */
2040 ib->ptr[ib->length_dw++] = value; 2406 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2407 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
2408 ib->ptr[ib->length_dw++] = 0;
2409 ib->ptr[ib->length_dw++] = value; /* value */
2041 ib->ptr[ib->length_dw++] = upper_32_bits(value); 2410 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2411 ib->ptr[ib->length_dw++] = incr; /* increment size */
2412 ib->ptr[ib->length_dw++] = 0;
2413 pe += ndw * 4;
2414 addr += (ndw / 2) * incr;
2415 count -= ndw / 2;
2042 } 2416 }
2043 } 2417 }
2044 while (ib->length_dw & 0x7) 2418 while (ib->length_dw & 0x7)
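The new VRAM path trades the linear WRITE packet for the PTE_PDE packet, which has the DMA engine generate page table entries from a base value plus an increment. The dword accounting makes the win clear (our numbers, not from the patch): mapping 512 physically contiguous 4 KiB pages gives ndw = 1024, so

	/* WRITE packet:   3 + ndw = 1027 ib dwords (one 64-bit PTE per pair)
	 * PTE_PDE packet: 9 ib dwords total -- header, dst addr (2),
	 *                 mask (2), starting value (2), increment (2);
	 *                 the engine derives the other 511 entries itself. */

The bookkeeping after the packet matches: ndw dwords describe ndw/2 eight-byte PTEs, hence pe += ndw * 4, addr += (ndw / 2) * incr, count -= ndw / 2.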
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 079dee202a9e..e226faf16fea 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
46 46
47#define DMIF_ADDR_CONFIG 0xBD4 47#define DMIF_ADDR_CONFIG 0xBD4
48
49/* DCE6 only */
50#define DMIF_ADDR_CALC 0xC00
51
48#define SRBM_GFX_CNTL 0x0E44 52#define SRBM_GFX_CNTL 0x0E44
49#define RINGID(x) (((x) & 0x3) << 0) 53#define RINGID(x) (((x) & 0x3) << 0)
50#define VMID(x) (((x) & 0x7) << 0) 54#define VMID(x) (((x) & 0x7) << 0)
@@ -486,6 +490,18 @@
486# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) 490# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
487 491
488/* 492/*
493 * UVD
494 */
495#define UVD_SEMA_ADDR_LOW 0xEF00
496#define UVD_SEMA_ADDR_HIGH 0xEF04
497#define UVD_SEMA_CMD 0xEF08
498#define UVD_UDEC_ADDR_CONFIG 0xEF4C
499#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
500#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
501#define UVD_RBC_RB_RPTR 0xF690
502#define UVD_RBC_RB_WPTR 0xF694
503
504/*
489 * PM4 505 * PM4
490 */ 506 */
491#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 507#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
@@ -668,6 +684,11 @@
668 (((vmid) & 0xF) << 20) | \ 684 (((vmid) & 0xF) << 20) | \
669 (((n) & 0xFFFFF) << 0)) 685 (((n) & 0xFFFFF) << 0))
670 686
687#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
688 (1 << 26) | \
689 (1 << 21) | \
690 (((n) & 0xFFFFF) << 0))
691
671/* async DMA Packet types */ 692/* async DMA Packet types */
672#define DMA_PACKET_WRITE 0x2 693#define DMA_PACKET_WRITE 0x2
673#define DMA_PACKET_COPY 0x3 694#define DMA_PACKET_COPY 0x3
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9db58530be37..4973bff37fec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
69 * and others in some cases. 69 * and others in some cases.
70 */ 70 */
71 71
72static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
73{
74 if (crtc == 0) {
75 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
76 return true;
77 else
78 return false;
79 } else {
80 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
81 return true;
82 else
83 return false;
84 }
85}
86
87static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
88{
89 u32 vline1, vline2;
90
91 if (crtc == 0) {
92 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
93 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
94 } else {
95 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
96 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
97 }
98 if (vline1 != vline2)
99 return true;
100 else
101 return false;
102}
103
72/** 104/**
73 * r100_wait_for_vblank - vblank wait asic callback. 105 * r100_wait_for_vblank - vblank wait asic callback.
74 * 106 *
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
79 */ 111 */
80void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) 112void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
81{ 113{
82 int i; 114 unsigned i = 0;
83 115
84 if (crtc >= rdev->num_crtc) 116 if (crtc >= rdev->num_crtc)
85 return; 117 return;
86 118
87 if (crtc == 0) { 119 if (crtc == 0) {
88 if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { 120 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
89 for (i = 0; i < rdev->usec_timeout; i++) { 121 return;
90 if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
91 break;
92 udelay(1);
93 }
94 for (i = 0; i < rdev->usec_timeout; i++) {
95 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
96 break;
97 udelay(1);
98 }
99 }
100 } else { 122 } else {
101 if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) { 123 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
102 for (i = 0; i < rdev->usec_timeout; i++) { 124 return;
103 if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)) 125 }
104 break; 126
105 udelay(1); 127 /* depending on when we hit vblank, we may be close to active; if so,
106 } 128 * wait for another frame.
107 for (i = 0; i < rdev->usec_timeout; i++) { 129 */
108 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) 130 while (r100_is_in_vblank(rdev, crtc)) {
109 break; 131 if (i++ % 100 == 0) {
110 udelay(1); 132 if (!r100_is_counter_moving(rdev, crtc))
111 } 133 break;
134 }
135 }
136
137 while (!r100_is_in_vblank(rdev, crtc)) {
138 if (i++ % 100 == 0) {
139 if (!r100_is_counter_moving(rdev, crtc))
140 break;
112 } 141 }
113 } 142 }
114} 143}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index c0dc8d3ba0bb..1dd0d32993d5 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -358,7 +358,9 @@
358#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac 358#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
359#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 359#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
360 360
361#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
361#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 362#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
363#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
362 364
363/* master controls */ 365/* master controls */
364#define AVIVO_DC_CRTC_MASTER_EN 0x60f8 366#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0740db3fcd22..1a08008c978b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1145,7 +1145,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
1145 } 1145 }
1146 if (rdev->flags & RADEON_IS_AGP) { 1146 if (rdev->flags & RADEON_IS_AGP) {
1147 size_bf = mc->gtt_start; 1147 size_bf = mc->gtt_start;
1148 size_af = 0xFFFFFFFF - mc->gtt_end; 1148 size_af = mc->mc_mask - mc->gtt_end;
1149 if (size_bf > size_af) { 1149 if (size_bf > size_af) {
1150 if (mc->mc_vram_size > size_bf) { 1150 if (mc->mc_vram_size > size_bf) {
1151 dev_warn(rdev->dev, "limiting VRAM\n"); 1151 dev_warn(rdev->dev, "limiting VRAM\n");
@@ -2552,6 +2552,193 @@ void r600_dma_fini(struct radeon_device *rdev)
2552} 2552}
2553 2553
2554/* 2554/*
2555 * UVD
2556 */
2557int r600_uvd_rbc_start(struct radeon_device *rdev)
2558{
2559 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2560 uint64_t rptr_addr;
2561 uint32_t rb_bufsz, tmp;
2562 int r;
2563
2564 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
2565
2566 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
2567 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
2568 return -EINVAL;
2569 }
2570
2571 /* force RBC into idle state */
2572 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2573
2574 /* Set the write pointer delay */
2575 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
2576
2577 /* set the wb address */
2578 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
2579
 2580 /* program the 4GB memory segment for rptr and ring buffer */
2581 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
2582 (0x7 << 16) | (0x1 << 31));
2583
2584 /* Initialize the ring buffer's read and write pointers */
2585 WREG32(UVD_RBC_RB_RPTR, 0x0);
2586
2587 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
2588 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
2589
2590 /* set the ring address */
2591 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2592
2593 /* Set ring buffer size */
2594 rb_bufsz = drm_order(ring->ring_size);
2595 rb_bufsz = (0x1 << 8) | rb_bufsz;
2596 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2597
2598 ring->ready = true;
2599 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
2600 if (r) {
2601 ring->ready = false;
2602 return r;
2603 }
2604
2605 r = radeon_ring_lock(rdev, ring, 10);
2606 if (r) {
2607 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
2608 return r;
2609 }
2610
2611 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
2612 radeon_ring_write(ring, tmp);
2613 radeon_ring_write(ring, 0xFFFFF);
2614
2615 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
2616 radeon_ring_write(ring, tmp);
2617 radeon_ring_write(ring, 0xFFFFF);
2618
2619 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
2620 radeon_ring_write(ring, tmp);
2621 radeon_ring_write(ring, 0xFFFFF);
2622
2623 /* Clear timeout status bits */
2624 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
2625 radeon_ring_write(ring, 0x8);
2626
2627 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
2628 radeon_ring_write(ring, 3);
2629
2630 radeon_ring_unlock_commit(rdev, ring);
2631
2632 return 0;
2633}
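The ring size handed to UVD_RBC_RB_CNTL above is a log2 value: drm_order() returns the smallest order with (1 << order) >= size, and the driver then ORs in bit 8 (its meaning is not spelled out in this diff, so treat that bit as opaque). A small, runnable illustration of the size encoding, where order() is a stand-in for drm_order():

#include <stdio.h>

/* stand-in for the kernel's drm_order(): smallest n with (1 << n) >= size */
static unsigned order(unsigned long size)
{
	unsigned n = 0;

	while ((1UL << n) < size)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 4096;	/* hypothetical ring size in bytes */
	unsigned rb_bufsz = order(ring_size);

	/* as in r600_uvd_rbc_start(): log2 size in the low bits, bit 8 set */
	printf("UVD_RBC_RB_CNTL = 0x%x\n", (0x1u << 8) | rb_bufsz);
	return 0;
}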
2634
2635void r600_uvd_rbc_stop(struct radeon_device *rdev)
2636{
2637 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2638
2639 /* force RBC into idle state */
2640 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2641 ring->ready = false;
2642}
2643
2644int r600_uvd_init(struct radeon_device *rdev)
2645{
2646 int i, j, r;
2647
2648 /* raise clocks while booting up the VCPU */
2649 radeon_set_uvd_clocks(rdev, 53300, 40000);
2650
2651 /* disable clock gating */
2652 WREG32(UVD_CGC_GATE, 0);
2653
 2654 /* disable interrupt */
2655 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2656
2657 /* put LMI, VCPU, RBC etc... into reset */
2658 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2659 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
2660 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
2661 mdelay(5);
2662
2663 /* take UVD block out of reset */
2664 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
2665 mdelay(5);
2666
2667 /* initialize UVD memory controller */
2668 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2669 (1 << 21) | (1 << 9) | (1 << 20));
2670
2671 /* disable byte swapping */
2672 WREG32(UVD_LMI_SWAP_CNTL, 0);
2673 WREG32(UVD_MP_SWAP_CNTL, 0);
2674
2675 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2676 WREG32(UVD_MPC_SET_MUXA1, 0x0);
2677 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
2678 WREG32(UVD_MPC_SET_MUXB1, 0x0);
2679 WREG32(UVD_MPC_SET_ALU, 0);
2680 WREG32(UVD_MPC_SET_MUX, 0x88);
2681
2682 /* Stall UMC */
2683 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2684 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2685
2686 /* take all subblocks out of reset, except VCPU */
2687 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2688 mdelay(5);
2689
2690 /* enable VCPU clock */
2691 WREG32(UVD_VCPU_CNTL, 1 << 9);
2692
2693 /* enable UMC */
2694 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2695
2696 /* boot up the VCPU */
2697 WREG32(UVD_SOFT_RESET, 0);
2698 mdelay(10);
2699
2700 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2701
2702 for (i = 0; i < 10; ++i) {
2703 uint32_t status;
2704 for (j = 0; j < 100; ++j) {
2705 status = RREG32(UVD_STATUS);
2706 if (status & 2)
2707 break;
2708 mdelay(10);
2709 }
2710 r = 0;
2711 if (status & 2)
2712 break;
2713
2714 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
2715 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
2716 mdelay(10);
2717 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
2718 mdelay(10);
2719 r = -1;
2720 }
2721
2722 if (r) {
2723 DRM_ERROR("UVD not responding, giving up!!!\n");
2724 radeon_set_uvd_clocks(rdev, 0, 0);
2725 return r;
2726 }
2727
 2728 /* enable interrupt */
2729 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
2730
2731 r = r600_uvd_rbc_start(rdev);
2732 if (!r)
2733 DRM_INFO("UVD initialized successfully.\n");
2734
2735 /* lower clocks again */
2736 radeon_set_uvd_clocks(rdev, 0, 0);
2737
2738 return r;
2739}
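r600_uvd_init() raises the UVD clocks, sequences the block out of reset, then polls UVD_STATUS for up to a second (100 x 10 ms) per attempt, pulsing VCPU_SOFT_RESET between up to 10 attempts before giving up. The clock arguments (53300, 40000) follow radeon's usual 10 kHz units, i.e. 533 MHz VCLK / 400 MHz DCLK; that is an inference from the driver's clock conventions, not stated in the diff. The retry skeleton in isolation, with the hardware operations stubbed:

#include <stdbool.h>

extern bool vcpu_booted(void);		/* reads UVD_STATUS in the driver */
extern void vcpu_reset_pulse(void);	/* toggles VCPU_SOFT_RESET */
extern void sleep_ms(unsigned ms);

/* returns 0 once the VCPU reports ready, -1 after 10 failed boot attempts */
static int wait_vcpu_boot(void)
{
	int i, j;

	for (i = 0; i < 10; i++) {
		for (j = 0; j < 100; j++) {
			if (vcpu_booted())
				return 0;
			sleep_ms(10);
		}
		vcpu_reset_pulse();	/* kick the VCPU and retry */
	}
	return -1;
}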
2740
2741/*
2555 * GPU scratch registers helpers function. 2742 * GPU scratch registers helpers function.
2556 */ 2743 */
2557void r600_scratch_init(struct radeon_device *rdev) 2744void r600_scratch_init(struct radeon_device *rdev)
@@ -2660,6 +2847,40 @@ int r600_dma_ring_test(struct radeon_device *rdev,
2660 return r; 2847 return r;
2661} 2848}
2662 2849
2850int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2851{
2852 uint32_t tmp = 0;
2853 unsigned i;
2854 int r;
2855
2856 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
2857 r = radeon_ring_lock(rdev, ring, 3);
2858 if (r) {
2859 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
2860 ring->idx, r);
2861 return r;
2862 }
2863 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2864 radeon_ring_write(ring, 0xDEADBEEF);
2865 radeon_ring_unlock_commit(rdev, ring);
2866 for (i = 0; i < rdev->usec_timeout; i++) {
2867 tmp = RREG32(UVD_CONTEXT_ID);
2868 if (tmp == 0xDEADBEEF)
2869 break;
2870 DRM_UDELAY(1);
2871 }
2872
2873 if (i < rdev->usec_timeout) {
2874 DRM_INFO("ring test on %d succeeded in %d usecs\n",
2875 ring->idx, i);
2876 } else {
2877 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2878 ring->idx, tmp);
2879 r = -EINVAL;
2880 }
2881 return r;
2882}
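The UVD ring test follows the standard radeon scratch pattern: seed a CPU-visible register (UVD_CONTEXT_ID here) with a sentinel, submit a single PACKET0 write of 0xDEADBEEF through the ring, then poll until the engine's write lands or the timeout expires. The pattern in isolation, with register access stubbed out:

#include <stdint.h>

extern void reg_write(uint32_t v);	   /* WREG32(UVD_CONTEXT_ID, v) */
extern uint32_t reg_read(void);		   /* RREG32(UVD_CONTEXT_ID) */
extern void ring_submit_write(uint32_t v); /* PACKET0 write via the ring */
extern void delay_us(unsigned us);

/* returns 0 if the engine executed our write within timeout_us, else -1 */
static int ring_test(unsigned timeout_us)
{
	unsigned i;

	reg_write(0xCAFEDEAD);		/* seed directly from the CPU */
	ring_submit_write(0xDEADBEEF);	/* ask the engine to overwrite it */

	for (i = 0; i < timeout_us; i++) {
		if (reg_read() == 0xDEADBEEF)
			return 0;	/* ring is alive and executing */
		delay_us(1);
	}
	return -1;
}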
2883
2663/* 2884/*
2664 * CP fences/semaphores 2885 * CP fences/semaphores
 2665 */ 2886 */
@@ -2711,6 +2932,30 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2711 } 2932 }
2712} 2933}
2713 2934
2935void r600_uvd_fence_emit(struct radeon_device *rdev,
2936 struct radeon_fence *fence)
2937{
2938 struct radeon_ring *ring = &rdev->ring[fence->ring];
2939 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
2940
2941 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2942 radeon_ring_write(ring, fence->seq);
2943 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2944 radeon_ring_write(ring, addr & 0xffffffff);
2945 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2946 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2947 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2948 radeon_ring_write(ring, 0);
2949
2950 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2951 radeon_ring_write(ring, 0);
2952 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2953 radeon_ring_write(ring, 0);
2954 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2955 radeon_ring_write(ring, 2);
2956 return;
2957}
2958
2714void r600_semaphore_ring_emit(struct radeon_device *rdev, 2959void r600_semaphore_ring_emit(struct radeon_device *rdev,
2715 struct radeon_ring *ring, 2960 struct radeon_ring *ring,
2716 struct radeon_semaphore *semaphore, 2961 struct radeon_semaphore *semaphore,
@@ -2780,6 +3025,23 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
2780 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 3025 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2781} 3026}
2782 3027
3028void r600_uvd_semaphore_emit(struct radeon_device *rdev,
3029 struct radeon_ring *ring,
3030 struct radeon_semaphore *semaphore,
3031 bool emit_wait)
3032{
3033 uint64_t addr = semaphore->gpu_addr;
3034
3035 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
3036 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
3037
3038 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
3039 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
3040
3041 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
3042 radeon_ring_write(ring, emit_wait ? 1 : 0);
3043}
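The semaphore address is 8-byte aligned, so the low 3 bits carry no information; the hardware takes the remaining bits as two 20-bit fields, (addr >> 3) & 0xFFFFF and (addr >> 23) & 0xFFFFF, which together cover a 43-bit address range. A runnable check that the split is lossless (the address value is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x123456789ULL & ~7ULL;	/* hypothetical, 8-byte aligned */
	uint32_t lo = (addr >> 3) & 0x000FFFFF;
	uint32_t hi = (addr >> 23) & 0x000FFFFF;
	uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);

	printf("addr=0x%llx lo=0x%05x hi=0x%05x reassembled=0x%llx\n",
	       (unsigned long long)addr, lo, hi, (unsigned long long)back);
	return 0;
}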
3044
2783int r600_copy_blit(struct radeon_device *rdev, 3045int r600_copy_blit(struct radeon_device *rdev,
2784 uint64_t src_offset, 3046 uint64_t src_offset,
2785 uint64_t dst_offset, 3047 uint64_t dst_offset,
@@ -3183,6 +3445,16 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3183 radeon_ring_write(ring, ib->length_dw); 3445 radeon_ring_write(ring, ib->length_dw);
3184} 3446}
3185 3447
3448void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3449{
3450 struct radeon_ring *ring = &rdev->ring[ib->ring];
3451
3452 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
3453 radeon_ring_write(ring, ib->gpu_addr);
3454 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
3455 radeon_ring_write(ring, ib->length_dw);
3456}
3457
3186int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3458int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3187{ 3459{
3188 struct radeon_ib ib; 3460 struct radeon_ib ib;
@@ -3300,6 +3572,41 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3300 return r; 3572 return r;
3301} 3573}
3302 3574
3575int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3576{
3577 struct radeon_fence *fence = NULL;
3578 int r;
3579
3580 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
3581 if (r) {
3582 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
3583 return r;
3584 }
3585
3586 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
3587 if (r) {
3588 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
3589 goto error;
3590 }
3591
3592 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
3593 if (r) {
3594 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
3595 goto error;
3596 }
3597
3598 r = radeon_fence_wait(fence, false);
3599 if (r) {
3600 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3601 goto error;
3602 }
3603 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
3604error:
3605 radeon_fence_unref(&fence);
3606 radeon_set_uvd_clocks(rdev, 0, 0);
3607 return r;
3608}
3609
3303/** 3610/**
3304 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine 3611 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3305 * 3612 *
@@ -4232,7 +4539,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4232 4539
4233void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4540void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4234{ 4541{
4235 u32 link_width_cntl, mask, target_reg; 4542 u32 link_width_cntl, mask;
4236 4543
4237 if (rdev->flags & RADEON_IS_IGP) 4544 if (rdev->flags & RADEON_IS_IGP)
4238 return; 4545 return;
@@ -4244,7 +4551,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4244 if (ASIC_IS_X2(rdev)) 4551 if (ASIC_IS_X2(rdev))
4245 return; 4552 return;
4246 4553
4247 /* FIXME wait for idle */ 4554 radeon_gui_idle(rdev);
4248 4555
4249 switch (lanes) { 4556 switch (lanes) {
4250 case 0: 4557 case 0:
@@ -4263,53 +4570,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4263 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4570 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4264 break; 4571 break;
4265 case 12: 4572 case 12:
4573 /* not actually supported */
4266 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4574 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4267 break; 4575 break;
4268 case 16: 4576 case 16:
4269 default:
4270 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4577 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4271 break; 4578 break;
4272 } 4579 default:
4273 4580 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4274 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4275
4276 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
4277 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
4278 return;
4279
4280 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
4281 return; 4581 return;
4582 }
4282 4583
4283 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | 4584 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4284 RADEON_PCIE_LC_RECONFIG_NOW | 4585 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4285 R600_PCIE_LC_RENEGOTIATE_EN | 4586 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4286 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4587 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4287 link_width_cntl |= mask; 4588 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4288
4289 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4290
4291 /* some northbridges can renegotiate the link rather than requiring
4292 * a complete re-config.
4293 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
4294 */
4295 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
4296 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
4297 else
4298 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
4299
4300 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
4301 RADEON_PCIE_LC_RECONFIG_NOW));
4302
4303 if (rdev->family >= CHIP_RV770)
4304 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
4305 else
4306 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
4307
4308 /* wait for lane set to complete */
4309 link_width_cntl = RREG32(target_reg);
4310 while (link_width_cntl == 0xffffffff)
4311 link_width_cntl = RREG32(target_reg);
4312 4589
4590 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4313} 4591}
4314 4592
4315int r600_get_pcie_lanes(struct radeon_device *rdev) 4593int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4326,13 +4604,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
4326 if (ASIC_IS_X2(rdev)) 4604 if (ASIC_IS_X2(rdev))
4327 return 0; 4605 return 0;
4328 4606
4329 /* FIXME wait for idle */ 4607 radeon_gui_idle(rdev);
4330 4608
4331 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4609 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4332 4610
4333 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4611 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4334 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4335 return 0;
4336 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4612 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4337 return 1; 4613 return 1;
4338 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4614 case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4341,6 +4617,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
4341 return 4; 4617 return 4;
4342 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4618 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4343 return 8; 4619 return 8;
4620 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4621 /* not actually supported */
4622 return 12;
4623 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4344 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4624 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4345 default: 4625 default:
4346 return 16; 4626 return 16;
@@ -4378,7 +4658,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4378 if (!(mask & DRM_PCIE_SPEED_50)) 4658 if (!(mask & DRM_PCIE_SPEED_50))
4379 return; 4659 return;
4380 4660
4381 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4661 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4382 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4662 if (speed_cntl & LC_CURRENT_DATA_RATE) {
4383 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 4663 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4384 return; 4664 return;
@@ -4391,23 +4671,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4391 (rdev->family == CHIP_RV620) || 4671 (rdev->family == CHIP_RV620) ||
4392 (rdev->family == CHIP_RV635)) { 4672 (rdev->family == CHIP_RV635)) {
4393 /* advertise upconfig capability */ 4673 /* advertise upconfig capability */
4394 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4674 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4395 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4675 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4396 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4676 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4397 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4677 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4398 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4678 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4399 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4679 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4400 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4680 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4401 LC_RECONFIG_ARC_MISSING_ESCAPE); 4681 LC_RECONFIG_ARC_MISSING_ESCAPE);
4402 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4682 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4403 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4683 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4404 } else { 4684 } else {
4405 link_width_cntl |= LC_UPCONFIGURE_DIS; 4685 link_width_cntl |= LC_UPCONFIGURE_DIS;
4406 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4686 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4407 } 4687 }
4408 } 4688 }
4409 4689
4410 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4690 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4411 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4691 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4412 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4692 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4413 4693
@@ -4428,7 +4708,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4428 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4708 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4429 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4709 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4430 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4710 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4431 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4711 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4432 4712
4433 tmp = RREG32(0x541c); 4713 tmp = RREG32(0x541c);
4434 WREG32(0x541c, tmp | 0x8); 4714 WREG32(0x541c, tmp | 0x8);
@@ -4442,27 +4722,27 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4442 if ((rdev->family == CHIP_RV670) || 4722 if ((rdev->family == CHIP_RV670) ||
4443 (rdev->family == CHIP_RV620) || 4723 (rdev->family == CHIP_RV620) ||
4444 (rdev->family == CHIP_RV635)) { 4724 (rdev->family == CHIP_RV635)) {
4445 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); 4725 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4446 training_cntl &= ~LC_POINT_7_PLUS_EN; 4726 training_cntl &= ~LC_POINT_7_PLUS_EN;
4447 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); 4727 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4448 } else { 4728 } else {
4449 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4729 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4450 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4730 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4451 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4731 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4452 } 4732 }
4453 4733
4454 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4734 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4455 speed_cntl |= LC_GEN2_EN_STRAP; 4735 speed_cntl |= LC_GEN2_EN_STRAP;
4456 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4736 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4457 4737
4458 } else { 4738 } else {
4459 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4739 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4460 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4740 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4461 if (1) 4741 if (1)
4462 link_width_cntl |= LC_UPCONFIGURE_DIS; 4742 link_width_cntl |= LC_UPCONFIGURE_DIS;
4463 else 4743 else
4464 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4744 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4465 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4745 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4466 } 4746 }
4467} 4747}
4468 4748
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index cb03fe22b0ab..c92eb86a8e55 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev)) 60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
61 || rdev->family == CHIP_RS600
62 || rdev->family == CHIP_RS690
63 || rdev->family == CHIP_RS740;
64} 61}
65 62
66struct r600_audio r600_audio_status(struct radeon_device *rdev) 63struct r600_audio r600_audio_status(struct radeon_device *rdev)
@@ -184,65 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
184} 181}
185 182
186/* 183/*
187 * atach the audio codec to the clock source of the encoder
188 */
189void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
190{
191 struct drm_device *dev = encoder->dev;
192 struct radeon_device *rdev = dev->dev_private;
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
195 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
196 int base_rate = 48000;
197
198 switch (radeon_encoder->encoder_id) {
199 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
200 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
201 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
202 break;
203 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
204 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
205 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
206 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
207 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
208 break;
209 default:
210 dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
211 radeon_encoder->encoder_id);
212 return;
213 }
214
215 if (ASIC_IS_DCE4(rdev)) {
216 /* TODO: other PLLs? */
217 WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
218 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
219 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
220
221 /* Select DTO source */
222 WREG32(0x5ac, radeon_crtc->crtc_id);
223 } else {
224 switch (dig->dig_encoder) {
225 case 0:
226 WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
227 WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
228 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
229 break;
230
231 case 1:
232 WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
233 WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
234 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
235 break;
236 default:
237 dev_err(rdev->dev,
238 "Unsupported DIG on encoder 0x%02X\n",
239 radeon_encoder->encoder_id);
240 return;
241 }
242 }
243}
244
245/*
246 * release the audio timer 184 * release the audio timer
247 * TODO: How to do this correctly on SMP systems? 185 * TODO: How to do this correctly on SMP systems?
248 */ 186 */
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21ecc0e12dc4..47f180a79352 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,6 +226,39 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
226 value, ~HDMI0_AUDIO_TEST_EN); 226 value, ~HDMI0_AUDIO_TEST_EN);
227} 227}
228 228
229void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
230{
231 struct drm_device *dev = encoder->dev;
232 struct radeon_device *rdev = dev->dev_private;
233 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
234 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
235 u32 base_rate = 48000;
236
237 if (!dig || !dig->afmt)
238 return;
239
240 /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
241 * doesn't matter which one you use. Just use the first one.
242 */
243 /* XXX: properly calculate this */
244 /* XXX two dtos; generally use dto0 for hdmi */
245 /* Express [24MHz / target pixel clock] as an exact rational
 246 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
 247 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
248 */
249 if (ASIC_IS_DCE3(rdev)) {
 250 /* according to the reg specs, this should be DCE3.2 only, but in
251 * practice it seems to cover DCE3.0 as well.
252 */
253 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
254 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
255 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
256 } else {
257 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
258 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
259 AUDIO_DTO_MODULE(clock * 100));
260 }
261}
229 262
230/* 263/*
231 * update the info frames with the data from the current display mode 264 * update the info frames with the data from the current display mode
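With base_rate = 48000 and clock in kHz, the DTO programming above yields phase/module = (48000 * 50) / (clock * 100) = 24000 / clock, i.e. exactly the 24 MHz / pixel-clock ratio the comment describes. A quick numeric check (74.25 MHz pixel clock chosen as a hypothetical example):

#include <stdio.h>

int main(void)
{
	unsigned base_rate = 48000;		/* as in r600_audio_set_dto() */
	unsigned clock = 74250;			/* hypothetical mode clock, kHz */
	unsigned phase = base_rate * 50;	/* 2400000 */
	unsigned module = clock * 100;		/* 7425000 */

	/* phase/module should equal 24 MHz divided by the pixel clock */
	printf("phase=%u module=%u ratio=%f expected=%f\n",
	       phase, module, (double)phase / module, 24000.0 / clock);
	return 0;
}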
@@ -246,7 +279,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
246 return; 279 return;
247 offset = dig->afmt->offset; 280 offset = dig->afmt->offset;
248 281
249 r600_audio_set_clock(encoder, mode->clock); 282 r600_audio_set_dto(encoder, mode->clock);
250 283
251 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 284 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
252 HDMI0_NULL_SEND); /* send null packets when required */ 285 HDMI0_NULL_SEND); /* send null packets when required */
@@ -415,114 +448,73 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
415/* 448/*
416 * enable the HDMI engine 449 * enable the HDMI engine
417 */ 450 */
418void r600_hdmi_enable(struct drm_encoder *encoder) 451void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
419{ 452{
420 struct drm_device *dev = encoder->dev; 453 struct drm_device *dev = encoder->dev;
421 struct radeon_device *rdev = dev->dev_private; 454 struct radeon_device *rdev = dev->dev_private;
422 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 455 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
423 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 456 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
424 uint32_t offset; 457 u32 hdmi = HDMI0_ERROR_ACK;
425 u32 hdmi;
426
427 if (ASIC_IS_DCE6(rdev))
428 return;
429 458
430 /* Silent, r600_hdmi_enable will raise WARN for us */ 459 /* Silent, r600_hdmi_enable will raise WARN for us */
431 if (dig->afmt->enabled) 460 if (enable && dig->afmt->enabled)
461 return;
462 if (!enable && !dig->afmt->enabled)
432 return; 463 return;
433 offset = dig->afmt->offset;
434 464
435 /* Older chipsets require setting HDMI and routing manually */ 465 /* Older chipsets require setting HDMI and routing manually */
436 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 466 if (!ASIC_IS_DCE3(rdev)) {
437 hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE; 467 if (enable)
468 hdmi |= HDMI0_ENABLE;
438 switch (radeon_encoder->encoder_id) { 469 switch (radeon_encoder->encoder_id) {
439 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
440 WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN, 471 if (enable) {
441 ~AVIVO_TMDSA_CNTL_HDMI_EN); 472 WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
442 hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA); 473 hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
474 } else {
475 WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
476 }
443 break; 477 break;
444 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 478 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
445 WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN, 479 if (enable) {
446 ~AVIVO_LVTMA_CNTL_HDMI_EN); 480 WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
447 hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA); 481 hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
482 } else {
483 WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
484 }
448 break; 485 break;
449 case ENCODER_OBJECT_ID_INTERNAL_DDI: 486 case ENCODER_OBJECT_ID_INTERNAL_DDI:
450 WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN); 487 if (enable) {
451 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA); 488 WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
489 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
490 } else {
491 WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
492 }
452 break; 493 break;
453 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 494 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
454 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA); 495 if (enable)
496 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
455 break; 497 break;
456 default: 498 default:
457 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n", 499 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
458 radeon_encoder->encoder_id); 500 radeon_encoder->encoder_id);
459 break; 501 break;
460 } 502 }
461 WREG32(HDMI0_CONTROL + offset, hdmi); 503 WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
462 } 504 }
463 505
464 if (rdev->irq.installed) { 506 if (rdev->irq.installed) {
465 /* if irq is available use it */ 507 /* if irq is available use it */
466 radeon_irq_kms_enable_afmt(rdev, dig->afmt->id); 508 /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
509 if (enable)
510 radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
511 else
512 radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
467 } 513 }
468 514
469 dig->afmt->enabled = true; 515 dig->afmt->enabled = enable;
470 516
471 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", 517 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
472 offset, radeon_encoder->encoder_id); 518 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
473} 519}
474 520
475/*
476 * disable the HDMI engine
477 */
478void r600_hdmi_disable(struct drm_encoder *encoder)
479{
480 struct drm_device *dev = encoder->dev;
481 struct radeon_device *rdev = dev->dev_private;
482 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
484 uint32_t offset;
485
486 if (ASIC_IS_DCE6(rdev))
487 return;
488
489 /* Called for ATOM_ENCODER_MODE_HDMI only */
490 if (!dig || !dig->afmt) {
491 return;
492 }
493 if (!dig->afmt->enabled)
494 return;
495 offset = dig->afmt->offset;
496
497 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
498 offset, radeon_encoder->encoder_id);
499
500 /* disable irq */
501 radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
502
503 /* Older chipsets not handled by AtomBIOS */
504 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
505 switch (radeon_encoder->encoder_id) {
506 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
507 WREG32_P(AVIVO_TMDSA_CNTL, 0,
508 ~AVIVO_TMDSA_CNTL_HDMI_EN);
509 break;
510 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
511 WREG32_P(AVIVO_LVTMA_CNTL, 0,
512 ~AVIVO_LVTMA_CNTL_HDMI_EN);
513 break;
514 case ENCODER_OBJECT_ID_INTERNAL_DDI:
515 WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
516 break;
517 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
518 break;
519 default:
520 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
521 radeon_encoder->encoder_id);
522 break;
523 }
524 WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
525 }
526
527 dig->afmt->enabled = false;
528}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a42ba11a3bed..acb146c06973 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -691,6 +691,7 @@
691#define SRBM_SOFT_RESET 0xe60 691#define SRBM_SOFT_RESET 0xe60
692# define SOFT_RESET_DMA (1 << 12) 692# define SOFT_RESET_DMA (1 << 12)
693# define SOFT_RESET_RLC (1 << 13) 693# define SOFT_RESET_RLC (1 << 13)
694# define SOFT_RESET_UVD (1 << 18)
694# define RV770_SOFT_RESET_DMA (1 << 20) 695# define RV770_SOFT_RESET_DMA (1 << 20)
695 696
696#define CP_INT_CNTL 0xc124 697#define CP_INT_CNTL 0xc124
@@ -909,7 +910,12 @@
909# define TARGET_LINK_SPEED_MASK (0xf << 0) 910# define TARGET_LINK_SPEED_MASK (0xf << 0)
910# define SELECTABLE_DEEMPHASIS (1 << 6) 911# define SELECTABLE_DEEMPHASIS (1 << 6)
911 912
912/* Audio clocks */ 913/* Audio clocks DCE 2.0/3.0 */
914#define AUDIO_DTO 0x7340
915# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
916# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
917
918/* Audio clocks DCE 3.2 */
913#define DCCG_AUDIO_DTO0_PHASE 0x0514 919#define DCCG_AUDIO_DTO0_PHASE 0x0514
914#define DCCG_AUDIO_DTO0_MODULE 0x0518 920#define DCCG_AUDIO_DTO0_MODULE 0x0518
915#define DCCG_AUDIO_DTO0_LOAD 0x051c 921#define DCCG_AUDIO_DTO0_LOAD 0x051c
@@ -1143,6 +1149,70 @@
1143# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) 1149# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
1144 1150
1145/* 1151/*
1152 * UVD
1153 */
1154#define UVD_SEMA_ADDR_LOW 0xef00
1155#define UVD_SEMA_ADDR_HIGH 0xef04
1156#define UVD_SEMA_CMD 0xef08
1157
1158#define UVD_GPCOM_VCPU_CMD 0xef0c
1159#define UVD_GPCOM_VCPU_DATA0 0xef10
1160#define UVD_GPCOM_VCPU_DATA1 0xef14
1161#define UVD_ENGINE_CNTL 0xef18
1162
1163#define UVD_SEMA_CNTL 0xf400
1164#define UVD_RB_ARB_CTRL 0xf480
1165
1166#define UVD_LMI_EXT40_ADDR 0xf498
1167#define UVD_CGC_GATE 0xf4a8
1168#define UVD_LMI_CTRL2 0xf4f4
1169#define UVD_MASTINT_EN 0xf500
1170#define UVD_LMI_ADDR_EXT 0xf594
1171#define UVD_LMI_CTRL 0xf598
1172#define UVD_LMI_SWAP_CNTL 0xf5b4
1173#define UVD_MP_SWAP_CNTL 0xf5bC
1174#define UVD_MPC_CNTL 0xf5dC
1175#define UVD_MPC_SET_MUXA0 0xf5e4
1176#define UVD_MPC_SET_MUXA1 0xf5e8
1177#define UVD_MPC_SET_MUXB0 0xf5eC
1178#define UVD_MPC_SET_MUXB1 0xf5f0
1179#define UVD_MPC_SET_MUX 0xf5f4
1180#define UVD_MPC_SET_ALU 0xf5f8
1181
1182#define UVD_VCPU_CNTL 0xf660
1183#define UVD_SOFT_RESET 0xf680
1184#define RBC_SOFT_RESET (1<<0)
1185#define LBSI_SOFT_RESET (1<<1)
1186#define LMI_SOFT_RESET (1<<2)
1187#define VCPU_SOFT_RESET (1<<3)
1188#define CSM_SOFT_RESET (1<<5)
1189#define CXW_SOFT_RESET (1<<6)
1190#define TAP_SOFT_RESET (1<<7)
1191#define LMI_UMC_SOFT_RESET (1<<13)
1192#define UVD_RBC_IB_BASE 0xf684
1193#define UVD_RBC_IB_SIZE 0xf688
1194#define UVD_RBC_RB_BASE 0xf68c
1195#define UVD_RBC_RB_RPTR 0xf690
1196#define UVD_RBC_RB_WPTR 0xf694
1197#define UVD_RBC_RB_WPTR_CNTL 0xf698
1198
1199#define UVD_STATUS 0xf6bc
1200
1201#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
1202#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
1203#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
1204#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
1205
1206#define UVD_RBC_RB_CNTL 0xf6a4
1207#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
1208
1209#define UVD_CONTEXT_ID 0xf6f4
1210
1211# define UPLL_CTLREQ_MASK 0x00000008
1212# define UPLL_CTLACK_MASK 0x40000000
1213# define UPLL_CTLACK2_MASK 0x80000000
1214
1215/*
1146 * PM4 1216 * PM4
1147 */ 1217 */
1148#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 1218#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
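The PACKET0 definition is cut off by the hunk; radeon PM4 type-0 headers conventionally pack the packet type into bits 31:30, a dword count into bits 29:16 and the register offset in dwords into bits 15:0. The sketch below assumes that layout and is illustrative only, not a copy of the elided macro:

#include <stdint.h>
#include <stdio.h>

#define RADEON_PACKET_TYPE0 0

/* assumed type-0 header layout: type[31:30], count[29:16], (reg >> 2)[15:0] */
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) |	\
			 (((n) & 0x3FFF) << 16) |	\
			 (((reg) >> 2) & 0xFFFF))

int main(void)
{
	/* e.g. the single-register write used by the UVD ring test above */
	printf("PACKET0(UVD_CONTEXT_ID, 0) = 0x%08x\n", PACKET0(0xf6f4, 0));
	return 0;
}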
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8263af3fd832..1442ce765d48 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -95,6 +95,7 @@ extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2; 95extern int radeon_pcie_gen2;
96extern int radeon_msi; 96extern int radeon_msi;
97extern int radeon_lockup_timeout; 97extern int radeon_lockup_timeout;
98extern int radeon_fastfb;
98 99
99/* 100/*
100 * Copy from radeon_drv.h so we don't have to include both and have conflicting 101 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -109,24 +110,27 @@ extern int radeon_lockup_timeout;
109#define RADEON_BIOS_NUM_SCRATCH 8 110#define RADEON_BIOS_NUM_SCRATCH 8
110 111
111/* max number of rings */ 112/* max number of rings */
112#define RADEON_NUM_RINGS 5 113#define RADEON_NUM_RINGS 6
113 114
114/* fence seq are set to this number when signaled */ 115/* fence seq are set to this number when signaled */
115#define RADEON_FENCE_SIGNALED_SEQ 0LL 116#define RADEON_FENCE_SIGNALED_SEQ 0LL
116 117
117/* internal ring indices */ 118/* internal ring indices */
118/* r1xx+ has gfx CP ring */ 119/* r1xx+ has gfx CP ring */
119#define RADEON_RING_TYPE_GFX_INDEX 0 120#define RADEON_RING_TYPE_GFX_INDEX 0
120 121
121/* cayman has 2 compute CP rings */ 122/* cayman has 2 compute CP rings */
122#define CAYMAN_RING_TYPE_CP1_INDEX 1 123#define CAYMAN_RING_TYPE_CP1_INDEX 1
123#define CAYMAN_RING_TYPE_CP2_INDEX 2 124#define CAYMAN_RING_TYPE_CP2_INDEX 2
124 125
125/* R600+ has an async dma ring */ 126/* R600+ has an async dma ring */
126#define R600_RING_TYPE_DMA_INDEX 3 127#define R600_RING_TYPE_DMA_INDEX 3
127/* cayman add a second async dma ring */ 128/* cayman add a second async dma ring */
128#define CAYMAN_RING_TYPE_DMA1_INDEX 4 129#define CAYMAN_RING_TYPE_DMA1_INDEX 4
129 130
131/* R600+ */
132#define R600_RING_TYPE_UVD_INDEX 5
133
130/* hardcode those limit for now */ 134/* hardcode those limit for now */
131#define RADEON_VA_IB_OFFSET (1 << 20) 135#define RADEON_VA_IB_OFFSET (1 << 20)
132#define RADEON_VA_RESERVED_SIZE (8 << 20) 136#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -202,6 +206,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
202void radeon_pm_resume(struct radeon_device *rdev); 206void radeon_pm_resume(struct radeon_device *rdev);
203void radeon_combios_get_power_modes(struct radeon_device *rdev); 207void radeon_combios_get_power_modes(struct radeon_device *rdev);
204void radeon_atombios_get_power_modes(struct radeon_device *rdev); 208void radeon_atombios_get_power_modes(struct radeon_device *rdev);
209int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
210 u8 clock_type,
211 u32 clock,
212 bool strobe_mode,
213 struct atom_clock_dividers *dividers);
205void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 214void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
206void rs690_pm_info(struct radeon_device *rdev); 215void rs690_pm_info(struct radeon_device *rdev);
207extern int rv6xx_get_temp(struct radeon_device *rdev); 216extern int rv6xx_get_temp(struct radeon_device *rdev);
@@ -349,7 +358,8 @@ struct radeon_bo {
349 struct radeon_device *rdev; 358 struct radeon_device *rdev;
350 struct drm_gem_object gem_base; 359 struct drm_gem_object gem_base;
351 360
352 struct ttm_bo_kmap_obj dma_buf_vmap; 361 struct ttm_bo_kmap_obj dma_buf_vmap;
362 pid_t pid;
353}; 363};
354#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 364#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
355 365
@@ -357,11 +367,14 @@ struct radeon_bo_list {
357 struct ttm_validate_buffer tv; 367 struct ttm_validate_buffer tv;
358 struct radeon_bo *bo; 368 struct radeon_bo *bo;
359 uint64_t gpu_offset; 369 uint64_t gpu_offset;
360 unsigned rdomain; 370 bool written;
361 unsigned wdomain; 371 unsigned domain;
372 unsigned alt_domain;
362 u32 tiling_flags; 373 u32 tiling_flags;
363}; 374};
364 375
376int radeon_gem_debugfs_init(struct radeon_device *rdev);
377
365/* sub-allocation manager, it has to be protected by another lock. 378/* sub-allocation manager, it has to be protected by another lock.
366 * By conception this is an helper for other part of the driver 379 * By conception this is an helper for other part of the driver
367 * like the indirect buffer or semaphore, which both have their 380 * like the indirect buffer or semaphore, which both have their
@@ -517,6 +530,7 @@ struct radeon_mc {
517 bool vram_is_ddr; 530 bool vram_is_ddr;
518 bool igp_sideport_enabled; 531 bool igp_sideport_enabled;
519 u64 gtt_base_align; 532 u64 gtt_base_align;
533 u64 mc_mask;
520}; 534};
521 535
522bool radeon_combios_sideport_present(struct radeon_device *rdev); 536bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -918,6 +932,7 @@ struct radeon_wb {
918#define R600_WB_DMA_RPTR_OFFSET 1792 932#define R600_WB_DMA_RPTR_OFFSET 1792
919#define R600_WB_IH_WPTR_OFFSET 2048 933#define R600_WB_IH_WPTR_OFFSET 2048
920#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 934#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
935#define R600_WB_UVD_RPTR_OFFSET 2560
921#define R600_WB_EVENT_OFFSET 3072 936#define R600_WB_EVENT_OFFSET 3072
922 937
923/** 938/**
@@ -1118,6 +1133,46 @@ struct radeon_pm {
1118int radeon_pm_get_type_index(struct radeon_device *rdev, 1133int radeon_pm_get_type_index(struct radeon_device *rdev,
1119 enum radeon_pm_state_type ps_type, 1134 enum radeon_pm_state_type ps_type,
1120 int instance); 1135 int instance);
1136/*
1137 * UVD
1138 */
1139#define RADEON_MAX_UVD_HANDLES 10
1140#define RADEON_UVD_STACK_SIZE (1024*1024)
1141#define RADEON_UVD_HEAP_SIZE (1024*1024)
1142
1143struct radeon_uvd {
1144 struct radeon_bo *vcpu_bo;
1145 void *cpu_addr;
1146 uint64_t gpu_addr;
1147 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1148 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1149 struct delayed_work idle_work;
1150};
1151
1152int radeon_uvd_init(struct radeon_device *rdev);
1153void radeon_uvd_fini(struct radeon_device *rdev);
1154int radeon_uvd_suspend(struct radeon_device *rdev);
1155int radeon_uvd_resume(struct radeon_device *rdev);
1156int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1157 uint32_t handle, struct radeon_fence **fence);
1158int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1159 uint32_t handle, struct radeon_fence **fence);
1160void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
1161void radeon_uvd_free_handles(struct radeon_device *rdev,
1162 struct drm_file *filp);
1163int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1164void radeon_uvd_note_usage(struct radeon_device *rdev);
1165int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1166 unsigned vclk, unsigned dclk,
1167 unsigned vco_min, unsigned vco_max,
1168 unsigned fb_factor, unsigned fb_mask,
1169 unsigned pd_min, unsigned pd_max,
1170 unsigned pd_even,
1171 unsigned *optimal_fb_div,
1172 unsigned *optimal_vclk_div,
1173 unsigned *optimal_dclk_div);
1174int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1175 unsigned cg_upll_func_cntl);
1121 1176
1122struct r600_audio { 1177struct r600_audio {
1123 int channels; 1178 int channels;
@@ -1229,6 +1284,9 @@ struct radeon_asic {
1229 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level); 1284 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1230 /* get backlight level */ 1285 /* get backlight level */
1231 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder); 1286 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1287 /* audio callbacks */
1288 void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
1289 void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
1232 } display; 1290 } display;
1233 /* copy functions for bo handling */ 1291 /* copy functions for bo handling */
1234 struct { 1292 struct {
@@ -1281,6 +1339,7 @@ struct radeon_asic {
1281 int (*get_pcie_lanes)(struct radeon_device *rdev); 1339 int (*get_pcie_lanes)(struct radeon_device *rdev);
1282 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 1340 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1283 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 1341 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1342 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1284 } pm; 1343 } pm;
1285 /* pageflipping */ 1344 /* pageflipping */
1286 struct { 1345 struct {
@@ -1443,6 +1502,7 @@ struct si_asic {
1443 unsigned multi_gpu_tile_size; 1502 unsigned multi_gpu_tile_size;
1444 1503
1445 unsigned tile_config; 1504 unsigned tile_config;
1505 uint32_t tile_mode_array[32];
1446}; 1506};
1447 1507
1448union radeon_asic_config { 1508union radeon_asic_config {
@@ -1608,6 +1668,7 @@ struct radeon_device {
1608 struct radeon_asic *asic; 1668 struct radeon_asic *asic;
1609 struct radeon_gem gem; 1669 struct radeon_gem gem;
1610 struct radeon_pm pm; 1670 struct radeon_pm pm;
1671 struct radeon_uvd uvd;
1611 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1672 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1612 struct radeon_wb wb; 1673 struct radeon_wb wb;
1613 struct radeon_dummy_page dummy_page; 1674 struct radeon_dummy_page dummy_page;
@@ -1615,12 +1676,14 @@ struct radeon_device {
1615 bool suspend; 1676 bool suspend;
1616 bool need_dma32; 1677 bool need_dma32;
1617 bool accel_working; 1678 bool accel_working;
1679 bool fastfb_working; /* IGP feature*/
1618 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 1680 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
1619 const struct firmware *me_fw; /* all family ME firmware */ 1681 const struct firmware *me_fw; /* all family ME firmware */
1620 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1682 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1621 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1683 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1622 const struct firmware *mc_fw; /* NI MC firmware */ 1684 const struct firmware *mc_fw; /* NI MC firmware */
1623 const struct firmware *ce_fw; /* SI CE firmware */ 1685 const struct firmware *ce_fw; /* SI CE firmware */
1686 const struct firmware *uvd_fw; /* UVD firmware */
1624 struct r600_blit r600_blit; 1687 struct r600_blit r600_blit;
1625 struct r600_vram_scratch vram_scratch; 1688 struct r600_vram_scratch vram_scratch;
1626 int msi_enabled; /* msi enabled */ 1689 int msi_enabled; /* msi enabled */
@@ -1688,8 +1751,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1688#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1751#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
1689#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1752#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
1690#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1753#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1691#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) 1754#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
1692#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 1755#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
1693#define WREG32_P(reg, val, mask) \ 1756#define WREG32_P(reg, val, mask) \
1694 do { \ 1757 do { \
1695 uint32_t tmp_ = RREG32(reg); \ 1758 uint32_t tmp_ = RREG32(reg); \
@@ -1697,6 +1760,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1697 tmp_ |= ((val) & ~(mask)); \ 1760 tmp_ |= ((val) & ~(mask)); \
1698 WREG32(reg, tmp_); \ 1761 WREG32(reg, tmp_); \
1699 } while (0) 1762 } while (0)
1763#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1764#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
1700#define WREG32_PLL_P(reg, val, mask) \ 1765#define WREG32_PLL_P(reg, val, mask) \
1701 do { \ 1766 do { \
1702 uint32_t tmp_ = RREG32_PLL(reg); \ 1767 uint32_t tmp_ = RREG32_PLL(reg); \
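WREG32_P keeps the register bits selected by mask and merges val into the rest, so WREG32_AND(reg, m) reduces to a plain read-modify-write AND and WREG32_OR(reg, m) to an OR; that is what lets the HDMI rework replace the old WREG32_P(reg, EN, ~EN) pairs with WREG32_OR/WREG32_AND calls. A user-space rendering of the expansion, with fake_reg standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xF0F0F0F0;	/* stand-in for an MMIO register */

#define RREG32(r)	(fake_reg)
#define WREG32(r, v)	(fake_reg = (v))

#define WREG32_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))

int main(void)
{
	WREG32_OR(0, 0x00000001u);	/* behaves as fake_reg |= 0x1 */
	printf("after OR:  0x%08x\n", fake_reg);
	WREG32_AND(0, ~0x00000010u);	/* behaves as fake_reg &= ~0x10 */
	printf("after AND: 0x%08x\n", fake_reg);
	return 0;
}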
@@ -1830,6 +1895,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1830#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1895#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
1831#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) 1896#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
1832#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) 1897#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
1898#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
1899#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
1833#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 1900#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1834#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 1901#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
1835#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 1902#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1845,6 +1912,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1845#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) 1912#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
1846#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) 1913#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1847#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) 1914#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
1915#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
1848#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) 1916#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1849#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) 1917#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
1850#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) 1918#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1892,6 +1960,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
1892extern int radeon_resume_kms(struct drm_device *dev); 1960extern int radeon_resume_kms(struct drm_device *dev);
1893extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1961extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1894extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1962extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1963extern void radeon_program_register_sequence(struct radeon_device *rdev,
1964 const u32 *registers,
1965 const u32 array_size);
1895 1966
1896/* 1967/*
1897 * vm 1968 * vm
@@ -1964,9 +2035,6 @@ struct radeon_hdmi_acr {
1964 2035
1965extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); 2036extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1966 2037
1967extern void r600_hdmi_enable(struct drm_encoder *encoder);
1968extern void r600_hdmi_disable(struct drm_encoder *encoder);
1969extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1970extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, 2038extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1971 u32 tiling_pipe_num, 2039 u32 tiling_pipe_num,
1972 u32 max_rb_num, 2040 u32 max_rb_num,
@@ -1977,8 +2045,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1977 * evergreen functions used by radeon_encoder.c 2045 * evergreen functions used by radeon_encoder.c
1978 */ 2046 */
1979 2047
1980extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1981
1982extern int ni_init_microcode(struct radeon_device *rdev); 2048extern int ni_init_microcode(struct radeon_device *rdev);
1983extern int ni_mc_load_microcode(struct radeon_device *rdev); 2049extern int ni_mc_load_microcode(struct radeon_device *rdev);
1984 2050
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index aba0a893ea98..6417132c50cf 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -656,6 +656,8 @@ static struct radeon_asic rs600_asic = {
656 .wait_for_vblank = &avivo_wait_for_vblank, 656 .wait_for_vblank = &avivo_wait_for_vblank,
657 .set_backlight_level = &atombios_set_backlight_level, 657 .set_backlight_level = &atombios_set_backlight_level,
658 .get_backlight_level = &atombios_get_backlight_level, 658 .get_backlight_level = &atombios_get_backlight_level,
659 .hdmi_enable = &r600_hdmi_enable,
660 .hdmi_setmode = &r600_hdmi_setmode,
659 }, 661 },
660 .copy = { 662 .copy = {
661 .blit = &r100_copy_blit, 663 .blit = &r100_copy_blit,
@@ -732,6 +734,8 @@ static struct radeon_asic rs690_asic = {
732 .wait_for_vblank = &avivo_wait_for_vblank, 734 .wait_for_vblank = &avivo_wait_for_vblank,
733 .set_backlight_level = &atombios_set_backlight_level, 735 .set_backlight_level = &atombios_set_backlight_level,
734 .get_backlight_level = &atombios_get_backlight_level, 736 .get_backlight_level = &atombios_get_backlight_level,
737 .hdmi_enable = &r600_hdmi_enable,
738 .hdmi_setmode = &r600_hdmi_setmode,
735 }, 739 },
736 .copy = { 740 .copy = {
737 .blit = &r100_copy_blit, 741 .blit = &r100_copy_blit,
@@ -970,6 +974,8 @@ static struct radeon_asic r600_asic = {
970 .wait_for_vblank = &avivo_wait_for_vblank, 974 .wait_for_vblank = &avivo_wait_for_vblank,
971 .set_backlight_level = &atombios_set_backlight_level, 975 .set_backlight_level = &atombios_set_backlight_level,
972 .get_backlight_level = &atombios_get_backlight_level, 976 .get_backlight_level = &atombios_get_backlight_level,
977 .hdmi_enable = &r600_hdmi_enable,
978 .hdmi_setmode = &r600_hdmi_setmode,
973 }, 979 },
974 .copy = { 980 .copy = {
975 .blit = &r600_copy_blit, 981 .blit = &r600_copy_blit,
@@ -1056,6 +1062,8 @@ static struct radeon_asic rs780_asic = {
1056 .wait_for_vblank = &avivo_wait_for_vblank, 1062 .wait_for_vblank = &avivo_wait_for_vblank,
1057 .set_backlight_level = &atombios_set_backlight_level, 1063 .set_backlight_level = &atombios_set_backlight_level,
1058 .get_backlight_level = &atombios_get_backlight_level, 1064 .get_backlight_level = &atombios_get_backlight_level,
1065 .hdmi_enable = &r600_hdmi_enable,
1066 .hdmi_setmode = &r600_hdmi_setmode,
1059 }, 1067 },
1060 .copy = { 1068 .copy = {
1061 .blit = &r600_copy_blit, 1069 .blit = &r600_copy_blit,
@@ -1130,6 +1138,15 @@ static struct radeon_asic rv770_asic = {
1130 .ring_test = &r600_dma_ring_test, 1138 .ring_test = &r600_dma_ring_test,
1131 .ib_test = &r600_dma_ib_test, 1139 .ib_test = &r600_dma_ib_test,
1132 .is_lockup = &r600_dma_is_lockup, 1140 .is_lockup = &r600_dma_is_lockup,
1141 },
1142 [R600_RING_TYPE_UVD_INDEX] = {
1143 .ib_execute = &r600_uvd_ib_execute,
1144 .emit_fence = &r600_uvd_fence_emit,
1145 .emit_semaphore = &r600_uvd_semaphore_emit,
1146 .cs_parse = &radeon_uvd_cs_parse,
1147 .ring_test = &r600_uvd_ring_test,
1148 .ib_test = &r600_uvd_ib_test,
1149 .is_lockup = &radeon_ring_test_lockup,
1133 } 1150 }
1134 }, 1151 },
1135 .irq = { 1152 .irq = {
@@ -1142,6 +1159,8 @@ static struct radeon_asic rv770_asic = {
1142 .wait_for_vblank = &avivo_wait_for_vblank, 1159 .wait_for_vblank = &avivo_wait_for_vblank,
1143 .set_backlight_level = &atombios_set_backlight_level, 1160 .set_backlight_level = &atombios_set_backlight_level,
1144 .get_backlight_level = &atombios_get_backlight_level, 1161 .get_backlight_level = &atombios_get_backlight_level,
1162 .hdmi_enable = &r600_hdmi_enable,
1163 .hdmi_setmode = &r600_hdmi_setmode,
1145 }, 1164 },
1146 .copy = { 1165 .copy = {
1147 .blit = &r600_copy_blit, 1166 .blit = &r600_copy_blit,
@@ -1174,6 +1193,7 @@ static struct radeon_asic rv770_asic = {
1174 .get_pcie_lanes = &r600_get_pcie_lanes, 1193 .get_pcie_lanes = &r600_get_pcie_lanes,
1175 .set_pcie_lanes = &r600_set_pcie_lanes, 1194 .set_pcie_lanes = &r600_set_pcie_lanes,
1176 .set_clock_gating = &radeon_atom_set_clock_gating, 1195 .set_clock_gating = &radeon_atom_set_clock_gating,
1196 .set_uvd_clocks = &rv770_set_uvd_clocks,
1177 }, 1197 },
1178 .pflip = { 1198 .pflip = {
1179 .pre_page_flip = &rs600_pre_page_flip, 1199 .pre_page_flip = &rs600_pre_page_flip,
@@ -1216,6 +1236,15 @@ static struct radeon_asic evergreen_asic = {
1216 .ring_test = &r600_dma_ring_test, 1236 .ring_test = &r600_dma_ring_test,
1217 .ib_test = &r600_dma_ib_test, 1237 .ib_test = &r600_dma_ib_test,
1218 .is_lockup = &evergreen_dma_is_lockup, 1238 .is_lockup = &evergreen_dma_is_lockup,
1239 },
1240 [R600_RING_TYPE_UVD_INDEX] = {
1241 .ib_execute = &r600_uvd_ib_execute,
1242 .emit_fence = &r600_uvd_fence_emit,
1243 .emit_semaphore = &r600_uvd_semaphore_emit,
1244 .cs_parse = &radeon_uvd_cs_parse,
1245 .ring_test = &r600_uvd_ring_test,
1246 .ib_test = &r600_uvd_ib_test,
1247 .is_lockup = &radeon_ring_test_lockup,
1219 } 1248 }
1220 }, 1249 },
1221 .irq = { 1250 .irq = {
@@ -1228,6 +1257,8 @@ static struct radeon_asic evergreen_asic = {
1228 .wait_for_vblank = &dce4_wait_for_vblank, 1257 .wait_for_vblank = &dce4_wait_for_vblank,
1229 .set_backlight_level = &atombios_set_backlight_level, 1258 .set_backlight_level = &atombios_set_backlight_level,
1230 .get_backlight_level = &atombios_get_backlight_level, 1259 .get_backlight_level = &atombios_get_backlight_level,
1260 .hdmi_enable = &evergreen_hdmi_enable,
1261 .hdmi_setmode = &evergreen_hdmi_setmode,
1231 }, 1262 },
1232 .copy = { 1263 .copy = {
1233 .blit = &r600_copy_blit, 1264 .blit = &r600_copy_blit,
@@ -1260,6 +1291,7 @@ static struct radeon_asic evergreen_asic = {
1260 .get_pcie_lanes = &r600_get_pcie_lanes, 1291 .get_pcie_lanes = &r600_get_pcie_lanes,
1261 .set_pcie_lanes = &r600_set_pcie_lanes, 1292 .set_pcie_lanes = &r600_set_pcie_lanes,
1262 .set_clock_gating = NULL, 1293 .set_clock_gating = NULL,
1294 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1263 }, 1295 },
1264 .pflip = { 1296 .pflip = {
1265 .pre_page_flip = &evergreen_pre_page_flip, 1297 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1302,6 +1334,15 @@ static struct radeon_asic sumo_asic = {
1302 .ring_test = &r600_dma_ring_test, 1334 .ring_test = &r600_dma_ring_test,
1303 .ib_test = &r600_dma_ib_test, 1335 .ib_test = &r600_dma_ib_test,
1304 .is_lockup = &evergreen_dma_is_lockup, 1336 .is_lockup = &evergreen_dma_is_lockup,
1337 },
1338 [R600_RING_TYPE_UVD_INDEX] = {
1339 .ib_execute = &r600_uvd_ib_execute,
1340 .emit_fence = &r600_uvd_fence_emit,
1341 .emit_semaphore = &r600_uvd_semaphore_emit,
1342 .cs_parse = &radeon_uvd_cs_parse,
1343 .ring_test = &r600_uvd_ring_test,
1344 .ib_test = &r600_uvd_ib_test,
1345 .is_lockup = &radeon_ring_test_lockup,
1305 } 1346 }
1306 }, 1347 },
1307 .irq = { 1348 .irq = {
@@ -1314,6 +1355,8 @@ static struct radeon_asic sumo_asic = {
1314 .wait_for_vblank = &dce4_wait_for_vblank, 1355 .wait_for_vblank = &dce4_wait_for_vblank,
1315 .set_backlight_level = &atombios_set_backlight_level, 1356 .set_backlight_level = &atombios_set_backlight_level,
1316 .get_backlight_level = &atombios_get_backlight_level, 1357 .get_backlight_level = &atombios_get_backlight_level,
1358 .hdmi_enable = &evergreen_hdmi_enable,
1359 .hdmi_setmode = &evergreen_hdmi_setmode,
1317 }, 1360 },
1318 .copy = { 1361 .copy = {
1319 .blit = &r600_copy_blit, 1362 .blit = &r600_copy_blit,
@@ -1346,6 +1389,7 @@ static struct radeon_asic sumo_asic = {
1346 .get_pcie_lanes = NULL, 1389 .get_pcie_lanes = NULL,
1347 .set_pcie_lanes = NULL, 1390 .set_pcie_lanes = NULL,
1348 .set_clock_gating = NULL, 1391 .set_clock_gating = NULL,
1392 .set_uvd_clocks = &sumo_set_uvd_clocks,
1349 }, 1393 },
1350 .pflip = { 1394 .pflip = {
1351 .pre_page_flip = &evergreen_pre_page_flip, 1395 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1388,6 +1432,15 @@ static struct radeon_asic btc_asic = {
1388 .ring_test = &r600_dma_ring_test, 1432 .ring_test = &r600_dma_ring_test,
1389 .ib_test = &r600_dma_ib_test, 1433 .ib_test = &r600_dma_ib_test,
1390 .is_lockup = &evergreen_dma_is_lockup, 1434 .is_lockup = &evergreen_dma_is_lockup,
1435 },
1436 [R600_RING_TYPE_UVD_INDEX] = {
1437 .ib_execute = &r600_uvd_ib_execute,
1438 .emit_fence = &r600_uvd_fence_emit,
1439 .emit_semaphore = &r600_uvd_semaphore_emit,
1440 .cs_parse = &radeon_uvd_cs_parse,
1441 .ring_test = &r600_uvd_ring_test,
1442 .ib_test = &r600_uvd_ib_test,
1443 .is_lockup = &radeon_ring_test_lockup,
1391 } 1444 }
1392 }, 1445 },
1393 .irq = { 1446 .irq = {
@@ -1400,6 +1453,8 @@ static struct radeon_asic btc_asic = {
1400 .wait_for_vblank = &dce4_wait_for_vblank, 1453 .wait_for_vblank = &dce4_wait_for_vblank,
1401 .set_backlight_level = &atombios_set_backlight_level, 1454 .set_backlight_level = &atombios_set_backlight_level,
1402 .get_backlight_level = &atombios_get_backlight_level, 1455 .get_backlight_level = &atombios_get_backlight_level,
1456 .hdmi_enable = &evergreen_hdmi_enable,
1457 .hdmi_setmode = &evergreen_hdmi_setmode,
1403 }, 1458 },
1404 .copy = { 1459 .copy = {
1405 .blit = &r600_copy_blit, 1460 .blit = &r600_copy_blit,
@@ -1429,9 +1484,10 @@ static struct radeon_asic btc_asic = {
1429 .set_engine_clock = &radeon_atom_set_engine_clock, 1484 .set_engine_clock = &radeon_atom_set_engine_clock,
1430 .get_memory_clock = &radeon_atom_get_memory_clock, 1485 .get_memory_clock = &radeon_atom_get_memory_clock,
1431 .set_memory_clock = &radeon_atom_set_memory_clock, 1486 .set_memory_clock = &radeon_atom_set_memory_clock,
1432 .get_pcie_lanes = NULL, 1487 .get_pcie_lanes = &r600_get_pcie_lanes,
1433 .set_pcie_lanes = NULL, 1488 .set_pcie_lanes = &r600_set_pcie_lanes,
1434 .set_clock_gating = NULL, 1489 .set_clock_gating = NULL,
1490 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1435 }, 1491 },
1436 .pflip = { 1492 .pflip = {
1437 .pre_page_flip = &evergreen_pre_page_flip, 1493 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1517,6 +1573,15 @@ static struct radeon_asic cayman_asic = {
1517 .ib_test = &r600_dma_ib_test, 1573 .ib_test = &r600_dma_ib_test,
1518 .is_lockup = &cayman_dma_is_lockup, 1574 .is_lockup = &cayman_dma_is_lockup,
1519 .vm_flush = &cayman_dma_vm_flush, 1575 .vm_flush = &cayman_dma_vm_flush,
1576 },
1577 [R600_RING_TYPE_UVD_INDEX] = {
1578 .ib_execute = &r600_uvd_ib_execute,
1579 .emit_fence = &r600_uvd_fence_emit,
1580 .emit_semaphore = &cayman_uvd_semaphore_emit,
1581 .cs_parse = &radeon_uvd_cs_parse,
1582 .ring_test = &r600_uvd_ring_test,
1583 .ib_test = &r600_uvd_ib_test,
1584 .is_lockup = &radeon_ring_test_lockup,
1520 } 1585 }
1521 }, 1586 },
1522 .irq = { 1587 .irq = {
@@ -1529,6 +1594,8 @@ static struct radeon_asic cayman_asic = {
1529 .wait_for_vblank = &dce4_wait_for_vblank, 1594 .wait_for_vblank = &dce4_wait_for_vblank,
1530 .set_backlight_level = &atombios_set_backlight_level, 1595 .set_backlight_level = &atombios_set_backlight_level,
1531 .get_backlight_level = &atombios_get_backlight_level, 1596 .get_backlight_level = &atombios_get_backlight_level,
1597 .hdmi_enable = &evergreen_hdmi_enable,
1598 .hdmi_setmode = &evergreen_hdmi_setmode,
1532 }, 1599 },
1533 .copy = { 1600 .copy = {
1534 .blit = &r600_copy_blit, 1601 .blit = &r600_copy_blit,
@@ -1558,9 +1625,10 @@ static struct radeon_asic cayman_asic = {
1558 .set_engine_clock = &radeon_atom_set_engine_clock, 1625 .set_engine_clock = &radeon_atom_set_engine_clock,
1559 .get_memory_clock = &radeon_atom_get_memory_clock, 1626 .get_memory_clock = &radeon_atom_get_memory_clock,
1560 .set_memory_clock = &radeon_atom_set_memory_clock, 1627 .set_memory_clock = &radeon_atom_set_memory_clock,
1561 .get_pcie_lanes = NULL, 1628 .get_pcie_lanes = &r600_get_pcie_lanes,
1562 .set_pcie_lanes = NULL, 1629 .set_pcie_lanes = &r600_set_pcie_lanes,
1563 .set_clock_gating = NULL, 1630 .set_clock_gating = NULL,
1631 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1564 }, 1632 },
1565 .pflip = { 1633 .pflip = {
1566 .pre_page_flip = &evergreen_pre_page_flip, 1634 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1646,6 +1714,15 @@ static struct radeon_asic trinity_asic = {
1646 .ib_test = &r600_dma_ib_test, 1714 .ib_test = &r600_dma_ib_test,
1647 .is_lockup = &cayman_dma_is_lockup, 1715 .is_lockup = &cayman_dma_is_lockup,
1648 .vm_flush = &cayman_dma_vm_flush, 1716 .vm_flush = &cayman_dma_vm_flush,
1717 },
1718 [R600_RING_TYPE_UVD_INDEX] = {
1719 .ib_execute = &r600_uvd_ib_execute,
1720 .emit_fence = &r600_uvd_fence_emit,
1721 .emit_semaphore = &cayman_uvd_semaphore_emit,
1722 .cs_parse = &radeon_uvd_cs_parse,
1723 .ring_test = &r600_uvd_ring_test,
1724 .ib_test = &r600_uvd_ib_test,
1725 .is_lockup = &radeon_ring_test_lockup,
1649 } 1726 }
1650 }, 1727 },
1651 .irq = { 1728 .irq = {
@@ -1690,6 +1767,7 @@ static struct radeon_asic trinity_asic = {
1690 .get_pcie_lanes = NULL, 1767 .get_pcie_lanes = NULL,
1691 .set_pcie_lanes = NULL, 1768 .set_pcie_lanes = NULL,
1692 .set_clock_gating = NULL, 1769 .set_clock_gating = NULL,
1770 .set_uvd_clocks = &sumo_set_uvd_clocks,
1693 }, 1771 },
1694 .pflip = { 1772 .pflip = {
1695 .pre_page_flip = &evergreen_pre_page_flip, 1773 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1775,6 +1853,15 @@ static struct radeon_asic si_asic = {
1775 .ib_test = &r600_dma_ib_test, 1853 .ib_test = &r600_dma_ib_test,
1776 .is_lockup = &si_dma_is_lockup, 1854 .is_lockup = &si_dma_is_lockup,
1777 .vm_flush = &si_dma_vm_flush, 1855 .vm_flush = &si_dma_vm_flush,
1856 },
1857 [R600_RING_TYPE_UVD_INDEX] = {
1858 .ib_execute = &r600_uvd_ib_execute,
1859 .emit_fence = &r600_uvd_fence_emit,
1860 .emit_semaphore = &cayman_uvd_semaphore_emit,
1861 .cs_parse = &radeon_uvd_cs_parse,
1862 .ring_test = &r600_uvd_ring_test,
1863 .ib_test = &r600_uvd_ib_test,
1864 .is_lockup = &radeon_ring_test_lockup,
1778 } 1865 }
1779 }, 1866 },
1780 .irq = { 1867 .irq = {
@@ -1816,9 +1903,10 @@ static struct radeon_asic si_asic = {
1816 .set_engine_clock = &radeon_atom_set_engine_clock, 1903 .set_engine_clock = &radeon_atom_set_engine_clock,
1817 .get_memory_clock = &radeon_atom_get_memory_clock, 1904 .get_memory_clock = &radeon_atom_get_memory_clock,
1818 .set_memory_clock = &radeon_atom_set_memory_clock, 1905 .set_memory_clock = &radeon_atom_set_memory_clock,
1819 .get_pcie_lanes = NULL, 1906 .get_pcie_lanes = &r600_get_pcie_lanes,
1820 .set_pcie_lanes = NULL, 1907 .set_pcie_lanes = &r600_set_pcie_lanes,
1821 .set_clock_gating = NULL, 1908 .set_clock_gating = NULL,
1909 .set_uvd_clocks = &si_set_uvd_clocks,
1822 }, 1910 },
1823 .pflip = { 1911 .pflip = {
1824 .pre_page_flip = &evergreen_pre_page_flip, 1912 .pre_page_flip = &evergreen_pre_page_flip,
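None of these per-ASIC tables is called directly; command submission and display code reach them through thin wrappers keyed by ring index, which is why adding a R600_RING_TYPE_UVD_INDEX entry is all it takes to route UVD jobs through the shared helpers. A minimal sketch of the dispatch pattern, assuming the usual radeon.h macro style (exact wrapper names may differ):

/* sketch of the dispatch behind the tables above */
#define radeon_ring_ib_execute(rdev, r, ib) \
	(rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_hdmi_enable(rdev, e, b) \
	(rdev)->asic->display.hdmi_enable((e), (b))

/* e.g. the CS path ends up in r600_uvd_ib_execute() via: */
radeon_ring_ib_execute(rdev, R600_RING_TYPE_UVD_INDEX, ib);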
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3535f73ad3e2..2c87365d345f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
333int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
333int r600_copy_blit(struct radeon_device *rdev, 334int r600_copy_blit(struct radeon_device *rdev,
334 uint64_t src_offset, uint64_t dst_offset, 335 uint64_t src_offset, uint64_t dst_offset,
335 unsigned num_gpu_pages, struct radeon_fence **fence); 336 unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -373,11 +374,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
373void r600_rlc_stop(struct radeon_device *rdev); 374void r600_rlc_stop(struct radeon_device *rdev);
374/* r600 audio */ 375/* r600 audio */
375int r600_audio_init(struct radeon_device *rdev); 376int r600_audio_init(struct radeon_device *rdev);
376void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
377struct r600_audio r600_audio_status(struct radeon_device *rdev); 377struct r600_audio r600_audio_status(struct radeon_device *rdev);
378void r600_audio_fini(struct radeon_device *rdev); 378void r600_audio_fini(struct radeon_device *rdev);
379int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 379int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
380void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 380void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
381void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
382void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
381/* r600 blit */ 383/* r600 blit */
382int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, 384int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
383 struct radeon_fence **fence, struct radeon_sa_bo **vb, 385 struct radeon_fence **fence, struct radeon_sa_bo **vb,
@@ -392,6 +394,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
392u32 r600_get_xclk(struct radeon_device *rdev); 394u32 r600_get_xclk(struct radeon_device *rdev);
393uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 395uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
394 396
397/* uvd */
398int r600_uvd_init(struct radeon_device *rdev);
399int r600_uvd_rbc_start(struct radeon_device *rdev);
400void r600_uvd_rbc_stop(struct radeon_device *rdev);
401int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
402void r600_uvd_fence_emit(struct radeon_device *rdev,
403 struct radeon_fence *fence);
404void r600_uvd_semaphore_emit(struct radeon_device *rdev,
405 struct radeon_ring *ring,
406 struct radeon_semaphore *semaphore,
407 bool emit_wait);
408void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
409
395/* 410/*
396 * rv770,rv730,rv710,rv740 411 * rv770,rv730,rv710,rv740
397 */ 412 */
@@ -409,6 +424,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
409 unsigned num_gpu_pages, 424 unsigned num_gpu_pages,
410 struct radeon_fence **fence); 425 struct radeon_fence **fence);
411u32 rv770_get_xclk(struct radeon_device *rdev); 426u32 rv770_get_xclk(struct radeon_device *rdev);
427int rv770_uvd_resume(struct radeon_device *rdev);
428int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
412 429
413/* 430/*
414 * evergreen 431 * evergreen
@@ -444,6 +461,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
444extern void evergreen_pm_finish(struct radeon_device *rdev); 461extern void evergreen_pm_finish(struct radeon_device *rdev);
445extern void sumo_pm_init_profile(struct radeon_device *rdev); 462extern void sumo_pm_init_profile(struct radeon_device *rdev);
446extern void btc_pm_init_profile(struct radeon_device *rdev); 463extern void btc_pm_init_profile(struct radeon_device *rdev);
464int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
465int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
447extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 466extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
448extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 467extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
449extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 468extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -459,12 +478,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
459 uint64_t src_offset, uint64_t dst_offset, 478 uint64_t src_offset, uint64_t dst_offset,
460 unsigned num_gpu_pages, 479 unsigned num_gpu_pages,
461 struct radeon_fence **fence); 480 struct radeon_fence **fence);
481void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
482void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
462 483
463/* 484/*
464 * cayman 485 * cayman
465 */ 486 */
466void cayman_fence_ring_emit(struct radeon_device *rdev, 487void cayman_fence_ring_emit(struct radeon_device *rdev,
467 struct radeon_fence *fence); 488 struct radeon_fence *fence);
489void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
490 struct radeon_ring *ring,
491 struct radeon_semaphore *semaphore,
492 bool emit_wait);
468void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); 493void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
469int cayman_init(struct radeon_device *rdev); 494int cayman_init(struct radeon_device *rdev);
470void cayman_fini(struct radeon_device *rdev); 495void cayman_fini(struct radeon_device *rdev);
@@ -524,5 +549,6 @@ int si_copy_dma(struct radeon_device *rdev,
524void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 549void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
525u32 si_get_xclk(struct radeon_device *rdev); 550u32 si_get_xclk(struct radeon_device *rdev);
526uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); 551uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
552int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
527 553
528#endif 554#endif
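The set_uvd_clocks prototypes added to this header are per-family PLL programmers (rv770, evergreen, sumo, si) hooked into the .pm section of the tables above. A hedged sketch of how a caller reaches them; the 53300/40000 values are illustrative (10 kHz units, i.e. 533/400 MHz):

#define radeon_set_uvd_clocks(rdev, v, d) \
	(rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))

/* raise UVD clocks before a decode job, drop them again when idle */
int r = radeon_set_uvd_clocks(rdev, 53300, 40000);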
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb5713528..dea6f63c9724 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2028 num_modes = power_info->info.ucNumOfPowerModeEntries; 2028 num_modes = power_info->info.ucNumOfPowerModeEntries;
2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
2031 if (num_modes == 0)
2032 return state_index;
2031 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); 2033 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
2032 if (!rdev->pm.power_state) 2034 if (!rdev->pm.power_state)
2033 return state_index; 2035 return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2307 rdev->pm.default_power_state_index = state_index; 2309 rdev->pm.default_power_state_index = state_index;
2308 rdev->pm.power_state[state_index].default_clock_mode = 2310 rdev->pm.power_state[state_index].default_clock_mode =
2309 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2311 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2310 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2312 if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
2311 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2313 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2312 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2314 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2313 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2315 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2345 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; 2347 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
2346 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2348 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2347 } 2349 }
2348 } else if (ASIC_IS_DCE6(rdev)) { 2350 } else if (rdev->family >= CHIP_TAHITI) {
2349 sclk = le16_to_cpu(clock_info->si.usEngineClockLow); 2351 sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
2350 sclk |= clock_info->si.ucEngineClockHigh << 16; 2352 sclk |= clock_info->si.ucEngineClockHigh << 16;
2351 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); 2353 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2358 le16_to_cpu(clock_info->si.usVDDC); 2360 le16_to_cpu(clock_info->si.usVDDC);
2359 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = 2361 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
2360 le16_to_cpu(clock_info->si.usVDDCI); 2362 le16_to_cpu(clock_info->si.usVDDCI);
2361 } else if (ASIC_IS_DCE4(rdev)) { 2363 } else if (rdev->family >= CHIP_CEDAR) {
2362 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); 2364 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2363 sclk |= clock_info->evergreen.ucEngineClockHigh << 16; 2365 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2364 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); 2366 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2432 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2434 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2433 2435
2434 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2436 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2437 if (power_info->pplib.ucNumStates == 0)
2438 return state_index;
2435 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2439 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2436 power_info->pplib.ucNumStates, GFP_KERNEL); 2440 power_info->pplib.ucNumStates, GFP_KERNEL);
2437 if (!rdev->pm.power_state) 2441 if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2514 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2518 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2515 u16 data_offset; 2519 u16 data_offset;
2516 u8 frev, crev; 2520 u8 frev, crev;
2521 u8 *power_state_offset;
2517 2522
2518 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 2523 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2519 &frev, &crev, &data_offset)) 2524 &frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2530 non_clock_info_array = (struct _NonClockInfoArray *) 2535 non_clock_info_array = (struct _NonClockInfoArray *)
2531 (mode_info->atom_context->bios + data_offset + 2536 (mode_info->atom_context->bios + data_offset +
2532 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2537 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2538 if (state_array->ucNumEntries == 0)
2539 return state_index;
2533 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2540 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2534 state_array->ucNumEntries, GFP_KERNEL); 2541 state_array->ucNumEntries, GFP_KERNEL);
2535 if (!rdev->pm.power_state) 2542 if (!rdev->pm.power_state)
2536 return state_index; 2543 return state_index;
2544 power_state_offset = (u8 *)state_array->states;
2537 for (i = 0; i < state_array->ucNumEntries; i++) { 2545 for (i = 0; i < state_array->ucNumEntries; i++) {
2538 mode_index = 0; 2546 mode_index = 0;
2539 power_state = (union pplib_power_state *)&state_array->states[i]; 2547 power_state = (union pplib_power_state *)power_state_offset;
2540 /* XXX this might be an inagua bug... */ 2548 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2541 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2542 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2549 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2543 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2550 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2544 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * 2551 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2550 if (power_state->v2.ucNumDPMLevels) { 2557 if (power_state->v2.ucNumDPMLevels) {
2551 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2558 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2552 clock_array_index = power_state->v2.clockInfoIndex[j]; 2559 clock_array_index = power_state->v2.clockInfoIndex[j];
2553 /* XXX this might be an inagua bug... */
2554 if (clock_array_index >= clock_info_array->ucNumEntries)
2555 continue;
2556 clock_info = (union pplib_clock_info *) 2560 clock_info = (union pplib_clock_info *)
2557 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2561 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2558 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2562 valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2574 non_clock_info); 2578 non_clock_info);
2575 state_index++; 2579 state_index++;
2576 } 2580 }
2581 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2577 } 2582 }
2578 /* if multiple clock modes, mark the lowest as no display */ 2583 /* if multiple clock modes, mark the lowest as no display */
2579 for (i = 0; i < state_index; i++) { 2584 for (i = 0; i < state_index; i++) {
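The indexing change in the hunk above is the substantive fix: v2 pplib states are variable-length records, so states[i] only lands on a record boundary when every state happens to have the same DPM-level count. A sketch of the record the new byte-offset walk steps over, assuming the usual pptable definition:

/* layout assumed by the "2 + ucNumDPMLevels" stride above */
struct pplib_state_v2_example {
	u8 ucNumDPMLevels;	/* +0 */
	u8 nonClockInfoIndex;	/* +1 */
	u8 clockInfoIndex[];	/* +2, ucNumDPMLevels bytes */
};
/* records are packed back to back, so fixed-size array indexing
 * walks off the rails as soon as the level counts differ */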
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2620 default: 2625 default:
2621 break; 2626 break;
2622 } 2627 }
2623 } else { 2628 }
2629
2630 if (state_index == 0) {
2624 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2631 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2625 if (rdev->pm.power_state) { 2632 if (rdev->pm.power_state) {
2626 rdev->pm.power_state[0].clock_info = 2633 rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2654 rdev->pm.current_vddc = 0; 2661 rdev->pm.current_vddc = 0;
2655} 2662}
2656 2663
2664union get_clock_dividers {
2665 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
2666 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
2667 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
2668 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
2669 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
2670};
2671
2672int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2673 u8 clock_type,
2674 u32 clock,
2675 bool strobe_mode,
2676 struct atom_clock_dividers *dividers)
2677{
2678 union get_clock_dividers args;
2679 int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
2680 u8 frev, crev;
2681
2682 memset(&args, 0, sizeof(args));
2683 memset(dividers, 0, sizeof(struct atom_clock_dividers));
2684
2685 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2686 return -EINVAL;
2687
2688 switch (crev) {
2689 case 1:
2690 /* r4xx, r5xx */
2691 args.v1.ucAction = clock_type;
2692 args.v1.ulClock = cpu_to_le32(clock); /* 10 kHz */
2693
2694 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2695
2696 dividers->post_div = args.v1.ucPostDiv;
2697 dividers->fb_div = args.v1.ucFbDiv;
2698 dividers->enable_post_div = true;
2699 break;
2700 case 2:
2701 case 3:
2702 /* r6xx, r7xx, evergreen, ni */
2703 if (rdev->family <= CHIP_RV770) {
2704 args.v2.ucAction = clock_type;
2705 args.v2.ulClock = cpu_to_le32(clock); /* 10 kHz */
2706
2707 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2708
2709 dividers->post_div = args.v2.ucPostDiv;
2710 dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
2711 dividers->ref_div = args.v2.ucAction;
2712 if (rdev->family == CHIP_RV770) {
2713 dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
2714 true : false;
2715 dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
2716 } else
2717 dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
2718 } else {
2719 if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
2720 args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
2721
2722 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2723
2724 dividers->post_div = args.v3.ucPostDiv;
2725 dividers->enable_post_div = (args.v3.ucCntlFlag &
2726 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2727 dividers->enable_dithen = (args.v3.ucCntlFlag &
2728 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2729 dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
2730 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
2731 dividers->ref_div = args.v3.ucRefDiv;
2732 dividers->vco_mode = (args.v3.ucCntlFlag &
2733 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
2734 } else {
2735 args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
2736 if (strobe_mode)
2737 args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
2738
2739 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2740
2741 dividers->post_div = args.v5.ucPostDiv;
2742 dividers->enable_post_div = (args.v5.ucCntlFlag &
2743 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2744 dividers->enable_dithen = (args.v5.ucCntlFlag &
2745 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2746 dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
2747 dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
2748 dividers->ref_div = args.v5.ucRefDiv;
2749 dividers->vco_mode = (args.v5.ucCntlFlag &
2750 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
2751 }
2752 }
2753 break;
2754 case 4:
2755 /* fusion */
2756 args.v4.ulClock = cpu_to_le32(clock); /* 10 kHz */
2757
2758 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2759
2760 dividers->post_div = args.v4.ucPostDiv;
2761 dividers->real_clock = le32_to_cpu(args.v4.ulClock);
2762 break;
2763 default:
2764 return -EINVAL;
2765 }
2766 return 0;
2767}
2768
2657void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) 2769void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2658{ 2770{
2659 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; 2771 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
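radeon_atom_get_clock_dividers() above is the primitive the new set_uvd_clocks implementations build on: ask the VBIOS to compute PLL dividers for a target clock, then program the UVD PLL registers from the result. A hedged caller sketch; the register writes are elided, and the UVD helpers may pass a different clock_type constant (the function above only shows COMPUTE_ENGINE_PLL_PARAM being special-cased):

struct atom_clock_dividers dividers;
int r;

/* 53300 is 533 MHz in the 10 kHz units used by this interface */
r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
				   53300, false, &dividers);
if (r == 0) {
	/* dividers.ref_div / .fb_div / .post_div now describe the PLL
	 * and are written to the UVD PLL by the family-specific code */
}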
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70d38241b083..7e265a58141f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -63,30 +63,50 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
63 break; 63 break;
64 } 64 }
65 } 65 }
66 if (!duplicate) { 66 if (duplicate) {
67 p->relocs[i].gobj = drm_gem_object_lookup(ddev,
68 p->filp,
69 r->handle);
70 if (p->relocs[i].gobj == NULL) {
71 DRM_ERROR("gem object lookup failed 0x%x\n",
72 r->handle);
73 return -ENOENT;
74 }
75 p->relocs_ptr[i] = &p->relocs[i];
76 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
77 p->relocs[i].lobj.bo = p->relocs[i].robj;
78 p->relocs[i].lobj.wdomain = r->write_domain;
79 p->relocs[i].lobj.rdomain = r->read_domains;
80 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
81 p->relocs[i].handle = r->handle;
82 p->relocs[i].flags = r->flags;
83 radeon_bo_list_add_object(&p->relocs[i].lobj,
84 &p->validated);
85
86 } else
87 p->relocs[i].handle = 0; 67 p->relocs[i].handle = 0;
68 continue;
69 }
70
71 p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
72 r->handle);
73 if (p->relocs[i].gobj == NULL) {
74 DRM_ERROR("gem object lookup failed 0x%x\n",
75 r->handle);
76 return -ENOENT;
77 }
78 p->relocs_ptr[i] = &p->relocs[i];
79 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
80 p->relocs[i].lobj.bo = p->relocs[i].robj;
81 p->relocs[i].lobj.written = !!r->write_domain;
82
 83 /* the first reloc of a UVD job is the
 84 message and that must be in VRAM */
85 if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
86 /* TODO: is this still needed for NI+ ? */
87 p->relocs[i].lobj.domain =
88 RADEON_GEM_DOMAIN_VRAM;
89
90 p->relocs[i].lobj.alt_domain =
91 RADEON_GEM_DOMAIN_VRAM;
92
93 } else {
94 uint32_t domain = r->write_domain ?
95 r->write_domain : r->read_domains;
96
97 p->relocs[i].lobj.domain = domain;
98 if (domain == RADEON_GEM_DOMAIN_VRAM)
99 domain |= RADEON_GEM_DOMAIN_GTT;
100 p->relocs[i].lobj.alt_domain = domain;
101 }
102
103 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
104 p->relocs[i].handle = r->handle;
105
106 radeon_bo_list_add_object(&p->relocs[i].lobj,
107 &p->validated);
88 } 108 }
89 return radeon_bo_list_validate(&p->validated); 109 return radeon_bo_list_validate(&p->validated, p->ring);
90} 110}
91 111
92static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) 112static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
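The domain/alt_domain pair set up in this hunk feeds the validation pass: a buffer is first placed in its preferred domain and may spill to the alternate one under memory pressure, except the UVD message buffer, whose two domains are both VRAM so it can never spill. A rough sketch of that consumer; radeon_bo_place() is a hypothetical stand-in, the real logic lives in radeon_bo_list_validate() and goes through TTM:

static int validate_one(struct radeon_bo_list *lobj)
{
	/* try the preferred domain, fall back to alt_domain on -ENOMEM */
	int r = radeon_bo_place(lobj->bo, lobj->domain);
	if (r == -ENOMEM && lobj->alt_domain != lobj->domain)
		r = radeon_bo_place(lobj->bo, lobj->alt_domain);
	return r;
}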
@@ -121,6 +141,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
121 return -EINVAL; 141 return -EINVAL;
122 } 142 }
123 break; 143 break;
144 case RADEON_CS_RING_UVD:
145 p->ring = R600_RING_TYPE_UVD_INDEX;
146 break;
124 } 147 }
125 return 0; 148 return 0;
126} 149}
@@ -241,15 +264,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
241 return -EINVAL; 264 return -EINVAL;
242 } 265 }
243 266
244 /* we only support VM on SI+ */ 267 if (radeon_cs_get_ring(p, ring, priority))
245 if ((p->rdev->family >= CHIP_TAHITI) &&
246 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
247 DRM_ERROR("VM required on SI+!\n");
248 return -EINVAL; 268 return -EINVAL;
249 }
250 269
251 if (radeon_cs_get_ring(p, ring, priority)) 270 /* we only support VM on some SI+ rings */
271 if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
273 DRM_ERROR("Ring %d requires VM!\n", p->ring);
252 return -EINVAL; 274 return -EINVAL;
275 }
253 } 276 }
254 277
255 /* deal with non-vm */ 278 /* deal with non-vm */
@@ -526,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
526 r = radeon_cs_handle_lockup(rdev, r); 549 r = radeon_cs_handle_lockup(rdev, r);
527 return r; 550 return r;
528 } 551 }
552
553 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
554 radeon_uvd_note_usage(rdev);
555
529 r = radeon_cs_ib_chunk(rdev, &parser); 556 r = radeon_cs_ib_chunk(rdev, &parser);
530 if (r) { 557 if (r) {
531 goto out; 558 goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 44b8034a400d..a8f608903989 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,6 +98,42 @@ static const char radeon_family_name[][16] = {
98}; 98};
99 99
100/** 100/**
101 * radeon_program_register_sequence - program an array of registers.
102 *
103 * @rdev: radeon_device pointer
104 * @registers: pointer to the register array
105 * @array_size: size of the register array
106 *
107 * Programs an array of registers with AND and OR masks.
108 * This is a helper for setting golden registers.
109 */
110void radeon_program_register_sequence(struct radeon_device *rdev,
111 const u32 *registers,
112 const u32 array_size)
113{
114 u32 tmp, reg, and_mask, or_mask;
115 int i;
116
117 if (array_size % 3)
118 return;
119
120 for (i = 0; i < array_size; i += 3) {
121 reg = registers[i + 0];
122 and_mask = registers[i + 1];
123 or_mask = registers[i + 2];
124
125 if (and_mask == 0xffffffff) {
126 tmp = or_mask;
127 } else {
128 tmp = RREG32(reg);
129 tmp &= ~and_mask;
130 tmp |= or_mask;
131 }
132 WREG32(reg, tmp);
133 }
134}
135
136/**
101 * radeon_surface_init - Clear GPU surface registers. 137 * radeon_surface_init - Clear GPU surface registers.
102 * 138 *
103 * @rdev: radeon_device pointer 139 * @rdev: radeon_device pointer
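radeon_program_register_sequence() consumes a flat array of (reg, and_mask, or_mask) triplets, hence the array_size % 3 guard, and treats an and_mask of 0xffffffff as a plain write that skips the read back. A usage sketch with placeholder offsets and values (the real golden tables live in the per-family files):

static const u32 demo_golden_registers[] = {
	/* reg     and_mask    or_mask */
	0x9a10, 0xffffffff, 0x00018208,	/* full write: reg = or_mask */
	0x8a14, 0x0000000f, 0x00000007,	/* RMW: clear low nibble, set 0x7 */
};

radeon_program_register_sequence(rdev, demo_golden_registers,
				 (u32)ARRAY_SIZE(demo_golden_registers));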
@@ -359,7 +395,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
359 uint64_t limit = (uint64_t)radeon_vram_limit << 20; 395 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
360 396
361 mc->vram_start = base; 397 mc->vram_start = base;
362 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 398 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
363 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 399 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
364 mc->real_vram_size = mc->aper_size; 400 mc->real_vram_size = mc->aper_size;
365 mc->mc_vram_size = mc->aper_size; 401 mc->mc_vram_size = mc->aper_size;
@@ -394,7 +430,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
394{ 430{
395 u64 size_af, size_bf; 431 u64 size_af, size_bf;
396 432
397 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 433 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
398 size_bf = mc->vram_start & ~mc->gtt_base_align; 434 size_bf = mc->vram_start & ~mc->gtt_base_align;
399 if (size_bf > size_af) { 435 if (size_bf > size_af) {
400 if (mc->gtt_size > size_bf) { 436 if (mc->gtt_size > size_bf) {
@@ -1068,6 +1104,17 @@ int radeon_device_init(struct radeon_device *rdev,
1068 radeon_agp_disable(rdev); 1104 radeon_agp_disable(rdev);
1069 } 1105 }
1070 1106
1107 /* Set the internal MC address mask
1108 * This is the max address of the GPU's
1109 * internal address space.
1110 */
1111 if (rdev->family >= CHIP_CAYMAN)
1112 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1113 else if (rdev->family >= CHIP_CEDAR)
1114 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1115 else
1116 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1117
1071 /* set DMA mask + need_dma32 flags. 1118 /* set DMA mask + need_dma32 flags.
1072 * PCIE - can handle 40-bits. 1119 * PCIE - can handle 40-bits.
1073 * IGP - can handle 40-bits 1120 * IGP - can handle 40-bits
@@ -1131,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
1131 if (r) 1178 if (r)
1132 DRM_ERROR("ib ring test failed (%d).\n", r); 1179 DRM_ERROR("ib ring test failed (%d).\n", r);
1133 1180
1181 r = radeon_gem_debugfs_init(rdev);
1182 if (r) {
1183 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1184 }
1185
1134 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1186 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1135 /* Acceleration not working on AGP card try again 1187 /* Acceleration not working on AGP card try again
1136 * with fallback to PCI or PCIE GART 1188 * with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66a7f0fd9620..d33f484ace48 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -71,9 +71,12 @@
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers 72 * 2.29.0 - R500 FP16 color clear registers
73 * 2.30.0 - fix for FMASK texturing 73 * 2.30.0 - fix for FMASK texturing
74 * 2.31.0 - Add fastfb support for rs690
75 * 2.32.0 - new info request for rings working
76 * 2.33.0 - Add SI tiling mode array query
74 */ 77 */
75#define KMS_DRIVER_MAJOR 2 78#define KMS_DRIVER_MAJOR 2
76#define KMS_DRIVER_MINOR 30 79#define KMS_DRIVER_MINOR 33
77#define KMS_DRIVER_PATCHLEVEL 0 80#define KMS_DRIVER_PATCHLEVEL 0
78int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 81int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
79int radeon_driver_unload_kms(struct drm_device *dev); 82int radeon_driver_unload_kms(struct drm_device *dev);
@@ -160,6 +163,7 @@ int radeon_hw_i2c = 0;
160int radeon_pcie_gen2 = -1; 163int radeon_pcie_gen2 = -1;
161int radeon_msi = -1; 164int radeon_msi = -1;
162int radeon_lockup_timeout = 10000; 165int radeon_lockup_timeout = 10000;
166int radeon_fastfb = 0;
163 167
164MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 168MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
165module_param_named(no_wb, radeon_no_wb, int, 0444); 169module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -212,6 +216,9 @@ module_param_named(msi, radeon_msi, int, 0444);
212MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)"); 216MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
213module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444); 217module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
214 218
219MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
220module_param_named(fastfb, radeon_fastfb, int, 0444);
221
215static struct pci_device_id pciidlist[] = { 222static struct pci_device_id pciidlist[] = {
216 radeon_PCI_IDS 223 radeon_PCI_IDS
217}; 224};
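The bump from minor 2.30 to 2.33 is how userspace discovers the new queries: each feature listed in the version comment gets its own minor so Mesa and the DDX can gate on it. A hedged userspace-side sketch using libdrm:

/* illustrative check in a libdrm client */
drmVersionPtr ver = drmGetVersion(fd);
int has_ring_query = ver &&
	ver->version_major == 2 && ver->version_minor >= 32;
drmFreeVersion(ver);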
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 34356252567a..5b937dfe6f65 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -31,9 +31,9 @@
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/list.h>
35#include <linux/kref.h> 34#include <linux/kref.h>
36#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/firmware.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include "radeon_reg.h" 38#include "radeon_reg.h"
39#include "radeon.h" 39#include "radeon.h"
@@ -768,7 +768,19 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { 769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
770 rdev->fence_drv[ring].scratch_reg = 0; 770 rdev->fence_drv[ring].scratch_reg = 0;
771 index = R600_WB_EVENT_OFFSET + ring * 4; 771 if (ring != R600_RING_TYPE_UVD_INDEX) {
772 index = R600_WB_EVENT_OFFSET + ring * 4;
773 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
774 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
775 index;
776
777 } else {
778 /* put fence directly behind firmware */
779 index = ALIGN(rdev->uvd_fw->size, 8);
780 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
781 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
782 }
783
772 } else { 784 } else {
773 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); 785 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
774 if (r) { 786 if (r) {
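The UVD ring cannot use the normal writeback page for its fence, so the fence value is parked inside the UVD buffer object immediately after the firmware image; the ALIGN(size, 8) keeps it naturally aligned for a 64-bit store. The implied layout, informally:

/*
 * UVD BO layout implied above (informal; the rest of the BO is
 * managed by the UVD code itself):
 *
 *   uvd.gpu_addr + 0                        firmware image
 *   uvd.gpu_addr + ALIGN(uvd_fw->size, 8)   fence value for this ring
 */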
@@ -778,9 +790,9 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
778 index = RADEON_WB_SCRATCH_OFFSET + 790 index = RADEON_WB_SCRATCH_OFFSET +
779 rdev->fence_drv[ring].scratch_reg - 791 rdev->fence_drv[ring].scratch_reg -
780 rdev->scratch.reg_base; 792 rdev->scratch.reg_base;
793 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
794 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
781 } 795 }
782 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
783 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
784 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); 796 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
785 rdev->fence_drv[ring].initialized = true; 797 rdev->fence_drv[ring].initialized = true;
786 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", 798 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6b7957..aa796031ab65 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -84,6 +84,7 @@ retry:
84 return r; 84 return r;
85 } 85 }
86 *obj = &robj->gem_base; 86 *obj = &robj->gem_base;
87 robj->pid = task_pid_nr(current);
87 88
88 mutex_lock(&rdev->gem.mutex); 89 mutex_lock(&rdev->gem.mutex);
89 list_add_tail(&robj->list, &rdev->gem.objects); 90 list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
575{ 576{
576 return drm_gem_handle_delete(file_priv, handle); 577 return drm_gem_handle_delete(file_priv, handle);
577} 578}
579
580#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{
583 struct drm_info_node *node = (struct drm_info_node *)m->private;
584 struct drm_device *dev = node->minor->dev;
585 struct radeon_device *rdev = dev->dev_private;
586 struct radeon_bo *rbo;
587 unsigned i = 0;
588
589 mutex_lock(&rdev->gem.mutex);
590 list_for_each_entry(rbo, &rdev->gem.objects, list) {
591 unsigned domain;
592 const char *placement;
593
594 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
595 switch (domain) {
596 case RADEON_GEM_DOMAIN_VRAM:
597 placement = "VRAM";
598 break;
599 case RADEON_GEM_DOMAIN_GTT:
600 placement = " GTT";
601 break;
602 case RADEON_GEM_DOMAIN_CPU:
603 default:
604 placement = " CPU";
605 break;
606 }
607 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
608 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
609 placement, (unsigned long)rbo->pid);
610 i++;
611 }
612 mutex_unlock(&rdev->gem.mutex);
613 return 0;
614}
615
616static struct drm_info_list radeon_debugfs_gem_list[] = {
617 {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
618};
619#endif
620
621int radeon_gem_debugfs_init(struct radeon_device *rdev)
622{
623#if defined(CONFIG_DEBUG_FS)
624 return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
625#endif
626 return 0;
627}
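With DRM debugfs enabled this shows up as radeon_gem_info, typically under /sys/kernel/debug/dri/<minor>/. Given the seq_printf format above, the output looks roughly like this (illustrative values):

bo[0x00000000]     4096kB        4MB VRAM pid     1234
bo[0x00000001]      256kB        0MB  GTT pid     1234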
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c75cb2c6ba71..4f2d4f4c1dab 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
50 50
51 if (rdev == NULL) 51 if (rdev == NULL)
52 return 0; 52 return 0;
53 if (rdev->rmmio == NULL)
54 goto done_free;
53 radeon_acpi_fini(rdev); 55 radeon_acpi_fini(rdev);
54 radeon_modeset_fini(rdev); 56 radeon_modeset_fini(rdev);
55 radeon_device_fini(rdev); 57 radeon_device_fini(rdev);
58
59done_free:
56 kfree(rdev); 60 kfree(rdev);
57 dev->dev_private = NULL; 61 dev->dev_private = NULL;
58 return 0; 62 return 0;
@@ -176,80 +180,65 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
176 struct radeon_device *rdev = dev->dev_private; 180 struct radeon_device *rdev = dev->dev_private;
177 struct drm_radeon_info *info = data; 181 struct drm_radeon_info *info = data;
178 struct radeon_mode_info *minfo = &rdev->mode_info; 182 struct radeon_mode_info *minfo = &rdev->mode_info;
179 uint32_t value, *value_ptr; 183 uint32_t *value, value_tmp, *value_ptr, value_size;
180 uint64_t value64, *value_ptr64; 184 uint64_t value64;
181 struct drm_crtc *crtc; 185 struct drm_crtc *crtc;
182 int i, found; 186 int i, found;
183 187
184 /* TIMESTAMP is a 64-bit value, needs special handling. */
185 if (info->request == RADEON_INFO_TIMESTAMP) {
186 if (rdev->family >= CHIP_R600) {
187 value_ptr64 = (uint64_t*)((unsigned long)info->value);
188 value64 = radeon_get_gpu_clock_counter(rdev);
189
190 if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
191 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
192 return -EFAULT;
193 }
194 return 0;
195 } else {
196 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
197 return -EINVAL;
198 }
199 }
200
201 value_ptr = (uint32_t *)((unsigned long)info->value); 188 value_ptr = (uint32_t *)((unsigned long)info->value);
202 if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { 189 value = &value_tmp;
203 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 190 value_size = sizeof(uint32_t);
204 return -EFAULT;
205 }
206 191
207 switch (info->request) { 192 switch (info->request) {
208 case RADEON_INFO_DEVICE_ID: 193 case RADEON_INFO_DEVICE_ID:
209 value = dev->pci_device; 194 *value = dev->pci_device;
210 break; 195 break;
211 case RADEON_INFO_NUM_GB_PIPES: 196 case RADEON_INFO_NUM_GB_PIPES:
212 value = rdev->num_gb_pipes; 197 *value = rdev->num_gb_pipes;
213 break; 198 break;
214 case RADEON_INFO_NUM_Z_PIPES: 199 case RADEON_INFO_NUM_Z_PIPES:
215 value = rdev->num_z_pipes; 200 *value = rdev->num_z_pipes;
216 break; 201 break;
217 case RADEON_INFO_ACCEL_WORKING: 202 case RADEON_INFO_ACCEL_WORKING:
218 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ 203 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
219 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) 204 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
220 value = false; 205 *value = false;
221 else 206 else
222 value = rdev->accel_working; 207 *value = rdev->accel_working;
223 break; 208 break;
224 case RADEON_INFO_CRTC_FROM_ID: 209 case RADEON_INFO_CRTC_FROM_ID:
210 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
211 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
212 return -EFAULT;
213 }
225 for (i = 0, found = 0; i < rdev->num_crtc; i++) { 214 for (i = 0, found = 0; i < rdev->num_crtc; i++) {
226 crtc = (struct drm_crtc *)minfo->crtcs[i]; 215 crtc = (struct drm_crtc *)minfo->crtcs[i];
227 if (crtc && crtc->base.id == value) { 216 if (crtc && crtc->base.id == *value) {
228 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 217 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
229 value = radeon_crtc->crtc_id; 218 *value = radeon_crtc->crtc_id;
230 found = 1; 219 found = 1;
231 break; 220 break;
232 } 221 }
233 } 222 }
234 if (!found) { 223 if (!found) {
235 DRM_DEBUG_KMS("unknown crtc id %d\n", value); 224 DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
236 return -EINVAL; 225 return -EINVAL;
237 } 226 }
238 break; 227 break;
239 case RADEON_INFO_ACCEL_WORKING2: 228 case RADEON_INFO_ACCEL_WORKING2:
240 value = rdev->accel_working; 229 *value = rdev->accel_working;
241 break; 230 break;
242 case RADEON_INFO_TILING_CONFIG: 231 case RADEON_INFO_TILING_CONFIG:
243 if (rdev->family >= CHIP_TAHITI) 232 if (rdev->family >= CHIP_TAHITI)
244 value = rdev->config.si.tile_config; 233 *value = rdev->config.si.tile_config;
245 else if (rdev->family >= CHIP_CAYMAN) 234 else if (rdev->family >= CHIP_CAYMAN)
246 value = rdev->config.cayman.tile_config; 235 *value = rdev->config.cayman.tile_config;
247 else if (rdev->family >= CHIP_CEDAR) 236 else if (rdev->family >= CHIP_CEDAR)
248 value = rdev->config.evergreen.tile_config; 237 *value = rdev->config.evergreen.tile_config;
249 else if (rdev->family >= CHIP_RV770) 238 else if (rdev->family >= CHIP_RV770)
250 value = rdev->config.rv770.tile_config; 239 *value = rdev->config.rv770.tile_config;
251 else if (rdev->family >= CHIP_R600) 240 else if (rdev->family >= CHIP_R600)
252 value = rdev->config.r600.tile_config; 241 *value = rdev->config.r600.tile_config;
253 else { 242 else {
254 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 243 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
255 return -EINVAL; 244 return -EINVAL;
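The pattern being introduced across these hunks: value normally points at the stack variable value_tmp and value_size stays sizeof(uint32_t); requests that take input do an explicit DRM_COPY_FROM_USER, while 64-bit requests retarget value at value64 and widen value_size. A condensed sketch of the shape, assuming the function tail (outside this excerpt) finishes with a single copy-out:

uint32_t value_tmp, *value = &value_tmp, value_size = sizeof(uint32_t);
uint64_t value64;

switch (info->request) {
case RADEON_INFO_DEVICE_ID:
	*value = dev->pci_device;	/* output-only request */
	break;
case RADEON_INFO_TIMESTAMP:
	value = (uint32_t *)&value64;	/* widen the result */
	value_size = sizeof(uint64_t);
	value64 = radeon_get_gpu_clock_counter(rdev);
	break;
}
/* assumed single exit path; not visible in this excerpt */
if (DRM_COPY_TO_USER(value_ptr, (char *)value, value_size))
	return -EFAULT;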
@@ -262,73 +251,81 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
262 * 251 *
263 * When returning, the value is 1 if filp owns hyper-z access, 252 * When returning, the value is 1 if filp owns hyper-z access,
264 * 0 otherwise. */ 253 * 0 otherwise. */
265 if (value >= 2) { 254 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
266 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); 255 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
256 return -EFAULT;
257 }
258 if (*value >= 2) {
259 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
267 return -EINVAL; 260 return -EINVAL;
268 } 261 }
269 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); 262 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
270 break; 263 break;
271 case RADEON_INFO_WANT_CMASK: 264 case RADEON_INFO_WANT_CMASK:
272 /* The same logic as Hyper-Z. */ 265 /* The same logic as Hyper-Z. */
273 if (value >= 2) { 266 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
274 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); 267 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
268 return -EFAULT;
269 }
270 if (*value >= 2) {
271 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
275 return -EINVAL; 272 return -EINVAL;
276 } 273 }
277 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); 274 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
278 break; 275 break;
279 case RADEON_INFO_CLOCK_CRYSTAL_FREQ: 276 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
280 /* return clock value in KHz */ 277 /* return clock value in KHz */
281 if (rdev->asic->get_xclk) 278 if (rdev->asic->get_xclk)
282 value = radeon_get_xclk(rdev) * 10; 279 *value = radeon_get_xclk(rdev) * 10;
283 else 280 else
284 value = rdev->clock.spll.reference_freq * 10; 281 *value = rdev->clock.spll.reference_freq * 10;
285 break; 282 break;
286 case RADEON_INFO_NUM_BACKENDS: 283 case RADEON_INFO_NUM_BACKENDS:
287 if (rdev->family >= CHIP_TAHITI) 284 if (rdev->family >= CHIP_TAHITI)
288 value = rdev->config.si.max_backends_per_se * 285 *value = rdev->config.si.max_backends_per_se *
289 rdev->config.si.max_shader_engines; 286 rdev->config.si.max_shader_engines;
290 else if (rdev->family >= CHIP_CAYMAN) 287 else if (rdev->family >= CHIP_CAYMAN)
291 value = rdev->config.cayman.max_backends_per_se * 288 *value = rdev->config.cayman.max_backends_per_se *
292 rdev->config.cayman.max_shader_engines; 289 rdev->config.cayman.max_shader_engines;
293 else if (rdev->family >= CHIP_CEDAR) 290 else if (rdev->family >= CHIP_CEDAR)
294 value = rdev->config.evergreen.max_backends; 291 *value = rdev->config.evergreen.max_backends;
295 else if (rdev->family >= CHIP_RV770) 292 else if (rdev->family >= CHIP_RV770)
296 value = rdev->config.rv770.max_backends; 293 *value = rdev->config.rv770.max_backends;
297 else if (rdev->family >= CHIP_R600) 294 else if (rdev->family >= CHIP_R600)
298 value = rdev->config.r600.max_backends; 295 *value = rdev->config.r600.max_backends;
299 else { 296 else {
300 return -EINVAL; 297 return -EINVAL;
301 } 298 }
302 break; 299 break;
303 case RADEON_INFO_NUM_TILE_PIPES: 300 case RADEON_INFO_NUM_TILE_PIPES:
304 if (rdev->family >= CHIP_TAHITI) 301 if (rdev->family >= CHIP_TAHITI)
305 value = rdev->config.si.max_tile_pipes; 302 *value = rdev->config.si.max_tile_pipes;
306 else if (rdev->family >= CHIP_CAYMAN) 303 else if (rdev->family >= CHIP_CAYMAN)
307 value = rdev->config.cayman.max_tile_pipes; 304 *value = rdev->config.cayman.max_tile_pipes;
308 else if (rdev->family >= CHIP_CEDAR) 305 else if (rdev->family >= CHIP_CEDAR)
309 value = rdev->config.evergreen.max_tile_pipes; 306 *value = rdev->config.evergreen.max_tile_pipes;
310 else if (rdev->family >= CHIP_RV770) 307 else if (rdev->family >= CHIP_RV770)
311 value = rdev->config.rv770.max_tile_pipes; 308 *value = rdev->config.rv770.max_tile_pipes;
312 else if (rdev->family >= CHIP_R600) 309 else if (rdev->family >= CHIP_R600)
313 value = rdev->config.r600.max_tile_pipes; 310 *value = rdev->config.r600.max_tile_pipes;
314 else { 311 else {
315 return -EINVAL; 312 return -EINVAL;
316 } 313 }
317 break; 314 break;
318 case RADEON_INFO_FUSION_GART_WORKING: 315 case RADEON_INFO_FUSION_GART_WORKING:
319 value = 1; 316 *value = 1;
320 break; 317 break;
321 case RADEON_INFO_BACKEND_MAP: 318 case RADEON_INFO_BACKEND_MAP:
322 if (rdev->family >= CHIP_TAHITI) 319 if (rdev->family >= CHIP_TAHITI)
323 value = rdev->config.si.backend_map; 320 *value = rdev->config.si.backend_map;
324 else if (rdev->family >= CHIP_CAYMAN) 321 else if (rdev->family >= CHIP_CAYMAN)
325 value = rdev->config.cayman.backend_map; 322 *value = rdev->config.cayman.backend_map;
326 else if (rdev->family >= CHIP_CEDAR) 323 else if (rdev->family >= CHIP_CEDAR)
327 value = rdev->config.evergreen.backend_map; 324 *value = rdev->config.evergreen.backend_map;
328 else if (rdev->family >= CHIP_RV770) 325 else if (rdev->family >= CHIP_RV770)
329 value = rdev->config.rv770.backend_map; 326 *value = rdev->config.rv770.backend_map;
330 else if (rdev->family >= CHIP_R600) 327 else if (rdev->family >= CHIP_R600)
331 value = rdev->config.r600.backend_map; 328 *value = rdev->config.r600.backend_map;
332 else { 329 else {
333 return -EINVAL; 330 return -EINVAL;
334 } 331 }
@@ -337,50 +334,91 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
337 /* this is where we report if vm is supported or not */ 334 /* this is where we report if vm is supported or not */
338 if (rdev->family < CHIP_CAYMAN) 335 if (rdev->family < CHIP_CAYMAN)
339 return -EINVAL; 336 return -EINVAL;
340 value = RADEON_VA_RESERVED_SIZE; 337 *value = RADEON_VA_RESERVED_SIZE;
341 break; 338 break;
342 case RADEON_INFO_IB_VM_MAX_SIZE: 339 case RADEON_INFO_IB_VM_MAX_SIZE:
343 /* this is where we report if vm is supported or not */ 340 /* this is where we report if vm is supported or not */
344 if (rdev->family < CHIP_CAYMAN) 341 if (rdev->family < CHIP_CAYMAN)
345 return -EINVAL; 342 return -EINVAL;
346 value = RADEON_IB_VM_MAX_SIZE; 343 *value = RADEON_IB_VM_MAX_SIZE;
347 break; 344 break;
348 case RADEON_INFO_MAX_PIPES: 345 case RADEON_INFO_MAX_PIPES:
349 if (rdev->family >= CHIP_TAHITI) 346 if (rdev->family >= CHIP_TAHITI)
350 value = rdev->config.si.max_cu_per_sh; 347 *value = rdev->config.si.max_cu_per_sh;
351 else if (rdev->family >= CHIP_CAYMAN) 348 else if (rdev->family >= CHIP_CAYMAN)
352 value = rdev->config.cayman.max_pipes_per_simd; 349 *value = rdev->config.cayman.max_pipes_per_simd;
353 else if (rdev->family >= CHIP_CEDAR) 350 else if (rdev->family >= CHIP_CEDAR)
354 value = rdev->config.evergreen.max_pipes; 351 *value = rdev->config.evergreen.max_pipes;
355 else if (rdev->family >= CHIP_RV770) 352 else if (rdev->family >= CHIP_RV770)
356 value = rdev->config.rv770.max_pipes; 353 *value = rdev->config.rv770.max_pipes;
357 else if (rdev->family >= CHIP_R600) 354 else if (rdev->family >= CHIP_R600)
358 value = rdev->config.r600.max_pipes; 355 *value = rdev->config.r600.max_pipes;
359 else { 356 else {
360 return -EINVAL; 357 return -EINVAL;
361 } 358 }
362 break; 359 break;
360 case RADEON_INFO_TIMESTAMP:
361 if (rdev->family < CHIP_R600) {
362 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
363 return -EINVAL;
364 }
365 value = (uint32_t*)&value64;
366 value_size = sizeof(uint64_t);
367 value64 = radeon_get_gpu_clock_counter(rdev);
368 break;
363 case RADEON_INFO_MAX_SE: 369 case RADEON_INFO_MAX_SE:
364 if (rdev->family >= CHIP_TAHITI) 370 if (rdev->family >= CHIP_TAHITI)
365 value = rdev->config.si.max_shader_engines; 371 *value = rdev->config.si.max_shader_engines;
366 else if (rdev->family >= CHIP_CAYMAN) 372 else if (rdev->family >= CHIP_CAYMAN)
367 value = rdev->config.cayman.max_shader_engines; 373 *value = rdev->config.cayman.max_shader_engines;
368 else if (rdev->family >= CHIP_CEDAR) 374 else if (rdev->family >= CHIP_CEDAR)
369 value = rdev->config.evergreen.num_ses; 375 *value = rdev->config.evergreen.num_ses;
370 else 376 else
371 value = 1; 377 *value = 1;
372 break; 378 break;
373 case RADEON_INFO_MAX_SH_PER_SE: 379 case RADEON_INFO_MAX_SH_PER_SE:
374 if (rdev->family >= CHIP_TAHITI) 380 if (rdev->family >= CHIP_TAHITI)
375 value = rdev->config.si.max_sh_per_se; 381 *value = rdev->config.si.max_sh_per_se;
376 else 382 else
377 return -EINVAL; 383 return -EINVAL;
378 break; 384 break;
385 case RADEON_INFO_FASTFB_WORKING:
386 *value = rdev->fastfb_working;
387 break;
388 case RADEON_INFO_RING_WORKING:
389 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
390 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
391 return -EFAULT;
392 }
393 switch (*value) {
394 case RADEON_CS_RING_GFX:
395 case RADEON_CS_RING_COMPUTE:
396 *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
397 break;
398 case RADEON_CS_RING_DMA:
399 *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
400 *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
401 break;
402 case RADEON_CS_RING_UVD:
403 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
404 break;
405 default:
406 return -EINVAL;
407 }
408 break;
409 case RADEON_INFO_SI_TILE_MODE_ARRAY:
410 if (rdev->family < CHIP_TAHITI) {
411 DRM_DEBUG_KMS("tile mode array is si only!\n");
412 return -EINVAL;
413 }
414 value = rdev->config.si.tile_mode_array;
415 value_size = sizeof(uint32_t)*32;
416 break;
379 default: 417 default:
380 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 418 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
381 return -EINVAL; 419 return -EINVAL;
382 } 420 }
383 if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { 421 if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
384 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); 422 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
385 return -EFAULT; 423 return -EFAULT;
386 } 424 }
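
The reworked ioctl now copies results out through a value pointer plus an explicit value_size, so the single copy path at the end of the switch serves 32-bit scalars, the 64-bit timestamp, and the 32-dword SI tile-mode array alike. Below is a minimal userspace sketch of reading the timestamp through this interface; it assumes the uapi struct drm_radeon_info layout (request/pad/value, with value carrying a user pointer) and a system that installs drm/radeon_drm.h, with error handling trimmed.

/* Minimal userspace sketch (not part of this patch): querying the GPU
 * clock counter through the extended info ioctl. Assumes the uapi
 * struct drm_radeon_info { u32 request; u32 pad; u64 value; }, where
 * value carries a user pointer to the output buffer. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

int main(void)
{
	uint64_t timestamp = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_TIMESTAMP,
		.value = (uintptr_t)&timestamp,	/* kernel writes 8 bytes here */
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0)
		printf("GPU clock counter: %llu\n",
		       (unsigned long long)timestamp);
	return 0;
}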
@@ -513,6 +551,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
513 rdev->hyperz_filp = NULL; 551 rdev->hyperz_filp = NULL;
514 if (rdev->cmask_filp == file_priv) 552 if (rdev->cmask_filp == file_priv)
515 rdev->cmask_filp = NULL; 553 rdev->cmask_filp = NULL;
554 radeon_uvd_free_handles(rdev, file_priv);
516} 555}
517 556
518/* 557/*
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4003f5a68c09..44e579e75fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -492,6 +492,29 @@ struct radeon_framebuffer {
492#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ 492#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
493 ((em) == ATOM_ENCODER_MODE_DP_MST)) 493 ((em) == ATOM_ENCODER_MODE_DP_MST))
494 494
495struct atom_clock_dividers {
496 u32 post_div;
497 union {
498 struct {
499#ifdef __BIG_ENDIAN
500 u32 reserved : 6;
501 u32 whole_fb_div : 12;
502 u32 frac_fb_div : 14;
503#else
504 u32 frac_fb_div : 14;
505 u32 whole_fb_div : 12;
506 u32 reserved : 6;
507#endif
508 };
509 u32 fb_div;
510 };
511 u32 ref_div;
512 bool enable_post_div;
513 bool enable_dithen;
514 u32 vco_mode;
515 u32 real_clock;
516};
517
495extern enum radeon_tv_std 518extern enum radeon_tv_std
496radeon_combios_get_tv_info(struct radeon_device *rdev); 519radeon_combios_get_tv_info(struct radeon_device *rdev);
497extern enum radeon_tv_std 520extern enum radeon_tv_std
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3aface2d12d..1424ccde2377 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -321,8 +321,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
321int radeon_bo_init(struct radeon_device *rdev) 321int radeon_bo_init(struct radeon_device *rdev)
322{ 322{
323 /* Add an MTRR for the VRAM */ 323 /* Add an MTRR for the VRAM */
324 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 324 if (!rdev->fastfb_working) {
325 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
325 MTRR_TYPE_WRCOMB, 1); 326 MTRR_TYPE_WRCOMB, 1);
327 }
326 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 328 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
327 rdev->mc.mc_vram_size >> 20, 329 rdev->mc.mc_vram_size >> 20,
328 (unsigned long long)rdev->mc.aper_size >> 20); 330 (unsigned long long)rdev->mc.aper_size >> 20);
@@ -339,14 +341,14 @@ void radeon_bo_fini(struct radeon_device *rdev)
339void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 341void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
340 struct list_head *head) 342 struct list_head *head)
341{ 343{
342 if (lobj->wdomain) { 344 if (lobj->written) {
343 list_add(&lobj->tv.head, head); 345 list_add(&lobj->tv.head, head);
344 } else { 346 } else {
345 list_add_tail(&lobj->tv.head, head); 347 list_add_tail(&lobj->tv.head, head);
346 } 348 }
347} 349}
348 350
349int radeon_bo_list_validate(struct list_head *head) 351int radeon_bo_list_validate(struct list_head *head, int ring)
350{ 352{
351 struct radeon_bo_list *lobj; 353 struct radeon_bo_list *lobj;
352 struct radeon_bo *bo; 354 struct radeon_bo *bo;
@@ -360,15 +362,17 @@ int radeon_bo_list_validate(struct list_head *head)
360 list_for_each_entry(lobj, head, tv.head) { 362 list_for_each_entry(lobj, head, tv.head) {
361 bo = lobj->bo; 363 bo = lobj->bo;
362 if (!bo->pin_count) { 364 if (!bo->pin_count) {
363 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; 365 domain = lobj->domain;
364 366
365 retry: 367 retry:
366 radeon_ttm_placement_from_domain(bo, domain); 368 radeon_ttm_placement_from_domain(bo, domain);
369 if (ring == R600_RING_TYPE_UVD_INDEX)
370 radeon_uvd_force_into_uvd_segment(bo);
367 r = ttm_bo_validate(&bo->tbo, &bo->placement, 371 r = ttm_bo_validate(&bo->tbo, &bo->placement,
368 true, false); 372 true, false);
369 if (unlikely(r)) { 373 if (unlikely(r)) {
370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 374 if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
371 domain |= RADEON_GEM_DOMAIN_GTT; 375 domain = lobj->alt_domain;
372 goto retry; 376 goto retry;
373 } 377 }
374 return r; 378 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5fc86b03043b..e2cb80a96b51 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,7 +128,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
128extern void radeon_bo_fini(struct radeon_device *rdev); 128extern void radeon_bo_fini(struct radeon_device *rdev);
129extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 129extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
130 struct list_head *head); 130 struct list_head *head);
131extern int radeon_bo_list_validate(struct list_head *head); 131extern int radeon_bo_list_validate(struct list_head *head, int ring);
132extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 132extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
133 struct vm_area_struct *vma); 133 struct vm_area_struct *vma);
134extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 134extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a74e87..788c64cb4b47 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
843 struct radeon_device *rdev = dev->dev_private; 843 struct radeon_device *rdev = dev->dev_private;
844 844
845 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 845 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
846 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 846 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
847 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
848 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
849 else
850 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
847 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 851 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
848 if (rdev->asic->pm.get_memory_clock) 852 if (rdev->asic->pm.get_memory_clock)
849 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 853 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d58e268ff6d..e17faa7cf732 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
180 radeon_semaphore_free(rdev, &ib->semaphore, NULL); 180 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
181 } 181 }
182 /* if we can't remember our last VM flush then flush now! */ 182 /* if we can't remember our last VM flush then flush now! */
183 if (ib->vm && !ib->vm->last_flush) { 183 /* XXX figure out why we have to flush for every IB */
184 if (ib->vm /*&& !ib->vm->last_flush*/) {
184 radeon_ring_vm_flush(rdev, ib->ring, ib->vm); 185 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
185 } 186 }
186 if (const_ib) { 187 if (const_ib) {
@@ -368,7 +369,7 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
368{ 369{
369 u32 rptr; 370 u32 rptr;
370 371
371 if (rdev->wb.enabled) 372 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
372 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 373 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
373 else 374 else
374 rptr = RREG32(ring->rptr_reg); 375 rptr = RREG32(ring->rptr_reg);
@@ -821,18 +822,20 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
821 return 0; 822 return 0;
822} 823}
823 824
824static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; 825static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
825static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; 826static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
826static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; 827static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
827static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX; 828static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
828static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; 829static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
830static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
829 831
830static struct drm_info_list radeon_debugfs_ring_info_list[] = { 832static struct drm_info_list radeon_debugfs_ring_info_list[] = {
831 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index}, 833 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
832 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index}, 834 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
833 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, 835 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
834 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index}, 836 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
835 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index}, 837 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
838 {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
836}; 839};
837 840
838static int radeon_debugfs_sa_info(struct seq_file *m, void *data) 841static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index cb800995d4f9..0abe5a9431bb 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
64 } 64 }
65 65
66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
67 RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo); 67 domain, NULL, &sa_manager->bo);
68 if (r) { 68 if (r) {
69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
70 return r; 70 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index fda09c9ea689..bbed4af8d0bc 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -252,6 +252,36 @@ void radeon_test_moves(struct radeon_device *rdev)
252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT); 252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
253} 253}
254 254
255static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
256 struct radeon_ring *ring,
257 struct radeon_fence **fence)
258{
259 int r;
260
261 if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
262 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
263 if (r) {
264 DRM_ERROR("Failed to get dummy create msg\n");
265 return r;
266 }
267
268 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
269 if (r) {
270 DRM_ERROR("Failed to get dummy destroy msg\n");
271 return r;
272 }
273 } else {
274 r = radeon_ring_lock(rdev, ring, 64);
275 if (r) {
276 DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
277 return r;
278 }
279 radeon_fence_emit(rdev, fence, ring->idx);
280 radeon_ring_unlock_commit(rdev, ring);
281 }
282 return 0;
283}
284
255void radeon_test_ring_sync(struct radeon_device *rdev, 285void radeon_test_ring_sync(struct radeon_device *rdev,
256 struct radeon_ring *ringA, 286 struct radeon_ring *ringA,
257 struct radeon_ring *ringB) 287 struct radeon_ring *ringB)
@@ -272,21 +302,24 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
272 goto out_cleanup; 302 goto out_cleanup;
273 } 303 }
274 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 304 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
275 r = radeon_fence_emit(rdev, &fence1, ringA->idx); 305 radeon_ring_unlock_commit(rdev, ringA);
276 if (r) { 306
277 DRM_ERROR("Failed to emit fence 1\n"); 307 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
278 radeon_ring_unlock_undo(rdev, ringA); 308 if (r)
279 goto out_cleanup; 309 goto out_cleanup;
280 } 310
281 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 311 r = radeon_ring_lock(rdev, ringA, 64);
282 r = radeon_fence_emit(rdev, &fence2, ringA->idx);
283 if (r) { 312 if (r) {
284 DRM_ERROR("Failed to emit fence 2\n"); 313 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
285 radeon_ring_unlock_undo(rdev, ringA);
286 goto out_cleanup; 314 goto out_cleanup;
287 } 315 }
316 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
288 radeon_ring_unlock_commit(rdev, ringA); 317 radeon_ring_unlock_commit(rdev, ringA);
289 318
319 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
320 if (r)
321 goto out_cleanup;
322
290 mdelay(1000); 323 mdelay(1000);
291 324
292 if (radeon_fence_signaled(fence1)) { 325 if (radeon_fence_signaled(fence1)) {
@@ -364,27 +397,22 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
364 goto out_cleanup; 397 goto out_cleanup;
365 } 398 }
366 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 399 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
367 r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
368 if (r) {
369 DRM_ERROR("Failed to emit sync fence 1\n");
370 radeon_ring_unlock_undo(rdev, ringA);
371 goto out_cleanup;
372 }
373 radeon_ring_unlock_commit(rdev, ringA); 400 radeon_ring_unlock_commit(rdev, ringA);
374 401
402 r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
403 if (r)
404 goto out_cleanup;
405
375 r = radeon_ring_lock(rdev, ringB, 64); 406 r = radeon_ring_lock(rdev, ringB, 64);
376 if (r) { 407 if (r) {
377 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); 408 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
378 goto out_cleanup; 409 goto out_cleanup;
379 } 410 }
380 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); 411 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
381 r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
382 if (r) {
383 DRM_ERROR("Failed to create sync fence 2\n");
384 radeon_ring_unlock_undo(rdev, ringB);
385 goto out_cleanup;
386 }
387 radeon_ring_unlock_commit(rdev, ringB); 412 radeon_ring_unlock_commit(rdev, ringB);
413 r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
414 if (r)
415 goto out_cleanup;
388 416
389 mdelay(1000); 417 mdelay(1000);
390 418
@@ -393,7 +421,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
393 goto out_cleanup; 421 goto out_cleanup;
394 } 422 }
395 if (radeon_fence_signaled(fenceB)) { 423 if (radeon_fence_signaled(fenceB)) {
396 DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); 424 DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
397 goto out_cleanup; 425 goto out_cleanup;
398 } 426 }
399 427
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 000000000000..906e5c0ca3b9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,831 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <deathsimple@vodafone.de>
29 */
30
31#include <linux/firmware.h>
32#include <linux/module.h>
33#include <drm/drmP.h>
34#include <drm/drm.h>
35
36#include "radeon.h"
37#include "r600d.h"
38
39/* 1 second timeout */
40#define UVD_IDLE_TIMEOUT_MS 1000
41
42/* Firmware Names */
43#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
47
48MODULE_FIRMWARE(FIRMWARE_RV710);
49MODULE_FIRMWARE(FIRMWARE_CYPRESS);
50MODULE_FIRMWARE(FIRMWARE_SUMO);
51MODULE_FIRMWARE(FIRMWARE_TAHITI);
52
53static void radeon_uvd_idle_work_handler(struct work_struct *work);
54
55int radeon_uvd_init(struct radeon_device *rdev)
56{
57 struct platform_device *pdev;
58 unsigned long bo_size;
59 const char *fw_name;
60 int i, r;
61
62 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
63
64 pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
65 r = IS_ERR(pdev);
66 if (r) {
67 dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
68 return -EINVAL;
69 }
70
71 switch (rdev->family) {
72 case CHIP_RV710:
73 case CHIP_RV730:
74 case CHIP_RV740:
75 fw_name = FIRMWARE_RV710;
76 break;
77
78 case CHIP_CYPRESS:
79 case CHIP_HEMLOCK:
80 case CHIP_JUNIPER:
81 case CHIP_REDWOOD:
82 case CHIP_CEDAR:
83 fw_name = FIRMWARE_CYPRESS;
84 break;
85
86 case CHIP_SUMO:
87 case CHIP_SUMO2:
88 case CHIP_PALM:
89 case CHIP_CAYMAN:
90 case CHIP_BARTS:
91 case CHIP_TURKS:
92 case CHIP_CAICOS:
93 fw_name = FIRMWARE_SUMO;
94 break;
95
96 case CHIP_TAHITI:
97 case CHIP_VERDE:
98 case CHIP_PITCAIRN:
99 case CHIP_ARUBA:
100 fw_name = FIRMWARE_TAHITI;
101 break;
102
103 default:
104 return -EINVAL;
105 }
106
107 r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
108 if (r) {
109 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
110 fw_name);
111 platform_device_unregister(pdev);
112 return r;
113 }
114
115 platform_device_unregister(pdev);
116
117 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
118 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
119 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
120 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
121 if (r) {
122 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
123 return r;
124 }
125
126 r = radeon_uvd_resume(rdev);
127 if (r)
128 return r;
129
130 memset(rdev->uvd.cpu_addr, 0, bo_size);
131 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
132
133 r = radeon_uvd_suspend(rdev);
134 if (r)
135 return r;
136
137 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
138 atomic_set(&rdev->uvd.handles[i], 0);
139 rdev->uvd.filp[i] = NULL;
140 }
141
142 return 0;
143}
144
145void radeon_uvd_fini(struct radeon_device *rdev)
146{
147 radeon_uvd_suspend(rdev);
148 radeon_bo_unref(&rdev->uvd.vcpu_bo);
149}
150
151int radeon_uvd_suspend(struct radeon_device *rdev)
152{
153 int r;
154
155 if (rdev->uvd.vcpu_bo == NULL)
156 return 0;
157
158 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
159 if (!r) {
160 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
161 radeon_bo_unpin(rdev->uvd.vcpu_bo);
162 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
163 }
164 return r;
165}
166
167int radeon_uvd_resume(struct radeon_device *rdev)
168{
169 int r;
170
171 if (rdev->uvd.vcpu_bo == NULL)
172 return -EINVAL;
173
174 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
175 if (r) {
176 radeon_bo_unref(&rdev->uvd.vcpu_bo);
177 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
178 return r;
179 }
180
181 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
182 &rdev->uvd.gpu_addr);
183 if (r) {
184 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
185 radeon_bo_unref(&rdev->uvd.vcpu_bo);
186 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
187 return r;
188 }
189
190 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
191 if (r) {
192 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
193 return r;
194 }
195
196 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
197
198 return 0;
199}
200
201void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
202{
203 rbo->placement.fpfn = 0 >> PAGE_SHIFT;
204 rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
205}
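
The placement clamp above pins a buffer into the first 256 MiB of VRAM because the UVD block can only address one 256 MiB segment; the reloc checker further down enforces the same limit with a >> 28 comparison. A small sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

/* Sketch of the segment math (assumes PAGE_SHIFT == 12, i.e. 4 KiB pages).
 * 256 MiB == 1 << 28, so two addresses share a segment iff everything
 * above bit 27 matches -- exactly the (start >> 28) != (end >> 28)
 * test used by the reloc checker below. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned lpfn = (256u * 1024 * 1024) >> PAGE_SHIFT;
	uint64_t start = 0x0FFFFF00, end = 0x10000100;

	assert(lpfn == 65536);			/* first 64 Ki page frames */
	assert((start >> 28) != (end >> 28));	/* crosses the 256 MiB line */
	return 0;
}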
206
207void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
208{
209 int i, r;
210 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
211 if (rdev->uvd.filp[i] == filp) {
212 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
213 struct radeon_fence *fence;
214
215 r = radeon_uvd_get_destroy_msg(rdev,
216 R600_RING_TYPE_UVD_INDEX, handle, &fence);
217 if (r) {
218 DRM_ERROR("Error destroying UVD (%d)!\n", r);
219 continue;
220 }
221
222 radeon_fence_wait(fence, false);
223 radeon_fence_unref(&fence);
224
225 rdev->uvd.filp[i] = NULL;
226 atomic_set(&rdev->uvd.handles[i], 0);
227 }
228 }
229}
230
231static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
232{
233 unsigned stream_type = msg[4];
234 unsigned width = msg[6];
235 unsigned height = msg[7];
236 unsigned dpb_size = msg[9];
237 unsigned pitch = msg[28];
238
239 unsigned width_in_mb = width / 16;
240 unsigned height_in_mb = ALIGN(height / 16, 2);
241
242 unsigned image_size, tmp, min_dpb_size;
243
244 image_size = width * height;
245 image_size += image_size / 2;
246 image_size = ALIGN(image_size, 1024);
247
248 switch (stream_type) {
249 case 0: /* H264 */
250
251 /* reference picture buffer */
252 min_dpb_size = image_size * 17;
253
254 /* macroblock context buffer */
255 min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
256
257 /* IT surface buffer */
258 min_dpb_size += width_in_mb * height_in_mb * 32;
259 break;
260
261 case 1: /* VC1 */
262
263 /* reference picture buffer */
264 min_dpb_size = image_size * 3;
265
266 /* CONTEXT_BUFFER */
267 min_dpb_size += width_in_mb * height_in_mb * 128;
268
269 /* IT surface buffer */
270 min_dpb_size += width_in_mb * 64;
271
272 /* DB surface buffer */
273 min_dpb_size += width_in_mb * 128;
274
275 /* BP */
276 tmp = max(width_in_mb, height_in_mb);
277 min_dpb_size += ALIGN(tmp * 7 * 16, 64);
278 break;
279
280 case 3: /* MPEG2 */
281
282 /* reference picture buffer */
283 min_dpb_size = image_size * 3;
284 break;
285
286 case 4: /* MPEG4 */
287
288 /* reference picture buffer */
289 min_dpb_size = image_size * 3;
290
291 /* CM */
292 min_dpb_size += width_in_mb * height_in_mb * 64;
293
294 /* IT surface buffer */
295 min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
296 break;
297
298 default:
299 DRM_ERROR("UVD codec not handled %d!\n", stream_type);
300 return -EINVAL;
301 }
302
303 if (width > pitch) {
304 DRM_ERROR("Invalid UVD decoding target pitch!\n");
305 return -EINVAL;
306 }
307
308 if (dpb_size < min_dpb_size) {
309 DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
310 dpb_size, min_dpb_size);
311 return -EINVAL;
312 }
313
314 buf_sizes[0x1] = dpb_size;
315 buf_sizes[0x2] = image_size;
316 return 0;
317}
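
To make the decode-message check concrete, here is the H264 branch re-run standalone for a 1920x1088 stream, the geometry used by the dummy create message later in this file; this is a worked example only, with the ALIGN macro spelled out:

/* Standalone re-run of the H264 branch above for a 1920x1088 stream.
 * The arithmetic mirrors radeon_uvd_cs_msg_decode(); this is only a
 * worked example, not driver code. */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned width = 1920, height = 1088;
	unsigned width_in_mb = width / 16;		/* 120 */
	unsigned height_in_mb = ALIGN(height / 16, 2);	/* 68 */
	unsigned image_size = ALIGN(width * height * 3 / 2, 1024);
	unsigned min_dpb_size;

	min_dpb_size  = image_size * 17;		  /* 17 ref pictures */
	min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
	min_dpb_size += width_in_mb * height_in_mb * 32;  /* IT surface */

	printf("image_size   = %u bytes\n", image_size);   /* 3133440 */
	printf("min_dpb_size = %u bytes (~%u MiB)\n",
	       min_dpb_size, min_dpb_size >> 20);	   /* 80163840, ~76 */
	return 0;
}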
318
319static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
320 unsigned offset, unsigned buf_sizes[])
321{
322 int32_t *msg, msg_type, handle;
323 void *ptr;
324
325 int i, r;
326
327 if (offset & 0x3F) {
328 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
329 return -EINVAL;
330 }
331
332 r = radeon_bo_kmap(bo, &ptr);
333 if (r)
334 return r;
335
336 msg = ptr + offset;
337
338 msg_type = msg[1];
339 handle = msg[2];
340
341 if (handle == 0) {
342 DRM_ERROR("Invalid UVD handle!\n");
343 return -EINVAL;
344 }
345
346 if (msg_type == 1) {
347 /* it's a decode msg, calc buffer sizes */
348 r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
349 radeon_bo_kunmap(bo);
350 if (r)
351 return r;
352
353 } else if (msg_type == 2) {
354 /* it's a destroy msg, free the handle */
355 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
356 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
357 radeon_bo_kunmap(bo);
358 return 0;
359 } else {
360 /* it's a create msg, no special handling needed */
361 radeon_bo_kunmap(bo);
362 }
363
364 /* create or decode, validate the handle */
365 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
366 if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
367 return 0;
368 }
369
370 /* handle not found try to alloc a new one */
371 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
372 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
373 p->rdev->uvd.filp[i] = p->filp;
374 return 0;
375 }
376 }
377
378 DRM_ERROR("No more free UVD handles!\n");
379 return -EINVAL;
380}
381
382static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
383 int data0, int data1,
384 unsigned buf_sizes[])
385{
386 struct radeon_cs_chunk *relocs_chunk;
387 struct radeon_cs_reloc *reloc;
388 unsigned idx, cmd, offset;
389 uint64_t start, end;
390 int r;
391
392 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
393 offset = radeon_get_ib_value(p, data0);
394 idx = radeon_get_ib_value(p, data1);
395 if (idx >= relocs_chunk->length_dw) {
396 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
397 idx, relocs_chunk->length_dw);
398 return -EINVAL;
399 }
400
401 reloc = p->relocs_ptr[(idx / 4)];
402 start = reloc->lobj.gpu_offset;
403 end = start + radeon_bo_size(reloc->robj);
404 start += offset;
405
406 p->ib.ptr[data0] = start & 0xFFFFFFFF;
407 p->ib.ptr[data1] = start >> 32;
408
409 cmd = radeon_get_ib_value(p, p->idx) >> 1;
410
411 if (cmd < 0x4) {
412 if ((end - start) < buf_sizes[cmd]) {
413 DRM_ERROR("buffer to small (%d / %d)!\n",
414 (unsigned)(end - start), buf_sizes[cmd]);
415 return -EINVAL;
416 }
417
418 } else if (cmd != 0x100) {
419 DRM_ERROR("invalid UVD command %X!\n", cmd);
420 return -EINVAL;
421 }
422
423 if ((start >> 28) != (end >> 28)) {
424 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
425 start, end);
426 return -EINVAL;
427 }
428
429 /* TODO: is this still necessary on NI+ ? */
430 if ((cmd == 0 || cmd == 0x3) &&
431 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
432 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
433 start, end);
434 return -EINVAL;
435 }
436
437 if (cmd == 0) {
438 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
439 if (r)
440 return r;
441 }
442
443 return 0;
444}
445
446static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
447 struct radeon_cs_packet *pkt,
448 int *data0, int *data1,
449 unsigned buf_sizes[])
450{
451 int i, r;
452
453 p->idx++;
454 for (i = 0; i <= pkt->count; ++i) {
455 switch (pkt->reg + i*4) {
456 case UVD_GPCOM_VCPU_DATA0:
457 *data0 = p->idx;
458 break;
459 case UVD_GPCOM_VCPU_DATA1:
460 *data1 = p->idx;
461 break;
462 case UVD_GPCOM_VCPU_CMD:
463 r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
464 if (r)
465 return r;
466 break;
467 case UVD_ENGINE_CNTL:
468 break;
469 default:
470 DRM_ERROR("Invalid reg 0x%X!\n",
471 pkt->reg + i*4);
472 return -EINVAL;
473 }
474 p->idx++;
475 }
476 return 0;
477}
478
479int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
480{
481 struct radeon_cs_packet pkt;
482 int r, data0 = 0, data1 = 0;
483
484 /* minimum buffer sizes */
485 unsigned buf_sizes[] = {
486 [0x00000000] = 2048,
487 [0x00000001] = 32 * 1024 * 1024,
488 [0x00000002] = 2048 * 1152 * 3,
489 [0x00000003] = 2048,
490 };
491
492 if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
493 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
494 p->chunks[p->chunk_ib_idx].length_dw);
495 return -EINVAL;
496 }
497
498 if (p->chunk_relocs_idx == -1) {
499 DRM_ERROR("No relocation chunk !\n");
500 return -EINVAL;
501 }
502
503
504 do {
505 r = radeon_cs_packet_parse(p, &pkt, p->idx);
506 if (r)
507 return r;
508 switch (pkt.type) {
509 case RADEON_PACKET_TYPE0:
510 r = radeon_uvd_cs_reg(p, &pkt, &data0,
511 &data1, buf_sizes);
512 if (r)
513 return r;
514 break;
515 case RADEON_PACKET_TYPE2:
516 p->idx += pkt.count + 2;
517 break;
518 default:
519 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
520 return -EINVAL;
521 }
522 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
523 return 0;
524}
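
The parse loop accepts only type-0 packets (register writes) and type-2 padding. Below is a hedged sketch of that outer walk with the DRM plumbing stubbed out; the header layout (type in bits 31:30, count in bits 29:16) follows the usual PM4 convention, but the constants are illustrative rather than taken from this patch.

/* Hedged sketch of the outer IB walk: type-0 packets carry a header plus
 * count+1 data dwords, type-2 packets are single-dword padding, and
 * anything else is rejected. */
#include <stdint.h>
#include <stdio.h>

static int walk_ib(const uint32_t *ib, unsigned len_dw)
{
	unsigned idx = 0;

	while (idx < len_dw) {
		uint32_t header = ib[idx];
		unsigned type  = header >> 30;
		unsigned count = (header >> 16) & 0x3fff;

		switch (type) {
		case 0:				/* register write packet */
			idx += count + 2;	/* header + count+1 dwords */
			break;
		case 2:				/* padding packet */
			idx += 1;
			break;
		default:
			fprintf(stderr, "unknown packet type %u\n", type);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t ib[] = {
		(1u << 16) | 0x100,	/* type 0, two data dwords */
		0x1, 0x2,
		0x80000000u,		/* type 2 padding */
	};

	return walk_ib(ib, 4);
}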
525
526static int radeon_uvd_send_msg(struct radeon_device *rdev,
527 int ring, struct radeon_bo *bo,
528 struct radeon_fence **fence)
529{
530 struct ttm_validate_buffer tv;
531 struct list_head head;
532 struct radeon_ib ib;
533 uint64_t addr;
534 int i, r;
535
536 memset(&tv, 0, sizeof(tv));
537 tv.bo = &bo->tbo;
538
539 INIT_LIST_HEAD(&head);
540 list_add(&tv.head, &head);
541
542 r = ttm_eu_reserve_buffers(&head);
543 if (r)
544 return r;
545
546 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
547 radeon_uvd_force_into_uvd_segment(bo);
548
549 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
550 if (r) {
551 ttm_eu_backoff_reservation(&head);
552 return r;
553 }
554
555 r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
556 if (r) {
557 ttm_eu_backoff_reservation(&head);
558 return r;
559 }
560
561 addr = radeon_bo_gpu_offset(bo);
562 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
563 ib.ptr[1] = addr;
564 ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
565 ib.ptr[3] = addr >> 32;
566 ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
567 ib.ptr[5] = 0;
568 for (i = 6; i < 16; ++i)
569 ib.ptr[i] = PACKET2(0);
570 ib.length_dw = 16;
571
572 r = radeon_ib_schedule(rdev, &ib, NULL);
573 if (r) {
574 ttm_eu_backoff_reservation(&head);
575 return r;
576 }
577 ttm_eu_fence_buffer_objects(&head, ib.fence);
578
579 if (fence)
580 *fence = radeon_fence_ref(ib.fence);
581
582 radeon_ib_free(rdev, &ib);
583 radeon_bo_unref(&bo);
584 return 0;
585}
586
587/* multiple fence commands without any stream commands in between can
588 crash the vcpu so just try to emit a dummy create/destroy msg to
589 avoid this */
590int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
591 uint32_t handle, struct radeon_fence **fence)
592{
593 struct radeon_bo *bo;
594 uint32_t *msg;
595 int r, i;
596
597 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
598 RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
599 if (r)
600 return r;
601
602 r = radeon_bo_reserve(bo, false);
603 if (r) {
604 radeon_bo_unref(&bo);
605 return r;
606 }
607
608 r = radeon_bo_kmap(bo, (void **)&msg);
609 if (r) {
610 radeon_bo_unreserve(bo);
611 radeon_bo_unref(&bo);
612 return r;
613 }
614
615 /* stitch together a UVD create msg */
616 msg[0] = 0x00000de4;
617 msg[1] = 0x00000000;
618 msg[2] = handle;
619 msg[3] = 0x00000000;
620 msg[4] = 0x00000000;
621 msg[5] = 0x00000000;
622 msg[6] = 0x00000000;
623 msg[7] = 0x00000780;
624 msg[8] = 0x00000440;
625 msg[9] = 0x00000000;
626 msg[10] = 0x01b37000;
627 for (i = 11; i < 1024; ++i)
628 msg[i] = 0x0;
629
630 radeon_bo_kunmap(bo);
631 radeon_bo_unreserve(bo);
632
633 return radeon_uvd_send_msg(rdev, ring, bo, fence);
634}
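
A hedged view of the message layout being stitched together: msg[1] (type) and msg[2] (handle) match what radeon_uvd_cs_msg() reads back, while interpreting msg[7]/msg[8] as width/height is an assumption based on 0x780 == 1920 and 0x440 == 1088; the remaining words are opaque firmware parameters.

/* Hedged sketch of the create-message layout. The width/height reading
 * of msg[7]/msg[8] is an assumption, not something this patch spells
 * out. */
#include <stdint.h>
#include <string.h>

static void uvd_fill_create_msg(uint32_t *msg, uint32_t handle)
{
	memset(msg, 0, 1024 * sizeof(*msg));
	msg[0] = 0x00000de4;	/* header word expected by the firmware */
	msg[1] = 0x00000000;	/* type 0: create */
	msg[2] = handle;	/* session handle being created */
	msg[7] = 1920;		/* 0x780 -- presumably frame width */
	msg[8] = 1088;		/* 0x440 -- presumably frame height */
	msg[10] = 0x01b37000;	/* opaque firmware parameter */
}

int main(void)
{
	uint32_t msg[1024];

	uvd_fill_create_msg(msg, 1);
	return msg[2] == 1 ? 0 : 1;
}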
635
636int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
637 uint32_t handle, struct radeon_fence **fence)
638{
639 struct radeon_bo *bo;
640 uint32_t *msg;
641 int r, i;
642
643 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
644 RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
645 if (r)
646 return r;
647
648 r = radeon_bo_reserve(bo, false);
649 if (r) {
650 radeon_bo_unref(&bo);
651 return r;
652 }
653
654 r = radeon_bo_kmap(bo, (void **)&msg);
655 if (r) {
656 radeon_bo_unreserve(bo);
657 radeon_bo_unref(&bo);
658 return r;
659 }
660
661 /* stitch together a UVD destroy msg */
662 msg[0] = 0x00000de4;
663 msg[1] = 0x00000002;
664 msg[2] = handle;
665 msg[3] = 0x00000000;
666 for (i = 4; i < 1024; ++i)
667 msg[i] = 0x0;
668
669 radeon_bo_kunmap(bo);
670 radeon_bo_unreserve(bo);
671
672 return radeon_uvd_send_msg(rdev, ring, bo, fence);
673}
674
675static void radeon_uvd_idle_work_handler(struct work_struct *work)
676{
677 struct radeon_device *rdev =
678 container_of(work, struct radeon_device, uvd.idle_work.work);
679
680 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
681 radeon_set_uvd_clocks(rdev, 0, 0);
682 else
683 schedule_delayed_work(&rdev->uvd.idle_work,
684 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
685}
686
687void radeon_uvd_note_usage(struct radeon_device *rdev)
688{
689 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
690 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
691 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
692 if (set_clocks)
693 radeon_set_uvd_clocks(rdev, 53300, 40000);
694}
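
Note the edge-triggering here: cancel_delayed_work_sync() returns true when the work was still pending, so set_clocks ends up true only when the idle handler had already fired and dropped the clocks. A userspace model of that logic, with a plain boolean standing in for the delayed work's queued state:

/* Userspace model of the note_usage/idle_work handshake: clocks are only
 * raised on the idle-to-busy edge. This illustrates the logic; it is not
 * kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool timer_pending;

static bool cancel_timer(void)   { bool was = timer_pending; timer_pending = false; return was; }
static bool schedule_timer(void) { bool was = timer_pending; timer_pending = true;  return !was; }

static void note_usage(void)
{
	bool set_clocks = !cancel_timer();	/* idle handler already ran? */
	set_clocks &= schedule_timer();		/* re-arm the idle timeout */
	if (set_clocks)
		printf("raising UVD clocks (idle -> busy edge)\n");
}

int main(void)
{
	note_usage();		/* first use: raises clocks */
	note_usage();		/* back-to-back use: timer was pending, no-op */
	return 0;
}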
695
696static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
697 unsigned target_freq,
698 unsigned pd_min,
699 unsigned pd_even)
700{
701 unsigned post_div = vco_freq / target_freq;
702
703 /* adjust to post divider minimum value */
704 if (post_div < pd_min)
705 post_div = pd_min;
706
707 /* we always need a frequency less than or equal to the target */
708 if ((vco_freq / post_div) > target_freq)
709 post_div += 1;
710
711 /* post dividers above a certain value must be even */
712 if (post_div > pd_even && post_div % 2)
713 post_div += 1;
714
715 return post_div;
716}
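
A standalone copy of the post-divider helper with a worked example: a VCO of 80000 (800 MHz in 10 kHz units) against a 53300 target yields post_div 2, the first divider whose output is at or below the target; pd_even is set high so the even-divider rule never triggers.

/* Worked example mirroring radeon_uvd_calc_upll_post_div() above. */
#include <assert.h>

static unsigned calc_post_div(unsigned vco_freq, unsigned target_freq,
			      unsigned pd_min, unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	if (post_div < pd_min)
		post_div = pd_min;
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;
	if (post_div > pd_even && post_div % 2)
		post_div += 1;
	return post_div;
}

int main(void)
{
	assert(calc_post_div(80000, 53300, 1, ~0u) == 2);
	assert(80000 / 2 <= 53300);	/* result lands below the target */
	return 0;
}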
717
718/**
719 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
720 *
721 * @rdev: radeon_device pointer
722 * @vclk: wanted VCLK
723 * @dclk: wanted DCLK
724 * @vco_min: minimum VCO frequency
725 * @vco_max: maximum VCO frequency
726 * @fb_factor: factor to multiply vco freq with
727 * @fb_mask: limit and bitmask for feedback divider
728 * @pd_min: post divider minimum
729 * @pd_max: post divider maximum
730 * @pd_even: post divider must be even above this value
731 * @optimal_fb_div: resulting feedback divider
732 * @optimal_vclk_div: resulting vclk post divider
733 * @optimal_dclk_div: resulting dclk post divider
734 *
735 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
736 * Returns zero on success -EINVAL on error.
737 */
738int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
739 unsigned vclk, unsigned dclk,
740 unsigned vco_min, unsigned vco_max,
741 unsigned fb_factor, unsigned fb_mask,
742 unsigned pd_min, unsigned pd_max,
743 unsigned pd_even,
744 unsigned *optimal_fb_div,
745 unsigned *optimal_vclk_div,
746 unsigned *optimal_dclk_div)
747{
748 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
749
750 /* start off with something large */
751 unsigned optimal_score = ~0;
752
753 /* loop through vco from low to high */
754 vco_min = max(max(vco_min, vclk), dclk);
755 for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
756
757 uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
758 unsigned vclk_div, dclk_div, score;
759
760 do_div(fb_div, ref_freq);
761
762 /* fb div out of range? */
763 if (fb_div > fb_mask)
764 break; /* it can only get worse */
765
766 fb_div &= fb_mask;
767
768 /* calc vclk divider with current vco freq */
769 vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
770 pd_min, pd_even);
771 if (vclk_div > pd_max)
772 break; /* vco is too big, it has to stop */
773
774 /* calc dclk divider with current vco freq */
775 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
776 pd_min, pd_even);
777 if (dclk_div > pd_max)
778 break; /* vco is too big, it has to stop */
779
780 /* calc score with current vco freq */
781 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
782
783 /* determine if this vco setting is better than current optimal settings */
784 if (score < optimal_score) {
785 *optimal_fb_div = fb_div;
786 *optimal_vclk_div = vclk_div;
787 *optimal_dclk_div = dclk_div;
788 optimal_score = score;
789 if (optimal_score == 0)
790 break; /* it can't get better than this */
791 }
792 }
793
794 /* did we find a valid setup? */
795 if (optimal_score == ~0)
796 return -EINVAL;
797
798 return 0;
799}
800
801int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
802 unsigned cg_upll_func_cntl)
803{
804 unsigned i;
805
806 /* make sure UPLL_CTLREQ is deasserted */
807 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
808
809 mdelay(10);
810
811 /* assert UPLL_CTLREQ */
812 WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
813
814 /* wait for CTLACK and CTLACK2 to get asserted */
815 for (i = 0; i < 100; ++i) {
816 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
817 if ((RREG32(cg_upll_func_cntl) & mask) == mask)
818 break;
819 mdelay(10);
820 }
821
822 /* deassert UPLL_CTLREQ */
823 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
824
825 if (i == 100) {
826 DRM_ERROR("Timeout setting UVD clocks!\n");
827 return -ETIMEDOUT;
828 }
829
830 return 0;
831}
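
The control-request helper follows a common shape: pulse the request bit, poll for the ack mask a bounded number of times, and always deassert the request before reporting. A pattern sketch with the register access stubbed out (the fake register asserts both ACK bits on any write, purely to keep the example self-contained):

/* Pattern sketch of the request/ack handshake; not driver code. */
#include <stdint.h>

static uint32_t fake_reg;

static void write_bits(uint32_t set, uint32_t keep_mask)
{
	fake_reg = (fake_reg & keep_mask) | set;
	fake_reg |= 0x300;		/* stub: hardware raises both ACKs */
}

static int pulse_and_wait(uint32_t req, uint32_t ack_mask, int tries)
{
	int i;

	write_bits(0, ~req);		/* make sure REQ starts low */
	write_bits(req, ~req);		/* assert REQ */
	for (i = 0; i < tries; ++i)
		if ((fake_reg & ack_mask) == ack_mask)
			break;
	write_bits(0, ~req);		/* deassert REQ either way */
	return i == tries ? -1 : 0;	/* -1 mirrors the -ETIMEDOUT path */
}

int main(void)
{
	return pulse_and_wait(0x1, 0x300, 100);
}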
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74c2ba6..46fa1b07c560 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
52 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL 52 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
53}; 53};
54 54
55static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
56{
57 if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
58 return true;
59 else
60 return false;
61}
62
63static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
64{
65 u32 pos1, pos2;
66
67 pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
68 pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
69
70 if (pos1 != pos2)
71 return true;
72 else
73 return false;
74}
75
76/**
77 * avivo_wait_for_vblank - vblank wait asic callback.
78 *
79 * @rdev: radeon_device pointer
80 * @crtc: crtc to wait for vblank on
81 *
82 * Wait for vblank on the requested crtc (r5xx-r7xx).
83 */
55void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) 84void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
56{ 85{
57 int i; 86 unsigned i = 0;
58 87
59 if (crtc >= rdev->num_crtc) 88 if (crtc >= rdev->num_crtc)
60 return; 89 return;
61 90
62 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) { 91 if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
63 for (i = 0; i < rdev->usec_timeout; i++) { 92 return;
64 if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)) 93
94 /* depending on when we hit vblank, we may be close to active; if so,
95 * wait for another frame.
96 */
97 while (avivo_is_in_vblank(rdev, crtc)) {
98 if (i++ % 100 == 0) {
99 if (!avivo_is_counter_moving(rdev, crtc))
65 break; 100 break;
66 udelay(1);
67 } 101 }
68 for (i = 0; i < rdev->usec_timeout; i++) { 102 }
69 if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) 103
104 while (!avivo_is_in_vblank(rdev, crtc)) {
105 if (i++ % 100 == 0) {
106 if (!avivo_is_counter_moving(rdev, crtc))
70 break; 107 break;
71 udelay(1);
72 } 108 }
73 } 109 }
74} 110}
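
The rewrite replaces the fixed usec_timeout loops with stall detection: every 100 spins the position register is read twice, and if it has stopped moving the CRTC is assumed stalled and the wait aborts instead of hanging. A runnable sketch of the pattern, with both register reads replaced by stubs:

/* Pattern sketch of the reworked vblank wait; read_position() stands in
 * for the STATUS_POSITION register, in_vblank() for the V_BLANK bit. */
#include <stdbool.h>

static unsigned pos = 100;	/* fake scanline counter */
static unsigned read_position(void) { return pos += 7; }
static bool in_vblank(void) { return (pos % 1000) < 50; }

static void wait_for_vblank_edge(void)
{
	unsigned i = 0;

	while (!in_vblank()) {
		if (i++ % 100 == 0) {
			/* two back-to-back reads; equal means the CRTC
			 * counter is stalled, so stop waiting */
			if (read_position() == read_position())
				break;
		}
	}
}

int main(void)
{
	wait_for_vblank_edge();
	return 0;
}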
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5706d2ac75ab..ab4c86cfd552 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -148,6 +148,8 @@ void rs690_pm_info(struct radeon_device *rdev)
148static void rs690_mc_init(struct radeon_device *rdev) 148static void rs690_mc_init(struct radeon_device *rdev)
149{ 149{
150 u64 base; 150 u64 base;
151 uint32_t h_addr, l_addr;
152 unsigned long long k8_addr;
151 153
152 rs400_gart_adjust_size(rdev); 154 rs400_gart_adjust_size(rdev);
153 rdev->mc.vram_is_ddr = true; 155 rdev->mc.vram_is_ddr = true;
@@ -160,6 +162,27 @@ static void rs690_mc_init(struct radeon_device *rdev)
160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
161 base = G_000100_MC_FB_START(base) << 16; 163 base = G_000100_MC_FB_START(base) << 16;
162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
165
166 /* Use K8 direct mapping for fast fb access. */
167 rdev->fastfb_working = false;
168 h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
169 l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
170 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
171#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
172 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
173#endif
174 {
175 /* FastFB is meant for UMA memory; it is simply disabled when sideport
176 * memory is present.
177 */
178 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
179 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
180 (unsigned long long)rdev->mc.aper_base, k8_addr);
181 rdev->mc.aper_base = (resource_size_t)k8_addr;
182 rdev->fastfb_working = true;
183 }
184 }
185
163 rs690_pm_info(rdev); 186 rs690_pm_info(rdev);
164 radeon_vram_location(rdev, &rdev->mc, base); 187 radeon_vram_location(rdev, &rdev->mc, base);
165 rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; 188 rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
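
The fastfb path above reassembles the host-visible UMA base from two registers: an 8-bit extension (bits 39:32) out of MC_MISC_UMA_CNTL and the low 32 bits from K8_FB_LOCATION. A worked composition with made-up register readbacks:

/* Worked example of the K8 address composition; register values are
 * hypothetical. */
#include <assert.h>
#include <stdint.h>

#define K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)

int main(void)
{
	uint32_t misc_uma_cntl = 0x00000001;	/* hypothetical readback */
	uint32_t fb_location   = 0x20000000;	/* hypothetical readback */
	uint64_t k8_addr = ((uint64_t)K8_ADDR_EXT(misc_uma_cntl) << 32)
			 | fb_location;

	assert(k8_addr == 0x120000000ULL);	/* 4.5 GiB physical */
	return 0;
}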
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 36e6398a98ae..8af3ccf20cc0 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -29,6 +29,9 @@
29#define __RS690D_H__ 29#define __RS690D_H__
30 30
31/* Registers */ 31/* Registers */
32#define R_00001E_K8_FB_LOCATION 0x00001E
33#define R_00005F_MC_MISC_UMA_CNTL 0x00005F
34#define G_00005F_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
32#define R_000078_MC_INDEX 0x000078 35#define R_000078_MC_INDEX 0x000078
33#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) 36#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
34#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) 37#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed3551364..ffcba730c57c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
303 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); 303 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
304 if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) { 304 if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
305 radeon_wait_for_vblank(rdev, i); 305 radeon_wait_for_vblank(rdev, i);
306 WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
306 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; 307 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
307 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); 308 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
309 WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
308 } 310 }
309 /* wait for the next frame */ 311 /* wait for the next frame */
310 frame_count = radeon_get_vblank_counter(rdev, i); 312 frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
313 break; 315 break;
314 udelay(1); 316 udelay(1);
315 } 317 }
318
319 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
320 WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
321 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
322 tmp &= ~AVIVO_CRTC_EN;
323 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
324 WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
325 save->crtc_enabled[i] = false;
326 /* ***** */
316 } else { 327 } else {
317 save->crtc_enabled[i] = false; 328 save->crtc_enabled[i] = false;
318 } 329 }
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
338 } 349 }
339 /* wait for the MC to settle */ 350 /* wait for the MC to settle */
340 udelay(100); 351 udelay(100);
352
353 /* lock double buffered regs */
354 for (i = 0; i < rdev->num_crtc; i++) {
355 if (save->crtc_enabled[i]) {
356 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
357 if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
358 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
359 WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
360 }
361 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
362 if (!(tmp & 1)) {
363 tmp |= 1;
364 WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
365 }
366 }
367 }
341} 368}
342 369
343void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 370void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
348 /* update crtc base addresses */ 375 /* update crtc base addresses */
349 for (i = 0; i < rdev->num_crtc; i++) { 376 for (i = 0; i < rdev->num_crtc; i++) {
350 if (rdev->family >= CHIP_RV770) { 377 if (rdev->family >= CHIP_RV770) {
351 if (i == 1) { 378 if (i == 0) {
352 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 379 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
353 upper_32_bits(rdev->mc.vram_start)); 380 upper_32_bits(rdev->mc.vram_start));
354 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 381 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
367 } 394 }
368 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 395 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
369 396
397 /* unlock regs and wait for update */
398 for (i = 0; i < rdev->num_crtc; i++) {
399 if (save->crtc_enabled[i]) {
400 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
401 if ((tmp & 0x3) != 0) {
402 tmp &= ~0x3;
403 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
404 }
405 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
406 if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
407 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
408 WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
409 }
410 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
411 if (tmp & 1) {
412 tmp &= ~1;
413 WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
414 }
415 for (j = 0; j < rdev->usec_timeout; j++) {
416 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
417 if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
418 break;
419 udelay(1);
420 }
421 }
422 }
423
370 if (rdev->family >= CHIP_R600) { 424 if (rdev->family >= CHIP_R600) {
371 /* unblackout the MC */ 425 /* unblackout the MC */
372 if (rdev->family >= CHIP_RV770) 426 if (rdev->family >= CHIP_RV770)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d63fe1d0f53f..83f612a9500b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,739 @@
42static void rv770_gpu_init(struct radeon_device *rdev); 42static void rv770_gpu_init(struct radeon_device *rdev);
43void rv770_fini(struct radeon_device *rdev); 43void rv770_fini(struct radeon_device *rdev);
44static void rv770_pcie_gen2_enable(struct radeon_device *rdev); 44static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
45int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
46
47int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
48{
49 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
50 int r;
51
52 /* RV740 uses evergreen uvd clk programming */
53 if (rdev->family == CHIP_RV740)
54 return evergreen_set_uvd_clocks(rdev, vclk, dclk);
55
56 /* bypass vclk and dclk with bclk */
57 WREG32_P(CG_UPLL_FUNC_CNTL_2,
58 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
59 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
60
61 if (!vclk || !dclk) {
62 /* keep the Bypass mode, put PLL to sleep */
63 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
64 return 0;
65 }
66
67 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
68 43663, 0x03FFFFFE, 1, 30, ~0,
69 &fb_div, &vclk_div, &dclk_div);
70 if (r)
71 return r;
72
73 fb_div |= 1;
74 vclk_div -= 1;
75 dclk_div -= 1;
76
77 /* set UPLL_FB_DIV to 0x50000 */
78 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
79
80 /* deassert UPLL_RESET and UPLL_SLEEP */
81 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
82
83 /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
84 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
85 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
86
87 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
88 if (r)
89 return r;
90
91 /* assert PLL_RESET */
92 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
93
94 /* set the required FB_DIV, REF_DIV, post divider values */
95 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
96 WREG32_P(CG_UPLL_FUNC_CNTL_2,
97 UPLL_SW_HILEN(vclk_div >> 1) |
98 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
99 UPLL_SW_HILEN2(dclk_div >> 1) |
100 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
101 ~UPLL_SW_MASK);
102
103 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
104 ~UPLL_FB_DIV_MASK);
105
106 /* give the PLL some time to settle */
107 mdelay(15);
108
109 /* deassert PLL_RESET */
110 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
111
112 mdelay(15);
113
114 /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
115 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
116 WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
117
118 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
119 if (r)
120 return r;
121
122 /* switch VCLK and DCLK selection */
123 WREG32_P(CG_UPLL_FUNC_CNTL_2,
124 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
125 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
126
127 mdelay(100);
128
129 return 0;
130}
131
132static const u32 r7xx_golden_registers[] =
133{
134 0x8d00, 0xffffffff, 0x0e0e0074,
135 0x8d04, 0xffffffff, 0x013a2b34,
136 0x9508, 0xffffffff, 0x00000002,
137 0x8b20, 0xffffffff, 0,
138 0x88c4, 0xffffffff, 0x000000c2,
139 0x28350, 0xffffffff, 0,
140 0x9058, 0xffffffff, 0x0fffc40f,
141 0x240c, 0xffffffff, 0x00000380,
142 0x733c, 0xffffffff, 0x00000002,
143 0x2650, 0x00040000, 0,
144 0x20bc, 0x00040000, 0,
145 0x7300, 0xffffffff, 0x001000f0
146};
147
148static const u32 r7xx_golden_dyn_gpr_registers[] =
149{
150 0x8db0, 0xffffffff, 0x98989898,
151 0x8db4, 0xffffffff, 0x98989898,
152 0x8db8, 0xffffffff, 0x98989898,
153 0x8dbc, 0xffffffff, 0x98989898,
154 0x8dc0, 0xffffffff, 0x98989898,
155 0x8dc4, 0xffffffff, 0x98989898,
156 0x8dc8, 0xffffffff, 0x98989898,
157 0x8dcc, 0xffffffff, 0x98989898,
158 0x88c4, 0xffffffff, 0x00000082
159};
160
161static const u32 rv770_golden_registers[] =
162{
163 0x562c, 0xffffffff, 0,
164 0x3f90, 0xffffffff, 0,
165 0x9148, 0xffffffff, 0,
166 0x3f94, 0xffffffff, 0,
167 0x914c, 0xffffffff, 0,
168 0x9698, 0x18000000, 0x18000000
169};
170
171static const u32 rv770ce_golden_registers[] =
172{
173 0x562c, 0xffffffff, 0,
174 0x3f90, 0xffffffff, 0x00cc0000,
175 0x9148, 0xffffffff, 0x00cc0000,
176 0x3f94, 0xffffffff, 0x00cc0000,
177 0x914c, 0xffffffff, 0x00cc0000,
178 0x9b7c, 0xffffffff, 0x00fa0000,
179 0x3f8c, 0xffffffff, 0x00fa0000,
180 0x9698, 0x18000000, 0x18000000
181};
182
183static const u32 rv770_mgcg_init[] =
184{
185 0x8bcc, 0xffffffff, 0x130300f9,
186 0x5448, 0xffffffff, 0x100,
187 0x55e4, 0xffffffff, 0x100,
188 0x160c, 0xffffffff, 0x100,
189 0x5644, 0xffffffff, 0x100,
190 0xc164, 0xffffffff, 0x100,
191 0x8a18, 0xffffffff, 0x100,
192 0x897c, 0xffffffff, 0x8000100,
193 0x8b28, 0xffffffff, 0x3c000100,
194 0x9144, 0xffffffff, 0x100,
195 0x9a1c, 0xffffffff, 0x10000,
196 0x9a50, 0xffffffff, 0x100,
197 0x9a1c, 0xffffffff, 0x10001,
198 0x9a50, 0xffffffff, 0x100,
199 0x9a1c, 0xffffffff, 0x10002,
200 0x9a50, 0xffffffff, 0x100,
201 0x9a1c, 0xffffffff, 0x10003,
202 0x9a50, 0xffffffff, 0x100,
203 0x9a1c, 0xffffffff, 0x0,
204 0x9870, 0xffffffff, 0x100,
205 0x8d58, 0xffffffff, 0x100,
206 0x9500, 0xffffffff, 0x0,
207 0x9510, 0xffffffff, 0x100,
208 0x9500, 0xffffffff, 0x1,
209 0x9510, 0xffffffff, 0x100,
210 0x9500, 0xffffffff, 0x2,
211 0x9510, 0xffffffff, 0x100,
212 0x9500, 0xffffffff, 0x3,
213 0x9510, 0xffffffff, 0x100,
214 0x9500, 0xffffffff, 0x4,
215 0x9510, 0xffffffff, 0x100,
216 0x9500, 0xffffffff, 0x5,
217 0x9510, 0xffffffff, 0x100,
218 0x9500, 0xffffffff, 0x6,
219 0x9510, 0xffffffff, 0x100,
220 0x9500, 0xffffffff, 0x7,
221 0x9510, 0xffffffff, 0x100,
222 0x9500, 0xffffffff, 0x8,
223 0x9510, 0xffffffff, 0x100,
224 0x9500, 0xffffffff, 0x9,
225 0x9510, 0xffffffff, 0x100,
226 0x9500, 0xffffffff, 0x8000,
227 0x9490, 0xffffffff, 0x0,
228 0x949c, 0xffffffff, 0x100,
229 0x9490, 0xffffffff, 0x1,
230 0x949c, 0xffffffff, 0x100,
231 0x9490, 0xffffffff, 0x2,
232 0x949c, 0xffffffff, 0x100,
233 0x9490, 0xffffffff, 0x3,
234 0x949c, 0xffffffff, 0x100,
235 0x9490, 0xffffffff, 0x4,
236 0x949c, 0xffffffff, 0x100,
237 0x9490, 0xffffffff, 0x5,
238 0x949c, 0xffffffff, 0x100,
239 0x9490, 0xffffffff, 0x6,
240 0x949c, 0xffffffff, 0x100,
241 0x9490, 0xffffffff, 0x7,
242 0x949c, 0xffffffff, 0x100,
243 0x9490, 0xffffffff, 0x8,
244 0x949c, 0xffffffff, 0x100,
245 0x9490, 0xffffffff, 0x9,
246 0x949c, 0xffffffff, 0x100,
247 0x9490, 0xffffffff, 0x8000,
248 0x9604, 0xffffffff, 0x0,
249 0x9654, 0xffffffff, 0x100,
250 0x9604, 0xffffffff, 0x1,
251 0x9654, 0xffffffff, 0x100,
252 0x9604, 0xffffffff, 0x2,
253 0x9654, 0xffffffff, 0x100,
254 0x9604, 0xffffffff, 0x3,
255 0x9654, 0xffffffff, 0x100,
256 0x9604, 0xffffffff, 0x4,
257 0x9654, 0xffffffff, 0x100,
258 0x9604, 0xffffffff, 0x5,
259 0x9654, 0xffffffff, 0x100,
260 0x9604, 0xffffffff, 0x6,
261 0x9654, 0xffffffff, 0x100,
262 0x9604, 0xffffffff, 0x7,
263 0x9654, 0xffffffff, 0x100,
264 0x9604, 0xffffffff, 0x8,
265 0x9654, 0xffffffff, 0x100,
266 0x9604, 0xffffffff, 0x9,
267 0x9654, 0xffffffff, 0x100,
268 0x9604, 0xffffffff, 0x80000000,
269 0x9030, 0xffffffff, 0x100,
270 0x9034, 0xffffffff, 0x100,
271 0x9038, 0xffffffff, 0x100,
272 0x903c, 0xffffffff, 0x100,
273 0x9040, 0xffffffff, 0x100,
274 0xa200, 0xffffffff, 0x100,
275 0xa204, 0xffffffff, 0x100,
276 0xa208, 0xffffffff, 0x100,
277 0xa20c, 0xffffffff, 0x100,
278 0x971c, 0xffffffff, 0x100,
279 0x915c, 0xffffffff, 0x00020001,
280 0x9160, 0xffffffff, 0x00040003,
281 0x916c, 0xffffffff, 0x00060005,
282 0x9170, 0xffffffff, 0x00080007,
283 0x9174, 0xffffffff, 0x000a0009,
284 0x9178, 0xffffffff, 0x000c000b,
285 0x917c, 0xffffffff, 0x000e000d,
286 0x9180, 0xffffffff, 0x0010000f,
287 0x918c, 0xffffffff, 0x00120011,
288 0x9190, 0xffffffff, 0x00140013,
289 0x9194, 0xffffffff, 0x00020001,
290 0x9198, 0xffffffff, 0x00040003,
291 0x919c, 0xffffffff, 0x00060005,
292 0x91a8, 0xffffffff, 0x00080007,
293 0x91ac, 0xffffffff, 0x000a0009,
294 0x91b0, 0xffffffff, 0x000c000b,
295 0x91b4, 0xffffffff, 0x000e000d,
296 0x91b8, 0xffffffff, 0x0010000f,
297 0x91c4, 0xffffffff, 0x00120011,
298 0x91c8, 0xffffffff, 0x00140013,
299 0x91cc, 0xffffffff, 0x00020001,
300 0x91d0, 0xffffffff, 0x00040003,
301 0x91d4, 0xffffffff, 0x00060005,
302 0x91e0, 0xffffffff, 0x00080007,
303 0x91e4, 0xffffffff, 0x000a0009,
304 0x91e8, 0xffffffff, 0x000c000b,
305 0x91ec, 0xffffffff, 0x00020001,
306 0x91f0, 0xffffffff, 0x00040003,
307 0x91f4, 0xffffffff, 0x00060005,
308 0x9200, 0xffffffff, 0x00080007,
309 0x9204, 0xffffffff, 0x000a0009,
310 0x9208, 0xffffffff, 0x000c000b,
311 0x920c, 0xffffffff, 0x000e000d,
312 0x9210, 0xffffffff, 0x0010000f,
313 0x921c, 0xffffffff, 0x00120011,
314 0x9220, 0xffffffff, 0x00140013,
315 0x9224, 0xffffffff, 0x00020001,
316 0x9228, 0xffffffff, 0x00040003,
317 0x922c, 0xffffffff, 0x00060005,
318 0x9238, 0xffffffff, 0x00080007,
319 0x923c, 0xffffffff, 0x000a0009,
320 0x9240, 0xffffffff, 0x000c000b,
321 0x9244, 0xffffffff, 0x000e000d,
322 0x9248, 0xffffffff, 0x0010000f,
323 0x9254, 0xffffffff, 0x00120011,
324 0x9258, 0xffffffff, 0x00140013,
325 0x925c, 0xffffffff, 0x00020001,
326 0x9260, 0xffffffff, 0x00040003,
327 0x9264, 0xffffffff, 0x00060005,
328 0x9270, 0xffffffff, 0x00080007,
329 0x9274, 0xffffffff, 0x000a0009,
330 0x9278, 0xffffffff, 0x000c000b,
331 0x927c, 0xffffffff, 0x000e000d,
332 0x9280, 0xffffffff, 0x0010000f,
333 0x928c, 0xffffffff, 0x00120011,
334 0x9290, 0xffffffff, 0x00140013,
335 0x9294, 0xffffffff, 0x00020001,
336 0x929c, 0xffffffff, 0x00040003,
337 0x92a0, 0xffffffff, 0x00060005,
338 0x92a4, 0xffffffff, 0x00080007
339};
340
341static const u32 rv710_golden_registers[] =
342{
343 0x3f90, 0x00ff0000, 0x00fc0000,
344 0x9148, 0x00ff0000, 0x00fc0000,
345 0x3f94, 0x00ff0000, 0x00fc0000,
346 0x914c, 0x00ff0000, 0x00fc0000,
347 0xb4c, 0x00000020, 0x00000020,
348 0xa180, 0xffffffff, 0x00003f3f
349};
350
351static const u32 rv710_mgcg_init[] =
352{
353 0x8bcc, 0xffffffff, 0x13030040,
354 0x5448, 0xffffffff, 0x100,
355 0x55e4, 0xffffffff, 0x100,
356 0x160c, 0xffffffff, 0x100,
357 0x5644, 0xffffffff, 0x100,
358 0xc164, 0xffffffff, 0x100,
359 0x8a18, 0xffffffff, 0x100,
360 0x897c, 0xffffffff, 0x8000100,
361 0x8b28, 0xffffffff, 0x3c000100,
362 0x9144, 0xffffffff, 0x100,
363 0x9a1c, 0xffffffff, 0x10000,
364 0x9a50, 0xffffffff, 0x100,
365 0x9a1c, 0xffffffff, 0x0,
366 0x9870, 0xffffffff, 0x100,
367 0x8d58, 0xffffffff, 0x100,
368 0x9500, 0xffffffff, 0x0,
369 0x9510, 0xffffffff, 0x100,
370 0x9500, 0xffffffff, 0x1,
371 0x9510, 0xffffffff, 0x100,
372 0x9500, 0xffffffff, 0x8000,
373 0x9490, 0xffffffff, 0x0,
374 0x949c, 0xffffffff, 0x100,
375 0x9490, 0xffffffff, 0x1,
376 0x949c, 0xffffffff, 0x100,
377 0x9490, 0xffffffff, 0x8000,
378 0x9604, 0xffffffff, 0x0,
379 0x9654, 0xffffffff, 0x100,
380 0x9604, 0xffffffff, 0x1,
381 0x9654, 0xffffffff, 0x100,
382 0x9604, 0xffffffff, 0x80000000,
383 0x9030, 0xffffffff, 0x100,
384 0x9034, 0xffffffff, 0x100,
385 0x9038, 0xffffffff, 0x100,
386 0x903c, 0xffffffff, 0x100,
387 0x9040, 0xffffffff, 0x100,
388 0xa200, 0xffffffff, 0x100,
389 0xa204, 0xffffffff, 0x100,
390 0xa208, 0xffffffff, 0x100,
391 0xa20c, 0xffffffff, 0x100,
392 0x971c, 0xffffffff, 0x100,
393 0x915c, 0xffffffff, 0x00020001,
394 0x9174, 0xffffffff, 0x00000003,
395 0x9178, 0xffffffff, 0x00050001,
396 0x917c, 0xffffffff, 0x00030002,
397 0x918c, 0xffffffff, 0x00000004,
398 0x9190, 0xffffffff, 0x00070006,
399 0x9194, 0xffffffff, 0x00050001,
400 0x9198, 0xffffffff, 0x00030002,
401 0x91a8, 0xffffffff, 0x00000004,
402 0x91ac, 0xffffffff, 0x00070006,
403 0x91e8, 0xffffffff, 0x00000001,
404 0x9294, 0xffffffff, 0x00000001,
405 0x929c, 0xffffffff, 0x00000002,
406 0x92a0, 0xffffffff, 0x00040003,
407 0x9150, 0xffffffff, 0x4d940000
408};
409
410static const u32 rv730_golden_registers[] =
411{
412 0x3f90, 0x00ff0000, 0x00f00000,
413 0x9148, 0x00ff0000, 0x00f00000,
414 0x3f94, 0x00ff0000, 0x00f00000,
415 0x914c, 0x00ff0000, 0x00f00000,
416 0x900c, 0xffffffff, 0x003b033f,
417 0xb4c, 0x00000020, 0x00000020,
418 0xa180, 0xffffffff, 0x00003f3f
419};
420
421static const u32 rv730_mgcg_init[] =
422{
423 0x8bcc, 0xffffffff, 0x130300f9,
424 0x5448, 0xffffffff, 0x100,
425 0x55e4, 0xffffffff, 0x100,
426 0x160c, 0xffffffff, 0x100,
427 0x5644, 0xffffffff, 0x100,
428 0xc164, 0xffffffff, 0x100,
429 0x8a18, 0xffffffff, 0x100,
430 0x897c, 0xffffffff, 0x8000100,
431 0x8b28, 0xffffffff, 0x3c000100,
432 0x9144, 0xffffffff, 0x100,
433 0x9a1c, 0xffffffff, 0x10000,
434 0x9a50, 0xffffffff, 0x100,
435 0x9a1c, 0xffffffff, 0x10001,
436 0x9a50, 0xffffffff, 0x100,
437 0x9a1c, 0xffffffff, 0x0,
438 0x9870, 0xffffffff, 0x100,
439 0x8d58, 0xffffffff, 0x100,
440 0x9500, 0xffffffff, 0x0,
441 0x9510, 0xffffffff, 0x100,
442 0x9500, 0xffffffff, 0x1,
443 0x9510, 0xffffffff, 0x100,
444 0x9500, 0xffffffff, 0x2,
445 0x9510, 0xffffffff, 0x100,
446 0x9500, 0xffffffff, 0x3,
447 0x9510, 0xffffffff, 0x100,
448 0x9500, 0xffffffff, 0x4,
449 0x9510, 0xffffffff, 0x100,
450 0x9500, 0xffffffff, 0x5,
451 0x9510, 0xffffffff, 0x100,
452 0x9500, 0xffffffff, 0x6,
453 0x9510, 0xffffffff, 0x100,
454 0x9500, 0xffffffff, 0x7,
455 0x9510, 0xffffffff, 0x100,
456 0x9500, 0xffffffff, 0x8000,
457 0x9490, 0xffffffff, 0x0,
458 0x949c, 0xffffffff, 0x100,
459 0x9490, 0xffffffff, 0x1,
460 0x949c, 0xffffffff, 0x100,
461 0x9490, 0xffffffff, 0x2,
462 0x949c, 0xffffffff, 0x100,
463 0x9490, 0xffffffff, 0x3,
464 0x949c, 0xffffffff, 0x100,
465 0x9490, 0xffffffff, 0x4,
466 0x949c, 0xffffffff, 0x100,
467 0x9490, 0xffffffff, 0x5,
468 0x949c, 0xffffffff, 0x100,
469 0x9490, 0xffffffff, 0x6,
470 0x949c, 0xffffffff, 0x100,
471 0x9490, 0xffffffff, 0x7,
472 0x949c, 0xffffffff, 0x100,
473 0x9490, 0xffffffff, 0x8000,
474 0x9604, 0xffffffff, 0x0,
475 0x9654, 0xffffffff, 0x100,
476 0x9604, 0xffffffff, 0x1,
477 0x9654, 0xffffffff, 0x100,
478 0x9604, 0xffffffff, 0x2,
479 0x9654, 0xffffffff, 0x100,
480 0x9604, 0xffffffff, 0x3,
481 0x9654, 0xffffffff, 0x100,
482 0x9604, 0xffffffff, 0x4,
483 0x9654, 0xffffffff, 0x100,
484 0x9604, 0xffffffff, 0x5,
485 0x9654, 0xffffffff, 0x100,
486 0x9604, 0xffffffff, 0x6,
487 0x9654, 0xffffffff, 0x100,
488 0x9604, 0xffffffff, 0x7,
489 0x9654, 0xffffffff, 0x100,
490 0x9604, 0xffffffff, 0x80000000,
491 0x9030, 0xffffffff, 0x100,
492 0x9034, 0xffffffff, 0x100,
493 0x9038, 0xffffffff, 0x100,
494 0x903c, 0xffffffff, 0x100,
495 0x9040, 0xffffffff, 0x100,
496 0xa200, 0xffffffff, 0x100,
497 0xa204, 0xffffffff, 0x100,
498 0xa208, 0xffffffff, 0x100,
499 0xa20c, 0xffffffff, 0x100,
500 0x971c, 0xffffffff, 0x100,
501 0x915c, 0xffffffff, 0x00020001,
502 0x916c, 0xffffffff, 0x00040003,
503 0x9170, 0xffffffff, 0x00000005,
504 0x9178, 0xffffffff, 0x00050001,
505 0x917c, 0xffffffff, 0x00030002,
506 0x918c, 0xffffffff, 0x00000004,
507 0x9190, 0xffffffff, 0x00070006,
508 0x9194, 0xffffffff, 0x00050001,
509 0x9198, 0xffffffff, 0x00030002,
510 0x91a8, 0xffffffff, 0x00000004,
511 0x91ac, 0xffffffff, 0x00070006,
512 0x91b0, 0xffffffff, 0x00050001,
513 0x91b4, 0xffffffff, 0x00030002,
514 0x91c4, 0xffffffff, 0x00000004,
515 0x91c8, 0xffffffff, 0x00070006,
516 0x91cc, 0xffffffff, 0x00050001,
517 0x91d0, 0xffffffff, 0x00030002,
518 0x91e0, 0xffffffff, 0x00000004,
519 0x91e4, 0xffffffff, 0x00070006,
520 0x91e8, 0xffffffff, 0x00000001,
521 0x91ec, 0xffffffff, 0x00050001,
522 0x91f0, 0xffffffff, 0x00030002,
523 0x9200, 0xffffffff, 0x00000004,
524 0x9204, 0xffffffff, 0x00070006,
525 0x9208, 0xffffffff, 0x00050001,
526 0x920c, 0xffffffff, 0x00030002,
527 0x921c, 0xffffffff, 0x00000004,
528 0x9220, 0xffffffff, 0x00070006,
529 0x9224, 0xffffffff, 0x00050001,
530 0x9228, 0xffffffff, 0x00030002,
531 0x9238, 0xffffffff, 0x00000004,
532 0x923c, 0xffffffff, 0x00070006,
533 0x9240, 0xffffffff, 0x00050001,
534 0x9244, 0xffffffff, 0x00030002,
535 0x9254, 0xffffffff, 0x00000004,
536 0x9258, 0xffffffff, 0x00070006,
537 0x9294, 0xffffffff, 0x00000001,
538 0x929c, 0xffffffff, 0x00000002,
539 0x92a0, 0xffffffff, 0x00040003,
540 0x92a4, 0xffffffff, 0x00000005
541};
542
543static const u32 rv740_golden_registers[] =
544{
545 0x88c4, 0xffffffff, 0x00000082,
546 0x28a50, 0xfffffffc, 0x00000004,
547 0x2650, 0x00040000, 0,
548 0x20bc, 0x00040000, 0,
549 0x733c, 0xffffffff, 0x00000002,
550 0x7300, 0xffffffff, 0x001000f0,
551 0x3f90, 0x00ff0000, 0,
552 0x9148, 0x00ff0000, 0,
553 0x3f94, 0x00ff0000, 0,
554 0x914c, 0x00ff0000, 0,
555 0x240c, 0xffffffff, 0x00000380,
556 0x8a14, 0x00000007, 0x00000007,
557 0x8b24, 0xffffffff, 0x00ff0fff,
558 0x28a4c, 0xffffffff, 0x00004000,
559 0xa180, 0xffffffff, 0x00003f3f,
560 0x8d00, 0xffffffff, 0x0e0e003a,
561 0x8d04, 0xffffffff, 0x013a0e2a,
562 0x8c00, 0xffffffff, 0xe400000f,
563 0x8db0, 0xffffffff, 0x98989898,
564 0x8db4, 0xffffffff, 0x98989898,
565 0x8db8, 0xffffffff, 0x98989898,
566 0x8dbc, 0xffffffff, 0x98989898,
567 0x8dc0, 0xffffffff, 0x98989898,
568 0x8dc4, 0xffffffff, 0x98989898,
569 0x8dc8, 0xffffffff, 0x98989898,
570 0x8dcc, 0xffffffff, 0x98989898,
571 0x9058, 0xffffffff, 0x0fffc40f,
572 0x900c, 0xffffffff, 0x003b033f,
573 0x28350, 0xffffffff, 0,
574 0x8cf0, 0x1fffffff, 0x08e00420,
575 0x9508, 0xffffffff, 0x00000002,
576 0x88c4, 0xffffffff, 0x000000c2,
577 0x9698, 0x18000000, 0x18000000
578};
579
580static const u32 rv740_mgcg_init[] =
581{
582 0x8bcc, 0xffffffff, 0x13030100,
583 0x5448, 0xffffffff, 0x100,
584 0x55e4, 0xffffffff, 0x100,
585 0x160c, 0xffffffff, 0x100,
586 0x5644, 0xffffffff, 0x100,
587 0xc164, 0xffffffff, 0x100,
588 0x8a18, 0xffffffff, 0x100,
589 0x897c, 0xffffffff, 0x100,
590 0x8b28, 0xffffffff, 0x100,
591 0x9144, 0xffffffff, 0x100,
592 0x9a1c, 0xffffffff, 0x10000,
593 0x9a50, 0xffffffff, 0x100,
594 0x9a1c, 0xffffffff, 0x10001,
595 0x9a50, 0xffffffff, 0x100,
596 0x9a1c, 0xffffffff, 0x10002,
597 0x9a50, 0xffffffff, 0x100,
598 0x9a1c, 0xffffffff, 0x10003,
599 0x9a50, 0xffffffff, 0x100,
600 0x9a1c, 0xffffffff, 0x0,
601 0x9870, 0xffffffff, 0x100,
602 0x8d58, 0xffffffff, 0x100,
603 0x9500, 0xffffffff, 0x0,
604 0x9510, 0xffffffff, 0x100,
605 0x9500, 0xffffffff, 0x1,
606 0x9510, 0xffffffff, 0x100,
607 0x9500, 0xffffffff, 0x2,
608 0x9510, 0xffffffff, 0x100,
609 0x9500, 0xffffffff, 0x3,
610 0x9510, 0xffffffff, 0x100,
611 0x9500, 0xffffffff, 0x4,
612 0x9510, 0xffffffff, 0x100,
613 0x9500, 0xffffffff, 0x5,
614 0x9510, 0xffffffff, 0x100,
615 0x9500, 0xffffffff, 0x6,
616 0x9510, 0xffffffff, 0x100,
617 0x9500, 0xffffffff, 0x7,
618 0x9510, 0xffffffff, 0x100,
619 0x9500, 0xffffffff, 0x8000,
620 0x9490, 0xffffffff, 0x0,
621 0x949c, 0xffffffff, 0x100,
622 0x9490, 0xffffffff, 0x1,
623 0x949c, 0xffffffff, 0x100,
624 0x9490, 0xffffffff, 0x2,
625 0x949c, 0xffffffff, 0x100,
626 0x9490, 0xffffffff, 0x3,
627 0x949c, 0xffffffff, 0x100,
628 0x9490, 0xffffffff, 0x4,
629 0x949c, 0xffffffff, 0x100,
630 0x9490, 0xffffffff, 0x5,
631 0x949c, 0xffffffff, 0x100,
632 0x9490, 0xffffffff, 0x6,
633 0x949c, 0xffffffff, 0x100,
634 0x9490, 0xffffffff, 0x7,
635 0x949c, 0xffffffff, 0x100,
636 0x9490, 0xffffffff, 0x8000,
637 0x9604, 0xffffffff, 0x0,
638 0x9654, 0xffffffff, 0x100,
639 0x9604, 0xffffffff, 0x1,
640 0x9654, 0xffffffff, 0x100,
641 0x9604, 0xffffffff, 0x2,
642 0x9654, 0xffffffff, 0x100,
643 0x9604, 0xffffffff, 0x3,
644 0x9654, 0xffffffff, 0x100,
645 0x9604, 0xffffffff, 0x4,
646 0x9654, 0xffffffff, 0x100,
647 0x9604, 0xffffffff, 0x5,
648 0x9654, 0xffffffff, 0x100,
649 0x9604, 0xffffffff, 0x6,
650 0x9654, 0xffffffff, 0x100,
651 0x9604, 0xffffffff, 0x7,
652 0x9654, 0xffffffff, 0x100,
653 0x9604, 0xffffffff, 0x80000000,
654 0x9030, 0xffffffff, 0x100,
655 0x9034, 0xffffffff, 0x100,
656 0x9038, 0xffffffff, 0x100,
657 0x903c, 0xffffffff, 0x100,
658 0x9040, 0xffffffff, 0x100,
659 0xa200, 0xffffffff, 0x100,
660 0xa204, 0xffffffff, 0x100,
661 0xa208, 0xffffffff, 0x100,
662 0xa20c, 0xffffffff, 0x100,
663 0x971c, 0xffffffff, 0x100,
664 0x915c, 0xffffffff, 0x00020001,
665 0x9160, 0xffffffff, 0x00040003,
666 0x916c, 0xffffffff, 0x00060005,
667 0x9170, 0xffffffff, 0x00080007,
668 0x9174, 0xffffffff, 0x000a0009,
669 0x9178, 0xffffffff, 0x000c000b,
670 0x917c, 0xffffffff, 0x000e000d,
671 0x9180, 0xffffffff, 0x0010000f,
672 0x918c, 0xffffffff, 0x00120011,
673 0x9190, 0xffffffff, 0x00140013,
674 0x9194, 0xffffffff, 0x00020001,
675 0x9198, 0xffffffff, 0x00040003,
676 0x919c, 0xffffffff, 0x00060005,
677 0x91a8, 0xffffffff, 0x00080007,
678 0x91ac, 0xffffffff, 0x000a0009,
679 0x91b0, 0xffffffff, 0x000c000b,
680 0x91b4, 0xffffffff, 0x000e000d,
681 0x91b8, 0xffffffff, 0x0010000f,
682 0x91c4, 0xffffffff, 0x00120011,
683 0x91c8, 0xffffffff, 0x00140013,
684 0x91cc, 0xffffffff, 0x00020001,
685 0x91d0, 0xffffffff, 0x00040003,
686 0x91d4, 0xffffffff, 0x00060005,
687 0x91e0, 0xffffffff, 0x00080007,
688 0x91e4, 0xffffffff, 0x000a0009,
689 0x91e8, 0xffffffff, 0x000c000b,
690 0x91ec, 0xffffffff, 0x00020001,
691 0x91f0, 0xffffffff, 0x00040003,
692 0x91f4, 0xffffffff, 0x00060005,
693 0x9200, 0xffffffff, 0x00080007,
694 0x9204, 0xffffffff, 0x000a0009,
695 0x9208, 0xffffffff, 0x000c000b,
696 0x920c, 0xffffffff, 0x000e000d,
697 0x9210, 0xffffffff, 0x0010000f,
698 0x921c, 0xffffffff, 0x00120011,
699 0x9220, 0xffffffff, 0x00140013,
700 0x9224, 0xffffffff, 0x00020001,
701 0x9228, 0xffffffff, 0x00040003,
702 0x922c, 0xffffffff, 0x00060005,
703 0x9238, 0xffffffff, 0x00080007,
704 0x923c, 0xffffffff, 0x000a0009,
705 0x9240, 0xffffffff, 0x000c000b,
706 0x9244, 0xffffffff, 0x000e000d,
707 0x9248, 0xffffffff, 0x0010000f,
708 0x9254, 0xffffffff, 0x00120011,
709 0x9258, 0xffffffff, 0x00140013,
710 0x9294, 0xffffffff, 0x00020001,
711 0x929c, 0xffffffff, 0x00040003,
712 0x92a0, 0xffffffff, 0x00060005,
713 0x92a4, 0xffffffff, 0x00080007
714};
715
716static void rv770_init_golden_registers(struct radeon_device *rdev)
717{
718 switch (rdev->family) {
719 case CHIP_RV770:
720 radeon_program_register_sequence(rdev,
721 r7xx_golden_registers,
722 (const u32)ARRAY_SIZE(r7xx_golden_registers));
723 radeon_program_register_sequence(rdev,
724 r7xx_golden_dyn_gpr_registers,
725 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
726 if (rdev->pdev->device == 0x994e)
727 radeon_program_register_sequence(rdev,
728 rv770ce_golden_registers,
729 (const u32)ARRAY_SIZE(rv770ce_golden_registers));
730 else
731 radeon_program_register_sequence(rdev,
732 rv770_golden_registers,
733 (const u32)ARRAY_SIZE(rv770_golden_registers));
734 radeon_program_register_sequence(rdev,
735 rv770_mgcg_init,
736 (const u32)ARRAY_SIZE(rv770_mgcg_init));
737 break;
738 case CHIP_RV730:
739 radeon_program_register_sequence(rdev,
740 r7xx_golden_registers,
741 (const u32)ARRAY_SIZE(r7xx_golden_registers));
742 radeon_program_register_sequence(rdev,
743 r7xx_golden_dyn_gpr_registers,
744 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
745 radeon_program_register_sequence(rdev,
746 rv730_golden_registers,
747	(const u32)ARRAY_SIZE(rv730_golden_registers));
748 radeon_program_register_sequence(rdev,
749 rv730_mgcg_init,
750	(const u32)ARRAY_SIZE(rv730_mgcg_init));
751 break;
752 case CHIP_RV710:
753 radeon_program_register_sequence(rdev,
754 r7xx_golden_registers,
755 (const u32)ARRAY_SIZE(r7xx_golden_registers));
756 radeon_program_register_sequence(rdev,
757 r7xx_golden_dyn_gpr_registers,
758 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
759 radeon_program_register_sequence(rdev,
760 rv710_golden_registers,
761	(const u32)ARRAY_SIZE(rv710_golden_registers));
762 radeon_program_register_sequence(rdev,
763 rv710_mgcg_init,
764	(const u32)ARRAY_SIZE(rv710_mgcg_init));
765 break;
766 case CHIP_RV740:
767 radeon_program_register_sequence(rdev,
768 rv740_golden_registers,
769	(const u32)ARRAY_SIZE(rv740_golden_registers));
770 radeon_program_register_sequence(rdev,
771 rv740_mgcg_init,
772	(const u32)ARRAY_SIZE(rv740_mgcg_init));
773 break;
774 default:
775 break;
776 }
777}
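Every golden table above is a flat array of (offset, and_mask, or_mask) triples, so the size passed in is three times the number of register updates and must come from the same array that is handed to the helper. A sketch of how radeon_program_register_sequence() consumes the triples, consistent with the table layout above (an outline under those assumptions, not the exact helper body):

	static void program_register_sequence_sketch(struct radeon_device *rdev,
						     const u32 *registers,
						     const u32 array_size)
	{
		u32 tmp, reg, and_mask, or_mask;
		int i;

		for (i = 0; i < array_size; i += 3) {
			reg = registers[i + 0];
			and_mask = registers[i + 1];
			or_mask = registers[i + 2];
			if (and_mask == 0xffffffff) {
				tmp = or_mask;		/* whole-register write */
			} else {
				tmp = RREG32(reg);	/* masked read-modify-write */
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}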
45 778
46 779 #define PCIE_BUS_CLK 10000
47 780 #define TCLK (PCIE_BUS_CLK / 10)
@@ -68,6 +801,105 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
68 801 return reference_clock;
69 802 }
70 803
804int rv770_uvd_resume(struct radeon_device *rdev)
805{
806 uint64_t addr;
807 uint32_t chip_id, size;
808 int r;
809
810 r = radeon_uvd_resume(rdev);
811 if (r)
812 return r;
813
814	/* program the VCPU memory controller bits 0-27 */
815 addr = rdev->uvd.gpu_addr >> 3;
816 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
817 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
818 WREG32(UVD_VCPU_CACHE_SIZE0, size);
819
820 addr += size;
821 size = RADEON_UVD_STACK_SIZE >> 3;
822 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
823 WREG32(UVD_VCPU_CACHE_SIZE1, size);
824
825 addr += size;
826 size = RADEON_UVD_HEAP_SIZE >> 3;
827 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
828 WREG32(UVD_VCPU_CACHE_SIZE2, size);
829
830 /* bits 28-31 */
831 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
832 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
833
834 /* bits 32-39 */
835 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
836 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
837
838 /* tell firmware which hardware it is running on */
839 switch (rdev->family) {
840 default:
841 return -EINVAL;
842 case CHIP_RV710:
843 chip_id = 0x01000005;
844 break;
845 case CHIP_RV730:
846 chip_id = 0x01000006;
847 break;
848 case CHIP_RV740:
849 chip_id = 0x01000007;
850 break;
851 case CHIP_CYPRESS:
852 case CHIP_HEMLOCK:
853 chip_id = 0x01000008;
854 break;
855 case CHIP_JUNIPER:
856 chip_id = 0x01000009;
857 break;
858 case CHIP_REDWOOD:
859 chip_id = 0x0100000a;
860 break;
861 case CHIP_CEDAR:
862 chip_id = 0x0100000b;
863 break;
864 case CHIP_SUMO:
865 chip_id = 0x0100000c;
866 break;
867 case CHIP_SUMO2:
868 chip_id = 0x0100000d;
869 break;
870 case CHIP_PALM:
871 chip_id = 0x0100000e;
872 break;
873 case CHIP_CAYMAN:
874 chip_id = 0x0100000f;
875 break;
876 case CHIP_BARTS:
877 chip_id = 0x01000010;
878 break;
879 case CHIP_TURKS:
880 chip_id = 0x01000011;
881 break;
882 case CHIP_CAICOS:
883 chip_id = 0x01000012;
884 break;
885 case CHIP_TAHITI:
886 chip_id = 0x01000014;
887 break;
888 case CHIP_VERDE:
889 chip_id = 0x01000015;
890 break;
891 case CHIP_PITCAIRN:
892 chip_id = 0x01000016;
893 break;
894 case CHIP_ARUBA:
895 chip_id = 0x01000017;
896 break;
897 }
898 WREG32(UVD_VCPU_CHIP_ID, chip_id);
899
900 return 0;
901}
902
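rv770_uvd_resume() spreads the 40-bit UVD buffer address across three register groups: the VCPU cache offsets carry the low bits in 8-byte units (the >> 3), UVD_LMI_ADDR_EXT carries bits 28-31 replicated into two fields, and UVD_LMI_EXT40_ADDR carries bits 32-39. A sketch of the split, with hypothetical helper and output names:

	static inline void uvd_split_gpu_addr(uint64_t gpu_addr,
					      uint32_t *cache_off,
					      uint32_t *ext, uint32_t *ext40)
	{
		*cache_off = (uint32_t)(gpu_addr >> 3);	/* 8-byte units */
		*ext = (gpu_addr >> 28) & 0xF;		/* bits 28-31 */
		*ext40 = (gpu_addr >> 32) & 0xFF;	/* bits 32-39 */
	}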
71 903 u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
72 904 {
73 905 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -611,6 +1443,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
611 1443 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
612 1444 WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
613 1445 WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
1446 if (rdev->family == CHIP_RV730) {
1447 WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
1448 WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
1449 WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
1450 }
614 1451
615 1452 WREG32(CGTS_SYS_TCC_DISABLE, 0);
616 1453 WREG32(CGTS_TCC_DISABLE, 0);
@@ -840,7 +1677,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
840 1677 }
841 1678 if (rdev->flags & RADEON_IS_AGP) {
842 1679 size_bf = mc->gtt_start;
843 size_af = 0xFFFFFFFF - mc->gtt_end;
1680 size_af = mc->mc_mask - mc->gtt_end;
844 1681 if (size_bf > size_af) {
845 1682 if (mc->mc_vram_size > size_bf) {
846 1683 dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1040,6 +1877,17 @@ static int rv770_startup(struct radeon_device *rdev)
1040 1877 return r;
1041 1878 }
1042 1879
1880 r = rv770_uvd_resume(rdev);
1881 if (!r) {
1882 r = radeon_fence_driver_start_ring(rdev,
1883 R600_RING_TYPE_UVD_INDEX);
1884 if (r)
1885 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
1886 }
1887
1888 if (r)
1889 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
1890
1043 1891 /* Enable IRQ */
1044 1892 r = r600_irq_init(rdev);
1045 1893 if (r) {
@@ -1074,6 +1922,19 @@ static int rv770_startup(struct radeon_device *rdev)
1074 1922 if (r)
1075 1923 return r;
1076 1924
1925 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1926 if (ring->ring_size) {
1927 r = radeon_ring_init(rdev, ring, ring->ring_size,
1928 R600_WB_UVD_RPTR_OFFSET,
1929 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
1930 0, 0xfffff, RADEON_CP_PACKET2);
1931 if (!r)
1932 r = r600_uvd_init(rdev);
1933
1934 if (r)
1935 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
1936 }
1937
1077 1938 r = radeon_ib_pool_init(rdev);
1078 1939 if (r) {
1079 1940 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1100,6 +1961,9 @@ int rv770_resume(struct radeon_device *rdev)
1100 1961 /* post card */
1101 1962 atom_asic_init(rdev->mode_info.atom_context);
1102 1963
1964 /* init golden registers */
1965 rv770_init_golden_registers(rdev);
1966
1103 1967 rdev->accel_working = true;
1104 1968 r = rv770_startup(rdev);
1105 1969 if (r) {
@@ -1115,6 +1979,7 @@ int rv770_resume(struct radeon_device *rdev)
1115 1979 int rv770_suspend(struct radeon_device *rdev)
1116 1980 {
1117 1981 r600_audio_fini(rdev);
1982 radeon_uvd_suspend(rdev);
1118 1983 r700_cp_stop(rdev);
1119 1984 r600_dma_stop(rdev);
1120 1985 r600_irq_suspend(rdev);
@@ -1156,6 +2021,8 @@ int rv770_init(struct radeon_device *rdev)
1156 2021 DRM_INFO("GPU not posted. posting now...\n");
1157 2022 atom_asic_init(rdev->mode_info.atom_context);
1158 2023 }
2024 /* init golden registers */
2025 rv770_init_golden_registers(rdev);
1159 2026 /* Initialize scratch registers */
1160 2027 r600_scratch_init(rdev);
1161 2028 /* Initialize surface registers */
@@ -1190,6 +2057,13 @@ int rv770_init(struct radeon_device *rdev)
1190 2057 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
1191 2058 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
1192 2059
2060 r = radeon_uvd_init(rdev);
2061 if (!r) {
2062 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
2063 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
2064 4096);
2065 }
2066
1193 2067 rdev->ih.ring_obj = NULL;
1194 2068 r600_ih_ring_init(rdev, 64 * 1024);
1195 2069
@@ -1224,6 +2098,7 @@ void rv770_fini(struct radeon_device *rdev)
1224 2098 radeon_ib_pool_fini(rdev);
1225 2099 radeon_irq_kms_fini(rdev);
1226 2100 rv770_pcie_gart_fini(rdev);
2101 radeon_uvd_fini(rdev);
1227 2102 r600_vram_scratch_fini(rdev);
1228 2103 radeon_gem_fini(rdev);
1229 2104 radeon_fence_driver_fini(rdev);
@@ -1264,23 +2139,23 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1264 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 2139 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
1265 2140
1266 2141 /* advertise upconfig capability */
1267 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
2142 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1268 2143 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1269 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
2144 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1270 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
2145 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1271 2146 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
1272 2147 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
1273 2148 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
1274 2149 LC_RECONFIG_ARC_MISSING_ESCAPE);
1275 2150 link_width_cntl |= lanes | LC_RECONFIG_NOW |
1276 2151 LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
1277 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
2152 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1278 2153 } else {
1279 2154 link_width_cntl |= LC_UPCONFIGURE_DIS;
1280 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
2155 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1281 2156 }
1282 2157
1283 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
2158 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1284 2159 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1285 2160 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1286 2161
@@ -1293,29 +2168,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1293 2168 WREG16(0x4088, link_cntl2);
1294 2169 WREG32(MM_CFGREGS_CNTL, 0);
1295 2170
1296 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
2171 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1297 2172 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
1298 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
2173 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1299 2174
1300 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
2175 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1301 2176 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
1302 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
2177 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1303 2178
1304 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
2179 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1305 2180 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
1306 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
2181 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1307 2182
1308 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
2183 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1309 2184 speed_cntl |= LC_GEN2_EN_STRAP;
1310 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
2185 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1311 2186
1312 2187 } else {
1313 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
2188 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1314 2189 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
1315 2190 if (1)
1316 2191 link_width_cntl |= LC_UPCONFIGURE_DIS;
1317 2192 else
1318 2193 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1319 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
2194 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1320 2195 }
1321 2196 }
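The RREG32_PCIE_P()/WREG32_PCIE_P() to *_PCIE_PORT() rename tracks the switch to per-ASIC PCIe-port register callbacks on the device structure rather than one hard-coded access path. A sketch of the indirection, assuming the pciep_rreg/pciep_wreg callback names used by the radeon core:

	#define RREG32_PCIE_PORT(reg)		rdev->pciep_rreg(rdev, (reg))
	#define WREG32_PCIE_PORT(reg, v)	rdev->pciep_wreg(rdev, (reg), (v))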
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index c55f950a4af7..85b16266f748 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -38,6 +38,30 @@
38 38 #define R7XX_MAX_PIPES 8
39 39 #define R7XX_MAX_PIPES_MASK 0xff
40 40
41/* discrete uvd clocks */
42#define CG_UPLL_FUNC_CNTL 0x718
43# define UPLL_RESET_MASK 0x00000001
44# define UPLL_SLEEP_MASK 0x00000002
45# define UPLL_BYPASS_EN_MASK 0x00000004
46# define UPLL_CTLREQ_MASK 0x00000008
47# define UPLL_REF_DIV(x) ((x) << 16)
48# define UPLL_REF_DIV_MASK 0x003F0000
49# define UPLL_CTLACK_MASK 0x40000000
50# define UPLL_CTLACK2_MASK 0x80000000
51#define CG_UPLL_FUNC_CNTL_2 0x71c
52# define UPLL_SW_HILEN(x) ((x) << 0)
53# define UPLL_SW_LOLEN(x) ((x) << 4)
54# define UPLL_SW_HILEN2(x) ((x) << 8)
55# define UPLL_SW_LOLEN2(x) ((x) << 12)
56# define UPLL_SW_MASK 0x0000FFFF
57# define VCLK_SRC_SEL(x) ((x) << 20)
58# define VCLK_SRC_SEL_MASK 0x01F00000
59# define DCLK_SRC_SEL(x) ((x) << 25)
60# define DCLK_SRC_SEL_MASK 0x3E000000
61#define CG_UPLL_FUNC_CNTL_3 0x720
62# define UPLL_FB_DIV(x) ((x) << 0)
63# define UPLL_FB_DIV_MASK 0x01FFFFFF
64
41 65 /* Registers */
42 66 #define CB_COLOR0_BASE 0x28040
43 67 #define CB_COLOR1_BASE 0x28044
@@ -112,6 +136,11 @@
112 136 #define DMA_TILING_CONFIG 0x3ec8
113 137 #define DMA_TILING_CONFIG2 0xd0b8
114 138
139/* RV730 only */
140#define UVD_UDEC_TILING_CONFIG 0xef40
141#define UVD_UDEC_DB_TILING_CONFIG 0xef44
142#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
143
115 144 #define GC_USER_SHADER_PIPE_CONFIG 0x8954
116 145 #define INACTIVE_QD_PIPES(x) ((x) << 8)
117 146 #define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -671,4 +700,18 @@
671 700 # define TARGET_LINK_SPEED_MASK (0xf << 0)
672 701 # define SELECTABLE_DEEMPHASIS (1 << 6)
673 702
703/* UVD */
704#define UVD_LMI_EXT40_ADDR 0xf498
705#define UVD_VCPU_CHIP_ID 0xf4d4
706#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
707#define UVD_VCPU_CACHE_SIZE0 0xf4dc
708#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
709#define UVD_VCPU_CACHE_SIZE1 0xf4e4
710#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
711#define UVD_VCPU_CACHE_SIZE2 0xf4ec
712#define UVD_LMI_ADDR_EXT 0xf594
713
714#define UVD_RBC_RB_RPTR 0xf690
715#define UVD_RBC_RB_WPTR 0xf694
716
674 717 #endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bafbe3216952..f0b6c2f87c4d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -70,6 +70,794 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
70 70 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
71 71 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
72 72
73static const u32 tahiti_golden_rlc_registers[] =
74{
75 0xc424, 0xffffffff, 0x00601005,
76 0xc47c, 0xffffffff, 0x10104040,
77 0xc488, 0xffffffff, 0x0100000a,
78 0xc314, 0xffffffff, 0x00000800,
79 0xc30c, 0xffffffff, 0x800000f4,
80 0xf4a8, 0xffffffff, 0x00000000
81};
82
83static const u32 tahiti_golden_registers[] =
84{
85 0x9a10, 0x00010000, 0x00018208,
86 0x9830, 0xffffffff, 0x00000000,
87 0x9834, 0xf00fffff, 0x00000400,
88 0x9838, 0x0002021c, 0x00020200,
89 0xc78, 0x00000080, 0x00000000,
90 0xd030, 0x000300c0, 0x00800040,
91 0xd830, 0x000300c0, 0x00800040,
92 0x5bb0, 0x000000f0, 0x00000070,
93 0x5bc0, 0x00200000, 0x50100000,
94 0x7030, 0x31000311, 0x00000011,
95 0x277c, 0x00000003, 0x000007ff,
96 0x240c, 0x000007ff, 0x00000000,
97 0x8a14, 0xf000001f, 0x00000007,
98 0x8b24, 0xffffffff, 0x00ffffff,
99 0x8b10, 0x0000ff0f, 0x00000000,
100 0x28a4c, 0x07ffffff, 0x4e000000,
101 0x28350, 0x3f3f3fff, 0x2a00126a,
102 0x30, 0x000000ff, 0x0040,
103 0x34, 0x00000040, 0x00004040,
104 0x9100, 0x07ffffff, 0x03000000,
105 0x8e88, 0x01ff1f3f, 0x00000000,
106 0x8e84, 0x01ff1f3f, 0x00000000,
107 0x9060, 0x0000007f, 0x00000020,
108 0x9508, 0x00010000, 0x00010000,
109 0xac14, 0x00000200, 0x000002fb,
110 0xac10, 0xffffffff, 0x0000543b,
111 0xac0c, 0xffffffff, 0xa9210876,
112 0x88d0, 0xffffffff, 0x000fff40,
113 0x88d4, 0x0000001f, 0x00000010,
114 0x1410, 0x20000000, 0x20fffed8,
115 0x15c0, 0x000c0fc0, 0x000c0400
116};
117
118static const u32 tahiti_golden_registers2[] =
119{
120 0xc64, 0x00000001, 0x00000001
121};
122
123static const u32 pitcairn_golden_rlc_registers[] =
124{
125 0xc424, 0xffffffff, 0x00601004,
126 0xc47c, 0xffffffff, 0x10102020,
127 0xc488, 0xffffffff, 0x01000020,
128 0xc314, 0xffffffff, 0x00000800,
129 0xc30c, 0xffffffff, 0x800000a4
130};
131
132static const u32 pitcairn_golden_registers[] =
133{
134 0x9a10, 0x00010000, 0x00018208,
135 0x9830, 0xffffffff, 0x00000000,
136 0x9834, 0xf00fffff, 0x00000400,
137 0x9838, 0x0002021c, 0x00020200,
138 0xc78, 0x00000080, 0x00000000,
139 0xd030, 0x000300c0, 0x00800040,
140 0xd830, 0x000300c0, 0x00800040,
141 0x5bb0, 0x000000f0, 0x00000070,
142 0x5bc0, 0x00200000, 0x50100000,
143 0x7030, 0x31000311, 0x00000011,
144 0x2ae4, 0x00073ffe, 0x000022a2,
145 0x240c, 0x000007ff, 0x00000000,
146 0x8a14, 0xf000001f, 0x00000007,
147 0x8b24, 0xffffffff, 0x00ffffff,
148 0x8b10, 0x0000ff0f, 0x00000000,
149 0x28a4c, 0x07ffffff, 0x4e000000,
150 0x28350, 0x3f3f3fff, 0x2a00126a,
151 0x30, 0x000000ff, 0x0040,
152 0x34, 0x00000040, 0x00004040,
153 0x9100, 0x07ffffff, 0x03000000,
154 0x9060, 0x0000007f, 0x00000020,
155 0x9508, 0x00010000, 0x00010000,
156 0xac14, 0x000003ff, 0x000000f7,
157 0xac10, 0xffffffff, 0x00000000,
158 0xac0c, 0xffffffff, 0x32761054,
159 0x88d4, 0x0000001f, 0x00000010,
160 0x15c0, 0x000c0fc0, 0x000c0400
161};
162
163static const u32 verde_golden_rlc_registers[] =
164{
165 0xc424, 0xffffffff, 0x033f1005,
166 0xc47c, 0xffffffff, 0x10808020,
167 0xc488, 0xffffffff, 0x00800008,
168 0xc314, 0xffffffff, 0x00001000,
169 0xc30c, 0xffffffff, 0x80010014
170};
171
172static const u32 verde_golden_registers[] =
173{
174 0x9a10, 0x00010000, 0x00018208,
175 0x9830, 0xffffffff, 0x00000000,
176 0x9834, 0xf00fffff, 0x00000400,
177 0x9838, 0x0002021c, 0x00020200,
178 0xc78, 0x00000080, 0x00000000,
179 0xd030, 0x000300c0, 0x00800040,
180 0xd030, 0x000300c0, 0x00800040,
181 0xd830, 0x000300c0, 0x00800040,
182 0xd830, 0x000300c0, 0x00800040,
183 0x5bb0, 0x000000f0, 0x00000070,
184 0x5bc0, 0x00200000, 0x50100000,
185 0x7030, 0x31000311, 0x00000011,
186 0x2ae4, 0x00073ffe, 0x000022a2,
187 0x2ae4, 0x00073ffe, 0x000022a2,
188 0x2ae4, 0x00073ffe, 0x000022a2,
189 0x240c, 0x000007ff, 0x00000000,
190 0x240c, 0x000007ff, 0x00000000,
191 0x240c, 0x000007ff, 0x00000000,
192 0x8a14, 0xf000001f, 0x00000007,
193 0x8a14, 0xf000001f, 0x00000007,
194 0x8a14, 0xf000001f, 0x00000007,
195 0x8b24, 0xffffffff, 0x00ffffff,
196 0x8b10, 0x0000ff0f, 0x00000000,
197 0x28a4c, 0x07ffffff, 0x4e000000,
198 0x28350, 0x3f3f3fff, 0x0000124a,
199 0x28350, 0x3f3f3fff, 0x0000124a,
200 0x28350, 0x3f3f3fff, 0x0000124a,
201 0x30, 0x000000ff, 0x0040,
202 0x34, 0x00000040, 0x00004040,
203 0x9100, 0x07ffffff, 0x03000000,
204 0x9100, 0x07ffffff, 0x03000000,
205 0x8e88, 0x01ff1f3f, 0x00000000,
206 0x8e88, 0x01ff1f3f, 0x00000000,
207 0x8e88, 0x01ff1f3f, 0x00000000,
208 0x8e84, 0x01ff1f3f, 0x00000000,
209 0x8e84, 0x01ff1f3f, 0x00000000,
210 0x8e84, 0x01ff1f3f, 0x00000000,
211 0x9060, 0x0000007f, 0x00000020,
212 0x9508, 0x00010000, 0x00010000,
213 0xac14, 0x000003ff, 0x00000003,
214 0xac14, 0x000003ff, 0x00000003,
215 0xac14, 0x000003ff, 0x00000003,
216 0xac10, 0xffffffff, 0x00000000,
217 0xac10, 0xffffffff, 0x00000000,
218 0xac10, 0xffffffff, 0x00000000,
219 0xac0c, 0xffffffff, 0x00001032,
220 0xac0c, 0xffffffff, 0x00001032,
221 0xac0c, 0xffffffff, 0x00001032,
222 0x88d4, 0x0000001f, 0x00000010,
223 0x88d4, 0x0000001f, 0x00000010,
224 0x88d4, 0x0000001f, 0x00000010,
225 0x15c0, 0x000c0fc0, 0x000c0400
226};
227
228static const u32 oland_golden_rlc_registers[] =
229{
230 0xc424, 0xffffffff, 0x00601005,
231 0xc47c, 0xffffffff, 0x10104040,
232 0xc488, 0xffffffff, 0x0100000a,
233 0xc314, 0xffffffff, 0x00000800,
234 0xc30c, 0xffffffff, 0x800000f4
235};
236
237static const u32 oland_golden_registers[] =
238{
239 0x9a10, 0x00010000, 0x00018208,
240 0x9830, 0xffffffff, 0x00000000,
241 0x9834, 0xf00fffff, 0x00000400,
242 0x9838, 0x0002021c, 0x00020200,
243 0xc78, 0x00000080, 0x00000000,
244 0xd030, 0x000300c0, 0x00800040,
245 0xd830, 0x000300c0, 0x00800040,
246 0x5bb0, 0x000000f0, 0x00000070,
247 0x5bc0, 0x00200000, 0x50100000,
248 0x7030, 0x31000311, 0x00000011,
249 0x2ae4, 0x00073ffe, 0x000022a2,
250 0x240c, 0x000007ff, 0x00000000,
251 0x8a14, 0xf000001f, 0x00000007,
252 0x8b24, 0xffffffff, 0x00ffffff,
253 0x8b10, 0x0000ff0f, 0x00000000,
254 0x28a4c, 0x07ffffff, 0x4e000000,
255 0x28350, 0x3f3f3fff, 0x00000082,
256 0x30, 0x000000ff, 0x0040,
257 0x34, 0x00000040, 0x00004040,
258 0x9100, 0x07ffffff, 0x03000000,
259 0x9060, 0x0000007f, 0x00000020,
260 0x9508, 0x00010000, 0x00010000,
261 0xac14, 0x000003ff, 0x000000f3,
262 0xac10, 0xffffffff, 0x00000000,
263 0xac0c, 0xffffffff, 0x00003210,
264 0x88d4, 0x0000001f, 0x00000010,
265 0x15c0, 0x000c0fc0, 0x000c0400
266};
267
268static const u32 tahiti_mgcg_cgcg_init[] =
269{
270 0xc400, 0xffffffff, 0xfffffffc,
271 0x802c, 0xffffffff, 0xe0000000,
272 0x9a60, 0xffffffff, 0x00000100,
273 0x92a4, 0xffffffff, 0x00000100,
274 0xc164, 0xffffffff, 0x00000100,
275 0x9774, 0xffffffff, 0x00000100,
276 0x8984, 0xffffffff, 0x06000100,
277 0x8a18, 0xffffffff, 0x00000100,
278 0x92a0, 0xffffffff, 0x00000100,
279 0xc380, 0xffffffff, 0x00000100,
280 0x8b28, 0xffffffff, 0x00000100,
281 0x9144, 0xffffffff, 0x00000100,
282 0x8d88, 0xffffffff, 0x00000100,
283 0x8d8c, 0xffffffff, 0x00000100,
284 0x9030, 0xffffffff, 0x00000100,
285 0x9034, 0xffffffff, 0x00000100,
286 0x9038, 0xffffffff, 0x00000100,
287 0x903c, 0xffffffff, 0x00000100,
288 0xad80, 0xffffffff, 0x00000100,
289 0xac54, 0xffffffff, 0x00000100,
290 0x897c, 0xffffffff, 0x06000100,
291 0x9868, 0xffffffff, 0x00000100,
292 0x9510, 0xffffffff, 0x00000100,
293 0xaf04, 0xffffffff, 0x00000100,
294 0xae04, 0xffffffff, 0x00000100,
295 0x949c, 0xffffffff, 0x00000100,
296 0x802c, 0xffffffff, 0xe0000000,
297 0x9160, 0xffffffff, 0x00010000,
298 0x9164, 0xffffffff, 0x00030002,
299 0x9168, 0xffffffff, 0x00040007,
300 0x916c, 0xffffffff, 0x00060005,
301 0x9170, 0xffffffff, 0x00090008,
302 0x9174, 0xffffffff, 0x00020001,
303 0x9178, 0xffffffff, 0x00040003,
304 0x917c, 0xffffffff, 0x00000007,
305 0x9180, 0xffffffff, 0x00060005,
306 0x9184, 0xffffffff, 0x00090008,
307 0x9188, 0xffffffff, 0x00030002,
308 0x918c, 0xffffffff, 0x00050004,
309 0x9190, 0xffffffff, 0x00000008,
310 0x9194, 0xffffffff, 0x00070006,
311 0x9198, 0xffffffff, 0x000a0009,
312 0x919c, 0xffffffff, 0x00040003,
313 0x91a0, 0xffffffff, 0x00060005,
314 0x91a4, 0xffffffff, 0x00000009,
315 0x91a8, 0xffffffff, 0x00080007,
316 0x91ac, 0xffffffff, 0x000b000a,
317 0x91b0, 0xffffffff, 0x00050004,
318 0x91b4, 0xffffffff, 0x00070006,
319 0x91b8, 0xffffffff, 0x0008000b,
320 0x91bc, 0xffffffff, 0x000a0009,
321 0x91c0, 0xffffffff, 0x000d000c,
322 0x91c4, 0xffffffff, 0x00060005,
323 0x91c8, 0xffffffff, 0x00080007,
324 0x91cc, 0xffffffff, 0x0000000b,
325 0x91d0, 0xffffffff, 0x000a0009,
326 0x91d4, 0xffffffff, 0x000d000c,
327 0x91d8, 0xffffffff, 0x00070006,
328 0x91dc, 0xffffffff, 0x00090008,
329 0x91e0, 0xffffffff, 0x0000000c,
330 0x91e4, 0xffffffff, 0x000b000a,
331 0x91e8, 0xffffffff, 0x000e000d,
332 0x91ec, 0xffffffff, 0x00080007,
333 0x91f0, 0xffffffff, 0x000a0009,
334 0x91f4, 0xffffffff, 0x0000000d,
335 0x91f8, 0xffffffff, 0x000c000b,
336 0x91fc, 0xffffffff, 0x000f000e,
337 0x9200, 0xffffffff, 0x00090008,
338 0x9204, 0xffffffff, 0x000b000a,
339 0x9208, 0xffffffff, 0x000c000f,
340 0x920c, 0xffffffff, 0x000e000d,
341 0x9210, 0xffffffff, 0x00110010,
342 0x9214, 0xffffffff, 0x000a0009,
343 0x9218, 0xffffffff, 0x000c000b,
344 0x921c, 0xffffffff, 0x0000000f,
345 0x9220, 0xffffffff, 0x000e000d,
346 0x9224, 0xffffffff, 0x00110010,
347 0x9228, 0xffffffff, 0x000b000a,
348 0x922c, 0xffffffff, 0x000d000c,
349 0x9230, 0xffffffff, 0x00000010,
350 0x9234, 0xffffffff, 0x000f000e,
351 0x9238, 0xffffffff, 0x00120011,
352 0x923c, 0xffffffff, 0x000c000b,
353 0x9240, 0xffffffff, 0x000e000d,
354 0x9244, 0xffffffff, 0x00000011,
355 0x9248, 0xffffffff, 0x0010000f,
356 0x924c, 0xffffffff, 0x00130012,
357 0x9250, 0xffffffff, 0x000d000c,
358 0x9254, 0xffffffff, 0x000f000e,
359 0x9258, 0xffffffff, 0x00100013,
360 0x925c, 0xffffffff, 0x00120011,
361 0x9260, 0xffffffff, 0x00150014,
362 0x9264, 0xffffffff, 0x000e000d,
363 0x9268, 0xffffffff, 0x0010000f,
364 0x926c, 0xffffffff, 0x00000013,
365 0x9270, 0xffffffff, 0x00120011,
366 0x9274, 0xffffffff, 0x00150014,
367 0x9278, 0xffffffff, 0x000f000e,
368 0x927c, 0xffffffff, 0x00110010,
369 0x9280, 0xffffffff, 0x00000014,
370 0x9284, 0xffffffff, 0x00130012,
371 0x9288, 0xffffffff, 0x00160015,
372 0x928c, 0xffffffff, 0x0010000f,
373 0x9290, 0xffffffff, 0x00120011,
374 0x9294, 0xffffffff, 0x00000015,
375 0x9298, 0xffffffff, 0x00140013,
376 0x929c, 0xffffffff, 0x00170016,
377 0x9150, 0xffffffff, 0x96940200,
378 0x8708, 0xffffffff, 0x00900100,
379 0xc478, 0xffffffff, 0x00000080,
380 0xc404, 0xffffffff, 0x0020003f,
381 0x30, 0xffffffff, 0x0000001c,
382 0x34, 0x000f0000, 0x000f0000,
383 0x160c, 0xffffffff, 0x00000100,
384 0x1024, 0xffffffff, 0x00000100,
385 0x102c, 0x00000101, 0x00000000,
386 0x20a8, 0xffffffff, 0x00000104,
387 0x264c, 0x000c0000, 0x000c0000,
388 0x2648, 0x000c0000, 0x000c0000,
389 0x55e4, 0xff000fff, 0x00000100,
390 0x55e8, 0x00000001, 0x00000001,
391 0x2f50, 0x00000001, 0x00000001,
392 0x30cc, 0xc0000fff, 0x00000104,
393 0xc1e4, 0x00000001, 0x00000001,
394 0xd0c0, 0xfffffff0, 0x00000100,
395 0xd8c0, 0xfffffff0, 0x00000100
396};
397
398static const u32 pitcairn_mgcg_cgcg_init[] =
399{
400 0xc400, 0xffffffff, 0xfffffffc,
401 0x802c, 0xffffffff, 0xe0000000,
402 0x9a60, 0xffffffff, 0x00000100,
403 0x92a4, 0xffffffff, 0x00000100,
404 0xc164, 0xffffffff, 0x00000100,
405 0x9774, 0xffffffff, 0x00000100,
406 0x8984, 0xffffffff, 0x06000100,
407 0x8a18, 0xffffffff, 0x00000100,
408 0x92a0, 0xffffffff, 0x00000100,
409 0xc380, 0xffffffff, 0x00000100,
410 0x8b28, 0xffffffff, 0x00000100,
411 0x9144, 0xffffffff, 0x00000100,
412 0x8d88, 0xffffffff, 0x00000100,
413 0x8d8c, 0xffffffff, 0x00000100,
414 0x9030, 0xffffffff, 0x00000100,
415 0x9034, 0xffffffff, 0x00000100,
416 0x9038, 0xffffffff, 0x00000100,
417 0x903c, 0xffffffff, 0x00000100,
418 0xad80, 0xffffffff, 0x00000100,
419 0xac54, 0xffffffff, 0x00000100,
420 0x897c, 0xffffffff, 0x06000100,
421 0x9868, 0xffffffff, 0x00000100,
422 0x9510, 0xffffffff, 0x00000100,
423 0xaf04, 0xffffffff, 0x00000100,
424 0xae04, 0xffffffff, 0x00000100,
425 0x949c, 0xffffffff, 0x00000100,
426 0x802c, 0xffffffff, 0xe0000000,
427 0x9160, 0xffffffff, 0x00010000,
428 0x9164, 0xffffffff, 0x00030002,
429 0x9168, 0xffffffff, 0x00040007,
430 0x916c, 0xffffffff, 0x00060005,
431 0x9170, 0xffffffff, 0x00090008,
432 0x9174, 0xffffffff, 0x00020001,
433 0x9178, 0xffffffff, 0x00040003,
434 0x917c, 0xffffffff, 0x00000007,
435 0x9180, 0xffffffff, 0x00060005,
436 0x9184, 0xffffffff, 0x00090008,
437 0x9188, 0xffffffff, 0x00030002,
438 0x918c, 0xffffffff, 0x00050004,
439 0x9190, 0xffffffff, 0x00000008,
440 0x9194, 0xffffffff, 0x00070006,
441 0x9198, 0xffffffff, 0x000a0009,
442 0x919c, 0xffffffff, 0x00040003,
443 0x91a0, 0xffffffff, 0x00060005,
444 0x91a4, 0xffffffff, 0x00000009,
445 0x91a8, 0xffffffff, 0x00080007,
446 0x91ac, 0xffffffff, 0x000b000a,
447 0x91b0, 0xffffffff, 0x00050004,
448 0x91b4, 0xffffffff, 0x00070006,
449 0x91b8, 0xffffffff, 0x0008000b,
450 0x91bc, 0xffffffff, 0x000a0009,
451 0x91c0, 0xffffffff, 0x000d000c,
452 0x9200, 0xffffffff, 0x00090008,
453 0x9204, 0xffffffff, 0x000b000a,
454 0x9208, 0xffffffff, 0x000c000f,
455 0x920c, 0xffffffff, 0x000e000d,
456 0x9210, 0xffffffff, 0x00110010,
457 0x9214, 0xffffffff, 0x000a0009,
458 0x9218, 0xffffffff, 0x000c000b,
459 0x921c, 0xffffffff, 0x0000000f,
460 0x9220, 0xffffffff, 0x000e000d,
461 0x9224, 0xffffffff, 0x00110010,
462 0x9228, 0xffffffff, 0x000b000a,
463 0x922c, 0xffffffff, 0x000d000c,
464 0x9230, 0xffffffff, 0x00000010,
465 0x9234, 0xffffffff, 0x000f000e,
466 0x9238, 0xffffffff, 0x00120011,
467 0x923c, 0xffffffff, 0x000c000b,
468 0x9240, 0xffffffff, 0x000e000d,
469 0x9244, 0xffffffff, 0x00000011,
470 0x9248, 0xffffffff, 0x0010000f,
471 0x924c, 0xffffffff, 0x00130012,
472 0x9250, 0xffffffff, 0x000d000c,
473 0x9254, 0xffffffff, 0x000f000e,
474 0x9258, 0xffffffff, 0x00100013,
475 0x925c, 0xffffffff, 0x00120011,
476 0x9260, 0xffffffff, 0x00150014,
477 0x9150, 0xffffffff, 0x96940200,
478 0x8708, 0xffffffff, 0x00900100,
479 0xc478, 0xffffffff, 0x00000080,
480 0xc404, 0xffffffff, 0x0020003f,
481 0x30, 0xffffffff, 0x0000001c,
482 0x34, 0x000f0000, 0x000f0000,
483 0x160c, 0xffffffff, 0x00000100,
484 0x1024, 0xffffffff, 0x00000100,
485 0x102c, 0x00000101, 0x00000000,
486 0x20a8, 0xffffffff, 0x00000104,
487 0x55e4, 0xff000fff, 0x00000100,
488 0x55e8, 0x00000001, 0x00000001,
489 0x2f50, 0x00000001, 0x00000001,
490 0x30cc, 0xc0000fff, 0x00000104,
491 0xc1e4, 0x00000001, 0x00000001,
492 0xd0c0, 0xfffffff0, 0x00000100,
493 0xd8c0, 0xfffffff0, 0x00000100
494};
495
496static const u32 verde_mgcg_cgcg_init[] =
497{
498 0xc400, 0xffffffff, 0xfffffffc,
499 0x802c, 0xffffffff, 0xe0000000,
500 0x9a60, 0xffffffff, 0x00000100,
501 0x92a4, 0xffffffff, 0x00000100,
502 0xc164, 0xffffffff, 0x00000100,
503 0x9774, 0xffffffff, 0x00000100,
504 0x8984, 0xffffffff, 0x06000100,
505 0x8a18, 0xffffffff, 0x00000100,
506 0x92a0, 0xffffffff, 0x00000100,
507 0xc380, 0xffffffff, 0x00000100,
508 0x8b28, 0xffffffff, 0x00000100,
509 0x9144, 0xffffffff, 0x00000100,
510 0x8d88, 0xffffffff, 0x00000100,
511 0x8d8c, 0xffffffff, 0x00000100,
512 0x9030, 0xffffffff, 0x00000100,
513 0x9034, 0xffffffff, 0x00000100,
514 0x9038, 0xffffffff, 0x00000100,
515 0x903c, 0xffffffff, 0x00000100,
516 0xad80, 0xffffffff, 0x00000100,
517 0xac54, 0xffffffff, 0x00000100,
518 0x897c, 0xffffffff, 0x06000100,
519 0x9868, 0xffffffff, 0x00000100,
520 0x9510, 0xffffffff, 0x00000100,
521 0xaf04, 0xffffffff, 0x00000100,
522 0xae04, 0xffffffff, 0x00000100,
523 0x949c, 0xffffffff, 0x00000100,
524 0x802c, 0xffffffff, 0xe0000000,
525 0x9160, 0xffffffff, 0x00010000,
526 0x9164, 0xffffffff, 0x00030002,
527 0x9168, 0xffffffff, 0x00040007,
528 0x916c, 0xffffffff, 0x00060005,
529 0x9170, 0xffffffff, 0x00090008,
530 0x9174, 0xffffffff, 0x00020001,
531 0x9178, 0xffffffff, 0x00040003,
532 0x917c, 0xffffffff, 0x00000007,
533 0x9180, 0xffffffff, 0x00060005,
534 0x9184, 0xffffffff, 0x00090008,
535 0x9188, 0xffffffff, 0x00030002,
536 0x918c, 0xffffffff, 0x00050004,
537 0x9190, 0xffffffff, 0x00000008,
538 0x9194, 0xffffffff, 0x00070006,
539 0x9198, 0xffffffff, 0x000a0009,
540 0x919c, 0xffffffff, 0x00040003,
541 0x91a0, 0xffffffff, 0x00060005,
542 0x91a4, 0xffffffff, 0x00000009,
543 0x91a8, 0xffffffff, 0x00080007,
544 0x91ac, 0xffffffff, 0x000b000a,
545 0x91b0, 0xffffffff, 0x00050004,
546 0x91b4, 0xffffffff, 0x00070006,
547 0x91b8, 0xffffffff, 0x0008000b,
548 0x91bc, 0xffffffff, 0x000a0009,
549 0x91c0, 0xffffffff, 0x000d000c,
550 0x9200, 0xffffffff, 0x00090008,
551 0x9204, 0xffffffff, 0x000b000a,
552 0x9208, 0xffffffff, 0x000c000f,
553 0x920c, 0xffffffff, 0x000e000d,
554 0x9210, 0xffffffff, 0x00110010,
555 0x9214, 0xffffffff, 0x000a0009,
556 0x9218, 0xffffffff, 0x000c000b,
557 0x921c, 0xffffffff, 0x0000000f,
558 0x9220, 0xffffffff, 0x000e000d,
559 0x9224, 0xffffffff, 0x00110010,
560 0x9228, 0xffffffff, 0x000b000a,
561 0x922c, 0xffffffff, 0x000d000c,
562 0x9230, 0xffffffff, 0x00000010,
563 0x9234, 0xffffffff, 0x000f000e,
564 0x9238, 0xffffffff, 0x00120011,
565 0x923c, 0xffffffff, 0x000c000b,
566 0x9240, 0xffffffff, 0x000e000d,
567 0x9244, 0xffffffff, 0x00000011,
568 0x9248, 0xffffffff, 0x0010000f,
569 0x924c, 0xffffffff, 0x00130012,
570 0x9250, 0xffffffff, 0x000d000c,
571 0x9254, 0xffffffff, 0x000f000e,
572 0x9258, 0xffffffff, 0x00100013,
573 0x925c, 0xffffffff, 0x00120011,
574 0x9260, 0xffffffff, 0x00150014,
575 0x9150, 0xffffffff, 0x96940200,
576 0x8708, 0xffffffff, 0x00900100,
577 0xc478, 0xffffffff, 0x00000080,
578 0xc404, 0xffffffff, 0x0020003f,
579 0x30, 0xffffffff, 0x0000001c,
580 0x34, 0x000f0000, 0x000f0000,
581 0x160c, 0xffffffff, 0x00000100,
582 0x1024, 0xffffffff, 0x00000100,
583 0x102c, 0x00000101, 0x00000000,
584 0x20a8, 0xffffffff, 0x00000104,
585 0x264c, 0x000c0000, 0x000c0000,
586 0x2648, 0x000c0000, 0x000c0000,
587 0x55e4, 0xff000fff, 0x00000100,
588 0x55e8, 0x00000001, 0x00000001,
589 0x2f50, 0x00000001, 0x00000001,
590 0x30cc, 0xc0000fff, 0x00000104,
591 0xc1e4, 0x00000001, 0x00000001,
592 0xd0c0, 0xfffffff0, 0x00000100,
593 0xd8c0, 0xfffffff0, 0x00000100
594};
595
596static const u32 oland_mgcg_cgcg_init[] =
597{
598 0xc400, 0xffffffff, 0xfffffffc,
599 0x802c, 0xffffffff, 0xe0000000,
600 0x9a60, 0xffffffff, 0x00000100,
601 0x92a4, 0xffffffff, 0x00000100,
602 0xc164, 0xffffffff, 0x00000100,
603 0x9774, 0xffffffff, 0x00000100,
604 0x8984, 0xffffffff, 0x06000100,
605 0x8a18, 0xffffffff, 0x00000100,
606 0x92a0, 0xffffffff, 0x00000100,
607 0xc380, 0xffffffff, 0x00000100,
608 0x8b28, 0xffffffff, 0x00000100,
609 0x9144, 0xffffffff, 0x00000100,
610 0x8d88, 0xffffffff, 0x00000100,
611 0x8d8c, 0xffffffff, 0x00000100,
612 0x9030, 0xffffffff, 0x00000100,
613 0x9034, 0xffffffff, 0x00000100,
614 0x9038, 0xffffffff, 0x00000100,
615 0x903c, 0xffffffff, 0x00000100,
616 0xad80, 0xffffffff, 0x00000100,
617 0xac54, 0xffffffff, 0x00000100,
618 0x897c, 0xffffffff, 0x06000100,
619 0x9868, 0xffffffff, 0x00000100,
620 0x9510, 0xffffffff, 0x00000100,
621 0xaf04, 0xffffffff, 0x00000100,
622 0xae04, 0xffffffff, 0x00000100,
623 0x949c, 0xffffffff, 0x00000100,
624 0x802c, 0xffffffff, 0xe0000000,
625 0x9160, 0xffffffff, 0x00010000,
626 0x9164, 0xffffffff, 0x00030002,
627 0x9168, 0xffffffff, 0x00040007,
628 0x916c, 0xffffffff, 0x00060005,
629 0x9170, 0xffffffff, 0x00090008,
630 0x9174, 0xffffffff, 0x00020001,
631 0x9178, 0xffffffff, 0x00040003,
632 0x917c, 0xffffffff, 0x00000007,
633 0x9180, 0xffffffff, 0x00060005,
634 0x9184, 0xffffffff, 0x00090008,
635 0x9188, 0xffffffff, 0x00030002,
636 0x918c, 0xffffffff, 0x00050004,
637 0x9190, 0xffffffff, 0x00000008,
638 0x9194, 0xffffffff, 0x00070006,
639 0x9198, 0xffffffff, 0x000a0009,
640 0x919c, 0xffffffff, 0x00040003,
641 0x91a0, 0xffffffff, 0x00060005,
642 0x91a4, 0xffffffff, 0x00000009,
643 0x91a8, 0xffffffff, 0x00080007,
644 0x91ac, 0xffffffff, 0x000b000a,
645 0x91b0, 0xffffffff, 0x00050004,
646 0x91b4, 0xffffffff, 0x00070006,
647 0x91b8, 0xffffffff, 0x0008000b,
648 0x91bc, 0xffffffff, 0x000a0009,
649 0x91c0, 0xffffffff, 0x000d000c,
650 0x91c4, 0xffffffff, 0x00060005,
651 0x91c8, 0xffffffff, 0x00080007,
652 0x91cc, 0xffffffff, 0x0000000b,
653 0x91d0, 0xffffffff, 0x000a0009,
654 0x91d4, 0xffffffff, 0x000d000c,
655 0x9150, 0xffffffff, 0x96940200,
656 0x8708, 0xffffffff, 0x00900100,
657 0xc478, 0xffffffff, 0x00000080,
658 0xc404, 0xffffffff, 0x0020003f,
659 0x30, 0xffffffff, 0x0000001c,
660 0x34, 0x000f0000, 0x000f0000,
661 0x160c, 0xffffffff, 0x00000100,
662 0x1024, 0xffffffff, 0x00000100,
663 0x102c, 0x00000101, 0x00000000,
664 0x20a8, 0xffffffff, 0x00000104,
665 0x264c, 0x000c0000, 0x000c0000,
666 0x2648, 0x000c0000, 0x000c0000,
667 0x55e4, 0xff000fff, 0x00000100,
668 0x55e8, 0x00000001, 0x00000001,
669 0x2f50, 0x00000001, 0x00000001,
670 0x30cc, 0xc0000fff, 0x00000104,
671 0xc1e4, 0x00000001, 0x00000001,
672 0xd0c0, 0xfffffff0, 0x00000100,
673 0xd8c0, 0xfffffff0, 0x00000100
674};
675
676static const u32 verde_pg_init[] =
677{
678 0x353c, 0xffffffff, 0x40000,
679 0x3538, 0xffffffff, 0x200010ff,
680 0x353c, 0xffffffff, 0x0,
681 0x353c, 0xffffffff, 0x0,
682 0x353c, 0xffffffff, 0x0,
683 0x353c, 0xffffffff, 0x0,
684 0x353c, 0xffffffff, 0x0,
685 0x353c, 0xffffffff, 0x7007,
686 0x3538, 0xffffffff, 0x300010ff,
687 0x353c, 0xffffffff, 0x0,
688 0x353c, 0xffffffff, 0x0,
689 0x353c, 0xffffffff, 0x0,
690 0x353c, 0xffffffff, 0x0,
691 0x353c, 0xffffffff, 0x0,
692 0x353c, 0xffffffff, 0x400000,
693 0x3538, 0xffffffff, 0x100010ff,
694 0x353c, 0xffffffff, 0x0,
695 0x353c, 0xffffffff, 0x0,
696 0x353c, 0xffffffff, 0x0,
697 0x353c, 0xffffffff, 0x0,
698 0x353c, 0xffffffff, 0x0,
699 0x353c, 0xffffffff, 0x120200,
700 0x3538, 0xffffffff, 0x500010ff,
701 0x353c, 0xffffffff, 0x0,
702 0x353c, 0xffffffff, 0x0,
703 0x353c, 0xffffffff, 0x0,
704 0x353c, 0xffffffff, 0x0,
705 0x353c, 0xffffffff, 0x0,
706 0x353c, 0xffffffff, 0x1e1e16,
707 0x3538, 0xffffffff, 0x600010ff,
708 0x353c, 0xffffffff, 0x0,
709 0x353c, 0xffffffff, 0x0,
710 0x353c, 0xffffffff, 0x0,
711 0x353c, 0xffffffff, 0x0,
712 0x353c, 0xffffffff, 0x0,
713 0x353c, 0xffffffff, 0x171f1e,
714 0x3538, 0xffffffff, 0x700010ff,
715 0x353c, 0xffffffff, 0x0,
716 0x353c, 0xffffffff, 0x0,
717 0x353c, 0xffffffff, 0x0,
718 0x353c, 0xffffffff, 0x0,
719 0x353c, 0xffffffff, 0x0,
720 0x353c, 0xffffffff, 0x0,
721 0x3538, 0xffffffff, 0x9ff,
722 0x3500, 0xffffffff, 0x0,
723 0x3504, 0xffffffff, 0x10000800,
724 0x3504, 0xffffffff, 0xf,
725 0x3504, 0xffffffff, 0xf,
726 0x3500, 0xffffffff, 0x4,
727 0x3504, 0xffffffff, 0x1000051e,
728 0x3504, 0xffffffff, 0xffff,
729 0x3504, 0xffffffff, 0xffff,
730 0x3500, 0xffffffff, 0x8,
731 0x3504, 0xffffffff, 0x80500,
732 0x3500, 0xffffffff, 0x12,
733 0x3504, 0xffffffff, 0x9050c,
734 0x3500, 0xffffffff, 0x1d,
735 0x3504, 0xffffffff, 0xb052c,
736 0x3500, 0xffffffff, 0x2a,
737 0x3504, 0xffffffff, 0x1053e,
738 0x3500, 0xffffffff, 0x2d,
739 0x3504, 0xffffffff, 0x10546,
740 0x3500, 0xffffffff, 0x30,
741 0x3504, 0xffffffff, 0xa054e,
742 0x3500, 0xffffffff, 0x3c,
743 0x3504, 0xffffffff, 0x1055f,
744 0x3500, 0xffffffff, 0x3f,
745 0x3504, 0xffffffff, 0x10567,
746 0x3500, 0xffffffff, 0x42,
747 0x3504, 0xffffffff, 0x1056f,
748 0x3500, 0xffffffff, 0x45,
749 0x3504, 0xffffffff, 0x10572,
750 0x3500, 0xffffffff, 0x48,
751 0x3504, 0xffffffff, 0x20575,
752 0x3500, 0xffffffff, 0x4c,
753 0x3504, 0xffffffff, 0x190801,
754 0x3500, 0xffffffff, 0x67,
755 0x3504, 0xffffffff, 0x1082a,
756 0x3500, 0xffffffff, 0x6a,
757 0x3504, 0xffffffff, 0x1b082d,
758 0x3500, 0xffffffff, 0x87,
759 0x3504, 0xffffffff, 0x310851,
760 0x3500, 0xffffffff, 0xba,
761 0x3504, 0xffffffff, 0x891,
762 0x3500, 0xffffffff, 0xbc,
763 0x3504, 0xffffffff, 0x893,
764 0x3500, 0xffffffff, 0xbe,
765 0x3504, 0xffffffff, 0x20895,
766 0x3500, 0xffffffff, 0xc2,
767 0x3504, 0xffffffff, 0x20899,
768 0x3500, 0xffffffff, 0xc6,
769 0x3504, 0xffffffff, 0x2089d,
770 0x3500, 0xffffffff, 0xca,
771 0x3504, 0xffffffff, 0x8a1,
772 0x3500, 0xffffffff, 0xcc,
773 0x3504, 0xffffffff, 0x8a3,
774 0x3500, 0xffffffff, 0xce,
775 0x3504, 0xffffffff, 0x308a5,
776 0x3500, 0xffffffff, 0xd3,
777 0x3504, 0xffffffff, 0x6d08cd,
778 0x3500, 0xffffffff, 0x142,
779 0x3504, 0xffffffff, 0x2000095a,
780 0x3504, 0xffffffff, 0x1,
781 0x3500, 0xffffffff, 0x144,
782 0x3504, 0xffffffff, 0x301f095b,
783 0x3500, 0xffffffff, 0x165,
784 0x3504, 0xffffffff, 0xc094d,
785 0x3500, 0xffffffff, 0x173,
786 0x3504, 0xffffffff, 0xf096d,
787 0x3500, 0xffffffff, 0x184,
788 0x3504, 0xffffffff, 0x15097f,
789 0x3500, 0xffffffff, 0x19b,
790 0x3504, 0xffffffff, 0xc0998,
791 0x3500, 0xffffffff, 0x1a9,
792 0x3504, 0xffffffff, 0x409a7,
793 0x3500, 0xffffffff, 0x1af,
794 0x3504, 0xffffffff, 0xcdc,
795 0x3500, 0xffffffff, 0x1b1,
796 0x3504, 0xffffffff, 0x800,
797 0x3508, 0xffffffff, 0x6c9b2000,
798 0x3510, 0xfc00, 0x2000,
799 0x3544, 0xffffffff, 0xfc0,
800 0x28d4, 0x00000100, 0x100
801};
802
803static void si_init_golden_registers(struct radeon_device *rdev)
804{
805 switch (rdev->family) {
806 case CHIP_TAHITI:
807 radeon_program_register_sequence(rdev,
808 tahiti_golden_registers,
809 (const u32)ARRAY_SIZE(tahiti_golden_registers));
810 radeon_program_register_sequence(rdev,
811 tahiti_golden_rlc_registers,
812 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
813 radeon_program_register_sequence(rdev,
814 tahiti_mgcg_cgcg_init,
815 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
816 radeon_program_register_sequence(rdev,
817 tahiti_golden_registers2,
818 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
819 break;
820 case CHIP_PITCAIRN:
821 radeon_program_register_sequence(rdev,
822 pitcairn_golden_registers,
823 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
824 radeon_program_register_sequence(rdev,
825 pitcairn_golden_rlc_registers,
826 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
827 radeon_program_register_sequence(rdev,
828 pitcairn_mgcg_cgcg_init,
829 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
830 break;
831 case CHIP_VERDE:
832 radeon_program_register_sequence(rdev,
833 verde_golden_registers,
834 (const u32)ARRAY_SIZE(verde_golden_registers));
835 radeon_program_register_sequence(rdev,
836 verde_golden_rlc_registers,
837 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
838 radeon_program_register_sequence(rdev,
839 verde_mgcg_cgcg_init,
840 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
841 radeon_program_register_sequence(rdev,
842 verde_pg_init,
843 (const u32)ARRAY_SIZE(verde_pg_init));
844 break;
845 case CHIP_OLAND:
846 radeon_program_register_sequence(rdev,
847 oland_golden_registers,
848 (const u32)ARRAY_SIZE(oland_golden_registers));
849 radeon_program_register_sequence(rdev,
850 oland_golden_rlc_registers,
851 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
852 radeon_program_register_sequence(rdev,
853 oland_mgcg_cgcg_init,
854 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
855 break;
856 default:
857 break;
858 }
859}
860
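The tables above are flat u32 arrays that si_init_golden_registers() hands to radeon_program_register_sequence() three entries at a time as (offset, and_mask, or_value) triples; a mask of 0xffffffff means a plain overwrite, while partial masks such as 0x000f0000 request a read-modify-write. A minimal sketch of what the helper plausibly does with them (the real implementation lives in the radeon core, presumably radeon_device.c, and may differ in detail):

        /* sketch: consume (reg, and_mask, or_mask) triples */
        static void program_register_sequence(struct radeon_device *rdev,
                                              const u32 *regs, const u32 size)
        {
                u32 i, reg, and_mask, or_mask, val;

                for (i = 0; i + 2 < size; i += 3) {
                        reg      = regs[i + 0];
                        and_mask = regs[i + 1];
                        or_mask  = regs[i + 2];

                        if (and_mask == 0xffffffff) {
                                val = or_mask;          /* full overwrite */
                        } else {
                                val = RREG32(reg);      /* read-modify-write */
                                val &= ~and_mask;
                                val |= or_mask;
                        }
                        WREG32(reg, val);
                }
        }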
73#define PCIE_BUS_CLK 10000 861#define PCIE_BUS_CLK 10000
74#define TCLK (PCIE_BUS_CLK / 10) 862#define TCLK (PCIE_BUS_CLK / 10)
75 863
@@ -1211,6 +1999,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1211 gb_tile_moden = 0; 1999 gb_tile_moden = 0;
1212 break; 2000 break;
1213 } 2001 }
2002 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
1214 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2003 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1215 } 2004 }
1216 } else if ((rdev->family == CHIP_VERDE) || 2005 } else if ((rdev->family == CHIP_VERDE) ||
@@ -1451,6 +2240,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1451 gb_tile_moden = 0; 2240 gb_tile_moden = 0;
1452 break; 2241 break;
1453 } 2242 }
2243 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
1454 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2244 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1455 } 2245 }
1456 } else 2246 } else
@@ -1463,7 +2253,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
1463 u32 data = INSTANCE_BROADCAST_WRITES; 2253 u32 data = INSTANCE_BROADCAST_WRITES;
1464 2254
1465 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) 2255 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1466 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; 2256 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1467 else if (se_num == 0xffffffff) 2257 else if (se_num == 0xffffffff)
1468 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); 2258 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1469 else if (sh_num == 0xffffffff) 2259 else if (sh_num == 0xffffffff)
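The one-line fix in this hunk matters because data is initialised to INSTANCE_BROADCAST_WRITES at its declaration (new line 2253 above); the old plain assignment threw that bit away in the everything-broadcast case. After the fix the register value composes all three broadcast bits:

        /* before: instance-broadcast bit silently lost */
        data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
        /* after: all three broadcast bits kept */
        data = INSTANCE_BROADCAST_WRITES | SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;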
@@ -1765,9 +2555,13 @@ static void si_gpu_init(struct radeon_device *rdev)
1765 2555
1766 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2556 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1767 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2557 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2558 WREG32(DMIF_ADDR_CALC, gb_addr_config);
1768 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2559 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1769 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 2560 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1770 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 2561 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
2562 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
2563 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2564 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
1771 2565
1772 si_tiling_mode_table_init(rdev); 2566 si_tiling_mode_table_init(rdev);
1773 2567
@@ -2538,46 +3332,6 @@ static void si_mc_program(struct radeon_device *rdev)
2538 rv515_vga_render_disable(rdev); 3332 rv515_vga_render_disable(rdev);
2539} 3333}
2540 3334
2541/* SI MC address space is 40 bits */
2542static void si_vram_location(struct radeon_device *rdev,
2543 struct radeon_mc *mc, u64 base)
2544{
2545 mc->vram_start = base;
2546 if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
2547 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
2548 mc->real_vram_size = mc->aper_size;
2549 mc->mc_vram_size = mc->aper_size;
2550 }
2551 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2552 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
2553 mc->mc_vram_size >> 20, mc->vram_start,
2554 mc->vram_end, mc->real_vram_size >> 20);
2555}
2556
2557static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2558{
2559 u64 size_af, size_bf;
2560
2561 size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
2562 size_bf = mc->vram_start & ~mc->gtt_base_align;
2563 if (size_bf > size_af) {
2564 if (mc->gtt_size > size_bf) {
2565 dev_warn(rdev->dev, "limiting GTT\n");
2566 mc->gtt_size = size_bf;
2567 }
2568 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
2569 } else {
2570 if (mc->gtt_size > size_af) {
2571 dev_warn(rdev->dev, "limiting GTT\n");
2572 mc->gtt_size = size_af;
2573 }
2574 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
2575 }
2576 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
2577 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
2578 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
2579}
2580
2581static void si_vram_gtt_location(struct radeon_device *rdev, 3335static void si_vram_gtt_location(struct radeon_device *rdev,
2582 struct radeon_mc *mc) 3336 struct radeon_mc *mc)
2583{ 3337{
@@ -2587,9 +3341,9 @@ static void si_vram_gtt_location(struct radeon_device *rdev,
2587 mc->real_vram_size = 0xFFC0000000ULL; 3341 mc->real_vram_size = 0xFFC0000000ULL;
2588 mc->mc_vram_size = 0xFFC0000000ULL; 3342 mc->mc_vram_size = 0xFFC0000000ULL;
2589 } 3343 }
2590 si_vram_location(rdev, &rdev->mc, 0); 3344 radeon_vram_location(rdev, &rdev->mc, 0);
2591 rdev->mc.gtt_base_align = 0; 3345 rdev->mc.gtt_base_align = 0;
2592 si_gtt_location(rdev, mc); 3346 radeon_gtt_location(rdev, mc);
2593} 3347}
2594 3348
2595static int si_mc_init(struct radeon_device *rdev) 3349static int si_mc_init(struct radeon_device *rdev)
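The deleted helpers were near-duplicates of the common radeon_vram_location()/radeon_gtt_location(), which is why this hunk swaps in the shared calls. For a concrete feel of the placement logic in the removed si_gtt_location() above: with VRAM clamped to 0xFFC0000000 and based at 0, and gtt_base_align set to 0 (both per the same hunk), vram_end is 0xFFBFFFFFFF, so

        size_bf = vram_start & ~align;          /* = 0: nothing below VRAM   */
        size_af = 0xFFFFFFFFFFULL - vram_end;   /* = 0x40000000: 1 GiB above */

and the GTT lands in the larger gap, starting at vram_end + 1 = 0xFFC0000000 (values worked from the removed code; illustrative only).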
@@ -4322,14 +5076,6 @@ static int si_startup(struct radeon_device *rdev)
4322 return r; 5076 return r;
4323 si_gpu_init(rdev); 5077 si_gpu_init(rdev);
4324 5078
4325#if 0
4326 r = evergreen_blit_init(rdev);
4327 if (r) {
4328 r600_blit_fini(rdev);
4329 rdev->asic->copy = NULL;
4330 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
4331 }
4332#endif
4333 /* allocate rlc buffers */ 5079 /* allocate rlc buffers */
4334 r = si_rlc_init(rdev); 5080 r = si_rlc_init(rdev);
4335 if (r) { 5081 if (r) {
@@ -4372,6 +5118,16 @@ static int si_startup(struct radeon_device *rdev)
4372 return r; 5118 return r;
4373 } 5119 }
4374 5120
5121 r = rv770_uvd_resume(rdev);
5122 if (!r) {
5123 r = radeon_fence_driver_start_ring(rdev,
5124 R600_RING_TYPE_UVD_INDEX);
5125 if (r)
5126 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
5127 }
5128 if (r)
5129 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
5130
4375 /* Enable IRQ */ 5131 /* Enable IRQ */
4376 r = si_irq_init(rdev); 5132 r = si_irq_init(rdev);
4377 if (r) { 5133 if (r) {
@@ -4429,6 +5185,18 @@ static int si_startup(struct radeon_device *rdev)
4429 if (r) 5185 if (r)
4430 return r; 5186 return r;
4431 5187
5188 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5189 if (ring->ring_size) {
5190 r = radeon_ring_init(rdev, ring, ring->ring_size,
5191 R600_WB_UVD_RPTR_OFFSET,
5192 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
5193 0, 0xfffff, RADEON_CP_PACKET2);
5194 if (!r)
5195 r = r600_uvd_init(rdev);
5196 if (r)
5197 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
5198 }
5199
4432 r = radeon_ib_pool_init(rdev); 5200 r = radeon_ib_pool_init(rdev);
4433 if (r) { 5201 if (r) {
4434 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 5202 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
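One subtlety in the UVD bring-up above: the bare `if (r)` after the `if (!r) { ... }` block sees either the rv770_uvd_resume() error or the fence-ring error from inside the block, so a failure at either step zeroes ring_size. The ring-init hunk then keys off ring_size, turning a UVD failure into a soft-disable instead of failing si_startup() outright. The shape of the pattern, condensed from the hunks above (not new code):

        r = rv770_uvd_resume(rdev);
        if (!r)
                r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
        if (r)
                rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* soft-disable */
        ...
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
        if (ring->ring_size) {  /* skipped entirely when UVD is disabled */
                ...
        }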
@@ -4455,6 +5223,9 @@ int si_resume(struct radeon_device *rdev)
4455 /* post card */ 5223 /* post card */
4456 atom_asic_init(rdev->mode_info.atom_context); 5224 atom_asic_init(rdev->mode_info.atom_context);
4457 5225
5226 /* init golden registers */
5227 si_init_golden_registers(rdev);
5228
4458 rdev->accel_working = true; 5229 rdev->accel_working = true;
4459 r = si_startup(rdev); 5230 r = si_startup(rdev);
4460 if (r) { 5231 if (r) {
@@ -4472,6 +5243,8 @@ int si_suspend(struct radeon_device *rdev)
4472 radeon_vm_manager_fini(rdev); 5243 radeon_vm_manager_fini(rdev);
4473 si_cp_enable(rdev, false); 5244 si_cp_enable(rdev, false);
4474 cayman_dma_stop(rdev); 5245 cayman_dma_stop(rdev);
5246 r600_uvd_rbc_stop(rdev);
5247 radeon_uvd_suspend(rdev);
4475 si_irq_suspend(rdev); 5248 si_irq_suspend(rdev);
4476 radeon_wb_disable(rdev); 5249 radeon_wb_disable(rdev);
4477 si_pcie_gart_disable(rdev); 5250 si_pcie_gart_disable(rdev);
@@ -4512,6 +5285,8 @@ int si_init(struct radeon_device *rdev)
4512 DRM_INFO("GPU not posted. posting now...\n"); 5285 DRM_INFO("GPU not posted. posting now...\n");
4513 atom_asic_init(rdev->mode_info.atom_context); 5286 atom_asic_init(rdev->mode_info.atom_context);
4514 } 5287 }
5288 /* init golden registers */
5289 si_init_golden_registers(rdev);
4515 /* Initialize scratch registers */ 5290 /* Initialize scratch registers */
4516 si_scratch_init(rdev); 5291 si_scratch_init(rdev);
4517 /* Initialize surface registers */ 5292 /* Initialize surface registers */
@@ -4557,6 +5332,13 @@ int si_init(struct radeon_device *rdev)
4557 ring->ring_obj = NULL; 5332 ring->ring_obj = NULL;
4558 r600_ring_init(rdev, ring, 64 * 1024); 5333 r600_ring_init(rdev, ring, 64 * 1024);
4559 5334
5335 r = radeon_uvd_init(rdev);
5336 if (!r) {
5337 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5338 ring->ring_obj = NULL;
5339 r600_ring_init(rdev, ring, 4096);
5340 }
5341
4560 rdev->ih.ring_obj = NULL; 5342 rdev->ih.ring_obj = NULL;
4561 r600_ih_ring_init(rdev, 64 * 1024); 5343 r600_ih_ring_init(rdev, 64 * 1024);
4562 5344
@@ -4594,9 +5376,6 @@ int si_init(struct radeon_device *rdev)
4594 5376
4595void si_fini(struct radeon_device *rdev) 5377void si_fini(struct radeon_device *rdev)
4596{ 5378{
4597#if 0
4598 r600_blit_fini(rdev);
4599#endif
4600 si_cp_fini(rdev); 5379 si_cp_fini(rdev);
4601 cayman_dma_fini(rdev); 5380 cayman_dma_fini(rdev);
4602 si_irq_fini(rdev); 5381 si_irq_fini(rdev);
@@ -4605,6 +5384,7 @@ void si_fini(struct radeon_device *rdev)
4605 radeon_vm_manager_fini(rdev); 5384 radeon_vm_manager_fini(rdev);
4606 radeon_ib_pool_fini(rdev); 5385 radeon_ib_pool_fini(rdev);
4607 radeon_irq_kms_fini(rdev); 5386 radeon_irq_kms_fini(rdev);
5387 radeon_uvd_fini(rdev);
4608 si_pcie_gart_fini(rdev); 5388 si_pcie_gart_fini(rdev);
4609 r600_vram_scratch_fini(rdev); 5389 r600_vram_scratch_fini(rdev);
4610 radeon_gem_fini(rdev); 5390 radeon_gem_fini(rdev);
@@ -4634,3 +5414,94 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
4634 mutex_unlock(&rdev->gpu_clock_mutex); 5414 mutex_unlock(&rdev->gpu_clock_mutex);
4635 return clock; 5415 return clock;
4636} 5416}
5417
5418int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5419{
5420 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
5421 int r;
5422
5423 /* bypass vclk and dclk with bclk */
5424 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5425 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
5426 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
5427
5428 /* put PLL in bypass mode */
5429 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
5430
5431 if (!vclk || !dclk) {
5432 /* keep the Bypass mode, put PLL to sleep */
5433 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
5434 return 0;
5435 }
5436
5437 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
5438 16384, 0x03FFFFFF, 0, 128, 5,
5439 &fb_div, &vclk_div, &dclk_div);
5440 if (r)
5441 return r;
5442
5443 /* set RESET_ANTI_MUX to 0 */
5444 WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
5445
5446 /* set VCO_MODE to 1 */
5447 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
5448
5449 /* toggle UPLL_SLEEP to 1 then back to 0 */
5450 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
5451 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
5452
5453 /* deassert UPLL_RESET */
5454 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
5455
5456 mdelay(1);
5457
5458 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5459 if (r)
5460 return r;
5461
5462 /* assert UPLL_RESET again */
5463 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
5464
5465 /* disable spread spectrum. */
5466 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
5467
5468 /* set feedback divider */
5469 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
5470
5471 /* set ref divider to 0 */
5472 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
5473
5474 if (fb_div < 307200)
5475 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
5476 else
5477 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
5478
5479 /* set PDIV_A and PDIV_B */
5480 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5481 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
5482 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
5483
5484 /* give the PLL some time to settle */
5485 mdelay(15);
5486
5487 /* deassert PLL_RESET */
5488 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
5489
5490 mdelay(15);
5491
5492 /* switch from bypass mode to normal mode */
5493 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
5494
5495 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5496 if (r)
5497 return r;
5498
5499 /* switch VCLK and DCLK selection */
5500 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5501 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
5502 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
5503
5504 mdelay(100);
5505
5506 return 0;
5507}
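Nearly every line of si_set_uvd_clocks() goes through WREG32_P(). Its assumed shape, from the radeon register helpers in radeon.h, is a read-modify-write in which mask selects the bits to preserve and val supplies the rest:

        /* assumed definition of the helper used throughout the function above */
        #define WREG32_P(reg, val, mask)                 \
                do {                                     \
                        uint32_t tmp_ = RREG32(reg);     \
                        tmp_ &= (mask);                  \
                        tmp_ |= ((val) & ~(mask));       \
                        WREG32(reg, tmp_);               \
                } while (0)

Read that way, WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK) sets only the sleep bit, and WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK) clears only the reset bit, leaving everything else untouched.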
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 23fc08fc8e7f..222877ba6cf5 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -29,6 +29,35 @@
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31 31
32/* discrete uvd clocks */
33#define CG_UPLL_FUNC_CNTL 0x634
34# define UPLL_RESET_MASK 0x00000001
35# define UPLL_SLEEP_MASK 0x00000002
36# define UPLL_BYPASS_EN_MASK 0x00000004
37# define UPLL_CTLREQ_MASK 0x00000008
38# define UPLL_VCO_MODE_MASK 0x00000600
39# define UPLL_REF_DIV_MASK 0x003F0000
40# define UPLL_CTLACK_MASK 0x40000000
41# define UPLL_CTLACK2_MASK 0x80000000
42#define CG_UPLL_FUNC_CNTL_2 0x638
43# define UPLL_PDIV_A(x) ((x) << 0)
44# define UPLL_PDIV_A_MASK 0x0000007F
45# define UPLL_PDIV_B(x) ((x) << 8)
46# define UPLL_PDIV_B_MASK 0x00007F00
47# define VCLK_SRC_SEL(x) ((x) << 20)
48# define VCLK_SRC_SEL_MASK 0x01F00000
49# define DCLK_SRC_SEL(x) ((x) << 25)
50# define DCLK_SRC_SEL_MASK 0x3E000000
51#define CG_UPLL_FUNC_CNTL_3 0x63C
52# define UPLL_FB_DIV(x) ((x) << 0)
53# define UPLL_FB_DIV_MASK 0x01FFFFFF
54#define CG_UPLL_FUNC_CNTL_4 0x644
55# define UPLL_SPARE_ISPARE9 0x00020000
56#define CG_UPLL_FUNC_CNTL_5 0x648
57# define RESET_ANTI_MUX_MASK 0x00000200
58#define CG_UPLL_SPREAD_SPECTRUM 0x650
59# define SSEN_MASK 0x00000001
60
32#define CG_MULT_THERMAL_STATUS 0x714 61#define CG_MULT_THERMAL_STATUS 0x714
33#define ASIC_MAX_TEMP(x) ((x) << 0) 62#define ASIC_MAX_TEMP(x) ((x) << 0)
34#define ASIC_MAX_TEMP_MASK 0x000001ff 63#define ASIC_MAX_TEMP_MASK 0x000001ff
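The new UPLL field macros pair each shifted setter with a companion mask, e.g. VCLK_SRC_SEL(x) places a 5-bit selector at bits 24:20 to match VCLK_SRC_SEL_MASK = 0x01F00000. A few illustrative consistency checks (these would need to sit inside a function to use BUILD_BUG_ON; shown only to make the pairing explicit):

        BUILD_BUG_ON(VCLK_SRC_SEL(0x1f) & ~VCLK_SRC_SEL_MASK);     /* bits 24:20 */
        BUILD_BUG_ON(DCLK_SRC_SEL(0x1f) & ~DCLK_SRC_SEL_MASK);     /* bits 29:25 */
        BUILD_BUG_ON(UPLL_PDIV_A(0x7f) & ~UPLL_PDIV_A_MASK);       /* bits 6:0   */
        BUILD_BUG_ON(UPLL_FB_DIV(0x01ffffff) & ~UPLL_FB_DIV_MASK); /* bits 24:0  */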
@@ -65,6 +94,8 @@
65 94
66#define DMIF_ADDR_CONFIG 0xBD4 95#define DMIF_ADDR_CONFIG 0xBD4
67 96
97#define DMIF_ADDR_CALC 0xC00
98
68#define SRBM_STATUS 0xE50 99#define SRBM_STATUS 0xE50
69#define GRBM_RQ_PENDING (1 << 5) 100#define GRBM_RQ_PENDING (1 << 5)
70#define VMC_BUSY (1 << 8) 101#define VMC_BUSY (1 << 8)
@@ -798,6 +829,15 @@
798# define THREAD_TRACE_FINISH (55 << 0) 829# define THREAD_TRACE_FINISH (55 << 0)
799 830
800/* 831/*
832 * UVD
833 */
834#define UVD_UDEC_ADDR_CONFIG 0xEF4C
835#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
836#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
837#define UVD_RBC_RB_RPTR 0xF690
838#define UVD_RBC_RB_WPTR 0xF694
839
840/*
801 * PM4 841 * PM4
802 */ 842 */
803#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 843#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index d917a411ca85..7dff49ed66e7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -494,10 +494,10 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
494 494
495 if (event) { 495 if (event) {
496 event->pipe = 0; 496 event->pipe = 0;
497 drm_vblank_get(dev, 0);
497 spin_lock_irqsave(&dev->event_lock, flags); 498 spin_lock_irqsave(&dev->event_lock, flags);
498 scrtc->event = event; 499 scrtc->event = event;
499 spin_unlock_irqrestore(&dev->event_lock, flags); 500 spin_unlock_irqrestore(&dev->event_lock, flags);
500 drm_vblank_get(dev, 0);
501 } 501 }
502 502
503 return 0; 503 return 0;
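Moving drm_vblank_get() ahead of publishing the event closes a window: once scrtc->event is visible under event_lock, the flip-completion interrupt may consume it and drop a vblank reference that was never taken. The likely orderings, side by side (illustrative timeline, not code from the patch):

        /* old order (racy)                       new order
         *   lock; scrtc->event = event; unlock;    drm_vblank_get(dev, 0);
         *     <-- IRQ fires here, sends event,     lock; scrtc->event = event;
         *         puts a vblank ref never taken    unlock;
         *   drm_vblank_get(dev, 0);                  <-- IRQ can fire safely now
         */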
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
deleted file mode 100644
index 80f73d1315d0..000000000000
--- a/drivers/gpu/drm/tegra/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1ccflags-y := -Iinclude/drm
2ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
3
4tegra-drm-y := drm.o fb.o dc.o host1x.o
5tegra-drm-y += output.o rgb.o hdmi.o
6
7obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
deleted file mode 100644
index 9d452df5bcad..000000000000
--- a/drivers/gpu/drm/tegra/drm.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include "drm.h"
18
19#define DRIVER_NAME "tegra"
20#define DRIVER_DESC "NVIDIA Tegra graphics"
21#define DRIVER_DATE "20120330"
22#define DRIVER_MAJOR 0
23#define DRIVER_MINOR 0
24#define DRIVER_PATCHLEVEL 0
25
26static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
27{
28 struct device *dev = drm->dev;
29 struct host1x *host1x;
30 int err;
31
32 host1x = dev_get_drvdata(dev);
33 drm->dev_private = host1x;
34 host1x->drm = drm;
35
36 drm_mode_config_init(drm);
37
38 err = host1x_drm_init(host1x, drm);
39 if (err < 0)
40 return err;
41
42 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
43 if (err < 0)
44 return err;
45
46 err = tegra_drm_fb_init(drm);
47 if (err < 0)
48 return err;
49
50 drm_kms_helper_poll_init(drm);
51
52 return 0;
53}
54
55static int tegra_drm_unload(struct drm_device *drm)
56{
57 drm_kms_helper_poll_fini(drm);
58 tegra_drm_fb_exit(drm);
59
60 drm_mode_config_cleanup(drm);
61
62 return 0;
63}
64
65static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
66{
67 return 0;
68}
69
70static void tegra_drm_lastclose(struct drm_device *drm)
71{
72 struct host1x *host1x = drm->dev_private;
73
74 drm_fbdev_cma_restore_mode(host1x->fbdev);
75}
76
77static struct drm_ioctl_desc tegra_drm_ioctls[] = {
78};
79
80static const struct file_operations tegra_drm_fops = {
81 .owner = THIS_MODULE,
82 .open = drm_open,
83 .release = drm_release,
84 .unlocked_ioctl = drm_ioctl,
85 .mmap = drm_gem_cma_mmap,
86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .read = drm_read,
89#ifdef CONFIG_COMPAT
90 .compat_ioctl = drm_compat_ioctl,
91#endif
92 .llseek = noop_llseek,
93};
94
95static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
96{
97 struct drm_crtc *crtc;
98
99 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
100 struct tegra_dc *dc = to_tegra_dc(crtc);
101
102 if (dc->pipe == pipe)
103 return crtc;
104 }
105
106 return NULL;
107}
108
109static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
110{
111 /* TODO: implement real hardware counter using syncpoints */
112 return drm_vblank_count(dev, crtc);
113}
114
115static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
116{
117 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
118 struct tegra_dc *dc = to_tegra_dc(crtc);
119
120 if (!crtc)
121 return -ENODEV;
122
123 tegra_dc_enable_vblank(dc);
124
125 return 0;
126}
127
128static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
129{
130 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
131 struct tegra_dc *dc = to_tegra_dc(crtc);
132
133 if (crtc)
134 tegra_dc_disable_vblank(dc);
135}
136
137static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
138{
139 struct drm_crtc *crtc;
140
141 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
142 tegra_dc_cancel_page_flip(crtc, file);
143}
144
145#ifdef CONFIG_DEBUG_FS
146static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
147{
148 struct drm_info_node *node = (struct drm_info_node *)s->private;
149 struct drm_device *drm = node->minor->dev;
150 struct drm_framebuffer *fb;
151
152 mutex_lock(&drm->mode_config.fb_lock);
153
154 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
155 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
156 fb->base.id, fb->width, fb->height, fb->depth,
157 fb->bits_per_pixel,
158 atomic_read(&fb->refcount.refcount));
159 }
160
161 mutex_unlock(&drm->mode_config.fb_lock);
162
163 return 0;
164}
165
166static struct drm_info_list tegra_debugfs_list[] = {
167 { "framebuffers", tegra_debugfs_framebuffers, 0 },
168};
169
170static int tegra_debugfs_init(struct drm_minor *minor)
171{
172 return drm_debugfs_create_files(tegra_debugfs_list,
173 ARRAY_SIZE(tegra_debugfs_list),
174 minor->debugfs_root, minor);
175}
176
177static void tegra_debugfs_cleanup(struct drm_minor *minor)
178{
179 drm_debugfs_remove_files(tegra_debugfs_list,
180 ARRAY_SIZE(tegra_debugfs_list), minor);
181}
182#endif
183
184struct drm_driver tegra_drm_driver = {
185 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
186 .load = tegra_drm_load,
187 .unload = tegra_drm_unload,
188 .open = tegra_drm_open,
189 .preclose = tegra_drm_preclose,
190 .lastclose = tegra_drm_lastclose,
191
192 .get_vblank_counter = tegra_drm_get_vblank_counter,
193 .enable_vblank = tegra_drm_enable_vblank,
194 .disable_vblank = tegra_drm_disable_vblank,
195
196#if defined(CONFIG_DEBUG_FS)
197 .debugfs_init = tegra_debugfs_init,
198 .debugfs_cleanup = tegra_debugfs_cleanup,
199#endif
200
201 .gem_free_object = drm_gem_cma_free_object,
202 .gem_vm_ops = &drm_gem_cma_vm_ops,
203 .dumb_create = drm_gem_cma_dumb_create,
204 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
205 .dumb_destroy = drm_gem_cma_dumb_destroy,
206
207 .ioctls = tegra_drm_ioctls,
208 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
209 .fops = &tegra_drm_fops,
210
211 .name = DRIVER_NAME,
212 .desc = DRIVER_DESC,
213 .date = DRIVER_DATE,
214 .major = DRIVER_MAJOR,
215 .minor = DRIVER_MINOR,
216 .patchlevel = DRIVER_PATCHLEVEL,
217};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
deleted file mode 100644
index 03914953cb1c..000000000000
--- a/drivers/gpu/drm/tegra/fb.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "drm.h"
11
12static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
13{
14 struct host1x *host1x = drm->dev_private;
15
16 drm_fbdev_cma_hotplug_event(host1x->fbdev);
17}
18
19static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
20 .fb_create = drm_fb_cma_create,
21 .output_poll_changed = tegra_drm_fb_output_poll_changed,
22};
23
24int tegra_drm_fb_init(struct drm_device *drm)
25{
26 struct host1x *host1x = drm->dev_private;
27 struct drm_fbdev_cma *fbdev;
28
29 drm->mode_config.min_width = 0;
30 drm->mode_config.min_height = 0;
31
32 drm->mode_config.max_width = 4096;
33 drm->mode_config.max_height = 4096;
34
35 drm->mode_config.funcs = &tegra_drm_mode_funcs;
36
37 fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
38 drm->mode_config.num_connector);
39 if (IS_ERR(fbdev))
40 return PTR_ERR(fbdev);
41
42 host1x->fbdev = fbdev;
43
44 return 0;
45}
46
47void tegra_drm_fb_exit(struct drm_device *drm)
48{
49 struct host1x *host1x = drm->dev_private;
50
51 drm_fbdev_cma_fini(host1x->fbdev);
52}
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
deleted file mode 100644
index 92e25a7e00ea..000000000000
--- a/drivers/gpu/drm/tegra/host1x.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include "drm.h"
17
18struct host1x_drm_client {
19 struct host1x_client *client;
20 struct device_node *np;
21 struct list_head list;
22};
23
24static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
25{
26 struct host1x_drm_client *client;
27
28 client = kzalloc(sizeof(*client), GFP_KERNEL);
29 if (!client)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&client->list);
33 client->np = of_node_get(np);
34
35 list_add_tail(&client->list, &host1x->drm_clients);
36
37 return 0;
38}
39
40static int host1x_activate_drm_client(struct host1x *host1x,
41 struct host1x_drm_client *drm,
42 struct host1x_client *client)
43{
44 mutex_lock(&host1x->drm_clients_lock);
45 list_del_init(&drm->list);
46 list_add_tail(&drm->list, &host1x->drm_active);
47 drm->client = client;
48 mutex_unlock(&host1x->drm_clients_lock);
49
50 return 0;
51}
52
53static int host1x_remove_drm_client(struct host1x *host1x,
54 struct host1x_drm_client *client)
55{
56 mutex_lock(&host1x->drm_clients_lock);
57 list_del_init(&client->list);
58 mutex_unlock(&host1x->drm_clients_lock);
59
60 of_node_put(client->np);
61 kfree(client);
62
63 return 0;
64}
65
66static int host1x_parse_dt(struct host1x *host1x)
67{
68 static const char * const compat[] = {
69 "nvidia,tegra20-dc",
70 "nvidia,tegra20-hdmi",
71 "nvidia,tegra30-dc",
72 "nvidia,tegra30-hdmi",
73 };
74 unsigned int i;
75 int err;
76
77 for (i = 0; i < ARRAY_SIZE(compat); i++) {
78 struct device_node *np;
79
80 for_each_child_of_node(host1x->dev->of_node, np) {
81 if (of_device_is_compatible(np, compat[i]) &&
82 of_device_is_available(np)) {
83 err = host1x_add_drm_client(host1x, np);
84 if (err < 0)
85 return err;
86 }
87 }
88 }
89
90 return 0;
91}
92
93static int tegra_host1x_probe(struct platform_device *pdev)
94{
95 struct host1x *host1x;
96 struct resource *regs;
97 int err;
98
99 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
100 if (!host1x)
101 return -ENOMEM;
102
103 mutex_init(&host1x->drm_clients_lock);
104 INIT_LIST_HEAD(&host1x->drm_clients);
105 INIT_LIST_HEAD(&host1x->drm_active);
106 mutex_init(&host1x->clients_lock);
107 INIT_LIST_HEAD(&host1x->clients);
108 host1x->dev = &pdev->dev;
109
110 err = host1x_parse_dt(host1x);
111 if (err < 0) {
112 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
113 return err;
114 }
115
116 host1x->clk = devm_clk_get(&pdev->dev, NULL);
117 if (IS_ERR(host1x->clk))
118 return PTR_ERR(host1x->clk);
119
120 err = clk_prepare_enable(host1x->clk);
121 if (err < 0)
122 return err;
123
124 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
125 if (!regs) {
126 err = -ENXIO;
127 goto err;
128 }
129
130 err = platform_get_irq(pdev, 0);
131 if (err < 0)
132 goto err;
133
134 host1x->syncpt = err;
135
136 err = platform_get_irq(pdev, 1);
137 if (err < 0)
138 goto err;
139
140 host1x->irq = err;
141
142 host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
143 if (IS_ERR(host1x->regs)) {
144 err = PTR_ERR(host1x->regs);
145 goto err;
146 }
147
148 platform_set_drvdata(pdev, host1x);
149
150 return 0;
151
152err:
153 clk_disable_unprepare(host1x->clk);
154 return err;
155}
156
157static int tegra_host1x_remove(struct platform_device *pdev)
158{
159 struct host1x *host1x = platform_get_drvdata(pdev);
160
161 clk_disable_unprepare(host1x->clk);
162
163 return 0;
164}
165
166int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
167{
168 struct host1x_client *client;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_init) {
174 int err = client->ops->drm_init(client, drm);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM setup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 return err;
180 }
181 }
182 }
183
184 mutex_unlock(&host1x->clients_lock);
185
186 return 0;
187}
188
189int host1x_drm_exit(struct host1x *host1x)
190{
191 struct platform_device *pdev = to_platform_device(host1x->dev);
192 struct host1x_client *client;
193
194 if (!host1x->drm)
195 return 0;
196
197 mutex_lock(&host1x->clients_lock);
198
199 list_for_each_entry_reverse(client, &host1x->clients, list) {
200 if (client->ops && client->ops->drm_exit) {
201 int err = client->ops->drm_exit(client);
202 if (err < 0) {
203 dev_err(host1x->dev,
204 "DRM cleanup failed for %s: %d\n",
205 dev_name(client->dev), err);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&host1x->clients_lock);
212
213 drm_platform_exit(&tegra_drm_driver, pdev);
214 host1x->drm = NULL;
215
216 return 0;
217}
218
219int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 mutex_lock(&host1x->clients_lock);
225 list_add_tail(&client->list, &host1x->clients);
226 mutex_unlock(&host1x->clients_lock);
227
228 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
229 if (drm->np == client->dev->of_node)
230 host1x_activate_drm_client(host1x, drm, client);
231
232 if (list_empty(&host1x->drm_clients)) {
233 struct platform_device *pdev = to_platform_device(host1x->dev);
234
235 err = drm_platform_init(&tegra_drm_driver, pdev);
236 if (err < 0) {
237 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
238 return err;
239 }
240 }
241
242 client->host1x = host1x;
243
244 return 0;
245}
246
247int host1x_unregister_client(struct host1x *host1x,
248 struct host1x_client *client)
249{
250 struct host1x_drm_client *drm, *tmp;
251 int err;
252
253 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
254 if (drm->client == client) {
255 err = host1x_drm_exit(host1x);
256 if (err < 0) {
257 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
258 err);
259 return err;
260 }
261
262 host1x_remove_drm_client(host1x, drm);
263 break;
264 }
265 }
266
267 mutex_lock(&host1x->clients_lock);
268 list_del_init(&client->list);
269 mutex_unlock(&host1x->clients_lock);
270
271 return 0;
272}
273
274static struct of_device_id tegra_host1x_of_match[] = {
275 { .compatible = "nvidia,tegra30-host1x", },
276 { .compatible = "nvidia,tegra20-host1x", },
277 { },
278};
279MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
280
281struct platform_driver tegra_host1x_driver = {
282 .driver = {
283 .name = "tegra-host1x",
284 .owner = THIS_MODULE,
285 .of_match_table = tegra_host1x_of_match,
286 },
287 .probe = tegra_host1x_probe,
288 .remove = tegra_host1x_remove,
289};
290
291static int __init tegra_host1x_init(void)
292{
293 int err;
294
295 err = platform_driver_register(&tegra_host1x_driver);
296 if (err < 0)
297 return err;
298
299 err = platform_driver_register(&tegra_dc_driver);
300 if (err < 0)
301 goto unregister_host1x;
302
303 err = platform_driver_register(&tegra_hdmi_driver);
304 if (err < 0)
305 goto unregister_dc;
306
307 return 0;
308
309unregister_dc:
310 platform_driver_unregister(&tegra_dc_driver);
311unregister_host1x:
312 platform_driver_unregister(&tegra_host1x_driver);
313 return err;
314}
315module_init(tegra_host1x_init);
316
317static void __exit tegra_host1x_exit(void)
318{
319 platform_driver_unregister(&tegra_hdmi_driver);
320 platform_driver_unregister(&tegra_dc_driver);
321 platform_driver_unregister(&tegra_host1x_driver);
322}
323module_exit(tegra_host1x_exit);
324
325MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
326MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
327MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index deda656b10e7..7d2eefe94bf7 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,4 +1,7 @@
1ccflags-y := -Iinclude/drm -Werror 1ccflags-y := -Iinclude/drm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
2 5
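The conditional keeps -Werror for default builds but backs it off as soon as the user passes any warning-related flag, since $(findstring -W,$(EXTRA_CFLAGS)) is non-empty exactly when EXTRA_CFLAGS contains "-W". Illustrative invocations (the extra flag is only an example):

        # default build: -Werror stays on
        make drivers/gpu/drm/tilcdc/

        # user-supplied -W... flag: findstring matches, -Werror is dropped
        make EXTRA_CFLAGS=-Wno-error drivers/gpu/drm/tilcdc/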
3tilcdc-y := \ 6tilcdc-y := \
4 tilcdc_crtc.o \ 7 tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c5b592dc1970..2b5461bcd9fb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev)
75 mod->funcs->modeset_init(mod, dev); 75 mod->funcs->modeset_init(mod, dev);
76 } 76 }
77 77
78 if ((priv->num_encoders = 0) || (priv->num_connectors == 0)) { 78 if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
79 /* oh nos! */ 79 /* oh nos! */
80 dev_err(dev->dev, "no encoders/connectors found\n"); 80 dev_err(dev->dev, "no encoders/connectors found\n");
81 return -ENXIO; 81 return -ENXIO;
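The old condition assigned instead of comparing: (priv->num_encoders = 0) stores 0 and then evaluates to 0, so that half of the check could never fire, and it clobbered the encoder count as a side effect. gcc's -Wparentheses normally flags an assignment used as a truth value, but wrapping the assignment in an extra pair of parentheses, as here, is precisely the idiom that silences the warning, which is plausibly how the bug survived. A minimal reproduction sketch:

        int n = 5;

        if (n = 0)      /* gcc: "suggest parentheses around assignment..." */
                return -ENXIO;
        if ((n = 0))    /* same bug, but the doubled parens mute the warning */
                return -ENXIO;
        /* either way n is now 0: the count is lost */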
@@ -299,11 +299,10 @@ static int tilcdc_irq_postinstall(struct drm_device *dev)
299 struct tilcdc_drm_private *priv = dev->dev_private; 299 struct tilcdc_drm_private *priv = dev->dev_private;
300 300
301 /* enable FIFO underflow irq: */ 301 /* enable FIFO underflow irq: */
302 if (priv->rev == 1) { 302 if (priv->rev == 1)
303 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA); 303 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
304 } else { 304 else
305 tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA); 305 tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
306 }
307 306
308 return 0; 307 return 0;
309} 308}
@@ -363,7 +362,7 @@ static const struct {
363 uint8_t rev; 362 uint8_t rev;
364 uint8_t save; 363 uint8_t save;
365 uint32_t reg; 364 uint32_t reg;
366} registers[] = { 365} registers[] = {
367#define REG(rev, save, reg) { #reg, rev, save, reg } 366#define REG(rev, save, reg) { #reg, rev, save, reg }
368 /* exists in revision 1: */ 367 /* exists in revision 1: */
369 REG(1, false, LCDC_PID_REG), 368 REG(1, false, LCDC_PID_REG),
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 90ee49786372..09176654fddb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -305,7 +305,7 @@ static const struct tilcdc_module_ops panel_module_ops = {
305 */ 305 */
306 306
307/* maybe move this somewhere common if it is needed by other outputs? */ 307/* maybe move this somewhere common if it is needed by other outputs? */
308static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np) 308static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
309{ 309{
310 struct device_node *info_np; 310 struct device_node *info_np;
311 struct tilcdc_panel_info *info; 311 struct tilcdc_panel_info *info;
@@ -413,7 +413,6 @@ static struct of_device_id panel_of_match[] = {
413 { .compatible = "ti,tilcdc,panel", }, 413 { .compatible = "ti,tilcdc,panel", },
414 { }, 414 { },
415}; 415};
416MODULE_DEVICE_TABLE(of, panel_of_match);
417 416
418struct platform_driver panel_driver = { 417struct platform_driver panel_driver = {
419 .probe = panel_probe, 418 .probe = panel_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 568dc1c08e6c..db1d2fc9dfb5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -353,7 +353,6 @@ static struct of_device_id slave_of_match[] = {
353 { .compatible = "ti,tilcdc,slave", }, 353 { .compatible = "ti,tilcdc,slave", },
354 { }, 354 { },
355}; 355};
356MODULE_DEVICE_TABLE(of, slave_of_match);
357 356
358struct platform_driver slave_driver = { 357struct platform_driver slave_driver = {
359 .probe = slave_probe, 358 .probe = slave_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 58d487ba2414..a36788fbcd98 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -396,7 +396,6 @@ static struct of_device_id tfp410_of_match[] = {
396 { .compatible = "ti,tilcdc,tfp410", }, 396 { .compatible = "ti,tilcdc,tfp410", },
397 { }, 397 { },
398}; 398};
399MODULE_DEVICE_TABLE(of, tfp410_of_match);
400 399
401struct platform_driver tfp410_driver = { 400struct platform_driver tfp410_driver = {
402 .probe = tfp410_probe, 401 .probe = tfp410_probe,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 8be35c809c7b..af894584dd90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
86 mutex_lock(&man->io_reserve_mutex); 86 mutex_lock(&man->io_reserve_mutex);
87 return 0; 87 return 0;
88} 88}
89EXPORT_SYMBOL(ttm_mem_io_lock);
89 90
90void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) 91void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
91{ 92{
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
94 95
95 mutex_unlock(&man->io_reserve_mutex); 96 mutex_unlock(&man->io_reserve_mutex);
96} 97}
98EXPORT_SYMBOL(ttm_mem_io_unlock);
97 99
98static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) 100static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
99{ 101{
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
111 return 0; 113 return 0;
112} 114}
113 115
114static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, 116
115 struct ttm_mem_reg *mem) 117int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
118 struct ttm_mem_reg *mem)
116{ 119{
117 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 120 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
118 int ret = 0; 121 int ret = 0;
@@ -134,9 +137,10 @@ retry:
134 } 137 }
135 return ret; 138 return ret;
136} 139}
140EXPORT_SYMBOL(ttm_mem_io_reserve);
137 141
138static void ttm_mem_io_free(struct ttm_bo_device *bdev, 142void ttm_mem_io_free(struct ttm_bo_device *bdev,
139 struct ttm_mem_reg *mem) 143 struct ttm_mem_reg *mem)
140{ 144{
141 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 145 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
142 146
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
149 bdev->driver->io_mem_free(bdev, mem); 153 bdev->driver->io_mem_free(bdev, mem);
150 154
151} 155}
156EXPORT_SYMBOL(ttm_mem_io_free);
152 157
153int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) 158int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
154{ 159{
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 74705f329d99..3df9f16b041c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -147,7 +147,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
147 147
148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
149 bo->vm_node->start - vma->vm_pgoff; 149 bo->vm_node->start - vma->vm_pgoff;
150 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + 150 page_last = vma_pages(vma) +
151 bo->vm_node->start - vma->vm_pgoff; 151 bo->vm_node->start - vma->vm_pgoff;
152 152
153 if (unlikely(page_offset >= bo->num_pages)) { 153 if (unlikely(page_offset >= bo->num_pages)) {
@@ -258,7 +258,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
258 258
259 read_lock(&bdev->vm_lock); 259 read_lock(&bdev->vm_lock);
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); 261 vma_pages(vma));
262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref)) 262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 bo = NULL; 263 bo = NULL;
264 read_unlock(&bdev->vm_lock); 264 read_unlock(&bdev->vm_lock);
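vma_pages() is the canonical spelling of the open-coded expression being replaced; as defined in include/linux/mm.h it is exactly:

        static inline unsigned long vma_pages(struct vm_area_struct *vma)
        {
                return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        }

so both hunks are behaviour-preserving cleanups.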
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 9f4be3d4a02e..dc0c065f8d39 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -482,7 +482,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
482 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper; 482 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
483 struct drm_device *dev = ufbdev->helper.dev; 483 struct drm_device *dev = ufbdev->helper.dev;
484 struct fb_info *info; 484 struct fb_info *info;
485 struct device *device = &dev->usbdev->dev; 485 struct device *device = dev->dev;
486 struct drm_framebuffer *fb; 486 struct drm_framebuffer *fb;
487 struct drm_mode_fb_cmd2 mode_cmd; 487 struct drm_mode_fb_cmd2 mode_cmd;
488 struct udl_gem_object *obj; 488 struct udl_gem_object *obj;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3816270ba49b..ef034fa3e6f5 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
303 if (IS_ERR(attach)) 303 if (IS_ERR(attach))
304 return ERR_CAST(attach); 304 return ERR_CAST(attach);
305 305
306 get_dma_buf(dma_buf);
307
306 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 308 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
307 if (IS_ERR(sg)) { 309 if (IS_ERR(sg)) {
308 ret = PTR_ERR(sg); 310 ret = PTR_ERR(sg);
@@ -322,5 +324,7 @@ fail_unmap:
322 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); 324 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
323fail_detach: 325fail_detach:
324 dma_buf_detach(dma_buf, attach); 326 dma_buf_detach(dma_buf, attach);
327 dma_buf_put(dma_buf);
328
325 return ERR_PTR(ret); 329 return ERR_PTR(ret);
326} 330}
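Background for the two added lines: dma_buf_attach() does not take its own reference on the buffer, and the caller of the import path is free to drop its reference once udl_gem_prime_import() returns, so an importer that keeps the attachment alive must pin the dma_buf itself. Condensed from the hunks above, the balanced pattern is:

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);   /* ref held for the attachment's lifetime */
        ...
        /* every unwind path mirrors it: */
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);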
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
new file mode 100644
index 000000000000..ccfd42b23606
--- /dev/null
+++ b/drivers/gpu/host1x/Kconfig
@@ -0,0 +1,24 @@
1config TEGRA_HOST1X
2 tristate "NVIDIA Tegra host1x driver"
3 depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
4 help
5 Driver for the NVIDIA Tegra host1x hardware.
6
7 The Tegra host1x module is the DMA engine for register access to
8 Tegra's graphics- and multimedia-related modules. The modules served
9 by host1x are referred to as clients. host1x includes some other
10 functionality, such as synchronization.
11
12if TEGRA_HOST1X
13
14config TEGRA_HOST1X_FIREWALL
15 bool "Enable HOST1X security firewall"
16 default y
17 help
18	  Say yes if the kernel should protect command streams from tampering.
19
20 If unsure, choose Y.
21
22source "drivers/gpu/host1x/drm/Kconfig"
23
24endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
new file mode 100644
index 000000000000..3b037b6e0298
--- /dev/null
+++ b/drivers/gpu/host1x/Makefile
@@ -0,0 +1,20 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-y = \
4 syncpt.o \
5 dev.o \
6 intr.o \
7 cdma.o \
8 channel.o \
9 job.o \
10 debug.o \
11 hw/host1x01.o
12
13ccflags-y += -Iinclude/drm
14ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
15
16host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
17host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
18host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
19host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
20obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
new file mode 100644
index 000000000000..de72172d3b5f
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.c
@@ -0,0 +1,491 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19
20#include <asm/cacheflush.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/kfifo.h>
26#include <linux/slab.h>
27#include <trace/events/host1x.h>
28
29#include "cdma.h"
30#include "channel.h"
31#include "dev.h"
32#include "debug.h"
33#include "host1x_bo.h"
34#include "job.h"
35
36/*
37 * push_buffer
38 *
39 * The push buffer is a circular array of words to be fetched by command DMA.
40 * Note that it works slightly differently to the sync queue; fence == pos
41 * means that the push buffer is full, not empty.
42 */
43
44#define HOST1X_PUSHBUFFER_SLOTS 512
45
46/*
47 * Clean up push buffer resources
48 */
49static void host1x_pushbuffer_destroy(struct push_buffer *pb)
50{
51 struct host1x_cdma *cdma = pb_to_cdma(pb);
52 struct host1x *host1x = cdma_to_host1x(cdma);
53
54 if (pb->phys != 0)
55 dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
56 pb->mapped, pb->phys);
57
58 pb->mapped = NULL;
59 pb->phys = 0;
60}
61
62/*
63 * Init push buffer resources
64 */
65static int host1x_pushbuffer_init(struct push_buffer *pb)
66{
67 struct host1x_cdma *cdma = pb_to_cdma(pb);
68 struct host1x *host1x = cdma_to_host1x(cdma);
69
70 pb->mapped = NULL;
71 pb->phys = 0;
72 pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
73
74 /* initialize buffer pointers */
75 pb->fence = pb->size_bytes - 8;
76 pb->pos = 0;
77
78 /* allocate and map pushbuffer memory */
79 pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
80 &pb->phys, GFP_KERNEL);
81 if (!pb->mapped)
82 goto fail;
83
84 host1x_hw_pushbuffer_init(host1x, pb);
85
86 return 0;
87
88fail:
89 host1x_pushbuffer_destroy(pb);
90 return -ENOMEM;
91}
92
93/*
94 * Push two words to the push buffer
95 * Caller must ensure push buffer is not full
96 */
97static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
98{
99 u32 pos = pb->pos;
100 u32 *p = (u32 *)((u32)pb->mapped + pos);
101 WARN_ON(pos == pb->fence);
102 *(p++) = op1;
103 *(p++) = op2;
104 pb->pos = (pos + 8) & (pb->size_bytes - 1);
105}
106
107/*
108 * Pop a number of two word slots from the push buffer
109 * Caller must ensure push buffer is not empty
110 */
111static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
112{
113 /* Advance the next write position */
114 pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
115}
116
117/*
118 * Return the number of two word slots free in the push buffer
119 */
120static u32 host1x_pushbuffer_space(struct push_buffer *pb)
121{
122 return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
123}
124
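/*
 * Worked example of the cursor arithmetic above, with the default
 * HOST1X_PUSHBUFFER_SLOTS = 512, so size_bytes = 4096 (a power of two)
 * and "& (size_bytes - 1)" wraps both cursors:
 *
 *   init:       pos = 0, fence = 4088  ->  space = ((4088 - 0) & 4095) / 8 = 511
 *   push twice: pos = 16               ->  space = 509
 *   pop 1 slot: fence = (4088 + 8) & 4095 = 0
 *   full:       pos catches fence      ->  space = 0 (fence == pos)
 *
 * One of the 512 slots is therefore permanently sacrificed, which is what
 * lets "fence == pos" mean full rather than empty, per the header comment.
 */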
125/*
126 * Sleep (if necessary) until the requested event happens
127 * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
128 * - Returns 1
129 * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
130 * - Returns the amount of space (> 0)
131 * Must be called with the cdma lock held.
132 */
133unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
134 enum cdma_event event)
135{
136 for (;;) {
137 unsigned int space;
138
139 if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
140 space = list_empty(&cdma->sync_queue) ? 1 : 0;
141 else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
142 struct push_buffer *pb = &cdma->push_buffer;
143 space = host1x_pushbuffer_space(pb);
144 } else {
145 WARN_ON(1);
146 return -EINVAL;
147 }
148
149 if (space)
150 return space;
151
152 trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
153 event);
154
155 /* If somebody has managed to already start waiting, yield */
156 if (cdma->event != CDMA_EVENT_NONE) {
157 mutex_unlock(&cdma->lock);
158 schedule();
159 mutex_lock(&cdma->lock);
160 continue;
161 }
162 cdma->event = event;
163
164 mutex_unlock(&cdma->lock);
165 down(&cdma->sem);
166 mutex_lock(&cdma->lock);
167 }
168 return 0;
169}
170
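/*
 * host1x_cdma_wait_locked() above is a single-waiter hand-off: cdma->event
 * can be claimed by only one thread at a time, so a second would-be waiter
 * drops the lock, yields and retries rather than overwriting it.  The mutex
 * is released around down(&cdma->sem) so that update_cdma_locked() can take
 * the lock, observe the pending event and signal it via up(&cdma->sem).
 */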
171/*
172 * Start timer that tracks the time spent by the job.
173 * Must be called with the cdma lock held.
174 */
175static void cdma_start_timer_locked(struct host1x_cdma *cdma,
176 struct host1x_job *job)
177{
178 struct host1x *host = cdma_to_host1x(cdma);
179
180 if (cdma->timeout.client) {
181 /* timer already started */
182 return;
183 }
184
185 cdma->timeout.client = job->client;
186 cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
187 cdma->timeout.syncpt_val = job->syncpt_end;
188 cdma->timeout.start_ktime = ktime_get();
189
190 schedule_delayed_work(&cdma->timeout.wq,
191 msecs_to_jiffies(job->timeout));
192}
193
194/*
195 * Stop timer when a buffer submission completes.
196 * Must be called with the cdma lock held.
197 */
198static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
199{
200 cancel_delayed_work(&cdma->timeout.wq);
201 cdma->timeout.client = 0;
202}
203
204/*
205 * For all sync queue entries that have already finished according to the
206 * current sync point registers:
207 * - unpin & unref their mems
208 * - pop their push buffer slots
209 * - remove them from the sync queue
210 * This is normally called from the host code's worker thread, but can be
211 * called manually if necessary.
212 * Must be called with the cdma lock held.
213 */
214static void update_cdma_locked(struct host1x_cdma *cdma)
215{
216 bool signal = false;
217 struct host1x *host1x = cdma_to_host1x(cdma);
218 struct host1x_job *job, *n;
219
220 /* If CDMA is stopped, queue is cleared and we can return */
221 if (!cdma->running)
222 return;
223
224 /*
225 * Walk the sync queue, reading the sync point registers as necessary,
226 * to consume as many sync queue entries as possible without blocking
227 */
228 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
229 struct host1x_syncpt *sp =
230 host1x_syncpt_get(host1x, job->syncpt_id);
231
232 /* Check whether this syncpt has completed, and bail if not */
233 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
234 /* Start timer on next pending syncpt */
235 if (job->timeout)
236 cdma_start_timer_locked(cdma, job);
237 break;
238 }
239
240 /* Cancel timeout, when a buffer completes */
241 if (cdma->timeout.client)
242 stop_cdma_timer_locked(cdma);
243
244 /* Unpin the memory */
245 host1x_job_unpin(job);
246
247 /* Pop push buffer slots */
248 if (job->num_slots) {
249 struct push_buffer *pb = &cdma->push_buffer;
250 host1x_pushbuffer_pop(pb, job->num_slots);
251 if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
252 signal = true;
253 }
254
255 list_del(&job->list);
256 host1x_job_put(job);
257 }
258
259 if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
260 list_empty(&cdma->sync_queue))
261 signal = true;
262
263 if (signal) {
264 cdma->event = CDMA_EVENT_NONE;
265 up(&cdma->sem);
266 }
267}
268
269void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
270 struct device *dev)
271{
272 u32 restart_addr;
273 u32 syncpt_incrs;
274 struct host1x_job *job = NULL;
275 u32 syncpt_val;
276 struct host1x *host1x = cdma_to_host1x(cdma);
277
278 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
279
280 dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
281 __func__, syncpt_val);
282
283 /*
284 * Move the sync_queue read pointer to the first entry that hasn't
285 * completed based on the current HW syncpt value. It's likely there
286 * won't be any (i.e. we're still at the head), but covers the case
287 * where a syncpt incr happens just prior/during the teardown.
288 */
289
290 dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
291 __func__);
292
293 list_for_each_entry(job, &cdma->sync_queue, list) {
294 if (syncpt_val < job->syncpt_end)
295 break;
296
297 host1x_job_dump(dev, job);
298 }
299
300 /*
301 * Walk the sync_queue, first incrementing with the CPU syncpts that
302 * are partially executed (the first buffer) or fully skipped while
303 * still in the current context (slots are also NOP-ed).
304 *
305 * At the point contexts are interleaved, syncpt increments must be
306 * done inline with the pushbuffer from a GATHER buffer to maintain
307 * the order (slots are modified to be a GATHER of syncpt incrs).
308 *
309 * Note: save in restart_addr the location where the timed out buffer
310 * started in the PB, so we can start the refetch from there (with the
311 * modified NOP-ed PB slots). This lets things appear to have completed
312 * properly for this buffer and resources are freed.
313 */
314
315 dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
316 __func__);
317
318 if (!list_empty(&cdma->sync_queue))
319 restart_addr = job->first_get;
320 else
321 restart_addr = cdma->last_pos;
322
323 /* do CPU increments as long as this context continues */
324 list_for_each_entry_from(job, &cdma->sync_queue, list) {
325 /* different context, gets us out of this loop */
326 if (job->client != cdma->timeout.client)
327 break;
328
329 /* won't need a timeout when replayed */
330 job->timeout = 0;
331
332 syncpt_incrs = job->syncpt_end - syncpt_val;
333 dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
334
335 host1x_job_dump(dev, job);
336
337 /* safe to use CPU to incr syncpts */
338 host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
339 syncpt_incrs, job->syncpt_end,
340 job->num_slots);
341
342 syncpt_val += syncpt_incrs;
343 }
344
345	/* The following submits from the same client may be dependent on the
346 * failed submit and therefore they may fail. Force a small timeout
347 * to make the queue cleanup faster */
348
349 list_for_each_entry_from(job, &cdma->sync_queue, list)
350 if (job->client == cdma->timeout.client)
351 job->timeout = min_t(unsigned int, job->timeout, 500);
352
353 dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
354
355 /* roll back DMAGET and start up channel again */
356 host1x_hw_cdma_resume(host1x, cdma, restart_addr);
357}
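
/*
 * Worked example with invented numbers: if the HW syncpt reads 10 when the
 * timeout fires and a same-context job in the sync_queue was supposed to
 * finish at syncpt_end = 14, host1x_cdma_update_sync_queue() computes
 * syncpt_incrs = 14 - 10 = 4 and applies those four increments from the
 * CPU before the engine is restarted at restart_addr.
 */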
358
359/*
360 * Create a cdma
361 */
362int host1x_cdma_init(struct host1x_cdma *cdma)
363{
364 int err;
365
366 mutex_init(&cdma->lock);
367 sema_init(&cdma->sem, 0);
368
369 INIT_LIST_HEAD(&cdma->sync_queue);
370
371 cdma->event = CDMA_EVENT_NONE;
372 cdma->running = false;
373 cdma->torndown = false;
374
375 err = host1x_pushbuffer_init(&cdma->push_buffer);
376 if (err)
377 return err;
378 return 0;
379}
380
381/*
382 * Destroy a cdma
383 */
384int host1x_cdma_deinit(struct host1x_cdma *cdma)
385{
386 struct push_buffer *pb = &cdma->push_buffer;
387 struct host1x *host1x = cdma_to_host1x(cdma);
388
389 if (cdma->running) {
390 pr_warn("%s: CDMA still running\n", __func__);
391 return -EBUSY;
392 }
393
394 host1x_pushbuffer_destroy(pb);
395 host1x_hw_cdma_timeout_destroy(host1x, cdma);
396
397 return 0;
398}
399
400/*
401 * Begin a cdma submit
402 */
403int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
404{
405 struct host1x *host1x = cdma_to_host1x(cdma);
406
407 mutex_lock(&cdma->lock);
408
409 if (job->timeout) {
410 /* init state on first submit with timeout value */
411 if (!cdma->timeout.initialized) {
412 int err;
413 err = host1x_hw_cdma_timeout_init(host1x, cdma,
414 job->syncpt_id);
415 if (err) {
416 mutex_unlock(&cdma->lock);
417 return err;
418 }
419 }
420 }
421 if (!cdma->running)
422 host1x_hw_cdma_start(host1x, cdma);
423
424 cdma->slots_free = 0;
425 cdma->slots_used = 0;
426 cdma->first_get = cdma->push_buffer.pos;
427
428 trace_host1x_cdma_begin(dev_name(job->channel->dev));
429 return 0;
430}
431
432/*
433 * Push two words into a push buffer slot
434 * Blocks as necessary if the push buffer is full.
435 */
436void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
437{
438 struct host1x *host1x = cdma_to_host1x(cdma);
439 struct push_buffer *pb = &cdma->push_buffer;
440 u32 slots_free = cdma->slots_free;
441
442 if (host1x_debug_trace_cmdbuf)
443 trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
444 op1, op2);
445
446 if (slots_free == 0) {
447 host1x_hw_cdma_flush(host1x, cdma);
448 slots_free = host1x_cdma_wait_locked(cdma,
449 CDMA_EVENT_PUSH_BUFFER_SPACE);
450 }
451 cdma->slots_free = slots_free - 1;
452 cdma->slots_used++;
453 host1x_pushbuffer_push(pb, op1, op2);
454}
455
456/*
457 * End a cdma submit
458 * Kick off DMA, add the job to the sync queue, and record the number of
459 * slots to be freed from the push buffer. The handles for a submit must
460 * all be pinned at the same time, but they can be unpinned in smaller chunks.
461 */
462void host1x_cdma_end(struct host1x_cdma *cdma,
463 struct host1x_job *job)
464{
465 struct host1x *host1x = cdma_to_host1x(cdma);
466 bool idle = list_empty(&cdma->sync_queue);
467
468 host1x_hw_cdma_flush(host1x, cdma);
469
470 job->first_get = cdma->first_get;
471 job->num_slots = cdma->slots_used;
472 host1x_job_get(job);
473 list_add_tail(&job->list, &cdma->sync_queue);
474
475 /* start timer on idle -> active transitions */
476 if (job->timeout && idle)
477 cdma_start_timer_locked(cdma, job);
478
479 trace_host1x_cdma_end(dev_name(job->channel->dev));
480 mutex_unlock(&cdma->lock);
481}
482
483/*
484 * Update cdma state according to current sync point values
485 */
486void host1x_cdma_update(struct host1x_cdma *cdma)
487{
488 mutex_lock(&cdma->lock);
489 update_cdma_locked(cdma);
490 mutex_unlock(&cdma->lock);
491}
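
Taken together, the functions above form the producer half of the CDMA
interface: begin() takes the channel lock, push() fills two-word slots
(sleeping via host1x_cdma_wait_locked() when the ring is full), and end()
kicks the DMA engine, queues the job and drops the lock; update() is the
consumer that retires completed jobs. A minimal sketch of a caller follows;
example_submit() and the opcode words are placeholders, not part of this
patch:

static int example_submit(struct host1x_cdma *cdma, struct host1x_job *job)
{
	int err;

	/* takes cdma->lock and starts the channel if needed */
	err = host1x_cdma_begin(cdma, job);
	if (err)
		return err;

	/* producer: each push consumes one two-word push buffer slot */
	host1x_cdma_push(cdma, 0x0 /* placeholder op1 */, 0x0 /* op2 */);

	/* kicks DMA, queues the job on sync_queue, drops cdma->lock */
	host1x_cdma_end(cdma, job);
	return 0;
}

The consumer side runs later, typically from syncpt interrupt handling, via
host1x_cdma_update().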
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
new file mode 100644
index 000000000000..313c4b784348
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.h
@@ -0,0 +1,100 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CDMA_H
20#define __HOST1X_CDMA_H
21
22#include <linux/sched.h>
23#include <linux/semaphore.h>
24#include <linux/list.h>
25
26struct host1x_syncpt;
27struct host1x_userctx_timeout;
28struct host1x_job;
29
30/*
31 * cdma
32 *
33 * This is in charge of a host command DMA channel.
34 * It sends ops to a push buffer and takes responsibility for unpinning
35 * (and possibly freeing) memory after those ops have completed.
36 * Producer:
37 * begin
38 * push - send ops to the push buffer
39 * end - start command DMA and enqueue handles to be unpinned
40 * Consumer:
41 * update - call to update sync queue and push buffer, unpin memory
42 */
43
44struct push_buffer {
45 u32 *mapped; /* mapped pushbuffer memory */
46 dma_addr_t phys; /* physical address of pushbuffer */
47 u32 fence; /* index we've written */
48 u32 pos; /* index to write to */
49 u32 size_bytes;
50};
51
52struct buffer_timeout {
53 struct delayed_work wq; /* work queue */
54 bool initialized; /* timer one-time setup flag */
55 struct host1x_syncpt *syncpt; /* buffer completion syncpt */
56 u32 syncpt_val; /* syncpt value when completed */
57 ktime_t start_ktime; /* starting time */
58 /* context timeout information */
59 int client;
60};
61
62enum cdma_event {
63 CDMA_EVENT_NONE, /* not waiting for any event */
64 CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
65 CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
66};
67
68struct host1x_cdma {
69 struct mutex lock; /* controls access to shared state */
70 struct semaphore sem; /* signalled when event occurs */
71 enum cdma_event event; /* event that sem is waiting for */
72 unsigned int slots_used; /* pb slots used in current submit */
73 unsigned int slots_free; /* pb slots free in current submit */
74 unsigned int first_get; /* DMAGET value, where submit begins */
75 unsigned int last_pos; /* last value written to DMAPUT */
76 struct push_buffer push_buffer; /* channel's push buffer */
77 struct list_head sync_queue; /* job queue */
78 struct buffer_timeout timeout; /* channel's timeout state/wq */
79 bool running;
80 bool torndown;
81};
82
83#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
84#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
85#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
86
87int host1x_cdma_init(struct host1x_cdma *cdma);
88int host1x_cdma_deinit(struct host1x_cdma *cdma);
89void host1x_cdma_stop(struct host1x_cdma *cdma);
90int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
91void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
92void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
93void host1x_cdma_update(struct host1x_cdma *cdma);
94void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
95 u32 *out);
96unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
97 enum cdma_event event);
98void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
99 struct device *dev);
100#endif
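
Since fence and pos are byte indices and every slot carries exactly two
opcode words, the slot capacity of a push buffer follows directly from
size_bytes. A hedged helper spelling that out (illustrative only, not part
of the patch):

static inline u32 example_pb_total_slots(const struct push_buffer *pb)
{
	/* two u32 opcode words per slot */
	return pb->size_bytes / (2 * sizeof(u32));
}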
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
new file mode 100644
index 000000000000..83ea51b9f0fc
--- /dev/null
+++ b/drivers/gpu/host1x/channel.c
@@ -0,0 +1,126 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21
22#include "channel.h"
23#include "dev.h"
24#include "job.h"
25
26/* Constructor for the host1x device list */
27int host1x_channel_list_init(struct host1x *host)
28{
29 INIT_LIST_HEAD(&host->chlist.list);
30 mutex_init(&host->chlist_mutex);
31
32 if (host->info->nb_channels > BITS_PER_LONG) {
33 WARN(1, "host1x hardware has more channels than supported by the driver\n");
34 return -ENOSYS;
35 }
36
37 return 0;
38}
39
40int host1x_job_submit(struct host1x_job *job)
41{
42 struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
43
44 return host1x_hw_channel_submit(host, job);
45}
46
47struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
48{
49 int err = 0;
50
51 mutex_lock(&channel->reflock);
52
53 if (channel->refcount == 0)
54 err = host1x_cdma_init(&channel->cdma);
55
56 if (!err)
57 channel->refcount++;
58
59 mutex_unlock(&channel->reflock);
60
61 return err ? NULL : channel;
62}
63
64void host1x_channel_put(struct host1x_channel *channel)
65{
66 mutex_lock(&channel->reflock);
67
68 if (channel->refcount == 1) {
69 struct host1x *host = dev_get_drvdata(channel->dev->parent);
70
71 host1x_hw_cdma_stop(host, &channel->cdma);
72 host1x_cdma_deinit(&channel->cdma);
73 }
74
75 channel->refcount--;
76
77 mutex_unlock(&channel->reflock);
78}
79
80struct host1x_channel *host1x_channel_request(struct device *dev)
81{
82 struct host1x *host = dev_get_drvdata(dev->parent);
83 int max_channels = host->info->nb_channels;
84 struct host1x_channel *channel = NULL;
85 int index, err;
86
87 mutex_lock(&host->chlist_mutex);
88
89 index = find_first_zero_bit(&host->allocated_channels, max_channels);
90 if (index >= max_channels)
91 goto fail;
92
93 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
94 if (!channel)
95 goto fail;
96
97 err = host1x_hw_channel_init(host, channel, index);
98 if (err < 0)
99 goto fail;
100
101 /* Link device to host1x_channel */
102 channel->dev = dev;
103
104 /* Add to channel list */
105 list_add_tail(&channel->list, &host->chlist.list);
106
107 host->allocated_channels |= BIT(index);
108
109 mutex_unlock(&host->chlist_mutex);
110 return channel;
111
112fail:
113 dev_err(dev, "failed to init channel\n");
114 kfree(channel);
115 mutex_unlock(&host->chlist_mutex);
116 return NULL;
117}
118
119void host1x_channel_free(struct host1x_channel *channel)
120{
121 struct host1x *host = dev_get_drvdata(channel->dev->parent);
122
123 host->allocated_channels &= ~BIT(channel->id);
124 list_del(&channel->list);
125 kfree(channel);
126}
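
The four entry points above form a simple lifecycle: request() allocates
the channel and does the one-time hardware init, the first get() brings up
CDMA, the last put() stops it, and free() returns the channel ID. A sketch
of a client acquiring a channel, assuming dev is a host1x client device
(example_acquire() is an invented name):

static struct host1x_channel *example_acquire(struct device *dev)
{
	struct host1x_channel *channel = host1x_channel_request(dev);

	if (!channel)
		return NULL;

	/* first get initializes the channel's CDMA */
	if (!host1x_channel_get(channel)) {
		host1x_channel_free(channel);
		return NULL;
	}

	return channel;	/* balance with host1x_channel_put() + _free() */
}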
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
new file mode 100644
index 000000000000..48723b8eea42
--- /dev/null
+++ b/drivers/gpu/host1x/channel.h
@@ -0,0 +1,52 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CHANNEL_H
20#define __HOST1X_CHANNEL_H
21
22#include <linux/io.h>
23
24#include "cdma.h"
25
26struct host1x;
27
28struct host1x_channel {
29 struct list_head list;
30
31 unsigned int refcount;
32 unsigned int id;
33 struct mutex reflock;
34 struct mutex submitlock;
35 void __iomem *regs;
36 struct device *dev;
37 struct host1x_cdma cdma;
38};
39
40/* channel list operations */
41int host1x_channel_list_init(struct host1x *host);
42
43struct host1x_channel *host1x_channel_request(struct device *dev);
44void host1x_channel_free(struct host1x_channel *channel);
45struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
46void host1x_channel_put(struct host1x_channel *channel);
47int host1x_job_submit(struct host1x_job *job);
48
49#define host1x_for_each_channel(host, channel) \
50 list_for_each_entry(channel, &host->chlist.list, list)
51
52#endif
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
new file mode 100644
index 000000000000..3ec7d77de24d
--- /dev/null
+++ b/drivers/gpu/host1x/debug.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/uaccess.h>
21
22#include <linux/io.h>
23
24#include "dev.h"
25#include "debug.h"
26#include "channel.h"
27
28unsigned int host1x_debug_trace_cmdbuf;
29
30static pid_t host1x_debug_force_timeout_pid;
31static u32 host1x_debug_force_timeout_val;
32static u32 host1x_debug_force_timeout_channel;
33
34void host1x_debug_output(struct output *o, const char *fmt, ...)
35{
36 va_list args;
37 int len;
38
39 va_start(args, fmt);
40	len = vscnprintf(o->buf, sizeof(o->buf), fmt, args); /* bytes actually written */
41 va_end(args);
42 o->fn(o->ctx, o->buf, len);
43}
44
45static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
46{
47 struct host1x *m = dev_get_drvdata(ch->dev->parent);
48 struct output *o = data;
49
50 mutex_lock(&ch->reflock);
51 if (ch->refcount) {
52 mutex_lock(&ch->cdma.lock);
53 if (show_fifo)
54 host1x_hw_show_channel_fifo(m, ch, o);
55 host1x_hw_show_channel_cdma(m, ch, o);
56 mutex_unlock(&ch->cdma.lock);
57 }
58 mutex_unlock(&ch->reflock);
59
60 return 0;
61}
62
63static void show_syncpts(struct host1x *m, struct output *o)
64{
65 int i;
66 host1x_debug_output(o, "---- syncpts ----\n");
67 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
68 u32 max = host1x_syncpt_read_max(m->syncpt + i);
69 u32 min = host1x_syncpt_load(m->syncpt + i);
70 if (!min && !max)
71 continue;
72 host1x_debug_output(o, "id %d (%s) min %d max %d\n",
73 i, m->syncpt[i].name, min, max);
74 }
75
76 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
77 u32 base_val;
78 base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
79 if (base_val)
80 host1x_debug_output(o, "waitbase id %d val %d\n", i,
81 base_val);
82 }
83
84 host1x_debug_output(o, "\n");
85}
86
87static void show_all(struct host1x *m, struct output *o)
88{
89 struct host1x_channel *ch;
90
91 host1x_hw_show_mlocks(m, o);
92 show_syncpts(m, o);
93 host1x_debug_output(o, "---- channels ----\n");
94
95 host1x_for_each_channel(m, ch)
96 show_channels(ch, o, true);
97}
98
99#ifdef CONFIG_DEBUG_FS
100static void show_all_no_fifo(struct host1x *host1x, struct output *o)
101{
102 struct host1x_channel *ch;
103
104 host1x_hw_show_mlocks(host1x, o);
105 show_syncpts(host1x, o);
106 host1x_debug_output(o, "---- channels ----\n");
107
108 host1x_for_each_channel(host1x, ch)
109 show_channels(ch, o, false);
110}
111
112static int host1x_debug_show_all(struct seq_file *s, void *unused)
113{
114 struct output o = {
115 .fn = write_to_seqfile,
116 .ctx = s
117 };
118 show_all(s->private, &o);
119 return 0;
120}
121
122static int host1x_debug_show(struct seq_file *s, void *unused)
123{
124 struct output o = {
125 .fn = write_to_seqfile,
126 .ctx = s
127 };
128 show_all_no_fifo(s->private, &o);
129 return 0;
130}
131
132static int host1x_debug_open_all(struct inode *inode, struct file *file)
133{
134 return single_open(file, host1x_debug_show_all, inode->i_private);
135}
136
137static const struct file_operations host1x_debug_all_fops = {
138 .open = host1x_debug_open_all,
139 .read = seq_read,
140 .llseek = seq_lseek,
141 .release = single_release,
142};
143
144static int host1x_debug_open(struct inode *inode, struct file *file)
145{
146 return single_open(file, host1x_debug_show, inode->i_private);
147}
148
149static const struct file_operations host1x_debug_fops = {
150 .open = host1x_debug_open,
151 .read = seq_read,
152 .llseek = seq_lseek,
153 .release = single_release,
154};
155
156void host1x_debug_init(struct host1x *host1x)
157{
158 struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
159
160 if (!de)
161 return;
162
163 /* Store the created entry */
164 host1x->debugfs = de;
165
166 debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
167 debugfs_create_file("status_all", S_IRUGO, de, host1x,
168 &host1x_debug_all_fops);
169
170 debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
171 &host1x_debug_trace_cmdbuf);
172
173 host1x_hw_debug_init(host1x, de);
174
175 debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
176 &host1x_debug_force_timeout_pid);
177 debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
178 &host1x_debug_force_timeout_val);
179 debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
180 &host1x_debug_force_timeout_channel);
181}
182
183void host1x_debug_deinit(struct host1x *host1x)
184{
185 debugfs_remove_recursive(host1x->debugfs);
186}
187#else
188void host1x_debug_init(struct host1x *host1x)
189{
190}
191void host1x_debug_deinit(struct host1x *host1x)
192{
193}
194#endif
195
196void host1x_debug_dump(struct host1x *host1x)
197{
198 struct output o = {
199 .fn = write_to_printk
200 };
201 show_all(host1x, &o);
202}
203
204void host1x_debug_dump_syncpts(struct host1x *host1x)
205{
206 struct output o = {
207 .fn = write_to_printk
208 };
209 show_syncpts(host1x, &o);
210}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
new file mode 100644
index 000000000000..4595b2e0799f
--- /dev/null
+++ b/drivers/gpu/host1x/debug.h
@@ -0,0 +1,51 @@
1/*
2 * Tegra host1x Debug
3 *
4 * Copyright (c) 2011-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __HOST1X_DEBUG_H
19#define __HOST1X_DEBUG_H
20
21#include <linux/debugfs.h>
22#include <linux/seq_file.h>
23
24struct host1x;
25
26struct output {
27 void (*fn)(void *ctx, const char *str, size_t len);
28 void *ctx;
29 char buf[256];
30};
31
32static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
33{
34 seq_write((struct seq_file *)ctx, str, len);
35}
36
37static inline void write_to_printk(void *ctx, const char *str, size_t len)
38{
39 pr_info("%s", str);
40}
41
42void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
43
44extern unsigned int host1x_debug_trace_cmdbuf;
45
46void host1x_debug_init(struct host1x *host1x);
47void host1x_debug_deinit(struct host1x *host1x);
48void host1x_debug_dump(struct host1x *host1x);
49void host1x_debug_dump_syncpts(struct host1x *host1x);
50
51#endif
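
struct output decouples the dump code from its destination:
write_to_seqfile and write_to_printk above are the two in-tree sinks, and
host1x_debug_output() formats into o->buf before handing the bytes to
whichever callback is installed. A sketch of a third, caller-defined sink;
write_to_buffer and example_dump are assumptions, not driver API:

static char example_buf[512];
static size_t example_len;

static void write_to_buffer(void *ctx, const char *str, size_t len)
{
	size_t space = sizeof(example_buf) - example_len;

	if (len > space)
		len = space;
	memcpy(example_buf + example_len, str, len);
	example_len += len;
}

static void example_dump(struct host1x *host1x)
{
	struct output o = { .fn = write_to_buffer };

	host1x_debug_output(&o, "host1x %s\n", dev_name(host1x->dev));
}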
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
new file mode 100644
index 000000000000..28e28a23d444
--- /dev/null
+++ b/drivers/gpu/host1x/dev.c
@@ -0,0 +1,246 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/list.h>
21#include <linux/slab.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26
27#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h>
29
30#include "dev.h"
31#include "intr.h"
32#include "channel.h"
33#include "debug.h"
34#include "hw/host1x01.h"
35#include "host1x_client.h"
36
37void host1x_set_drm_data(struct device *dev, void *data)
38{
39 struct host1x *host1x = dev_get_drvdata(dev);
40 host1x->drm_data = data;
41}
42
43void *host1x_get_drm_data(struct device *dev)
44{
45 struct host1x *host1x = dev_get_drvdata(dev);
46 return host1x->drm_data;
47}
48
49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
50{
51 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
52
53 writel(v, sync_regs + r);
54}
55
56u32 host1x_sync_readl(struct host1x *host1x, u32 r)
57{
58 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
59
60 return readl(sync_regs + r);
61}
62
63void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
64{
65 writel(v, ch->regs + r);
66}
67
68u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
69{
70 return readl(ch->regs + r);
71}
72
73static const struct host1x_info host1x01_info = {
74 .nb_channels = 8,
75 .nb_pts = 32,
76 .nb_mlocks = 16,
77 .nb_bases = 8,
78 .init = host1x01_init,
79 .sync_offset = 0x3000,
80};
81
82static const struct of_device_id host1x_of_match[] = {
83 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
84 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
85 { },
86};
87MODULE_DEVICE_TABLE(of, host1x_of_match);
88
89static int host1x_probe(struct platform_device *pdev)
90{
91 const struct of_device_id *id;
92 struct host1x *host;
93 struct resource *regs;
94 int syncpt_irq;
95 int err;
96
97 id = of_match_device(host1x_of_match, &pdev->dev);
98 if (!id)
99 return -EINVAL;
100
101 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 if (!regs) {
103 dev_err(&pdev->dev, "failed to get registers\n");
104 return -ENXIO;
105 }
106
107 syncpt_irq = platform_get_irq(pdev, 0);
108 if (syncpt_irq < 0) {
109 dev_err(&pdev->dev, "failed to get IRQ\n");
110 return -ENXIO;
111 }
112
113 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
114 if (!host)
115 return -ENOMEM;
116
117 host->dev = &pdev->dev;
118 host->info = id->data;
119
120 /* set common host1x device data */
121 platform_set_drvdata(pdev, host);
122
123 host->regs = devm_ioremap_resource(&pdev->dev, regs);
124 if (IS_ERR(host->regs))
125 return PTR_ERR(host->regs);
126
127 if (host->info->init) {
128 err = host->info->init(host);
129 if (err)
130 return err;
131 }
132
133 host->clk = devm_clk_get(&pdev->dev, NULL);
134 if (IS_ERR(host->clk)) {
135 dev_err(&pdev->dev, "failed to get clock\n");
136 err = PTR_ERR(host->clk);
137 return err;
138 }
139
140 err = host1x_channel_list_init(host);
141 if (err) {
142 dev_err(&pdev->dev, "failed to initialize channel list\n");
143 return err;
144 }
145
146 err = clk_prepare_enable(host->clk);
147 if (err < 0) {
148 dev_err(&pdev->dev, "failed to enable clock\n");
149 return err;
150 }
151
152 err = host1x_syncpt_init(host);
153 if (err) {
154 dev_err(&pdev->dev, "failed to initialize syncpts\n");
155 return err;
156 }
157
158 err = host1x_intr_init(host, syncpt_irq);
159 if (err) {
160 dev_err(&pdev->dev, "failed to initialize interrupts\n");
161 goto fail_deinit_syncpt;
162 }
163
164 host1x_debug_init(host);
165
166 host1x_drm_alloc(pdev);
167
168 return 0;
169
170fail_deinit_syncpt:
171 host1x_syncpt_deinit(host);
172 return err;
173}
174
175static int __exit host1x_remove(struct platform_device *pdev)
176{
177 struct host1x *host = platform_get_drvdata(pdev);
178
179 host1x_intr_deinit(host);
180 host1x_syncpt_deinit(host);
181 clk_disable_unprepare(host->clk);
182
183 return 0;
184}
185
186static struct platform_driver tegra_host1x_driver = {
187 .probe = host1x_probe,
188 .remove = __exit_p(host1x_remove),
189 .driver = {
190 .owner = THIS_MODULE,
191 .name = "tegra-host1x",
192 .of_match_table = host1x_of_match,
193 },
194};
195
196static int __init tegra_host1x_init(void)
197{
198 int err;
199
200 err = platform_driver_register(&tegra_host1x_driver);
201 if (err < 0)
202 return err;
203
204#ifdef CONFIG_DRM_TEGRA
205 err = platform_driver_register(&tegra_dc_driver);
206 if (err < 0)
207 goto unregister_host1x;
208
209 err = platform_driver_register(&tegra_hdmi_driver);
210 if (err < 0)
211 goto unregister_dc;
212
213 err = platform_driver_register(&tegra_gr2d_driver);
214 if (err < 0)
215 goto unregister_hdmi;
216#endif
217
218 return 0;
219
220#ifdef CONFIG_DRM_TEGRA
221unregister_hdmi:
222 platform_driver_unregister(&tegra_hdmi_driver);
223unregister_dc:
224 platform_driver_unregister(&tegra_dc_driver);
225unregister_host1x:
226 platform_driver_unregister(&tegra_host1x_driver);
227 return err;
228#endif
229}
230module_init(tegra_host1x_init);
231
232static void __exit tegra_host1x_exit(void)
233{
234#ifdef CONFIG_DRM_TEGRA
235 platform_driver_unregister(&tegra_gr2d_driver);
236 platform_driver_unregister(&tegra_hdmi_driver);
237 platform_driver_unregister(&tegra_dc_driver);
238#endif
239 platform_driver_unregister(&tegra_host1x_driver);
240}
241module_exit(tegra_host1x_exit);
242
243MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
244MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
245MODULE_DESCRIPTION("Host1x driver for Tegra products");
246MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
new file mode 100644
index 000000000000..a1607d6e135b
--- /dev/null
+++ b/drivers/gpu/host1x/dev.h
@@ -0,0 +1,308 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_DEV_H
18#define HOST1X_DEV_H
19
20#include <linux/platform_device.h>
21#include <linux/device.h>
22
23#include "channel.h"
24#include "syncpt.h"
25#include "intr.h"
26#include "cdma.h"
27#include "job.h"
28
29struct host1x_syncpt;
30struct host1x_channel;
31struct host1x_cdma;
32struct host1x_job;
33struct push_buffer;
34struct output;
35struct dentry;
36
37struct host1x_channel_ops {
38 int (*init)(struct host1x_channel *channel, struct host1x *host,
39 unsigned int id);
40 int (*submit)(struct host1x_job *job);
41};
42
43struct host1x_cdma_ops {
44 void (*start)(struct host1x_cdma *cdma);
45 void (*stop)(struct host1x_cdma *cdma);
46 void (*flush)(struct host1x_cdma *cdma);
47 int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
48 void (*timeout_destroy)(struct host1x_cdma *cdma);
49 void (*freeze)(struct host1x_cdma *cdma);
50 void (*resume)(struct host1x_cdma *cdma, u32 getptr);
51 void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
52 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
53};
54
55struct host1x_pushbuffer_ops {
56 void (*init)(struct push_buffer *pb);
57};
58
59struct host1x_debug_ops {
60 void (*debug_init)(struct dentry *de);
61 void (*show_channel_cdma)(struct host1x *host,
62 struct host1x_channel *ch,
63 struct output *o);
64 void (*show_channel_fifo)(struct host1x *host,
65 struct host1x_channel *ch,
66 struct output *o);
67 void (*show_mlocks)(struct host1x *host, struct output *output);
68
69};
70
71struct host1x_syncpt_ops {
72 void (*restore)(struct host1x_syncpt *syncpt);
73 void (*restore_wait_base)(struct host1x_syncpt *syncpt);
74 void (*load_wait_base)(struct host1x_syncpt *syncpt);
75 u32 (*load)(struct host1x_syncpt *syncpt);
76 void (*cpu_incr)(struct host1x_syncpt *syncpt);
77 int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
78};
79
80struct host1x_intr_ops {
81 int (*init_host_sync)(struct host1x *host, u32 cpm,
82 void (*syncpt_thresh_work)(struct work_struct *work));
83 void (*set_syncpt_threshold)(
84 struct host1x *host, u32 id, u32 thresh);
85 void (*enable_syncpt_intr)(struct host1x *host, u32 id);
86 void (*disable_syncpt_intr)(struct host1x *host, u32 id);
87 void (*disable_all_syncpt_intrs)(struct host1x *host);
88 int (*free_syncpt_irq)(struct host1x *host);
89};
90
91struct host1x_info {
92 int nb_channels; /* host1x: num channels supported */
93 int nb_pts; /* host1x: num syncpoints supported */
94	int nb_bases; /* host1x: num wait bases supported */
95 int nb_mlocks; /* host1x: number of mlocks */
96 int (*init)(struct host1x *); /* initialize per SoC ops */
97 int sync_offset;
98};
99
100struct host1x {
101 const struct host1x_info *info;
102
103 void __iomem *regs;
104 struct host1x_syncpt *syncpt;
105 struct device *dev;
106 struct clk *clk;
107
108 struct mutex intr_mutex;
109 struct workqueue_struct *intr_wq;
110 int intr_syncpt_irq;
111
112 const struct host1x_syncpt_ops *syncpt_op;
113 const struct host1x_intr_ops *intr_op;
114 const struct host1x_channel_ops *channel_op;
115 const struct host1x_cdma_ops *cdma_op;
116 const struct host1x_pushbuffer_ops *cdma_pb_op;
117 const struct host1x_debug_ops *debug_op;
118
119 struct host1x_syncpt *nop_sp;
120
121 struct mutex chlist_mutex;
122 struct host1x_channel chlist;
123 unsigned long allocated_channels;
124 unsigned int num_allocated_channels;
125
126 struct dentry *debugfs;
127
128 void *drm_data;
129};
130
131void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
132u32 host1x_sync_readl(struct host1x *host1x, u32 r);
133void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
134u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
135
136static inline void host1x_hw_syncpt_restore(struct host1x *host,
137 struct host1x_syncpt *sp)
138{
139 host->syncpt_op->restore(sp);
140}
141
142static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
143 struct host1x_syncpt *sp)
144{
145 host->syncpt_op->restore_wait_base(sp);
146}
147
148static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
149 struct host1x_syncpt *sp)
150{
151 host->syncpt_op->load_wait_base(sp);
152}
153
154static inline u32 host1x_hw_syncpt_load(struct host1x *host,
155 struct host1x_syncpt *sp)
156{
157 return host->syncpt_op->load(sp);
158}
159
160static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
161 struct host1x_syncpt *sp)
162{
163 host->syncpt_op->cpu_incr(sp);
164}
165
166static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
167 struct host1x_syncpt *sp,
168 void *patch_addr)
169{
170 return host->syncpt_op->patch_wait(sp, patch_addr);
171}
172
173static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
174 void (*syncpt_thresh_work)(struct work_struct *))
175{
176 return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
177}
178
179static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
180 u32 id, u32 thresh)
181{
182 host->intr_op->set_syncpt_threshold(host, id, thresh);
183}
184
185static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
186 u32 id)
187{
188 host->intr_op->enable_syncpt_intr(host, id);
189}
190
191static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
192 u32 id)
193{
194 host->intr_op->disable_syncpt_intr(host, id);
195}
196
197static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
198{
199 host->intr_op->disable_all_syncpt_intrs(host);
200}
201
202static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
203{
204 return host->intr_op->free_syncpt_irq(host);
205}
206
207static inline int host1x_hw_channel_init(struct host1x *host,
208 struct host1x_channel *channel,
209 int chid)
210{
211 return host->channel_op->init(channel, host, chid);
212}
213
214static inline int host1x_hw_channel_submit(struct host1x *host,
215 struct host1x_job *job)
216{
217 return host->channel_op->submit(job);
218}
219
220static inline void host1x_hw_cdma_start(struct host1x *host,
221 struct host1x_cdma *cdma)
222{
223 host->cdma_op->start(cdma);
224}
225
226static inline void host1x_hw_cdma_stop(struct host1x *host,
227 struct host1x_cdma *cdma)
228{
229 host->cdma_op->stop(cdma);
230}
231
232static inline void host1x_hw_cdma_flush(struct host1x *host,
233 struct host1x_cdma *cdma)
234{
235 host->cdma_op->flush(cdma);
236}
237
238static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
239 struct host1x_cdma *cdma,
240 u32 syncpt_id)
241{
242 return host->cdma_op->timeout_init(cdma, syncpt_id);
243}
244
245static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
246 struct host1x_cdma *cdma)
247{
248 host->cdma_op->timeout_destroy(cdma);
249}
250
251static inline void host1x_hw_cdma_freeze(struct host1x *host,
252 struct host1x_cdma *cdma)
253{
254 host->cdma_op->freeze(cdma);
255}
256
257static inline void host1x_hw_cdma_resume(struct host1x *host,
258 struct host1x_cdma *cdma, u32 getptr)
259{
260 host->cdma_op->resume(cdma, getptr);
261}
262
263static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
264 struct host1x_cdma *cdma,
265 u32 getptr,
266 u32 syncpt_incrs,
267 u32 syncval, u32 nr_slots)
268{
269 host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
270 nr_slots);
271}
272
273static inline void host1x_hw_pushbuffer_init(struct host1x *host,
274 struct push_buffer *pb)
275{
276 host->cdma_pb_op->init(pb);
277}
278
279static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
280{
281 if (host->debug_op && host->debug_op->debug_init)
282 host->debug_op->debug_init(de);
283}
284
285static inline void host1x_hw_show_channel_cdma(struct host1x *host,
286 struct host1x_channel *channel,
287 struct output *o)
288{
289 host->debug_op->show_channel_cdma(host, channel, o);
290}
291
292static inline void host1x_hw_show_channel_fifo(struct host1x *host,
293 struct host1x_channel *channel,
294 struct output *o)
295{
296 host->debug_op->show_channel_fifo(host, channel, o);
297}
298
299static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
300{
301 host->debug_op->show_mlocks(host, o);
302}
303
304extern struct platform_driver tegra_hdmi_driver;
305extern struct platform_driver tegra_dc_driver;
306extern struct platform_driver tegra_gr2d_driver;
307
308#endif
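
Every host1x_hw_*() helper above is a one-line trampoline through the
per-SoC ops tables, which the info->init hook (host1x01_init for
Tegra20/30) is expected to populate. A hedged sketch of that wiring with
invented example_* names; a real backend implements every hook, not just
these two:

static u32 example_syncpt_load(struct host1x_syncpt *sp)
{
	return 0;	/* a real backend reads the syncpt register here */
}

static void example_syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	/* a real backend writes the CPU-increment register here */
}

static const struct host1x_syncpt_ops example_syncpt_ops = {
	.load = example_syncpt_load,
	.cpu_incr = example_syncpt_cpu_incr,
	/* .restore, .restore_wait_base, .load_wait_base, .patch_wait */
};

static int example_soc_init(struct host1x *host)
{
	host->syncpt_op = &example_syncpt_ops;
	/* channel_op, cdma_op, cdma_pb_op, intr_op and debug_op are
	 * assigned the same way before the core starts using them */
	return 0;
}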
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index be1daf7344d3..69853a4de40a 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -1,12 +1,10 @@
1config DRM_TEGRA 1config DRM_TEGRA
2 tristate "NVIDIA Tegra DRM" 2 bool "NVIDIA Tegra DRM"
3 depends on DRM && OF && ARCH_TEGRA 3 depends on DRM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select FB_SYS_FILLRECT
6 select DRM_KMS_CMA_HELPER 6 select FB_SYS_COPYAREA
7 select FB_CFB_FILLRECT 7 select FB_SYS_IMAGEBLIT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 help 8 help
11 Choose this option if you have an NVIDIA Tegra SoC. 9 Choose this option if you have an NVIDIA Tegra SoC.
12 10
@@ -15,6 +13,14 @@ config DRM_TEGRA
15 13
16if DRM_TEGRA 14if DRM_TEGRA
17 15
16config DRM_TEGRA_STAGING
17 bool "Enable HOST1X interface"
18 depends on STAGING
19 help
20 Say yes if HOST1X should be available for userspace DRM users.
21
22 If unsure, choose N.
23
18config DRM_TEGRA_DEBUG 24config DRM_TEGRA_DEBUG
19 bool "NVIDIA Tegra DRM debug support" 25 bool "NVIDIA Tegra DRM debug support"
20 help 26 help
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/host1x/drm/dc.c
index de94707b9dbe..1e2060324f02 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -14,8 +14,10 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk/tegra.h> 15#include <linux/clk/tegra.h>
16 16
17#include "drm.h" 17#include "host1x_client.h"
18#include "dc.h" 18#include "dc.h"
19#include "drm.h"
20#include "gem.h"
19 21
20struct tegra_plane { 22struct tegra_plane {
21 struct drm_plane base; 23 struct drm_plane base;
@@ -51,9 +53,9 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
51 window.bits_per_pixel = fb->bits_per_pixel; 53 window.bits_per_pixel = fb->bits_per_pixel;
52 54
53 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 55 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
54 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i); 56 struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
55 57
56 window.base[i] = gem->paddr + fb->offsets[i]; 58 window.base[i] = bo->paddr + fb->offsets[i];
57 59
58 /* 60 /*
59 * Tegra doesn't support different strides for U and V planes 61 * Tegra doesn't support different strides for U and V planes
@@ -103,7 +105,9 @@ static const struct drm_plane_funcs tegra_plane_funcs = {
103}; 105};
104 106
105static const uint32_t plane_formats[] = { 107static const uint32_t plane_formats[] = {
108 DRM_FORMAT_XBGR8888,
106 DRM_FORMAT_XRGB8888, 109 DRM_FORMAT_XRGB8888,
110 DRM_FORMAT_RGB565,
107 DRM_FORMAT_UYVY, 111 DRM_FORMAT_UYVY,
108 DRM_FORMAT_YUV420, 112 DRM_FORMAT_YUV420,
109 DRM_FORMAT_YUV422, 113 DRM_FORMAT_YUV422,
@@ -136,7 +140,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
136static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, 140static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
137 struct drm_framebuffer *fb) 141 struct drm_framebuffer *fb)
138{ 142{
139 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0); 143 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
140 unsigned long value; 144 unsigned long value;
141 145
142 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 146 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -144,7 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
144 value = fb->offsets[0] + y * fb->pitches[0] + 148 value = fb->offsets[0] + y * fb->pitches[0] +
145 x * fb->bits_per_pixel / 8; 149 x * fb->bits_per_pixel / 8;
146 150
147 tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR); 151 tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
148 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); 152 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
149 153
150 value = GENERAL_UPDATE | WIN_A_UPDATE; 154 value = GENERAL_UPDATE | WIN_A_UPDATE;
@@ -186,20 +190,20 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
186{ 190{
187 struct drm_device *drm = dc->base.dev; 191 struct drm_device *drm = dc->base.dev;
188 struct drm_crtc *crtc = &dc->base; 192 struct drm_crtc *crtc = &dc->base;
189 struct drm_gem_cma_object *gem;
190 unsigned long flags, base; 193 unsigned long flags, base;
194 struct tegra_bo *bo;
191 195
192 if (!dc->event) 196 if (!dc->event)
193 return; 197 return;
194 198
195 gem = drm_fb_cma_get_gem_obj(crtc->fb, 0); 199 bo = tegra_fb_get_plane(crtc->fb, 0);
196 200
197 /* check if new start address has been latched */ 201 /* check if new start address has been latched */
198 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); 202 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
199 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); 203 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
200 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); 204 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
201 205
202 if (base == gem->paddr + crtc->fb->offsets[0]) { 206 if (base == bo->paddr + crtc->fb->offsets[0]) {
203 spin_lock_irqsave(&drm->event_lock, flags); 207 spin_lock_irqsave(&drm->event_lock, flags);
204 drm_send_vblank_event(drm, dc->pipe, dc->event); 208 drm_send_vblank_event(drm, dc->pipe, dc->event);
205 drm_vblank_put(drm, dc->pipe); 209 drm_vblank_put(drm, dc->pipe);
@@ -541,6 +545,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
541unsigned int tegra_dc_format(uint32_t format) 545unsigned int tegra_dc_format(uint32_t format)
542{ 546{
543 switch (format) { 547 switch (format) {
548 case DRM_FORMAT_XBGR8888:
549 return WIN_COLOR_DEPTH_R8G8B8A8;
550
544 case DRM_FORMAT_XRGB8888: 551 case DRM_FORMAT_XRGB8888:
545 return WIN_COLOR_DEPTH_B8G8R8A8; 552 return WIN_COLOR_DEPTH_B8G8R8A8;
546 553
@@ -569,7 +576,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
569 struct drm_display_mode *adjusted, 576 struct drm_display_mode *adjusted,
570 int x, int y, struct drm_framebuffer *old_fb) 577 int x, int y, struct drm_framebuffer *old_fb)
571{ 578{
572 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0); 579 struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
573 struct tegra_dc *dc = to_tegra_dc(crtc); 580 struct tegra_dc *dc = to_tegra_dc(crtc);
574 struct tegra_dc_window window; 581 struct tegra_dc_window window;
575 unsigned long div, value; 582 unsigned long div, value;
@@ -616,7 +623,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
616 window.format = tegra_dc_format(crtc->fb->pixel_format); 623 window.format = tegra_dc_format(crtc->fb->pixel_format);
617 window.bits_per_pixel = crtc->fb->bits_per_pixel; 624 window.bits_per_pixel = crtc->fb->bits_per_pixel;
618 window.stride[0] = crtc->fb->pitches[0]; 625 window.stride[0] = crtc->fb->pitches[0];
619 window.base[0] = gem->paddr; 626 window.base[0] = bo->paddr;
620 627
621 err = tegra_dc_setup_window(dc, 0, &window); 628 err = tegra_dc_setup_window(dc, 0, &window);
622 if (err < 0) 629 if (err < 0)
@@ -1097,7 +1104,7 @@ static const struct host1x_client_ops dc_client_ops = {
1097 1104
1098static int tegra_dc_probe(struct platform_device *pdev) 1105static int tegra_dc_probe(struct platform_device *pdev)
1099{ 1106{
1100 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1107 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1101 struct resource *regs; 1108 struct resource *regs;
1102 struct tegra_dc *dc; 1109 struct tegra_dc *dc;
1103 int err; 1110 int err;
@@ -1160,7 +1167,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
1160 1167
1161static int tegra_dc_remove(struct platform_device *pdev) 1168static int tegra_dc_remove(struct platform_device *pdev)
1162{ 1169{
1163 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); 1170 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1164 struct tegra_dc *dc = platform_get_drvdata(pdev); 1171 struct tegra_dc *dc = platform_get_drvdata(pdev);
1165 int err; 1172 int err;
1166 1173
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/host1x/drm/dc.h
index 79eaec9aac77..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/host1x/drm/dc.h
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
new file mode 100644
index 000000000000..2b561c9118c6
--- /dev/null
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -0,0 +1,640 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include <drm/drm.h>
18#include <drm/drmP.h>
19
20#include "host1x_client.h"
21#include "dev.h"
22#include "drm.h"
23#include "gem.h"
24#include "syncpt.h"
25
26#define DRIVER_NAME "tegra"
27#define DRIVER_DESC "NVIDIA Tegra graphics"
28#define DRIVER_DATE "20120330"
29#define DRIVER_MAJOR 0
30#define DRIVER_MINOR 0
31#define DRIVER_PATCHLEVEL 0
32
33struct host1x_drm_client {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39static int host1x_add_drm_client(struct host1x_drm *host1x,
40 struct device_node *np)
41{
42 struct host1x_drm_client *client;
43
44 client = kzalloc(sizeof(*client), GFP_KERNEL);
45 if (!client)
46 return -ENOMEM;
47
48 INIT_LIST_HEAD(&client->list);
49 client->np = of_node_get(np);
50
51 list_add_tail(&client->list, &host1x->drm_clients);
52
53 return 0;
54}
55
56static int host1x_activate_drm_client(struct host1x_drm *host1x,
57 struct host1x_drm_client *drm,
58 struct host1x_client *client)
59{
60 mutex_lock(&host1x->drm_clients_lock);
61 list_del_init(&drm->list);
62 list_add_tail(&drm->list, &host1x->drm_active);
63 drm->client = client;
64 mutex_unlock(&host1x->drm_clients_lock);
65
66 return 0;
67}
68
69static int host1x_remove_drm_client(struct host1x_drm *host1x,
70 struct host1x_drm_client *client)
71{
72 mutex_lock(&host1x->drm_clients_lock);
73 list_del_init(&client->list);
74 mutex_unlock(&host1x->drm_clients_lock);
75
76 of_node_put(client->np);
77 kfree(client);
78
79 return 0;
80}
81
82static int host1x_parse_dt(struct host1x_drm *host1x)
83{
84 static const char * const compat[] = {
85 "nvidia,tegra20-dc",
86 "nvidia,tegra20-hdmi",
87 "nvidia,tegra20-gr2d",
88 "nvidia,tegra30-dc",
89 "nvidia,tegra30-hdmi",
90 "nvidia,tegra30-gr2d",
91 };
92 unsigned int i;
93 int err;
94
95 for (i = 0; i < ARRAY_SIZE(compat); i++) {
96 struct device_node *np;
97
98 for_each_child_of_node(host1x->dev->of_node, np) {
99 if (of_device_is_compatible(np, compat[i]) &&
100 of_device_is_available(np)) {
101 err = host1x_add_drm_client(host1x, np);
102 if (err < 0)
103 return err;
104 }
105 }
106 }
107
108 return 0;
109}
110
111int host1x_drm_alloc(struct platform_device *pdev)
112{
113 struct host1x_drm *host1x;
114 int err;
115
116 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
117 if (!host1x)
118 return -ENOMEM;
119
120 mutex_init(&host1x->drm_clients_lock);
121 INIT_LIST_HEAD(&host1x->drm_clients);
122 INIT_LIST_HEAD(&host1x->drm_active);
123 mutex_init(&host1x->clients_lock);
124 INIT_LIST_HEAD(&host1x->clients);
125 host1x->dev = &pdev->dev;
126
127 err = host1x_parse_dt(host1x);
128 if (err < 0) {
129 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
130 return err;
131 }
132
133 host1x_set_drm_data(&pdev->dev, host1x);
134
135 return 0;
136}
137
138int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
139{
140 struct host1x_client *client;
141
142 mutex_lock(&host1x->clients_lock);
143
144 list_for_each_entry(client, &host1x->clients, list) {
145 if (client->ops && client->ops->drm_init) {
146 int err = client->ops->drm_init(client, drm);
147			if (err < 0) {
148				dev_err(host1x->dev, "DRM setup failed for %s: %d\n",
149					dev_name(client->dev), err);
150				mutex_unlock(&host1x->clients_lock);
151				return err;
152			}
153 }
154 }
155
156 mutex_unlock(&host1x->clients_lock);
157
158 return 0;
159}
160
161int host1x_drm_exit(struct host1x_drm *host1x)
162{
163 struct platform_device *pdev = to_platform_device(host1x->dev);
164 struct host1x_client *client;
165
166 if (!host1x->drm)
167 return 0;
168
169 mutex_lock(&host1x->clients_lock);
170
171 list_for_each_entry_reverse(client, &host1x->clients, list) {
172 if (client->ops && client->ops->drm_exit) {
173 int err = client->ops->drm_exit(client);
174			if (err < 0) {
175				dev_err(host1x->dev, "DRM cleanup failed for %s: %d\n",
176					dev_name(client->dev), err);
177				mutex_unlock(&host1x->clients_lock);
178				return err;
179			}
180 }
181 }
182
183 mutex_unlock(&host1x->clients_lock);
184
185 drm_platform_exit(&tegra_drm_driver, pdev);
186 host1x->drm = NULL;
187
188 return 0;
189}
190
191int host1x_register_client(struct host1x_drm *host1x,
192 struct host1x_client *client)
193{
194 struct host1x_drm_client *drm, *tmp;
195 int err;
196
197 mutex_lock(&host1x->clients_lock);
198 list_add_tail(&client->list, &host1x->clients);
199 mutex_unlock(&host1x->clients_lock);
200
201 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
202 if (drm->np == client->dev->of_node)
203 host1x_activate_drm_client(host1x, drm, client);
204
205 if (list_empty(&host1x->drm_clients)) {
206 struct platform_device *pdev = to_platform_device(host1x->dev);
207
208 err = drm_platform_init(&tegra_drm_driver, pdev);
209 if (err < 0) {
210 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
211 return err;
212 }
213 }
214
215 return 0;
216}
217
218int host1x_unregister_client(struct host1x_drm *host1x,
219 struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
225 if (drm->client == client) {
226 err = host1x_drm_exit(host1x);
227 if (err < 0) {
228 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
229 err);
230 return err;
231 }
232
233 host1x_remove_drm_client(host1x, drm);
234 break;
235 }
236 }
237
238 mutex_lock(&host1x->clients_lock);
239 list_del_init(&client->list);
240 mutex_unlock(&host1x->clients_lock);
241
242 return 0;
243}
244
245static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
246{
247 struct host1x_drm *host1x;
248 int err;
249
250 host1x = host1x_get_drm_data(drm->dev);
251 drm->dev_private = host1x;
252 host1x->drm = drm;
253
254 drm_mode_config_init(drm);
255
256 err = host1x_drm_init(host1x, drm);
257 if (err < 0)
258 return err;
259
260 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
261 if (err < 0)
262 return err;
263
264 err = tegra_drm_fb_init(drm);
265 if (err < 0)
266 return err;
267
268 drm_kms_helper_poll_init(drm);
269
270 return 0;
271}
272
273static int tegra_drm_unload(struct drm_device *drm)
274{
275 drm_kms_helper_poll_fini(drm);
276 tegra_drm_fb_exit(drm);
277
278 drm_mode_config_cleanup(drm);
279
280 return 0;
281}
282
283static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
284{
285 struct host1x_drm_file *fpriv;
286
287 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
288 if (!fpriv)
289 return -ENOMEM;
290
291 INIT_LIST_HEAD(&fpriv->contexts);
292 filp->driver_priv = fpriv;
293
294 return 0;
295}
296
297static void host1x_drm_context_free(struct host1x_drm_context *context)
298{
299 context->client->ops->close_channel(context);
300 kfree(context);
301}
302
303static void tegra_drm_lastclose(struct drm_device *drm)
304{
305 struct host1x_drm *host1x = drm->dev_private;
306
307 tegra_fbdev_restore_mode(host1x->fbdev);
308}
309
310#ifdef CONFIG_DRM_TEGRA_STAGING
311static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
312 struct host1x_drm_context *context)
313{
314 struct host1x_drm_context *ctx;
315
316 list_for_each_entry(ctx, &file->contexts, list)
317 if (ctx == context)
318 return true;
319
320 return false;
321}
322
323static int tegra_gem_create(struct drm_device *drm, void *data,
324 struct drm_file *file)
325{
326 struct drm_tegra_gem_create *args = data;
327 struct tegra_bo *bo;
328
329 bo = tegra_bo_create_with_handle(file, drm, args->size,
330 &args->handle);
331 if (IS_ERR(bo))
332 return PTR_ERR(bo);
333
334 return 0;
335}
336
337static int tegra_gem_mmap(struct drm_device *drm, void *data,
338 struct drm_file *file)
339{
340 struct drm_tegra_gem_mmap *args = data;
341 struct drm_gem_object *gem;
342 struct tegra_bo *bo;
343
344 gem = drm_gem_object_lookup(drm, file, args->handle);
345 if (!gem)
346 return -EINVAL;
347
348 bo = to_tegra_bo(gem);
349
350 args->offset = tegra_bo_get_mmap_offset(bo);
351
352 drm_gem_object_unreference(gem);
353
354 return 0;
355}
356
357static int tegra_syncpt_read(struct drm_device *drm, void *data,
358 struct drm_file *file)
359{
360 struct drm_tegra_syncpt_read *args = data;
361 struct host1x *host = dev_get_drvdata(drm->dev);
362 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
363
364 if (!sp)
365 return -EINVAL;
366
367 args->value = host1x_syncpt_read_min(sp);
368 return 0;
369}
370
371static int tegra_syncpt_incr(struct drm_device *drm, void *data,
372 struct drm_file *file)
373{
374 struct drm_tegra_syncpt_incr *args = data;
375 struct host1x *host = dev_get_drvdata(drm->dev);
376 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
377
378 if (!sp)
379 return -EINVAL;
380
381 host1x_syncpt_incr(sp);
382 return 0;
383}
384
385static int tegra_syncpt_wait(struct drm_device *drm, void *data,
386 struct drm_file *file)
387{
388 struct drm_tegra_syncpt_wait *args = data;
389 struct host1x *host = dev_get_drvdata(drm->dev);
390 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
391
392 if (!sp)
393 return -EINVAL;
394
395 return host1x_syncpt_wait(sp, args->thresh, args->timeout,
396 &args->value);
397}
398
399static int tegra_open_channel(struct drm_device *drm, void *data,
400 struct drm_file *file)
401{
402 struct drm_tegra_open_channel *args = data;
403 struct host1x_client *client;
404 struct host1x_drm_context *context;
405 struct host1x_drm_file *fpriv = file->driver_priv;
406 struct host1x_drm *host1x = drm->dev_private;
407 int err = -ENODEV;
408
409 context = kzalloc(sizeof(*context), GFP_KERNEL);
410 if (!context)
411 return -ENOMEM;
412
413 list_for_each_entry(client, &host1x->clients, list)
414 if (client->class == args->client) {
415 err = client->ops->open_channel(client, context);
416 if (err)
417 break;
418
419 context->client = client;
420 list_add(&context->list, &fpriv->contexts);
421 args->context = (uintptr_t)context;
422 return 0;
423 }
424
425 kfree(context);
426 return err;
427}
428
429static int tegra_close_channel(struct drm_device *drm, void *data,
430 struct drm_file *file)
431{
432 struct drm_tegra_close_channel *args = data;
433 struct host1x_drm_file *fpriv = file->driver_priv;
434 struct host1x_drm_context *context =
435 (struct host1x_drm_context *)(uintptr_t)args->context;
436
437 if (!host1x_drm_file_owns_context(fpriv, context))
438 return -EINVAL;
439
440 list_del(&context->list);
441 host1x_drm_context_free(context);
442
443 return 0;
444}
445
446static int tegra_get_syncpt(struct drm_device *drm, void *data,
447 struct drm_file *file)
448{
449 struct drm_tegra_get_syncpt *args = data;
450 struct host1x_drm_file *fpriv = file->driver_priv;
451 struct host1x_drm_context *context =
452 (struct host1x_drm_context *)(uintptr_t)args->context;
453 struct host1x_syncpt *syncpt;
454
455 if (!host1x_drm_file_owns_context(fpriv, context))
456 return -ENODEV;
457
458 if (args->index >= context->client->num_syncpts)
459 return -EINVAL;
460
461 syncpt = context->client->syncpts[args->index];
462 args->id = host1x_syncpt_id(syncpt);
463
464 return 0;
465}
466
467static int tegra_submit(struct drm_device *drm, void *data,
468 struct drm_file *file)
469{
470 struct drm_tegra_submit *args = data;
471 struct host1x_drm_file *fpriv = file->driver_priv;
472 struct host1x_drm_context *context =
473 (struct host1x_drm_context *)(uintptr_t)args->context;
474
475 if (!host1x_drm_file_owns_context(fpriv, context))
476 return -ENODEV;
477
478 return context->client->ops->submit(context, args, drm, file);
479}
480#endif
481
482static struct drm_ioctl_desc tegra_drm_ioctls[] = {
483#ifdef CONFIG_DRM_TEGRA_STAGING
484 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
485 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
486 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
487 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
488 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
489 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
490 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
491 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
492 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
493#endif
494};
495
496static const struct file_operations tegra_drm_fops = {
497 .owner = THIS_MODULE,
498 .open = drm_open,
499 .release = drm_release,
500 .unlocked_ioctl = drm_ioctl,
501 .mmap = tegra_drm_mmap,
502 .poll = drm_poll,
503 .fasync = drm_fasync,
504 .read = drm_read,
505#ifdef CONFIG_COMPAT
506 .compat_ioctl = drm_compat_ioctl,
507#endif
508 .llseek = noop_llseek,
509};
510
511static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
512{
513 struct drm_crtc *crtc;
514
515 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
516 struct tegra_dc *dc = to_tegra_dc(crtc);
517
518 if (dc->pipe == pipe)
519 return crtc;
520 }
521
522 return NULL;
523}
524
525static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
526{
527 /* TODO: implement real hardware counter using syncpoints */
528 return drm_vblank_count(dev, crtc);
529}
530
531static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
532{
533 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
534 struct tegra_dc *dc;
535
536 if (!crtc)
537 return -ENODEV;
538
539 dc = to_tegra_dc(crtc);
540 tegra_dc_enable_vblank(dc);
541 return 0;
542}
543
544static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
545{
546 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
547
548 if (crtc)
549 tegra_dc_disable_vblank(to_tegra_dc(crtc));
550}
552
553static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
554{
555 struct host1x_drm_file *fpriv = file->driver_priv;
556 struct host1x_drm_context *context, *tmp;
557 struct drm_crtc *crtc;
558
559 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
560 tegra_dc_cancel_page_flip(crtc, file);
561
562 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
563 host1x_drm_context_free(context);
564
565 kfree(fpriv);
566}
567
568#ifdef CONFIG_DEBUG_FS
569static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
570{
571 struct drm_info_node *node = (struct drm_info_node *)s->private;
572 struct drm_device *drm = node->minor->dev;
573 struct drm_framebuffer *fb;
574
575 mutex_lock(&drm->mode_config.fb_lock);
576
577 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
578 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
579 fb->base.id, fb->width, fb->height, fb->depth,
580 fb->bits_per_pixel,
581 atomic_read(&fb->refcount.refcount));
582 }
583
584 mutex_unlock(&drm->mode_config.fb_lock);
585
586 return 0;
587}
588
589static struct drm_info_list tegra_debugfs_list[] = {
590 { "framebuffers", tegra_debugfs_framebuffers, 0 },
591};
592
593static int tegra_debugfs_init(struct drm_minor *minor)
594{
595 return drm_debugfs_create_files(tegra_debugfs_list,
596 ARRAY_SIZE(tegra_debugfs_list),
597 minor->debugfs_root, minor);
598}
599
600static void tegra_debugfs_cleanup(struct drm_minor *minor)
601{
602 drm_debugfs_remove_files(tegra_debugfs_list,
603 ARRAY_SIZE(tegra_debugfs_list), minor);
604}
605#endif
606
607struct drm_driver tegra_drm_driver = {
608 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
609 .load = tegra_drm_load,
610 .unload = tegra_drm_unload,
611 .open = tegra_drm_open,
612 .preclose = tegra_drm_preclose,
613 .lastclose = tegra_drm_lastclose,
614
615 .get_vblank_counter = tegra_drm_get_vblank_counter,
616 .enable_vblank = tegra_drm_enable_vblank,
617 .disable_vblank = tegra_drm_disable_vblank,
618
619#if defined(CONFIG_DEBUG_FS)
620 .debugfs_init = tegra_debugfs_init,
621 .debugfs_cleanup = tegra_debugfs_cleanup,
622#endif
623
624 .gem_free_object = tegra_bo_free_object,
625 .gem_vm_ops = &tegra_bo_vm_ops,
626 .dumb_create = tegra_bo_dumb_create,
627 .dumb_map_offset = tegra_bo_dumb_map_offset,
628 .dumb_destroy = tegra_bo_dumb_destroy,
629
630 .ioctls = tegra_drm_ioctls,
631 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
632 .fops = &tegra_drm_fops,
633
634 .name = DRIVER_NAME,
635 .desc = DRIVER_DESC,
636 .date = DRIVER_DATE,
637 .major = DRIVER_MAJOR,
638 .minor = DRIVER_MINOR,
639 .patchlevel = DRIVER_PATCHLEVEL,
640};
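
Each of the staging ioctls above trusts the kernel pointer that userspace echoes back in args->context only after host1x_drm_file_owns_context() has confirmed that it is on the file's private list of open contexts. The helper itself is not part of this hunk; a minimal sketch of what such a check can look like, given the list layout declared in drm.h:

	static int host1x_drm_file_owns_context(struct host1x_drm_file *file,
						struct host1x_drm_context *context)
	{
		struct host1x_drm_context *ctx;

		/* accept only pointers previously handed out by open_channel */
		list_for_each_entry(ctx, &file->contexts, list)
			if (ctx == context)
				return 1;

		return 0;
	}

Because the pointer is only compared, never dereferenced, a stale or forged value from userspace is rejected before it can do any harm.
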
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/host1x/drm/drm.h
index 6dd75a2600eb..02ce020f2575 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/host1x/drm/drm.h
@@ -1,24 +1,36 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-#ifndef TEGRA_DRM_H
-#define TEGRA_DRM_H 1
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fixed.h>
+#include <uapi/drm/tegra_drm.h>
 
-struct host1x {
+#include "host1x.h"
+
+struct tegra_fb {
+	struct drm_framebuffer base;
+	struct tegra_bo **planes;
+	unsigned int num_planes;
+};
+
+struct tegra_fbdev {
+	struct drm_fb_helper base;
+	struct tegra_fb *fb;
+};
+
+struct host1x_drm {
 	struct drm_device *drm;
 	struct device *dev;
 	void __iomem *regs;
@@ -33,31 +45,53 @@ struct host1x {
 	struct mutex clients_lock;
 	struct list_head clients;
 
-	struct drm_fbdev_cma *fbdev;
+	struct tegra_fbdev *fbdev;
 };
 
 struct host1x_client;
 
+struct host1x_drm_context {
+	struct host1x_client *client;
+	struct host1x_channel *channel;
+	struct list_head list;
+};
+
 struct host1x_client_ops {
 	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
 	int (*drm_exit)(struct host1x_client *client);
+	int (*open_channel)(struct host1x_client *client,
+			    struct host1x_drm_context *context);
+	void (*close_channel)(struct host1x_drm_context *context);
+	int (*submit)(struct host1x_drm_context *context,
+		      struct drm_tegra_submit *args, struct drm_device *drm,
+		      struct drm_file *file);
+};
+
+struct host1x_drm_file {
+	struct list_head contexts;
 };
 
 struct host1x_client {
-	struct host1x *host1x;
+	struct host1x_drm *host1x;
	struct device *dev;
 
 	const struct host1x_client_ops *ops;
 
+	enum host1x_class class;
+	struct host1x_channel *channel;
+
+	struct host1x_syncpt **syncpts;
+	unsigned int num_syncpts;
+
 	struct list_head list;
 };
 
-extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x *host1x);
+extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x_drm *host1x);
 
-extern int host1x_register_client(struct host1x *host1x,
-				  struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x *host1x,
-				    struct host1x_client *client);
+extern int host1x_register_client(struct host1x_drm *host1x,
+				  struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x_drm *host1x,
+				    struct host1x_client *client);
 
 struct tegra_output;
@@ -66,7 +100,7 @@ struct tegra_dc {
 	struct host1x_client client;
 	spinlock_t lock;
 
-	struct host1x *host1x;
+	struct host1x_drm *host1x;
 	struct device *dev;
 
 	struct drm_crtc base;
@@ -226,12 +260,12 @@ extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+				    unsigned int index);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
+extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct platform_driver tegra_host1x_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_dc_driver;
 extern struct drm_driver tegra_drm_driver;
 
-#endif /* TEGRA_DRM_H */
+#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/host1x/drm/fb.c
new file mode 100644
index 000000000000..979a3e32b78b
--- /dev/null
+++ b/drivers/gpu/host1x/drm/fb.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (C) 2012-2013 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * Based on the KMS/FB CMA helpers
6 * Copyright (C) 2012 Analog Device Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14
15#include "drm.h"
16#include "gem.h"
17
18static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
19{
20 return container_of(fb, struct tegra_fb, base);
21}
22
23static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
24{
25 return container_of(helper, struct tegra_fbdev, base);
26}
27
28struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
29 unsigned int index)
30{
31 struct tegra_fb *fb = to_tegra_fb(framebuffer);
32
33 if (index >= drm_format_num_planes(framebuffer->pixel_format))
34 return NULL;
35
36 return fb->planes[index];
37}
38
39static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
40{
41 struct tegra_fb *fb = to_tegra_fb(framebuffer);
42 unsigned int i;
43
44 for (i = 0; i < fb->num_planes; i++) {
45 struct tegra_bo *bo = fb->planes[i];
46
47 if (bo)
48 drm_gem_object_unreference_unlocked(&bo->gem);
49 }
50
51 drm_framebuffer_cleanup(framebuffer);
52 kfree(fb->planes);
53 kfree(fb);
54}
55
56static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
57 struct drm_file *file, unsigned int *handle)
58{
59 struct tegra_fb *fb = to_tegra_fb(framebuffer);
60
61 return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
62}
63
64static struct drm_framebuffer_funcs tegra_fb_funcs = {
65 .destroy = tegra_fb_destroy,
66 .create_handle = tegra_fb_create_handle,
67};
68
69static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
70 struct drm_mode_fb_cmd2 *mode_cmd,
71 struct tegra_bo **planes,
72 unsigned int num_planes)
73{
74 struct tegra_fb *fb;
75 unsigned int i;
76 int err;
77
78 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
79 if (!fb)
80 return ERR_PTR(-ENOMEM);
81
82 fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
83 if (!fb->planes)
84 return ERR_PTR(-ENOMEM);
85
86 fb->num_planes = num_planes;
87
88 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
89
90 for (i = 0; i < fb->num_planes; i++)
91 fb->planes[i] = planes[i];
92
93 err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
94 if (err < 0) {
95 dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
96 err);
97 kfree(fb->planes);
98 kfree(fb);
99 return ERR_PTR(err);
100 }
101
102 return fb;
103}
104
105static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
106 struct drm_file *file,
107 struct drm_mode_fb_cmd2 *cmd)
108{
109 unsigned int hsub, vsub, i;
110 struct tegra_bo *planes[4];
111 struct drm_gem_object *gem;
112 struct tegra_fb *fb;
113 int err;
114
115 hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
116 vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
117
118 for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
119 unsigned int width = cmd->width / (i ? hsub : 1);
120 unsigned int height = cmd->height / (i ? vsub : 1);
121 unsigned int size, bpp;
122
123 gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
124 if (!gem) {
125 err = -ENXIO;
126 goto unreference;
127 }
128
129 bpp = drm_format_plane_cpp(cmd->pixel_format, i);
130
131 size = (height - 1) * cmd->pitches[i] +
132 width * bpp + cmd->offsets[i];
133
134 if (gem->size < size) {
135 err = -EINVAL;
136 goto unreference;
137 }
138
139 planes[i] = to_tegra_bo(gem);
140 }
141
142 fb = tegra_fb_alloc(drm, cmd, planes, i);
143 if (IS_ERR(fb)) {
144 err = PTR_ERR(fb);
145 goto unreference;
146 }
147
148 return &fb->base;
149
150unreference:
151 while (i--)
152 drm_gem_object_unreference_unlocked(&planes[i]->gem);
153
154 return ERR_PTR(err);
155}
156
157static struct fb_ops tegra_fb_ops = {
158 .owner = THIS_MODULE,
159 .fb_fillrect = sys_fillrect,
160 .fb_copyarea = sys_copyarea,
161 .fb_imageblit = sys_imageblit,
162 .fb_check_var = drm_fb_helper_check_var,
163 .fb_set_par = drm_fb_helper_set_par,
164 .fb_blank = drm_fb_helper_blank,
165 .fb_pan_display = drm_fb_helper_pan_display,
166 .fb_setcmap = drm_fb_helper_setcmap,
167};
168
169static int tegra_fbdev_probe(struct drm_fb_helper *helper,
170 struct drm_fb_helper_surface_size *sizes)
171{
172 struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
173 struct drm_device *drm = helper->dev;
174 struct drm_mode_fb_cmd2 cmd = { 0 };
175 unsigned int bytes_per_pixel;
176 struct drm_framebuffer *fb;
177 unsigned long offset;
178 struct fb_info *info;
179 struct tegra_bo *bo;
180 size_t size;
181 int err;
182
183 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
184
185 cmd.width = sizes->surface_width;
186 cmd.height = sizes->surface_height;
187 cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
188 cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
189 sizes->surface_depth);
190
191 size = cmd.pitches[0] * cmd.height;
192
193 bo = tegra_bo_create(drm, size);
194 if (IS_ERR(bo))
195 return PTR_ERR(bo);
196
197 info = framebuffer_alloc(0, drm->dev);
198 if (!info) {
199 dev_err(drm->dev, "failed to allocate framebuffer info\n");
200 tegra_bo_free_object(&bo->gem);
201 return -ENOMEM;
202 }
203
204 fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
205 if (IS_ERR(fbdev->fb)) {
206 dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
207 err = PTR_ERR(fbdev->fb);
208 goto release;
209 }
210
211 fb = &fbdev->fb->base;
212 helper->fb = fb;
213 helper->fbdev = info;
214
215 info->par = helper;
216 info->flags = FBINFO_FLAG_DEFAULT;
217 info->fbops = &tegra_fb_ops;
218
219 err = fb_alloc_cmap(&info->cmap, 256, 0);
220 if (err < 0) {
221 dev_err(drm->dev, "failed to allocate color map: %d\n", err);
222 goto destroy;
223 }
224
225 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
226 drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
227
228 offset = info->var.xoffset * bytes_per_pixel +
229 info->var.yoffset * fb->pitches[0];
230
231 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
232 info->screen_base = bo->vaddr + offset;
233 info->screen_size = size;
234 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
235 info->fix.smem_len = size;
236
237 return 0;
238
239destroy:
240 drm_framebuffer_unregister_private(fb);
241 tegra_fb_destroy(fb);
242release:
243 framebuffer_release(info);
244 return err;
245}
246
247static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
248 .fb_probe = tegra_fbdev_probe,
249};
250
251static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
252 unsigned int preferred_bpp,
253 unsigned int num_crtc,
254 unsigned int max_connectors)
255{
256 struct drm_fb_helper *helper;
257 struct tegra_fbdev *fbdev;
258 int err;
259
260 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
261 if (!fbdev) {
262 dev_err(drm->dev, "failed to allocate DRM fbdev\n");
263 return ERR_PTR(-ENOMEM);
264 }
265
266 fbdev->base.funcs = &tegra_fb_helper_funcs;
267 helper = &fbdev->base;
268
269 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
270 if (err < 0) {
271 dev_err(drm->dev, "failed to initialize DRM FB helper\n");
272 goto free;
273 }
274
275 err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
276 if (err < 0) {
277 dev_err(drm->dev, "failed to add connectors\n");
278 goto fini;
279 }
280
281 drm_helper_disable_unused_functions(drm);
282
283 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
284 if (err < 0) {
285 dev_err(drm->dev, "failed to set initial configuration\n");
286 goto fini;
287 }
288
289 return fbdev;
290
291fini:
292 drm_fb_helper_fini(&fbdev->base);
293free:
294 kfree(fbdev);
295 return ERR_PTR(err);
296}
297
298static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
299{
300 struct fb_info *info = fbdev->base.fbdev;
301
302 if (info) {
303 int err;
304
305 err = unregister_framebuffer(info);
306 if (err < 0)
307 DRM_DEBUG_KMS("failed to unregister framebuffer\n");
308
309 if (info->cmap.len)
310 fb_dealloc_cmap(&info->cmap);
311
312 framebuffer_release(info);
313 }
314
315 if (fbdev->fb) {
316 drm_framebuffer_unregister_private(&fbdev->fb->base);
317 tegra_fb_destroy(&fbdev->fb->base);
318 }
319
320 drm_fb_helper_fini(&fbdev->base);
321 kfree(fbdev);
322}
323
324static void tegra_fb_output_poll_changed(struct drm_device *drm)
325{
326 struct host1x_drm *host1x = drm->dev_private;
327
328 if (host1x->fbdev)
329 drm_fb_helper_hotplug_event(&host1x->fbdev->base);
330}
331
332static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
333 .fb_create = tegra_fb_create,
334 .output_poll_changed = tegra_fb_output_poll_changed,
335};
336
337int tegra_drm_fb_init(struct drm_device *drm)
338{
339 struct host1x_drm *host1x = drm->dev_private;
340 struct tegra_fbdev *fbdev;
341
342 drm->mode_config.min_width = 0;
343 drm->mode_config.min_height = 0;
344
345 drm->mode_config.max_width = 4096;
346 drm->mode_config.max_height = 4096;
347
348 drm->mode_config.funcs = &tegra_drm_mode_funcs;
349
350 fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
351 drm->mode_config.num_connector);
352 if (IS_ERR(fbdev))
353 return PTR_ERR(fbdev);
354
355 host1x->fbdev = fbdev;
356
357 return 0;
358}
359
360void tegra_drm_fb_exit(struct drm_device *drm)
361{
362 struct host1x_drm *host1x = drm->dev_private;
363
364 tegra_fbdev_free(host1x->fbdev);
365}
366
367void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
368{
369 if (fbdev) {
370 drm_modeset_lock_all(fbdev->base.dev);
371 drm_fb_helper_restore_fbdev_mode(&fbdev->base);
372 drm_modeset_unlock_all(fbdev->base.dev);
373 }
374}
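
One detail worth flagging in tegra_fb_alloc() above: if the second kzalloc() (for fb->planes) fails, the function returns ERR_PTR(-ENOMEM) without releasing the fb allocation made a few lines earlier, so the framebuffer struct is leaked. A sketch of the tightened error path (a hypothetical fix, not part of this patch):

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
	if (!fb->planes) {
		kfree(fb);	/* don't leak the framebuffer struct */
		return ERR_PTR(-ENOMEM);
	}
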
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
new file mode 100644
index 000000000000..c5e9a9b494c2
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -0,0 +1,270 @@
1/*
2 * NVIDIA Tegra DRM GEM helper functions
3 *
4 * Copyright (C) 2012 Sascha Hauer, Pengutronix
5 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
6 *
7 * Based on the GEM/CMA helpers
8 *
9 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/mm.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/export.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29
30#include "gem.h"
31
32static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
33{
34 return container_of(bo, struct tegra_bo, base);
35}
36
37static void tegra_bo_put(struct host1x_bo *bo)
38{
39 struct tegra_bo *obj = host1x_to_drm_bo(bo);
40 struct drm_device *drm = obj->gem.dev;
41
42 mutex_lock(&drm->struct_mutex);
43 drm_gem_object_unreference(&obj->gem);
44 mutex_unlock(&drm->struct_mutex);
45}
46
47static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
48{
49 struct tegra_bo *obj = host1x_to_drm_bo(bo);
50
51 return obj->paddr;
52}
53
54static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
55{
56}
57
58static void *tegra_bo_mmap(struct host1x_bo *bo)
59{
60 struct tegra_bo *obj = host1x_to_drm_bo(bo);
61
62 return obj->vaddr;
63}
64
65static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
66{
67}
68
69static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
70{
71 struct tegra_bo *obj = host1x_to_drm_bo(bo);
72
73 return obj->vaddr + page * PAGE_SIZE;
74}
75
76static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
77 void *addr)
78{
79}
80
81static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
82{
83 struct tegra_bo *obj = host1x_to_drm_bo(bo);
84 struct drm_device *drm = obj->gem.dev;
85
86 mutex_lock(&drm->struct_mutex);
87 drm_gem_object_reference(&obj->gem);
88 mutex_unlock(&drm->struct_mutex);
89
90 return bo;
91}
92
93const struct host1x_bo_ops tegra_bo_ops = {
94 .get = tegra_bo_get,
95 .put = tegra_bo_put,
96 .pin = tegra_bo_pin,
97 .unpin = tegra_bo_unpin,
98 .mmap = tegra_bo_mmap,
99 .munmap = tegra_bo_munmap,
100 .kmap = tegra_bo_kmap,
101 .kunmap = tegra_bo_kunmap,
102};
103
104static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
105{
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107}
108
109unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
110{
111 return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
112}
113
114struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
115{
116 struct tegra_bo *bo;
117 int err;
118
119 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
120 if (!bo)
121 return ERR_PTR(-ENOMEM);
122
123 host1x_bo_init(&bo->base, &tegra_bo_ops);
124 size = round_up(size, PAGE_SIZE);
125
126 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
127 GFP_KERNEL | __GFP_NOWARN);
128 if (!bo->vaddr) {
129 dev_err(drm->dev, "failed to allocate buffer with size %u\n",
130 size);
131 err = -ENOMEM;
132 goto err_dma;
133 }
134
135 err = drm_gem_object_init(drm, &bo->gem, size);
136 if (err)
137 goto err_init;
138
139 err = drm_gem_create_mmap_offset(&bo->gem);
140 if (err)
141 goto err_mmap;
142
143 return bo;
144
145err_mmap:
146 drm_gem_object_release(&bo->gem);
147err_init:
148 tegra_bo_destroy(drm, bo);
149err_dma:
150 kfree(bo);
151
152 return ERR_PTR(err);
153
154}
155
156struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
157 struct drm_device *drm,
158 unsigned int size,
159 unsigned int *handle)
160{
161 struct tegra_bo *bo;
162 int ret;
163
164 bo = tegra_bo_create(drm, size);
165 if (IS_ERR(bo))
166 return bo;
167
168 ret = drm_gem_handle_create(file, &bo->gem, handle);
169 if (ret)
170 goto err;
171
172 drm_gem_object_unreference_unlocked(&bo->gem);
173
174 return bo;
175
176err:
177 tegra_bo_free_object(&bo->gem);
178 return ERR_PTR(ret);
179}
180
181void tegra_bo_free_object(struct drm_gem_object *gem)
182{
183 struct tegra_bo *bo = to_tegra_bo(gem);
184
185 if (gem->map_list.map)
186 drm_gem_free_mmap_offset(gem);
187
188 drm_gem_object_release(gem);
189 tegra_bo_destroy(gem->dev, bo);
190
191 kfree(bo);
192}
193
194int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
195 struct drm_mode_create_dumb *args)
196{
197 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
198 struct tegra_bo *bo;
199
200 if (args->pitch < min_pitch)
201 args->pitch = min_pitch;
202
203 if (args->size < args->pitch * args->height)
204 args->size = args->pitch * args->height;
205
206 bo = tegra_bo_create_with_handle(file, drm, args->size,
207 &args->handle);
208 if (IS_ERR(bo))
209 return PTR_ERR(bo);
210
211 return 0;
212}
213
214int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
215 uint32_t handle, uint64_t *offset)
216{
217 struct drm_gem_object *gem;
218 struct tegra_bo *bo;
219
220 mutex_lock(&drm->struct_mutex);
221
222 gem = drm_gem_object_lookup(drm, file, handle);
223 if (!gem) {
224 dev_err(drm->dev, "failed to lookup GEM object\n");
225 mutex_unlock(&drm->struct_mutex);
226 return -EINVAL;
227 }
228
229 bo = to_tegra_bo(gem);
230
231 *offset = tegra_bo_get_mmap_offset(bo);
232
233 drm_gem_object_unreference(gem);
234
235 mutex_unlock(&drm->struct_mutex);
236
237 return 0;
238}
239
240const struct vm_operations_struct tegra_bo_vm_ops = {
241 .open = drm_gem_vm_open,
242 .close = drm_gem_vm_close,
243};
244
245int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
246{
247 struct drm_gem_object *gem;
248 struct tegra_bo *bo;
249 int ret;
250
251 ret = drm_gem_mmap(file, vma);
252 if (ret)
253 return ret;
254
255 gem = vma->vm_private_data;
256 bo = to_tegra_bo(gem);
257
258 ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
259 vma->vm_end - vma->vm_start, vma->vm_page_prot);
260 if (ret)
261 drm_gem_vm_close(vma);
262
263 return ret;
264}
265
266int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
267 unsigned int handle)
268{
269 return drm_gem_handle_delete(file, handle);
270}
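
The dumb-buffer hooks above implement the generic DRM dumb interface, so a KMS client can allocate, map and draw into a scanout buffer with no Tegra-specific code at all. A sketch of the usual userspace sequence (libdrm headers assumed; the caller fills in width/height/bpp and passes an open /dev/dri/cardN descriptor):

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>

	void *map_dumb_buffer(int fd, struct drm_mode_create_dumb *create)
	{
		struct drm_mode_map_dumb map = { 0 };

		/* serviced by tegra_bo_dumb_create() */
		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, create))
			return MAP_FAILED;

		/* tegra_bo_dumb_map_offset() fills in the fake mmap offset */
		map.handle = create->handle;
		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return MAP_FAILED;

		/* tegra_drm_mmap() backs this mapping via remap_pfn_range() */
		return mmap(NULL, create->size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, map.offset);
	}
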
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
new file mode 100644
index 000000000000..34de2b486eb7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -0,0 +1,59 @@
1/*
2 * Tegra host1x GEM implementation
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_GEM_H
20#define __HOST1X_GEM_H
21
22#include <drm/drm.h>
23#include <drm/drmP.h>
24
25#include "host1x_bo.h"
26
27struct tegra_bo {
28 struct drm_gem_object gem;
29 struct host1x_bo base;
30 dma_addr_t paddr;
31 void *vaddr;
32};
33
34static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
35{
36 return container_of(gem, struct tegra_bo, gem);
37}
38
39extern const struct host1x_bo_ops tegra_bo_ops;
40
41struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
42struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
43 struct drm_device *drm,
44 unsigned int size,
45 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem);
47unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
48int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
49 struct drm_mode_create_dumb *args);
50int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
51 uint32_t handle, uint64_t *offset);
52int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
53 unsigned int handle);
54
55int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
56
57extern const struct vm_operations_struct tegra_bo_vm_ops;
58
59#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644
index 000000000000..6a45ae090ee7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gr2d.c
@@ -0,0 +1,339 @@
1/*
2 * drivers/video/tegra/host/gr2d/gr2d.c
3 *
4 * Tegra Graphics 2D
5 *
6 * Copyright (c) 2012-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25
26#include "channel.h"
27#include "drm.h"
28#include "gem.h"
29#include "job.h"
30#include "host1x.h"
31#include "host1x_bo.h"
32#include "host1x_client.h"
33#include "syncpt.h"
34
35struct gr2d {
36 struct host1x_client client;
37 struct clk *clk;
38 struct host1x_channel *channel;
39 unsigned long *addr_regs;
40};
41
42static inline struct gr2d *to_gr2d(struct host1x_client *client)
43{
44 return container_of(client, struct gr2d, client);
45}
46
47static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
48
49static int gr2d_client_init(struct host1x_client *client,
50 struct drm_device *drm)
51{
52 return 0;
53}
54
55static int gr2d_client_exit(struct host1x_client *client)
56{
57 return 0;
58}
59
60static int gr2d_open_channel(struct host1x_client *client,
61 struct host1x_drm_context *context)
62{
63 struct gr2d *gr2d = to_gr2d(client);
64
65 context->channel = host1x_channel_get(gr2d->channel);
66
67 if (!context->channel)
68 return -ENOMEM;
69
70 return 0;
71}
72
73static void gr2d_close_channel(struct host1x_drm_context *context)
74{
75 host1x_channel_put(context->channel);
76}
77
78static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
79 struct drm_file *file,
80 u32 handle)
81{
82 struct drm_gem_object *gem;
83 struct tegra_bo *bo;
84
85 gem = drm_gem_object_lookup(drm, file, handle);
86 if (!gem)
87 return NULL;
88
89 mutex_lock(&drm->struct_mutex);
90 drm_gem_object_unreference(gem);
91 mutex_unlock(&drm->struct_mutex);
92
93 bo = to_tegra_bo(gem);
94 return &bo->base;
95}
96
97static int gr2d_submit(struct host1x_drm_context *context,
98 struct drm_tegra_submit *args, struct drm_device *drm,
99 struct drm_file *file)
100{
101 struct host1x_job *job;
102 unsigned int num_cmdbufs = args->num_cmdbufs;
103 unsigned int num_relocs = args->num_relocs;
104 unsigned int num_waitchks = args->num_waitchks;
105 struct drm_tegra_cmdbuf __user *cmdbufs =
106 (void __user *)(uintptr_t)args->cmdbufs;
107 struct drm_tegra_reloc __user *relocs =
108 (void __user *)(uintptr_t)args->relocs;
109 struct drm_tegra_waitchk __user *waitchks =
110 (void __user *)(uintptr_t)args->waitchks;
111 struct drm_tegra_syncpt syncpt;
112 int err;
113
114 /* We don't yet support other than one syncpt_incr struct per submit */
115 if (args->num_syncpts != 1)
116 return -EINVAL;
117
118 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
119 args->num_relocs, args->num_waitchks);
120 if (!job)
121 return -ENOMEM;
122
123 job->num_relocs = args->num_relocs;
124 job->num_waitchk = args->num_waitchks;
125 job->client = (u32)args->context;
126 job->class = context->client->class;
127 job->serialize = true;
128
129 while (num_cmdbufs) {
130 struct drm_tegra_cmdbuf cmdbuf;
131 struct host1x_bo *bo;
132
133 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
134 if (err)
135 goto fail;
136
137 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
138 if (!bo)
139 goto fail;
140
141 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
142 num_cmdbufs--;
143 cmdbufs++;
144 }
145
146 err = copy_from_user(job->relocarray, relocs,
147 sizeof(*relocs) * num_relocs);
148 if (err)
149 goto fail;
150
151 while (num_relocs--) {
152 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
153 struct host1x_bo *cmdbuf, *target;
154
155 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
156 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
157
158 reloc->cmdbuf = cmdbuf;
159 reloc->target = target;
160
161 if (!reloc->target || !reloc->cmdbuf)
162 goto fail;
163 }
164
165 err = copy_from_user(job->waitchk, waitchks,
166 sizeof(*waitchks) * num_waitchks);
167 if (err)
168 goto fail;
169
170 err = copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
171 sizeof(syncpt));
172 if (err)
173 goto fail;
174
175 job->syncpt_id = syncpt.id;
176 job->syncpt_incrs = syncpt.incrs;
177 job->timeout = 10000;
178 job->is_addr_reg = gr2d_is_addr_reg;
179
180 if (args->timeout && args->timeout < 10000)
181 job->timeout = args->timeout;
182
183 err = host1x_job_pin(job, context->client->dev);
184 if (err)
185 goto fail;
186
187 err = host1x_job_submit(job);
188 if (err)
189 goto fail_submit;
190
191 args->fence = job->syncpt_end;
192
193 host1x_job_put(job);
194 return 0;
195
196fail_submit:
197 host1x_job_unpin(job);
198fail:
199 host1x_job_put(job);
200 return err;
201}
202
203static struct host1x_client_ops gr2d_client_ops = {
204 .drm_init = gr2d_client_init,
205 .drm_exit = gr2d_client_exit,
206 .open_channel = gr2d_open_channel,
207 .close_channel = gr2d_close_channel,
208 .submit = gr2d_submit,
209};
210
211static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
212{
213 const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
214 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
215 unsigned long *bitmap;
216 int i;
217
218 bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
219 GFP_KERNEL);
220
221 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
222 u32 reg = gr2d_addr_regs[i];
223 bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
224 }
225
226 gr2d->addr_regs = bitmap;
227}
228
229static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
230{
231 struct gr2d *gr2d = dev_get_drvdata(dev);
232
233 switch (class) {
234 case HOST1X_CLASS_HOST1X:
235 return reg == 0x2b;
236 case HOST1X_CLASS_GR2D:
237 case HOST1X_CLASS_GR2D_SB:
238 reg &= 0xff;
239 if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
240 return 1;
241 default:
242 return 0;
243 }
244}
245
246static const struct of_device_id gr2d_match[] = {
247 { .compatible = "nvidia,tegra30-gr2d" },
248 { .compatible = "nvidia,tegra20-gr2d" },
249 { },
250};
251
252static int gr2d_probe(struct platform_device *pdev)
253{
254 struct device *dev = &pdev->dev;
255 struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
256 int err;
257 struct gr2d *gr2d = NULL;
258 struct host1x_syncpt **syncpts;
259
260 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
261 if (!gr2d)
262 return -ENOMEM;
263
264 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
265 if (!syncpts)
266 return -ENOMEM;
267
268 gr2d->clk = devm_clk_get(dev, NULL);
269 if (IS_ERR(gr2d->clk)) {
270 dev_err(dev, "cannot get clock\n");
271 return PTR_ERR(gr2d->clk);
272 }
273
274 err = clk_prepare_enable(gr2d->clk);
275 if (err) {
276 dev_err(dev, "cannot turn on clock\n");
277 return err;
278 }
279
280 gr2d->channel = host1x_channel_request(dev);
281 if (!gr2d->channel)
282 return -ENOMEM;
283
284 *syncpts = host1x_syncpt_request(dev, 0);
285 if (!(*syncpts)) {
286 host1x_channel_free(gr2d->channel);
287 return -ENOMEM;
288 }
289
290 gr2d->client.ops = &gr2d_client_ops;
291 gr2d->client.dev = dev;
292 gr2d->client.class = HOST1X_CLASS_GR2D;
293 gr2d->client.syncpts = syncpts;
294 gr2d->client.num_syncpts = 1;
295
296 err = host1x_register_client(host1x, &gr2d->client);
297 if (err < 0) {
298 dev_err(dev, "failed to register host1x client: %d\n", err);
299 return err;
300 }
301
302 gr2d_init_addr_reg_map(dev, gr2d);
303
304 platform_set_drvdata(pdev, gr2d);
305
306 return 0;
307}
308
309static int __exit gr2d_remove(struct platform_device *pdev)
310{
311 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
312 struct gr2d *gr2d = platform_get_drvdata(pdev);
313 unsigned int i;
314 int err;
315
316 err = host1x_unregister_client(host1x, &gr2d->client);
317 if (err < 0) {
318 dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
319 return err;
320 }
321
322 for (i = 0; i < gr2d->client.num_syncpts; i++)
323 host1x_syncpt_free(gr2d->client.syncpts[i]);
324
325 host1x_channel_free(gr2d->channel);
326 clk_disable_unprepare(gr2d->clk);
327
328 return 0;
329}
330
331struct platform_driver tegra_gr2d_driver = {
332 .probe = gr2d_probe,
333 .remove = __exit_p(gr2d_remove),
334 .driver = {
335 .owner = THIS_MODULE,
336 .name = "gr2d",
337 .of_match_table = gr2d_match,
338 }
339};
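
Two error paths in gr2d_submit() above are easy to misread: copy_from_user() returns the number of bytes it could not copy (a positive count, not a negative errno), and a failed host1x_bo_lookup() jumps to the fail label while err still holds the 0 left behind by the preceding copy. A hardened sketch of the cmdbuf loop that reports both cases explicitly (a hypothetical rework, not what this patch does):

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;	/* translate the byte count */
			goto fail;
		}

		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;	/* don't reach fail with err == 0 */
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}
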
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index bb747f6cd1a4..01097da09f7f 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -22,6 +22,7 @@
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
+#include "host1x_client.h"
 
 struct tegra_hdmi {
 	struct host1x_client client;
@@ -1189,7 +1190,7 @@ static const struct host1x_client_ops hdmi_client_ops = {
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi;
 	struct resource *regs;
 	int err;
@@ -1278,7 +1279,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
 	int err;
 
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/host1x/drm/hdmi.h
index 52ac36e08ccb..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/host1x/drm/hdmi.h
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/host1x/drm/output.c
index 8140fc6c34d8..8140fc6c34d8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/host1x/drm/output.c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f20260..ed4416f20260 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
new file mode 100644
index 000000000000..a2bc1e65e972
--- /dev/null
+++ b/drivers/gpu/host1x/host1x.h
@@ -0,0 +1,30 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __LINUX_HOST1X_H
22#define __LINUX_HOST1X_H
23
24enum host1x_class {
25 HOST1X_CLASS_HOST1X = 0x1,
26 HOST1X_CLASS_GR2D = 0x51,
27 HOST1X_CLASS_GR2D_SB = 0x52
28};
29
30#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
new file mode 100644
index 000000000000..4c1f10bd773d
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_bo.h
@@ -0,0 +1,87 @@
1/*
2 * Tegra host1x Memory Management Abstraction header
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _HOST1X_BO_H
20#define _HOST1X_BO_H
21
22struct host1x_bo;
23
24struct host1x_bo_ops {
25 struct host1x_bo *(*get)(struct host1x_bo *bo);
26 void (*put)(struct host1x_bo *bo);
27 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
28 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
29 void *(*mmap)(struct host1x_bo *bo);
30 void (*munmap)(struct host1x_bo *bo, void *addr);
31 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
32 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
33};
34
35struct host1x_bo {
36 const struct host1x_bo_ops *ops;
37};
38
39static inline void host1x_bo_init(struct host1x_bo *bo,
40 const struct host1x_bo_ops *ops)
41{
42 bo->ops = ops;
43}
44
45static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
46{
47 return bo->ops->get(bo);
48}
49
50static inline void host1x_bo_put(struct host1x_bo *bo)
51{
52 bo->ops->put(bo);
53}
54
55static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
56 struct sg_table **sgt)
57{
58 return bo->ops->pin(bo, sgt);
59}
60
61static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
62{
63 bo->ops->unpin(bo, sgt);
64}
65
66static inline void *host1x_bo_mmap(struct host1x_bo *bo)
67{
68 return bo->ops->mmap(bo);
69}
70
71static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
72{
73 bo->ops->munmap(bo, addr);
74}
75
76static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
77{
78 return bo->ops->kmap(bo, pagenum);
79}
80
81static inline void host1x_bo_kunmap(struct host1x_bo *bo,
82 unsigned int pagenum, void *addr)
83{
84 bo->ops->kunmap(bo, pagenum, addr);
85}
86
87#endif
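
Every access the host1x core makes to buffer memory funnels through this ops table, so the core never needs to know whether a buffer came from the Tegra DRM GEM allocator or some other backend. A short sketch of how a caller pins a buffer around a hardware operation using only the wrappers above (program_engine() is a hypothetical stand-in for the real hardware programming):

	static void run_pass(struct host1x_bo *bo)
	{
		struct sg_table *sgt = NULL;
		dma_addr_t dma;

		host1x_bo_get(bo);		/* hold a reference while pinned */
		dma = host1x_bo_pin(bo, &sgt);	/* engine-visible address */

		program_engine(dma);		/* hypothetical: kick the engine */

		host1x_bo_unpin(bo, sgt);
		host1x_bo_put(bo);
	}

For the tegra_bo backend in drm/gem.c, pin simply returns the contiguous paddr and unpin is a no-op; the indirection leaves room for an IOMMU-backed implementation later.
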
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/host1x_client.h
new file mode 100644
index 000000000000..9b85f10f4a44
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_client.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_CLIENT_H
18#define HOST1X_CLIENT_H
19
20struct device;
21struct platform_device;
22
23#ifdef CONFIG_DRM_TEGRA
24int host1x_drm_alloc(struct platform_device *pdev);
25#else
26static inline int host1x_drm_alloc(struct platform_device *pdev)
27{
28 return 0;
29}
30#endif
31
32void host1x_set_drm_data(struct device *dev, void *data);
33void *host1x_get_drm_data(struct device *dev);
34
35#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
new file mode 100644
index 000000000000..9b50863a2236
--- /dev/null
+++ b/drivers/gpu/host1x/hw/Makefile
@@ -0,0 +1,6 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-hw-objs = \
4 host1x01.o
5
6obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644
index 000000000000..590b69d91dab
--- /dev/null
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -0,0 +1,326 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h>
22
23#include "cdma.h"
24#include "channel.h"
25#include "dev.h"
26#include "debug.h"
27
28/*
29 * Put the restart at the end of pushbuffer memory.
30 */
31static void push_buffer_init(struct push_buffer *pb)
32{
33 *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
34}
35
36/*
37 * Increment the timed-out buffer's syncpt via CPU.
38 */
39static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
40 u32 syncpt_incrs, u32 syncval, u32 nr_slots)
41{
42 struct host1x *host1x = cdma_to_host1x(cdma);
43 struct push_buffer *pb = &cdma->push_buffer;
44 u32 i;
45
46 for (i = 0; i < syncpt_incrs; i++)
47 host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
48
49 /* after CPU incr, ensure shadow is up to date */
50 host1x_syncpt_load(cdma->timeout.syncpt);
51
52 /* NOP all the PB slots */
53 while (nr_slots--) {
54 u32 *p = (u32 *)((void *)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
58 pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 }
61 wmb();
62}
63
64/*
65 * Start channel DMA
66 */
67static void cdma_start(struct host1x_cdma *cdma)
68{
69 struct host1x_channel *ch = cdma_to_channel(cdma);
70
71 if (cdma->running)
72 return;
73
74 cdma->last_pos = cdma->push_buffer.pos;
75
76 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
77 HOST1X_CHANNEL_DMACTRL);
78
79 /* set base, put and end pointer */
80 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
81 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
82 host1x_ch_writel(ch, cdma->push_buffer.phys +
83 cdma->push_buffer.size_bytes + 4,
84 HOST1X_CHANNEL_DMAEND);
85
86 /* reset GET */
87 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
88 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
89 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
90 HOST1X_CHANNEL_DMACTRL);
91
92 /* start the command DMA */
93 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
94
95 cdma->running = true;
96}
97
98/*
99 * Similar to cdma_start(), but rather than starting from an idle
100 * state (where DMA GET is set to DMA PUT), on a timeout we restore
101 * DMA GET from an explicit value (so DMA may again be pending).
102 */
103static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
104{
105 struct host1x *host1x = cdma_to_host1x(cdma);
106 struct host1x_channel *ch = cdma_to_channel(cdma);
107
108 if (cdma->running)
109 return;
110
111 cdma->last_pos = cdma->push_buffer.pos;
112
113 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
114 HOST1X_CHANNEL_DMACTRL);
115
116 /* set base, end pointer (all of memory) */
117 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
118 host1x_ch_writel(ch, cdma->push_buffer.phys +
119 cdma->push_buffer.size_bytes,
120 HOST1X_CHANNEL_DMAEND);
121
122 /* set GET, by loading the value in PUT (then reset GET) */
123 host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
124 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
125 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
126 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
127 HOST1X_CHANNEL_DMACTRL);
128
129 dev_dbg(host1x->dev,
130 "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
131 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
132 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
133 cdma->last_pos);
134
135 /* deassert GET reset and set PUT */
136 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
137 HOST1X_CHANNEL_DMACTRL);
138 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
139
140 /* start the command DMA */
141 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
142
143 cdma->running = true;
144}
145
146/*
147 * Kick channel DMA into action by writing its PUT offset (if it has changed)
148 */
149static void cdma_flush(struct host1x_cdma *cdma)
150{
151 struct host1x_channel *ch = cdma_to_channel(cdma);
152
153 if (cdma->push_buffer.pos != cdma->last_pos) {
154 host1x_ch_writel(ch, cdma->push_buffer.pos,
155 HOST1X_CHANNEL_DMAPUT);
156 cdma->last_pos = cdma->push_buffer.pos;
157 }
158}
159
160static void cdma_stop(struct host1x_cdma *cdma)
161{
162 struct host1x_channel *ch = cdma_to_channel(cdma);
163
164 mutex_lock(&cdma->lock);
165 if (cdma->running) {
166 host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
167 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
168 HOST1X_CHANNEL_DMACTRL);
169 cdma->running = false;
170 }
171 mutex_unlock(&cdma->lock);
172}
173
174/*
175 * Stops both channel's command processor and CDMA immediately.
176 * Also, tears down the channel and resets corresponding module.
177 */
178static void cdma_freeze(struct host1x_cdma *cdma)
179{
180 struct host1x *host = cdma_to_host1x(cdma);
181 struct host1x_channel *ch = cdma_to_channel(cdma);
182 u32 cmdproc_stop;
183
184 if (cdma->torndown && !cdma->running) {
185 dev_warn(host->dev, "Already torn down\n");
186 return;
187 }
188
189 dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
190
191 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
192 cmdproc_stop |= BIT(ch->id);
193 host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
194
195 dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
196 __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
197 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
198 cdma->last_pos);
199
200 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
201 HOST1X_CHANNEL_DMACTRL);
202
203 host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
204
205 cdma->running = false;
206 cdma->torndown = true;
207}
208
209static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
210{
211 struct host1x *host1x = cdma_to_host1x(cdma);
212 struct host1x_channel *ch = cdma_to_channel(cdma);
213 u32 cmdproc_stop;
214
215 dev_dbg(host1x->dev,
216 "resuming channel (id %d, DMAGET restart = 0x%x)\n",
217 ch->id, getptr);
218
219 cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
220 cmdproc_stop &= ~(BIT(ch->id));
221 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
222
223 cdma->torndown = false;
224 cdma_timeout_restart(cdma, getptr);
225}
226
227/*
228 * If this timeout fires, it indicates the current sync_queue entry has
229 * exceeded its TTL; the userctx should be timed out and any submits
230 * already issued should be cleaned up (future submits return an error).
231 */
232static void cdma_timeout_handler(struct work_struct *work)
233{
234 struct host1x_cdma *cdma;
235 struct host1x *host1x;
236 struct host1x_channel *ch;
237
238 u32 syncpt_val;
239
240 u32 prev_cmdproc, cmdproc_stop;
241
242 cdma = container_of(to_delayed_work(work), struct host1x_cdma,
243 timeout.wq);
244 host1x = cdma_to_host1x(cdma);
245 ch = cdma_to_channel(cdma);
246
247 host1x_debug_dump(cdma_to_host1x(cdma));
248
249 mutex_lock(&cdma->lock);
250
251 if (!cdma->timeout.client) {
252 dev_dbg(host1x->dev,
253 "cdma_timeout: expired, but has no clientid\n");
254 mutex_unlock(&cdma->lock);
255 return;
256 }
257
258 /* stop processing to get a clean snapshot */
259 prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
260 cmdproc_stop = prev_cmdproc | BIT(ch->id);
261 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
262
263 dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
264 prev_cmdproc, cmdproc_stop);
265
266 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
267
268 /* has buffer actually completed? */
269 if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
270 dev_dbg(host1x->dev,
271 "cdma_timeout: expired, but buffer had completed\n");
272 /* restore */
273 cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
274 host1x_sync_writel(host1x, cmdproc_stop,
275 HOST1X_SYNC_CMDPROC_STOP);
276 mutex_unlock(&cdma->lock);
277 return;
278 }
279
280 dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
281 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
282 syncpt_val, cdma->timeout.syncpt_val);
283
284 /* stop HW, resetting channel/module */
285 host1x_hw_cdma_freeze(host1x, cdma);
286
287 host1x_cdma_update_sync_queue(cdma, ch->dev);
288 mutex_unlock(&cdma->lock);
289}
290
291/*
292 * Init timeout resources
293 */
294static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
295{
296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
297 cdma->timeout.initialized = true;
298
299 return 0;
300}
301
302/*
303 * Clean up timeout resources
304 */
305static void cdma_timeout_destroy(struct host1x_cdma *cdma)
306{
307 if (cdma->timeout.initialized)
308 cancel_delayed_work(&cdma->timeout.wq);
309 cdma->timeout.initialized = false;
310}
311
312static const struct host1x_cdma_ops host1x_cdma_ops = {
313 .start = cdma_start,
314 .stop = cdma_stop,
315 .flush = cdma_flush,
316
317 .timeout_init = cdma_timeout_init,
318 .timeout_destroy = cdma_timeout_destroy,
319 .freeze = cdma_freeze,
320 .resume = cdma_resume,
321 .timeout_cpu_incr = cdma_timeout_cpu_incr,
322};
323
324static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
325 .init = push_buffer_init,
326};
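
The NOP loop in cdma_timeout_cpu_incr() advances the pointer with `getptr = (getptr + 8) & (pb->size_bytes - 1)`, a branch-free ring-buffer increment that only works because the push buffer size is a power of two. A worked illustration of the idiom (PB_SIZE is a hypothetical 4 KiB buffer; every slot is two 32-bit words):

	#define PB_SIZE 4096	/* must be a power of two */

	static u32 pb_advance(u32 getptr)
	{
		return (getptr + 8) & (PB_SIZE - 1);	/* 8 bytes per slot */
	}

	/* pb_advance(4088) -> (4096 & 4095) == 0: wrapped back to the start */

The RESTART opcode that push_buffer_init() plants one word past the end of the buffer does the same job for the hardware's GET pointer.
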
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644
index 000000000000..ee199623e365
--- /dev/null
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -0,0 +1,168 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <trace/events/host1x.h>
21
22#include "host1x.h"
23#include "host1x_bo.h"
24#include "channel.h"
25#include "dev.h"
26#include "intr.h"
27#include "job.h"
28
29#define HOST1X_CHANNEL_SIZE 16384
30#define TRACE_MAX_LENGTH 128U
31
32static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
33 u32 offset, u32 words)
34{
35 void *mem = NULL;
36
37 if (host1x_debug_trace_cmdbuf)
38 mem = host1x_bo_mmap(bo);
39
40 if (mem) {
41 u32 i;
42 /*
43 * Write in batches of 128 as there seems to be a limit
44 * on how much you can output to ftrace at once.
45 */
46 for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
47 trace_host1x_cdma_push_gather(
48 dev_name(cdma_to_channel(cdma)->dev),
49 (u32)bo, min(words - i, TRACE_MAX_LENGTH),
50 offset + i * sizeof(u32), mem);
51 }
52 host1x_bo_munmap(bo, mem);
53 }
54}
55
56static void submit_gathers(struct host1x_job *job)
57{
58 struct host1x_cdma *cdma = &job->channel->cdma;
59 unsigned int i;
60
61 for (i = 0; i < job->num_gathers; i++) {
62 struct host1x_job_gather *g = &job->gathers[i];
63 u32 op1 = host1x_opcode_gather(g->words);
64 u32 op2 = g->base + g->offset;
65 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
66 host1x_cdma_push(cdma, op1, op2);
67 }
68}
69
70static int channel_submit(struct host1x_job *job)
71{
72 struct host1x_channel *ch = job->channel;
73 struct host1x_syncpt *sp;
74 u32 user_syncpt_incrs = job->syncpt_incrs;
75 u32 prev_max = 0;
76 u32 syncval;
77 int err;
78 struct host1x_waitlist *completed_waiter = NULL;
79 struct host1x *host = dev_get_drvdata(ch->dev->parent);
80
81 sp = host->syncpt + job->syncpt_id;
82 trace_host1x_channel_submit(dev_name(ch->dev),
83 job->num_gathers, job->num_relocs,
84 job->num_waitchk, job->syncpt_id,
85 job->syncpt_incrs);
86
 87	/* before error checks, record the current max */
88 prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
89
90 /* get submit lock */
91 err = mutex_lock_interruptible(&ch->submitlock);
92 if (err)
93 goto error;
94
95 completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
96 if (!completed_waiter) {
97 mutex_unlock(&ch->submitlock);
98 err = -ENOMEM;
99 goto error;
100 }
101
102 /* begin a CDMA submit */
103 err = host1x_cdma_begin(&ch->cdma, job);
104 if (err) {
105 mutex_unlock(&ch->submitlock);
106 goto error;
107 }
108
109 if (job->serialize) {
110 /*
111 * Force serialization by inserting a host wait for the
112 * previous job to finish before this one can commence.
113 */
114 host1x_cdma_push(&ch->cdma,
115 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
116 host1x_uclass_wait_syncpt_r(), 1),
117 host1x_class_host_wait_syncpt(job->syncpt_id,
118 host1x_syncpt_read_max(sp)));
119 }
120
121 syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
122
123 job->syncpt_end = syncval;
124
125 /* add a setclass for modules that require it */
126 if (job->class)
127 host1x_cdma_push(&ch->cdma,
128 host1x_opcode_setclass(job->class, 0, 0),
129 HOST1X_OPCODE_NOP);
130
131 submit_gathers(job);
132
133	/* end CDMA submit & stash the pinned buffers into the sync queue */
134 host1x_cdma_end(&ch->cdma, job);
135
136 trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
137
138 /* schedule a submit complete interrupt */
139 err = host1x_intr_add_action(host, job->syncpt_id, syncval,
140 HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
141 completed_waiter, NULL);
142 completed_waiter = NULL;
143 WARN(err, "Failed to set submit complete interrupt");
144
145 mutex_unlock(&ch->submitlock);
146
147 return 0;
148
149error:
150 kfree(completed_waiter);
151 return err;
152}
153
154static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
155 unsigned int index)
156{
157 ch->id = index;
158 mutex_init(&ch->reflock);
159 mutex_init(&ch->submitlock);
160
161 ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
162 return 0;
163}
164
165static const struct host1x_channel_ops host1x_channel_ops = {
166 .init = host1x_channel_init,
167 .submit = channel_submit,
168};
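
To make the opcode stream in submit_gathers() concrete: for a 16-word gather in a buffer whose pinned base address is 0x10000000 (an address made up for illustration) at offset 0, the pushed pair works out to:

	u32 op1 = host1x_opcode_gather(16);	/* (6 << 28) | 16 == 0x60000010 */
	u32 op2 = 0x10000000 + 0;		/* g->base + g->offset */

	host1x_cdma_push(cdma, op1, op2);	/* consumes one push buffer slot */
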
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644
index 000000000000..334c038052f5
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/mm.h>
21#include <linux/scatterlist.h>
22
23#include <linux/io.h>
24
25#include "dev.h"
26#include "debug.h"
27#include "cdma.h"
28#include "channel.h"
29#include "host1x_bo.h"
30
31#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
32
33enum {
34 HOST1X_OPCODE_SETCLASS = 0x00,
35 HOST1X_OPCODE_INCR = 0x01,
36 HOST1X_OPCODE_NONINCR = 0x02,
37 HOST1X_OPCODE_MASK = 0x03,
38 HOST1X_OPCODE_IMM = 0x04,
39 HOST1X_OPCODE_RESTART = 0x05,
40 HOST1X_OPCODE_GATHER = 0x06,
41 HOST1X_OPCODE_EXTEND = 0x0e,
42};
43
44enum {
45 HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK = 0x00,
46 HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
47};
48
49static unsigned int show_channel_command(struct output *o, u32 val)
50{
51 unsigned mask;
52 unsigned subop;
53
54 switch (val >> 28) {
55 case HOST1X_OPCODE_SETCLASS:
56 mask = val & 0x3f;
57 if (mask) {
58 host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
59 val >> 6 & 0x3ff,
60 val >> 16 & 0xfff, mask);
61 return hweight8(mask);
62 } else {
63 host1x_debug_output(o, "SETCL(class=%03x)\n",
64 val >> 6 & 0x3ff);
65 return 0;
66 }
67
68 case HOST1X_OPCODE_INCR:
69 host1x_debug_output(o, "INCR(offset=%03x, [",
70 val >> 16 & 0xfff);
71 return val & 0xffff;
72
73 case HOST1X_OPCODE_NONINCR:
74 host1x_debug_output(o, "NONINCR(offset=%03x, [",
75 val >> 16 & 0xfff);
76 return val & 0xffff;
77
78 case HOST1X_OPCODE_MASK:
79 mask = val & 0xffff;
80 host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
81 val >> 16 & 0xfff, mask);
82 return hweight16(mask);
83
84 case HOST1X_OPCODE_IMM:
85 host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
86 val >> 16 & 0xfff, val & 0xffff);
87 return 0;
88
89 case HOST1X_OPCODE_RESTART:
90 host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
91 return 0;
92
93 case HOST1X_OPCODE_GATHER:
94 host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
95 val >> 16 & 0xfff, val >> 15 & 0x1,
96 val >> 14 & 0x1, val & 0x3fff);
97 return 1;
98
99 case HOST1X_OPCODE_EXTEND:
100 subop = val >> 24 & 0xf;
101 if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
102 host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
103 val & 0xff);
104 else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
105 host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
106 val & 0xff);
107 else
108 host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
109 return 0;
110
111 default:
112 return 0;
113 }
114}
115
116static void show_gather(struct output *o, phys_addr_t phys_addr,
117 unsigned int words, struct host1x_cdma *cdma,
118 phys_addr_t pin_addr, u32 *map_addr)
119{
120 /* Map dmaget cursor to corresponding mem handle */
121 u32 offset = phys_addr - pin_addr;
122 unsigned int data_count = 0, i;
123
124 /*
125	 * Sometimes we're given a different hardware address for the same
126	 * page - in these cases the computed offset is bogus and we just
127	 * have to bail out.
128 */
129 if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
130 host1x_debug_output(o, "[address mismatch]\n");
131 return;
132 }
133
134 for (i = 0; i < words; i++) {
135 u32 addr = phys_addr + i * 4;
136 u32 val = *(map_addr + offset / 4 + i);
137
138 if (!data_count) {
139 host1x_debug_output(o, "%08x: %08x:", addr, val);
140 data_count = show_channel_command(o, val);
141 } else {
142 host1x_debug_output(o, "%08x%s", val,
143					    data_count > 1 ? ", " : "])\n");
144 data_count--;
145 }
146 }
147}
148
149static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
150{
151 struct host1x_job *job;
152
153 list_for_each_entry(job, &cdma->sync_queue, list) {
154 int i;
155		host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d, num_slots=%d, num_handles=%d\n",
156 job, job->syncpt_id, job->syncpt_end,
157 job->first_get, job->timeout,
158 job->num_slots, job->num_unpins);
159
160 for (i = 0; i < job->num_gathers; i++) {
161 struct host1x_job_gather *g = &job->gathers[i];
162 u32 *mapped;
163
164 if (job->gather_copy_mapped)
165 mapped = (u32 *)job->gather_copy_mapped;
166 else
167 mapped = host1x_bo_mmap(g->bo);
168
169 if (!mapped) {
170 host1x_debug_output(o, "[could not mmap]\n");
171 continue;
172 }
173
174 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
175 g->base, g->offset, g->words);
176
177 show_gather(o, g->base + g->offset, g->words, cdma,
178 g->base, mapped);
179
180 if (!job->gather_copy_mapped)
181 host1x_bo_munmap(g->bo, mapped);
182 }
183 }
184}
185
186static void host1x_debug_show_channel_cdma(struct host1x *host,
187 struct host1x_channel *ch,
188 struct output *o)
189{
190 struct host1x_cdma *cdma = &ch->cdma;
191 u32 dmaput, dmaget, dmactrl;
192 u32 cbstat, cbread;
193 u32 val, base, baseval;
194
195 dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
196 dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
197 dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
198 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
199 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
200
201 host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
202
203 if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
204 !ch->cdma.push_buffer.mapped) {
205 host1x_debug_output(o, "inactive\n\n");
206 return;
207 }
208
209 if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
210 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
211 HOST1X_UCLASS_WAIT_SYNCPT)
212 host1x_debug_output(o, "waiting on syncpt %d val %d\n",
213 cbread >> 24, cbread & 0xffffff);
214 else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
215 HOST1X_CLASS_HOST1X &&
216 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
217 HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
218
219 base = (cbread >> 16) & 0xff;
220 baseval =
221 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
222 val = cbread & 0xffff;
223 host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
224 cbread >> 24, baseval + val, base,
225 baseval, val);
226 } else
227 host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
228 HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
229 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
230 cbread);
231
232 host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
233 dmaput, dmaget, dmactrl);
234 host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
235
236 show_channel_gathers(o, cdma);
237 host1x_debug_output(o, "\n");
238}
239
240static void host1x_debug_show_channel_fifo(struct host1x *host,
241 struct host1x_channel *ch,
242 struct output *o)
243{
244 u32 val, rd_ptr, wr_ptr, start, end;
245 unsigned int data_count = 0;
246
247 host1x_debug_output(o, "%d: fifo:\n", ch->id);
248
249 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
250 host1x_debug_output(o, "FIFOSTAT %08x\n", val);
251 if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
252 host1x_debug_output(o, "[empty]\n");
253 return;
254 }
255
256 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
257 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
258 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
259 HOST1X_SYNC_CFPEEK_CTRL);
260
261 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
262 rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
263 wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
264
265 val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
266 start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
267 end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
268
269 do {
270 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
271 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
272 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
273 HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
274 HOST1X_SYNC_CFPEEK_CTRL);
275 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
276
277 if (!data_count) {
278 host1x_debug_output(o, "%08x:", val);
279 data_count = show_channel_command(o, val);
280 } else {
281 host1x_debug_output(o, "%08x%s", val,
282					    data_count > 1 ? ", " : "])\n");
283 data_count--;
284 }
285
286 if (rd_ptr == end)
287 rd_ptr = start;
288 else
289 rd_ptr++;
290 } while (rd_ptr != wr_ptr);
291
292 if (data_count)
293 host1x_debug_output(o, ", ...])\n");
294 host1x_debug_output(o, "\n");
295
296 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
297}
298
299static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
300{
301 int i;
302
303 host1x_debug_output(o, "---- mlocks ----\n");
304 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
305 u32 owner =
306 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
307 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
308 host1x_debug_output(o, "%d: locked by channel %d\n",
309 i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
310 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
311 host1x_debug_output(o, "%d: locked by cpu\n", i);
312 else
313 host1x_debug_output(o, "%d: unlocked\n", i);
314 }
315 host1x_debug_output(o, "\n");
316}
317
318static const struct host1x_debug_ops host1x_debug_ops = {
319 .show_channel_cdma = host1x_debug_show_channel_cdma,
320 .show_channel_fifo = host1x_debug_show_channel_fifo,
321 .show_mlocks = host1x_debug_show_mlocks,
322};
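
show_channel_command() keys off the top nibble of each word and returns how many payload words follow, which is what drives the data_count bookkeeping in show_gather() and the FIFO dump above. Decoding a made-up NONINCR word by hand:

	u32 val = 0x20100004;		/* made-up example word */
	u32 op = val >> 28;		/* 0x2: HOST1X_OPCODE_NONINCR */
	u32 offset = val >> 16 & 0xfff;	/* 0x010 */
	u32 count = val & 0xffff;	/* 4 payload words follow */
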
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644
index 000000000000..a14e91cd1e58
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* include hw specification */
20#include "hw/host1x01.h"
21#include "hw/host1x01_hardware.h"
22
23/* include code */
24#include "hw/cdma_hw.c"
25#include "hw/channel_hw.c"
26#include "hw/debug_hw.c"
27#include "hw/intr_hw.c"
28#include "hw/syncpt_hw.c"
29
30#include "dev.h"
31
32int host1x01_init(struct host1x *host)
33{
34 host->channel_op = &host1x_channel_ops;
35 host->cdma_op = &host1x_cdma_ops;
36 host->cdma_pb_op = &host1x_pushbuffer_ops;
37 host->syncpt_op = &host1x_syncpt_ops;
38 host->intr_op = &host1x_intr_ops;
39 host->debug_op = &host1x_debug_ops;
40
41 return 0;
42}
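
Including the hw/*.c implementation files from here, rather than compiling them on their own, instantiates the whole channel/cdma/debug/intr/syncpt stack once per hardware generation against that generation's register headers. A later chip would mirror this file, swapping only the two specification headers; host1x02 below is made up for illustration, only host1x01 exists in this series:

	/* hypothetical hw/host1x02.c */
	#include "hw/host1x02.h"
	#include "hw/host1x02_hardware.h"

	#include "hw/cdma_hw.c"
	#include "hw/channel_hw.c"
	#include "hw/debug_hw.c"
	#include "hw/intr_hw.c"
	#include "hw/syncpt_hw.c"
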
diff --git a/drivers/gpu/host1x/hw/host1x01.h b/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644
index 000000000000..2706b6743250
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.h
@@ -0,0 +1,25 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef HOST1X_HOST1X01_H
19#define HOST1X_HOST1X01_H
20
21struct host1x;
22
23int host1x01_init(struct host1x *host);
24
25#endif /* HOST1X_HOST1X01_H */
diff --git a/drivers/gpu/host1x/hw/host1x01_hardware.h b/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644
index 000000000000..5f0fb866efa8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01_hardware.h
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Register Offsets for Tegra20 and Tegra30
3 *
4 * Copyright (c) 2010-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_HOST1X01_HARDWARE_H
20#define __HOST1X_HOST1X01_HARDWARE_H
21
22#include <linux/types.h>
23#include <linux/bitops.h>
24
25#include "hw_host1x01_channel.h"
26#include "hw_host1x01_sync.h"
27#include "hw_host1x01_uclass.h"
28
29static inline u32 host1x_class_host_wait_syncpt(
30 unsigned indx, unsigned threshold)
31{
32 return host1x_uclass_wait_syncpt_indx_f(indx)
33 | host1x_uclass_wait_syncpt_thresh_f(threshold);
34}
35
36static inline u32 host1x_class_host_load_syncpt_base(
37 unsigned indx, unsigned threshold)
38{
39 return host1x_uclass_load_syncpt_base_base_indx_f(indx)
40 | host1x_uclass_load_syncpt_base_value_f(threshold);
41}
42
43static inline u32 host1x_class_host_wait_syncpt_base(
44 unsigned indx, unsigned base_indx, unsigned offset)
45{
46 return host1x_uclass_wait_syncpt_base_indx_f(indx)
47 | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
48 | host1x_uclass_wait_syncpt_base_offset_f(offset);
49}
50
51static inline u32 host1x_class_host_incr_syncpt_base(
52 unsigned base_indx, unsigned offset)
53{
54 return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
55 | host1x_uclass_incr_syncpt_base_offset_f(offset);
56}
57
58static inline u32 host1x_class_host_incr_syncpt(
59 unsigned cond, unsigned indx)
60{
61 return host1x_uclass_incr_syncpt_cond_f(cond)
62 | host1x_uclass_incr_syncpt_indx_f(indx);
63}
64
65static inline u32 host1x_class_host_indoff_reg_write(
66 unsigned mod_id, unsigned offset, bool auto_inc)
67{
68 u32 v = host1x_uclass_indoff_indbe_f(0xf)
69 | host1x_uclass_indoff_indmodid_f(mod_id)
70 | host1x_uclass_indoff_indroffset_f(offset);
71 if (auto_inc)
72 v |= host1x_uclass_indoff_autoinc_f(1);
73 return v;
74}
75
76static inline u32 host1x_class_host_indoff_reg_read(
77 unsigned mod_id, unsigned offset, bool auto_inc)
78{
79 u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
80 | host1x_uclass_indoff_indroffset_f(offset)
81 | host1x_uclass_indoff_rwn_read_v();
82 if (auto_inc)
83 v |= host1x_uclass_indoff_autoinc_f(1);
84 return v;
85}
86
87
88/* cdma opcodes */
89static inline u32 host1x_opcode_setclass(
90 unsigned class_id, unsigned offset, unsigned mask)
91{
92 return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
93}
94
95static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
96{
97 return (1 << 28) | (offset << 16) | count;
98}
99
100static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
101{
102 return (2 << 28) | (offset << 16) | count;
103}
104
105static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
106{
107 return (3 << 28) | (offset << 16) | mask;
108}
109
110static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
111{
112 return (4 << 28) | (offset << 16) | value;
113}
114
115static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
116{
117 return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
118 host1x_class_host_incr_syncpt(cond, indx));
119}
120
121static inline u32 host1x_opcode_restart(unsigned address)
122{
123 return (5 << 28) | (address >> 4);
124}
125
126static inline u32 host1x_opcode_gather(unsigned count)
127{
128 return (6 << 28) | count;
129}
130
131static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
132{
133 return (6 << 28) | (offset << 16) | BIT(15) | count;
134}
135
136static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
137{
138 return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
139}
140
141#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
142
143#endif
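
As a worked example of the encoders above, the first word of the serializing wait that channel_submit() pushes is built from two of these helpers; with HOST1X_CLASS_HOST1X assumed to be 0x1, it evaluates to:

	u32 op = host1x_opcode_setclass(0x1 /* HOST1X_CLASS_HOST1X, assumed */,
					host1x_uclass_wait_syncpt_r(), 1);
	/* (0 << 28) | (0x8 << 16) | (0x1 << 6) | 1 == 0x00080041 */
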
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644
index 000000000000..b4bc7ca4e051
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
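 /*
  * For example, with the definitions below (illustrative use only):
  *
  *   host1x_channel_dmactrl_r()           -> 0x24, the DMACTRL offset
  *   host1x_channel_dmactrl_dmastop_v(r)  -> bit 0 of r, shifted to LSB
  */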
50
51#ifndef __hw_host1x_channel_host1x_h__
52#define __hw_host1x_channel_host1x_h__
53
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 10) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644
index 000000000000..ac704e579977
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x01_sync_h__
52#define __hw_host1x01_sync_h__
53
54#define REGISTER_STRIDE 4
55
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0x400 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0x40 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0x60 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0x68 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0x80 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x1ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x1ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
135{
136 return (v & 0xf) << 8;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
139 host1x_sync_mlock_owner_chid_f(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x500 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0x700 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0x720 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x1ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0x7) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x1ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x1ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0x758 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif /* __hw_host1x01_sync_h__ */
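
The per-index accessors in this header all scale by REGISTER_STRIDE, so the generated addresses can be checked by inspection:

	u32 a = host1x_sync_syncpt_r(3);	/* 0x400 + 3 * 4 = 0x40c */
	u32 b = host1x_sync_cbread_r(1);	/* 0x720 + 1 * 4 = 0x724 */
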
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644
index 000000000000..42f3ce19ca32
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
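 /*
  * For example, the _f helpers below compose into full register values
  * (illustrative use only):
  *
  *   host1x_uclass_wait_syncpt_indx_f(2) |
  *   host1x_uclass_wait_syncpt_thresh_f(100)
  *       -> (2 << 24) | 100, a WAIT_SYNCPT value for syncpt 2, threshold 100
  */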
50
51#ifndef __hw_host1x_uclass_host1x_h__
52#define __hw_host1x_uclass_host1x_h__
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{
116 return (v & 0xff) << 24;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
119 host1x_uclass_load_syncpt_base_base_indx_f(v)
120static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
121{
122 return (v & 0xffffff) << 0;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
125 host1x_uclass_load_syncpt_base_value_f(v)
126static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
127{
128 return (v & 0xff) << 24;
129}
130#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
131 host1x_uclass_incr_syncpt_base_base_indx_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
133{
134 return (v & 0xffffff) << 0;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
137 host1x_uclass_incr_syncpt_base_offset_f(v)
138static inline u32 host1x_uclass_indoff_r(void)
139{
140 return 0x2d;
141}
142#define HOST1X_UCLASS_INDOFF \
143 host1x_uclass_indoff_r()
144static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
145{
146 return (v & 0xf) << 28;
147}
148#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
149 host1x_uclass_indoff_indbe_f(v)
150static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
151{
152 return (v & 0x1) << 27;
153}
154#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
155 host1x_uclass_indoff_autoinc_f(v)
156static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
157{
158 return (v & 0xff) << 18;
159}
160#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
161 host1x_uclass_indoff_indmodid_f(v)
162static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
163{
164 return (v & 0xffff) << 2;
165}
166#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
167 host1x_uclass_indoff_indroffset_f(v)
168static inline u32 host1x_uclass_indoff_rwn_read_v(void)
169{
170 return 1;
171}
172#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
173	host1x_uclass_indoff_rwn_read_v()
174#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 000000000000..b592eef1efcb
--- /dev/null
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Copyright (c) 2010-2013, NVIDIA Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/io.h>
23#include <asm/mach/irq.h>
24
25#include "intr.h"
26#include "dev.h"
27
28/*
29 * Sync point threshold interrupt service function
30 * Handles sync point threshold triggers, in interrupt context
31 */
32static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
33{
34 unsigned int id = syncpt->id;
35 struct host1x *host = syncpt->host;
36
37 host1x_sync_writel(host, BIT_MASK(id),
38 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
39 host1x_sync_writel(host, BIT_MASK(id),
40 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
41
42 queue_work(host->intr_wq, &syncpt->intr.work);
43}
44
45static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
46{
47 struct host1x *host = dev_id;
48 unsigned long reg;
49 int i, id;
50
51 for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
52 reg = host1x_sync_readl(host,
53 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
54 for_each_set_bit(id, &reg, BITS_PER_LONG) {
55 struct host1x_syncpt *syncpt =
56 host->syncpt + (i * BITS_PER_LONG + id);
57 host1x_intr_syncpt_handle(syncpt);
58 }
59 }
60
61 return IRQ_HANDLED;
62}
63
64static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
65{
66 u32 i;
67
68 for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
69 host1x_sync_writel(host, 0xffffffffu,
70 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
71 host1x_sync_writel(host, 0xffffffffu,
72 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
73 }
74}
75
76static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
77 void (*syncpt_thresh_work)(struct work_struct *))
78{
79 int i, err;
80
81 host1x_hw_intr_disable_all_syncpt_intrs(host);
82
83 for (i = 0; i < host->info->nb_pts; i++)
84 INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
85
86 err = devm_request_irq(host->dev, host->intr_syncpt_irq,
87 syncpt_thresh_isr, IRQF_SHARED,
88 "host1x_syncpt", host);
89 if (IS_ERR_VALUE(err)) {
90 WARN_ON(1);
91 return err;
92 }
93
94 /* disable the ip_busy_timeout. this prevents write drops */
95 host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
96
97 /*
 98	 * Increase the auto-ack timeout to the maximum value; the 2D engine
 99	 * hangs otherwise on Tegra2.
100 */
101 host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
102
103 /* update host clocks per usec */
104 host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
105
106 return 0;
107}
108
109static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
110 u32 id, u32 thresh)
111{
112 host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
113}
114
115static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
116{
117 host1x_sync_writel(host, BIT_MASK(id),
118 HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
119}
120
121static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
122{
123 host1x_sync_writel(host, BIT_MASK(id),
124 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
125 host1x_sync_writel(host, BIT_MASK(id),
126 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
127}
128
129static int _host1x_free_syncpt_irq(struct host1x *host)
130{
131 devm_free_irq(host->dev, host->intr_syncpt_irq, host);
132 flush_workqueue(host->intr_wq);
133 return 0;
134}
135
136static const struct host1x_intr_ops host1x_intr_ops = {
137 .init_host_sync = _host1x_intr_init_host_sync,
138 .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
139 .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
140 .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
141 .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
142 .free_syncpt_irq = _host1x_free_syncpt_irq,
143};
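
The BIT_MASK()/BIT_WORD() pairing used throughout this file maps a syncpoint id onto one of the 32-bit banked interrupt registers. Note the arithmetic (like the `sp->id / 32` in syncpt_hw.c) assumes BITS_PER_LONG == 32, which holds on the 32-bit Tegra parts this generation covers. For id = 40:

	unsigned int word = BIT_WORD(40);	/* 40 / 32 = 1: second banked register */
	unsigned long mask = BIT_MASK(40);	/* 1UL << (40 % 32) = 0x100 */
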
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644
index 000000000000..61174990102a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -0,0 +1,114 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/io.h>
20
21#include "dev.h"
22#include "syncpt.h"
23
24/*
25 * Write the current syncpoint value back to hw.
26 */
27static void syncpt_restore(struct host1x_syncpt *sp)
28{
29 struct host1x *host = sp->host;
30 int min = host1x_syncpt_read_min(sp);
31 host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
32}
33
34/*
35 * Write the current waitbase value back to hw.
36 */
37static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
38{
39 struct host1x *host = sp->host;
40 host1x_sync_writel(host, sp->base_val,
41 HOST1X_SYNC_SYNCPT_BASE(sp->id));
42}
43
44/*
45 * Read waitbase value from hw.
46 */
47static void syncpt_read_wait_base(struct host1x_syncpt *sp)
48{
49 struct host1x *host = sp->host;
50 sp->base_val =
51 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
52}
53
54/*
55 * Updates the last value read from hardware.
56 */
57static u32 syncpt_load(struct host1x_syncpt *sp)
58{
59 struct host1x *host = sp->host;
60 u32 old, live;
61
62 /* Loop in case there's a race writing to min_val */
63 do {
64 old = host1x_syncpt_read_min(sp);
65 live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
66 } while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
67
68 if (!host1x_syncpt_check_max(sp, live))
69 dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
70 __func__, sp->id, host1x_syncpt_read_min(sp),
71 host1x_syncpt_read_max(sp));
72
73 return live;
74}
75
76/*
77 * Write a cpu syncpoint increment to the hardware, without touching
78 * the cache.
79 */
80static void syncpt_cpu_incr(struct host1x_syncpt *sp)
81{
82 struct host1x *host = sp->host;
83 u32 reg_offset = sp->id / 32;
84
85 if (!host1x_syncpt_client_managed(sp) &&
86 host1x_syncpt_idle(sp)) {
87 dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
88 sp->id);
89 host1x_debug_dump(sp->host);
90 return;
91 }
92 host1x_sync_writel(host, BIT_MASK(sp->id),
93 HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
94 wmb();
95}
96
97/* remove a wait pointed to by patch_addr */
98static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
99{
100 u32 override = host1x_class_host_wait_syncpt(
101 HOST1X_SYNCPT_RESERVED, 0);
102
103 *((u32 *)patch_addr) = override;
104 return 0;
105}
106
107static const struct host1x_syncpt_ops host1x_syncpt_ops = {
108 .restore = syncpt_restore,
109 .restore_wait_base = syncpt_restore_wait_base,
110 .load_wait_base = syncpt_read_wait_base,
111 .load = syncpt_load,
112 .cpu_incr = syncpt_cpu_incr,
113 .patch_wait = syncpt_patch_wait,
114};
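
syncpt_load() refreshes the cached minimum with a cmpxchg loop so that concurrent readers never move the cache backwards. The accessor it leans on is presumed to be a thin atomic wrapper along these lines (the real definition lives in syncpt.h):

	/* presumed shape of the cached-minimum accessor used by syncpt_load() */
	static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
	{
		return (u32)atomic_read(&sp->min_val);
	}
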
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644
index 000000000000..2491bf82e30c
--- /dev/null
+++ b/drivers/gpu/host1x/intr.c
@@ -0,0 +1,354 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22#include <linux/irq.h>
23
24#include <trace/events/host1x.h>
25#include "channel.h"
26#include "dev.h"
27#include "intr.h"
28
29/* Wait list management */
30
31enum waitlist_state {
32 WLS_PENDING,
33 WLS_REMOVED,
34 WLS_CANCELLED,
35 WLS_HANDLED
36};
37
38static void waiter_release(struct kref *kref)
39{
40 kfree(container_of(kref, struct host1x_waitlist, refcount));
41}
42
43/*
44 * add a waiter to a waiter queue, sorted by threshold
45 * returns true if it was added at the head of the queue
46 */
47static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
48 struct list_head *queue)
49{
50 struct host1x_waitlist *pos;
51 u32 thresh = waiter->thresh;
52
53 list_for_each_entry_reverse(pos, queue, list)
54 if ((s32)(pos->thresh - thresh) <= 0) {
55 list_add(&waiter->list, &pos->list);
56 return false;
57 }
58
59 list_add(&waiter->list, queue);
60 return true;
61}
62
63/*
64 * run through a waiter queue for a single sync point ID
65 * and gather all completed waiters into lists by actions
66 */
67static void remove_completed_waiters(struct list_head *head, u32 sync,
68 struct list_head completed[HOST1X_INTR_ACTION_COUNT])
69{
70 struct list_head *dest;
71 struct host1x_waitlist *waiter, *next, *prev;
72
73 list_for_each_entry_safe(waiter, next, head, list) {
74 if ((s32)(waiter->thresh - sync) > 0)
75 break;
76
77 dest = completed + waiter->action;
78
79 /* consolidate submit cleanups */
80 if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
81 !list_empty(dest)) {
82 prev = list_entry(dest->prev,
83 struct host1x_waitlist, list);
84 if (prev->data == waiter->data) {
85 prev->count++;
86 dest = NULL;
87 }
88 }
89
90 /* PENDING->REMOVED or CANCELLED->HANDLED */
91 if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
92 list_del(&waiter->list);
93 kref_put(&waiter->refcount, waiter_release);
94 } else
95 list_move_tail(&waiter->list, dest);
96 }
97}
98
99static void reset_threshold_interrupt(struct host1x *host,
100 struct list_head *head,
101 unsigned int id)
102{
103 u32 thresh =
104 list_first_entry(head, struct host1x_waitlist, list)->thresh;
105
106 host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
107 host1x_hw_intr_enable_syncpt_intr(host, id);
108}
109
110static void action_submit_complete(struct host1x_waitlist *waiter)
111{
112 struct host1x_channel *channel = waiter->data;
113
114 host1x_cdma_update(&channel->cdma);
115
116 /* Add nr_completed to trace */
117 trace_host1x_channel_submit_complete(dev_name(channel->dev),
118 waiter->count, waiter->thresh);
119
120}
121
122static void action_wakeup(struct host1x_waitlist *waiter)
123{
124 wait_queue_head_t *wq = waiter->data;
125 wake_up(wq);
126}
127
128static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
129{
130 wait_queue_head_t *wq = waiter->data;
131 wake_up_interruptible(wq);
132}
133
134typedef void (*action_handler)(struct host1x_waitlist *waiter);
135
136static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
137 action_submit_complete,
138 action_wakeup,
139 action_wakeup_interruptible,
140};
141
142static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
143{
144 struct list_head *head = completed;
145 int i;
146
147 for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
148 action_handler handler = action_handlers[i];
149 struct host1x_waitlist *waiter, *next;
150
151 list_for_each_entry_safe(waiter, next, head, list) {
152 list_del(&waiter->list);
153 handler(waiter);
154 WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
155 WLS_REMOVED);
156 kref_put(&waiter->refcount, waiter_release);
157 }
158 }
159}
160
161/*
162 * Remove & handle all waiters that have completed for the given syncpt
163 */
164static int process_wait_list(struct host1x *host,
165 struct host1x_syncpt *syncpt,
166 u32 threshold)
167{
168 struct list_head completed[HOST1X_INTR_ACTION_COUNT];
169 unsigned int i;
170 int empty;
171
172 for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
173 INIT_LIST_HEAD(completed + i);
174
175 spin_lock(&syncpt->intr.lock);
176
177 remove_completed_waiters(&syncpt->intr.wait_head, threshold,
178 completed);
179
180 empty = list_empty(&syncpt->intr.wait_head);
181 if (empty)
182 host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
183 else
184 reset_threshold_interrupt(host, &syncpt->intr.wait_head,
185 syncpt->id);
186
187 spin_unlock(&syncpt->intr.lock);
188
189 run_handlers(completed);
190
191 return empty;
192}
193
194/*
195 * Sync point threshold interrupt service thread function
196 * Handles sync point threshold triggers, in thread context
197 */
198
199static void syncpt_thresh_work(struct work_struct *work)
200{
201 struct host1x_syncpt_intr *syncpt_intr =
202 container_of(work, struct host1x_syncpt_intr, work);
203 struct host1x_syncpt *syncpt =
204 container_of(syncpt_intr, struct host1x_syncpt, intr);
205 unsigned int id = syncpt->id;
206 struct host1x *host = syncpt->host;
207
208 (void)process_wait_list(host, syncpt,
209 host1x_syncpt_load(host->syncpt + id));
210}
211
212int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
213 enum host1x_intr_action action, void *data,
214 struct host1x_waitlist *waiter, void **ref)
215{
216 struct host1x_syncpt *syncpt;
217 int queue_was_empty;
218
219 if (waiter == NULL) {
220 pr_warn("%s: NULL waiter\n", __func__);
221 return -EINVAL;
222 }
223
224 /* initialize a new waiter */
225 INIT_LIST_HEAD(&waiter->list);
226 kref_init(&waiter->refcount);
227 if (ref)
228 kref_get(&waiter->refcount);
229 waiter->thresh = thresh;
230 waiter->action = action;
231 atomic_set(&waiter->state, WLS_PENDING);
232 waiter->data = data;
233 waiter->count = 1;
234
235 syncpt = host->syncpt + id;
236
237 spin_lock(&syncpt->intr.lock);
238
239 queue_was_empty = list_empty(&syncpt->intr.wait_head);
240
241 if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
242 /* added at head of list - new threshold value */
243 host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
244
245 /* added as first waiter - enable interrupt */
246 if (queue_was_empty)
247 host1x_hw_intr_enable_syncpt_intr(host, id);
248 }
249
250 spin_unlock(&syncpt->intr.lock);
251
252 if (ref)
253 *ref = waiter;
254 return 0;
255}
256
257void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
258{
259 struct host1x_waitlist *waiter = ref;
260 struct host1x_syncpt *syncpt;
261
262 while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
263 WLS_REMOVED)
264 schedule();
265
266 syncpt = host->syncpt + id;
267 (void)process_wait_list(host, syncpt,
268 host1x_syncpt_load(host->syncpt + id));
269
270 kref_put(&waiter->refcount, waiter_release);
271}
272
273int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
274{
275 unsigned int id;
276 u32 nb_pts = host1x_syncpt_nb_pts(host);
277
278 mutex_init(&host->intr_mutex);
279 host->intr_syncpt_irq = irq_sync;
280 host->intr_wq = create_workqueue("host_syncpt");
281 if (!host->intr_wq)
282 return -ENOMEM;
283
284 for (id = 0; id < nb_pts; ++id) {
285 struct host1x_syncpt *syncpt = host->syncpt + id;
286
287 spin_lock_init(&syncpt->intr.lock);
288 INIT_LIST_HEAD(&syncpt->intr.wait_head);
289 snprintf(syncpt->intr.thresh_irq_name,
290 sizeof(syncpt->intr.thresh_irq_name),
291 "host1x_sp_%02d", id);
292 }
293
294 host1x_intr_start(host);
295
296 return 0;
297}
298
299void host1x_intr_deinit(struct host1x *host)
300{
301 host1x_intr_stop(host);
302 destroy_workqueue(host->intr_wq);
303}
304
305void host1x_intr_start(struct host1x *host)
306{
307 u32 hz = clk_get_rate(host->clk);
308 int err;
309
310 mutex_lock(&host->intr_mutex);
311 err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
312 syncpt_thresh_work);
313 if (err) {
314 mutex_unlock(&host->intr_mutex);
315 return;
316 }
317 mutex_unlock(&host->intr_mutex);
318}
319
320void host1x_intr_stop(struct host1x *host)
321{
322 unsigned int id;
323 struct host1x_syncpt *syncpt = host->syncpt;
324 u32 nb_pts = host1x_syncpt_nb_pts(host);
325
326 mutex_lock(&host->intr_mutex);
327
328 host1x_hw_intr_disable_all_syncpt_intrs(host);
329
330 for (id = 0; id < nb_pts; ++id) {
331 struct host1x_waitlist *waiter, *next;
332
333 list_for_each_entry_safe(waiter, next,
334 &syncpt[id].intr.wait_head, list) {
335 if (atomic_cmpxchg(&waiter->state,
336 WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
337 list_del(&waiter->list);
338 kref_put(&waiter->refcount, waiter_release);
339 }
340 }
341
342 if (!list_empty(&syncpt[id].intr.wait_head)) {
343 /* output diagnostics */
344 mutex_unlock(&host->intr_mutex);
345 pr_warn("%s cannot stop syncpt intr id=%d\n",
346 __func__, id);
347 return;
348 }
349 }
350
351 host1x_hw_intr_free_syncpt_irq(host);
352
353 mutex_unlock(&host->intr_mutex);
354}
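
The ordering tests in add_waiter_to_queue() and remove_completed_waiters() are written as (s32)(a - b) comparisons so they stay correct when the syncpoint counters wrap around u32. A quick worked case:

	u32 sync = 0x00000002, thresh = 0xfffffffe;
	/* (s32)(thresh - sync) == (s32)0xfffffffc == -4 <= 0, so the
	   waiter is treated as completed even though thresh > sync as u32 */
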
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644
index 000000000000..2b8adf016a05
--- /dev/null
+++ b/drivers/gpu/host1x/intr.h
@@ -0,0 +1,102 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_INTR_H
20#define __HOST1X_INTR_H
21
22#include <linux/interrupt.h>
23#include <linux/workqueue.h>
24
25struct host1x;
26
27enum host1x_intr_action {
28 /*
29 * Perform cleanup after a submit has completed.
30 * 'data' points to a channel
31 */
32 HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
33
34 /*
35 * Wake up a task.
36 * 'data' points to a wait_queue_head_t
37 */
38 HOST1X_INTR_ACTION_WAKEUP,
39
40 /*
41	 * Wake up an interruptible task.
42 * 'data' points to a wait_queue_head_t
43 */
44 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
45
46 HOST1X_INTR_ACTION_COUNT
47};
48
49struct host1x_syncpt_intr {
50 spinlock_t lock;
51 struct list_head wait_head;
52	char thresh_irq_name[16]; /* "host1x_sp_%02d" plus NUL needs 13 bytes */
53 struct work_struct work;
54};
55
56struct host1x_waitlist {
57 struct list_head list;
58 struct kref refcount;
59 u32 thresh;
60 enum host1x_intr_action action;
61 atomic_t state;
62 void *data;
63 int count;
64};
65
66/*
67 * Schedule an action to be taken when a sync point reaches the given threshold.
68 *
69 * @id the sync point
70 * @thresh the threshold
71 * @action the action to take
72 * @data a pointer to extra data depending on action, see above
73 * @waiter waiter structure - assumes ownership
74 * @ref must be passed if cancellation is possible, else NULL
75 *
76 * This is a non-blocking API.
77 */
78int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
79 enum host1x_intr_action action, void *data,
80 struct host1x_waitlist *waiter, void **ref);
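
A minimal caller sketch of this contract (done_condition() is a hypothetical
predicate standing in for real completion state; host1x_syncpt_wait() in
syncpt.c below is the actual in-tree user):

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	void *ref;

	if (waiter && !host1x_intr_add_action(host, id, thresh,
					      HOST1X_INTR_ACTION_WAKEUP,
					      &wq, waiter, &ref)) {
		wait_event_timeout(wq, done_condition(), HZ);
		host1x_intr_put_ref(host, id, ref);
	}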
81
82/*
83 * Unreference an action submitted to host1x_intr_add_action().
84 * You must call this if you passed non-NULL as ref.
85 * @ref the ref returned from host1x_intr_add_action()
86 */
87void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
88
89/* Initialize host1x sync point interrupt */
90int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
91
92/* Deinitialize host1x sync point interrupt */
93void host1x_intr_deinit(struct host1x *host);
94
95/* Enable host1x sync point interrupt */
96void host1x_intr_start(struct host1x *host);
97
98/* Disable host1x sync point interrupt */
99void host1x_intr_stop(struct host1x *host);
100
101irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
102#endif
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
new file mode 100644
index 000000000000..f665d679031c
--- /dev/null
+++ b/drivers/gpu/host1x/job.c
@@ -0,0 +1,603 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/err.h>
21#include <linux/kref.h>
22#include <linux/module.h>
23#include <linux/scatterlist.h>
24#include <linux/slab.h>
25#include <linux/vmalloc.h>
26#include <trace/events/host1x.h>
27
28#include "channel.h"
29#include "dev.h"
30#include "host1x_bo.h"
31#include "job.h"
32#include "syncpt.h"
33
34struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
35 u32 num_cmdbufs, u32 num_relocs,
36 u32 num_waitchks)
37{
38 struct host1x_job *job = NULL;
39 unsigned int num_unpins = num_cmdbufs + num_relocs;
40 u64 total;
41 void *mem;
42
43 /* Check that we're not going to overflow */
44 total = sizeof(struct host1x_job) +
45 num_relocs * sizeof(struct host1x_reloc) +
46 num_unpins * sizeof(struct host1x_job_unpin_data) +
47 num_waitchks * sizeof(struct host1x_waitchk) +
48 num_cmdbufs * sizeof(struct host1x_job_gather) +
49 num_unpins * sizeof(dma_addr_t) +
50 num_unpins * sizeof(u32 *);
51 if (total > ULONG_MAX)
52 return NULL;
53
54 mem = job = kzalloc(total, GFP_KERNEL);
55 if (!job)
56 return NULL;
57
58 kref_init(&job->ref);
59 job->channel = ch;
60
61 /* Redistribute memory to the structs */
62 mem += sizeof(struct host1x_job);
63 job->relocarray = num_relocs ? mem : NULL;
64 mem += num_relocs * sizeof(struct host1x_reloc);
65 job->unpins = num_unpins ? mem : NULL;
66 mem += num_unpins * sizeof(struct host1x_job_unpin_data);
67 job->waitchk = num_waitchks ? mem : NULL;
68 mem += num_waitchks * sizeof(struct host1x_waitchk);
69 job->gathers = num_cmdbufs ? mem : NULL;
70 mem += num_cmdbufs * sizeof(struct host1x_job_gather);
71 job->addr_phys = num_unpins ? mem : NULL;
72
73 job->reloc_addr_phys = job->addr_phys;
74 job->gather_addr_phys = &job->addr_phys[num_relocs];
75
76 return job;
77}
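
The allocator above sizes a single kzalloc() for the job header plus all of
its trailing arrays, then carves the arrays out of the tail. A standalone
userspace sketch of the same pattern (demo_job and its fields are
illustrative; the widest-aligned array is carved out first so the more
weakly aligned one cannot misalign it):

	#include <stdlib.h>

	struct demo_job {
		double *addrs;	/* widest alignment, carved out first */
		int *relocs;
	};

	static struct demo_job *demo_job_alloc(size_t num_addrs,
					       size_t num_relocs)
	{
		size_t total = sizeof(struct demo_job) +
			       num_addrs * sizeof(double) +
			       num_relocs * sizeof(int);
		char *mem = calloc(1, total);
		struct demo_job *job = (struct demo_job *)mem;

		if (!job)
			return NULL;

		/* redistribute the single block to the member arrays */
		mem += sizeof(struct demo_job);
		job->addrs = num_addrs ? (double *)mem : NULL;
		mem += num_addrs * sizeof(double);
		job->relocs = num_relocs ? (int *)mem : NULL;
		return job;
	}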
78
79struct host1x_job *host1x_job_get(struct host1x_job *job)
80{
81 kref_get(&job->ref);
82 return job;
83}
84
85static void job_free(struct kref *ref)
86{
87 struct host1x_job *job = container_of(ref, struct host1x_job, ref);
88
89 kfree(job);
90}
91
92void host1x_job_put(struct host1x_job *job)
93{
94 kref_put(&job->ref, job_free);
95}
96
97void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
98 u32 words, u32 offset)
99{
100 struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
101
102 cur_gather->words = words;
103 cur_gather->bo = bo;
104 cur_gather->offset = offset;
105 job->num_gathers++;
106}
107
108/*
109 * NULL out an already satisfied WAIT_SYNCPT host method by patching its
110 * arguments in the command stream. The method data is changed to reference
111 * the reserved (never handed out or incremented) HOST1X_SYNCPT_RESERVED
112 * syncpt with a matching threshold value of 0, so it is guaranteed to be
113 * popped by the host HW.
114 */
115static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
116 struct host1x_bo *h, u32 offset)
117{
118 void *patch_addr = NULL;
119
120 /* patch the wait */
121 patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
122 if (patch_addr) {
123 host1x_syncpt_patch_wait(sp,
124 patch_addr + (offset & ~PAGE_MASK));
125 host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
126 } else
127 pr_err("Could not map cmdbuf for wait check\n");
128}
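
The hardware-specific write behind host1x_hw_syncpt_patch_wait() is not part
of this diff; as a hedged sketch, assuming the Tegra20/30 encoding of the
WAIT_SYNCPT method data (syncpt index in the high byte, threshold in the low
24 bits), the patch amounts to:

	/* sketch only: reserved syncpt 0 at threshold 0 is always satisfied */
	static void sketch_patch_wait(u32 *patch_addr)
	{
		*patch_addr = (HOST1X_SYNCPT_RESERVED << 24) | 0;
	}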
129
130/*
131 * Check driver supplied waitchk structs for syncpt thresholds
132 * that have already been satisfied and NULL the comparison (to
133 * avoid a wrap condition in the HW).
134 */
135static int do_waitchks(struct host1x_job *job, struct host1x *host,
136 struct host1x_bo *patch)
137{
138 int i;
139
140 /* compare syncpt vs wait threshold */
141 for (i = 0; i < job->num_waitchk; i++) {
142		struct host1x_waitchk *wait = &job->waitchk[i];
143		struct host1x_syncpt *sp;
144
145		/* validate syncpt id before looking it up */
146		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
147			continue;
148		sp = host1x_syncpt_get(host, wait->syncpt_id);
149
150 /* skip all other gathers */
151 if (patch != wait->bo)
152 continue;
153
154 trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
155 wait->syncpt_id, wait->thresh,
156 host1x_syncpt_read_min(sp));
157
158 if (host1x_syncpt_is_expired(sp, wait->thresh)) {
159 dev_dbg(host->dev,
160 "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
161 wait->syncpt_id, sp->name, wait->thresh,
162 host1x_syncpt_read_min(sp));
163
164 host1x_syncpt_patch_offset(sp, patch, wait->offset);
165 }
166
167 wait->bo = NULL;
168 }
169
170 return 0;
171}
172
173static unsigned int pin_job(struct host1x_job *job)
174{
175 unsigned int i;
176
177 job->num_unpins = 0;
178
179 for (i = 0; i < job->num_relocs; i++) {
180 struct host1x_reloc *reloc = &job->relocarray[i];
181 struct sg_table *sgt;
182 dma_addr_t phys_addr;
183
184 reloc->target = host1x_bo_get(reloc->target);
185 if (!reloc->target)
186 goto unpin;
187
188 phys_addr = host1x_bo_pin(reloc->target, &sgt);
189 if (!phys_addr)
190 goto unpin;
191
192 job->addr_phys[job->num_unpins] = phys_addr;
193 job->unpins[job->num_unpins].bo = reloc->target;
194 job->unpins[job->num_unpins].sgt = sgt;
195 job->num_unpins++;
196 }
197
198 for (i = 0; i < job->num_gathers; i++) {
199 struct host1x_job_gather *g = &job->gathers[i];
200 struct sg_table *sgt;
201 dma_addr_t phys_addr;
202
203 g->bo = host1x_bo_get(g->bo);
204 if (!g->bo)
205 goto unpin;
206
207 phys_addr = host1x_bo_pin(g->bo, &sgt);
208 if (!phys_addr)
209 goto unpin;
210
211 job->addr_phys[job->num_unpins] = phys_addr;
212 job->unpins[job->num_unpins].bo = g->bo;
213 job->unpins[job->num_unpins].sgt = sgt;
214 job->num_unpins++;
215 }
216
217 return job->num_unpins;
218
219unpin:
220 host1x_job_unpin(job);
221 return 0;
222}
223
224static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
225{
226 int i = 0;
227 u32 last_page = ~0;
228 void *cmdbuf_page_addr = NULL;
229
230 /* pin & patch the relocs for one gather */
231 while (i < job->num_relocs) {
232 struct host1x_reloc *reloc = &job->relocarray[i];
233 u32 reloc_addr = (job->reloc_addr_phys[i] +
234 reloc->target_offset) >> reloc->shift;
235 u32 *target;
236
237 /* skip all other gathers */
238 if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
239 i++;
240 continue;
241 }
242
243 if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
244 if (cmdbuf_page_addr)
245 host1x_bo_kunmap(cmdbuf, last_page,
246 cmdbuf_page_addr);
247
248 cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
249 reloc->cmdbuf_offset >> PAGE_SHIFT);
250 last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
251
252 if (unlikely(!cmdbuf_page_addr)) {
253 pr_err("Could not map cmdbuf for relocation\n");
254 return -ENOMEM;
255 }
256 }
257
258 target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
259 *target = reloc_addr;
260
261		/* mark this reloc as handled */
262		reloc->cmdbuf = NULL;
263 }
264
265 if (cmdbuf_page_addr)
266 host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
267
268 return 0;
269}
270
271static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
272 unsigned int offset)
273{
274 offset *= sizeof(u32);
275
276 if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
277 return -EINVAL;
278
279 return 0;
280}
281
282struct host1x_firewall {
283 struct host1x_job *job;
284 struct device *dev;
285
286 unsigned int num_relocs;
287 struct host1x_reloc *reloc;
288
289 struct host1x_bo *cmdbuf_id;
290 unsigned int offset;
291
292 u32 words;
293 u32 class;
294 u32 reg;
295 u32 mask;
296 u32 count;
297};
298
299static int check_mask(struct host1x_firewall *fw)
300{
301 u32 mask = fw->mask;
302 u32 reg = fw->reg;
303
304 while (mask) {
305 if (fw->words == 0)
306 return -EINVAL;
307
308 if (mask & 1) {
309 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
310				/* an address reg must have a matching reloc */
311				if (!fw->num_relocs ||
312				    check_reloc(fw->reloc, fw->cmdbuf_id,
313						fw->offset))
314					return -EINVAL;
315 fw->reloc++;
316 fw->num_relocs--;
317 }
318 fw->words--;
319 fw->offset++;
320 }
321 mask >>= 1;
322 reg++;
323 }
324
325 return 0;
326}
327
328static int check_incr(struct host1x_firewall *fw)
329{
330 u32 count = fw->count;
331 u32 reg = fw->reg;
332
333	while (count) {
334 if (fw->words == 0)
335 return -EINVAL;
336
337 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
338			/* an address reg must have a matching reloc */
339			if (!fw->num_relocs ||
340			    check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
341				return -EINVAL;
342 fw->reloc++;
343 fw->num_relocs--;
344 }
345 reg++;
346 fw->words--;
347 fw->offset++;
348 count--;
349 }
350
351 return 0;
352}
353
354static int check_nonincr(struct host1x_firewall *fw)
355{
356 int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
357 u32 count = fw->count;
358
359 while (count) {
360 if (fw->words == 0)
361 return -EINVAL;
362
363 if (is_addr_reg) {
364			/* an address reg must have a matching reloc */
365			if (!fw->num_relocs ||
366			    check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
367				return -EINVAL;
368 fw->reloc++;
369 fw->num_relocs--;
370 }
371 fw->words--;
372 fw->offset++;
373 count--;
374 }
375
376 return 0;
377}
378
379static int validate(struct host1x_job *job, struct device *dev,
380 struct host1x_job_gather *g)
381{
382 u32 *cmdbuf_base;
383 int err = 0;
384 struct host1x_firewall fw;
385
386 fw.job = job;
387 fw.dev = dev;
388 fw.reloc = job->relocarray;
389 fw.num_relocs = job->num_relocs;
390 fw.cmdbuf_id = g->bo;
391
392 fw.offset = 0;
393 fw.class = 0;
394
395 if (!job->is_addr_reg)
396 return 0;
397
398 cmdbuf_base = host1x_bo_mmap(g->bo);
399 if (!cmdbuf_base)
400 return -ENOMEM;
401
402 fw.words = g->words;
403 while (fw.words && !err) {
404 u32 word = cmdbuf_base[fw.offset];
405 u32 opcode = (word & 0xf0000000) >> 28;
406
407 fw.mask = 0;
408 fw.reg = 0;
409 fw.count = 0;
410 fw.words--;
411 fw.offset++;
412
413 switch (opcode) {
414 case 0:
415 fw.class = word >> 6 & 0x3ff;
416 fw.mask = word & 0x3f;
417 fw.reg = word >> 16 & 0xfff;
418 err = check_mask(&fw);
419 if (err)
420 goto out;
421 break;
422 case 1:
423 fw.reg = word >> 16 & 0xfff;
424 fw.count = word & 0xffff;
425 err = check_incr(&fw);
426 if (err)
427 goto out;
428 break;
429
430 case 2:
431 fw.reg = word >> 16 & 0xfff;
432 fw.count = word & 0xffff;
433 err = check_nonincr(&fw);
434 if (err)
435 goto out;
436 break;
437
438 case 3:
439 fw.mask = word & 0xffff;
440 fw.reg = word >> 16 & 0xfff;
441 err = check_mask(&fw);
442 if (err)
443 goto out;
444 break;
445 case 4:
446 case 5:
447 case 14:
448 break;
449 default:
450 err = -EINVAL;
451 break;
452 }
453 }
454
455 /* No relocs should remain at this point */
456 if (fw.num_relocs)
457 err = -EINVAL;
458
459out:
460 host1x_bo_munmap(g->bo, cmdbuf_base);
461
462 return err;
463}
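
To make the decode at the top of the loop concrete, a standalone check using
the same shifts and masks; the word 0x110a0002 is an illustrative INCR
(opcode 1):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t word = 0x110a0002;

		assert(((word & 0xf0000000) >> 28) == 1); /* opcode 1: INCR */
		assert((word >> 16 & 0xfff) == 0x10a);	  /* first register */
		assert((word & 0xffff) == 2);	/* two data words follow,
						   for regs 0x10a and 0x10b */
		return 0;
	}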
464
465static inline int copy_gathers(struct host1x_job *job, struct device *dev)
466{
467 size_t size = 0;
468 size_t offset = 0;
469 int i;
470
471 for (i = 0; i < job->num_gathers; i++) {
472 struct host1x_job_gather *g = &job->gathers[i];
473 size += g->words * sizeof(u32);
474 }
475
476 job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
477 &job->gather_copy,
478 GFP_KERNEL);
479	/* dma_alloc_writecombine() returns NULL on failure, not ERR_PTR */
480	if (!job->gather_copy_mapped)
481		return -ENOMEM;
484
485 job->gather_copy_size = size;
486
487 for (i = 0; i < job->num_gathers; i++) {
488 struct host1x_job_gather *g = &job->gathers[i];
489 void *gather;
490
491 gather = host1x_bo_mmap(g->bo);
492 memcpy(job->gather_copy_mapped + offset, gather + g->offset,
493 g->words * sizeof(u32));
494 host1x_bo_munmap(g->bo, gather);
495
496 g->base = job->gather_copy;
497 g->offset = offset;
498 g->bo = NULL;
499
500 offset += g->words * sizeof(u32);
501 }
502
503 return 0;
504}
505
506int host1x_job_pin(struct host1x_job *job, struct device *dev)
507{
508 int err;
509 unsigned int i, j;
510 struct host1x *host = dev_get_drvdata(dev->parent);
511 DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
512
513 bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
514 for (i = 0; i < job->num_waitchk; i++) {
515 u32 syncpt_id = job->waitchk[i].syncpt_id;
516 if (syncpt_id < host1x_syncpt_nb_pts(host))
517 set_bit(syncpt_id, waitchk_mask);
518 }
519
520 /* get current syncpt values for waitchk */
521 for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
522 host1x_syncpt_load(host->syncpt + i);
523
524 /* pin memory */
525	err = pin_job(job) ? 0 : -ENOMEM;
526	if (err)
527		goto out;
528
529 /* patch gathers */
530 for (i = 0; i < job->num_gathers; i++) {
531 struct host1x_job_gather *g = &job->gathers[i];
532
533 /* process each gather mem only once */
534 if (g->handled)
535 continue;
536
537 g->base = job->gather_addr_phys[i];
538
539 for (j = 0; j < job->num_gathers; j++)
540 if (job->gathers[j].bo == g->bo)
541 job->gathers[j].handled = true;
542
543 err = 0;
544
545 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
546 err = validate(job, dev, g);
547
548 if (err)
549 dev_err(dev, "Job invalid (err=%d)\n", err);
550
551 if (!err)
552 err = do_relocs(job, g->bo);
553
554 if (!err)
555 err = do_waitchks(job, host, g->bo);
556
557 if (err)
558 break;
559 }
560
561 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
562 err = copy_gathers(job, dev);
563 if (err) {
564 host1x_job_unpin(job);
565 return err;
566 }
567 }
568
569out:
570 wmb();
571
572 return err;
573}
574
575void host1x_job_unpin(struct host1x_job *job)
576{
577 unsigned int i;
578
579 for (i = 0; i < job->num_unpins; i++) {
580 struct host1x_job_unpin_data *unpin = &job->unpins[i];
581 host1x_bo_unpin(unpin->bo, unpin->sgt);
582 host1x_bo_put(unpin->bo);
583 }
584 job->num_unpins = 0;
585
586 if (job->gather_copy_size)
587 dma_free_writecombine(job->channel->dev, job->gather_copy_size,
588 job->gather_copy_mapped,
589 job->gather_copy);
590}
591
592/*
593 * Debug routine used to dump job entries
594 */
595void host1x_job_dump(struct device *dev, struct host1x_job *job)
596{
597 dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
598 dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
599 dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
600 dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
601 dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots);
602 dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
603}
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
new file mode 100644
index 000000000000..fba45f20458e
--- /dev/null
+++ b/drivers/gpu/host1x/job.h
@@ -0,0 +1,162 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_JOB_H
20#define __HOST1X_JOB_H
21
22struct host1x_job_gather {
23 u32 words;
24 dma_addr_t base;
25 struct host1x_bo *bo;
26 int offset;
27 bool handled;
28};
29
30struct host1x_cmdbuf {
31 u32 handle;
32 u32 offset;
33 u32 words;
34 u32 pad;
35};
36
37struct host1x_reloc {
38 struct host1x_bo *cmdbuf;
39 u32 cmdbuf_offset;
40 struct host1x_bo *target;
41 u32 target_offset;
42 u32 shift;
43 u32 pad;
44};
45
46struct host1x_waitchk {
47 struct host1x_bo *bo;
48 u32 offset;
49 u32 syncpt_id;
50 u32 thresh;
51};
52
53struct host1x_job_unpin_data {
54 struct host1x_bo *bo;
55 struct sg_table *sgt;
56};
57
58/*
59 * Each submit is tracked as a host1x_job.
60 */
61struct host1x_job {
62 /* When refcount goes to zero, job can be freed */
63 struct kref ref;
64
65 /* List entry */
66 struct list_head list;
67
68 /* Channel where job is submitted to */
69 struct host1x_channel *channel;
70
71 u32 client;
72
73 /* Gathers and their memory */
74 struct host1x_job_gather *gathers;
75 unsigned int num_gathers;
76
77 /* Wait checks to be processed at submit time */
78 struct host1x_waitchk *waitchk;
79 unsigned int num_waitchk;
80 u32 waitchk_mask;
81
82 /* Array of handles to be pinned & unpinned */
83 struct host1x_reloc *relocarray;
84 unsigned int num_relocs;
85 struct host1x_job_unpin_data *unpins;
86 unsigned int num_unpins;
87
88 dma_addr_t *addr_phys;
89 dma_addr_t *gather_addr_phys;
90 dma_addr_t *reloc_addr_phys;
91
92 /* Sync point id, number of increments and end related to the submit */
93 u32 syncpt_id;
94 u32 syncpt_incrs;
95 u32 syncpt_end;
96
97 /* Maximum time to wait for this job */
98 unsigned int timeout;
99
100 /* Index and number of slots used in the push buffer */
101 unsigned int first_get;
102 unsigned int num_slots;
103
104 /* Copy of gathers */
105 size_t gather_copy_size;
106 dma_addr_t gather_copy;
107 u8 *gather_copy_mapped;
108
109 /* Check if register is marked as an address reg */
110	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);
111
112 /* Request a SETCLASS to this class */
113 u32 class;
114
115 /* Add a channel wait for previous ops to complete */
116 bool serialize;
117};
118/*
119 * Allocate memory for a job. Just enough memory will be allocated to
120 * accommodate the submit.
121 */
122struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
123 u32 num_cmdbufs, u32 num_relocs,
124 u32 num_waitchks);
125
126/*
127 * Add a gather to a job.
128 */
129void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
130 u32 words, u32 offset);
131
132/*
133 * Increment the reference count of the job.
134 */
135struct host1x_job *host1x_job_get(struct host1x_job *job);
136
137/*
138 * Decrement the reference count of the job; free it when it reaches zero.
139 */
140void host1x_job_put(struct host1x_job *job);
141
142/*
143 * Pin memory related to job. This handles relocation of addresses to the
144 * host1x address space. Handles both the gather memory and any other memory
145 * referred to from the gather buffers.
146 *
147 * Also patches out host waits that would wait for an expired sync point
148 * value.
149 */
150int host1x_job_pin(struct host1x_job *job, struct device *dev);
151
152/*
153 * Unpin memory related to job.
154 */
155void host1x_job_unpin(struct host1x_job *job);
156
157/*
158 * Dump contents of job to debug output.
159 */
160void host1x_job_dump(struct device *dev, struct host1x_job *job);
161
162#endif
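
Taken together, the API above implies a submit flow roughly like the
following hedged sketch; ch, bo, num_words and dev are illustrative, and the
actual submission entry point lives in the channel code rather than in this
header:

	struct host1x_job *job = host1x_job_alloc(ch, 1, 0, 0);

	if (job) {
		host1x_job_add_gather(job, bo, num_words, 0);
		if (!host1x_job_pin(job, dev)) {
			/* ... hand the job to the channel for submission ... */
			host1x_job_unpin(job);
		}
		host1x_job_put(job);
	}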
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 000000000000..4b493453e805
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/slab.h>
22
23#include <trace/events/host1x.h>
24
25#include "syncpt.h"
26#include "dev.h"
27#include "intr.h"
28#include "debug.h"
29
30#define SYNCPT_CHECK_PERIOD (2 * HZ)
31#define MAX_STUCK_CHECK_COUNT 15
32
33static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
34 struct device *dev,
35 int client_managed)
36{
37 int i;
38 struct host1x_syncpt *sp = host->syncpt;
39 char *name;
40
41 for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
42 ;
43	if (i >= host->info->nb_pts)
44		return NULL;
45
46 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
47 dev ? dev_name(dev) : NULL);
48 if (!name)
49 return NULL;
50
51 sp->dev = dev;
52 sp->name = name;
53 sp->client_managed = client_managed;
54
55 return sp;
56}
57
58u32 host1x_syncpt_id(struct host1x_syncpt *sp)
59{
60 return sp->id;
61}
62
63/*
64 * Updates the value sent to hardware.
65 */
66u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
67{
68 return (u32)atomic_add_return(incrs, &sp->max_val);
69}
70
71/*
72 * Write cached syncpoint and waitbase values to hardware.
73 */
74void host1x_syncpt_restore(struct host1x *host)
75{
76 struct host1x_syncpt *sp_base = host->syncpt;
77 u32 i;
78
79 for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
80 host1x_hw_syncpt_restore(host, sp_base + i);
81 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
82 host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
83 wmb();
84}
85
86/*
87 * Update the cached syncpoint and waitbase values by reading them
88 * from the registers.
89 */
90void host1x_syncpt_save(struct host1x *host)
91{
92 struct host1x_syncpt *sp_base = host->syncpt;
93 u32 i;
94
95 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
96 if (host1x_syncpt_client_managed(sp_base + i))
97 host1x_hw_syncpt_load(host, sp_base + i);
98 else
99 WARN_ON(!host1x_syncpt_idle(sp_base + i));
100 }
101
102 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
103 host1x_hw_syncpt_load_wait_base(host, sp_base + i);
104}
105
106/*
107 * Updates the cached syncpoint value by reading a new value from the hardware
108 * register
109 */
110u32 host1x_syncpt_load(struct host1x_syncpt *sp)
111{
112 u32 val;
113 val = host1x_hw_syncpt_load(sp->host, sp);
114 trace_host1x_syncpt_load_min(sp->id, val);
115
116 return val;
117}
118
119/*
120 * Get the current syncpoint base
121 */
122u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
123{
124 u32 val;
125 host1x_hw_syncpt_load_wait_base(sp->host, sp);
126 val = sp->base_val;
127 return val;
128}
129
130/*
131 * Write a cpu syncpoint increment to the hardware, without touching
132 * the cache. Caller is responsible for host being powered.
133 */
134void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
135{
136 host1x_hw_syncpt_cpu_incr(sp->host, sp);
137}
138
139/*
140 * Increment syncpoint value from cpu, updating cache
141 */
142void host1x_syncpt_incr(struct host1x_syncpt *sp)
143{
144 if (host1x_syncpt_client_managed(sp))
145 host1x_syncpt_incr_max(sp, 1);
146 host1x_syncpt_cpu_incr(sp);
147}
148
149/*
150 * Update the sync point value from hardware, and return true if the
151 * syncpoint has expired, false if we may need to wait.
152 */
153static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
154{
155 host1x_hw_syncpt_load(sp->host, sp);
156 return host1x_syncpt_is_expired(sp, thresh);
157}
158
159/*
160 * Main entrypoint for syncpoint value waits.
161 */
162int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
163 u32 *value)
164{
165 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
166 void *ref;
167 struct host1x_waitlist *waiter;
168 int err = 0, check_count = 0;
169 u32 val;
170
171 if (value)
172 *value = 0;
173
174 /* first check cache */
175 if (host1x_syncpt_is_expired(sp, thresh)) {
176 if (value)
177 *value = host1x_syncpt_load(sp);
178 return 0;
179 }
180
181 /* try to read from register */
182 val = host1x_hw_syncpt_load(sp->host, sp);
183 if (host1x_syncpt_is_expired(sp, thresh)) {
184 if (value)
185 *value = val;
186 goto done;
187 }
188
189 if (!timeout) {
190 err = -EAGAIN;
191 goto done;
192 }
193
194 /* allocate a waiter */
195 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
196 if (!waiter) {
197 err = -ENOMEM;
198 goto done;
199 }
200
201 /* schedule a wakeup when the syncpoint value is reached */
202 err = host1x_intr_add_action(sp->host, sp->id, thresh,
203 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
204 &wq, waiter, &ref);
205 if (err)
206 goto done;
207
208 err = -EAGAIN;
209 /* Caller-specified timeout may be impractically low */
210 if (timeout < 0)
211 timeout = LONG_MAX;
212
213 /* wait for the syncpoint, or timeout, or signal */
214 while (timeout) {
215 long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
216 int remain = wait_event_interruptible_timeout(wq,
217 syncpt_load_min_is_expired(sp, thresh),
218 check);
219 if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
220 if (value)
221 *value = host1x_syncpt_load(sp);
222 err = 0;
223 break;
224 }
225 if (remain < 0) {
226 err = remain;
227 break;
228 }
229 timeout -= check;
230 if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
231 dev_warn(sp->host->dev,
232 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
233 current->comm, sp->id, sp->name,
234 thresh, timeout);
235
236 host1x_debug_dump_syncpts(sp->host);
237 if (check_count == MAX_STUCK_CHECK_COUNT)
238 host1x_debug_dump(sp->host);
239 check_count++;
240 }
241 }
242 host1x_intr_put_ref(sp->host, sp->id, ref);
243
244done:
245 return err;
246}
247EXPORT_SYMBOL(host1x_syncpt_wait);
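
A hedged usage sketch of the exported wait above; sp and thresh are
illustrative, and the 100 ms budget is converted because the timeout is in
jiffies:

	u32 value;
	int err = host1x_syncpt_wait(sp, thresh, msecs_to_jiffies(100),
				     &value);

	if (err == -EAGAIN) {
		/* timed out, or timeout was 0 and the threshold not reached */
	} else if (!err) {
		/* 'value' holds the syncpoint value observed at expiry */
	}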
248
249/*
250 * Returns true if syncpoint is expired, false if we may need to wait
251 */
252bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
253{
254 u32 current_val;
255 u32 future_val;
256 smp_rmb();
257 current_val = (u32)atomic_read(&sp->min_val);
258 future_val = (u32)atomic_read(&sp->max_val);
259
260 /* Note the use of unsigned arithmetic here (mod 1<<32).
261 *
262 * c = current_val = min_val = the current value of the syncpoint.
263 * t = thresh = the value we are checking
264 * f = future_val = max_val = the value c will reach when all
265 * outstanding increments have completed.
266 *
267 * Note that c always chases f until it reaches f.
268 *
269 * Dtf = (f - t)
270 * Dtc = (c - t)
271 *
272 * Consider all cases:
273 *
274 * A) .....c..t..f..... Dtf < Dtc need to wait
275 * B) .....c.....f..t.. Dtf > Dtc expired
276 * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
277 *
278 * Any case where f==c: always expired (for any t). Dtf == Dcf
279 * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
280 * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
281 * Dtc!=0)
282 *
283 * Other cases:
284 *
285 * A) .....t..f..c..... Dtf < Dtc need to wait
286 * A) .....f..c..t..... Dtf < Dtc need to wait
287 * A) .....f..t..c..... Dtf > Dtc expired
288 *
289 * So:
290 * Dtf >= Dtc implies EXPIRED (return true)
291 * Dtf < Dtc implies WAIT (return false)
292 *
293 * Note: If t is expired then we *cannot* wait on it. We would wait
294 * forever (hang the system).
295 *
296 * Note: do NOT get clever and remove the -thresh from both sides. It
297 * is NOT the same.
298 *
299 * If the sync point is client managed, max_val is not tracked by the
300 * kernel and we fall back to a direct signed comparison instead.
301 */
302 if (!host1x_syncpt_client_managed(sp))
303 return future_val - thresh >= current_val - thresh;
304 else
305 return (s32)(current_val - thresh) >= 0;
306}
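
Since the wrap-around comparison is easy to get wrong, here is a standalone
worked instance of case A from the comment above, with the counter close to
wrapping (the concrete values are illustrative):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* c..t..f around the 2^32 wrap: still need to wait */
		uint32_t c = 0xfffffff0, t = 0x00000005, f = 0x00000010;

		/* Dtf = f - t = 0xb, Dtc = c - t = 0xffffffeb, mod 2^32 */
		assert((uint32_t)(f - t) < (uint32_t)(c - t));

		/* so "expired" (Dtf >= Dtc) is false: the waiter must wait */
		assert(!((uint32_t)(f - t) >= (uint32_t)(c - t)));
		return 0;
	}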
307
308/* remove a wait pointed to by patch_addr */
309int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
310{
311 return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
312}
313
314int host1x_syncpt_init(struct host1x *host)
315{
316 struct host1x_syncpt *syncpt;
317 int i;
318
319 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
320 GFP_KERNEL);
321 if (!syncpt)
322 return -ENOMEM;
323
324 for (i = 0; i < host->info->nb_pts; ++i) {
325 syncpt[i].id = i;
326 syncpt[i].host = host;
327 }
328
329 host->syncpt = syncpt;
330
331 host1x_syncpt_restore(host);
332
333 /* Allocate sync point to use for clearing waits for expired fences */
334 host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
335 if (!host->nop_sp)
336 return -ENOMEM;
337
338 return 0;
339}
340
341struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
342 int client_managed)
343{
344 struct host1x *host = dev_get_drvdata(dev->parent);
345 return _host1x_syncpt_alloc(host, dev, client_managed);
346}
347
348void host1x_syncpt_free(struct host1x_syncpt *sp)
349{
350 if (!sp)
351 return;
352
353 kfree(sp->name);
354 sp->dev = NULL;
355 sp->name = NULL;
356 sp->client_managed = 0;
357}
358
359void host1x_syncpt_deinit(struct host1x *host)
360{
361 int i;
362 struct host1x_syncpt *sp = host->syncpt;
363 for (i = 0; i < host->info->nb_pts; i++, sp++)
364 kfree(sp->name);
365}
366
367int host1x_syncpt_nb_pts(struct host1x *host)
368{
369 return host->info->nb_pts;
370}
371
372int host1x_syncpt_nb_bases(struct host1x *host)
373{
374 return host->info->nb_bases;
375}
376
377int host1x_syncpt_nb_mlocks(struct host1x *host)
378{
379 return host->info->nb_mlocks;
380}
381
382struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
383{
384	if (id >= host->info->nb_pts)
385 return NULL;
386 return host->syncpt + id;
387}
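
A hedged pairing sketch for the request/free API in this file; client->dev
is illustrative, and passing 0 for client_managed requests a sync point
whose max value is tracked by the kernel:

	struct host1x_syncpt *sp = host1x_syncpt_request(client->dev, 0);

	if (sp) {
		/* ... use host1x_syncpt_id(sp) in submits, wait on it ... */
		host1x_syncpt_free(sp);
	}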
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
new file mode 100644
index 000000000000..c99806130f2e
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.h
@@ -0,0 +1,165 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_SYNCPT_H
20#define __HOST1X_SYNCPT_H
21
22#include <linux/atomic.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25
26#include "intr.h"
27
28struct host1x;
29
30/* Reserved for replacing an expired wait with a NOP */
31#define HOST1X_SYNCPT_RESERVED 0
32
33struct host1x_syncpt {
34 int id;
35 atomic_t min_val;
36 atomic_t max_val;
37 u32 base_val;
38 const char *name;
39 int client_managed;
40 struct host1x *host;
41 struct device *dev;
42
43 /* interrupt data */
44 struct host1x_syncpt_intr intr;
45};
46
47/* Initialize sync point array */
48int host1x_syncpt_init(struct host1x *host);
49
50/* Free sync point array */
51void host1x_syncpt_deinit(struct host1x *host);
52
53/*
54 * Read max. It indicates how many operations there are in the queue,
55 * either in the channel or in a software thread.
56 */
57static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
58{
59 smp_rmb();
60 return (u32)atomic_read(&sp->max_val);
61}
62
63/*
64 * Read min, which is a shadow of the current sync point value in hardware.
65 */
66static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
67{
68 smp_rmb();
69 return (u32)atomic_read(&sp->min_val);
70}
71
72/* Return the number of sync points supported. */
73int host1x_syncpt_nb_pts(struct host1x *host);
74
75/* Return number of wait bases supported. */
76int host1x_syncpt_nb_bases(struct host1x *host);
77
78/* Return number of mlocks supported. */
79int host1x_syncpt_nb_mlocks(struct host1x *host);
80
81/*
82 * Check sync point sanity. If the real (hardware) value is larger than
83 * max, there have been too many sync point increments.
84 *
85 * Client-managed sync points are not tracked.
86 */
87static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
88{
89 u32 max;
90 if (sp->client_managed)
91 return true;
92 max = host1x_syncpt_read_max(sp);
93 return (s32)(max - real) >= 0;
94}
95
96/* Return true if sync point is client managed. */
97static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
98{
99 return sp->client_managed;
100}
101
102/*
103 * Returns true if syncpoint min == max, which means that there are no
104 * outstanding operations.
105 */
106static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
107{
108 int min, max;
109 smp_rmb();
110 min = atomic_read(&sp->min_val);
111 max = atomic_read(&sp->max_val);
112 return (min == max);
113}
114
115/* Return pointer to struct denoting sync point id. */
116struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
117
118/* Request incrementing a sync point. */
119void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
120
121/* Load current value from hardware to the shadow register. */
122u32 host1x_syncpt_load(struct host1x_syncpt *sp);
123
124/* Check if the given syncpoint value has already passed */
125bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
126
127/* Save host1x sync point state into shadow registers. */
128void host1x_syncpt_save(struct host1x *host);
129
130/* Reset host1x sync point state from shadow registers. */
131void host1x_syncpt_restore(struct host1x *host);
132
133/* Read current wait base value into shadow register and return it. */
134u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
135
136/* Increment sync point and its max. */
137void host1x_syncpt_incr(struct host1x_syncpt *sp);
138
139/* Indicate future operations by incrementing the sync point max. */
140u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
141
142/* Wait until sync point reaches a threshold value, or a timeout. */
143int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
144 long timeout, u32 *value);
145
146/* Check if sync point id is valid. */
147static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
148{
149 return sp->id < host1x_syncpt_nb_pts(sp->host);
150}
151
152/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
153int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
154
155/* Return id of the sync point */
156u32 host1x_syncpt_id(struct host1x_syncpt *sp);
157
158/* Allocate a sync point for a device. */
159struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
160 int client_managed);
161
162/* Free a sync point. */
163void host1x_syncpt_free(struct host1x_syncpt *sp);
164
165#endif