author    Daniel Vetter <daniel.vetter@ffwll.ch>  2013-05-21 03:52:16 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2013-05-21 03:52:16 -0400
commit    e1b73cba13a0cc68dd4f746eced15bd6bb24cda4 (patch)
tree      b1c9e10730724024a700031ad56c20419dabb500 /drivers/gpu
parent    98304ad186296dc1e655399e28d5973c21db6a73 (diff)
parent    c7788792a5e7b0d5d7f96d0766b4cb6112d47d75 (diff)
Merge tag 'v3.10-rc2' into drm-intel-next-queued
Backmerge Linux 3.10-rc2 since the various (rather trivial) conflicts grew
a bit out of hand. intel_dp.c has the only real functional conflict since
the logic changed while dev_priv->edp.bpp was moved around.

Also squash in a whitespace fixup from Ben Widawsky for i915_gem_gtt.c,
git seems to do something pretty strange in there (which I don't fully
understand tbh).

Conflicts:
	drivers/gpu/drm/i915/i915_reg.h
	drivers/gpu/drm/i915/intel_dp.c

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 43
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 38
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 418
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 30
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 279
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 21
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 8
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 34
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 43
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 10
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 91
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 43
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 2
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 273
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 54
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 717
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 7
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 52
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 17
-rw-r--r--  drivers/gpu/drm/gma500/power.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 154
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 33
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 8
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 55
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 31
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 110
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/engine.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/object.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/parent.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/base.c) | 186
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv04.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv04.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv10.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv10.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv20.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv20.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv30.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv30.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv40.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv40.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nv50.c) | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c (renamed from drivers/gpu/drm/nouveau/core/subdev/device/nve0.c) | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv25.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv30.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv34.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 230
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/parent.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/device.h (renamed from drivers/gpu/drm/nouveau/core/include/subdev/device.h) | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/graph.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/os.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 129
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c | 175
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c | 221
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/Makefile | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c (renamed from drivers/gpu/drm/nouveau/nouveau_calc.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c (renamed from drivers/gpu/drm/nouveau/nv04_crtc.c) | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/cursor.c (renamed from drivers/gpu/drm/nouveau/nv04_cursor.c) | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c (renamed from drivers/gpu/drm/nouveau/nv04_dac.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c (renamed from drivers/gpu/drm/nouveau/nv04_dfp.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c (renamed from drivers/gpu/drm/nouveau/nv04_display.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h (renamed from drivers/gpu/drm/nouveau/nv04_display.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c (renamed from drivers/gpu/drm/nouveau/nouveau_hw.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.h (renamed from drivers/gpu/drm/nouveau/nouveau_hw.h) | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/nvreg.h (renamed from drivers/gpu/drm/nouveau/nvreg.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv_modes.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c (renamed from drivers/gpu/drm/nouveau/nv04_tv.c) | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.h (renamed from drivers/gpu/drm/nouveau/nv17_tv.h) | 0
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 27
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 165
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 38
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 17
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/qxl/Makefile | 9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 694
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 141
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dev.h | 879
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 973
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 390
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 145
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 559
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 93
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 567
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 97
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 149
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 176
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 412
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 97
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 302
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 365
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 112
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 304
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 581
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1187
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 169
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 414
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 21
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 404
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 150
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 831
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 909
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 43
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 979
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 40
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 7
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 217
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 52
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 327
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 9
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 5
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 4
-rw-r--r--  drivers/gpu/host1x/Kconfig | 24
-rw-r--r--  drivers/gpu/host1x/Makefile | 20
-rw-r--r--  drivers/gpu/host1x/cdma.c | 491
-rw-r--r--  drivers/gpu/host1x/cdma.h | 100
-rw-r--r--  drivers/gpu/host1x/channel.c | 126
-rw-r--r--  drivers/gpu/host1x/channel.h | 52
-rw-r--r--  drivers/gpu/host1x/debug.c | 210
-rw-r--r--  drivers/gpu/host1x/debug.h | 51
-rw-r--r--  drivers/gpu/host1x/dev.c | 246
-rw-r--r--  drivers/gpu/host1x/dev.h | 308
-rw-r--r--  drivers/gpu/host1x/drm/Kconfig (renamed from drivers/gpu/drm/tegra/Kconfig) | 20
-rw-r--r--  drivers/gpu/host1x/drm/dc.c (renamed from drivers/gpu/drm/tegra/dc.c) | 36
-rw-r--r--  drivers/gpu/host1x/drm/dc.h (renamed from drivers/gpu/drm/tegra/dc.h) | 0
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 640
-rw-r--r--  drivers/gpu/host1x/drm/drm.h (renamed from drivers/gpu/drm/tegra/drm.h) | 68
-rw-r--r--  drivers/gpu/host1x/drm/fb.c | 374
-rw-r--r--  drivers/gpu/host1x/drm/gem.c | 270
-rw-r--r--  drivers/gpu/host1x/drm/gem.h | 59
-rw-r--r--  drivers/gpu/host1x/drm/gr2d.c | 339
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.c (renamed from drivers/gpu/drm/tegra/hdmi.c) | 5
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.h (renamed from drivers/gpu/drm/tegra/hdmi.h) | 0
-rw-r--r--  drivers/gpu/host1x/drm/output.c (renamed from drivers/gpu/drm/tegra/output.c) | 0
-rw-r--r--  drivers/gpu/host1x/drm/rgb.c (renamed from drivers/gpu/drm/tegra/rgb.c) | 0
-rw-r--r--  drivers/gpu/host1x/host1x.h | 30
-rw-r--r--  drivers/gpu/host1x/host1x_bo.h | 87
-rw-r--r--  drivers/gpu/host1x/host1x_client.h | 35
-rw-r--r--  drivers/gpu/host1x/hw/Makefile | 6
-rw-r--r--  drivers/gpu/host1x/hw/cdma_hw.c | 326
-rw-r--r--  drivers/gpu/host1x/hw/channel_hw.c | 168
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 322
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.c | 42
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.h | 25
-rw-r--r--  drivers/gpu/host1x/hw/host1x01_hardware.h | 143
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_channel.h | 120
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_sync.h | 243
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_uclass.h | 174
-rw-r--r--  drivers/gpu/host1x/hw/intr_hw.c | 143
-rw-r--r--  drivers/gpu/host1x/hw/syncpt_hw.c | 114
-rw-r--r--  drivers/gpu/host1x/intr.c | 354
-rw-r--r--  drivers/gpu/host1x/intr.h | 102
-rw-r--r--  drivers/gpu/host1x/job.c | 603
-rw-r--r--  drivers/gpu/host1x/job.h | 162
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 387
-rw-r--r--  drivers/gpu/host1x/syncpt.h | 165
328 files changed, 23524 insertions, 3516 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df3daea..d8a22c2a579d 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
 obj-y				+= drm/ vga/
+obj-$(CONFIG_TEGRA_HOST1X)	+= host1x/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1e82882da9de..b16c50ee769c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -215,8 +215,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
 
-source "drivers/gpu/drm/tegra/Kconfig"
-
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8f94018852a6..1ecbe5b7312d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
-obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC) += tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
 obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 528429252f0f..02e52d543e4b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
 	void *sysram;
 	int size;
 	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
 };
 
 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe7d2c5..fbc0823cfa18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->afb.obj;
 	bo = gem_to_ast_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = ast_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 		}
 		unmap = true;
 	}
-	for (i = y; i < y + height; i++) {
+	for (i = y; i <= y2; i++) {
 		/* assume equal stride for now */
 		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
-		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
 	}
 	if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
 	ast->fbdev = afbdev;
 	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	spin_lock_init(&afbdev->dirty_lock);
 	ret = drm_fb_helper_init(dev, &afbdev->helper,
 				 1, 1);
 	if (ret) {
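
The ast hunks above introduce deferred damage tracking: when the buffer object cannot be reserved because it is busy (-EBUSY), the damaged rectangle is folded into a stored dirty rect under a spinlock and flushed on a later call, with the empty rect encoded as x1/y1 = INT_MAX, x2/y2 = 0. A standalone C sketch of that accumulate-or-flush bookkeeping follows; the names dirty_rect, dirty_merge and dirty_reset are illustrative, not from the driver, and the kernel spinlock is omitted:

#include <limits.h>
#include <stdio.h>

/* Standalone model of the patch's dirty-rect bookkeeping. */
struct dirty_rect {
	int x1, y1, x2, y2;	/* inclusive bounds */
};

/* "Empty" sentinel used by the patch: x1/y1 = INT_MAX, x2/y2 = 0. */
static void dirty_reset(struct dirty_rect *d)
{
	d->x1 = d->y1 = INT_MAX;
	d->x2 = d->y2 = 0;
}

/* Fold new damage [x, y, w, h] into the stored rect (min/max merge). */
static void dirty_merge(struct dirty_rect *d, int x, int y, int w, int h)
{
	int x2 = x + w - 1, y2 = y + h - 1;

	if (d->x1 < x)
		x = d->x1;
	if (d->y1 < y)
		y = d->y1;
	if (d->x2 > x2)
		x2 = d->x2;
	if (d->y2 > y2)
		y2 = d->y2;
	d->x1 = x;
	d->y1 = y;
	d->x2 = x2;
	d->y2 = y2;
}

int main(void)
{
	struct dirty_rect d;

	dirty_reset(&d);
	dirty_merge(&d, 10, 10, 4, 4);	/* BO busy: damage stored for later */
	dirty_merge(&d, 0, 0, 2, 2);	/* more damage arrives meanwhile */
	/* BO reservable again: copy rows y1..y2, cols x1..x2, then reset. */
	printf("flush rows %d..%d, cols %d..%d\n", d.y1, d.y2, d.x1, d.x2);
	dirty_reset(&d);
	return 0;
}

The min/max merge keeps a single bounding box, so a burst of small updates while the BO is busy costs one copy of their union once the BO becomes reservable again.
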
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731a6112..09da3393c527 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 
 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 	if (ret) {
-		if (ret != -ERESTARTSYS)
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc724e5a2..7ca059596887 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
 	struct list_head fbdev_list;
 	void *sysram;
 	int size;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
 };
 
 struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index e25afccaf85b..3541b567bbd8 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->gfb.obj;
 	bo = gem_to_cirrus_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 
 	cdev->mode_info.gfbdev = gfbdev;
 	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+	spin_lock_init(&gfbdev->dirty_lock);
 
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26e4905..2ed8cfc740c9 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
 
 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 	if (ret) {
-		if (ret != -ERESTARTSYS)
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 792c3e3795ca..e7e92429d10f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 
+	/* Locking is currently fubar in the panic handler. */
+	if (oops_in_progress)
+		return;
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 		WARN_ON(!mutex_is_locked(&crtc->mutex));
 
@@ -178,9 +182,6 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
 };
 
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
-		 drm_dirty_info_enum_list)
-
 struct drm_conn_prop_enum_list {
 	int type;
 	char *name;
@@ -249,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 	else
 		return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
@@ -412,7 +414,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 	mutex_lock(&dev->mode_config.fb_lock);
 	fb = __drm_framebuffer_lookup(dev, id);
 	if (fb)
-		kref_get(&fb->refcount);
+		drm_framebuffer_reference(fb);
 	mutex_unlock(&dev->mode_config.fb_lock);
 
 	return fb;
@@ -706,7 +708,6 @@ int drm_connector_init(struct drm_device *dev,
 	connector->connector_type = connector_type;
 	connector->connector_type_id =
 		++drm_connector_enum_list[connector_type].count; /* TODO */
-	INIT_LIST_HEAD(&connector->user_modes);
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
 	connector->edid_blob_ptr = NULL;
@@ -747,9 +748,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		drm_mode_remove(connector, mode);
 
-	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
-		drm_mode_remove(connector, mode);
-
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
 	dev->mode_config.num_connector--;
@@ -1120,45 +1118,7 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
-	mutex_init(&dev->mode_config.mutex);
-	mutex_init(&dev->mode_config.idr_mutex);
-	mutex_init(&dev->mode_config.fb_lock);
-	INIT_LIST_HEAD(&dev->mode_config.fb_list);
-	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
-	INIT_LIST_HEAD(&dev->mode_config.connector_list);
-	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
-	INIT_LIST_HEAD(&dev->mode_config.property_list);
-	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
-	INIT_LIST_HEAD(&dev->mode_config.plane_list);
-	idr_init(&dev->mode_config.crtc_idr);
-
-	drm_modeset_lock_all(dev);
-	drm_mode_create_standard_connector_properties(dev);
-	drm_modeset_unlock_all(dev);
-
-	/* Just to be sure */
-	dev->mode_config.num_fb = 0;
-	dev->mode_config.num_connector = 0;
-	dev->mode_config.num_crtc = 0;
-	dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
 	uint32_t total_objects = 0;
 
@@ -1203,69 +1163,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
 /**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
-	struct drm_connector *connector, *ot;
-	struct drm_crtc *crtc, *ct;
-	struct drm_encoder *encoder, *enct;
-	struct drm_framebuffer *fb, *fbt;
-	struct drm_property *property, *pt;
-	struct drm_plane *plane, *plt;
-
-	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
-				 head) {
-		encoder->funcs->destroy(encoder);
-	}
-
-	list_for_each_entry_safe(connector, ot,
-				 &dev->mode_config.connector_list, head) {
-		connector->funcs->destroy(connector);
-	}
-
-	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
-				 head) {
-		drm_property_destroy(dev, property);
-	}
-
-	/*
-	 * Single-threaded teardown context, so it's not required to grab the
-	 * fb_lock to protect against concurrent fb_list access. Contrary, it
-	 * would actually deadlock with the drm_framebuffer_cleanup function.
-	 *
-	 * Also, if there are any framebuffers left, that's a driver leak now,
-	 * so politely WARN about this.
-	 */
-	WARN_ON(!list_empty(&dev->mode_config.fb_list));
-	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		drm_framebuffer_remove(fb);
-	}
-
-	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
-				 head) {
-		plane->funcs->destroy(plane);
-	}
-
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
-	}
-
-	idr_destroy(&dev->mode_config.crtc_idr);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
  * @in: drm_display_mode to use
@@ -2326,7 +2223,6 @@ int drm_mode_addfb(struct drm_device *dev,
 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
 	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("could not create framebuffer\n");
-		drm_modeset_unlock_all(dev);
 		return PTR_ERR(fb);
 	}
 
@@ -2506,7 +2402,6 @@ int drm_mode_addfb2(struct drm_device *dev,
 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
 	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("could not create framebuffer\n");
-		drm_modeset_unlock_all(dev);
 		return PTR_ERR(fb);
 	}
 
@@ -2719,192 +2614,6 @@ void drm_fb_release(struct drm_file *priv)
 	mutex_unlock(&priv->fbs_lock);
 }
 
-/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
- *
- * Add @mode to @connector's user mode list.
- */
-static void drm_mode_attachmode(struct drm_device *dev,
-				struct drm_connector *connector,
-				struct drm_display_mode *mode)
-{
-	list_add_tail(&mode->head, &connector->user_modes);
-}
-
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
-			     const struct drm_display_mode *mode)
-{
-	struct drm_connector *connector;
-	int ret = 0;
-	struct drm_display_mode *dup_mode, *next;
-	LIST_HEAD(list);
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder)
-			continue;
-		if (connector->encoder->crtc == crtc) {
-			dup_mode = drm_mode_duplicate(dev, mode);
-			if (!dup_mode) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			list_add_tail(&dup_mode->head, &list);
-		}
-	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder)
-			continue;
-		if (connector->encoder->crtc == crtc)
-			list_move_tail(list.next, &connector->user_modes);
-	}
-
-	WARN_ON(!list_empty(&list));
-
- out:
-	list_for_each_entry_safe(dup_mode, next, &list, head)
-		drm_mode_destroy(dev, dup_mode);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-
-static int drm_mode_detachmode(struct drm_device *dev,
-			       struct drm_connector *connector,
-			       struct drm_display_mode *mode)
-{
-	int found = 0;
-	int ret = 0;
-	struct drm_display_mode *match_mode, *t;
-
-	list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
-		if (drm_mode_equal(match_mode, mode)) {
-			list_del(&match_mode->head);
-			drm_mode_destroy(dev, match_mode);
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found)
-		ret = -EINVAL;
-
-	return ret;
-}
-
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
-	struct drm_connector *connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_mode_detachmode(dev, connector, mode);
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-
-/**
- * drm_fb_attachmode - Attach a user mode to an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * This attaches a user specified mode to an connector.
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_mode_cmd *mode_cmd = data;
-	struct drm_connector *connector;
-	struct drm_display_mode *mode;
-	struct drm_mode_object *obj;
-	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-	if (!obj) {
-		ret = -EINVAL;
-		goto out;
-	}
-	connector = obj_to_connector(obj);
-
-	mode = drm_mode_create(dev);
-	if (!mode) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = drm_crtc_convert_umode(mode, umode);
-	if (ret) {
-		DRM_DEBUG_KMS("Invalid mode\n");
-		drm_mode_destroy(dev, mode);
-		goto out;
-	}
-
-	drm_mode_attachmode(dev, connector, mode);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-
-/**
- * drm_fb_detachmode - Detach a user specified mode from an connector
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Called by the user via ioctl.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_object *obj;
-	struct drm_mode_mode_cmd *mode_cmd = data;
-	struct drm_connector *connector;
-	struct drm_display_mode mode;
-	struct drm_mode_modeinfo *umode = &mode_cmd->mode;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
-	if (!obj) {
-		ret = -EINVAL;
-		goto out;
-	}
-	connector = obj_to_connector(obj);
-
-	ret = drm_crtc_convert_umode(&mode, umode);
-	if (ret) {
-		DRM_DEBUG_KMS("Invalid mode\n");
-		goto out;
-	}
-
-	ret = drm_mode_detachmode(dev, connector, &mode);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
 struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 					 const char *name, int num_values)
 {
@@ -3741,6 +3450,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	}
 
+	if (crtc->fb->pixel_format != fb->pixel_format) {
+		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		ret = -ENOMEM;
 		spin_lock_irqsave(&dev->event_lock, flags);
@@ -4066,3 +3781,110 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
 	}
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. Which is no
+ * problem, since this should happen single threaded at init time. It is the
+ * driver's problem to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+	mutex_init(&dev->mode_config.mutex);
+	mutex_init(&dev->mode_config.idr_mutex);
+	mutex_init(&dev->mode_config.fb_lock);
+	INIT_LIST_HEAD(&dev->mode_config.fb_list);
+	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+	INIT_LIST_HEAD(&dev->mode_config.connector_list);
+	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+	INIT_LIST_HEAD(&dev->mode_config.plane_list);
+	idr_init(&dev->mode_config.crtc_idr);
+
+	drm_modeset_lock_all(dev);
+	drm_mode_create_standard_connector_properties(dev);
+	drm_modeset_unlock_all(dev);
+
+	/* Just to be sure */
+	dev->mode_config.num_fb = 0;
+	dev->mode_config.num_connector = 0;
+	dev->mode_config.num_crtc = 0;
+	dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+	struct drm_connector *connector, *ot;
+	struct drm_crtc *crtc, *ct;
+	struct drm_encoder *encoder, *enct;
+	struct drm_framebuffer *fb, *fbt;
+	struct drm_property *property, *pt;
+	struct drm_property_blob *blob, *bt;
+	struct drm_plane *plane, *plt;
+
+	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+				 head) {
+		encoder->funcs->destroy(encoder);
+	}
+
+	list_for_each_entry_safe(connector, ot,
+				 &dev->mode_config.connector_list, head) {
+		connector->funcs->destroy(connector);
+	}
+
+	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+				 head) {
+		drm_property_destroy(dev, property);
+	}
+
+	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+				 head) {
+		drm_property_destroy_blob(dev, blob);
+	}
+
+	/*
+	 * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. Contrary, it
+	 * would actually deadlock with the drm_framebuffer_cleanup function.
+	 *
+	 * Also, if there are any framebuffers left, that's a driver leak now,
+	 * so politely WARN about this.
+	 */
+	WARN_ON(!list_empty(&dev->mode_config.fb_list));
+	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+		drm_framebuffer_remove(fb);
+	}
+
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
+	idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
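
Among the drm_crtc.c changes above, the functional one is the new page-flip guard: a flip may swap buffers but must not change the fourcc pixel format, since that would require a full modeset. A minimal userspace C model of that check follows; fourcc_code mirrors the kernel packing, while the FMT_* names and flip_allowed are illustrative stand-ins, not DRM API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* fourcc packing as used by the DRM format codes. */
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_XRGB8888	fourcc_code('X', 'R', '2', '4')
#define FMT_RGB565	fourcc_code('R', 'G', '1', '6')

/* Model of the new guard: the flip target must keep the exact fourcc
 * format of the currently scanned-out framebuffer. */
static int flip_allowed(uint32_t current_fmt, uint32_t new_fmt)
{
	return current_fmt == new_fmt ? 0 : -EINVAL;
}

int main(void)
{
	printf("same format: %d\n", flip_allowed(FMT_XRGB8888, FMT_XRGB8888));
	printf("format change: %d\n", flip_allowed(FMT_XRGB8888, FMT_RGB565));
	return 0;
}

Note that matching bits_per_pixel alone is not enough; the crtc helper hunk below adds the same fourcc comparison to set_config for exactly that reason.
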
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7b2d378b2576..ed1334e27c33 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		connector->helper_private;
 	int count = 0;
 	int mode_flags = 0;
+	bool verbose_prune = true;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 			drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
 			connector->base.id, drm_get_connector_name(connector));
 		drm_mode_connector_update_edid_property(connector, NULL);
+		verbose_prune = false;
 		goto prune;
 	}
 
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	}
 
 prune:
-	drm_mode_prune_invalid(dev, &connector->modes, true);
+	drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
 	if (list_empty(&connector->modes))
 		return 0;
@@ -648,6 +650,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		} else if (set->fb->bits_per_pixel !=
 			   set->crtc->fb->bits_per_pixel) {
 			mode_changed = true;
+		} else if (set->fb->pixel_format !=
+			   set->crtc->fb->pixel_format) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
@@ -1002,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
 			continue;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-			      connector->base.id,
-			      drm_get_connector_name(connector),
-			      old_status, connector->status);
-		if (old_status != connector->status)
+		if (old_status != connector->status) {
+			const char *old, *new;
+
+			old = drm_get_connector_status_name(old_status);
+			new = drm_get_connector_status_name(connector->status);
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+				      "status updated from %s to %s\n",
+				      connector->base.id,
+				      drm_get_connector_name(connector),
+				      old, new);
+
 			changed = true;
+		}
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1080,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 		old_status = connector->status;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 			      connector->base.id,
 			      drm_get_connector_name(connector),
-			      old_status, connector->status);
+			      drm_get_connector_status_name(old_status),
+			      drm_get_connector_status_name(connector->status));
 		if (old_status != connector->status)
 			changed = true;
 	}
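
The helper changes above switch the hotplug debug output from raw enum values to names via the newly exported drm_get_connector_status_name(), and the polling path now logs only when the status actually changed. A small standalone C sketch of the same pattern; the enum values and names here are illustrative, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the connector status enum. */
enum conn_status {
	STATUS_CONNECTED = 1,
	STATUS_DISCONNECTED = 2,
	STATUS_UNKNOWN = 3,
};

static const char *status_name(enum conn_status s)
{
	switch (s) {
	case STATUS_CONNECTED:
		return "connected";
	case STATUS_DISCONNECTED:
		return "disconnected";
	default:
		return "unknown";
	}
}

int main(void)
{
	enum conn_status old = STATUS_UNKNOWN;
	enum conn_status now = STATUS_CONNECTED;

	if (old != now)	/* log only on a real transition, as the patch does */
		printf("status updated from %s to %s\n",
		       status_name(old), status_name(now));
	return 0;
}
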
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 25f91cd23e60..9cc247f55502 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,10 +57,10 @@ static int drm_version(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
 
 /** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
+static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
@@ -150,8 +150,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev;
-	struct drm_ioctl_desc *ioctl;
+	const struct drm_ioctl_desc *ioctl = NULL;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
-	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-		  task_pid_nr(current), cmd, nr,
-		  (long)old_encode_dev(file_priv->minor->device),
-		  file_priv->authenticated);
-
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
 	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
 		goto err_i1;
@@ -408,6 +403,7 @@ long drm_ioctl(struct file *filp,
 		usize = asize = _IOC_SIZE(cmd);
 		if (drv_size > asize)
 			asize = drv_size;
+		cmd = ioctl->cmd_drv;
 	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
 		ioctl = &drm_ioctls[nr];
@@ -416,6 +412,11 @@ long drm_ioctl(struct file *filp,
 	} else
 		goto err_i1;
 
+	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->device),
+		  file_priv->authenticated, ioctl->name);
+
 	/* Do not trust userspace, use our own definition */
 	func = ioctl->func;
 	/* is there a local override? */
@@ -470,6 +471,12 @@ long drm_ioctl(struct file *filp,
 	}
 
       err_i1:
+	if (!ioctl)
+		DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+			  task_pid_nr(current),
+			  (long)old_encode_dev(file_priv->minor->device),
+			  file_priv->authenticated, cmd, nr);
+
 	if (kdata != stack_kdata)
 		kfree(kdata);
 	atomic_dec(&dev->ioctl_count);
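
The drm_drv.c hunks above make the ioctl table const and attach a printable name to each entry by stringifying the macro argument with '#', so the per-call debug line can print the ioctl's name instead of raw numbers. A standalone C sketch of that designated-initializer trick; the IOCTL_* macros and ioctl_desc are simplified stand-ins (the real entries also carry a handler and flags):

#include <stdio.h>

#define IOCTL_NR(n)		(n)	/* the real macro extracts the nr field */
#define IOCTL_VERSION		0x00
#define IOCTL_GET_UNIQUE	0x01

struct ioctl_desc {
	unsigned int cmd;
	const char *name;
};

/* '#ioctl' stringifies the macro argument, so every table slot carries
 * a printable name; designated initializers key the slot by number. */
#define IOCTL_DEF(ioctl) \
	[IOCTL_NR(ioctl)] = { .cmd = ioctl, .name = #ioctl }

static const struct ioctl_desc ioctls[] = {
	IOCTL_DEF(IOCTL_VERSION),
	IOCTL_DEF(IOCTL_GET_UNIQUE),
};

int main(void)
{
	unsigned int nr = IOCTL_NR(IOCTL_GET_UNIQUE);

	/* Prints "nr=0x01 -> IOCTL_GET_UNIQUE". */
	printf("nr=0x%02x -> %s\n", nr, ioctls[nr].name);
	return 0;
}

Because the name lives in the table, the lookup at dispatch time is free; no separate number-to-string switch has to be kept in sync with the ioctl list.
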
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e2acfdbf7d3c..9e62bbedb5ad 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -587,284 +587,348 @@ static const struct drm_display_mode edid_cea_modes[] = {
 	/* 1 - 640x480@60Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 2 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 3 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
 	/* 4 - 1280x720@60Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, },
 	/* 5 - 1920x1080i@60Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-		   DRM_MODE_FLAG_INTERLACE) },
+		   DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
 	/* 6 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 7 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+		   DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 8 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_DBLCLK) },
+		   DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 9 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-		   DRM_MODE_FLAG_DBLCLK) },
+		   DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
 	/* 10 - 2880x480i@60Hz */
629 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 638 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
630 3204, 3432, 0, 480, 488, 494, 525, 0, 639 3204, 3432, 0, 480, 488, 494, 525, 0,
631 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 640 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
632 DRM_MODE_FLAG_INTERLACE) }, 641 DRM_MODE_FLAG_INTERLACE),
642 .vrefresh = 60, },
633 /* 11 - 2880x480i@60Hz */ 643 /* 11 - 2880x480i@60Hz */
634 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 644 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
635 3204, 3432, 0, 480, 488, 494, 525, 0, 645 3204, 3432, 0, 480, 488, 494, 525, 0,
636 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 646 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
637 DRM_MODE_FLAG_INTERLACE) }, 647 DRM_MODE_FLAG_INTERLACE),
648 .vrefresh = 60, },
638 /* 12 - 2880x240@60Hz */ 649 /* 12 - 2880x240@60Hz */
639 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 650 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
640 3204, 3432, 0, 240, 244, 247, 262, 0, 651 3204, 3432, 0, 240, 244, 247, 262, 0,
641 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 652 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
653 .vrefresh = 60, },
642 /* 13 - 2880x240@60Hz */ 654 /* 13 - 2880x240@60Hz */
643 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 655 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
644 3204, 3432, 0, 240, 244, 247, 262, 0, 656 3204, 3432, 0, 240, 244, 247, 262, 0,
645 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 657 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
658 .vrefresh = 60, },
646 /* 14 - 1440x480@60Hz */ 659 /* 14 - 1440x480@60Hz */
647 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 660 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
648 1596, 1716, 0, 480, 489, 495, 525, 0, 661 1596, 1716, 0, 480, 489, 495, 525, 0,
649 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 662 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
663 .vrefresh = 60, },
650 /* 15 - 1440x480@60Hz */ 664 /* 15 - 1440x480@60Hz */
651 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 665 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
652 1596, 1716, 0, 480, 489, 495, 525, 0, 666 1596, 1716, 0, 480, 489, 495, 525, 0,
653 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 667 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
668 .vrefresh = 60, },
654 /* 16 - 1920x1080@60Hz */ 669 /* 16 - 1920x1080@60Hz */
655 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 670 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
656 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, 671 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
657 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 672 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
673 .vrefresh = 60, },
658 /* 17 - 720x576@50Hz */ 674 /* 17 - 720x576@50Hz */
659 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 675 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
660 796, 864, 0, 576, 581, 586, 625, 0, 676 796, 864, 0, 576, 581, 586, 625, 0,
661 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 677 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
678 .vrefresh = 50, },
662 /* 18 - 720x576@50Hz */ 679 /* 18 - 720x576@50Hz */
663 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 680 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
664 796, 864, 0, 576, 581, 586, 625, 0, 681 796, 864, 0, 576, 581, 586, 625, 0,
665 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 682 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
683 .vrefresh = 50, },
666 /* 19 - 1280x720@50Hz */ 684 /* 19 - 1280x720@50Hz */
667 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 685 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
668 1760, 1980, 0, 720, 725, 730, 750, 0, 686 1760, 1980, 0, 720, 725, 730, 750, 0,
669 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 687 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
688 .vrefresh = 50, },
670 /* 20 - 1920x1080i@50Hz */ 689 /* 20 - 1920x1080i@50Hz */
671 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 690 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
672 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 691 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
673 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 692 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
674 DRM_MODE_FLAG_INTERLACE) }, 693 DRM_MODE_FLAG_INTERLACE),
694 .vrefresh = 50, },
675 /* 21 - 1440x576i@50Hz */ 695 /* 21 - 1440x576i@50Hz */
676 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 696 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
677 1590, 1728, 0, 576, 580, 586, 625, 0, 697 1590, 1728, 0, 576, 580, 586, 625, 0,
678 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 698 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
679 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 699 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
700 .vrefresh = 50, },
680 /* 22 - 1440x576i@50Hz */ 701 /* 22 - 1440x576i@50Hz */
681 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 702 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
682 1590, 1728, 0, 576, 580, 586, 625, 0, 703 1590, 1728, 0, 576, 580, 586, 625, 0,
683 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 704 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
684 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 705 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
706 .vrefresh = 50, },
685 /* 23 - 1440x288@50Hz */ 707 /* 23 - 1440x288@50Hz */
686 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 708 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
687 1590, 1728, 0, 288, 290, 293, 312, 0, 709 1590, 1728, 0, 288, 290, 293, 312, 0,
688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 710 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
689 DRM_MODE_FLAG_DBLCLK) }, 711 DRM_MODE_FLAG_DBLCLK),
712 .vrefresh = 50, },
690 /* 24 - 1440x288@50Hz */ 713 /* 24 - 1440x288@50Hz */
691 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 714 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
692 1590, 1728, 0, 288, 290, 293, 312, 0, 715 1590, 1728, 0, 288, 290, 293, 312, 0,
693 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
694 DRM_MODE_FLAG_DBLCLK) }, 717 DRM_MODE_FLAG_DBLCLK),
718 .vrefresh = 50, },
695 /* 25 - 2880x576i@50Hz */ 719 /* 25 - 2880x576i@50Hz */
696 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 720 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
697 3180, 3456, 0, 576, 580, 586, 625, 0, 721 3180, 3456, 0, 576, 580, 586, 625, 0,
698 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 722 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
699 DRM_MODE_FLAG_INTERLACE) }, 723 DRM_MODE_FLAG_INTERLACE),
724 .vrefresh = 50, },
700 /* 26 - 2880x576i@50Hz */ 725 /* 26 - 2880x576i@50Hz */
701 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 726 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
702 3180, 3456, 0, 576, 580, 586, 625, 0, 727 3180, 3456, 0, 576, 580, 586, 625, 0,
703 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
704 DRM_MODE_FLAG_INTERLACE) }, 729 DRM_MODE_FLAG_INTERLACE),
730 .vrefresh = 50, },
705 /* 27 - 2880x288@50Hz */ 731 /* 27 - 2880x288@50Hz */
706 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 732 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
707 3180, 3456, 0, 288, 290, 293, 312, 0, 733 3180, 3456, 0, 288, 290, 293, 312, 0,
708 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 734 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
735 .vrefresh = 50, },
709 /* 28 - 2880x288@50Hz */ 736 /* 28 - 2880x288@50Hz */
710 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 737 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
711 3180, 3456, 0, 288, 290, 293, 312, 0, 738 3180, 3456, 0, 288, 290, 293, 312, 0,
712 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 739 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
740 .vrefresh = 50, },
713 /* 29 - 1440x576@50Hz */ 741 /* 29 - 1440x576@50Hz */
714 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 742 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
715 1592, 1728, 0, 576, 581, 586, 625, 0, 743 1592, 1728, 0, 576, 581, 586, 625, 0,
716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 744 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
745 .vrefresh = 50, },
717 /* 30 - 1440x576@50Hz */ 746 /* 30 - 1440x576@50Hz */
718 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 747 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
719 1592, 1728, 0, 576, 581, 586, 625, 0, 748 1592, 1728, 0, 576, 581, 586, 625, 0,
720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 749 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
750 .vrefresh = 50, },
721 /* 31 - 1920x1080@50Hz */ 751 /* 31 - 1920x1080@50Hz */
722 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 752 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
723 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, 753 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
724 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 754 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
755 .vrefresh = 50, },
725 /* 32 - 1920x1080@24Hz */ 756 /* 32 - 1920x1080@24Hz */
726 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 757 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
727 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, 758 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
728 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 759 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
760 .vrefresh = 24, },
729 /* 33 - 1920x1080@25Hz */ 761 /* 33 - 1920x1080@25Hz */
730 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 762 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
731 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, 763 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
732 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 764 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
765 .vrefresh = 25, },
733 /* 34 - 1920x1080@30Hz */ 766 /* 34 - 1920x1080@30Hz */
734 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 767 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
735 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, 768 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
736 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 769 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
770 .vrefresh = 30, },
737 /* 35 - 2880x480@60Hz */ 771 /* 35 - 2880x480@60Hz */
738 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 772 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
739 3192, 3432, 0, 480, 489, 495, 525, 0, 773 3192, 3432, 0, 480, 489, 495, 525, 0,
740 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 774 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
775 .vrefresh = 60, },
741 /* 36 - 2880x480@60Hz */ 776 /* 36 - 2880x480@60Hz */
742 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 777 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
743 3192, 3432, 0, 480, 489, 495, 525, 0, 778 3192, 3432, 0, 480, 489, 495, 525, 0,
744 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 779 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
780 .vrefresh = 60, },
745 /* 37 - 2880x576@50Hz */ 781 /* 37 - 2880x576@50Hz */
746 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 782 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
747 3184, 3456, 0, 576, 581, 586, 625, 0, 783 3184, 3456, 0, 576, 581, 586, 625, 0,
748 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 784 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
785 .vrefresh = 50, },
749 /* 38 - 2880x576@50Hz */ 786 /* 38 - 2880x576@50Hz */
750 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 787 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
751 3184, 3456, 0, 576, 581, 586, 625, 0, 788 3184, 3456, 0, 576, 581, 586, 625, 0,
752 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 789 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
790 .vrefresh = 50, },
753 /* 39 - 1920x1080i@50Hz */ 791 /* 39 - 1920x1080i@50Hz */
754 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 792 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
755 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, 793 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
756 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | 794 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
757 DRM_MODE_FLAG_INTERLACE) }, 795 DRM_MODE_FLAG_INTERLACE),
796 .vrefresh = 50, },
758 /* 40 - 1920x1080i@100Hz */ 797 /* 40 - 1920x1080i@100Hz */
759 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 798 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
760 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 799 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
761 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 800 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
762 DRM_MODE_FLAG_INTERLACE) }, 801 DRM_MODE_FLAG_INTERLACE),
802 .vrefresh = 100, },
763 /* 41 - 1280x720@100Hz */ 803 /* 41 - 1280x720@100Hz */
764 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 804 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
765 1760, 1980, 0, 720, 725, 730, 750, 0, 805 1760, 1980, 0, 720, 725, 730, 750, 0,
766 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 806 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
807 .vrefresh = 100, },
767 /* 42 - 720x576@100Hz */ 808 /* 42 - 720x576@100Hz */
768 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 809 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
769 796, 864, 0, 576, 581, 586, 625, 0, 810 796, 864, 0, 576, 581, 586, 625, 0,
770 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 811 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
812 .vrefresh = 100, },
771 /* 43 - 720x576@100Hz */ 813 /* 43 - 720x576@100Hz */
772 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 814 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
773 796, 864, 0, 576, 581, 586, 625, 0, 815 796, 864, 0, 576, 581, 586, 625, 0,
774 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 816 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
817 .vrefresh = 100, },
775 /* 44 - 1440x576i@100Hz */ 818 /* 44 - 1440x576i@100Hz */
776 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 819 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
777 1590, 1728, 0, 576, 580, 586, 625, 0, 820 1590, 1728, 0, 576, 580, 586, 625, 0,
778 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 821 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
779 DRM_MODE_FLAG_DBLCLK) }, 822 DRM_MODE_FLAG_DBLCLK),
823 .vrefresh = 100, },
780 /* 45 - 1440x576i@100Hz */ 824 /* 45 - 1440x576i@100Hz */
781 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 825 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
782 1590, 1728, 0, 576, 580, 586, 625, 0, 826 1590, 1728, 0, 576, 580, 586, 625, 0,
783 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 827 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
784 DRM_MODE_FLAG_DBLCLK) }, 828 DRM_MODE_FLAG_DBLCLK),
829 .vrefresh = 100, },
785 /* 46 - 1920x1080i@120Hz */ 830 /* 46 - 1920x1080i@120Hz */
786 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 831 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
787 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, 832 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
788 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 833 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
789 DRM_MODE_FLAG_INTERLACE) }, 834 DRM_MODE_FLAG_INTERLACE),
835 .vrefresh = 120, },
790 /* 47 - 1280x720@120Hz */ 836 /* 47 - 1280x720@120Hz */
791 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 837 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
792 1430, 1650, 0, 720, 725, 730, 750, 0, 838 1430, 1650, 0, 720, 725, 730, 750, 0,
793 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 839 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
840 .vrefresh = 120, },
794 /* 48 - 720x480@120Hz */ 841 /* 48 - 720x480@120Hz */
795 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 842 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
796 798, 858, 0, 480, 489, 495, 525, 0, 843 798, 858, 0, 480, 489, 495, 525, 0,
797 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 844 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
845 .vrefresh = 120, },
798 /* 49 - 720x480@120Hz */ 846 /* 49 - 720x480@120Hz */
799 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 847 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
800 798, 858, 0, 480, 489, 495, 525, 0, 848 798, 858, 0, 480, 489, 495, 525, 0,
801 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 849 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
850 .vrefresh = 120, },
802 /* 50 - 1440x480i@120Hz */ 851 /* 50 - 1440x480i@120Hz */
803 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 852 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
804 1602, 1716, 0, 480, 488, 494, 525, 0, 853 1602, 1716, 0, 480, 488, 494, 525, 0,
805 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 854 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
806 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 855 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
856 .vrefresh = 120, },
807 /* 51 - 1440x480i@120Hz */ 857 /* 51 - 1440x480i@120Hz */
808 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 858 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
809 1602, 1716, 0, 480, 488, 494, 525, 0, 859 1602, 1716, 0, 480, 488, 494, 525, 0,
810 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 860 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
811 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 861 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
862 .vrefresh = 120, },
812 /* 52 - 720x576@200Hz */ 863 /* 52 - 720x576@200Hz */
813 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 864 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
814 796, 864, 0, 576, 581, 586, 625, 0, 865 796, 864, 0, 576, 581, 586, 625, 0,
815 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 866 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
867 .vrefresh = 200, },
816 /* 53 - 720x576@200Hz */ 868 /* 53 - 720x576@200Hz */
817 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 869 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
818 796, 864, 0, 576, 581, 586, 625, 0, 870 796, 864, 0, 576, 581, 586, 625, 0,
819 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 871 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
872 .vrefresh = 200, },
820 /* 54 - 1440x576i@200Hz */ 873 /* 54 - 1440x576i@200Hz */
821 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 874 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
822 1590, 1728, 0, 576, 580, 586, 625, 0, 875 1590, 1728, 0, 576, 580, 586, 625, 0,
823 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 876 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
824 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 877 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
878 .vrefresh = 200, },
825 /* 55 - 1440x576i@200Hz */ 879 /* 55 - 1440x576i@200Hz */
826 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 880 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
827 1590, 1728, 0, 576, 580, 586, 625, 0, 881 1590, 1728, 0, 576, 580, 586, 625, 0,
828 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 882 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
829 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 883 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
884 .vrefresh = 200, },
830 /* 56 - 720x480@240Hz */ 885 /* 56 - 720x480@240Hz */
831 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 886 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
832 798, 858, 0, 480, 489, 495, 525, 0, 887 798, 858, 0, 480, 489, 495, 525, 0,
833 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 888 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
889 .vrefresh = 240, },
834 /* 57 - 720x480@240Hz */ 890 /* 57 - 720x480@240Hz */
835 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 891 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
836 798, 858, 0, 480, 489, 495, 525, 0, 892 798, 858, 0, 480, 489, 495, 525, 0,
837 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 893 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
894 .vrefresh = 240, },
 838 /* 58 - 1440x480i@240Hz */ 895 /* 58 - 1440x480i@240Hz */
839 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 896 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
840 1602, 1716, 0, 480, 488, 494, 525, 0, 897 1602, 1716, 0, 480, 488, 494, 525, 0,
841 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 898 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
842 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 899 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
900 .vrefresh = 240, },
 843 /* 59 - 1440x480i@240Hz */ 901 /* 59 - 1440x480i@240Hz */
844 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 902 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
845 1602, 1716, 0, 480, 488, 494, 525, 0, 903 1602, 1716, 0, 480, 488, 494, 525, 0,
846 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 904 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
847 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 905 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
906 .vrefresh = 240, },
848 /* 60 - 1280x720@24Hz */ 907 /* 60 - 1280x720@24Hz */
849 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 908 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
850 3080, 3300, 0, 720, 725, 730, 750, 0, 909 3080, 3300, 0, 720, 725, 730, 750, 0,
851 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 910 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
911 .vrefresh = 24, },
852 /* 61 - 1280x720@25Hz */ 912 /* 61 - 1280x720@25Hz */
853 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 913 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
854 3740, 3960, 0, 720, 725, 730, 750, 0, 914 3740, 3960, 0, 720, 725, 730, 750, 0,
855 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 915 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
916 .vrefresh = 25, },
856 /* 62 - 1280x720@30Hz */ 917 /* 62 - 1280x720@30Hz */
857 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 918 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
858 3080, 3300, 0, 720, 725, 730, 750, 0, 919 3080, 3300, 0, 720, 725, 730, 750, 0,
859 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 920 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
921 .vrefresh = 30, },
860 /* 63 - 1920x1080@120Hz */ 922 /* 63 - 1920x1080@120Hz */
861 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 923 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
862 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, 924 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
863 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 925 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
926 .vrefresh = 120, },
864 /* 64 - 1920x1080@100Hz */ 927 /* 64 - 1920x1080@100Hz */
865 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 928 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
866 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 929 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
867 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 930 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
931 .vrefresh = 100, },
868}; 932};
869 933
870/*** DDC fetch and block validation ***/ 934/*** DDC fetch and block validation ***/
@@ -2266,13 +2330,34 @@ EXPORT_SYMBOL(drm_find_cea_extension);
2266 */ 2330 */
2267u8 drm_match_cea_mode(const struct drm_display_mode *to_match) 2331u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2268{ 2332{
2269 struct drm_display_mode *cea_mode;
2270 u8 mode; 2333 u8 mode;
2271 2334
2335 if (!to_match->clock)
2336 return 0;
2337
2272 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { 2338 for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
2273 cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode]; 2339 const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
2340 unsigned int clock1, clock2;
2341
2342 clock1 = clock2 = cea_mode->clock;
2274 2343
2275 if (drm_mode_equal(to_match, cea_mode)) 2344 /* Check both 60Hz and 59.94Hz */
2345 if (cea_mode->vrefresh % 6 == 0) {
2346 /*
2347 * edid_cea_modes contains the 59.94Hz
2348 * variant for 240 and 480 line modes,
2349 * and the 60Hz variant otherwise.
2350 */
2351 if (cea_mode->vdisplay == 240 ||
2352 cea_mode->vdisplay == 480)
2353 clock1 = clock1 * 1001 / 1000;
2354 else
2355 clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
2356 }
2357
2358 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2359 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2360 drm_mode_equal_no_clocks(to_match, cea_mode))
2276 return mode + 1; 2361 return mode + 1;
2277 } 2362 }
2278 return 0; 2363 return 0;
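
The 1000/1001 adjustment mirrors NTSC practice: CEA-861 defines most of these modes at both 60Hz and 60000/1001 (59.94) Hz, but the table stores only one clock per entry. A worked sketch of the two candidate clocks, reusing the kernel's DIV_ROUND_UP() (the sample numbers are just the arithmetic spelled out):

    /* The table stores the 59.94Hz clock for 240/480-line modes, so
     * the 60Hz variant is clock * 1001/1000; for everything else it
     * stores the 60Hz clock, so 59.94Hz is clock * 1000/1001.
     *
     *   720x480 entry:  27000 kHz -> clock1 = 27000*1001/1000 = 27027
     *   1080p60 entry: 148500 kHz -> clock2 = ceil(148500*1000/1001)
     *                                       = 148352
     */
    unsigned int clock1 = cea_mode->clock, clock2 = cea_mode->clock;

    if (cea_mode->vrefresh % 6 == 0) {
            if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
                    clock1 = clock1 * 1001 / 1000;
            else
                    clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
    }

Comparing KHZ2PICOS() values rather than raw kHz then collapses nearby high clocks into the same picosecond period, so one-unit rounding slop between sources (148351 vs 148352 kHz, say) still matches.
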
@@ -2294,6 +2379,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
2294 newmode = drm_mode_duplicate(dev, 2379 newmode = drm_mode_duplicate(dev,
2295 &edid_cea_modes[cea_mode]); 2380 &edid_cea_modes[cea_mode]);
2296 if (newmode) { 2381 if (newmode) {
2382 newmode->vrefresh = 0;
2297 drm_mode_probed_add(connector, newmode); 2383 drm_mode_probed_add(connector, newmode);
2298 modes++; 2384 modes++;
2299 } 2385 }
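
Zeroing vrefresh on the duplicated mode matters because the table entries above carry the nominal rate (60 even where the stored clock is really the 59.94Hz variant); with vrefresh left at 0, drm_mode_vrefresh() rederives the rate from the actual timings. Roughly, as a simplified sketch rather than the exact kernel function:

    /* Refresh rate recomputed from raw timings when vrefresh == 0. */
    int vtotal = mode->vtotal;
    int refresh;

    if (mode->vscan > 1)
            vtotal *= mode->vscan;
    refresh = mode->clock * 1000 / (mode->htotal * vtotal);
    if (mode->flags & DRM_MODE_FLAG_INTERLACE)
            refresh *= 2;           /* two fields per frame */
    if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
            refresh /= 2;

    /* CEA mode 5 (1080i@60): 74250*1000 / (2200*1125) = 30,
     * doubled for interlace -> 60. */
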
@@ -2511,6 +2597,65 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
2511EXPORT_SYMBOL(drm_edid_to_eld); 2597EXPORT_SYMBOL(drm_edid_to_eld);
2512 2598
2513/** 2599/**
2600 * drm_edid_to_sad - extracts SADs from EDID
2601 * @edid: EDID to parse
2602 * @sads: pointer that will be set to the extracted SADs
2603 *
2604 * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
2605 * Note: returned pointer needs to be kfreed
2606 *
2607 * Return number of found SADs or negative number on error.
2608 */
2609int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
2610{
2611 int count = 0;
2612 int i, start, end, dbl;
2613 u8 *cea;
2614
2615 cea = drm_find_cea_extension(edid);
2616 if (!cea) {
2617 DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
2618 return -ENOENT;
2619 }
2620
2621 if (cea_revision(cea) < 3) {
2622 DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
2623 return -ENOTSUPP;
2624 }
2625
2626 if (cea_db_offsets(cea, &start, &end)) {
2627 DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
2628 return -EPROTO;
2629 }
2630
2631 for_each_cea_db(cea, i, start, end) {
2632 u8 *db = &cea[i];
2633
2634 if (cea_db_tag(db) == AUDIO_BLOCK) {
2635 int j;
2636 dbl = cea_db_payload_len(db);
2637
2638 count = dbl / 3; /* SAD is 3B */
2639 *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
2640 if (!*sads)
2641 return -ENOMEM;
2642 for (j = 0; j < count; j++) {
2643 u8 *sad = &db[1 + j * 3];
2644
2645 (*sads)[j].format = (sad[0] & 0x78) >> 3;
2646 (*sads)[j].channels = sad[0] & 0x7;
2647 (*sads)[j].freq = sad[1] & 0x7F;
2648 (*sads)[j].byte2 = sad[2];
2649 }
2650 break;
2651 }
2652 }
2653
2654 return count;
2655}
2656EXPORT_SYMBOL(drm_edid_to_sad);
2657
2658/**
2514 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond 2659 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
2515 * @connector: connector associated with the HDMI/DP sink 2660 * @connector: connector associated with the HDMI/DP sink
2516 * @mode: the display mode 2661 * @mode: the display mode
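
For reference, each SAD parsed by the new drm_edid_to_sad() above is three packed bytes: byte 0 carries the coding type in bits 6:3 and the channel field in bits 2:0 (channel count minus one, per CEA-861), byte 1 is a sample-rate bitmask, byte 2 is format-dependent. A hedged usage sketch (the sample bytes are illustrative, not from a real EDID):

    /* A SAD of 09 07 07 decodes as:
     *   format   = (0x09 & 0x78) >> 3 = 1   (LPCM)
     *   channels = 0x09 & 0x7        = 1   (i.e. 2 channels)
     *   freq     = 0x07                    (32/44.1/48 kHz bits)
     *   byte2    = 0x07                    (16/20/24 bit, for LPCM)
     */
    struct cea_sad *sads;
    int i, count;

    count = drm_edid_to_sad(edid, &sads);
    if (count < 0)
            return count;  /* no CEA block, old revision, bad offsets */
    for (i = 0; i < count; i++)
            DRM_DEBUG_KMS("SAD %d: fmt %u ch %u freq 0x%02x b2 0x%02x\n",
                          i, sads[i].format, sads[i].channels,
                          sads[i].freq, sads[i].byte2);
    kfree(sads);           /* per the kerneldoc, the caller must kfree */
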
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 38d3943f72de..fa445dd4dc00 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,10 +31,11 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " 31MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 4 34#define GENERIC_EDIDS 5
35static char *generic_edid_name[GENERIC_EDIDS] = { 35static char *generic_edid_name[GENERIC_EDIDS] = {
36 "edid/1024x768.bin", 36 "edid/1024x768.bin",
37 "edid/1280x1024.bin", 37 "edid/1280x1024.bin",
38 "edid/1600x1200.bin",
38 "edid/1680x1050.bin", 39 "edid/1680x1050.bin",
39 "edid/1920x1080.bin", 40 "edid/1920x1080.bin",
40}; 41};
@@ -79,6 +80,24 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
79 { 80 {
80 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 81 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
81 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 82 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
84 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
85 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
86 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
87 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
88 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
89 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
90 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
91 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
92 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
93 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
94 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
95 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
96 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
97 },
98 {
99 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
100 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
82 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78, 101 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
83 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, 102 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
84 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00, 103 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
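
With the new table entry, a built-in 1600x1200 EDID can be forced the same way as the existing ones, via the parameter declared at the top of this file (paths assume the option is built into drm.ko, as here):

    drm.edid_firmware=edid/1600x1200.bin        on the kernel command line, or
    echo edid/1600x1200.bin > /sys/module/drm/parameters/edid_firmware
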
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 48c52f7df4e6..0cfb60f54766 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
54 struct i2c_adapter *adap, 54 struct i2c_adapter *adap,
55 const struct i2c_board_info *info) 55 const struct i2c_board_info *info)
56{ 56{
57 char modalias[sizeof(I2C_MODULE_PREFIX)
58 + I2C_NAME_SIZE];
59 struct module *module = NULL; 57 struct module *module = NULL;
60 struct i2c_client *client; 58 struct i2c_client *client;
61 struct drm_i2c_encoder_driver *encoder_drv; 59 struct drm_i2c_encoder_driver *encoder_drv;
62 int err = 0; 60 int err = 0;
63 61
64 snprintf(modalias, sizeof(modalias), 62 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
65 "%s%s", I2C_MODULE_PREFIX, info->type);
66 request_module(modalias);
67 63
68 client = i2c_new_device(adap, info); 64 client = i2c_new_device(adap, info);
69 if (!client) { 65 if (!client) {
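
This simplification leans on request_module() itself taking a printf-style format, so the stack buffer and snprintf() were redundant; the two forms below are equivalent (a trivial illustration):

    char modalias[sizeof(I2C_MODULE_PREFIX) + I2C_NAME_SIZE];

    /* before: format by hand, then pass the result */
    snprintf(modalias, sizeof(modalias), "%s%s",
             I2C_MODULE_PREFIX, info->type);
    request_module(modalias);

    /* after: let request_module() do the formatting */
    request_module("%s%s", I2C_MODULE_PREFIX, info->type);
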
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6764dce44e84..b78cbe74dadf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1551,10 +1551,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1551 if (!fb_helper->fb) 1551 if (!fb_helper->fb)
1552 return 0; 1552 return 0;
1553 1553
1554 drm_modeset_lock_all(dev); 1554 mutex_lock(&fb_helper->dev->mode_config.mutex);
1555 if (!drm_fb_helper_is_bound(fb_helper)) { 1555 if (!drm_fb_helper_is_bound(fb_helper)) {
1556 fb_helper->delayed_hotplug = true; 1556 fb_helper->delayed_hotplug = true;
1557 drm_modeset_unlock_all(dev); 1557 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1558 return 0; 1558 return 0;
1559 } 1559 }
1560 DRM_DEBUG_KMS("\n"); 1560 DRM_DEBUG_KMS("\n");
@@ -1565,9 +1565,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1565 1565
1566 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1566 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1567 max_height); 1567 max_height);
1568 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1569
1570 drm_modeset_lock_all(dev);
1568 drm_setup_crtcs(fb_helper); 1571 drm_setup_crtcs(fb_helper);
1569 drm_modeset_unlock_all(dev); 1572 drm_modeset_unlock_all(dev);
1570
1571 drm_fb_helper_set_par(fb_helper->fbdev); 1573 drm_fb_helper_set_par(fb_helper->fbdev);
1572 1574
1573 return 0; 1575 return 0;
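
The net effect is a narrower critical section: connector probing, which can block on slow DDC transactions, now holds only mode_config.mutex, and the full set of modeset locks is taken just around the CRTC reconfiguration. Schematically (a restatement of the resulting locking pattern, not additional code):

    mutex_lock(&dev->mode_config.mutex);    /* probe side only */
    count = drm_fb_helper_probe_connector_modes(fb_helper, max_w, max_h);
    mutex_unlock(&dev->mode_config.mutex);

    drm_modeset_lock_all(dev);              /* heavyweight locks here */
    drm_setup_crtcs(fb_helper);
    drm_modeset_unlock_all(dev);
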
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 13fdcd10a605..429e07d0b0f1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)
123 int retcode = 0; 123 int retcode = 0;
124 int need_setup = 0; 124 int need_setup = 0;
125 struct address_space *old_mapping; 125 struct address_space *old_mapping;
126 struct address_space *old_imapping;
126 127
127 minor = idr_find(&drm_minors_idr, minor_id); 128 minor = idr_find(&drm_minors_idr, minor_id);
128 if (!minor) 129 if (!minor)
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)
137 if (!dev->open_count++) 138 if (!dev->open_count++)
138 need_setup = 1; 139 need_setup = 1;
139 mutex_lock(&dev->struct_mutex); 140 mutex_lock(&dev->struct_mutex);
141 old_imapping = inode->i_mapping;
140 old_mapping = dev->dev_mapping; 142 old_mapping = dev->dev_mapping;
141 if (old_mapping == NULL) 143 if (old_mapping == NULL)
142 dev->dev_mapping = &inode->i_data; 144 dev->dev_mapping = &inode->i_data;
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)
159 161
160err_undo: 162err_undo:
161 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
162 filp->f_mapping = old_mapping; 164 filp->f_mapping = old_imapping;
163 inode->i_mapping = old_mapping; 165 inode->i_mapping = old_imapping;
164 iput(container_of(dev->dev_mapping, struct inode, i_data)); 166 iput(container_of(dev->dev_mapping, struct inode, i_data));
165 dev->dev_mapping = old_mapping; 167 dev->dev_mapping = old_mapping;
166 mutex_unlock(&dev->struct_mutex); 168 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index af779ae19ebf..cf919e36e8ae 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
205drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 205drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
206{ 206{
207 if (obj->import_attach) { 207 if (obj->import_attach) {
208 drm_prime_remove_imported_buf_handle(&filp->prime, 208 drm_prime_remove_buf_handle(&filp->prime,
209 obj->import_attach->dmabuf); 209 obj->import_attach->dmabuf);
210 } 210 }
211 if (obj->export_dma_buf) { 211 if (obj->export_dma_buf) {
212 drm_prime_remove_imported_buf_handle(&filp->prime, 212 drm_prime_remove_buf_handle(&filp->prime,
213 obj->export_dma_buf); 213 obj->export_dma_buf);
214 } 214 }
215} 215}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d7..07cf99cc8862 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
755EXPORT_SYMBOL(drm_mm_debug_table); 755EXPORT_SYMBOL(drm_mm_debug_table);
756 756
757#if defined(CONFIG_DEBUG_FS) 757#if defined(CONFIG_DEBUG_FS)
758int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 758static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
759{ 759{
760 struct drm_mm_node *entry;
761 unsigned long total_used = 0, total_free = 0, total = 0;
762 unsigned long hole_start, hole_end, hole_size; 760 unsigned long hole_start, hole_end, hole_size;
763 761
764 hole_start = drm_mm_hole_node_start(&mm->head_node); 762 if (entry->hole_follows) {
765 hole_end = drm_mm_hole_node_end(&mm->head_node); 763 hole_start = drm_mm_hole_node_start(entry);
766 hole_size = hole_end - hole_start; 764 hole_end = drm_mm_hole_node_end(entry);
767 if (hole_size) 765 hole_size = hole_end - hole_start;
768 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 766 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
769 hole_start, hole_end, hole_size); 767 hole_start, hole_end, hole_size);
770 total_free += hole_size; 768 return hole_size;
769 }
770
771 return 0;
772}
773
774int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
775{
776 struct drm_mm_node *entry;
777 unsigned long total_used = 0, total_free = 0, total = 0;
778
779 total_free += drm_mm_dump_hole(m, &mm->head_node);
771 780
772 drm_mm_for_each_node(entry, mm) { 781 drm_mm_for_each_node(entry, mm) {
773 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 782 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
774 entry->start, entry->start + entry->size, 783 entry->start, entry->start + entry->size,
775 entry->size); 784 entry->size);
776 total_used += entry->size; 785 total_used += entry->size;
777 if (entry->hole_follows) { 786 total_free += drm_mm_dump_hole(m, entry);
778 hole_start = drm_mm_hole_node_start(entry);
779 hole_end = drm_mm_hole_node_end(entry);
780 hole_size = hole_end - hole_start;
781 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
782 hole_start, hole_end, hole_size);
783 total_free += hole_size;
784 }
785 } 787 }
786 total = total_free + total_used; 788 total = total_free + total_used;
787 789
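
The refactor routes the head node's leading hole and every per-node hole through one helper, so the debugfs table keeps its shape. Sample output, with the line format taken from the seq_printf() strings above and made-up addresses:

    0x00000000-0x00001000: 0x00001000: free
    0x00001000-0x00003000: 0x00002000: used
    0x00003000-0x00004000: 0x00001000: free
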
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 04fa6f1808d1..a371ff865a88 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -506,7 +506,7 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
506} 506}
507EXPORT_SYMBOL(drm_gtf_mode); 507EXPORT_SYMBOL(drm_gtf_mode);
508 508
509#if IS_ENABLED(CONFIG_VIDEOMODE) 509#ifdef CONFIG_VIDEOMODE_HELPERS
510int drm_display_mode_from_videomode(const struct videomode *vm, 510int drm_display_mode_from_videomode(const struct videomode *vm,
511 struct drm_display_mode *dmode) 511 struct drm_display_mode *dmode)
512{ 512{
@@ -523,26 +523,25 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
523 dmode->clock = vm->pixelclock / 1000; 523 dmode->clock = vm->pixelclock / 1000;
524 524
525 dmode->flags = 0; 525 dmode->flags = 0;
526 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) 526 if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
527 dmode->flags |= DRM_MODE_FLAG_PHSYNC; 527 dmode->flags |= DRM_MODE_FLAG_PHSYNC;
528 else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW) 528 else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
529 dmode->flags |= DRM_MODE_FLAG_NHSYNC; 529 dmode->flags |= DRM_MODE_FLAG_NHSYNC;
530 if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) 530 if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
531 dmode->flags |= DRM_MODE_FLAG_PVSYNC; 531 dmode->flags |= DRM_MODE_FLAG_PVSYNC;
532 else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW) 532 else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
533 dmode->flags |= DRM_MODE_FLAG_NVSYNC; 533 dmode->flags |= DRM_MODE_FLAG_NVSYNC;
534 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) 534 if (vm->flags & DISPLAY_FLAGS_INTERLACED)
535 dmode->flags |= DRM_MODE_FLAG_INTERLACE; 535 dmode->flags |= DRM_MODE_FLAG_INTERLACE;
536 if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN) 536 if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
537 dmode->flags |= DRM_MODE_FLAG_DBLSCAN; 537 dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
538 drm_mode_set_name(dmode); 538 drm_mode_set_name(dmode);
539 539
540 return 0; 540 return 0;
541} 541}
542EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); 542EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
543#endif
544 543
545#if IS_ENABLED(CONFIG_OF_VIDEOMODE) 544#ifdef CONFIG_OF
546/** 545/**
547 * of_get_drm_display_mode - get a drm_display_mode from devicetree 546 * of_get_drm_display_mode - get a drm_display_mode from devicetree
548 * @np: device_node with the timing specification 547 * @np: device_node with the timing specification
@@ -572,7 +571,8 @@ int of_get_drm_display_mode(struct device_node *np,
572 return 0; 571 return 0;
573} 572}
574EXPORT_SYMBOL_GPL(of_get_drm_display_mode); 573EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
575#endif 574#endif /* CONFIG_OF */
575#endif /* CONFIG_VIDEOMODE_HELPERS */
576 576
577/** 577/**
578 * drm_mode_set_name - set the name on a mode 578 * drm_mode_set_name - set the name on a mode
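
After the consolidation a single vm->flags word carries sync polarity and scan type, where the old code split them across dmt_flags and data_flags. A hedged usage sketch of the converter (field values illustrative; porches and sync lengths elided for brevity):

    struct videomode vm = {
            .pixelclock = 74250000,         /* Hz */
            .hactive    = 1280,
            .vactive    = 720,
            .flags      = DISPLAY_FLAGS_HSYNC_HIGH |
                          DISPLAY_FLAGS_VSYNC_HIGH,
    };
    struct drm_display_mode dmode = {};

    /* dmode.clock comes out as pixelclock / 1000 = 74250 kHz */
    if (!drm_display_mode_from_videomode(&vm, &dmode))
            DRM_DEBUG_KMS("%s @ %d kHz\n", dmode.name, dmode.clock);
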
@@ -848,6 +848,26 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
848 } else if (mode1->clock != mode2->clock) 848 } else if (mode1->clock != mode2->clock)
849 return false; 849 return false;
850 850
851 return drm_mode_equal_no_clocks(mode1, mode2);
852}
853EXPORT_SYMBOL(drm_mode_equal);
854
855/**
856 * drm_mode_equal_no_clocks - test modes for equality
857 * @mode1: first mode
858 * @mode2: second mode
859 *
860 * LOCKING:
861 * None.
862 *
863 * Check to see if @mode1 and @mode2 are equivalent, but
864 * don't check the pixel clocks.
865 *
866 * RETURNS:
867 * True if the modes are equal, false otherwise.
868 */
869bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
870{
851 if (mode1->hdisplay == mode2->hdisplay && 871 if (mode1->hdisplay == mode2->hdisplay &&
852 mode1->hsync_start == mode2->hsync_start && 872 mode1->hsync_start == mode2->hsync_start &&
853 mode1->hsync_end == mode2->hsync_end && 873 mode1->hsync_end == mode2->hsync_end &&
@@ -863,7 +883,7 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
863 883
864 return false; 884 return false;
865} 885}
866EXPORT_SYMBOL(drm_mode_equal); 886EXPORT_SYMBOL(drm_mode_equal_no_clocks);
867 887
868/** 888/**
869 * drm_mode_validate_size - make sure modes adhere to size constraints 889 * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1123,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1123 was_digit = false; 1143 was_digit = false;
1124 } else 1144 } else
1125 goto done; 1145 goto done;
1146 break;
1126 case '0' ... '9': 1147 case '0' ... '9':
1127 was_digit = true; 1148 was_digit = true;
1128 break; 1149 break;
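
The lone added break is a real bug fix: the preceding case otherwise fell straight through into the '0' ... '9' range case and set was_digit back to true, confusing the parse of mode strings such as video=HDMI-A-1:1280x720@60e. The hazard in miniature (illustrative, not the kernel parser itself):

    switch (c) {
    case 'e':
            /* ...handle the flag character... */
            was_digit = false;
            break;  /* without this, control falls into the digit case */
    case '0' ... '9':       /* GCC case-range extension */
            was_digit = true;
            break;
    }
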
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index bd719e936e13..14194b6ef644 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -152,7 +152,7 @@ static const char *drm_pci_get_name(struct drm_device *dev)
152 return pdriver->name; 152 return pdriver->name;
153} 153}
154 154
155int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) 155static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
156{ 156{
157 int len, ret; 157 int len, ret;
158 struct pci_driver *pdriver = dev->driver->kdriver.pci; 158 struct pci_driver *pdriver = dev->driver->kdriver.pci;
@@ -194,9 +194,9 @@ err:
194 return ret; 194 return ret;
195} 195}
196 196
197int drm_pci_set_unique(struct drm_device *dev, 197static int drm_pci_set_unique(struct drm_device *dev,
198 struct drm_master *master, 198 struct drm_master *master,
199 struct drm_unique *u) 199 struct drm_unique *u)
200{ 200{
201 int domain, bus, slot, func, ret; 201 int domain, bus, slot, func, ret;
202 const char *bus_name; 202 const char *bus_name;
@@ -266,7 +266,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
266 return 0; 266 return 0;
267} 267}
268 268
269int drm_pci_agp_init(struct drm_device *dev) 269static int drm_pci_agp_init(struct drm_device *dev)
270{ 270{
271 if (drm_core_has_AGP(dev)) { 271 if (drm_core_has_AGP(dev)) {
272 if (drm_pci_device_is_agp(dev)) 272 if (drm_pci_device_is_agp(dev))
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 25d02187067e..dcde35231e25 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,6 +62,7 @@ struct drm_prime_member {
62 struct dma_buf *dma_buf; 62 struct dma_buf *dma_buf;
63 uint32_t handle; 63 uint32_t handle;
64}; 64};
65static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
65 66
66static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, 67static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
67 enum dma_data_direction dir) 68 enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
200{ 201{
201 struct drm_gem_object *obj; 202 struct drm_gem_object *obj;
202 void *buf; 203 void *buf;
203 int ret; 204 int ret = 0;
205 struct dma_buf *dmabuf;
204 206
205 obj = drm_gem_object_lookup(dev, file_priv, handle); 207 obj = drm_gem_object_lookup(dev, file_priv, handle);
206 if (!obj) 208 if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
209 mutex_lock(&file_priv->prime.lock); 211 mutex_lock(&file_priv->prime.lock);
210 /* re-export the original imported object */ 212 /* re-export the original imported object */
211 if (obj->import_attach) { 213 if (obj->import_attach) {
212 get_dma_buf(obj->import_attach->dmabuf); 214 dmabuf = obj->import_attach->dmabuf;
213 *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags); 215 goto out_have_obj;
214 drm_gem_object_unreference_unlocked(obj);
215 mutex_unlock(&file_priv->prime.lock);
216 return 0;
217 } 216 }
218 217
219 if (obj->export_dma_buf) { 218 if (obj->export_dma_buf) {
220 get_dma_buf(obj->export_dma_buf); 219 dmabuf = obj->export_dma_buf;
221 *prime_fd = dma_buf_fd(obj->export_dma_buf, flags); 220 goto out_have_obj;
222 drm_gem_object_unreference_unlocked(obj); 221 }
223 } else { 222
224 buf = dev->driver->gem_prime_export(dev, obj, flags); 223 buf = dev->driver->gem_prime_export(dev, obj, flags);
225 if (IS_ERR(buf)) { 224 if (IS_ERR(buf)) {
226 /* normally the created dma-buf takes ownership of the ref, 225 /* normally the created dma-buf takes ownership of the ref,
227 * but if that fails then drop the ref 226 * but if that fails then drop the ref
228 */ 227 */
229 drm_gem_object_unreference_unlocked(obj); 228 ret = PTR_ERR(buf);
230 mutex_unlock(&file_priv->prime.lock); 229 goto out;
231 return PTR_ERR(buf);
232 }
233 obj->export_dma_buf = buf;
234 *prime_fd = dma_buf_fd(buf, flags);
235 } 230 }
231 obj->export_dma_buf = buf;
232
236 /* if we've exported this buffer the cheat and add it to the import list 233 /* if we've exported this buffer the cheat and add it to the import list
237 * so we get the correct handle back 234 * so we get the correct handle back
238 */ 235 */
239 ret = drm_prime_add_imported_buf_handle(&file_priv->prime, 236 ret = drm_prime_add_buf_handle(&file_priv->prime,
240 obj->export_dma_buf, handle); 237 obj->export_dma_buf, handle);
241 if (ret) { 238 if (ret)
242 drm_gem_object_unreference_unlocked(obj); 239 goto out;
243 mutex_unlock(&file_priv->prime.lock);
244 return ret;
245 }
246 240
241 *prime_fd = dma_buf_fd(buf, flags);
247 mutex_unlock(&file_priv->prime.lock); 242 mutex_unlock(&file_priv->prime.lock);
248 return 0; 243 return 0;
244
245out_have_obj:
246 get_dma_buf(dmabuf);
247 *prime_fd = dma_buf_fd(dmabuf, flags);
248out:
249 drm_gem_object_unreference_unlocked(obj);
250 mutex_unlock(&file_priv->prime.lock);
251 return ret;
249} 252}
250EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 253EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
251 254
@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
268 * refcount on gem itself instead of f_count of dmabuf. 271 * refcount on gem itself instead of f_count of dmabuf.
269 */ 272 */
270 drm_gem_object_reference(obj); 273 drm_gem_object_reference(obj);
271 dma_buf_put(dma_buf);
272 return obj; 274 return obj;
273 } 275 }
274 } 276 }
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
277 if (IS_ERR(attach)) 279 if (IS_ERR(attach))
278 return ERR_PTR(PTR_ERR(attach)); 280 return ERR_PTR(PTR_ERR(attach));
279 281
282 get_dma_buf(dma_buf);
283
280 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 284 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
281 if (IS_ERR_OR_NULL(sgt)) { 285 if (IS_ERR_OR_NULL(sgt)) {
282 ret = PTR_ERR(sgt); 286 ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
297 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); 301 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
298fail_detach: 302fail_detach:
299 dma_buf_detach(dma_buf, attach); 303 dma_buf_detach(dma_buf, attach);
304 dma_buf_put(dma_buf);
305
300 return ERR_PTR(ret); 306 return ERR_PTR(ret);
301} 307}
302EXPORT_SYMBOL(drm_gem_prime_import); 308EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
314 320
315 mutex_lock(&file_priv->prime.lock); 321 mutex_lock(&file_priv->prime.lock);
316 322
317 ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime, 323 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
318 dma_buf, handle); 324 dma_buf, handle);
319 if (!ret) { 325 if (!ret) {
320 ret = 0; 326 ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
333 if (ret) 339 if (ret)
334 goto out_put; 340 goto out_put;
335 341
336 ret = drm_prime_add_imported_buf_handle(&file_priv->prime, 342 ret = drm_prime_add_buf_handle(&file_priv->prime,
337 dma_buf, *handle); 343 dma_buf, *handle);
338 if (ret) 344 if (ret)
339 goto fail; 345 goto fail;
340 346
341 mutex_unlock(&file_priv->prime.lock); 347 mutex_unlock(&file_priv->prime.lock);
348
349 dma_buf_put(dma_buf);
350
342 return 0; 351 return 0;
343 352
344fail: 353fail:
@@ -479,15 +488,12 @@ EXPORT_SYMBOL(drm_prime_init_file_private);
479 488
480void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) 489void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
481{ 490{
482 struct drm_prime_member *member, *safe; 491 /* by now drm_gem_release should've made sure the list is empty */
483 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { 492 WARN_ON(!list_empty(&prime_fpriv->head));
484 list_del(&member->entry);
485 kfree(member);
486 }
487} 493}
488EXPORT_SYMBOL(drm_prime_destroy_file_private); 494EXPORT_SYMBOL(drm_prime_destroy_file_private);
489 495
490int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) 496static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
491{ 497{
492 struct drm_prime_member *member; 498 struct drm_prime_member *member;
493 499
@@ -495,14 +501,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
495 if (!member) 501 if (!member)
496 return -ENOMEM; 502 return -ENOMEM;
497 503
504 get_dma_buf(dma_buf);
498 member->dma_buf = dma_buf; 505 member->dma_buf = dma_buf;
499 member->handle = handle; 506 member->handle = handle;
500 list_add(&member->entry, &prime_fpriv->head); 507 list_add(&member->entry, &prime_fpriv->head);
501 return 0; 508 return 0;
502} 509}
503EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
504 510
505int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) 511int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
506{ 512{
507 struct drm_prime_member *member; 513 struct drm_prime_member *member;
508 514
@@ -514,19 +520,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
514 } 520 }
515 return -ENOENT; 521 return -ENOENT;
516} 522}
517EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle); 523EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
518 524
519void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) 525void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
520{ 526{
521 struct drm_prime_member *member, *safe; 527 struct drm_prime_member *member, *safe;
522 528
523 mutex_lock(&prime_fpriv->lock); 529 mutex_lock(&prime_fpriv->lock);
524 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { 530 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
525 if (member->dma_buf == dma_buf) { 531 if (member->dma_buf == dma_buf) {
532 dma_buf_put(dma_buf);
526 list_del(&member->entry); 533 list_del(&member->entry);
527 kfree(member); 534 kfree(member);
528 } 535 }
529 } 536 }
530 mutex_unlock(&prime_fpriv->lock); 537 mutex_unlock(&prime_fpriv->lock);
531} 538}
532EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle); 539EXPORT_SYMBOL(drm_prime_remove_buf_handle);
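
The common thread in this file: every entry on prime_fpriv->head now owns a dma_buf reference of its own, taken in drm_prime_add_buf_handle() and dropped in drm_prime_remove_buf_handle(). That is what lets drm_gem_prime_fd_to_handle() dma_buf_put() its lookup reference right away, and why drm_prime_destroy_file_private() can shrink to a WARN_ON: drm_gem_release() should already have emptied the list. The pairing in outline (a sketch of the invariant, not new code):

    /* add: the list entry takes its own reference */
    get_dma_buf(dma_buf);
    member->dma_buf = dma_buf;
    list_add(&member->entry, &prime_fpriv->head);

    /* remove: the entry's reference leaves with it */
    dma_buf_put(member->dma_buf);
    list_del(&member->entry);
    kfree(member);
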
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index ff5456b7df72..d7f2324b4fb1 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,7 +49,7 @@
49/** 49/**
50 * Proc file list. 50 * Proc file list.
51 */ 51 */
52static struct drm_info_list drm_proc_list[] = { 52static const struct drm_info_list drm_proc_list[] = {
53 {"name", drm_name_info, 0}, 53 {"name", drm_name_info, 0},
54 {"vm", drm_vm_info, 0}, 54 {"vm", drm_vm_info, 0},
55 {"clients", drm_clients_info, 0}, 55 {"clients", drm_clients_info, 0},
@@ -63,7 +63,7 @@ static struct drm_info_list drm_proc_list[] = {
63 63
64static int drm_proc_open(struct inode *inode, struct file *file) 64static int drm_proc_open(struct inode *inode, struct file *file)
65{ 65{
66 struct drm_info_node* node = PDE(inode)->data; 66 struct drm_info_node* node = PDE_DATA(inode);
67 67
68 return single_open(file, node->info_ent->show, node); 68 return single_open(file, node->info_ent->show, node);
69} 69}
@@ -89,13 +89,13 @@ static const struct file_operations drm_proc_fops = {
89 * Create a given set of proc files represented by an array of 89 * Create a given set of proc files represented by an array of
90 * drm_proc_lists in the given root directory. 90 * drm_proc_lists in the given root directory.
91 */ 91 */
92static int drm_proc_create_files(struct drm_info_list *files, int count, 92static int drm_proc_create_files(const struct drm_info_list *files, int count,
93 struct proc_dir_entry *root, struct drm_minor *minor) 93 struct proc_dir_entry *root, struct drm_minor *minor)
94{ 94{
95 struct drm_device *dev = minor->dev; 95 struct drm_device *dev = minor->dev;
96 struct proc_dir_entry *ent; 96 struct proc_dir_entry *ent;
97 struct drm_info_node *tmp; 97 struct drm_info_node *tmp;
98 int i, ret; 98 int i;
99 99
100 for (i = 0; i < count; i++) { 100 for (i = 0; i < count; i++) {
101 u32 features = files[i].driver_features; 101 u32 features = files[i].driver_features;
@@ -105,10 +105,9 @@ static int drm_proc_create_files(struct drm_info_list *files, int count,
105 continue; 105 continue;
106 106
107 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 107 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
108 if (tmp == NULL) { 108 if (!tmp)
109 ret = -1; 109 return -1;
110 goto fail; 110
111 }
112 tmp->minor = minor; 111 tmp->minor = minor;
113 tmp->info_ent = &files[i]; 112 tmp->info_ent = &files[i];
114 list_add(&tmp->list, &minor->proc_nodes.list); 113 list_add(&tmp->list, &minor->proc_nodes.list);
@@ -116,28 +115,20 @@ static int drm_proc_create_files(struct drm_info_list *files, int count,
116 ent = proc_create_data(files[i].name, S_IRUGO, root, 115 ent = proc_create_data(files[i].name, S_IRUGO, root,
117 &drm_proc_fops, tmp); 116 &drm_proc_fops, tmp);
118 if (!ent) { 117 if (!ent) {
119 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 118 DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
120 root->name, files[i].name); 119 minor->index, files[i].name);
121 list_del(&tmp->list); 120 list_del(&tmp->list);
122 kfree(tmp); 121 kfree(tmp);
123 ret = -1; 122 return -1;
124 goto fail;
125 } 123 }
126
127 } 124 }
128 return 0; 125 return 0;
129
130fail:
131 for (i = 0; i < count; i++)
132 remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
133 return ret;
134} 126}
135 127
136/** 128/**
137 * Initialize the DRI proc filesystem for a device 129 * Initialize the DRI proc filesystem for a device
138 * 130 *
139 * \param dev DRM device 131 * \param dev DRM device
140 * \param minor device minor number
141 * \param root DRI proc dir entry. 132 * \param root DRI proc dir entry.
142 * \param dev_root resulting DRI device proc dir entry. 133 * \param dev_root resulting DRI device proc dir entry.
143 * \return root entry pointer on success, or NULL on failure. 134 * \return root entry pointer on success, or NULL on failure.
@@ -146,14 +137,13 @@ fail:
146 * "/proc/dri/%minor%/", and each entry in proc_list as 137 * "/proc/dri/%minor%/", and each entry in proc_list as
147 * "/proc/dri/%minor%/%name%". 138 * "/proc/dri/%minor%/%name%".
148 */ 139 */
149int drm_proc_init(struct drm_minor *minor, int minor_id, 140int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
150 struct proc_dir_entry *root)
151{ 141{
152 char name[64]; 142 char name[12];
153 int ret; 143 int ret;
154 144
155 INIT_LIST_HEAD(&minor->proc_nodes.list); 145 INIT_LIST_HEAD(&minor->proc_nodes.list);
156 sprintf(name, "%d", minor_id); 146 sprintf(name, "%u", minor->index);
157 minor->proc_root = proc_mkdir(name, root); 147 minor->proc_root = proc_mkdir(name, root);
158 if (!minor->proc_root) { 148 if (!minor->proc_root) {
159 DRM_ERROR("Cannot create /proc/dri/%s\n", name); 149 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
@@ -163,7 +153,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
163 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, 153 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
164 minor->proc_root, minor); 154 minor->proc_root, minor);
165 if (ret) { 155 if (ret) {
166 remove_proc_entry(name, root); 156 remove_proc_subtree(name, root);
167 minor->proc_root = NULL; 157 minor->proc_root = NULL;
168 DRM_ERROR("Failed to create core drm proc files\n"); 158 DRM_ERROR("Failed to create core drm proc files\n");
169 return ret; 159 return ret;
@@ -172,7 +162,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
172 return 0; 162 return 0;
173} 163}
174 164
175static int drm_proc_remove_files(struct drm_info_list *files, int count, 165static int drm_proc_remove_files(const struct drm_info_list *files, int count,
176 struct drm_minor *minor) 166 struct drm_minor *minor)
177{ 167{
178 struct list_head *pos, *q; 168 struct list_head *pos, *q;
@@ -213,8 +203,7 @@ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
213 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); 203 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
214 204
215 sprintf(name, "%d", minor->index); 205 sprintf(name, "%d", minor->index);
216 remove_proc_entry(name, root); 206 remove_proc_subtree(name, root);
217
218 return 0; 207 return 0;
219} 208}
220 209
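
The drm_proc.c changes track the 3.10 procfs API cleanup: PDE(inode)->data gives way to the PDE_DATA(inode) accessor, and remove_proc_subtree() tears down a directory together with everything registered under it, which is why the hand-rolled fail: loop (which, note, iterated over the global drm_proc_list instead of the files argument it was given) could simply be deleted. A minimal sketch of a read-only proc file built on the same calls, with hypothetical names:

    /* Sketch: seq_file-backed proc entry using PDE_DATA(); names hypothetical. */
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *s, void *unused)
    {
        seq_printf(s, "%s\n", (char *)s->private);
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        /* PDE_DATA() replaces the old PDE(inode)->data dereference */
        return single_open(file, demo_show, PDE_DATA(inode));
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init demo_init(void)
    {
        struct proc_dir_entry *dir = proc_mkdir("demo", NULL);

        if (!dir)
            return -ENOMEM;
        if (!proc_create_data("info", S_IRUGO, dir, &demo_fops, "hello")) {
            /* removes the directory and any children in one call */
            remove_proc_subtree("demo", NULL);
            return -ENOMEM;
        }
        return 0;
    }
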
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7d30802a018f..16f3ec579b3b 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -352,7 +352,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
352 idr_replace(&drm_minors_idr, new_minor, minor_id); 352 idr_replace(&drm_minors_idr, new_minor, minor_id);
353 353
354 if (type == DRM_MINOR_LEGACY) { 354 if (type == DRM_MINOR_LEGACY) {
355 ret = drm_proc_init(new_minor, minor_id, drm_proc_root); 355 ret = drm_proc_init(new_minor, drm_proc_root);
356 if (ret) { 356 if (ret) {
357 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); 357 DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
358 goto err_mem; 358 goto err_mem;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index db7bd292410b..1d4f7c9fe661 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -422,6 +422,7 @@ void drm_vm_open_locked(struct drm_device *dev,
422 list_add(&vma_entry->head, &dev->vmalist); 422 list_add(&vma_entry->head, &dev->vmalist);
423 } 423 }
424} 424}
425EXPORT_SYMBOL_GPL(drm_vm_open_locked);
425 426
426static void drm_vm_open(struct vm_area_struct *vma) 427static void drm_vm_open(struct vm_area_struct *vma)
427{ 428{
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 046bcda36abe..772c62a6e2ac 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,7 +24,9 @@ config DRM_EXYNOS_DMABUF
24 24
25config DRM_EXYNOS_FIMD 25config DRM_EXYNOS_FIMD
26 bool "Exynos DRM FIMD" 26 bool "Exynos DRM FIMD"
27 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 27 depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
28 select FB_MODE_HELPERS
29 select VIDEOMODE_HELPERS
28 help 30 help
29 Choose this option if you want to use Exynos FIMD for DRM. 31 Choose this option if you want to use Exynos FIMD for DRM.
30 32
@@ -54,7 +56,7 @@ config DRM_EXYNOS_IPP
54 56
55config DRM_EXYNOS_FIMC 57config DRM_EXYNOS_FIMC
56 bool "Exynos DRM FIMC" 58 bool "Exynos DRM FIMC"
57 depends on DRM_EXYNOS_IPP 59 depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
58 help 60 help
59 Choose this option if you want to use Exynos FIMC for DRM. 61 Choose this option if you want to use Exynos FIMC for DRM.
60 62
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 4c5b6859c9ea..8bcc13ac9f73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -124,7 +124,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
124 } 124 }
125 125
126 count = drm_add_edid_modes(connector, edid); 126 count = drm_add_edid_modes(connector, edid);
127 if (count < 0) { 127 if (!count) {
128 DRM_ERROR("Add edid modes failed %d\n", count); 128 DRM_ERROR("Add edid modes failed %d\n", count);
129 goto out; 129 goto out;
130 } 130 }
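
The one-character connector fix above is about drm_add_edid_modes()'s contract: it returns the number of modes added, zero included, and never a negative errno, so the old `count < 0` test could not fire. A hedged sketch of a get_modes() hook using it correctly; 'demo_ddc_adapter' is a hypothetical DDC lookup, not Exynos code:

    /* Sketch: get_modes() treating drm_add_edid_modes() as a count. */
    static int demo_get_modes(struct drm_connector *connector)
    {
        struct i2c_adapter *adap = demo_ddc_adapter(connector); /* hypothetical */
        struct edid *edid;
        int count;

        edid = drm_get_edid(connector, adap);
        if (!edid)
            return 0;

        drm_mode_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid); /* a count, never an errno */
        kfree(edid);
        return count;
    }
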
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ba0a3aa78547..ff7f2a886a34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
235 * refcount on gem itself instead of f_count of dmabuf. 235 * refcount on gem itself instead of f_count of dmabuf.
236 */ 236 */
237 drm_gem_object_reference(obj); 237 drm_gem_object_reference(obj);
238 dma_buf_put(dma_buf);
239 return obj; 238 return obj;
240 } 239 }
241 } 240 }
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
244 if (IS_ERR(attach)) 243 if (IS_ERR(attach))
245 return ERR_PTR(-EINVAL); 244 return ERR_PTR(-EINVAL);
246 245
246 get_dma_buf(dma_buf);
247 247
248 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 248 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
249 if (IS_ERR_OR_NULL(sgt)) { 249 if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
298 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); 298 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
299err_buf_detach: 299err_buf_detach:
300 dma_buf_detach(dma_buf, attach); 300 dma_buf_detach(dma_buf, attach);
301 dma_buf_put(dma_buf);
302
301 return ERR_PTR(ret); 303 return ERR_PTR(ret);
302} 304}
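
The exynos import path is the mirror image of the drm_prime change earlier in this diff: previously it dropped the caller's dma_buf reference when re-importing its own buffer and took none for foreign buffers; now it calls get_dma_buf() once the attachment is committed, and releases it on every error path. The rule, compressed into a hypothetical helper:

    /* Sketch: hold one dma_buf reference for the lifetime of an attachment. */
    #include <linux/dma-buf.h>
    #include <linux/err.h>

    static struct sg_table *demo_import_sgt(struct device *dev,
                                            struct dma_buf *buf,
                                            struct dma_buf_attachment **attach_out)
    {
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(buf, dev);
        if (IS_ERR(attach))
            return ERR_CAST(attach);

        get_dma_buf(buf);       /* held as long as the attachment lives */

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
            dma_buf_detach(buf, attach);
            dma_buf_put(buf);   /* every failure path drops it again */
            return sgt ? ERR_CAST(sgt) : ERR_PTR(-ENOMEM);
        }

        *attach_out = attach;
        return sgt;
    }
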
303 305
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 3da5c2d214d8..ba6d995e4375 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -380,6 +380,10 @@ static int __init exynos_drm_init(void)
380 ret = platform_driver_register(&ipp_driver); 380 ret = platform_driver_register(&ipp_driver);
381 if (ret < 0) 381 if (ret < 0)
382 goto out_ipp; 382 goto out_ipp;
383
384 ret = exynos_platform_device_ipp_register();
385 if (ret < 0)
386 goto out_ipp_dev;
383#endif 387#endif
384 388
385 ret = platform_driver_register(&exynos_drm_platform_driver); 389 ret = platform_driver_register(&exynos_drm_platform_driver);
@@ -388,7 +392,7 @@ static int __init exynos_drm_init(void)
388 392
389 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, 393 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
390 NULL, 0); 394 NULL, 0);
391 if (IS_ERR_OR_NULL(exynos_drm_pdev)) { 395 if (IS_ERR(exynos_drm_pdev)) {
392 ret = PTR_ERR(exynos_drm_pdev); 396 ret = PTR_ERR(exynos_drm_pdev);
393 goto out; 397 goto out;
394 } 398 }
@@ -400,6 +404,8 @@ out:
400 404
401out_drm: 405out_drm:
402#ifdef CONFIG_DRM_EXYNOS_IPP 406#ifdef CONFIG_DRM_EXYNOS_IPP
407 exynos_platform_device_ipp_unregister();
408out_ipp_dev:
403 platform_driver_unregister(&ipp_driver); 409 platform_driver_unregister(&ipp_driver);
404out_ipp: 410out_ipp:
405#endif 411#endif
@@ -456,6 +462,7 @@ static void __exit exynos_drm_exit(void)
456 platform_driver_unregister(&exynos_drm_platform_driver); 462 platform_driver_unregister(&exynos_drm_platform_driver);
457 463
458#ifdef CONFIG_DRM_EXYNOS_IPP 464#ifdef CONFIG_DRM_EXYNOS_IPP
465 exynos_platform_device_ipp_unregister();
459 platform_driver_unregister(&ipp_driver); 466 platform_driver_unregister(&ipp_driver);
460#endif 467#endif
461 468
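
The exynos_drm_init() change is a textbook extension of the goto-unwind ladder: the new exynos_platform_device_ipp_register() call gets its own out_ipp_dev label, a failure unwinds through every label below it, and exynos_drm_exit() repeats the same calls in reverse. Reduced to two stages, the idiom looks like this ('first_driver'/'second_driver' are stand-ins):

    /* Sketch: register-then-unwind ladder, as extended in exynos_drm_init(). */
    static int __init demo_init(void)
    {
        int ret;

        ret = platform_driver_register(&first_driver);
        if (ret < 0)
            return ret;

        ret = platform_driver_register(&second_driver);
        if (ret < 0)
            goto out_first;

        return 0;

    out_first:
        platform_driver_unregister(&first_driver); /* unwind in reverse order */
        return ret;
    }
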
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 4606fac7241a..680a7c1b9dea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -322,13 +322,23 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
322 * this function registers exynos drm hdmi platform device. It ensures only one 322 * this function registers exynos drm hdmi platform device. It ensures only one
323 * instance of the device is created. 323 * instance of the device is created.
324 */ 324 */
325extern int exynos_platform_device_hdmi_register(void); 325int exynos_platform_device_hdmi_register(void);
326 326
327/* 327/*
328 * this function unregisters exynos drm hdmi platform device if it exists. 328 * this function unregisters exynos drm hdmi platform device if it exists.
329 */ 329 */
330void exynos_platform_device_hdmi_unregister(void); 330void exynos_platform_device_hdmi_unregister(void);
331 331
332/*
333 * this function registers exynos drm ipp platform device.
334 */
335int exynos_platform_device_ipp_register(void);
336
337/*
338 * this function unregisters exynos drm ipp platform device if it exists.
339 */
340void exynos_platform_device_ipp_unregister(void);
341
332extern struct platform_driver fimd_driver; 342extern struct platform_driver fimd_driver;
333extern struct platform_driver hdmi_driver; 343extern struct platform_driver hdmi_driver;
334extern struct platform_driver mixer_driver; 344extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 411f69b76e84..773f583fa964 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,11 +12,12 @@
12 * 12 *
13 */ 13 */
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/regmap.h>
17#include <linux/clk.h> 19#include <linux/clk.h>
18#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20 21
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 23#include <drm/exynos_drm.h>
@@ -76,6 +77,27 @@ enum fimc_wb {
76 FIMC_WB_B, 77 FIMC_WB_B,
77}; 78};
78 79
80enum {
81 FIMC_CLK_LCLK,
82 FIMC_CLK_GATE,
83 FIMC_CLK_WB_A,
84 FIMC_CLK_WB_B,
85 FIMC_CLK_MUX,
86 FIMC_CLK_PARENT,
87 FIMC_CLKS_MAX
88};
89
90static const char * const fimc_clock_names[] = {
91 [FIMC_CLK_LCLK] = "sclk_fimc",
92 [FIMC_CLK_GATE] = "fimc",
93 [FIMC_CLK_WB_A] = "pxl_async0",
94 [FIMC_CLK_WB_B] = "pxl_async1",
95 [FIMC_CLK_MUX] = "mux",
96 [FIMC_CLK_PARENT] = "parent",
97};
98
99#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
100
79/* 101/*
80 * A structure of scaler. 102 * A structure of scaler.
81 * 103 *
@@ -119,28 +141,16 @@ struct fimc_capability {
119}; 141};
120 142
121/* 143/*
122 * A structure of fimc driver data.
123 *
124 * @parent_clk: name of parent clock.
125 */
126struct fimc_driverdata {
127 char *parent_clk;
128};
129
130/*
131 * A structure of fimc context. 144 * A structure of fimc context.
132 * 145 *
133 * @ippdrv: prepare initialization using ippdrv. 146 * @ippdrv: prepare initialization using ippdrv.
134 * @regs_res: register resources. 147 * @regs_res: register resources.
135 * @regs: memory mapped io registers. 148 * @regs: memory mapped io registers.
136 * @lock: locking of operations. 149 * @lock: locking of operations.
137 * @sclk_fimc_clk: fimc source clock. 150 * @clocks: fimc clocks.
138 * @fimc_clk: fimc clock. 151 * @clk_frequency: LCLK clock frequency.
139 * @wb_clk: writeback a clock. 152 * @sysreg: handle to SYSREG block regmap.
140 * @wb_b_clk: writeback b clock.
141 * @sc: scaler information. 153 * @sc: scaler information.
142 * @odr: ordering of YUV.
143 * @ver: fimc version.
144 * @pol: polarity of writeback. 154 * @pol: polarity of writeback.
145 * @id: fimc id. 155 * @id: fimc id.
146 * @irq: irq number. 156 * @irq: irq number.
@@ -151,12 +161,10 @@ struct fimc_context {
151 struct resource *regs_res; 161 struct resource *regs_res;
152 void __iomem *regs; 162 void __iomem *regs;
153 struct mutex lock; 163 struct mutex lock;
154 struct clk *sclk_fimc_clk; 164 struct clk *clocks[FIMC_CLKS_MAX];
155 struct clk *fimc_clk; 165 u32 clk_frequency;
156 struct clk *wb_clk; 166 struct regmap *sysreg;
157 struct clk *wb_b_clk;
158 struct fimc_scaler sc; 167 struct fimc_scaler sc;
159 struct fimc_driverdata *ddata;
160 struct exynos_drm_ipp_pol pol; 168 struct exynos_drm_ipp_pol pol;
161 int id; 169 int id;
162 int irq; 170 int irq;
@@ -200,17 +208,13 @@ static void fimc_sw_reset(struct fimc_context *ctx)
200 fimc_write(0x0, EXYNOS_CIFCNTSEQ); 208 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
201} 209}
202 210
203static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) 211static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
204{ 212{
205 u32 camblk_cfg;
206
207 DRM_DEBUG_KMS("%s\n", __func__); 213 DRM_DEBUG_KMS("%s\n", __func__);
208 214
209 camblk_cfg = readl(SYSREG_CAMERA_BLK); 215 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
210 camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK); 216 SYSREG_FIMD0WB_DEST_MASK,
211 camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT); 217 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
212
213 writel(camblk_cfg, SYSREG_CAMERA_BLK);
214} 218}
215 219
216static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) 220static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
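
fimc_set_camblk_fimd0_wb() is a read-modify-write on a register in the shared SYSREG block; routing it through a syscon regmap (looked up later in this diff via the "samsung,sysreg" phandle) removes the hard-coded SYSREG_CAMERA_BLK mapping from plat/map-base.h and serialises access with the block's other users. Modulo the map's locking and caching, regmap_update_bits(map, reg, mask, val) amounts to:

    /* Sketch: what regmap_update_bits() does, minus locking and caching. */
    unsigned int old, new;

    regmap_read(map, reg, &old);
    new = (old & ~mask) | (val & mask);
    if (new != old)
        regmap_write(map, reg, new);
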
@@ -1301,14 +1305,12 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1301 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); 1305 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1302 1306
1303 if (enable) { 1307 if (enable) {
1304 clk_enable(ctx->sclk_fimc_clk); 1308 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1305 clk_enable(ctx->fimc_clk); 1309 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1306 clk_enable(ctx->wb_clk);
1307 ctx->suspended = false; 1310 ctx->suspended = false;
1308 } else { 1311 } else {
1309 clk_disable(ctx->sclk_fimc_clk); 1312 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1310 clk_disable(ctx->fimc_clk); 1313 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1311 clk_disable(ctx->wb_clk);
1312 ctx->suspended = true; 1314 ctx->suspended = true;
1313 } 1315 }
1314 1316
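
The clk_enable()/clk_disable() pairs become clk_prepare_enable()/clk_disable_unprepare() because the common clock framework splits clock handling into a prepare phase that may sleep (PLL lock, parent reconfiguration) and an enable phase that must not. The combined helpers are nothing more than the two calls in order:

    /* Sketch: clk_prepare_enable() is the two-phase sequence in one call. */
    int ret = clk_prepare(clk);     /* may sleep */
    if (!ret) {
        ret = clk_enable(clk);      /* safe in atomic context */
        if (ret)
            clk_unprepare(clk);
    }
    /* clk_disable_unprepare(clk) is clk_disable() followed by clk_unprepare(). */
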
@@ -1613,7 +1615,11 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1613 fimc_handle_lastend(ctx, true); 1615 fimc_handle_lastend(ctx, true);
1614 1616
1615 /* setup FIMD */ 1617 /* setup FIMD */
1616 fimc_set_camblk_fimd0_wb(ctx); 1618 ret = fimc_set_camblk_fimd0_wb(ctx);
1619 if (ret < 0) {
1620 dev_err(dev, "camblk setup failed.\n");
1621 return ret;
1622 }
1617 1623
1618 set_wb.enable = 1; 1624 set_wb.enable = 1;
1619 set_wb.refresh = property->refresh_rate; 1625 set_wb.refresh = property->refresh_rate;
@@ -1713,76 +1719,118 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1713 fimc_write(cfg, EXYNOS_CIGCTRL); 1719 fimc_write(cfg, EXYNOS_CIGCTRL);
1714} 1720}
1715 1721
1722static void fimc_put_clocks(struct fimc_context *ctx)
1723{
1724 int i;
1725
1726 for (i = 0; i < FIMC_CLKS_MAX; i++) {
1727 if (IS_ERR(ctx->clocks[i]))
1728 continue;
1729 clk_put(ctx->clocks[i]);
1730 ctx->clocks[i] = ERR_PTR(-EINVAL);
1731 }
1732}
1733
1734static int fimc_setup_clocks(struct fimc_context *ctx)
1735{
1736 struct device *fimc_dev = ctx->ippdrv.dev;
1737 struct device *dev;
1738 int ret, i;
1739
1740 for (i = 0; i < FIMC_CLKS_MAX; i++)
1741 ctx->clocks[i] = ERR_PTR(-EINVAL);
1742
1743 for (i = 0; i < FIMC_CLKS_MAX; i++) {
1744 if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
1745 dev = fimc_dev->parent;
1746 else
1747 dev = fimc_dev;
1748
1749 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
1750 if (IS_ERR(ctx->clocks[i])) {
1751 if (i >= FIMC_CLK_MUX)
1752 break;
1753 ret = PTR_ERR(ctx->clocks[i]);
1754 dev_err(fimc_dev, "failed to get clock: %s\n",
1755 fimc_clock_names[i]);
1756 goto e_clk_free;
1757 }
1758 }
1759
1760 /* Optional FIMC LCLK parent clock setting */
1761 if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
1762 ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
1763 ctx->clocks[FIMC_CLK_PARENT]);
1764 if (ret < 0) {
1765 dev_err(fimc_dev, "failed to set parent.\n");
1766 goto e_clk_free;
1767 }
1768 }
1769
1770 ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
1771 if (ret < 0)
1772 goto e_clk_free;
1773
1774 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
1775 if (!ret)
1776 return ret;
1777e_clk_free:
1778 fimc_put_clocks(ctx);
1779 return ret;
1780}
1781
1782static int fimc_parse_dt(struct fimc_context *ctx)
1783{
1784 struct device_node *node = ctx->ippdrv.dev->of_node;
1785
1786 /* Handle only devices that support the LCD Writeback data path */
1787 if (!of_property_read_bool(node, "samsung,lcd-wb"))
1788 return -ENODEV;
1789
1790 if (of_property_read_u32(node, "clock-frequency",
1791 &ctx->clk_frequency))
1792 ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
1793
1794 ctx->id = of_alias_get_id(node, "fimc");
1795
1796 if (ctx->id < 0) {
1797 dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
1798 return -EINVAL;
1799 }
1800
1801 return 0;
1802}
1803
1716static int fimc_probe(struct platform_device *pdev) 1804static int fimc_probe(struct platform_device *pdev)
1717{ 1805{
1718 struct device *dev = &pdev->dev; 1806 struct device *dev = &pdev->dev;
1719 struct fimc_context *ctx; 1807 struct fimc_context *ctx;
1720 struct clk *parent_clk;
1721 struct resource *res; 1808 struct resource *res;
1722 struct exynos_drm_ippdrv *ippdrv; 1809 struct exynos_drm_ippdrv *ippdrv;
1723 struct exynos_drm_fimc_pdata *pdata;
1724 struct fimc_driverdata *ddata;
1725 int ret; 1810 int ret;
1726 1811
1727 pdata = pdev->dev.platform_data; 1812 if (!dev->of_node) {
1728 if (!pdata) { 1813 dev_err(dev, "device tree node not found.\n");
1729 dev_err(dev, "no platform data specified.\n"); 1814 return -ENODEV;
1730 return -EINVAL;
1731 } 1815 }
1732 1816
1733 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1817 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1734 if (!ctx) 1818 if (!ctx)
1735 return -ENOMEM; 1819 return -ENOMEM;
1736 1820
1737 ddata = (struct fimc_driverdata *) 1821 ctx->ippdrv.dev = dev;
1738 platform_get_device_id(pdev)->driver_data;
1739
1740 /* clock control */
1741 ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
1742 if (IS_ERR(ctx->sclk_fimc_clk)) {
1743 dev_err(dev, "failed to get src fimc clock.\n");
1744 return PTR_ERR(ctx->sclk_fimc_clk);
1745 }
1746 clk_enable(ctx->sclk_fimc_clk);
1747
1748 ctx->fimc_clk = devm_clk_get(dev, "fimc");
1749 if (IS_ERR(ctx->fimc_clk)) {
1750 dev_err(dev, "failed to get fimc clock.\n");
1751 clk_disable(ctx->sclk_fimc_clk);
1752 return PTR_ERR(ctx->fimc_clk);
1753 }
1754
1755 ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
1756 if (IS_ERR(ctx->wb_clk)) {
1757 dev_err(dev, "failed to get writeback a clock.\n");
1758 clk_disable(ctx->sclk_fimc_clk);
1759 return PTR_ERR(ctx->wb_clk);
1760 }
1761
1762 ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
1763 if (IS_ERR(ctx->wb_b_clk)) {
1764 dev_err(dev, "failed to get writeback b clock.\n");
1765 clk_disable(ctx->sclk_fimc_clk);
1766 return PTR_ERR(ctx->wb_b_clk);
1767 }
1768 1822
1769 parent_clk = devm_clk_get(dev, ddata->parent_clk); 1823 ret = fimc_parse_dt(ctx);
1770 1824 if (ret < 0)
1771 if (IS_ERR(parent_clk)) { 1825 return ret;
1772 dev_err(dev, "failed to get parent clock.\n");
1773 clk_disable(ctx->sclk_fimc_clk);
1774 return PTR_ERR(parent_clk);
1775 }
1776 1826
1777 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) { 1827 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
1778 dev_err(dev, "failed to set parent.\n"); 1828 "samsung,sysreg");
1779 clk_disable(ctx->sclk_fimc_clk); 1829 if (IS_ERR(ctx->sysreg)) {
1780 return -EINVAL; 1830 dev_err(dev, "syscon regmap lookup failed.\n");
1831 return PTR_ERR(ctx->sysreg);
1781 } 1832 }
1782 1833
1783 devm_clk_put(dev, parent_clk);
1784 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
1785
1786 /* resource memory */ 1834 /* resource memory */
1787 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1835 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1788 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); 1836 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
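
fimc_parse_dt() above shows two routine device-tree idioms worth noting: of_property_read_u32() returns non-zero and leaves its output untouched when the property is absent, which makes the compiled-in 133 MHz default trivial, and of_alias_get_id() recovers the instance number that pdev->id used to carry, now taken from a fimcN alias. In miniature:

    /* Sketch: optional DT property with a default, plus an alias-derived id. */
    static int demo_parse(struct device_node *node, u32 *freq, int *id)
    {
        if (of_property_read_u32(node, "clock-frequency", freq))
            *freq = 133000000;  /* property absent: fall back to the default */

        *id = of_alias_get_id(node, "fimc"); /* /aliases { fimc0 = &fimc_0; } */
        if (*id < 0)
            return *id;         /* without an alias, instances are ambiguous */
        return 0;
    }
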
@@ -1804,13 +1852,11 @@ static int fimc_probe(struct platform_device *pdev)
1804 return ret; 1852 return ret;
1805 } 1853 }
1806 1854
1807 /* context initialization */ 1855 ret = fimc_setup_clocks(ctx);
1808 ctx->id = pdev->id; 1856 if (ret < 0)
1809 ctx->pol = pdata->pol; 1857 goto err_free_irq;
1810 ctx->ddata = ddata;
1811 1858
1812 ippdrv = &ctx->ippdrv; 1859 ippdrv = &ctx->ippdrv;
1813 ippdrv->dev = dev;
1814 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; 1860 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
1815 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; 1861 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
1816 ippdrv->check_property = fimc_ippdrv_check_property; 1862 ippdrv->check_property = fimc_ippdrv_check_property;
@@ -1820,7 +1866,7 @@ static int fimc_probe(struct platform_device *pdev)
1820 ret = fimc_init_prop_list(ippdrv); 1866 ret = fimc_init_prop_list(ippdrv);
1821 if (ret < 0) { 1867 if (ret < 0) {
1822 dev_err(dev, "failed to init property list.\n"); 1868 dev_err(dev, "failed to init property list.\n");
1823 goto err_get_irq; 1869 goto err_put_clk;
1824 } 1870 }
1825 1871
1826 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, 1872 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1835,17 +1881,18 @@ static int fimc_probe(struct platform_device *pdev)
1835 ret = exynos_drm_ippdrv_register(ippdrv); 1881 ret = exynos_drm_ippdrv_register(ippdrv);
1836 if (ret < 0) { 1882 if (ret < 0) {
1837 dev_err(dev, "failed to register drm fimc device.\n"); 1883 dev_err(dev, "failed to register drm fimc device.\n");
1838 goto err_ippdrv_register; 1884 goto err_pm_dis;
1839 } 1885 }
1840 1886
1841 dev_info(&pdev->dev, "drm fimc registered successfully.\n"); 1887 dev_info(&pdev->dev, "drm fimc registered successfully.\n");
1842 1888
1843 return 0; 1889 return 0;
1844 1890
1845err_ippdrv_register: 1891err_pm_dis:
1846 devm_kfree(dev, ippdrv->prop_list);
1847 pm_runtime_disable(dev); 1892 pm_runtime_disable(dev);
1848err_get_irq: 1893err_put_clk:
1894 fimc_put_clocks(ctx);
1895err_free_irq:
1849 free_irq(ctx->irq, ctx); 1896 free_irq(ctx->irq, ctx);
1850 1897
1851 return ret; 1898 return ret;
@@ -1857,10 +1904,10 @@ static int fimc_remove(struct platform_device *pdev)
1857 struct fimc_context *ctx = get_fimc_context(dev); 1904 struct fimc_context *ctx = get_fimc_context(dev);
1858 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; 1905 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1859 1906
1860 devm_kfree(dev, ippdrv->prop_list);
1861 exynos_drm_ippdrv_unregister(ippdrv); 1907 exynos_drm_ippdrv_unregister(ippdrv);
1862 mutex_destroy(&ctx->lock); 1908 mutex_destroy(&ctx->lock);
1863 1909
1910 fimc_put_clocks(ctx);
1864 pm_runtime_set_suspended(dev); 1911 pm_runtime_set_suspended(dev);
1865 pm_runtime_disable(dev); 1912 pm_runtime_disable(dev);
1866 1913
@@ -1915,36 +1962,22 @@ static int fimc_runtime_resume(struct device *dev)
1915} 1962}
1916#endif 1963#endif
1917 1964
1918static struct fimc_driverdata exynos4210_fimc_data = {
1919 .parent_clk = "mout_mpll",
1920};
1921
1922static struct fimc_driverdata exynos4410_fimc_data = {
1923 .parent_clk = "mout_mpll_user",
1924};
1925
1926static struct platform_device_id fimc_driver_ids[] = {
1927 {
1928 .name = "exynos4210-fimc",
1929 .driver_data = (unsigned long)&exynos4210_fimc_data,
1930 }, {
1931 .name = "exynos4412-fimc",
1932 .driver_data = (unsigned long)&exynos4410_fimc_data,
1933 },
1934 {},
1935};
1936MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
1937
1938static const struct dev_pm_ops fimc_pm_ops = { 1965static const struct dev_pm_ops fimc_pm_ops = {
1939 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume) 1966 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
1940 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) 1967 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
1941}; 1968};
1942 1969
1970static const struct of_device_id fimc_of_match[] = {
1971 { .compatible = "samsung,exynos4210-fimc" },
1972 { .compatible = "samsung,exynos4212-fimc" },
1973 { },
1974};
1975
1943struct platform_driver fimc_driver = { 1976struct platform_driver fimc_driver = {
1944 .probe = fimc_probe, 1977 .probe = fimc_probe,
1945 .remove = fimc_remove, 1978 .remove = fimc_remove,
1946 .id_table = fimc_driver_ids,
1947 .driver = { 1979 .driver = {
1980 .of_match_table = fimc_of_match,
1948 .name = "exynos-drm-fimc", 1981 .name = "exynos-drm-fimc",
1949 .owner = THIS_MODULE, 1982 .owner = THIS_MODULE,
1950 .pm = &fimc_pm_ops, 1983 .pm = &fimc_pm_ops,
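
With the conversion complete, the driver no longer matches on a platform_device_id table: probing is keyed on the compatible strings in fimc_of_match, and the per-SoC parent-clock name that used to travel in driver_data now comes from the device tree (the "mux"/"parent" clocks above). A skeletal OF-matched platform driver, for reference (names hypothetical):

    /* Sketch: minimal OF-matched platform driver, wired like fimc_driver. */
    static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-block" },
        { },
    };

    static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
            .name           = "demo-block",
            .owner          = THIS_MODULE,
            .of_match_table = demo_of_match,
        },
    };
    module_platform_driver(demo_driver);
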
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 98cc14725ba9..746b282b343a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,7 @@
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22 22
23#include <video/of_display_timing.h>
23#include <video/samsung_fimd.h> 24#include <video/samsung_fimd.h>
24#include <drm/exynos_drm.h> 25#include <drm/exynos_drm.h>
25 26
@@ -800,18 +801,18 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
800 if (enable) { 801 if (enable) {
801 int ret; 802 int ret;
802 803
803 ret = clk_enable(ctx->bus_clk); 804 ret = clk_prepare_enable(ctx->bus_clk);
804 if (ret < 0) 805 if (ret < 0)
805 return ret; 806 return ret;
806 807
807 ret = clk_enable(ctx->lcd_clk); 808 ret = clk_prepare_enable(ctx->lcd_clk);
808 if (ret < 0) { 809 if (ret < 0) {
809 clk_disable(ctx->bus_clk); 810 clk_disable_unprepare(ctx->bus_clk);
810 return ret; 811 return ret;
811 } 812 }
812 } else { 813 } else {
813 clk_disable(ctx->lcd_clk); 814 clk_disable_unprepare(ctx->lcd_clk);
814 clk_disable(ctx->bus_clk); 815 clk_disable_unprepare(ctx->bus_clk);
815 } 816 }
816 817
817 return 0; 818 return 0;
@@ -884,10 +885,25 @@ static int fimd_probe(struct platform_device *pdev)
884 885
885 DRM_DEBUG_KMS("%s\n", __FILE__); 886 DRM_DEBUG_KMS("%s\n", __FILE__);
886 887
887 pdata = pdev->dev.platform_data; 888 if (pdev->dev.of_node) {
888 if (!pdata) { 889 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
889 dev_err(dev, "no platform data specified\n"); 890 if (!pdata) {
890 return -EINVAL; 891 DRM_ERROR("memory allocation for pdata failed\n");
892 return -ENOMEM;
893 }
894
895 ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
896 OF_USE_NATIVE_MODE);
897 if (ret) {
898 DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
899 return ret;
900 }
901 } else {
902 pdata = pdev->dev.platform_data;
903 if (!pdata) {
904 DRM_ERROR("no platform data specified\n");
905 return -EINVAL;
906 }
891 } 907 }
892 908
893 panel = &pdata->panel; 909 panel = &pdata->panel;
@@ -918,7 +934,7 @@ static int fimd_probe(struct platform_device *pdev)
918 if (IS_ERR(ctx->regs)) 934 if (IS_ERR(ctx->regs))
919 return PTR_ERR(ctx->regs); 935 return PTR_ERR(ctx->regs);
920 936
921 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 937 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
922 if (!res) { 938 if (!res) {
923 dev_err(dev, "irq request failed.\n"); 939 dev_err(dev, "irq request failed.\n");
924 return -ENXIO; 940 return -ENXIO;
@@ -980,9 +996,6 @@ static int fimd_remove(struct platform_device *pdev)
980 if (ctx->suspended) 996 if (ctx->suspended)
981 goto out; 997 goto out;
982 998
983 clk_disable(ctx->lcd_clk);
984 clk_disable(ctx->bus_clk);
985
986 pm_runtime_set_suspended(dev); 999 pm_runtime_set_suspended(dev);
987 pm_runtime_put_sync(dev); 1000 pm_runtime_put_sync(dev);
988 1001
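
Two details of the fimd conversion deserve a remark: of_get_fb_videomode() (hence the FB_MODE_HELPERS/VIDEOMODE_HELPERS selects in the Kconfig hunk earlier) fills a struct fb_videomode directly from a display-timings node, and the vsync interrupt is fetched by name because a DT node can carry several interrupts. Roughly:

    /* Sketch: native video mode from DT plus a named IRQ resource. */
    struct fb_videomode vm;
    struct resource *res;
    int ret;

    ret = of_get_fb_videomode(dev->of_node, &vm, OF_USE_NATIVE_MODE);
    if (ret)
        return ret;     /* no usable display-timings node */

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
    if (!res)
        return -ENXIO;  /* DT interrupt-names must include "vsync" */
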
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0e6fe000578c..cf4543ffa079 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -682,7 +682,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
682 args->pitch = args->width * ((args->bpp + 7) / 8); 682 args->pitch = args->width * ((args->bpp + 7) / 8);
683 args->size = args->pitch * args->height; 683 args->size = args->pitch * args->height;
684 684
685 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 685 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
686 EXYNOS_BO_WC, args->size);
686 if (IS_ERR(exynos_gem_obj)) 687 if (IS_ERR(exynos_gem_obj))
687 return PTR_ERR(exynos_gem_obj); 688 return PTR_ERR(exynos_gem_obj);
688 689
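
The dumb-create hunk also shows the stock pitch computation: bits per pixel rounded up to whole bytes, times width. Worked through for a 1920x1080 XRGB8888 buffer, and pinned to contiguous write-combined memory as the fix requires, since dumb buffers exist for unaccelerated CPU-rendered scanout:

    /* Worked example of the dumb-buffer sizing above (1920x1080 at 32 bpp). */
    u32 bpp = 32, width = 1920, height = 1080;
    u32 cpp = (bpp + 7) / 8;        /* 4 bytes per pixel, rounded up */
    u32 pitch = width * cpp;        /* 7680 bytes per scanline */
    u64 size = (u64)pitch * height; /* 8294400 bytes in total */
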
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 7c27df03c9ff..ba2f0f1aa05f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -51,21 +51,27 @@ struct drm_hdmi_context {
51 51
52int exynos_platform_device_hdmi_register(void) 52int exynos_platform_device_hdmi_register(void)
53{ 53{
54 struct platform_device *pdev;
55
54 if (exynos_drm_hdmi_pdev) 56 if (exynos_drm_hdmi_pdev)
55 return -EEXIST; 57 return -EEXIST;
56 58
57 exynos_drm_hdmi_pdev = platform_device_register_simple( 59 pdev = platform_device_register_simple(
58 "exynos-drm-hdmi", -1, NULL, 0); 60 "exynos-drm-hdmi", -1, NULL, 0);
59 if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev)) 61 if (IS_ERR(pdev))
60 return PTR_ERR(exynos_drm_hdmi_pdev); 62 return PTR_ERR(pdev);
63
64 exynos_drm_hdmi_pdev = pdev;
61 65
62 return 0; 66 return 0;
63} 67}
64 68
65void exynos_platform_device_hdmi_unregister(void) 69void exynos_platform_device_hdmi_unregister(void)
66{ 70{
67 if (exynos_drm_hdmi_pdev) 71 if (exynos_drm_hdmi_pdev) {
68 platform_device_unregister(exynos_drm_hdmi_pdev); 72 platform_device_unregister(exynos_drm_hdmi_pdev);
73 exynos_drm_hdmi_pdev = NULL;
74 }
69} 75}
70 76
71void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) 77void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
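
The hdmi (and, below, ipp) registration helpers fix a subtle state bug: platform_device_register_simple() never returns NULL, so IS_ERR() is the complete check, and the old code stored the ERR_PTR into exynos_drm_hdmi_pdev on failure, making every later register attempt return -EEXIST. Registering through a local and clearing the static pointer on unregister keeps the pair re-entrant:

    /* Sketch: re-entrant singleton platform device, as fixed above. */
    static struct platform_device *demo_pdev;

    int demo_register(void)
    {
        struct platform_device *pdev;

        if (demo_pdev)
            return -EEXIST;

        pdev = platform_device_register_simple("demo", -1, NULL, 0);
        if (IS_ERR(pdev))   /* never NULL, so IS_ERR() is enough */
            return PTR_ERR(pdev);

        demo_pdev = pdev;   /* publish only a known-good pointer */
        return 0;
    }

    void demo_unregister(void)
    {
        if (demo_pdev) {
            platform_device_unregister(demo_pdev);
            demo_pdev = NULL;   /* allow a later re-register */
        }
    }
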
@@ -205,13 +211,45 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
205 const struct drm_display_mode *mode, 211 const struct drm_display_mode *mode,
206 struct drm_display_mode *adjusted_mode) 212 struct drm_display_mode *adjusted_mode)
207{ 213{
208 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 214 struct drm_display_mode *m;
215 int mode_ok;
209 216
210 DRM_DEBUG_KMS("%s\n", __FILE__); 217 DRM_DEBUG_KMS("%s\n", __FILE__);
211 218
212 if (hdmi_ops && hdmi_ops->mode_fixup) 219 drm_mode_set_crtcinfo(adjusted_mode, 0);
213 hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode, 220
214 adjusted_mode); 221 mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);
222
223 /* just return if user desired mode exists. */
224 if (mode_ok == 0)
225 return;
226
227 /*
228 * otherwise, find the most suitable mode among modes and change it
229 * to adjusted_mode.
230 */
231 list_for_each_entry(m, &connector->modes, head) {
232 mode_ok = drm_hdmi_check_timing(subdrv_dev, m);
233
234 if (mode_ok == 0) {
235 struct drm_mode_object base;
236 struct list_head head;
237
238 DRM_INFO("desired mode doesn't exist so\n");
239 DRM_INFO("use the most suitable mode among modes.\n");
240
241 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
242 m->hdisplay, m->vdisplay, m->vrefresh);
243
244 /* preserve display mode header while copying. */
245 head = adjusted_mode->head;
246 base = adjusted_mode->base;
247 memcpy(adjusted_mode, m, sizeof(*m));
248 adjusted_mode->head = head;
249 adjusted_mode->base = base;
250 break;
251 }
252 }
215} 253}
216 254
217static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) 255static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
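
The rewritten mode_fixup falls back to the first mode in connector->modes that drm_hdmi_check_timing() accepts and copies it over adjusted_mode wholesale. struct drm_display_mode embeds both its list linkage and its drm_mode_object identity, so those fields must be saved and restored around the memcpy, exactly as the hunk does, or the connector's mode list would be corrupted. The essential move:

    /* Sketch: overwrite a drm_display_mode in place without breaking links. */
    static void demo_copy_mode(struct drm_display_mode *dst,
                               const struct drm_display_mode *src)
    {
        struct list_head head = dst->head;       /* keep dst's list linkage */
        struct drm_mode_object base = dst->base; /* and its object id */

        memcpy(dst, src, sizeof(*dst));
        dst->head = head;
        dst->base = base;
    }
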
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index b7faa3662307..6b709440df4c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -36,9 +36,6 @@ struct exynos_hdmi_ops {
36 int (*power_on)(void *ctx, int mode); 36 int (*power_on)(void *ctx, int mode);
37 37
38 /* manager */ 38 /* manager */
39 void (*mode_fixup)(void *ctx, struct drm_connector *connector,
40 const struct drm_display_mode *mode,
41 struct drm_display_mode *adjusted_mode);
42 void (*mode_set)(void *ctx, void *mode); 39 void (*mode_set)(void *ctx, void *mode);
43 void (*get_max_resol)(void *ctx, unsigned int *width, 40 void (*get_max_resol)(void *ctx, unsigned int *width,
44 unsigned int *height); 41 unsigned int *height);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1adce07ecb5b..29d2ad314490 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -47,6 +47,9 @@
47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) 47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) 48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
49 49
50/* platform device pointer for ipp device. */
51static struct platform_device *exynos_drm_ipp_pdev;
52
50/* 53/*
51 * A structure of event. 54 * A structure of event.
52 * 55 *
@@ -102,6 +105,30 @@ static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock); 105static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); 106static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104 107
108int exynos_platform_device_ipp_register(void)
109{
110 struct platform_device *pdev;
111
112 if (exynos_drm_ipp_pdev)
113 return -EEXIST;
114
115 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
116 if (IS_ERR(pdev))
117 return PTR_ERR(pdev);
118
119 exynos_drm_ipp_pdev = pdev;
120
121 return 0;
122}
123
124void exynos_platform_device_ipp_unregister(void)
125{
126 if (exynos_drm_ipp_pdev) {
127 platform_device_unregister(exynos_drm_ipp_pdev);
128 exynos_drm_ipp_pdev = NULL;
129 }
130}
131
105int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 132int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
106{ 133{
107 DRM_DEBUG_KMS("%s\n", __func__); 134 DRM_DEBUG_KMS("%s\n", __func__);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a40b9fb60240..947f09f15ad1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -674,7 +674,7 @@ static int rotator_probe(struct platform_device *pdev)
674 } 674 }
675 675
676 rot->clock = devm_clk_get(dev, "rotator"); 676 rot->clock = devm_clk_get(dev, "rotator");
677 if (IS_ERR_OR_NULL(rot->clock)) { 677 if (IS_ERR(rot->clock)) {
678 dev_err(dev, "failed to get clock\n"); 678 dev_err(dev, "failed to get clock\n");
679 ret = PTR_ERR(rot->clock); 679 ret = PTR_ERR(rot->clock);
680 goto err_clk_get; 680 goto err_clk_get;
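
Same rationale as the IS_ERR() fixes above: the clk API returns either a valid handle or an ERR_PTR, never NULL. Checking with IS_ERR_OR_NULL() was not just redundant, it was hazardous, because PTR_ERR(NULL) is 0 and a NULL would have turned the error path into a silent success:

    /* Sketch: clk lookups are checked with IS_ERR(), never IS_ERR_OR_NULL(). */
    clk = devm_clk_get(dev, "rotator");
    if (IS_ERR(clk))
        return PTR_ERR(clk);    /* PTR_ERR(NULL) would be 0, i.e. "success" */
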
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c5f266154ad..6652597586a1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -108,7 +108,20 @@ struct hdmi_tg_regs {
108 u8 tg_3d[1]; 108 u8 tg_3d[1];
109}; 109};
110 110
111struct hdmi_core_regs { 111struct hdmi_v13_core_regs {
112 u8 h_blank[2];
113 u8 v_blank[3];
114 u8 h_v_line[3];
115 u8 vsync_pol[1];
116 u8 int_pro_mode[1];
117 u8 v_blank_f[3];
118 u8 h_sync_gen[3];
119 u8 v_sync_gen1[3];
120 u8 v_sync_gen2[3];
121 u8 v_sync_gen3[3];
122};
123
124struct hdmi_v14_core_regs {
112 u8 h_blank[2]; 125 u8 h_blank[2];
113 u8 v2_blank[2]; 126 u8 v2_blank[2];
114 u8 v1_blank[2]; 127 u8 v1_blank[2];
@@ -147,11 +160,23 @@ struct hdmi_core_regs {
147 u8 vact_space_6[2]; 160 u8 vact_space_6[2];
148}; 161};
149 162
163struct hdmi_v13_conf {
164 struct hdmi_v13_core_regs core;
165 struct hdmi_tg_regs tg;
166};
167
150struct hdmi_v14_conf { 168struct hdmi_v14_conf {
151 int pixel_clock; 169 struct hdmi_v14_core_regs core;
152 struct hdmi_core_regs core;
153 struct hdmi_tg_regs tg; 170 struct hdmi_tg_regs tg;
171};
172
173struct hdmi_conf_regs {
174 int pixel_clock;
154 int cea_video_id; 175 int cea_video_id;
176 union {
177 struct hdmi_v13_conf v13_conf;
178 struct hdmi_v14_conf v14_conf;
179 } conf;
155}; 180};
156 181
157struct hdmi_context { 182struct hdmi_context {
@@ -169,9 +194,8 @@ struct hdmi_context {
169 struct i2c_client *ddc_port; 194 struct i2c_client *ddc_port;
170 struct i2c_client *hdmiphy_port; 195 struct i2c_client *hdmiphy_port;
171 196
172 /* current hdmiphy conf index */ 197 /* current hdmiphy conf regs */
173 int cur_conf; 198 struct hdmi_conf_regs mode_conf;
174 struct hdmi_v14_conf mode_conf;
175 199
176 struct hdmi_resources res; 200 struct hdmi_resources res;
177 201
@@ -180,292 +204,60 @@ struct hdmi_context {
180 enum hdmi_type type; 204 enum hdmi_type type;
181}; 205};
182 206
183/* HDMI Version 1.3 */ 207struct hdmiphy_config {
184static const u8 hdmiphy_v13_conf27[32] = { 208 int pixel_clock;
185 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 209 u8 conf[32];
186 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
187 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
188 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
189};
190
191static const u8 hdmiphy_v13_conf27_027[32] = {
192 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
193 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
194 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
195 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
196};
197
198static const u8 hdmiphy_v13_conf74_175[32] = {
199 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
200 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
201 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
202 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
203};
204
205static const u8 hdmiphy_v13_conf74_25[32] = {
206 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
207 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
208 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
209 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
210};
211
212static const u8 hdmiphy_v13_conf148_5[32] = {
213 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
214 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
215 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
216 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
217};
218
219struct hdmi_v13_tg_regs {
220 u8 cmd;
221 u8 h_fsz_l;
222 u8 h_fsz_h;
223 u8 hact_st_l;
224 u8 hact_st_h;
225 u8 hact_sz_l;
226 u8 hact_sz_h;
227 u8 v_fsz_l;
228 u8 v_fsz_h;
229 u8 vsync_l;
230 u8 vsync_h;
231 u8 vsync2_l;
232 u8 vsync2_h;
233 u8 vact_st_l;
234 u8 vact_st_h;
235 u8 vact_sz_l;
236 u8 vact_sz_h;
237 u8 field_chg_l;
238 u8 field_chg_h;
239 u8 vact_st2_l;
240 u8 vact_st2_h;
241 u8 vsync_top_hdmi_l;
242 u8 vsync_top_hdmi_h;
243 u8 vsync_bot_hdmi_l;
244 u8 vsync_bot_hdmi_h;
245 u8 field_top_hdmi_l;
246 u8 field_top_hdmi_h;
247 u8 field_bot_hdmi_l;
248 u8 field_bot_hdmi_h;
249};
250
251struct hdmi_v13_core_regs {
252 u8 h_blank[2];
253 u8 v_blank[3];
254 u8 h_v_line[3];
255 u8 vsync_pol[1];
256 u8 int_pro_mode[1];
257 u8 v_blank_f[3];
258 u8 h_sync_gen[3];
259 u8 v_sync_gen1[3];
260 u8 v_sync_gen2[3];
261 u8 v_sync_gen3[3];
262};
263
264struct hdmi_v13_preset_conf {
265 struct hdmi_v13_core_regs core;
266 struct hdmi_v13_tg_regs tg;
267};
268
269struct hdmi_v13_conf {
270 int width;
271 int height;
272 int vrefresh;
273 bool interlace;
274 int cea_video_id;
275 const u8 *hdmiphy_data;
276 const struct hdmi_v13_preset_conf *conf;
277};
278
279static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
280 .core = {
281 .h_blank = {0x8a, 0x00},
282 .v_blank = {0x0d, 0x6a, 0x01},
283 .h_v_line = {0x0d, 0xa2, 0x35},
284 .vsync_pol = {0x01},
285 .int_pro_mode = {0x00},
286 .v_blank_f = {0x00, 0x00, 0x00},
287 .h_sync_gen = {0x0e, 0x30, 0x11},
288 .v_sync_gen1 = {0x0f, 0x90, 0x00},
289 /* other don't care */
290 },
291 .tg = {
292 0x00, /* cmd */
293 0x5a, 0x03, /* h_fsz */
294 0x8a, 0x00, 0xd0, 0x02, /* hact */
295 0x0d, 0x02, /* v_fsz */
296 0x01, 0x00, 0x33, 0x02, /* vsync */
297 0x2d, 0x00, 0xe0, 0x01, /* vact */
298 0x33, 0x02, /* field_chg */
299 0x49, 0x02, /* vact_st2 */
300 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
301 0x01, 0x00, 0x33, 0x02, /* field top/bot */
302 },
303};
304
305static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
306 .core = {
307 .h_blank = {0x72, 0x01},
308 .v_blank = {0xee, 0xf2, 0x00},
309 .h_v_line = {0xee, 0x22, 0x67},
310 .vsync_pol = {0x00},
311 .int_pro_mode = {0x00},
312 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
313 .h_sync_gen = {0x6c, 0x50, 0x02},
314 .v_sync_gen1 = {0x0a, 0x50, 0x00},
315 .v_sync_gen2 = {0x01, 0x10, 0x00},
316 .v_sync_gen3 = {0x01, 0x10, 0x00},
317 /* other don't care */
318 },
319 .tg = {
320 0x00, /* cmd */
321 0x72, 0x06, /* h_fsz */
322 0x71, 0x01, 0x01, 0x05, /* hact */
323 0xee, 0x02, /* v_fsz */
324 0x01, 0x00, 0x33, 0x02, /* vsync */
325 0x1e, 0x00, 0xd0, 0x02, /* vact */
326 0x33, 0x02, /* field_chg */
327 0x49, 0x02, /* vact_st2 */
328 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
329 0x01, 0x00, 0x33, 0x02, /* field top/bot */
330 },
331};
332
333static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
334 .core = {
335 .h_blank = {0xd0, 0x02},
336 .v_blank = {0x32, 0xB2, 0x00},
337 .h_v_line = {0x65, 0x04, 0xa5},
338 .vsync_pol = {0x00},
339 .int_pro_mode = {0x01},
340 .v_blank_f = {0x49, 0x2A, 0x23},
341 .h_sync_gen = {0x0E, 0xEA, 0x08},
342 .v_sync_gen1 = {0x07, 0x20, 0x00},
343 .v_sync_gen2 = {0x39, 0x42, 0x23},
344 .v_sync_gen3 = {0x38, 0x87, 0x73},
345 /* other don't care */
346 },
347 .tg = {
348 0x00, /* cmd */
349 0x50, 0x0A, /* h_fsz */
350 0xCF, 0x02, 0x81, 0x07, /* hact */
351 0x65, 0x04, /* v_fsz */
352 0x01, 0x00, 0x33, 0x02, /* vsync */
353 0x16, 0x00, 0x1c, 0x02, /* vact */
354 0x33, 0x02, /* field_chg */
355 0x49, 0x02, /* vact_st2 */
356 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
357 0x01, 0x00, 0x33, 0x02, /* field top/bot */
358 },
359}; 210};
360 211
361static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = { 212/* list of phy config settings */
362 .core = { 213static const struct hdmiphy_config hdmiphy_v13_configs[] = {
363 .h_blank = {0xd0, 0x02}, 214 {
364 .v_blank = {0x65, 0x6c, 0x01}, 215 .pixel_clock = 27000000,
365 .h_v_line = {0x65, 0x04, 0xa5}, 216 .conf = {
366 .vsync_pol = {0x00}, 217 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
367 .int_pro_mode = {0x00}, 218 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
368 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */ 219 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
369 .h_sync_gen = {0x0e, 0xea, 0x08}, 220 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
370 .v_sync_gen1 = {0x09, 0x40, 0x00}, 221 },
371 .v_sync_gen2 = {0x01, 0x10, 0x00},
372 .v_sync_gen3 = {0x01, 0x10, 0x00},
373 /* other don't care */
374 },
375 .tg = {
376 0x00, /* cmd */
377 0x50, 0x0A, /* h_fsz */
378 0xCF, 0x02, 0x81, 0x07, /* hact */
379 0x65, 0x04, /* v_fsz */
380 0x01, 0x00, 0x33, 0x02, /* vsync */
381 0x2d, 0x00, 0x38, 0x04, /* vact */
382 0x33, 0x02, /* field_chg */
383 0x48, 0x02, /* vact_st2 */
384 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
385 0x01, 0x00, 0x33, 0x02, /* field top/bot */
386 }, 222 },
387}; 223 {
388 224 .pixel_clock = 27027000,
389static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = { 225 .conf = {
390 .core = { 226 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
391 .h_blank = {0x18, 0x01}, 227 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
392 .v_blank = {0x32, 0xB2, 0x00}, 228 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
393 .h_v_line = {0x65, 0x84, 0x89}, 229 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
394 .vsync_pol = {0x00}, 230 },
395 .int_pro_mode = {0x01},
396 .v_blank_f = {0x49, 0x2A, 0x23},
397 .h_sync_gen = {0x56, 0x08, 0x02},
398 .v_sync_gen1 = {0x07, 0x20, 0x00},
399 .v_sync_gen2 = {0x39, 0x42, 0x23},
400 .v_sync_gen3 = {0xa4, 0x44, 0x4a},
401 /* other don't care */
402 }, 231 },
403 .tg = { 232 {
404 0x00, /* cmd */ 233 .pixel_clock = 74176000,
405 0x98, 0x08, /* h_fsz */ 234 .conf = {
406 0x17, 0x01, 0x81, 0x07, /* hact */ 235 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
407 0x65, 0x04, /* v_fsz */ 236 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
408 0x01, 0x00, 0x33, 0x02, /* vsync */ 237 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
409 0x16, 0x00, 0x1c, 0x02, /* vact */ 238 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
410 0x33, 0x02, /* field_chg */ 239 },
411 0x49, 0x02, /* vact_st2 */
412 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
413 0x01, 0x00, 0x33, 0x02, /* field top/bot */
414 }, 240 },
415}; 241 {
416 242 .pixel_clock = 74250000,
417static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { 243 .conf = {
418 .core = { 244 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
419 .h_blank = {0x18, 0x01}, 245 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
420 .v_blank = {0x65, 0x6c, 0x01}, 246 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
421 .h_v_line = {0x65, 0x84, 0x89}, 247 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
422 .vsync_pol = {0x00}, 248 },
423 .int_pro_mode = {0x00},
424 .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
425 .h_sync_gen = {0x56, 0x08, 0x02},
426 .v_sync_gen1 = {0x09, 0x40, 0x00},
427 .v_sync_gen2 = {0x01, 0x10, 0x00},
428 .v_sync_gen3 = {0x01, 0x10, 0x00},
429 /* other don't care */
430 }, 249 },
431 .tg = { 250 {
432 0x00, /* cmd */ 251 .pixel_clock = 148500000,
433 0x98, 0x08, /* h_fsz */ 252 .conf = {
434 0x17, 0x01, 0x81, 0x07, /* hact */ 253 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
435 0x65, 0x04, /* v_fsz */ 254 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
436 0x01, 0x00, 0x33, 0x02, /* vsync */ 255 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
437 0x2d, 0x00, 0x38, 0x04, /* vact */ 256 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
438 0x33, 0x02, /* field_chg */ 257 },
439 0x48, 0x02, /* vact_st2 */
440 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
441 0x01, 0x00, 0x33, 0x02, /* field top/bot */
442 }, 258 },
443}; 259};
444 260
445static const struct hdmi_v13_conf hdmi_v13_confs[] = {
446 { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
447 &hdmi_v13_conf_720p60 },
448 { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
449 &hdmi_v13_conf_720p60 },
450 { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
451 &hdmi_v13_conf_480p },
452 { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
453 &hdmi_v13_conf_1080i50 },
454 { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
455 &hdmi_v13_conf_1080p50 },
456 { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
457 &hdmi_v13_conf_1080i60 },
458 { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
459 &hdmi_v13_conf_1080p60 },
460};
461
462/* HDMI Version 1.4 */
463struct hdmiphy_config {
464 int pixel_clock;
465 u8 conf[32];
466};
467
468/* list of all required phy config settings */
469static const struct hdmiphy_config hdmiphy_v14_configs[] = { 261static const struct hdmiphy_config hdmiphy_v14_configs[] = {
470 { 262 {
471 .pixel_clock = 25200000, 263 .pixel_clock = 25200000,
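
The point of this restructuring is that hdmi_context keeps a single mode_conf whatever the IP version: pixel_clock and cea_video_id live in the common hdmi_conf_regs, the version-specific timing registers sit in a union selected by hdata->type, and both PHY generations share one hdmiphy_config shape so a common hdmi_find_phy_conf() (further down in this diff) can walk either table. Access then looks like this sketch, where 'apply_v13'/'apply_v14' are hypothetical stand-ins for the timing_apply functions:

    /* Sketch: picking the per-version register block out of the new union. */
    struct hdmi_conf_regs *mc = &hdata->mode_conf;

    if (hdata->type == HDMI_TYPE13)
        apply_v13(&mc->conf.v13_conf.core, &mc->conf.v13_conf.tg);
    else
        apply_v14(&mc->conf.v14_conf.core, &mc->conf.v14_conf.tg);
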
@@ -873,22 +665,6 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
873 hdmi_v14_regs_dump(hdata, prefix); 665 hdmi_v14_regs_dump(hdata, prefix);
874} 666}
875 667
876static int hdmi_v13_conf_index(struct drm_display_mode *mode)
877{
878 int i;
879
880 for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
881 if (hdmi_v13_confs[i].width == mode->hdisplay &&
882 hdmi_v13_confs[i].height == mode->vdisplay &&
883 hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
884 hdmi_v13_confs[i].interlace ==
885 ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
886 true : false))
887 return i;
888
889 return -EINVAL;
890}
891
892static u8 hdmi_chksum(struct hdmi_context *hdata, 668static u8 hdmi_chksum(struct hdmi_context *hdata,
893 u32 start, u8 len, u32 hdr_sum) 669 u32 start, u8 len, u32 hdr_sum)
894{ 670{
@@ -943,11 +719,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
943 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | 719 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
944 AVI_SAME_AS_PIC_ASPECT_RATIO); 720 AVI_SAME_AS_PIC_ASPECT_RATIO);
945 721
946 if (hdata->type == HDMI_TYPE13) 722 vic = hdata->mode_conf.cea_video_id;
947 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
948 else
949 vic = hdata->mode_conf.cea_video_id;
950
951 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); 723 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
952 724
953 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), 725 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
@@ -1000,63 +772,34 @@ static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
1000 return raw_edid; 772 return raw_edid;
1001} 773}
1002 774
1003static int hdmi_v13_check_timing(struct fb_videomode *check_timing) 775static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
1004{ 776{
1005 int i; 777 const struct hdmiphy_config *confs;
1006 778 int count, i;
1007 DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
1008 check_timing->xres, check_timing->yres,
1009 check_timing->refresh, (check_timing->vmode &
1010 FB_VMODE_INTERLACED) ? true : false);
1011
1012 for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
1013 if (hdmi_v13_confs[i].width == check_timing->xres &&
1014 hdmi_v13_confs[i].height == check_timing->yres &&
1015 hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
1016 hdmi_v13_confs[i].interlace ==
1017 ((check_timing->vmode & FB_VMODE_INTERLACED) ?
1018 true : false))
1019 return 0;
1020 779
1021 /* TODO */ 780 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1022
1023 return -EINVAL;
1024}
1025 781
1026static int hdmi_v14_find_phy_conf(int pixel_clock) 782 if (hdata->type == HDMI_TYPE13) {
1027{ 783 confs = hdmiphy_v13_configs;
1028 int i; 784 count = ARRAY_SIZE(hdmiphy_v13_configs);
785 } else if (hdata->type == HDMI_TYPE14) {
786 confs = hdmiphy_v14_configs;
787 count = ARRAY_SIZE(hdmiphy_v14_configs);
788 } else
789 return -EINVAL;
1029 790
1030 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) { 791 for (i = 0; i < count; i++)
1031 if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock) 792 if (confs[i].pixel_clock == pixel_clock)
1032 return i; 793 return i;
1033 }
1034 794
1035 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); 795 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
1036 return -EINVAL; 796 return -EINVAL;
1037} 797}
1038 798
1039static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
1040{
1041 int i;
1042
1043 DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
1044 check_timing->xres, check_timing->yres,
1045 check_timing->refresh, check_timing->pixclock,
1046 (check_timing->vmode & FB_VMODE_INTERLACED) ?
1047 true : false);
1048
1049 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
1050 if (hdmiphy_v14_configs[i].pixel_clock ==
1051 check_timing->pixclock)
1052 return 0;
1053
1054 return -EINVAL;
1055}
1056
1057static int hdmi_check_timing(void *ctx, struct fb_videomode *timing) 799static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
1058{ 800{
1059 struct hdmi_context *hdata = ctx; 801 struct hdmi_context *hdata = ctx;
802 int ret;
1060 803
1061 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 804 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1062 805
@@ -1064,10 +807,10 @@ static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
1064 timing->yres, timing->refresh, 807 timing->yres, timing->refresh,
1065 timing->vmode); 808 timing->vmode);
1066 809
1067 if (hdata->type == HDMI_TYPE13) 810 ret = hdmi_find_phy_conf(hdata, timing->pixclock);
1068 return hdmi_v13_check_timing(timing); 811 if (ret < 0)
1069 else 812 return ret;
1070 return hdmi_v14_check_timing(timing); 813 return 0;
1071} 814}
1072 815
1073static void hdmi_set_acr(u32 freq, u8 *acr) 816static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1301,10 +1044,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1301 1044
1302static void hdmi_v13_timing_apply(struct hdmi_context *hdata) 1045static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1303{ 1046{
1304 const struct hdmi_v13_preset_conf *conf = 1047 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
1305 hdmi_v13_confs[hdata->cur_conf].conf; 1048 const struct hdmi_v13_core_regs *core =
1306 const struct hdmi_v13_core_regs *core = &conf->core; 1049 &hdata->mode_conf.conf.v13_conf.core;
1307 const struct hdmi_v13_tg_regs *tg = &conf->tg;
1308 int tries; 1050 int tries;
1309 1051
1310 /* setting core registers */ 1052 /* setting core registers */
@@ -1334,34 +1076,34 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1334 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); 1076 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
1335 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); 1077 hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
1336 /* Timing generator registers */ 1078 /* Timing generator registers */
1337 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); 1079 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
1338 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); 1080 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
1339 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); 1081 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
1340 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); 1082 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
1341 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); 1083 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
1342 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); 1084 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
1343 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); 1085 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
1344 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); 1086 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
1345 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); 1087 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
1346 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); 1088 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
1347 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); 1089 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
1348 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); 1090 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
1349 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); 1091 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
1350 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); 1092 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
1351 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); 1093 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
1352 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); 1094 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
1353 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); 1095 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
1354 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); 1096 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
1355 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); 1097 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
1356 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); 1098 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
1357 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); 1099 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
1358 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); 1100 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
1359 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); 1101 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
1360 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); 1102 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
1361 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); 1103 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
1362 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); 1104 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
1363 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); 1105 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
1364 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); 1106 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
1365 1107
1366 /* waiting for HDMIPHY's PLL to get to steady state */ 1108 /* waiting for HDMIPHY's PLL to get to steady state */
1367 for (tries = 100; tries; --tries) { 1109 for (tries = 100; tries; --tries) {
@@ -1391,8 +1133,9 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1391 1133
1392static void hdmi_v14_timing_apply(struct hdmi_context *hdata) 1134static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1393{ 1135{
1394 struct hdmi_core_regs *core = &hdata->mode_conf.core; 1136 const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1395 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg; 1137 const struct hdmi_v14_core_regs *core =
1138 &hdata->mode_conf.conf.v14_conf.core;
1396 int tries; 1139 int tries;
1397 1140
1398 /* setting core registers */ 1141 /* setting core registers */
@@ -1624,17 +1367,16 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1624 } 1367 }
1625 1368
1626 /* pixel clock */ 1369 /* pixel clock */
1627 if (hdata->type == HDMI_TYPE13) { 1370 i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
1628 hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; 1371 if (i < 0) {
1629 } else { 1372 DRM_ERROR("failed to find hdmiphy conf\n");
1630 i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock); 1373 return;
1631 if (i < 0) { 1374 }
1632 DRM_ERROR("failed to find hdmiphy conf\n");
1633 return;
1634 }
1635 1375
1376 if (hdata->type == HDMI_TYPE13)
1377 hdmiphy_data = hdmiphy_v13_configs[i].conf;
1378 else
1636 hdmiphy_data = hdmiphy_v14_configs[i].conf; 1379 hdmiphy_data = hdmiphy_v14_configs[i].conf;
1637 }
1638 1380
1639 memcpy(buffer, hdmiphy_data, 32); 1381 memcpy(buffer, hdmiphy_data, 32);
1640 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); 1382 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -1687,75 +1429,121 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
1687 hdmi_regs_dump(hdata, "start"); 1429 hdmi_regs_dump(hdata, "start");
1688} 1430}
1689 1431
1690static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, 1432static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
1691 const struct drm_display_mode *mode,
1692 struct drm_display_mode *adjusted_mode)
1693{ 1433{
1694 struct drm_display_mode *m; 1434 int i;
1695 struct hdmi_context *hdata = ctx; 1435 BUG_ON(num_bytes > 4);
1696 int index; 1436 for (i = 0; i < num_bytes; i++)
1437 reg_pair[i] = (value >> (8 * i)) & 0xff;
1438}
1697 1439
1698 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1440static void hdmi_v13_mode_set(struct hdmi_context *hdata,
1441 struct drm_display_mode *m)
1442{
1443 struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
1444 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
1445 unsigned int val;
1699 1446
1700 drm_mode_set_crtcinfo(adjusted_mode, 0); 1447 hdata->mode_conf.cea_video_id =
1448 drm_match_cea_mode((struct drm_display_mode *)m);
1449 hdata->mode_conf.pixel_clock = m->clock * 1000;
1701 1450
1702 if (hdata->type == HDMI_TYPE13) 1451 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1703 index = hdmi_v13_conf_index(adjusted_mode); 1452 hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
1704 else
1705 index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
1706 1453
1707 /* just return if user desired mode exists. */ 1454 val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
1708 if (index >= 0) 1455 hdmi_set_reg(core->vsync_pol, 1, val);
1709 return; 1456
1457 val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
1458 hdmi_set_reg(core->int_pro_mode, 1, val);
1459
1460 val = (m->hsync_start - m->hdisplay - 2);
1461 val |= ((m->hsync_end - m->hdisplay - 2) << 10);
1462 val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
1463 hdmi_set_reg(core->h_sync_gen, 3, val);
1710 1464
1711 /* 1465 /*
1712 * otherwise, find the most suitable mode among modes and change it 1466 * Quirk requirement for exynos HDMI IP design,
1713 * to adjusted_mode. 1467 * 2 pixels less than the actual calculation for hsync_start
1468 * and end.
1714 */ 1469 */
1715 list_for_each_entry(m, &connector->modes, head) { 1470
1716 if (hdata->type == HDMI_TYPE13) 1471 /* Following values & calculations differ for different type of modes */
1717 index = hdmi_v13_conf_index(m); 1472 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1718 else 1473 /* Interlaced Mode */
1719 index = hdmi_v14_find_phy_conf(m->clock * 1000); 1474 val = ((m->vsync_end - m->vdisplay) / 2);
1720 1475 val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
1721 if (index >= 0) { 1476 hdmi_set_reg(core->v_sync_gen1, 3, val);
1722 struct drm_mode_object base; 1477
1723 struct list_head head; 1478 val = m->vtotal / 2;
1724 1479 val |= ((m->vtotal - m->vdisplay) / 2) << 11;
1725 DRM_INFO("desired mode doesn't exist so\n"); 1480 hdmi_set_reg(core->v_blank, 3, val);
1726 DRM_INFO("use the most suitable mode among modes.\n"); 1481
1727 1482 val = (m->vtotal +
1728 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n", 1483 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
1729 m->hdisplay, m->vdisplay, m->vrefresh); 1484 val |= m->vtotal << 11;
1730 1485 hdmi_set_reg(core->v_blank_f, 3, val);
1731 /* preserve display mode header while copying. */ 1486
1732 head = adjusted_mode->head; 1487 val = ((m->vtotal / 2) + 7);
1733 base = adjusted_mode->base; 1488 val |= ((m->vtotal / 2) + 2) << 12;
1734 memcpy(adjusted_mode, m, sizeof(*m)); 1489 hdmi_set_reg(core->v_sync_gen2, 3, val);
1735 adjusted_mode->head = head; 1490
1736 adjusted_mode->base = base; 1491 val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
1737 break; 1492 val |= ((m->htotal / 2) +
1738 } 1493 (m->hsync_start - m->hdisplay)) << 12;
1494 hdmi_set_reg(core->v_sync_gen3, 3, val);
1495
1496 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1497 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1498
1499 hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
1500 } else {
1501 /* Progressive Mode */
1502
1503 val = m->vtotal;
1504 val |= (m->vtotal - m->vdisplay) << 11;
1505 hdmi_set_reg(core->v_blank, 3, val);
1506
1507 hdmi_set_reg(core->v_blank_f, 3, 0);
1508
1509 val = (m->vsync_end - m->vdisplay);
1510 val |= ((m->vsync_start - m->vdisplay) << 12);
1511 hdmi_set_reg(core->v_sync_gen1, 3, val);
1512
1513 hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */
1514 hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */
1515 hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
1516 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
1517 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1739 } 1518 }
1740}
1741 1519
1742static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) 1520 /* Timing generator registers */
1743{ 1521 hdmi_set_reg(tg->cmd, 1, 0x0);
1744 int i; 1522 hdmi_set_reg(tg->h_fsz, 2, m->htotal);
1745 BUG_ON(num_bytes > 4); 1523 hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
1746 for (i = 0; i < num_bytes; i++) 1524 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1747 reg_pair[i] = (value >> (8 * i)) & 0xff; 1525 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1526 hdmi_set_reg(tg->vsync, 2, 0x1);
1527 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1528 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
1529 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
1530 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1531 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1532 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1533 hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
1748} 1534}
1749 1535
1750static void hdmi_v14_mode_set(struct hdmi_context *hdata, 1536static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1751 struct drm_display_mode *m) 1537 struct drm_display_mode *m)
1752{ 1538{
1753 struct hdmi_core_regs *core = &hdata->mode_conf.core; 1539 struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
1754 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg; 1540 struct hdmi_v14_core_regs *core =
1755 1541 &hdata->mode_conf.conf.v14_conf.core;
1756 hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
1757 1542
1543 hdata->mode_conf.cea_video_id =
1544 drm_match_cea_mode((struct drm_display_mode *)m);
1758 hdata->mode_conf.pixel_clock = m->clock * 1000; 1545 hdata->mode_conf.pixel_clock = m->clock * 1000;
1546
1759 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); 1547 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1760 hdmi_set_reg(core->v_line, 2, m->vtotal); 1548 hdmi_set_reg(core->v_line, 2, m->vtotal);
1761 hdmi_set_reg(core->h_line, 2, m->htotal); 1549 hdmi_set_reg(core->h_line, 2, m->htotal);
@@ -1852,25 +1640,22 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1852 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ 1640 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1853 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ 1641 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1854 hdmi_set_reg(tg->tg_3d, 1, 0x0); 1642 hdmi_set_reg(tg->tg_3d, 1, 0x0);
1855
1856} 1643}
1857 1644
1858static void hdmi_mode_set(void *ctx, void *mode) 1645static void hdmi_mode_set(void *ctx, void *mode)
1859{ 1646{
1860 struct hdmi_context *hdata = ctx; 1647 struct hdmi_context *hdata = ctx;
1861 int conf_idx; 1648 struct drm_display_mode *m = mode;
1862 1649
1863 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1650 DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n",
1651 __func__, m->hdisplay, m->vdisplay,
1652 m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
 1653			"INTERLACED" : "PROGRESSIVE");
1864 1654
1865 if (hdata->type == HDMI_TYPE13) { 1655 if (hdata->type == HDMI_TYPE13)
1866 conf_idx = hdmi_v13_conf_index(mode); 1656 hdmi_v13_mode_set(hdata, mode);
1867 if (conf_idx >= 0) 1657 else
1868 hdata->cur_conf = conf_idx;
1869 else
1870 DRM_DEBUG_KMS("not supported mode\n");
1871 } else {
1872 hdmi_v14_mode_set(hdata, mode); 1658 hdmi_v14_mode_set(hdata, mode);
1873 }
1874} 1659}
1875 1660
1876static void hdmi_get_max_resol(void *ctx, unsigned int *width, 1661static void hdmi_get_max_resol(void *ctx, unsigned int *width,
@@ -1983,7 +1768,6 @@ static struct exynos_hdmi_ops hdmi_ops = {
1983 .check_timing = hdmi_check_timing, 1768 .check_timing = hdmi_check_timing,
1984 1769
1985 /* manager */ 1770 /* manager */
1986 .mode_fixup = hdmi_mode_fixup,
1987 .mode_set = hdmi_mode_set, 1771 .mode_set = hdmi_mode_set,
1988 .get_max_resol = hdmi_get_max_resol, 1772 .get_max_resol = hdmi_get_max_resol,
1989 .commit = hdmi_commit, 1773 .commit = hdmi_commit,
@@ -2023,27 +1807,27 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
2023 1807
2024 /* get clocks, power */ 1808 /* get clocks, power */
2025 res->hdmi = devm_clk_get(dev, "hdmi"); 1809 res->hdmi = devm_clk_get(dev, "hdmi");
2026 if (IS_ERR_OR_NULL(res->hdmi)) { 1810 if (IS_ERR(res->hdmi)) {
2027 DRM_ERROR("failed to get clock 'hdmi'\n"); 1811 DRM_ERROR("failed to get clock 'hdmi'\n");
2028 goto fail; 1812 goto fail;
2029 } 1813 }
2030 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 1814 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2031 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 1815 if (IS_ERR(res->sclk_hdmi)) {
2032 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 1816 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2033 goto fail; 1817 goto fail;
2034 } 1818 }
2035 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); 1819 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2036 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 1820 if (IS_ERR(res->sclk_pixel)) {
2037 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 1821 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2038 goto fail; 1822 goto fail;
2039 } 1823 }
2040 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); 1824 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2041 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 1825 if (IS_ERR(res->sclk_hdmiphy)) {
2042 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 1826 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2043 goto fail; 1827 goto fail;
2044 } 1828 }
2045 res->hdmiphy = devm_clk_get(dev, "hdmiphy"); 1829 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2046 if (IS_ERR_OR_NULL(res->hdmiphy)) { 1830 if (IS_ERR(res->hdmiphy)) {
2047 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 1831 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2048 goto fail; 1832 goto fail;
2049 } 1833 }
@@ -2221,11 +2005,6 @@ static int hdmi_probe(struct platform_device *pdev)
2221 } 2005 }
2222 2006
2223 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2007 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2224 if (!res) {
2225 DRM_ERROR("failed to find registers\n");
2226 return -ENOENT;
2227 }
2228
2229 hdata->regs = devm_ioremap_resource(&pdev->dev, res); 2008 hdata->regs = devm_ioremap_resource(&pdev->dev, res);
2230 if (IS_ERR(hdata->regs)) 2009 if (IS_ERR(hdata->regs))
2231 return PTR_ERR(hdata->regs); 2010 return PTR_ERR(hdata->regs);
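
The hdmi_set_reg() helper added above packs a multi-byte register value into consecutive byte-wide fields, least-significant byte first; this is how the v13/v14 mode_set paths fill values such as core->h_v_line. A minimal standalone sketch of the same split, assuming only the C standard library (the 1080p totals below are illustrative, not taken from the patch):

/* Sketch: little-endian split of a value across byte-wide register
 * fields, mirroring the hdmi_set_reg() helper in the patch above. */
#include <assert.h>
#include <stdint.h>

static void set_reg_bytes(uint8_t *reg_pair, int num_bytes, uint32_t value)
{
	int i;

	assert(num_bytes <= 4);		/* the driver uses BUG_ON() here */
	for (i = 0; i < num_bytes; i++)
		reg_pair[i] = (value >> (8 * i)) & 0xff;
}

int main(void)
{
	uint8_t h_v_line[3];

	/* v13 mode_set writes (htotal << 12) | vtotal as three bytes */
	set_reg_bytes(h_v_line, 3, (2200u << 12) | 1125u);	/* 0x898465 */
	assert(h_v_line[0] == 0x65);
	assert(h_v_line[1] == 0x84);
	assert(h_v_line[2] == 0x89);
	return 0;
}
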
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 2f4f72f07047..ec3e376b7e01 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -643,12 +643,14 @@ static void mixer_win_reset(struct mixer_context *ctx)
643 /* setting graphical layers */ 643 /* setting graphical layers */
644 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ 644 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
645 val |= MXR_GRP_CFG_WIN_BLEND_EN; 645 val |= MXR_GRP_CFG_WIN_BLEND_EN;
646 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
647 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
648 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ 646 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
649 647
650 /* the same configuration for both layers */ 648 /* Don't blend layer 0 onto the mixer background */
651 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); 649 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
650
651 /* Blend layer 1 into layer 0 */
652 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
653 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
652 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); 654 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
653 655
654 /* setting video layers */ 656 /* setting video layers */
@@ -820,7 +822,6 @@ static void mixer_win_disable(void *ctx, int win)
820 822
821static int mixer_check_timing(void *ctx, struct fb_videomode *timing) 823static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
822{ 824{
823 struct mixer_context *mixer_ctx = ctx;
824 u32 w, h; 825 u32 w, h;
825 826
826 w = timing->xres; 827 w = timing->xres;
@@ -831,9 +832,6 @@ static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
831 timing->refresh, (timing->vmode & 832 timing->refresh, (timing->vmode &
832 FB_VMODE_INTERLACED) ? true : false); 833 FB_VMODE_INTERLACED) ? true : false);
833 834
834 if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
835 return 0;
836
837 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) || 835 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
838 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || 836 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
839 (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080)) 837 (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
@@ -1047,13 +1045,13 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
1047 spin_lock_init(&mixer_res->reg_slock); 1045 spin_lock_init(&mixer_res->reg_slock);
1048 1046
1049 mixer_res->mixer = devm_clk_get(dev, "mixer"); 1047 mixer_res->mixer = devm_clk_get(dev, "mixer");
1050 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 1048 if (IS_ERR(mixer_res->mixer)) {
1051 dev_err(dev, "failed to get clock 'mixer'\n"); 1049 dev_err(dev, "failed to get clock 'mixer'\n");
1052 return -ENODEV; 1050 return -ENODEV;
1053 } 1051 }
1054 1052
1055 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); 1053 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
1056 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 1054 if (IS_ERR(mixer_res->sclk_hdmi)) {
1057 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 1055 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
1058 return -ENODEV; 1056 return -ENODEV;
1059 } 1057 }
@@ -1096,17 +1094,17 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1096 struct resource *res; 1094 struct resource *res;
1097 1095
1098 mixer_res->vp = devm_clk_get(dev, "vp"); 1096 mixer_res->vp = devm_clk_get(dev, "vp");
1099 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1097 if (IS_ERR(mixer_res->vp)) {
1100 dev_err(dev, "failed to get clock 'vp'\n"); 1098 dev_err(dev, "failed to get clock 'vp'\n");
1101 return -ENODEV; 1099 return -ENODEV;
1102 } 1100 }
1103 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); 1101 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1104 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1102 if (IS_ERR(mixer_res->sclk_mixer)) {
1105 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1103 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1106 return -ENODEV; 1104 return -ENODEV;
1107 } 1105 }
1108 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac"); 1106 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1109 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1107 if (IS_ERR(mixer_res->sclk_dac)) {
1110 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1108 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1111 return -ENODEV; 1109 return -ENODEV;
1112 } 1110 }
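
Both files above switch their devm_clk_get() error checks from IS_ERR_OR_NULL() to IS_ERR(): the clk API reports failure through ERR_PTR() encodings rather than NULL, so the extra NULL test only masks real error codes. A sketch of the resulting pattern (get_mixer_clock() is a hypothetical helper, not part of the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: devm_clk_get() signals failure only via ERR_PTR(), never NULL,
 * so IS_ERR() alone is the correct test. */
static int get_mixer_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "mixer");

	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clock 'mixer'\n");
		/* the driver above returns -ENODEV; returning PTR_ERR(clk)
		 * instead would also preserve -EPROBE_DEFER */
		return -ENODEV;
	}
	*out = clk;
	return 0;
}
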
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index b4f9ca1fd851..30496134a3d0 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -661,9 +661,8 @@
661#define EXYNOS_CLKSRC_SCLK (1 << 1) 661#define EXYNOS_CLKSRC_SCLK (1 << 1)
662 662
663/* SYSREG for FIMC writeback */ 663/* SYSREG for FIMC writeback */
664#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218) 664#define SYSREG_CAMERA_BLK (0x0218)
665#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c) 665#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
666#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23) 666#define SYSREG_FIMD0WB_DEST_SHIFT 23
667#define SYSREG_FIMD0WB_DEST_SHIFT 23
668 667
669#endif /* EXYNOS_REGS_FIMC_H */ 668#endif /* EXYNOS_REGS_FIMC_H */
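
The surviving SYSREG_FIMD0WB_DEST_MASK/_SHIFT pair is the usual mask-and-shift description of a register field, here a 2-bit field at bit 23. A small sketch of how such a pair is typically consumed (the get/set helpers are hypothetical, not from the driver):

#include <linux/types.h>

#define SYSREG_FIMD0WB_DEST_MASK	(0x3 << 23)
#define SYSREG_FIMD0WB_DEST_SHIFT	23

/* Sketch: extract and update the 2-bit writeback-destination field. */
static inline u32 fimd0wb_dest_get(u32 reg)
{
	return (reg & SYSREG_FIMD0WB_DEST_MASK) >> SYSREG_FIMD0WB_DEST_SHIFT;
}

static inline u32 fimd0wb_dest_set(u32 reg, u32 dest)
{
	reg &= ~SYSREG_FIMD0WB_DEST_MASK;
	return reg | ((dest << SYSREG_FIMD0WB_DEST_SHIFT) &
		      SYSREG_FIMD0WB_DEST_MASK);
}
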
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1188f0fe7e4f..1f6e2dfaaeae 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -2,10 +2,15 @@ config DRM_GMA500
2 tristate "Intel GMA5/600 KMS Framebuffer" 2 tristate "Intel GMA5/600 KMS Framebuffer"
3 depends on DRM && PCI && X86 3 depends on DRM && PCI && X86
4 select FB_CFB_COPYAREA 4 select FB_CFB_COPYAREA
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT 6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_TTM 8 select DRM_TTM
9 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
10 select ACPI_VIDEO if ACPI
11 select BACKLIGHT_CLASS_DEVICE if ACPI
12 select VIDEO_OUTPUT_CONTROL if ACPI
13 select INPUT if ACPI
9 help 14 help
10 Say yes for an experimental 2D KMS framebuffer driver for the 15 Say yes for an experimental 2D KMS framebuffer driver for the
11 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics 16 Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 8c175345d85c..7b8386fc3024 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -276,6 +276,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
276 goto failed_connector; 276 goto failed_connector;
277 277
278 connector = &psb_intel_connector->base; 278 connector = &psb_intel_connector->base;
279 connector->polled = DRM_CONNECTOR_POLL_HPD;
279 drm_connector_init(dev, connector, 280 drm_connector_init(dev, connector,
280 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 281 &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
281 282
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index e223b500022e..464153d9d2df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -319,6 +319,7 @@ void cdv_hdmi_init(struct drm_device *dev,
319 goto err_priv; 319 goto err_priv;
320 320
321 connector = &psb_intel_connector->base; 321 connector = &psb_intel_connector->base;
322 connector->polled = DRM_CONNECTOR_POLL_HPD;
322 encoder = &psb_intel_encoder->base; 323 encoder = &psb_intel_encoder->base;
323 drm_connector_init(dev, connector, 324 drm_connector_init(dev, connector,
324 &cdv_hdmi_connector_funcs, 325 &cdv_hdmi_connector_funcs,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2590cac84257..1534e220097a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -431,7 +431,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
431 fbdev->psb_fb_helper.fbdev = info; 431 fbdev->psb_fb_helper.fbdev = info;
432 432
433 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 433 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
434 strcpy(info->fix.id, "psbfb"); 434 strcpy(info->fix.id, "psbdrmfb");
435 435
436 info->flags = FBINFO_DEFAULT; 436 info->flags = FBINFO_DEFAULT;
437 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */ 437 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
@@ -772,8 +772,8 @@ void psb_modeset_init(struct drm_device *dev)
772 for (i = 0; i < dev_priv->num_pipe; i++) 772 for (i = 0; i < dev_priv->num_pipe; i++)
773 psb_intel_crtc_init(dev, i, mode_dev); 773 psb_intel_crtc_init(dev, i, mode_dev);
774 774
775 dev->mode_config.max_width = 2048; 775 dev->mode_config.max_width = 4096;
776 dev->mode_config.max_height = 2048; 776 dev->mode_config.max_height = 4096;
777 777
778 psb_setup_outputs(dev); 778 psb_setup_outputs(dev);
779 779
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 054e26e769ec..1f82183536a3 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -80,7 +80,8 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
80 * the GTT. This is protected via the gtt mutex which the caller 80 * the GTT. This is protected via the gtt mutex which the caller
81 * must hold. 81 * must hold.
82 */ 82 */
83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r) 83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
84 int resume)
84{ 85{
85 u32 __iomem *gtt_slot; 86 u32 __iomem *gtt_slot;
86 u32 pte; 87 u32 pte;
@@ -97,8 +98,10 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
97 gtt_slot = psb_gtt_entry(dev, r); 98 gtt_slot = psb_gtt_entry(dev, r);
98 pages = r->pages; 99 pages = r->pages;
99 100
100 /* Make sure changes are visible to the GPU */ 101 if (!resume) {
101 set_pages_array_wc(pages, r->npage); 102 /* Make sure changes are visible to the GPU */
103 set_pages_array_wc(pages, r->npage);
104 }
102 105
103 /* Write our page entries into the GTT itself */ 106 /* Write our page entries into the GTT itself */
104 for (i = r->roll; i < r->npage; i++) { 107 for (i = r->roll; i < r->npage; i++) {
@@ -269,7 +272,7 @@ int psb_gtt_pin(struct gtt_range *gt)
269 ret = psb_gtt_attach_pages(gt); 272 ret = psb_gtt_attach_pages(gt);
270 if (ret < 0) 273 if (ret < 0)
271 goto out; 274 goto out;
272 ret = psb_gtt_insert(dev, gt); 275 ret = psb_gtt_insert(dev, gt, 0);
273 if (ret < 0) { 276 if (ret < 0) {
274 psb_gtt_detach_pages(gt); 277 psb_gtt_detach_pages(gt);
275 goto out; 278 goto out;
@@ -421,9 +424,11 @@ int psb_gtt_init(struct drm_device *dev, int resume)
421 int ret = 0; 424 int ret = 0;
422 uint32_t pte; 425 uint32_t pte;
423 426
424 mutex_init(&dev_priv->gtt_mutex); 427 if (!resume) {
428 mutex_init(&dev_priv->gtt_mutex);
429 psb_gtt_alloc(dev);
430 }
425 431
426 psb_gtt_alloc(dev);
427 pg = &dev_priv->gtt; 432 pg = &dev_priv->gtt;
428 433
429 /* Enable the GTT */ 434 /* Enable the GTT */
@@ -505,7 +510,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
505 /* 510 /*
506 * Map the GTT and the stolen memory area 511 * Map the GTT and the stolen memory area
507 */ 512 */
508 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start, 513 if (!resume)
514 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
509 gtt_pages << PAGE_SHIFT); 515 gtt_pages << PAGE_SHIFT);
510 if (!dev_priv->gtt_map) { 516 if (!dev_priv->gtt_map) {
511 dev_err(dev->dev, "Failure to map gtt.\n"); 517 dev_err(dev->dev, "Failure to map gtt.\n");
@@ -513,7 +519,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
513 goto out_err; 519 goto out_err;
514 } 520 }
515 521
516 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size); 522 if (!resume)
523 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
524 stolen_size);
517 if (!dev_priv->vram_addr) { 525 if (!dev_priv->vram_addr) {
518 dev_err(dev->dev, "Failure to map stolen base.\n"); 526 dev_err(dev->dev, "Failure to map stolen base.\n");
519 ret = -ENOMEM; 527 ret = -ENOMEM;
@@ -549,3 +557,31 @@ out_err:
549 psb_gtt_takedown(dev); 557 psb_gtt_takedown(dev);
550 return ret; 558 return ret;
551} 559}
560
561int psb_gtt_restore(struct drm_device *dev)
562{
563 struct drm_psb_private *dev_priv = dev->dev_private;
564 struct resource *r = dev_priv->gtt_mem->child;
565 struct gtt_range *range;
566 unsigned int restored = 0, total = 0, size = 0;
567
568 /* On resume, the gtt_mutex is already initialized */
569 mutex_lock(&dev_priv->gtt_mutex);
570 psb_gtt_init(dev, 1);
571
572 while (r != NULL) {
573 range = container_of(r, struct gtt_range, resource);
574 if (range->pages) {
575 psb_gtt_insert(dev, range, 1);
576 size += range->resource.end - range->resource.start;
577 restored++;
578 }
579 r = r->sibling;
580 total++;
581 }
582 mutex_unlock(&dev_priv->gtt_mutex);
583 DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
584 total, (size / 1024));
585
586 return 0;
587}
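
The new psb_gtt_restore() walks one level of the stolen-area resource tree through the child/sibling links of struct resource and re-inserts every range that still has backing pages. A minimal sketch of that traversal shape (the visit callback is a stand-in):

#include <linux/ioport.h>

/* Sketch: first-child/next-sibling walk over the direct children of a
 * struct resource, the same shape psb_gtt_restore() uses above. */
static void walk_children(struct resource *parent,
			  void (*visit)(struct resource *))
{
	struct resource *r;

	for (r = parent->child; r != NULL; r = r->sibling)
		visit(r);	/* e.g. re-insert the range into the GTT */
}
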
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index aa1742387f5a..6191d10acf33 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -60,5 +60,5 @@ extern int psb_gtt_pin(struct gtt_range *gt);
60extern void psb_gtt_unpin(struct gtt_range *gt); 60extern void psb_gtt_unpin(struct gtt_range *gt);
61extern void psb_gtt_roll(struct drm_device *dev, 61extern void psb_gtt_roll(struct drm_device *dev,
62 struct gtt_range *gt, int roll); 62 struct gtt_range *gt, int roll);
63 63extern int psb_gtt_restore(struct drm_device *dev);
64#endif 64#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 403fffb03abd..d3497348c4d5 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -218,12 +218,11 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
218 bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT); 218 bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
219 vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type; 219 vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
220 220
221 lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL); 221 lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
222 if (!lvds_bl) { 222 if (!lvds_bl) {
223 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n"); 223 dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
224 return; 224 return;
225 } 225 }
226 memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
227 dev_priv->lvds_bl = lvds_bl; 226 dev_priv->lvds_bl = lvds_bl;
228} 227}
229 228
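
The backlight-data change above collapses a kzalloc() + memcpy() pair into kmemdup(), which allocates and copies in one step and returns NULL on failure just as the original allocation did. A one-function sketch (dup_record() is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: kmemdup() replaces allocate-then-copy; src/len stand in for
 * the VBT backlight record duplicated above. */
static void *dup_record(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);	/* NULL on allocation failure */
}
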
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index c6267c98c9e7..978ae4b25e82 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -19,8 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#ifndef _I830_BIOS_H_ 22#ifndef _INTEL_BIOS_H_
23#define _I830_BIOS_H_ 23#define _INTEL_BIOS_H_
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_dp_helper.h> 26#include <drm/drm_dp_helper.h>
@@ -618,4 +618,4 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
618#define PORT_IDPC 8 618#define PORT_IDPC 8
619#define PORT_IDPD 9 619#define PORT_IDPD 9
620 620
621#endif /* _I830_BIOS_H_ */ 621#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 2d4ab48f07a2..3abf8315f57c 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -92,8 +92,8 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
92{ 92{
93 struct mdfld_dsi_pkg_sender *sender = 93 struct mdfld_dsi_pkg_sender *sender =
94 mdfld_dsi_get_pkg_sender(dsi_config); 94 mdfld_dsi_get_pkg_sender(dsi_config);
95 struct drm_device *dev = sender->dev; 95 struct drm_device *dev;
96 struct drm_psb_private *dev_priv = dev->dev_private; 96 struct drm_psb_private *dev_priv;
97 u32 gen_ctrl_val; 97 u32 gen_ctrl_val;
98 98
99 if (!sender) { 99 if (!sender) {
@@ -101,6 +101,9 @@ void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
101 return; 101 return;
102 } 102 }
103 103
104 dev = sender->dev;
105 dev_priv = dev->dev_private;
106
104 /* Set default display backlight value to 85% (0xd8)*/ 107 /* Set default display backlight value to 85% (0xd8)*/
105 mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1, 108 mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
106 true); 109 true);
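
The mdfld_dsi_brightness_init() change is a dereference-before-check fix: the old initializers read sender->dev before the !sender test could run, so a NULL sender crashed anyway. A condensed sketch of the hazard and the fix (types abbreviated):

struct device;
struct pkg_sender { struct device *dev; };

/* Sketch of the bug fixed above: the initializer dereferences the
 * pointer before the NULL check executes. */
static void broken(struct pkg_sender *sender)
{
	struct device *dev = sender->dev;	/* crashes here if sender is NULL */

	if (!sender)				/* too late to help */
		return;
	(void)dev;
}

static void fixed(struct pkg_sender *sender)
{
	struct device *dev;

	if (!sender)
		return;
	dev = sender->dev;			/* dereference only after the check */
	(void)dev;
}
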
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 88627e3ba1e3..1eb86c79523e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -319,8 +319,7 @@ void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
319 struct hdmi_i2c_dev *i2c_dev; 319 struct hdmi_i2c_dev *i2c_dev;
320 320
321 hdmi_dev = pci_get_drvdata(dev); 321 hdmi_dev = pci_get_drvdata(dev);
322 if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter)) 322 i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);
323 DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
324 323
325 i2c_dev = hdmi_dev->i2c_dev; 324 i2c_dev = hdmi_dev->i2c_dev;
326 kfree(i2c_dev); 325 kfree(i2c_dev);
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 889b854751da..b6b135fcd59c 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -110,6 +110,8 @@ static void gma_resume_display(struct pci_dev *pdev)
110 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); 110 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
111 pci_write_config_word(pdev, PSB_GMCH_CTRL, 111 pci_write_config_word(pdev, PSB_GMCH_CTRL,
112 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED); 112 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
113
114 psb_gtt_restore(dev); /* Rebuild our GTT mappings */
113 dev_priv->ops->restore_regs(dev); 115 dev_priv->ops->restore_regs(dev);
114} 116}
115 117
@@ -313,3 +315,18 @@ int psb_runtime_idle(struct device *dev)
313 else 315 else
314 return 1; 316 return 1;
315} 317}
318
319int gma_power_thaw(struct device *_dev)
320{
321 return gma_power_resume(_dev);
322}
323
324int gma_power_freeze(struct device *_dev)
325{
326 return gma_power_suspend(_dev);
327}
328
329int gma_power_restore(struct device *_dev)
330{
331 return gma_power_resume(_dev);
332}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 1969d2ecb328..56d8708bd41c 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -41,6 +41,9 @@ void gma_power_uninit(struct drm_device *dev);
41 */ 41 */
42int gma_power_suspend(struct device *dev); 42int gma_power_suspend(struct device *dev);
43int gma_power_resume(struct device *dev); 43int gma_power_resume(struct device *dev);
44int gma_power_thaw(struct device *dev);
45int gma_power_freeze(struct device *dev);
46int gma_power_restore(struct device *_dev);
44 47
45/* 48/*
46 * These are the functions the driver should use to wrap all hw access 49 * These are the functions the driver should use to wrap all hw access
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 111e3df9c5de..bddea5807442 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -601,6 +601,9 @@ static void psb_remove(struct pci_dev *pdev)
601static const struct dev_pm_ops psb_pm_ops = { 601static const struct dev_pm_ops psb_pm_ops = {
602 .resume = gma_power_resume, 602 .resume = gma_power_resume,
603 .suspend = gma_power_suspend, 603 .suspend = gma_power_suspend,
604 .thaw = gma_power_thaw,
605 .freeze = gma_power_freeze,
606 .restore = gma_power_restore,
604 .runtime_suspend = psb_runtime_suspend, 607 .runtime_suspend = psb_runtime_suspend,
605 .runtime_resume = psb_runtime_resume, 608 .runtime_resume = psb_runtime_resume,
606 .runtime_idle = psb_runtime_idle, 609 .runtime_idle = psb_runtime_idle,
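
The psb_pm_ops additions route the hibernation entry points through the existing suspend/resume code via the thin gma_power_freeze/thaw/restore wrappers. A sketch of the same dev_pm_ops wiring (my_suspend/my_resume are stand-ins for the driver's handlers):

#include <linux/pm.h>

static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }

/* Sketch: hibernation callbacks reusing the suspend/resume paths,
 * as the gma500 wrappers above do. */
static const struct dev_pm_ops my_pm_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
	.freeze  = my_suspend,	/* quiesce before the hibernation image is written */
	.thaw    = my_resume,	/* image written, devices resume */
	.restore = my_resume,	/* booted kernel has loaded the image */
};
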
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index a7fd6c48b793..6053b8abcd12 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -876,7 +876,6 @@ extern const struct psb_ops cdv_chip_ops;
876#define PSB_D_MSVDX (1 << 9) 876#define PSB_D_MSVDX (1 << 9)
877#define PSB_D_TOPAZ (1 << 10) 877#define PSB_D_TOPAZ (1 << 10)
878 878
879extern int drm_psb_no_fb;
880extern int drm_idle_check_interval; 879extern int drm_idle_check_interval;
881 880
882/* 881/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9edb1902a096..6e8f42b61ff6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -50,119 +50,41 @@ struct psb_intel_p2_t {
50 int p2_slow, p2_fast; 50 int p2_slow, p2_fast;
51}; 51};
52 52
53#define INTEL_P2_NUM 2
54
55struct psb_intel_limit_t { 53struct psb_intel_limit_t {
56 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; 54 struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
57 struct psb_intel_p2_t p2; 55 struct psb_intel_p2_t p2;
58}; 56};
59 57
60#define I8XX_DOT_MIN 25000 58#define INTEL_LIMIT_I9XX_SDVO_DAC 0
61#define I8XX_DOT_MAX 350000 59#define INTEL_LIMIT_I9XX_LVDS 1
62#define I8XX_VCO_MIN 930000
63#define I8XX_VCO_MAX 1400000
64#define I8XX_N_MIN 3
65#define I8XX_N_MAX 16
66#define I8XX_M_MIN 96
67#define I8XX_M_MAX 140
68#define I8XX_M1_MIN 18
69#define I8XX_M1_MAX 26
70#define I8XX_M2_MIN 6
71#define I8XX_M2_MAX 16
72#define I8XX_P_MIN 4
73#define I8XX_P_MAX 128
74#define I8XX_P1_MIN 2
75#define I8XX_P1_MAX 33
76#define I8XX_P1_LVDS_MIN 1
77#define I8XX_P1_LVDS_MAX 6
78#define I8XX_P2_SLOW 4
79#define I8XX_P2_FAST 2
80#define I8XX_P2_LVDS_SLOW 14
81#define I8XX_P2_LVDS_FAST 14 /* No fast option */
82#define I8XX_P2_SLOW_LIMIT 165000
83
84#define I9XX_DOT_MIN 20000
85#define I9XX_DOT_MAX 400000
86#define I9XX_VCO_MIN 1400000
87#define I9XX_VCO_MAX 2800000
88#define I9XX_N_MIN 1
89#define I9XX_N_MAX 6
90#define I9XX_M_MIN 70
91#define I9XX_M_MAX 120
92#define I9XX_M1_MIN 8
93#define I9XX_M1_MAX 18
94#define I9XX_M2_MIN 3
95#define I9XX_M2_MAX 7
96#define I9XX_P_SDVO_DAC_MIN 5
97#define I9XX_P_SDVO_DAC_MAX 80
98#define I9XX_P_LVDS_MIN 7
99#define I9XX_P_LVDS_MAX 98
100#define I9XX_P1_MIN 1
101#define I9XX_P1_MAX 8
102#define I9XX_P2_SDVO_DAC_SLOW 10
103#define I9XX_P2_SDVO_DAC_FAST 5
104#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
105#define I9XX_P2_LVDS_SLOW 14
106#define I9XX_P2_LVDS_FAST 7
107#define I9XX_P2_LVDS_SLOW_LIMIT 112000
108
109#define INTEL_LIMIT_I8XX_DVO_DAC 0
110#define INTEL_LIMIT_I8XX_LVDS 1
111#define INTEL_LIMIT_I9XX_SDVO_DAC 2
112#define INTEL_LIMIT_I9XX_LVDS 3
113 60
114static const struct psb_intel_limit_t psb_intel_limits[] = { 61static const struct psb_intel_limit_t psb_intel_limits[] = {
115 { /* INTEL_LIMIT_I8XX_DVO_DAC */
116 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
117 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
118 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
119 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
120 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
121 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
122 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
123 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
124 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
125 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
126 },
127 { /* INTEL_LIMIT_I8XX_LVDS */
128 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
129 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
130 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
131 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
132 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
133 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
134 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
135 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
136 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
137 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
138 },
139 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 62 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
140 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 63 .dot = {.min = 20000, .max = 400000},
141 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, 64 .vco = {.min = 1400000, .max = 2800000},
142 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, 65 .n = {.min = 1, .max = 6},
143 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, 66 .m = {.min = 70, .max = 120},
144 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, 67 .m1 = {.min = 8, .max = 18},
145 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, 68 .m2 = {.min = 3, .max = 7},
146 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, 69 .p = {.min = 5, .max = 80},
147 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, 70 .p1 = {.min = 1, .max = 8},
148 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 71 .p2 = {.dot_limit = 200000,
149 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = 72 .p2_slow = 10, .p2_fast = 5},
150 I9XX_P2_SDVO_DAC_FAST},
151 }, 73 },
152 { /* INTEL_LIMIT_I9XX_LVDS */ 74 { /* INTEL_LIMIT_I9XX_LVDS */
153 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 75 .dot = {.min = 20000, .max = 400000},
154 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, 76 .vco = {.min = 1400000, .max = 2800000},
155 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, 77 .n = {.min = 1, .max = 6},
156 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, 78 .m = {.min = 70, .max = 120},
157 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, 79 .m1 = {.min = 8, .max = 18},
158 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, 80 .m2 = {.min = 3, .max = 7},
159 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, 81 .p = {.min = 7, .max = 98},
160 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, 82 .p1 = {.min = 1, .max = 8},
161 /* The single-channel range is 25-112Mhz, and dual-channel 83 /* The single-channel range is 25-112Mhz, and dual-channel
162 * is 80-224Mhz. Prefer single channel as much as possible. 84 * is 80-224Mhz. Prefer single channel as much as possible.
163 */ 85 */
164 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 86 .p2 = {.dot_limit = 112000,
165 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, 87 .p2_slow = 14, .p2_fast = 7},
166 }, 88 },
167}; 89};
168 90
@@ -177,9 +99,7 @@ static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
177 return limit; 99 return limit;
178} 100}
179 101
180/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 102static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
181
182static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
183{ 103{
184 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 104 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
185 clock->p = clock->p1 * clock->p2; 105 clock->p = clock->p1 * clock->p2;
@@ -187,22 +107,6 @@ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
187 clock->dot = clock->vco / clock->p; 107 clock->dot = clock->vco / clock->p;
188} 108}
189 109
190/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
191
192static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
193{
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2);
197 clock->dot = clock->vco / clock->p;
198}
199
200static void psb_intel_clock(struct drm_device *dev, int refclk,
201 struct psb_intel_clock_t *clock)
202{
203 return i9xx_clock(refclk, clock);
204}
205
206/** 110/**
207 * Returns whether any output on the specified pipe is of the specified type 111 * Returns whether any output on the specified pipe is of the specified type
208 */ 112 */
@@ -308,7 +212,7 @@ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
308 clock.p1++) { 212 clock.p1++) {
309 int this_err; 213 int this_err;
310 214
311 psb_intel_clock(dev, refclk, &clock); 215 psb_intel_clock(refclk, &clock);
312 216
313 if (!psb_intel_PLL_is_valid 217 if (!psb_intel_PLL_is_valid
314 (crtc, &clock)) 218 (crtc, &clock))
@@ -1068,7 +972,7 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1068 return 0; 972 return 0;
1069} 973}
1070 974
1071void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, 975static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
1072 u16 *green, u16 *blue, uint32_t type, uint32_t size) 976 u16 *green, u16 *blue, uint32_t type, uint32_t size)
1073{ 977{
1074 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 978 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -1149,9 +1053,9 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1149 if ((dpll & PLL_REF_INPUT_MASK) == 1053 if ((dpll & PLL_REF_INPUT_MASK) ==
1150 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1054 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1151 /* XXX: might not be 66MHz */ 1055 /* XXX: might not be 66MHz */
1152 i8xx_clock(66000, &clock); 1056 psb_intel_clock(66000, &clock);
1153 } else 1057 } else
1154 i8xx_clock(48000, &clock); 1058 psb_intel_clock(48000, &clock);
1155 } else { 1059 } else {
1156 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1060 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1157 clock.p1 = 2; 1061 clock.p1 = 2;
@@ -1166,7 +1070,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
1166 else 1070 else
1167 clock.p2 = 2; 1071 clock.p2 = 2;
1168 1072
1169 i8xx_clock(48000, &clock); 1073 psb_intel_clock(48000, &clock);
1170 } 1074 }
1171 1075
1172 /* XXX: It would be nice to validate the clocks, but we can't reuse 1076 /* XXX: It would be nice to validate the clocks, but we can't reuse
@@ -1225,7 +1129,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
1225 return mode; 1129 return mode;
1226} 1130}
1227 1131
1228void psb_intel_crtc_destroy(struct drm_crtc *crtc) 1132static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1229{ 1133{
1230 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1134 struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
1231 struct gtt_range *gt; 1135 struct gtt_range *gt;
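
With the i8xx variants gone, psb_intel_clock() above keeps only the 9xx divisor formula: m = 5*(m1+2) + (m2+2), p = p1*p2, vco = refclk*m/(n+2), dot = vco/p. A worked standalone sketch with made-up divisors that fall inside the INTEL_LIMIT_I9XX_SDVO_DAC ranges (the 96 MHz refclk is only an example):

#include <assert.h>

struct clock { int refclk, n, m1, m2, p1, p2, m, p, vco, dot; };

/* Sketch of the dotclock derivation kept in psb_intel_clock() above;
 * all frequencies in kHz. */
static void derive(struct clock *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = c->refclk * c->m / (c->n + 2);
	c->dot = c->vco / c->p;
}

int main(void)
{
	struct clock c = { .refclk = 96000, .n = 1, .m1 = 12, .m2 = 7,
			   .p1 = 2, .p2 = 10 };

	derive(&c);
	assert(c.m == 79 && c.p == 20);
	assert(c.vco == 2528000);	/* inside the 1.4-2.8 GHz vco window */
	assert(c.dot == 126400);	/* inside the 20-400 MHz dot window */
	return 0;
}
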
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
index 535b49a5e409..3724b971e91c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -21,8 +21,5 @@
21#define _INTEL_DISPLAY_H_ 21#define _INTEL_DISPLAY_H_
22 22
23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 23bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
24void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
25 u16 *green, u16 *blue, uint32_t type, uint32_t size);
26void psb_intel_crtc_destroy(struct drm_crtc *crtc);
27 24
28#endif 25#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 90f2d11e686b..4dcae421a58d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -32,9 +32,6 @@
32/* maximum connectors per crtcs in the mode set */ 32/* maximum connectors per crtcs in the mode set */
33#define INTELFB_CONN_LIMIT 4 33#define INTELFB_CONN_LIMIT 4
34 34
35#define INTEL_I2C_BUS_DVO 1
36#define INTEL_I2C_BUS_SDVO 2
37
38/* Intel Pipe Clone Bit */ 35/* Intel Pipe Clone Bit */
39#define INTEL_HDMIB_CLONE_BIT 1 36#define INTEL_HDMIB_CLONE_BIT 1
40#define INTEL_HDMIC_CLONE_BIT 2 37#define INTEL_HDMIC_CLONE_BIT 2
@@ -68,11 +65,6 @@
68#define INTEL_OUTPUT_DISPLAYPORT 9 65#define INTEL_OUTPUT_DISPLAYPORT 9
69#define INTEL_OUTPUT_EDP 10 66#define INTEL_OUTPUT_EDP 10
70 67
71#define INTEL_DVO_CHIP_NONE 0
72#define INTEL_DVO_CHIP_LVDS 1
73#define INTEL_DVO_CHIP_TMDS 2
74#define INTEL_DVO_CHIP_TVOUT 4
75
76#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 68#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
77#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 69#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
78 70
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index d914719c4b60..0be30e4d146d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -493,7 +493,6 @@
493#define PIPEACONF_DISABLE 0 493#define PIPEACONF_DISABLE 0
494#define PIPEACONF_DOUBLE_WIDE (1 << 30) 494#define PIPEACONF_DOUBLE_WIDE (1 << 30)
495#define PIPECONF_ACTIVE (1 << 30) 495#define PIPECONF_ACTIVE (1 << 30)
496#define I965_PIPECONF_ACTIVE (1 << 30)
497#define PIPECONF_DSIPLL_LOCK (1 << 29) 496#define PIPECONF_DSIPLL_LOCK (1 << 29)
498#define PIPEACONF_SINGLE_WIDE 0 497#define PIPEACONF_SINGLE_WIDE 0
499#define PIPEACONF_PIPE_UNLOCKED 0 498#define PIPEACONF_PIPE_UNLOCKED 0
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a4cc777ab7a6..19e36603b23b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -134,6 +134,9 @@ struct psb_intel_sdvo {
134 134
135 /* Input timings for adjusted_mode */ 135 /* Input timings for adjusted_mode */
136 struct psb_intel_sdvo_dtd input_dtd; 136 struct psb_intel_sdvo_dtd input_dtd;
137
138 /* Saved SDVO output states */
139 uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
137}; 140};
138 141
139struct psb_intel_sdvo_connector { 142struct psb_intel_sdvo_connector {
@@ -1830,6 +1833,34 @@ done:
1830#undef CHECK_PROPERTY 1833#undef CHECK_PROPERTY
1831} 1834}
1832 1835
1836static void psb_intel_sdvo_save(struct drm_connector *connector)
1837{
1838 struct drm_device *dev = connector->dev;
1839 struct psb_intel_encoder *psb_intel_encoder =
1840 psb_intel_attached_encoder(connector);
1841 struct psb_intel_sdvo *sdvo =
1842 to_psb_intel_sdvo(&psb_intel_encoder->base);
1843
1844 sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
1845}
1846
1847static void psb_intel_sdvo_restore(struct drm_connector *connector)
1848{
1849 struct drm_device *dev = connector->dev;
1850 struct drm_encoder *encoder =
1851 &psb_intel_attached_encoder(connector)->base;
1852 struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
1853 struct drm_crtc *crtc = encoder->crtc;
1854
1855 REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
1856
1857 /* Force a full mode set on the crtc. We're supposed to have the
1858 mode_config lock already. */
1859 if (connector->status == connector_status_connected)
1860 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
1861 NULL);
1862}
1863
1833static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1864static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1834 .dpms = psb_intel_sdvo_dpms, 1865 .dpms = psb_intel_sdvo_dpms,
1835 .mode_fixup = psb_intel_sdvo_mode_fixup, 1866 .mode_fixup = psb_intel_sdvo_mode_fixup,
@@ -1840,6 +1871,8 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
1840 1871
1841static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1872static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
1842 .dpms = drm_helper_connector_dpms, 1873 .dpms = drm_helper_connector_dpms,
1874 .save = psb_intel_sdvo_save,
1875 .restore = psb_intel_sdvo_restore,
1843 .detect = psb_intel_sdvo_detect, 1876 .detect = psb_intel_sdvo_detect,
1844 .fill_modes = drm_helper_probe_single_connector_modes, 1877 .fill_modes = drm_helper_probe_single_connector_modes,
1845 .set_property = psb_intel_sdvo_set_property, 1878 .set_property = psb_intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf3f03f..029eccf30137 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
211 211
212 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R); 212 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
213 213
214 if (vdc_stat & _PSB_PIPE_EVENT_FLAG) 214 if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
215 dsp_int = 1; 215 dsp_int = 1;
216 216
217 /* FIXME: Handle Medfield 217 /* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 603045bee58a..debb7f190c06 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -21,8 +21,8 @@
21 * 21 *
22 **************************************************************************/ 22 **************************************************************************/
23 23
24#ifndef _SYSIRQ_H_ 24#ifndef _PSB_IRQ_H_
25#define _SYSIRQ_H_ 25#define _PSB_IRQ_H_
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28 28
@@ -44,4 +44,4 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
44 44
45int mdfld_enable_te(struct drm_device *dev, int pipe); 45int mdfld_enable_te(struct drm_device *dev, int pipe);
46void mdfld_disable_te(struct drm_device *dev, int pipe); 46void mdfld_disable_te(struct drm_device *dev, int pipe);
47#endif /* _SYSIRQ_H_ */ 47#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a81d1a21050..a1a282ce2466 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045 if (timeout) { 1045 if (timeout) {
1046 struct timespec sleep_time = timespec_sub(now, before); 1046 struct timespec sleep_time = timespec_sub(now, before);
1047 *timeout = timespec_sub(*timeout, sleep_time); 1047 *timeout = timespec_sub(*timeout, sleep_time);
1048 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1049 set_normalized_timespec(timeout, 0, 0);
1048 } 1050 }
1049 1051
1050 switch (end) { 1052 switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1053 case -ERESTARTSYS: /* Signal */ 1055 case -ERESTARTSYS: /* Signal */
1054 return (int)end; 1056 return (int)end;
1055 case 0: /* Timeout */ 1057 case 0: /* Timeout */
1056 if (timeout)
1057 set_normalized_timespec(timeout, 0, 0);
1058 return -ETIME; 1058 return -ETIME;
1059 default: /* Completed */ 1059 default: /* Completed */
1060 WARN_ON(end < 0); /* We're not aware of other errors */ 1060 WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2389,10 +2389,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2389 mutex_unlock(&dev->struct_mutex); 2389 mutex_unlock(&dev->struct_mutex);
2390 2390
2391 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2391 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2392 if (timeout) { 2392 if (timeout)
2393 WARN_ON(!timespec_valid(timeout));
2394 args->timeout_ns = timespec_to_ns(timeout); 2393 args->timeout_ns = timespec_to_ns(timeout);
2395 }
2396 return ret; 2394 return ret;
2397 2395
2398out: 2396out:
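
The __wait_seqno() change clamps the remaining wait time to zero as soon as the subtraction goes negative, instead of zeroing it only in the timeout branch, so callers such as the wait ioctl never see a negative timespec. A userspace-flavoured sketch of the clamp (plain C stand-ins for the kernel's timespec helpers):

#include <stdbool.h>
#include <time.h>

/* Sketch: mirror of timespec_valid() for a remaining-time value. */
static bool ts_valid(const struct timespec *ts)
{
	return ts->tv_sec >= 0 && ts->tv_nsec >= 0 &&
	       ts->tv_nsec < 1000000000L;
}

/* Sketch of the fix above: once elapsed time exceeds the budget,
 * report zero time remaining rather than a negative value. */
static void clamp_remaining(struct timespec *remaining)
{
	if (!ts_valid(remaining)) {	/* i.e. negative time remains */
		remaining->tv_sec = 0;
		remaining->tv_nsec = 0;
	}
}
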
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index c6dfc1466e3a..dc53a527126b 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -129,7 +129,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
129 goto error; 129 goto error;
130 130
131 i = 0; 131 i = 0;
132 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0); 132 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
133 pages[i++] = sg_page_iter_page(&sg_iter); 133 pages[i++] = sg_page_iter_page(&sg_iter);
134 134
135 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); 135 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
@@ -272,7 +272,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
272 * refcount on gem itself instead of f_count of dmabuf. 272 * refcount on gem itself instead of f_count of dmabuf.
273 */ 273 */
274 drm_gem_object_reference(&obj->base); 274 drm_gem_object_reference(&obj->base);
275 dma_buf_put(dma_buf);
276 return &obj->base; 275 return &obj->base;
277 } 276 }
278 } 277 }
@@ -282,6 +281,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
282 if (IS_ERR(attach)) 281 if (IS_ERR(attach))
283 return ERR_CAST(attach); 282 return ERR_CAST(attach);
284 283
284 get_dma_buf(dma_buf);
285
285 obj = i915_gem_object_alloc(dev); 286 obj = i915_gem_object_alloc(dev);
286 if (obj == NULL) { 287 if (obj == NULL) {
287 ret = -ENOMEM; 288 ret = -ENOMEM;
@@ -301,5 +302,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
301 302
302fail_detach: 303fail_detach:
303 dma_buf_detach(dma_buf, attach); 304 dma_buf_detach(dma_buf, attach);
305 dma_buf_put(dma_buf);
306
304 return ERR_PTR(ret); 307 return ERR_PTR(ret);
305} 308}
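
The refcounting rule this hunk settles on: the importer takes its own get_dma_buf() reference only once it will actually keep the attachment, and the fail_detach: path gives that reference back. A minimal sketch of the same ownership rule, with hypothetical names and a toy refcount standing in for the dma-buf API:

#include <stdlib.h>

struct shared { int refs; };

static void get_ref(struct shared *s) { s->refs++; }
static void put_ref(struct shared *s)
{
	if (--s->refs == 0)
		free(s);
}

/* Returns 0 on success; on failure the importer holds no reference. */
static int import(struct shared *s, struct shared **slot, int fail)
{
	get_ref(s);		/* pinned for as long as *slot points at s */
	if (fail) {
		put_ref(s);	/* mirror of the fail_detach: path above */
		return -1;
	}
	*slot = s;
	return 0;
}
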
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a96b6a3118db..117ce3813681 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
57 if (eb == NULL) { 57 if (eb == NULL) {
58 int size = args->buffer_count; 58 int size = args->buffer_count;
59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
61 while (count > 2*size) 61 while (count > 2*size)
62 count >>= 1; 62 count >>= 1;
63 eb = kzalloc(count*sizeof(struct hlist_head) + 63 eb = kzalloc(count*sizeof(struct hlist_head) +
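
BUILD_BUG_ON_NOT_POWER_OF_2() states the intent directly and still fails at compile time. A freestanding approximation of the check (C11 _Static_assert; not the kernel macro itself):

#include <assert.h>

#define IS_POWER_OF_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))

/* a value with more than one bit set trips this at compile time */
_Static_assert(IS_POWER_OF_2(4096 / sizeof(void *)),
	       "hash bucket count must be a power of two");
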
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 85b3d5d4deec..ddad13fa3156 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -269,8 +269,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
269 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 269 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
270 * entries. For aliasing ppgtt support we just steal them at the end for 270 * entries. For aliasing ppgtt support we just steal them at the end for
271 * now. */ 271 * now. */
272 first_pd_entry_in_global_pt = 272 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
273 gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
274 273
275 if (IS_HASWELL(dev)) { 274 if (IS_HASWELL(dev)) {
276 ppgtt->pte_encode = hsw_pte_encode; 275 ppgtt->pte_encode = hsw_pte_encode;
@@ -755,15 +754,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
755 return snb_gmch_ctl << 25; /* 32 MB units */ 754 return snb_gmch_ctl << 25; /* 32 MB units */
756} 755}
757 756
758static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
759{
760 static const int stolen_decoder[] = {
761 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
762 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
763 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
764 return stolen_decoder[snb_gmch_ctl] << 20;
765}
766
767static int gen6_gmch_probe(struct drm_device *dev, 757static int gen6_gmch_probe(struct drm_device *dev,
768 size_t *gtt_total, 758 size_t *gtt_total,
769 size_t *stolen, 759 size_t *stolen,
@@ -793,11 +783,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
793 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 783 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
794 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 784 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
795 785
796 if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) 786 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
797 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
798 else
799 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
800
801 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; 787 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
802 788
803 /* For Modern GENs the PTEs and register space are split in the BAR */ 789 /* For Modern GENs the PTEs and register space are split in the BAR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7af7ae66b338..e4cf382f0b75 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -46,8 +46,6 @@
46#define SNB_GMCH_GGMS_MASK 0x3 46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f 48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51 49
52 50
53/* PCI config space */ 51/* PCI config space */
@@ -2789,11 +2787,12 @@
2789#define _PIPEB_DATA_M_G4X 0x71050 2787#define _PIPEB_DATA_M_G4X 0x71050
2790 2788
2791/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ 2789/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
2792#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) 2790#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
2793#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
2794#define TU_SIZE_SHIFT 25 2791#define TU_SIZE_SHIFT 25
2792#define TU_SIZE_MASK (0x3f << 25)
2795 2793
2796#define PIPE_GMCH_DATA_M_MASK (0xffffff) 2794#define DATA_LINK_M_N_MASK (0xffffff)
2795#define DATA_LINK_N_MAX (0x800000)
2797 2796
2798#define _PIPEA_DATA_N_G4X 0x70054 2797#define _PIPEA_DATA_N_G4X 0x70054
2799#define _PIPEB_DATA_N_G4X 0x71054 2798#define _PIPEB_DATA_N_G4X 0x71054
@@ -3543,8 +3542,6 @@
3543 3542
3544 3543
3545#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) 3544#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
3546#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
3547#define TU_SIZE_MASK 0x7e000000
3548#define PIPE_DATA_M1_OFFSET 0 3545#define PIPE_DATA_M1_OFFSET 0
3549#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) 3546#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
3550#define PIPE_DATA_N1_OFFSET 0 3547#define PIPE_DATA_N1_OFFSET 0
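
TU_SIZE(x) encodes the transfer unit as (x - 1) in a six-bit field at bit 25, so TU_SIZE(64) yields the 0x3f default the comment documents. A standalone illustration of the shift/mask pair, with the field layout taken from the definitions above:

#include <stdint.h>
#include <stdio.h>

#define TU_SIZE_SHIFT	25
#define TU_SIZE_MASK	(0x3fu << TU_SIZE_SHIFT)
#define TU_SIZE(x)	((((uint32_t)(x)) - 1) << TU_SIZE_SHIFT)

int main(void)
{
	uint32_t reg = TU_SIZE(64);	/* default transfer unit */
	unsigned tu = ((reg & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

	printf("reg=%#x tu=%u\n", reg, tu);	/* reg=0x7e000000 tu=64 */
	return 0;
}
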
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index cc414f1a0b92..66a0c6f0bb81 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -45,6 +45,9 @@
45 45
46struct intel_crt { 46struct intel_crt {
47 struct intel_encoder base; 47 struct intel_encoder base;
48 /* DPMS state is stored in the connector, which we need in the
49 * encoder's enable/disable callbacks */
50 struct intel_connector *connector;
48 bool force_hotplug_required; 51 bool force_hotplug_required;
49 u32 adpa_reg; 52 u32 adpa_reg;
50}; 53};
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
81 return true; 84 return true;
82} 85}
83 86
84static void intel_disable_crt(struct intel_encoder *encoder)
85{
86 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
87 struct intel_crt *crt = intel_encoder_to_crt(encoder);
88 u32 temp;
89
90 temp = I915_READ(crt->adpa_reg);
91 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp);
94}
95
96static void intel_enable_crt(struct intel_encoder *encoder)
97{
98 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
99 struct intel_crt *crt = intel_encoder_to_crt(encoder);
100 u32 temp;
101
102 temp = I915_READ(crt->adpa_reg);
103 temp |= ADPA_DAC_ENABLE;
104 I915_WRITE(crt->adpa_reg, temp);
105}
106
107/* Note: The caller is required to filter out dpms modes not supported by the 87/* Note: The caller is required to filter out dpms modes not supported by the
108 * platform. */ 88 * platform. */
109static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 89static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
135 I915_WRITE(crt->adpa_reg, temp); 115 I915_WRITE(crt->adpa_reg, temp);
136} 116}
137 117
118static void intel_disable_crt(struct intel_encoder *encoder)
119{
120 intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
121}
122
123static void intel_enable_crt(struct intel_encoder *encoder)
124{
125 struct intel_crt *crt = intel_encoder_to_crt(encoder);
126
127 intel_crt_set_dpms(encoder, crt->connector->base.dpms);
128}
129
130
138static void intel_crt_dpms(struct drm_connector *connector, int mode) 131static void intel_crt_dpms(struct drm_connector *connector, int mode)
139{ 132{
140 struct drm_device *dev = connector->dev; 133 struct drm_device *dev = connector->dev;
@@ -753,6 +746,7 @@ void intel_crt_init(struct drm_device *dev)
753 } 746 }
754 747
755 connector = &intel_connector->base; 748 connector = &intel_connector->base;
749 crt->connector = intel_connector;
756 drm_connector_init(dev, &intel_connector->base, 750 drm_connector_init(dev, &intel_connector->base,
757 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 751 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
758 752
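
With the connector cached on the encoder, enable/disable collapse into thin wrappers around the one helper that owns the hardware sequence. The shape of the pattern, with hypothetical types:

enum dpms_mode { DPMS_ON = 0, DPMS_OFF = 3 };

struct connector { enum dpms_mode dpms; };
struct encoder   { struct connector *connector; /* cached at init time */ };

static void set_dpms(struct encoder *e, enum dpms_mode mode)
{
	/* the single place that touches the ADPA-style hardware bits */
	(void)e; (void)mode;
}

static void disable(struct encoder *e) { set_dpms(e, DPMS_OFF); }
static void enable(struct encoder *e)  { set_dpms(e, e->connector->dpms); }
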
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cddcf4af7a5a..062de679f38f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1049,6 +1049,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1049 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1049 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1050 intel_dp_start_link_train(intel_dp); 1050 intel_dp_start_link_train(intel_dp);
1051 intel_dp_complete_link_train(intel_dp); 1051 intel_dp_complete_link_train(intel_dp);
1052 if (port != PORT_A)
1053 intel_dp_stop_link_train(intel_dp);
1052 } 1054 }
1053} 1055}
1054 1056
@@ -1110,6 +1112,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1110 } else if (type == INTEL_OUTPUT_EDP) { 1112 } else if (type == INTEL_OUTPUT_EDP) {
1111 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1113 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1112 1114
1115 if (port == PORT_A)
1116 intel_dp_stop_link_train(intel_dp);
1117
1113 ironlake_edp_backlight_on(intel_dp); 1118 ironlake_edp_backlight_on(intel_dp);
1114 } 1119 }
1115 1120
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7358e4e9761e..2d90594016d9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4211,26 +4211,36 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
4211} 4211}
4212 4212
4213static void 4213static void
4214intel_reduce_ratio(uint32_t *num, uint32_t *den) 4214intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
4215{ 4215{
4216 while (*num > 0xffffff || *den > 0xffffff) { 4216 while (*num > DATA_LINK_M_N_MASK ||
4217 *den > DATA_LINK_M_N_MASK) {
4217 *num >>= 1; 4218 *num >>= 1;
4218 *den >>= 1; 4219 *den >>= 1;
4219 } 4220 }
4220} 4221}
4221 4222
4223static void compute_m_n(unsigned int m, unsigned int n,
4224 uint32_t *ret_m, uint32_t *ret_n)
4225{
4226 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4227 *ret_m = div_u64((uint64_t) m * *ret_n, n);
4228 intel_reduce_m_n_ratio(ret_m, ret_n);
4229}
4230
4222void 4231void
4223intel_link_compute_m_n(int bits_per_pixel, int nlanes, 4232intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4224 int pixel_clock, int link_clock, 4233 int pixel_clock, int link_clock,
4225 struct intel_link_m_n *m_n) 4234 struct intel_link_m_n *m_n)
4226{ 4235{
4227 m_n->tu = 64; 4236 m_n->tu = 64;
4228 m_n->gmch_m = bits_per_pixel * pixel_clock; 4237
4229 m_n->gmch_n = link_clock * nlanes * 8; 4238 compute_m_n(bits_per_pixel * pixel_clock,
4230 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 4239 link_clock * nlanes * 8,
4231 m_n->link_m = pixel_clock; 4240 &m_n->gmch_m, &m_n->gmch_n);
4232 m_n->link_n = link_clock; 4241
4233 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 4242 compute_m_n(pixel_clock, link_clock,
4243 &m_n->link_m, &m_n->link_n);
4234} 4244}
4235 4245
4236static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4246static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
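
compute_m_n() picks a hardware-friendly N (the next power of two, capped at DATA_LINK_N_MAX), scales M in 64-bit arithmetic so precision is kept, and then halves both until they fit the 24-bit register fields. A standalone C sketch of the same arithmetic; it mirrors the logic, not the i915 API, and the sample numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DATA_LINK_M_N_MASK	0xffffffu
#define DATA_LINK_N_MAX		0x800000u

static uint32_t roundup_pow2(uint32_t n)
{
	uint32_t r = 1;
	while (r < n)
		r <<= 1;
	return r;
}

static void compute_m_n(uint32_t m, uint32_t n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = roundup_pow2(n);
	if (*ret_n > DATA_LINK_N_MAX)
		*ret_n = DATA_LINK_N_MAX;
	*ret_m = (uint32_t)(((uint64_t)m * *ret_n) / n);
	while (*ret_m > DATA_LINK_M_N_MASK || *ret_n > DATA_LINK_M_N_MASK) {
		*ret_m >>= 1;		/* reduce until both fit 24 bits */
		*ret_n >>= 1;
	}
}

int main(void)
{
	uint32_t gmch_m, gmch_n;

	/* 24bpp at a 148.5MHz pixel clock over 4 lanes of 2.7GHz link */
	compute_m_n(24 * 148500, 270000 * 4 * 8, &gmch_m, &gmch_n);
	printf("m=%u n=%u\n", gmch_m, gmch_n);
	return 0;
}
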
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2bb4009b7a60..6ba9f09fe21a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -720,18 +720,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
720 720
721 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 721 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
722 * bpc in between. */ 722 * bpc in between. */
723 bpp = pipe_config->pipe_bpp; 723 bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
724 724 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
725 /* 725 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
726 * eDP panels are really fickle, try to enfore the bpp the firmware
727 * recomments. This means we'll up-dither 16bpp framebuffers on
728 * high-depth panels.
729 */
730 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
731 DRM_DEBUG_KMS("forcing bpp for eDP panel to BIOS-provided %i\n",
732 dev_priv->vbt.edp_bpp);
733 bpp = dev_priv->vbt.edp_bpp;
734 }
735 726
736 for (; bpp >= 6*3; bpp -= 2*3) { 727 for (; bpp >= 6*3; bpp -= 2*3) {
737 mode_rate = intel_dp_link_required(target_clock, bpp); 728 mode_rate = intel_dp_link_required(target_clock, bpp);
@@ -770,6 +761,7 @@ found:
770 intel_dp->link_bw = bws[clock]; 761 intel_dp->link_bw = bws[clock];
771 intel_dp->lane_count = lane_count; 762 intel_dp->lane_count = lane_count;
772 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 763 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
764 pipe_config->pipe_bpp = bpp;
773 pipe_config->pixel_target_clock = target_clock; 765 pipe_config->pixel_target_clock = target_clock;
774 766
775 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 767 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -782,8 +774,6 @@ found:
782 target_clock, adjusted_mode->clock, 774 target_clock, adjusted_mode->clock,
783 &pipe_config->dp_m_n); 775 &pipe_config->dp_m_n);
784 776
785 pipe_config->pipe_bpp = bpp;
786
787 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 777 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
788 778
789 return true; 779 return true;
@@ -1400,6 +1390,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1400 ironlake_edp_panel_on(intel_dp); 1390 ironlake_edp_panel_on(intel_dp);
1401 ironlake_edp_panel_vdd_off(intel_dp, true); 1391 ironlake_edp_panel_vdd_off(intel_dp, true);
1402 intel_dp_complete_link_train(intel_dp); 1392 intel_dp_complete_link_train(intel_dp);
1393 intel_dp_stop_link_train(intel_dp);
1403 ironlake_edp_backlight_on(intel_dp); 1394 ironlake_edp_backlight_on(intel_dp);
1404 1395
1405 if (IS_VALLEYVIEW(dev)) { 1396 if (IS_VALLEYVIEW(dev)) {
@@ -1898,10 +1889,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1898 struct drm_i915_private *dev_priv = dev->dev_private; 1889 struct drm_i915_private *dev_priv = dev->dev_private;
1899 enum port port = intel_dig_port->port; 1890 enum port port = intel_dig_port->port;
1900 int ret; 1891 int ret;
1901 uint32_t temp;
1902 1892
1903 if (HAS_DDI(dev)) { 1893 if (HAS_DDI(dev)) {
1904 temp = I915_READ(DP_TP_CTL(port)); 1894 uint32_t temp = I915_READ(DP_TP_CTL(port));
1905 1895
1906 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1896 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1907 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1897 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1911,18 +1901,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1911 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1901 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1912 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1902 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1913 case DP_TRAINING_PATTERN_DISABLE: 1903 case DP_TRAINING_PATTERN_DISABLE:
1914
1915 if (port != PORT_A) {
1916 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1917 I915_WRITE(DP_TP_CTL(port), temp);
1918
1919 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1920 DP_TP_STATUS_IDLE_DONE), 1))
1921 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1922
1923 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1924 }
1925
1926 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1904 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1927 1905
1928 break; 1906 break;
@@ -1998,6 +1976,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1998 return true; 1976 return true;
1999} 1977}
2000 1978
1979static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
1980{
1981 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1982 struct drm_device *dev = intel_dig_port->base.base.dev;
1983 struct drm_i915_private *dev_priv = dev->dev_private;
1984 enum port port = intel_dig_port->port;
1985 uint32_t val;
1986
1987 if (!HAS_DDI(dev))
1988 return;
1989
1990 val = I915_READ(DP_TP_CTL(port));
1991 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1992 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
1993 I915_WRITE(DP_TP_CTL(port), val);
1994
1995 /*
1996 * On PORT_A we can have only eDP in SST mode. There the only reason
1997 * we need to set idle transmission mode is to work around a HW issue
1998 * where we enable the pipe while not in idle link-training mode.
1999 * In this case there is requirement to wait for a minimum number of
2000 * idle patterns to be sent.
2001 */
2002 if (port == PORT_A)
2003 return;
2004
2005 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2006 1))
2007 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2008}
2009
2001/* Enable corresponding port and start training pattern 1 */ 2010/* Enable corresponding port and start training pattern 1 */
2002void 2011void
2003intel_dp_start_link_train(struct intel_dp *intel_dp) 2012intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -2140,10 +2149,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2140 ++tries; 2149 ++tries;
2141 } 2150 }
2142 2151
2152 intel_dp_set_idle_link_train(intel_dp);
2153
2154 intel_dp->DP = DP;
2155
2143 if (channel_eq) 2156 if (channel_eq)
2144 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 2157 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
2145 2158
2146 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 2159}
2160
2161void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2162{
2163 intel_dp_set_link_train(intel_dp, intel_dp->DP,
2164 DP_TRAINING_PATTERN_DISABLE);
2147} 2165}
2148 2166
2149static void 2167static void
@@ -2351,6 +2369,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2351 drm_get_encoder_name(&intel_encoder->base)); 2369 drm_get_encoder_name(&intel_encoder->base));
2352 intel_dp_start_link_train(intel_dp); 2370 intel_dp_start_link_train(intel_dp);
2353 intel_dp_complete_link_train(intel_dp); 2371 intel_dp_complete_link_train(intel_dp);
2372 intel_dp_stop_link_train(intel_dp);
2354 } 2373 }
2355} 2374}
2356 2375
@@ -2615,6 +2634,9 @@ intel_dp_set_property(struct drm_connector *connector,
2615 } 2634 }
2616 2635
2617 if (property == dev_priv->broadcast_rgb_property) { 2636 if (property == dev_priv->broadcast_rgb_property) {
2637 bool old_auto = intel_dp->color_range_auto;
2638 uint32_t old_range = intel_dp->color_range;
2639
2618 switch (val) { 2640 switch (val) {
2619 case INTEL_BROADCAST_RGB_AUTO: 2641 case INTEL_BROADCAST_RGB_AUTO:
2620 intel_dp->color_range_auto = true; 2642 intel_dp->color_range_auto = true;
@@ -2630,6 +2652,11 @@ intel_dp_set_property(struct drm_connector *connector,
2630 default: 2652 default:
2631 return -EINVAL; 2653 return -EINVAL;
2632 } 2654 }
2655
2656 if (old_auto == intel_dp->color_range_auto &&
2657 old_range == intel_dp->color_range)
2658 return 0;
2659
2633 goto done; 2660 goto done;
2634 } 2661 }
2635 2662
@@ -2679,12 +2706,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2679{ 2706{
2680 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2707 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2681 struct intel_dp *intel_dp = &intel_dig_port->dp; 2708 struct intel_dp *intel_dp = &intel_dig_port->dp;
2709 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2682 2710
2683 i2c_del_adapter(&intel_dp->adapter); 2711 i2c_del_adapter(&intel_dp->adapter);
2684 drm_encoder_cleanup(encoder); 2712 drm_encoder_cleanup(encoder);
2685 if (is_edp(intel_dp)) { 2713 if (is_edp(intel_dp)) {
2686 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2714 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2715 mutex_lock(&dev->mode_config.mutex);
2687 ironlake_panel_vdd_off_sync(intel_dp); 2716 ironlake_panel_vdd_off_sync(intel_dp);
2717 mutex_unlock(&dev->mode_config.mutex);
2688 } 2718 }
2689 kfree(intel_dig_port); 2719 kfree(intel_dig_port);
2690} 2720}
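
The bpp walk now starts from the smaller of 24bpp and the pipe bpp, optionally clamped further to the VBT's eDP value, and steps down one bpc (2*3 bits) at a time. A toy version of that loop under simplified link-rate math; the constants are illustrative, and the real code keeps searching lane/clock combinations and fails the modeset when nothing fits:

#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	/* bandwidth needed; 8b/10b overhead ignored for the sketch */
	return (pixel_clock_khz * bpp + 7) / 8;
}

int main(void)
{
	int pipe_bpp = 30, vbt_edp_bpp = 18;
	int max_rate = 2700 * 1000 * 2;		/* 2 lanes, illustrative */
	int bpp = pipe_bpp < 24 ? pipe_bpp : 24; /* min_t(int, 8*3, ...) */

	if (vbt_edp_bpp && vbt_edp_bpp < bpp)
		bpp = vbt_edp_bpp;		/* honor the panel firmware */

	for (; bpp >= 6 * 3; bpp -= 2 * 3)	/* walk down in whole bpc */
		if (link_required(148500, bpp) <= max_rate)
			break;

	printf("chosen bpp: %d\n", bpp);
	return 0;
}
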
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0f3554592719..9b0af7e27c82 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -555,6 +555,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
555extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 555extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
556extern void intel_dp_start_link_train(struct intel_dp *intel_dp); 556extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
557extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); 557extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
558extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
558extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 559extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
559extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); 560extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
560extern void intel_dp_check_link_status(struct intel_dp *intel_dp); 561extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ee328ce8e9fe..2c0be924e9a9 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -455,6 +455,7 @@ void intel_dvo_init(struct drm_device *dev)
455 const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; 455 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
456 struct i2c_adapter *i2c; 456 struct i2c_adapter *i2c;
457 int gpio; 457 int gpio;
458 bool dvoinit;
458 459
459 /* Allow the I2C driver info to specify the GPIO to be used in 460 /* Allow the I2C driver info to specify the GPIO to be used in
460 * special cases, but otherwise default to what's defined 461 * special cases, but otherwise default to what's defined
@@ -474,7 +475,17 @@ void intel_dvo_init(struct drm_device *dev)
474 i2c = intel_gmbus_get_adapter(dev_priv, gpio); 475 i2c = intel_gmbus_get_adapter(dev_priv, gpio);
475 476
476 intel_dvo->dev = *dvo; 477 intel_dvo->dev = *dvo;
477 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) 478
479 /* GMBUS NAK handling seems to be unstable, hence let the
480 * transmitter detection run in bit banging mode for now.
481 */
482 intel_gmbus_force_bit(i2c, true);
483
484 dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
485
486 intel_gmbus_force_bit(i2c, false);
487
488 if (!dvoinit)
478 continue; 489 continue;
479 490
480 intel_encoder->type = INTEL_OUTPUT_DVO; 491 intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0e19e575a1b4..6b7c3ca2c035 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
262void intel_fbdev_set_suspend(struct drm_device *dev, int state) 262void intel_fbdev_set_suspend(struct drm_device *dev, int state)
263{ 263{
264 drm_i915_private_t *dev_priv = dev->dev_private; 264 drm_i915_private_t *dev_priv = dev->dev_private;
265 if (!dev_priv->fbdev) 265 struct intel_fbdev *ifbdev = dev_priv->fbdev;
266 struct fb_info *info;
267
268 if (!ifbdev)
266 return; 269 return;
267 270
268 fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); 271 info = ifbdev->helper.fbdev;
272
273 /* On resume from hibernation: If the object is shmemfs backed, it has
274 * been restored from swap. If the object is stolen however, it will be
275 * full of whatever garbage was left in there.
276 */
277 if (!state && ifbdev->ifb.obj->stolen)
278 memset_io(info->screen_base, 0, info->screen_size);
279
280 fb_set_suspend(info, state);
269} 281}
270 282
271MODULE_LICENSE("GPL and additional rights"); 283MODULE_LICENSE("GPL and additional rights");
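
The rule the new comment states: shmemfs-backed objects come back from swap intact, stolen-memory objects come back as garbage, so a stolen fbdev buffer must be cleared before resume unblanks it. A minimal sketch with hypothetical types, plain memset standing in for memset_io:

#include <string.h>

struct fb {
	void *screen_base;
	unsigned long screen_size;
	int is_stolen;
};

static void fb_resume(struct fb *fb, int suspend_state)
{
	/* state 0 means "resuming": stolen contents are undefined now */
	if (!suspend_state && fb->is_stolen)
		memset(fb->screen_base, 0, fb->screen_size);
	/* ...then the fb_set_suspend() equivalent would run... */
}
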
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 93de5ff77912..2b727f0d201f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -947,6 +947,9 @@ intel_hdmi_set_property(struct drm_connector *connector,
947 } 947 }
948 948
949 if (property == dev_priv->broadcast_rgb_property) { 949 if (property == dev_priv->broadcast_rgb_property) {
950 bool old_auto = intel_hdmi->color_range_auto;
951 uint32_t old_range = intel_hdmi->color_range;
952
950 switch (val) { 953 switch (val) {
951 case INTEL_BROADCAST_RGB_AUTO: 954 case INTEL_BROADCAST_RGB_AUTO:
952 intel_hdmi->color_range_auto = true; 955 intel_hdmi->color_range_auto = true;
@@ -962,6 +965,11 @@ intel_hdmi_set_property(struct drm_connector *connector,
962 default: 965 default:
963 return -EINVAL; 966 return -EINVAL;
964 } 967 }
968
969 if (old_auto == intel_hdmi->color_range_auto &&
970 old_range == intel_hdmi->color_range)
971 return 0;
972
965 goto done; 973 goto done;
966 } 974 }
967 975
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 62b64e4877b8..5c2d6939600e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -293,11 +293,11 @@ static void intel_didl_outputs(struct drm_device *dev)
293 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 293 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
294 return; 294 return;
295 295
296 if (acpi_is_video_device(acpi_dev)) 296 if (acpi_is_video_device(handle))
297 acpi_video_bus = acpi_dev; 297 acpi_video_bus = acpi_dev;
298 else { 298 else {
299 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { 299 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
300 if (acpi_is_video_device(acpi_cdev)) { 300 if (acpi_is_video_device(acpi_cdev->handle)) {
301 acpi_video_bus = acpi_cdev; 301 acpi_video_bus = acpi_cdev;
302 break; 302 break;
303 } 303 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1a765723d37e..e2255ed97894 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1353,17 +1353,17 @@ static void valleyview_update_wm(struct drm_device *dev)
1353 1353
1354 vlv_update_drain_latency(dev); 1354 vlv_update_drain_latency(dev);
1355 1355
1356 if (g4x_compute_wm0(dev, 0, 1356 if (g4x_compute_wm0(dev, PIPE_A,
1357 &valleyview_wm_info, latency_ns, 1357 &valleyview_wm_info, latency_ns,
1358 &valleyview_cursor_wm_info, latency_ns, 1358 &valleyview_cursor_wm_info, latency_ns,
1359 &planea_wm, &cursora_wm)) 1359 &planea_wm, &cursora_wm))
1360 enabled |= 1; 1360 enabled |= 1 << PIPE_A;
1361 1361
1362 if (g4x_compute_wm0(dev, 1, 1362 if (g4x_compute_wm0(dev, PIPE_B,
1363 &valleyview_wm_info, latency_ns, 1363 &valleyview_wm_info, latency_ns,
1364 &valleyview_cursor_wm_info, latency_ns, 1364 &valleyview_cursor_wm_info, latency_ns,
1365 &planeb_wm, &cursorb_wm)) 1365 &planeb_wm, &cursorb_wm))
1366 enabled |= 2; 1366 enabled |= 1 << PIPE_B;
1367 1367
1368 if (single_plane_enabled(enabled) && 1368 if (single_plane_enabled(enabled) &&
1369 g4x_compute_srwm(dev, ffs(enabled) - 1, 1369 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1409,17 +1409,17 @@ static void g4x_update_wm(struct drm_device *dev)
1409 int plane_sr, cursor_sr; 1409 int plane_sr, cursor_sr;
1410 unsigned int enabled = 0; 1410 unsigned int enabled = 0;
1411 1411
1412 if (g4x_compute_wm0(dev, 0, 1412 if (g4x_compute_wm0(dev, PIPE_A,
1413 &g4x_wm_info, latency_ns, 1413 &g4x_wm_info, latency_ns,
1414 &g4x_cursor_wm_info, latency_ns, 1414 &g4x_cursor_wm_info, latency_ns,
1415 &planea_wm, &cursora_wm)) 1415 &planea_wm, &cursora_wm))
1416 enabled |= 1; 1416 enabled |= 1 << PIPE_A;
1417 1417
1418 if (g4x_compute_wm0(dev, 1, 1418 if (g4x_compute_wm0(dev, PIPE_B,
1419 &g4x_wm_info, latency_ns, 1419 &g4x_wm_info, latency_ns,
1420 &g4x_cursor_wm_info, latency_ns, 1420 &g4x_cursor_wm_info, latency_ns,
1421 &planeb_wm, &cursorb_wm)) 1421 &planeb_wm, &cursorb_wm))
1422 enabled |= 2; 1422 enabled |= 1 << PIPE_B;
1423 1423
1424 if (single_plane_enabled(enabled) && 1424 if (single_plane_enabled(enabled) &&
1425 g4x_compute_srwm(dev, ffs(enabled) - 1, 1425 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1772,7 +1772,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1772 unsigned int enabled; 1772 unsigned int enabled;
1773 1773
1774 enabled = 0; 1774 enabled = 0;
1775 if (g4x_compute_wm0(dev, 0, 1775 if (g4x_compute_wm0(dev, PIPE_A,
1776 &ironlake_display_wm_info, 1776 &ironlake_display_wm_info,
1777 ILK_LP0_PLANE_LATENCY, 1777 ILK_LP0_PLANE_LATENCY,
1778 &ironlake_cursor_wm_info, 1778 &ironlake_cursor_wm_info,
@@ -1783,10 +1783,10 @@ static void ironlake_update_wm(struct drm_device *dev)
1783 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1783 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1784 " plane %d, " "cursor: %d\n", 1784 " plane %d, " "cursor: %d\n",
1785 plane_wm, cursor_wm); 1785 plane_wm, cursor_wm);
1786 enabled |= 1; 1786 enabled |= 1 << PIPE_A;
1787 } 1787 }
1788 1788
1789 if (g4x_compute_wm0(dev, 1, 1789 if (g4x_compute_wm0(dev, PIPE_B,
1790 &ironlake_display_wm_info, 1790 &ironlake_display_wm_info,
1791 ILK_LP0_PLANE_LATENCY, 1791 ILK_LP0_PLANE_LATENCY,
1792 &ironlake_cursor_wm_info, 1792 &ironlake_cursor_wm_info,
@@ -1797,7 +1797,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1797 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1797 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1798 " plane %d, cursor: %d\n", 1798 " plane %d, cursor: %d\n",
1799 plane_wm, cursor_wm); 1799 plane_wm, cursor_wm);
1800 enabled |= 2; 1800 enabled |= 1 << PIPE_B;
1801 } 1801 }
1802 1802
1803 /* 1803 /*
@@ -1857,7 +1857,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1857 unsigned int enabled; 1857 unsigned int enabled;
1858 1858
1859 enabled = 0; 1859 enabled = 0;
1860 if (g4x_compute_wm0(dev, 0, 1860 if (g4x_compute_wm0(dev, PIPE_A,
1861 &sandybridge_display_wm_info, latency, 1861 &sandybridge_display_wm_info, latency,
1862 &sandybridge_cursor_wm_info, latency, 1862 &sandybridge_cursor_wm_info, latency,
1863 &plane_wm, &cursor_wm)) { 1863 &plane_wm, &cursor_wm)) {
@@ -1868,10 +1868,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
1868 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1868 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1869 " plane %d, " "cursor: %d\n", 1869 " plane %d, " "cursor: %d\n",
1870 plane_wm, cursor_wm); 1870 plane_wm, cursor_wm);
1871 enabled |= 1; 1871 enabled |= 1 << PIPE_A;
1872 } 1872 }
1873 1873
1874 if (g4x_compute_wm0(dev, 1, 1874 if (g4x_compute_wm0(dev, PIPE_B,
1875 &sandybridge_display_wm_info, latency, 1875 &sandybridge_display_wm_info, latency,
1876 &sandybridge_cursor_wm_info, latency, 1876 &sandybridge_cursor_wm_info, latency,
1877 &plane_wm, &cursor_wm)) { 1877 &plane_wm, &cursor_wm)) {
@@ -1882,7 +1882,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1882 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1882 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1883 " plane %d, cursor: %d\n", 1883 " plane %d, cursor: %d\n",
1884 plane_wm, cursor_wm); 1884 plane_wm, cursor_wm);
1885 enabled |= 2; 1885 enabled |= 1 << PIPE_B;
1886 } 1886 }
1887 1887
1888 /* 1888 /*
@@ -1960,7 +1960,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1960 unsigned int enabled; 1960 unsigned int enabled;
1961 1961
1962 enabled = 0; 1962 enabled = 0;
1963 if (g4x_compute_wm0(dev, 0, 1963 if (g4x_compute_wm0(dev, PIPE_A,
1964 &sandybridge_display_wm_info, latency, 1964 &sandybridge_display_wm_info, latency,
1965 &sandybridge_cursor_wm_info, latency, 1965 &sandybridge_cursor_wm_info, latency,
1966 &plane_wm, &cursor_wm)) { 1966 &plane_wm, &cursor_wm)) {
@@ -1971,10 +1971,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1971 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1971 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1972 " plane %d, " "cursor: %d\n", 1972 " plane %d, " "cursor: %d\n",
1973 plane_wm, cursor_wm); 1973 plane_wm, cursor_wm);
1974 enabled |= 1; 1974 enabled |= 1 << PIPE_A;
1975 } 1975 }
1976 1976
1977 if (g4x_compute_wm0(dev, 1, 1977 if (g4x_compute_wm0(dev, PIPE_B,
1978 &sandybridge_display_wm_info, latency, 1978 &sandybridge_display_wm_info, latency,
1979 &sandybridge_cursor_wm_info, latency, 1979 &sandybridge_cursor_wm_info, latency,
1980 &plane_wm, &cursor_wm)) { 1980 &plane_wm, &cursor_wm)) {
@@ -1985,10 +1985,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1985 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1985 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1986 " plane %d, cursor: %d\n", 1986 " plane %d, cursor: %d\n",
1987 plane_wm, cursor_wm); 1987 plane_wm, cursor_wm);
1988 enabled |= 2; 1988 enabled |= 1 << PIPE_B;
1989 } 1989 }
1990 1990
1991 if (g4x_compute_wm0(dev, 2, 1991 if (g4x_compute_wm0(dev, PIPE_C,
1992 &sandybridge_display_wm_info, latency, 1992 &sandybridge_display_wm_info, latency,
1993 &sandybridge_cursor_wm_info, latency, 1993 &sandybridge_cursor_wm_info, latency,
1994 &plane_wm, &cursor_wm)) { 1994 &plane_wm, &cursor_wm)) {
@@ -1999,7 +1999,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1999 DRM_DEBUG_KMS("FIFO watermarks For pipe C -" 1999 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2000 " plane %d, cursor: %d\n", 2000 " plane %d, cursor: %d\n",
2001 plane_wm, cursor_wm); 2001 plane_wm, cursor_wm);
2002 enabled |= 3; 2002 enabled |= 1 << PIPE_C;
2003 } 2003 }
2004 2004
2005 /* 2005 /*
@@ -2765,7 +2765,7 @@ static void gen6_enable_rps(struct drm_device *dev)
2765 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); 2765 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2766 2766
2767 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 2767 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2768 if (!ret && (IS_GEN6(dev) || IS_IVYBRIDGE(dev))) { 2768 if (!ret) {
2769 pcu_mbox = 0; 2769 pcu_mbox = 0;
2770 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 2770 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2771 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ 2771 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
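
Worth noting in the watermark hunks: the old literal for pipe C was 3, which sets the bits for pipes A and B, so switching to 1 << PIPE_C is a bugfix as well as a cleanup. A standalone illustration of why the enum-based masks keep the single-pipe test and the ffs() lookup consistent; single_plane_enabled() is assumed to be a power-of-two test, as in this driver:

#include <stdio.h>
#include <strings.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

#define single_plane_enabled(mask) \
	((mask) != 0 && (((mask) & ((mask) - 1)) == 0))

int main(void)
{
	unsigned enabled = 0;

	enabled |= 1 << PIPE_C;			/* was: enabled |= 3 */
	printf("single=%d first pipe=%d\n",
	       single_plane_enabled(enabled), ffs(enabled) - 1);
	return 0;
}
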
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2f54dc3dc5df..78f0631b1c43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1958,6 +1958,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
1958 } 1958 }
1959 1959
1960 if (property == dev_priv->broadcast_rgb_property) { 1960 if (property == dev_priv->broadcast_rgb_property) {
1961 bool old_auto = intel_sdvo->color_range_auto;
1962 uint32_t old_range = intel_sdvo->color_range;
1963
1961 switch (val) { 1964 switch (val) {
1962 case INTEL_BROADCAST_RGB_AUTO: 1965 case INTEL_BROADCAST_RGB_AUTO:
1963 intel_sdvo->color_range_auto = true; 1966 intel_sdvo->color_range_auto = true;
@@ -1975,6 +1978,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
1975 default: 1978 default:
1976 return -EINVAL; 1979 return -EINVAL;
1977 } 1980 }
1981
1982 if (old_auto == intel_sdvo->color_range_auto &&
1983 old_range == intel_sdvo->color_range)
1984 return 0;
1985
1978 goto done; 1986 goto done;
1979 } 1987 }
1980 1988
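
The same guard lands in the DP, HDMI and SDVO handlers: snapshot the old state, apply the request, and return early when nothing changed so a no-op property write does not trigger a full modeset. The minimal shape of the pattern, names illustrative:

#include <stdbool.h>
#include <stdint.h>

struct out { bool range_auto; uint32_t range; };

/* returns true when a modeset is actually needed */
static bool set_broadcast_rgb(struct out *o, bool new_auto, uint32_t new_range)
{
	bool old_auto = o->range_auto;
	uint32_t old_range = o->range;

	o->range_auto = new_auto;
	o->range = new_range;

	return old_auto != o->range_auto || old_range != o->range;
}
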
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4d932c46725d..bf29b2f4d68d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -115,6 +115,8 @@ struct mga_fbdev {
115 void *sysram; 115 void *sysram;
116 int size; 116 int size;
117 struct ttm_bo_kmap_obj mapping; 117 struct ttm_bo_kmap_obj mapping;
118 int x1, y1, x2, y2; /* dirty rect */
119 spinlock_t dirty_lock;
118}; 120};
119 121
120struct mga_crtc { 122struct mga_crtc {
@@ -215,7 +217,7 @@ mgag200_bo(struct ttm_buffer_object *bo)
215{ 217{
216 return container_of(bo, struct mgag200_bo, bo); 218 return container_of(bo, struct mgag200_bo, bo);
217} 219}
218 /* mga_crtc.c */ 220 /* mgag200_crtc.c */
219void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 221void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
220 u16 blue, int regno); 222 u16 blue, int regno);
221void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 223void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -225,7 +227,7 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
225int mgag200_modeset_init(struct mga_device *mdev); 227int mgag200_modeset_init(struct mga_device *mdev);
226void mgag200_modeset_fini(struct mga_device *mdev); 228void mgag200_modeset_fini(struct mga_device *mdev);
227 229
228 /* mga_fbdev.c */ 230 /* mgag200_fb.c */
229int mgag200_fbdev_init(struct mga_device *mdev); 231int mgag200_fbdev_init(struct mga_device *mdev);
230void mgag200_fbdev_fini(struct mga_device *mdev); 232void mgag200_fbdev_fini(struct mga_device *mdev);
231 233
@@ -254,7 +256,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
254 struct drm_device *dev, 256 struct drm_device *dev,
255 uint32_t handle, 257 uint32_t handle,
256 uint64_t *offset); 258 uint64_t *offset);
257 /* mga_i2c.c */ 259 /* mgag200_i2c.c */
258struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev); 260struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
259void mgag200_i2c_destroy(struct mga_i2c_chan *i2c); 261void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
260 262
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d2253f639481..5da824ce9ba1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8; 29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
30 int ret; 30 int ret;
31 bool unmap = false; 31 bool unmap = false;
32 bool store_for_later = false;
33 int x2, y2;
34 unsigned long flags;
32 35
33 obj = mfbdev->mfb.obj; 36 obj = mfbdev->mfb.obj;
34 bo = gem_to_mga_bo(obj); 37 bo = gem_to_mga_bo(obj);
35 38
39 /*
40 * try and reserve the BO, if we fail with busy
41 * then the BO is being moved and we should
42 * store up the damage until later.
43 */
36 ret = mgag200_bo_reserve(bo, true); 44 ret = mgag200_bo_reserve(bo, true);
37 if (ret) { 45 if (ret) {
38 DRM_ERROR("failed to reserve fb bo\n"); 46 if (ret != -EBUSY)
47 return;
48
49 store_for_later = true;
50 }
51
52 x2 = x + width - 1;
53 y2 = y + height - 1;
54 spin_lock_irqsave(&mfbdev->dirty_lock, flags);
55
56 if (mfbdev->y1 < y)
57 y = mfbdev->y1;
58 if (mfbdev->y2 > y2)
59 y2 = mfbdev->y2;
60 if (mfbdev->x1 < x)
61 x = mfbdev->x1;
62 if (mfbdev->x2 > x2)
63 x2 = mfbdev->x2;
64
65 if (store_for_later) {
66 mfbdev->x1 = x;
67 mfbdev->x2 = x2;
68 mfbdev->y1 = y;
69 mfbdev->y2 = y2;
70 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
39 return; 71 return;
40 } 72 }
41 73
74 mfbdev->x1 = mfbdev->y1 = INT_MAX;
75 mfbdev->x2 = mfbdev->y2 = 0;
76 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
77
42 if (!bo->kmap.virtual) { 78 if (!bo->kmap.virtual) {
43 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); 79 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
44 if (ret) { 80 if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
48 } 84 }
49 unmap = true; 85 unmap = true;
50 } 86 }
51 for (i = y; i < y + height; i++) { 87 for (i = y; i <= y2; i++) {
52 /* assume equal stride for now */ 88 /* assume equal stride for now */
53 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp); 89 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
54 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp); 90 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
55 91
56 } 92 }
57 if (unmap) 93 if (unmap)
@@ -105,12 +141,9 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
105 struct drm_gem_object **gobj_p) 141 struct drm_gem_object **gobj_p)
106{ 142{
107 struct drm_device *dev = afbdev->helper.dev; 143 struct drm_device *dev = afbdev->helper.dev;
108 u32 bpp, depth;
109 u32 size; 144 u32 size;
110 struct drm_gem_object *gobj; 145 struct drm_gem_object *gobj;
111
112 int ret = 0; 146 int ret = 0;
113 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
114 147
115 size = mode_cmd->pitches[0] * mode_cmd->height; 148 size = mode_cmd->pitches[0] * mode_cmd->height;
116 ret = mgag200_gem_create(dev, size, true, &gobj); 149 ret = mgag200_gem_create(dev, size, true, &gobj);
@@ -249,19 +282,19 @@ int mgag200_fbdev_init(struct mga_device *mdev)
249 struct mga_fbdev *mfbdev; 282 struct mga_fbdev *mfbdev;
250 int ret; 283 int ret;
251 284
252 mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL); 285 mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
253 if (!mfbdev) 286 if (!mfbdev)
254 return -ENOMEM; 287 return -ENOMEM;
255 288
256 mdev->mfbdev = mfbdev; 289 mdev->mfbdev = mfbdev;
257 mfbdev->helper.funcs = &mga_fb_helper_funcs; 290 mfbdev->helper.funcs = &mga_fb_helper_funcs;
291 spin_lock_init(&mfbdev->dirty_lock);
258 292
259 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, 293 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
260 mdev->num_crtc, MGAG200FB_CONN_LIMIT); 294 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
261 if (ret) { 295 if (ret)
262 kfree(mfbdev);
263 return ret; 296 return ret;
264 } 297
265 drm_fb_helper_single_add_all_connectors(&mfbdev->helper); 298 drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
266 299
267 /* disable all the possible outputs/crtcs before entering KMS mode */ 300 /* disable all the possible outputs/crtcs before entering KMS mode */
@@ -278,6 +311,4 @@ void mgag200_fbdev_fini(struct mga_device *mdev)
278 return; 311 return;
279 312
280 mga_fbdev_destroy(mdev->dev, mdev->mfbdev); 313 mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
281 kfree(mdev->mfbdev);
282 mdev->mfbdev = NULL;
283} 314}
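
The scheme this file now implements: when the framebuffer BO cannot be reserved (-EBUSY means it is being moved), the damage rectangle is merged into a pending one under the new dirty_lock and flushed later; a successful update folds any pending damage in and resets the accumulator to an empty rect. A standalone model of that bookkeeping, with a pthread mutex standing in for the irq-safe spinlock:

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dirty {
	pthread_mutex_t lock;
	int x1, y1, x2, y2;	/* pending rect; x1 > x2 means empty */
};

static void dirty_update(struct dirty *d, int x, int y, int w, int h, bool busy)
{
	int x2 = x + w - 1, y2 = y + h - 1;

	pthread_mutex_lock(&d->lock);
	/* grow the request to cover anything already pending */
	if (d->x1 < x) x = d->x1;
	if (d->y1 < y) y = d->y1;
	if (d->x2 > x2) x2 = d->x2;
	if (d->y2 > y2) y2 = d->y2;

	if (busy) {		/* BO reserve failed: store for later */
		d->x1 = x; d->y1 = y; d->x2 = x2; d->y2 = y2;
		pthread_mutex_unlock(&d->lock);
		return;
	}
	d->x1 = d->y1 = INT_MAX;	/* pending damage consumed */
	d->x2 = d->y2 = 0;
	pthread_mutex_unlock(&d->lock);

	printf("flush rows %d..%d cols %d..%d\n", y, y2, x, x2);
}

int main(void)
{
	struct dirty d = { PTHREAD_MUTEX_INITIALIZER, INT_MAX, INT_MAX, 0, 0 };

	dirty_update(&d, 10, 10, 4, 4, true);	/* stored while BO is busy */
	dirty_update(&d, 0, 0, 2, 2, false);	/* flushes merged 0..13 rect */
	return 0;
}
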
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 64297c72464f..99059237da38 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -76,15 +76,6 @@ static const struct drm_mode_config_funcs mga_mode_funcs = {
76 .fb_create = mgag200_user_framebuffer_create, 76 .fb_create = mgag200_user_framebuffer_create,
77}; 77};
78 78
79/* Unmap the framebuffer from the core and release the memory */
80static void mga_vram_fini(struct mga_device *mdev)
81{
82 pci_iounmap(mdev->dev->pdev, mdev->rmmio);
83 mdev->rmmio = NULL;
84 if (mdev->mc.vram_base)
85 release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
86}
87
88static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem) 79static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
89{ 80{
90 int offset; 81 int offset;
@@ -140,7 +131,7 @@ static int mga_vram_init(struct mga_device *mdev)
140 remove_conflicting_framebuffers(aper, "mgafb", true); 131 remove_conflicting_framebuffers(aper, "mgafb", true);
141 kfree(aper); 132 kfree(aper);
142 133
143 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, 134 if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
144 "mgadrmfb_vram")) { 135 "mgadrmfb_vram")) {
145 DRM_ERROR("can't reserve VRAM\n"); 136 DRM_ERROR("can't reserve VRAM\n");
146 return -ENXIO; 137 return -ENXIO;
@@ -173,13 +164,13 @@ static int mgag200_device_init(struct drm_device *dev,
173 mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1); 164 mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
174 mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1); 165 mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
175 166
176 if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size, 167 if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
177 "mgadrmfb_mmio")) { 168 "mgadrmfb_mmio")) {
178 DRM_ERROR("can't reserve mmio registers\n"); 169 DRM_ERROR("can't reserve mmio registers\n");
179 return -ENOMEM; 170 return -ENOMEM;
180 } 171 }
181 172
182 mdev->rmmio = pci_iomap(dev->pdev, 1, 0); 173 mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
183 if (mdev->rmmio == NULL) 174 if (mdev->rmmio == NULL)
184 return -ENOMEM; 175 return -ENOMEM;
185 176
@@ -188,10 +179,8 @@ static int mgag200_device_init(struct drm_device *dev,
188 mdev->reg_1e24 = RREG32(0x1e24); 179 mdev->reg_1e24 = RREG32(0x1e24);
189 180
190 ret = mga_vram_init(mdev); 181 ret = mga_vram_init(mdev);
191 if (ret) { 182 if (ret)
192 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
193 return ret; 183 return ret;
194 }
195 184
196 mdev->bpp_shifts[0] = 0; 185 mdev->bpp_shifts[0] = 0;
197 mdev->bpp_shifts[1] = 1; 186 mdev->bpp_shifts[1] = 1;
@@ -200,12 +189,6 @@ static int mgag200_device_init(struct drm_device *dev,
200 return 0; 189 return 0;
201} 190}
202 191
203void mgag200_device_fini(struct mga_device *mdev)
204{
205 release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
206 mga_vram_fini(mdev);
207}
208
209/* 192/*
210 * Functions here will be called by the core once it's bound the driver to 193 * Functions here will be called by the core once it's bound the driver to
211 * a PCI device 194 * a PCI device
@@ -217,7 +200,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
217 struct mga_device *mdev; 200 struct mga_device *mdev;
218 int r; 201 int r;
219 202
220 mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL); 203 mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
221 if (mdev == NULL) 204 if (mdev == NULL)
222 return -ENOMEM; 205 return -ENOMEM;
223 dev->dev_private = (void *)mdev; 206 dev->dev_private = (void *)mdev;
@@ -234,8 +217,6 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
234 217
235 drm_mode_config_init(dev); 218 drm_mode_config_init(dev);
236 dev->mode_config.funcs = (void *)&mga_mode_funcs; 219 dev->mode_config.funcs = (void *)&mga_mode_funcs;
237 dev->mode_config.min_width = 0;
238 dev->mode_config.min_height = 0;
239 dev->mode_config.preferred_depth = 24; 220 dev->mode_config.preferred_depth = 24;
240 dev->mode_config.prefer_shadow = 1; 221 dev->mode_config.prefer_shadow = 1;
241 222
@@ -258,8 +239,6 @@ int mgag200_driver_unload(struct drm_device *dev)
258 mgag200_fbdev_fini(mdev); 239 mgag200_fbdev_fini(mdev);
259 drm_mode_config_cleanup(dev); 240 drm_mode_config_cleanup(dev);
260 mgag200_mm_fini(mdev); 241 mgag200_mm_fini(mdev);
261 mgag200_device_fini(mdev);
262 kfree(mdev);
263 dev->dev_private = NULL; 242 dev->dev_private = NULL;
264 return 0; 243 return 0;
265} 244}
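
The devm_*/pcim_* conversions tie each resource to the device's lifetime, so the manual release_mem_region()/kfree() unwind paths and the _fini helpers simply disappear. A toy managed allocator showing the idea; this is not the kernel devres API:

#include <stdlib.h>

struct devres { void *ptr; struct devres *next; };
struct device { struct devres *res; };

static void *devm_alloc(struct device *dev, size_t n)
{
	struct devres *r = malloc(sizeof(*r));
	if (!r)
		return NULL;
	r->ptr = calloc(1, n);
	if (!r->ptr) {
		free(r);
		return NULL;
	}
	r->next = dev->res;
	dev->res = r;		/* remembered for automatic release */
	return r->ptr;
}

static void device_release(struct device *dev)
{
	while (dev->res) {	/* reverse order of acquisition */
		struct devres *r = dev->res;
		dev->res = r->next;
		free(r->ptr);
		free(r);
	}
}
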
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fe22bb780e1d..77b8a45fb10a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
46 46
47static inline void mga_wait_vsync(struct mga_device *mdev) 47static inline void mga_wait_vsync(struct mga_device *mdev)
48{ 48{
49 unsigned int count = 0; 49 unsigned long timeout = jiffies + HZ/10;
50 unsigned int status = 0; 50 unsigned int status = 0;
51 51
52 do { 52 do {
53 status = RREG32(MGAREG_Status); 53 status = RREG32(MGAREG_Status);
54 count++; 54 } while ((status & 0x08) && time_before(jiffies, timeout));
55 } while ((status & 0x08) && (count < 250000)); 55 timeout = jiffies + HZ/10;
56 count = 0;
57 status = 0; 56 status = 0;
58 do { 57 do {
59 status = RREG32(MGAREG_Status); 58 status = RREG32(MGAREG_Status);
60 count++; 59 } while (!(status & 0x08) && time_before(jiffies, timeout));
61 } while (!(status & 0x08) && (count < 250000));
62} 60}
63 61
64static inline void mga_wait_busy(struct mga_device *mdev) 62static inline void mga_wait_busy(struct mga_device *mdev)
65{ 63{
66 unsigned int count = 0; 64 unsigned long timeout = jiffies + HZ;
67 unsigned int status = 0; 65 unsigned int status = 0;
68 do { 66 do {
69 status = RREG8(MGAREG_Status + 2); 67 status = RREG8(MGAREG_Status + 2);
70 count++; 68 } while ((status & 0x01) && time_before(jiffies, timeout));
71 } while ((status & 0x01) && (count < 500000));
72} 69}
73 70
74/* 71/*
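
The rewritten waits replace iteration counting with a jiffies deadline: 250000 spins means a different wall-clock time on every CPU, while time_before(jiffies, timeout) bounds the poll in real time. A userspace sketch of the same pattern, with clock_gettime() standing in for jiffies:

#include <stdbool.h>
#include <time.h>

static bool before_deadline(const struct timespec *dl)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec < dl->tv_sec ||
	       (now.tv_sec == dl->tv_sec && now.tv_nsec < dl->tv_nsec);
}

static void wait_flag_clear(volatile unsigned *reg, unsigned bit)
{
	struct timespec dl;

	clock_gettime(CLOCK_MONOTONIC, &dl);
	dl.tv_nsec += 100 * 1000000L;		/* HZ/10, ~100ms budget */
	if (dl.tv_nsec >= 1000000000L) {
		dl.tv_sec++;
		dl.tv_nsec -= 1000000000L;
	}
	while ((*reg & bit) && before_deadline(&dl))
		;				/* poll, bounded in time */
}
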
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
189 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 186 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
190 tmp = RREG8(DAC_DATA); 187 tmp = RREG8(DAC_DATA);
191 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 188 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
192 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 189 WREG8(DAC_DATA, tmp);
193 190
194 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 191 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
195 tmp = RREG8(DAC_DATA); 192 tmp = RREG8(DAC_DATA);
196 tmp |= MGA1064_REMHEADCTL_CLKDIS; 193 tmp |= MGA1064_REMHEADCTL_CLKDIS;
197 WREG_DAC(MGA1064_REMHEADCTL, tmp); 194 WREG8(DAC_DATA, tmp);
198 195
199 /* select PLL Set C */ 196 /* select PLL Set C */
200 tmp = RREG8(MGAREG_MEM_MISC_READ); 197 tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
204 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 201 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
205 tmp = RREG8(DAC_DATA); 202 tmp = RREG8(DAC_DATA);
206 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; 203 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
207 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 204 WREG8(DAC_DATA, tmp);
208 205
209 udelay(500); 206 udelay(500);
210 207
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
212 WREG8(DAC_INDEX, MGA1064_VREF_CTL); 209 WREG8(DAC_INDEX, MGA1064_VREF_CTL);
213 tmp = RREG8(DAC_DATA); 210 tmp = RREG8(DAC_DATA);
214 tmp &= ~0x04; 211 tmp &= ~0x04;
215 WREG_DAC(MGA1064_VREF_CTL, tmp); 212 WREG8(DAC_DATA, tmp);
216 213
217 udelay(50); 214 udelay(50);
218 215
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
236 tmp = RREG8(DAC_DATA); 233 tmp = RREG8(DAC_DATA);
237 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 234 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
238 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 235 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
239 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 236 WREG8(DAC_DATA, tmp);
240 237
241 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 238 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
242 tmp = RREG8(DAC_DATA); 239 tmp = RREG8(DAC_DATA);
243 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; 240 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
244 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; 241 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
245 WREG_DAC(MGA1064_REMHEADCTL, tmp); 242 WREG8(DAC_DATA, tmp);
246 243
247 /* reset dotclock rate bit */ 244 /* reset dotclock rate bit */
248 WREG8(MGAREG_SEQ_INDEX, 1); 245 WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
253 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 250 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
254 tmp = RREG8(DAC_DATA); 251 tmp = RREG8(DAC_DATA);
255 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 252 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
256 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 253 WREG8(DAC_DATA, tmp);
257 254
258 vcount = RREG8(MGAREG_VCOUNT); 255 vcount = RREG8(MGAREG_VCOUNT);
259 256
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
318 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 315 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
319 tmp = RREG8(DAC_DATA); 316 tmp = RREG8(DAC_DATA);
320 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 317 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
321 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 318 WREG8(DAC_DATA, tmp);
322 319
323 tmp = RREG8(MGAREG_MEM_MISC_READ); 320 tmp = RREG8(MGAREG_MEM_MISC_READ);
324 tmp |= 0x3 << 2; 321 tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
326 323
327 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 324 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
328 tmp = RREG8(DAC_DATA); 325 tmp = RREG8(DAC_DATA);
329 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); 326 WREG8(DAC_DATA, tmp & ~0x40);
330 327
331 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 328 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
332 tmp = RREG8(DAC_DATA); 329 tmp = RREG8(DAC_DATA);
333 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 330 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
334 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 331 WREG8(DAC_DATA, tmp);
335 332
336 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); 333 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
337 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); 334 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
342 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 339 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
343 tmp = RREG8(DAC_DATA); 340 tmp = RREG8(DAC_DATA);
344 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 341 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
345 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 342 WREG8(DAC_DATA, tmp);
346 343
347 udelay(500); 344 udelay(500);
348 345
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
350 tmp = RREG8(DAC_DATA); 347 tmp = RREG8(DAC_DATA);
351 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 348 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
352 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 349 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
353 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 350 WREG8(DAC_DATA, tmp);
354 351
355 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 352 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
356 tmp = RREG8(DAC_DATA); 353 tmp = RREG8(DAC_DATA);
357 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); 354 WREG8(DAC_DATA, tmp | 0x40);
358 355
359 tmp = RREG8(MGAREG_MEM_MISC_READ); 356 tmp = RREG8(MGAREG_MEM_MISC_READ);
360 tmp |= (0x3 << 2); 357 tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
363 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 360 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
364 tmp = RREG8(DAC_DATA); 361 tmp = RREG8(DAC_DATA);
365 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 362 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
366 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 363 WREG8(DAC_DATA, tmp);
367 364
368 return 0; 365 return 0;
369} 366}
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
416 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 413 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
417 tmp = RREG8(DAC_DATA); 414 tmp = RREG8(DAC_DATA);
418 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 415 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
419 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 416 WREG8(DAC_DATA, tmp);
420 417
421 tmp = RREG8(MGAREG_MEM_MISC_READ); 418 tmp = RREG8(MGAREG_MEM_MISC_READ);
422 tmp |= 0x3 << 2; 419 tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
425 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 422 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
426 tmp = RREG8(DAC_DATA); 423 tmp = RREG8(DAC_DATA);
427 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 424 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
428 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 425 WREG8(DAC_DATA, tmp);
429 426
430 udelay(500); 427 udelay(500);
431 428
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
439 tmp = RREG8(DAC_DATA); 436 tmp = RREG8(DAC_DATA);
440 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 437 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
441 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 438 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
442 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 439 WREG8(DAC_DATA, tmp);
443 440
444 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 441 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
445 tmp = RREG8(DAC_DATA); 442 tmp = RREG8(DAC_DATA);
446 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 443 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
447 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 444 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
448 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 445 WREG8(DAC_DATA, tmp);
449 446
450 vcount = RREG8(MGAREG_VCOUNT); 447 vcount = RREG8(MGAREG_VCOUNT);
451 448
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
515 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 512 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
516 tmp = RREG8(DAC_DATA); 513 tmp = RREG8(DAC_DATA);
517 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 514 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
518 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 515 WREG8(DAC_DATA, tmp);
519 516
520 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 517 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
521 tmp = RREG8(DAC_DATA); 518 tmp = RREG8(DAC_DATA);
522 tmp |= MGA1064_REMHEADCTL_CLKDIS; 519 tmp |= MGA1064_REMHEADCTL_CLKDIS;
523 WREG_DAC(MGA1064_REMHEADCTL, tmp); 520 WREG8(DAC_DATA, tmp);
524 521
525 tmp = RREG8(MGAREG_MEM_MISC_READ); 522 tmp = RREG8(MGAREG_MEM_MISC_READ);
526 tmp |= (0x3<<2) | 0xc0; 523 tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
530 tmp = RREG8(DAC_DATA); 527 tmp = RREG8(DAC_DATA);
531 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 528 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
532 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 529 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
533 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 530 WREG8(DAC_DATA, tmp);
534 531
535 udelay(500); 532 udelay(500);
536 533
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
657 WREG_DAC(MGA1064_GEN_IO_DATA, tmp); 654 WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
658} 655}
659 656
660 657/*
 658 This is how the framebuffer base address is stored in G200 cards:
 659 * Assume @offset is the gpu_addr variable of the framebuffer object.
 660 * Then addr is the number of _pixels_ (not bytes) from the start of
 661 VRAM to the first pixel we want to display, divided by 2 for 32-bit
 662 framebuffers (the hardware counts in 64-bit units, i.e. offset / 8).
 663 * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers:
 664 addr<20> -> CRTCEXT0<6>
 665 addr<19-16> -> CRTCEXT0<3-0>
 666 addr<15-8> -> CRTCC<7-0>
 667 addr<7-0> -> CRTCD<7-0>
 668 CRTCEXT0 has to be programmed last to trigger an update and make the
 669 new address take effect.
 670 */
661void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) 671void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
662{ 672{
663 struct mga_device *mdev = crtc->dev->dev_private; 673 struct mga_device *mdev = crtc->dev->dev_private;
664 u32 addr; 674 u32 addr;
665 int count; 675 int count;
676 u8 crtcext0;
666 677
667 while (RREG8(0x1fda) & 0x08); 678 while (RREG8(0x1fda) & 0x08);
668 while (!(RREG8(0x1fda) & 0x08)); 679 while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
670 count = RREG8(MGAREG_VCOUNT) + 2; 681 count = RREG8(MGAREG_VCOUNT) + 2;
671 while (RREG8(MGAREG_VCOUNT) < count); 682 while (RREG8(MGAREG_VCOUNT) < count);
672 683
673 addr = offset >> 2; 684 WREG8(MGAREG_CRTCEXT_INDEX, 0);
685 crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
686 crtcext0 &= 0xB0;
687 addr = offset / 8;
 688 /* Addresses wider than 21 bits can't be stored, but these cards
 689 have at most 16MB of VRAM, so the WARN_ON should never trigger. */
690 WARN_ON(addr > 0x1fffff);
691 crtcext0 |= (!!(addr & (1<<20)))<<6;
674 WREG_CRT(0x0d, (u8)(addr & 0xff)); 692 WREG_CRT(0x0d, (u8)(addr & 0xff));
675 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); 693 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
676 WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); 694 WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
677} 695}
678 696
679 697
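For reference, the bitfield split described in the comment above can be written as a small standalone helper. This is only an illustrative sketch — the struct and function names are not part of the driver:

    #include <stdint.h>

    struct g200_start_addr {
        uint8_t crtcd;    /* CRT index 0x0d: addr<7-0> */
        uint8_t crtcc;    /* CRT index 0x0c: addr<15-8> */
        uint8_t crtcext0; /* ECRT index 0x00: addr<19-16> in bits 3-0,
                           * addr<20> in bit 6, other bits preserved */
    };

    /* Split a byte offset into the three register values, preserving the
     * non-address bits (mask 0xb0) of the current CRTCEXT0 contents and
     * using the same offset / 8 scaling as mga_set_start_address(). */
    static struct g200_start_addr g200_split_offset(unsigned offset,
                                                    uint8_t cur_ext0)
    {
        uint32_t addr = offset / 8; /* hardware counts in 64-bit units */
        struct g200_start_addr r;

        r.crtcd = addr & 0xff;
        r.crtcc = (addr >> 8) & 0xff;
        r.crtcext0 = (cur_ext0 & 0xb0) |
                     ((addr >> 16) & 0x0f) |
                     (((addr >> 20) & 1) << 6);
        return r;
    }

Writing CRTCD, then CRTCC, then CRTCEXT0 matches the ordering constraint stated in the comment: the CRTCEXT0 write is what latches the new start address.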
@@ -751,8 +769,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
751 int i; 769 int i;
752 unsigned char misc = 0; 770 unsigned char misc = 0;
753 unsigned char ext_vga[6]; 771 unsigned char ext_vga[6];
754 unsigned char ext_vga_index24;
755 unsigned char dac_index90 = 0;
756 u8 bppshift; 772 u8 bppshift;
757 773
758 static unsigned char dacvalue[] = { 774 static unsigned char dacvalue[] = {
@@ -803,7 +819,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
803 option2 = 0x0000b000; 819 option2 = 0x0000b000;
804 break; 820 break;
805 case G200_ER: 821 case G200_ER:
806 dac_index90 = 0;
807 break; 822 break;
808 } 823 }
809 824
@@ -832,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
832 847
833 848
834 for (i = 0; i < sizeof(dacvalue); i++) { 849 for (i = 0; i < sizeof(dacvalue); i++) {
835 if ((i <= 0x03) || 850 if ((i <= 0x17) ||
836 (i == 0x07) ||
837 (i == 0x0b) ||
838 (i == 0x0f) ||
839 ((i >= 0x13) && (i <= 0x17)) ||
840 (i == 0x1b) || 851 (i == 0x1b) ||
841 (i == 0x1c) || 852 (i == 0x1c) ||
842 ((i >= 0x1f) && (i <= 0x29)) || 853 ((i >= 0x1f) && (i <= 0x29)) ||
@@ -852,10 +863,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
852 WREG_DAC(i, dacvalue[i]); 863 WREG_DAC(i, dacvalue[i]);
853 } 864 }
854 865
855 if (mdev->type == G200_ER) { 866 if (mdev->type == G200_ER)
856 WREG_DAC(0x90, dac_index90); 867 WREG_DAC(0x90, 0);
857 }
858
859 868
860 if (option) 869 if (option)
861 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); 870 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -952,8 +961,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
952 if (mdev->type == G200_WB) 961 if (mdev->type == G200_WB)
953 ext_vga[1] |= 0x88; 962 ext_vga[1] |= 0x88;
954 963
955 ext_vga_index24 = 0x05;
956
957 /* Set pixel clocks */ 964 /* Set pixel clocks */
958 misc = 0x2d; 965 misc = 0x2d;
959 WREG8(MGA_MISC_OUT, misc); 966 WREG8(MGA_MISC_OUT, misc);
@@ -965,7 +972,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
965 } 972 }
966 973
967 if (mdev->type == G200_ER) 974 if (mdev->type == G200_ER)
968 WREG_ECRT(24, ext_vga_index24); 975 WREG_ECRT(0x24, 0x5);
969 976
970 if (mdev->type == G200_EV) { 977 if (mdev->type == G200_EV) {
971 WREG_ECRT(6, 0); 978 WREG_ECRT(6, 0);
@@ -1261,9 +1268,8 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
1261}; 1268};
1262 1269
1263/* CRTC setup */ 1270/* CRTC setup */
1264static void mga_crtc_init(struct drm_device *dev) 1271static void mga_crtc_init(struct mga_device *mdev)
1265{ 1272{
1266 struct mga_device *mdev = dev->dev_private;
1267 struct mga_crtc *mga_crtc; 1273 struct mga_crtc *mga_crtc;
1268 int i; 1274 int i;
1269 1275
@@ -1274,7 +1280,7 @@ static void mga_crtc_init(struct drm_device *dev)
1274 if (mga_crtc == NULL) 1280 if (mga_crtc == NULL)
1275 return; 1281 return;
1276 1282
1277 drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs); 1283 drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
1278 1284
1279 drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE); 1285 drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
1280 mdev->mode_info.crtc = mga_crtc; 1286 mdev->mode_info.crtc = mga_crtc;
@@ -1529,7 +1535,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
1529 1535
1530 mdev->dev->mode_config.fb_base = mdev->mc.vram_base; 1536 mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
1531 1537
1532 mga_crtc_init(mdev->dev); 1538 mga_crtc_init(mdev);
1533 1539
1534 encoder = mga_encoder_init(mdev->dev); 1540 encoder = mga_encoder_init(mdev->dev);
1535 if (!encoder) { 1541 if (!encoder) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d9201945..401c9891d3a8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
315 315
316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); 316 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
317 if (ret) { 317 if (ret) {
318 if (ret != -ERESTARTSYS) 318 if (ret != -ERESTARTSYS && ret != -EBUSY)
319 DRM_ERROR("reserve failed %p\n", bo); 319 DRM_ERROR("reserve failed %p %d\n", bo, ret);
320 return ret; 320 return ret;
321 } 321 }
322 return 0; 322 return 0;
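The hunk above widens the set of "expected" reserve failures: -ERESTARTSYS (interrupted by a signal) stays silent, -EBUSY (contention when no_wait is set) is now silent too, and anything else is logged together with the error code. As a shape, with the TTM call stubbed out (try_reserve is a stand-in, not the real API):

    #include <errno.h>
    #include <stdio.h>

    /* stub standing in for ttm_bo_reserve() */
    static int try_reserve(void *bo, int no_wait)
    {
        (void)bo; (void)no_wait;
        return 0;
    }

    static int bo_reserve(void *bo, int no_wait)
    {
        int ret = try_reserve(bo, no_wait);

        if (ret) {
            /* signals and expected contention are not worth logging */
            if (ret != -ERESTARTSYS && ret != -EBUSY)
                fprintf(stderr, "reserve failed %p %d\n", bo, ret);
            return ret;
        }
        return 0;
    }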
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 90f9140eeefd..998e8b4444f3 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -53,15 +53,6 @@ nouveau-y += core/subdev/clock/nva3.o
53nouveau-y += core/subdev/clock/nvc0.o 53nouveau-y += core/subdev/clock/nvc0.o
54nouveau-y += core/subdev/clock/pllnv04.o 54nouveau-y += core/subdev/clock/pllnv04.o
55nouveau-y += core/subdev/clock/pllnva3.o 55nouveau-y += core/subdev/clock/pllnva3.o
56nouveau-y += core/subdev/device/base.o
57nouveau-y += core/subdev/device/nv04.o
58nouveau-y += core/subdev/device/nv10.o
59nouveau-y += core/subdev/device/nv20.o
60nouveau-y += core/subdev/device/nv30.o
61nouveau-y += core/subdev/device/nv40.o
62nouveau-y += core/subdev/device/nv50.o
63nouveau-y += core/subdev/device/nvc0.o
64nouveau-y += core/subdev/device/nve0.o
65nouveau-y += core/subdev/devinit/base.o 56nouveau-y += core/subdev/devinit/base.o
66nouveau-y += core/subdev/devinit/nv04.o 57nouveau-y += core/subdev/devinit/nv04.o
67nouveau-y += core/subdev/devinit/nv05.o 58nouveau-y += core/subdev/devinit/nv05.o
@@ -126,6 +117,7 @@ nouveau-y += core/subdev/therm/ic.o
126nouveau-y += core/subdev/therm/temp.o 117nouveau-y += core/subdev/therm/temp.o
127nouveau-y += core/subdev/therm/nv40.o 118nouveau-y += core/subdev/therm/nv40.o
128nouveau-y += core/subdev/therm/nv50.o 119nouveau-y += core/subdev/therm/nv50.o
120nouveau-y += core/subdev/therm/nv84.o
129nouveau-y += core/subdev/therm/nva3.o 121nouveau-y += core/subdev/therm/nva3.o
130nouveau-y += core/subdev/therm/nvd0.o 122nouveau-y += core/subdev/therm/nvd0.o
131nouveau-y += core/subdev/timer/base.o 123nouveau-y += core/subdev/timer/base.o
@@ -150,6 +142,15 @@ nouveau-y += core/engine/copy/nvc0.o
150nouveau-y += core/engine/copy/nve0.o 142nouveau-y += core/engine/copy/nve0.o
151nouveau-y += core/engine/crypt/nv84.o 143nouveau-y += core/engine/crypt/nv84.o
152nouveau-y += core/engine/crypt/nv98.o 144nouveau-y += core/engine/crypt/nv98.o
145nouveau-y += core/engine/device/base.o
146nouveau-y += core/engine/device/nv04.o
147nouveau-y += core/engine/device/nv10.o
148nouveau-y += core/engine/device/nv20.o
149nouveau-y += core/engine/device/nv30.o
150nouveau-y += core/engine/device/nv40.o
151nouveau-y += core/engine/device/nv50.o
152nouveau-y += core/engine/device/nvc0.o
153nouveau-y += core/engine/device/nve0.o
153nouveau-y += core/engine/disp/base.o 154nouveau-y += core/engine/disp/base.o
154nouveau-y += core/engine/disp/nv04.o 155nouveau-y += core/engine/disp/nv04.o
155nouveau-y += core/engine/disp/nv50.o 156nouveau-y += core/engine/disp/nv50.o
@@ -159,6 +160,7 @@ nouveau-y += core/engine/disp/nva0.o
159nouveau-y += core/engine/disp/nva3.o 160nouveau-y += core/engine/disp/nva3.o
160nouveau-y += core/engine/disp/nvd0.o 161nouveau-y += core/engine/disp/nvd0.o
161nouveau-y += core/engine/disp/nve0.o 162nouveau-y += core/engine/disp/nve0.o
163nouveau-y += core/engine/disp/nvf0.o
162nouveau-y += core/engine/disp/dacnv50.o 164nouveau-y += core/engine/disp/dacnv50.o
163nouveau-y += core/engine/disp/dport.o 165nouveau-y += core/engine/disp/dport.o
164nouveau-y += core/engine/disp/hdanva3.o 166nouveau-y += core/engine/disp/hdanva3.o
@@ -212,7 +214,7 @@ nouveau-y += core/engine/vp/nve0.o
212 214
213# drm/core 215# drm/core
214nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o 216nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
215nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o 217nouveau-y += nouveau_vga.o nouveau_agp.o
216nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o 218nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
217nouveau-y += nouveau_prime.o nouveau_abi16.o 219nouveau-y += nouveau_prime.o nouveau_abi16.o
218nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o 220nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
@@ -224,9 +226,7 @@ nouveau-y += nouveau_connector.o nouveau_dp.o
224nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o 226nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
225 227
226# drm/kms/nv04:nv50 228# drm/kms/nv04:nv50
227nouveau-y += nouveau_hw.o nouveau_calc.o 229include $(src)/dispnv04/Makefile
228nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
229nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
230 230
231# drm/kms/nv50- 231# drm/kms/nv50-
232nouveau-y += nv50_display.o 232nouveau-y += nv50_display.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 295c22165eac..9079c0ac58e6 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -27,7 +27,7 @@
27#include <core/handle.h> 27#include <core/handle.h>
28#include <core/option.h> 28#include <core/option.h>
29 29
30#include <subdev/device.h> 30#include <engine/device.h>
31 31
32static void 32static void
33nouveau_client_dtor(struct nouveau_object *object) 33nouveau_client_dtor(struct nouveau_object *object)
@@ -58,8 +58,9 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
58 return -ENODEV; 58 return -ENODEV;
59 59
60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass, 60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
61 NV_CLIENT_CLASS, nouveau_device_sclass, 61 NV_CLIENT_CLASS, NULL,
62 0, length, pobject); 62 (1ULL << NVDEV_ENGINE_DEVICE),
63 length, pobject);
63 client = *pobject; 64 client = *pobject;
64 if (ret) 65 if (ret)
65 return ret; 66 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index 09b3bd502fd0..c8bed4a26833 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -33,7 +33,6 @@ nouveau_engine_create_(struct nouveau_object *parent,
33 const char *iname, const char *fname, 33 const char *iname, const char *fname,
34 int length, void **pobject) 34 int length, void **pobject)
35{ 35{
36 struct nouveau_device *device = nv_device(parent);
37 struct nouveau_engine *engine; 36 struct nouveau_engine *engine;
38 int ret; 37 int ret;
39 38
@@ -43,7 +42,8 @@ nouveau_engine_create_(struct nouveau_object *parent,
43 if (ret) 42 if (ret)
44 return ret; 43 return ret;
45 44
46 if (!nouveau_boolopt(device->cfgopt, iname, enable)) { 45 if ( parent &&
46 !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
47 if (!enable) 47 if (!enable)
48 nv_warn(engine, "disabled, %s=1 to enable\n", iname); 48 nv_warn(engine, "disabled, %s=1 to enable\n", iname);
49 return -ENODEV; 49 return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 6d01e0f0fc8a..7eb81c1b6fab 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -27,8 +27,10 @@ static void
27nouveau_event_put_locked(struct nouveau_event *event, int index, 27nouveau_event_put_locked(struct nouveau_event *event, int index,
28 struct nouveau_eventh *handler) 28 struct nouveau_eventh *handler)
29{ 29{
30 if (!--event->index[index].refs) 30 if (!--event->index[index].refs) {
31 event->disable(event, index); 31 if (event->disable)
32 event->disable(event, index);
33 }
32 list_del(&handler->head); 34 list_del(&handler->head);
33} 35}
34 36
@@ -53,8 +55,10 @@ nouveau_event_get(struct nouveau_event *event, int index,
53 spin_lock_irqsave(&event->lock, flags); 55 spin_lock_irqsave(&event->lock, flags);
54 if (index < event->index_nr) { 56 if (index < event->index_nr) {
55 list_add(&handler->head, &event->index[index].list); 57 list_add(&handler->head, &event->index[index].list);
56 if (!event->index[index].refs++) 58 if (!event->index[index].refs++) {
57 event->enable(event, index); 59 if (event->enable)
60 event->enable(event, index);
61 }
58 } 62 }
59 spin_unlock_irqrestore(&event->lock, flags); 63 spin_unlock_irqrestore(&event->lock, flags);
60} 64}
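The two hunks above make the enable/disable callbacks optional while keeping the existing refcounting: the hardware source is enabled only on the 0 -> 1 transition and disabled only on the 1 -> 0 transition. A minimal sketch of that pattern, with illustrative names:

    struct evt_index {
        int refs;
        void (*enable)(int index);  /* may be NULL */
        void (*disable)(int index); /* may be NULL */
    };

    /* first subscriber turns the source on */
    static void evt_get(struct evt_index *e, int index)
    {
        if (!e->refs++ && e->enable)
            e->enable(index);
    }

    /* last subscriber turns it back off */
    static void evt_put(struct evt_index *e, int index)
    {
        if (!--e->refs && e->disable)
            e->disable(index);
    }

Locking is omitted here; the driver itself takes event->lock around both paths.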
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 3b2e7b6304d3..7f48e288215f 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -136,26 +136,30 @@ nouveau_object_ctor(struct nouveau_object *parent,
136 struct nouveau_object **pobject) 136 struct nouveau_object **pobject)
137{ 137{
138 struct nouveau_ofuncs *ofuncs = oclass->ofuncs; 138 struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
139 struct nouveau_object *object = NULL;
139 int ret; 140 int ret;
140 141
141 *pobject = NULL; 142 ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
142 143 *pobject = object;
143 ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
144 if (ret < 0) { 144 if (ret < 0) {
145 if (ret != -ENODEV) { 145 if (ret != -ENODEV) {
146 nv_error(parent, "failed to create 0x%08x, %d\n", 146 nv_error(parent, "failed to create 0x%08x, %d\n",
147 oclass->handle, ret); 147 oclass->handle, ret);
148 } 148 }
149 149
150 if (*pobject) { 150 if (object) {
151 ofuncs->dtor(*pobject); 151 ofuncs->dtor(object);
152 *pobject = NULL; 152 *pobject = NULL;
153 } 153 }
154 154
155 return ret; 155 return ret;
156 } 156 }
157 157
158 nv_debug(*pobject, "created\n"); 158 if (ret == 0) {
159 nv_debug(object, "created\n");
160 atomic_set(&object->refcount, 1);
161 }
162
159 return 0; 163 return 0;
160} 164}
161 165
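The rewritten constructor wrapper above establishes a three-way contract: a negative return is an error (any partially built object is destroyed and *pobject cleared), zero means a fresh object whose refcount is initialized here, and a positive return (see the later nv50_disp_data_ctor change in this series, which returns 1 after handing back its parent) means an existing, already-refcounted object was reused and must be left alone. A condensed sketch with placeholder types:

    struct obj { int refcount; };

    static void obj_destroy(struct obj *o) { (void)o; /* free partial state */ }

    /* ctor returns <0 on error, 0 for a new object, >0 when an existing,
     * already-refcounted object was handed back */
    static int obj_construct(int (*ctor)(struct obj **), struct obj **pobject)
    {
        struct obj *object = NULL;
        int ret = ctor(&object);

        *pobject = object;
        if (ret < 0) {
            if (object) {
                obj_destroy(object);
                *pobject = NULL;
            }
            return ret;
        }
        if (ret == 0)
            object->refcount = 1; /* fresh object: take the first reference */
        return 0;
    }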
@@ -327,6 +331,7 @@ nouveau_object_inc(struct nouveau_object *object)
327 } 331 }
328 332
329 ret = nv_ofuncs(object)->init(object); 333 ret = nv_ofuncs(object)->init(object);
334 atomic_set(&object->usecount, 1);
330 if (ret) { 335 if (ret) {
331 nv_error(object, "init failed, %d\n", ret); 336 nv_error(object, "init failed, %d\n", ret);
332 goto fail_self; 337 goto fail_self;
@@ -357,6 +362,7 @@ nouveau_object_decf(struct nouveau_object *object)
357 nv_trace(object, "stopping...\n"); 362 nv_trace(object, "stopping...\n");
358 363
359 ret = nv_ofuncs(object)->fini(object, false); 364 ret = nv_ofuncs(object)->fini(object, false);
365 atomic_set(&object->usecount, 0);
360 if (ret) 366 if (ret)
361 nv_warn(object, "failed fini, %d\n", ret); 367 nv_warn(object, "failed fini, %d\n", ret);
362 368
@@ -381,6 +387,7 @@ nouveau_object_decs(struct nouveau_object *object)
381 nv_trace(object, "suspending...\n"); 387 nv_trace(object, "suspending...\n");
382 388
383 ret = nv_ofuncs(object)->fini(object, true); 389 ret = nv_ofuncs(object)->fini(object, true);
390 atomic_set(&object->usecount, 0);
384 if (ret) { 391 if (ret) {
385 nv_error(object, "failed suspend, %d\n", ret); 392 nv_error(object, "failed suspend, %d\n", ret);
386 return ret; 393 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index db7c54943102..313380ce632d 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -24,6 +24,7 @@
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/parent.h> 26#include <core/parent.h>
27#include <core/client.h>
27 28
28int 29int
29nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, 30nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
@@ -50,7 +51,12 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
50 while (mask) { 51 while (mask) {
51 int i = ffsll(mask) - 1; 52 int i = ffsll(mask) - 1;
52 53
53 if ((engine = nouveau_engine(parent, i))) { 54 if (nv_iclass(parent, NV_CLIENT_CLASS))
55 engine = nv_engine(nv_client(parent)->device);
56 else
57 engine = nouveau_engine(parent, i);
58
59 if (engine) {
54 oclass = engine->sclass; 60 oclass = engine->sclass;
55 while (oclass->ofuncs) { 61 while (oclass->ofuncs) {
56 if ((oclass->handle & 0xffff) == handle) { 62 if ((oclass->handle & 0xffff) == handle) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 3937ced5c753..4c72571655ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
29 29
30#include <core/class.h> 30#include <core/class.h>
31 31
32#include <subdev/device.h> 32#include <engine/device.h>
33 33
34static DEFINE_MUTEX(nv_devices_mutex); 34static DEFINE_MUTEX(nv_devices_mutex);
35static LIST_HEAD(nv_devices); 35static LIST_HEAD(nv_devices);
@@ -55,7 +55,6 @@ nouveau_device_find(u64 name)
55struct nouveau_devobj { 55struct nouveau_devobj {
56 struct nouveau_parent base; 56 struct nouveau_parent base;
57 struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; 57 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
58 bool created;
59}; 58};
60 59
61static const u64 disable_map[] = { 60static const u64 disable_map[] = {
@@ -173,7 +172,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
173 case 0xa0: device->card_type = NV_50; break; 172 case 0xa0: device->card_type = NV_50; break;
174 case 0xc0: device->card_type = NV_C0; break; 173 case 0xc0: device->card_type = NV_C0; break;
175 case 0xd0: device->card_type = NV_D0; break; 174 case 0xd0: device->card_type = NV_D0; break;
176 case 0xe0: device->card_type = NV_E0; break; 175 case 0xe0:
176 case 0xf0: device->card_type = NV_E0; break;
177 default: 177 default:
178 break; 178 break;
179 } 179 }
@@ -238,26 +238,24 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
238 } 238 }
239 239
240 /* ensure requested subsystems are available for use */ 240 /* ensure requested subsystems are available for use */
241 for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) { 241 for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
242 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i))) 242 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
243 continue; 243 continue;
244 244
245 if (!device->subdev[i]) { 245 if (device->subdev[i]) {
246 ret = nouveau_object_ctor(nv_object(device), NULL,
247 oclass, NULL, i,
248 &devobj->subdev[i]);
249 if (ret == -ENODEV)
250 continue;
251 if (ret)
252 return ret;
253
254 if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
255 nouveau_subdev_reset(devobj->subdev[i]);
256 } else {
257 nouveau_object_ref(device->subdev[i], 246 nouveau_object_ref(device->subdev[i],
258 &devobj->subdev[i]); 247 &devobj->subdev[i]);
248 continue;
259 } 249 }
260 250
251 ret = nouveau_object_ctor(nv_object(device), NULL,
252 oclass, NULL, i,
253 &devobj->subdev[i]);
254 if (ret == -ENODEV)
255 continue;
256 if (ret)
257 return ret;
258
261 /* note: can't init *any* subdevs until devinit has been run 259 /* note: can't init *any* subdevs until devinit has been run
262 * due to not knowing exactly what the vbios init tables will 260 * due to not knowing exactly what the vbios init tables will
263 * mess with. devinit also can't be run until all of its 261 * mess with. devinit also can't be run until all of its
@@ -273,6 +271,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
273 ret = nouveau_object_inc(subdev); 271 ret = nouveau_object_inc(subdev);
274 if (ret) 272 if (ret)
275 return ret; 273 return ret;
274 atomic_dec(&nv_object(device)->usecount);
275 } else
276 if (subdev) {
277 nouveau_subdev_reset(subdev);
276 } 278 }
277 } 279 }
278 } 280 }
@@ -292,74 +294,6 @@ nouveau_devobj_dtor(struct nouveau_object *object)
292 nouveau_parent_destroy(&devobj->base); 294 nouveau_parent_destroy(&devobj->base);
293} 295}
294 296
295static int
296nouveau_devobj_init(struct nouveau_object *object)
297{
298 struct nouveau_devobj *devobj = (void *)object;
299 struct nouveau_object *subdev;
300 int ret, i;
301
302 ret = nouveau_parent_init(&devobj->base);
303 if (ret)
304 return ret;
305
306 for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
307 if ((subdev = devobj->subdev[i])) {
308 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
309 ret = nouveau_object_inc(subdev);
310 if (ret)
311 goto fail;
312 }
313 }
314 }
315
316 devobj->created = true;
317 return 0;
318
319fail:
320 for (--i; i >= 0; i--) {
321 if ((subdev = devobj->subdev[i])) {
322 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
323 nouveau_object_dec(subdev, false);
324 }
325 }
326
327 return ret;
328}
329
330static int
331nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
332{
333 struct nouveau_devobj *devobj = (void *)object;
334 struct nouveau_object *subdev;
335 int ret, i;
336
337 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
338 if ((subdev = devobj->subdev[i])) {
339 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
340 ret = nouveau_object_dec(subdev, suspend);
341 if (ret && suspend)
342 goto fail;
343 }
344 }
345 }
346
347 ret = nouveau_parent_fini(&devobj->base, suspend);
348fail:
349 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
350 if ((subdev = devobj->subdev[i])) {
351 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
352 ret = nouveau_object_inc(subdev);
353 if (ret) {
354 /* XXX */
355 }
356 }
357 }
358 }
359
360 return ret;
361}
362
363static u8 297static u8
364nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) 298nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
365{ 299{
@@ -400,8 +334,8 @@ static struct nouveau_ofuncs
400nouveau_devobj_ofuncs = { 334nouveau_devobj_ofuncs = {
401 .ctor = nouveau_devobj_ctor, 335 .ctor = nouveau_devobj_ctor,
402 .dtor = nouveau_devobj_dtor, 336 .dtor = nouveau_devobj_dtor,
403 .init = nouveau_devobj_init, 337 .init = _nouveau_parent_init,
404 .fini = nouveau_devobj_fini, 338 .fini = _nouveau_parent_fini,
405 .rd08 = nouveau_devobj_rd08, 339 .rd08 = nouveau_devobj_rd08,
406 .rd16 = nouveau_devobj_rd16, 340 .rd16 = nouveau_devobj_rd16,
407 .rd32 = nouveau_devobj_rd32, 341 .rd32 = nouveau_devobj_rd32,
@@ -413,12 +347,76 @@ nouveau_devobj_ofuncs = {
413/****************************************************************************** 347/******************************************************************************
414 * nouveau_device: engine functions 348 * nouveau_device: engine functions
415 *****************************************************************************/ 349 *****************************************************************************/
416struct nouveau_oclass 350static struct nouveau_oclass
417nouveau_device_sclass[] = { 351nouveau_device_sclass[] = {
418 { 0x0080, &nouveau_devobj_ofuncs }, 352 { 0x0080, &nouveau_devobj_ofuncs },
419 {} 353 {}
420}; 354};
421 355
356static int
357nouveau_device_fini(struct nouveau_object *object, bool suspend)
358{
359 struct nouveau_device *device = (void *)object;
360 struct nouveau_object *subdev;
361 int ret, i;
362
363 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
364 if ((subdev = device->subdev[i])) {
365 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
366 ret = nouveau_object_dec(subdev, suspend);
367 if (ret && suspend)
368 goto fail;
369 }
370 }
371 }
372
373 ret = 0;
374fail:
375 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
376 if ((subdev = device->subdev[i])) {
377 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
378 ret = nouveau_object_inc(subdev);
379 if (ret) {
380 /* XXX */
381 }
382 }
383 }
384 }
385
386 return ret;
387}
388
389static int
390nouveau_device_init(struct nouveau_object *object)
391{
392 struct nouveau_device *device = (void *)object;
393 struct nouveau_object *subdev;
394 int ret, i;
395
396 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
397 if ((subdev = device->subdev[i])) {
398 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
399 ret = nouveau_object_inc(subdev);
400 if (ret)
401 goto fail;
402 } else {
403 nouveau_subdev_reset(subdev);
404 }
405 }
406 }
407
408 ret = 0;
409fail:
410 for (--i; ret && i >= 0; i--) {
411 if ((subdev = device->subdev[i])) {
412 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
413 nouveau_object_dec(subdev, false);
414 }
415 }
416
417 return ret;
418}
419
422static void 420static void
423nouveau_device_dtor(struct nouveau_object *object) 421nouveau_device_dtor(struct nouveau_object *object)
424{ 422{
@@ -428,17 +426,19 @@ nouveau_device_dtor(struct nouveau_object *object)
428 list_del(&device->head); 426 list_del(&device->head);
429 mutex_unlock(&nv_devices_mutex); 427 mutex_unlock(&nv_devices_mutex);
430 428
431 if (device->base.mmio) 429 if (nv_subdev(device)->mmio)
432 iounmap(device->base.mmio); 430 iounmap(nv_subdev(device)->mmio);
433 431
434 nouveau_subdev_destroy(&device->base); 432 nouveau_engine_destroy(&device->base);
435} 433}
436 434
437static struct nouveau_oclass 435static struct nouveau_oclass
438nouveau_device_oclass = { 436nouveau_device_oclass = {
439 .handle = NV_SUBDEV(DEVICE, 0x00), 437 .handle = NV_ENGINE(DEVICE, 0x00),
440 .ofuncs = &(struct nouveau_ofuncs) { 438 .ofuncs = &(struct nouveau_ofuncs) {
441 .dtor = nouveau_device_dtor, 439 .dtor = nouveau_device_dtor,
440 .init = nouveau_device_init,
441 .fini = nouveau_device_fini,
442 }, 442 },
443}; 443};
444 444
@@ -456,13 +456,12 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
456 goto done; 456 goto done;
457 } 457 }
458 458
459 ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0, 459 ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
460 "DEVICE", "device", length, pobject); 460 "DEVICE", "device", length, pobject);
461 device = *pobject; 461 device = *pobject;
462 if (ret) 462 if (ret)
463 goto done; 463 goto done;
464 464
465 atomic_set(&nv_object(device)->usecount, 2);
466 device->pdev = pdev; 465 device->pdev = pdev;
467 device->handle = name; 466 device->handle = name;
468 device->cfgopt = cfg; 467 device->cfgopt = cfg;
@@ -470,6 +469,7 @@ nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
470 device->name = sname; 469 device->name = sname;
471 470
472 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); 471 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
472 nv_engine(device)->sclass = nouveau_device_sclass;
473 list_add(&device->head, &nv_devices); 473 list_add(&device->head, &nv_devices);
474done: 474done:
475 mutex_unlock(&nv_devices_mutex); 475 mutex_unlock(&nv_devices_mutex);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 473c5c03d3c9..a0284cf09c0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/i2c.h> 27#include <subdev/i2c.h>
@@ -34,6 +33,7 @@
34#include <subdev/instmem.h> 33#include <subdev/instmem.h>
35#include <subdev/vm.h> 34#include <subdev/vm.h>
36 35
36#include <engine/device.h>
37#include <engine/dmaobj.h> 37#include <engine/dmaobj.h>
38#include <engine/fifo.h> 38#include <engine/fifo.h>
39#include <engine/software.h> 39#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index d0774f5bebe1..1b7809a095c3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
35#include <subdev/instmem.h> 34#include <subdev/instmem.h>
36#include <subdev/vm.h> 35#include <subdev/vm.h>
37 36
37#include <engine/device.h>
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40#include <engine/software.h> 40#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index ab920e0dc45b..12a4005fa619 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -36,6 +35,7 @@
36#include <subdev/instmem.h> 35#include <subdev/instmem.h>
37#include <subdev/vm.h> 36#include <subdev/vm.h>
38 37
38#include <engine/device.h>
39#include <engine/dmaobj.h> 39#include <engine/dmaobj.h>
40#include <engine/fifo.h> 40#include <engine/fifo.h>
41#include <engine/software.h> 41#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 5f2110261b04..cef0f1ea4c21 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -35,6 +34,7 @@
35#include <subdev/instmem.h> 34#include <subdev/instmem.h>
36#include <subdev/vm.h> 35#include <subdev/vm.h>
37 36
37#include <engine/device.h>
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40#include <engine/software.h> 40#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index f3d55efe9ac9..1719cb0ee595 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/vm.h> 27#include <subdev/vm.h>
@@ -37,6 +36,7 @@
37#include <subdev/instmem.h> 36#include <subdev/instmem.h>
38#include <subdev/vm.h> 37#include <subdev/vm.h>
39 38
39#include <engine/device.h>
40#include <engine/dmaobj.h> 40#include <engine/dmaobj.h>
41#include <engine/fifo.h> 41#include <engine/fifo.h>
42#include <engine/software.h> 42#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 5ed2fa51ddc2..5e8c3de75593 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -38,6 +37,7 @@
38#include <subdev/vm.h> 37#include <subdev/vm.h>
39#include <subdev/bar.h> 38#include <subdev/bar.h>
40 39
40#include <engine/device.h>
41#include <engine/dmaobj.h> 41#include <engine/dmaobj.h>
42#include <engine/fifo.h> 42#include <engine/fifo.h>
43#include <engine/software.h> 43#include <engine/software.h>
@@ -83,7 +83,7 @@ nv50_identify(struct nouveau_device *device)
83 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 83 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
84 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 84 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
85 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 85 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
86 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 86 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
87 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 87 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
88 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 88 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
89 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 89 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -109,7 +109,7 @@ nv50_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 109 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
112 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 112 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
113 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 113 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
114 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 114 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 115 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -135,7 +135,7 @@ nv50_identify(struct nouveau_device *device)
135 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 135 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
136 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 136 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
137 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 137 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
138 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 138 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
139 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 139 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
140 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 140 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
141 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 141 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -161,7 +161,7 @@ nv50_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 161 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
162 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 162 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
163 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 163 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
164 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 164 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
165 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 165 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 167 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -187,7 +187,7 @@ nv50_identify(struct nouveau_device *device)
187 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 187 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
188 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 188 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
190 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 190 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
192 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 192 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
193 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 193 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
@@ -213,7 +213,7 @@ nv50_identify(struct nouveau_device *device)
213 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 213 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
214 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 214 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
215 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 215 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -265,7 +265,7 @@ nv50_identify(struct nouveau_device *device)
265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
@@ -291,7 +291,7 @@ nv50_identify(struct nouveau_device *device)
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 4393eb4d6564..955af122c3a6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
40#include <subdev/vm.h> 39#include <subdev/vm.h>
41#include <subdev/bar.h> 40#include <subdev/bar.h>
42 41
42#include <engine/device.h>
43#include <engine/dmaobj.h> 43#include <engine/dmaobj.h>
44#include <engine/fifo.h> 44#include <engine/fifo.h>
45#include <engine/software.h> 45#include <engine/software.h>
@@ -285,6 +285,34 @@ nvc0_identify(struct nouveau_device *device)
285 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 285 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
286 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 286 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
287 break; 287 break;
288 case 0xd7:
289 device->cname = "GF117";
290 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
299 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
300 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
301 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
302 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
303 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
304 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
305 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
306 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
307 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
308 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
309 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
310 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
311 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
312 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
313 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
314 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
315 break;
288 default: 316 default:
289 nv_fatal(device, "unknown Fermi chipset\n"); 317 nv_fatal(device, "unknown Fermi chipset\n");
290 return -EINVAL; 318 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 5c12391619fd..a354e409cdff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -22,7 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/device.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bus.h> 26#include <subdev/bus.h>
28#include <subdev/gpio.h> 27#include <subdev/gpio.h>
@@ -40,6 +39,7 @@
40#include <subdev/vm.h> 39#include <subdev/vm.h>
41#include <subdev/bar.h> 40#include <subdev/bar.h>
42 41
42#include <engine/device.h>
43#include <engine/dmaobj.h> 43#include <engine/dmaobj.h>
44#include <engine/fifo.h> 44#include <engine/fifo.h>
45#include <engine/software.h> 45#include <engine/software.h>
@@ -141,6 +141,40 @@ nve0_identify(struct nouveau_device *device)
141 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 141 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
142 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 142 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
143 break; 143 break;
144 case 0xf0:
145 device->cname = "GK110";
146 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
147 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
148 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
149 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
150 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
151 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
152 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
153 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
154 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
155 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
156 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
157 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
158 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
159 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
160 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
161 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
163#if 0
164 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
165 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
166 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
167#endif
168 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
169#if 0
170 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
171 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
172 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
173 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
174 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
175 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
176#endif
177 break;
144 default: 178 default:
145 nv_fatal(device, "unknown Kepler chipset\n"); 179 nv_fatal(device, "unknown Kepler chipset\n");
146 return -EINVAL; 180 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index fa27b02ff829..31cc8fe8e7f0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -191,7 +191,7 @@ dp_link_train_cr(struct dp_state *dp)
191static int 191static int
192dp_link_train_eq(struct dp_state *dp) 192dp_link_train_eq(struct dp_state *dp)
193{ 193{
194 bool eq_done, cr_done = true; 194 bool eq_done = false, cr_done = true;
195 int tries = 0, i; 195 int tries = 0, i;
196 196
197 dp_set_training_pattern(dp, 2); 197 dp_set_training_pattern(dp, 2);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 02e369f80449..6a38402fa56c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -572,7 +572,8 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
572 priv->base.vblank->priv = priv; 572 priv->base.vblank->priv = priv;
573 priv->base.vblank->enable = nv50_disp_base_vblank_enable; 573 priv->base.vblank->enable = nv50_disp_base_vblank_enable;
574 priv->base.vblank->disable = nv50_disp_base_vblank_disable; 574 priv->base.vblank->disable = nv50_disp_base_vblank_disable;
575 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); 575 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
576 &base->ramht);
576} 577}
577 578
578static void 579static void
@@ -719,7 +720,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
719 if (nv_mclass(parent) != NV_DEVICE_CLASS) { 720 if (nv_mclass(parent) != NV_DEVICE_CLASS) {
720 atomic_inc(&parent->refcount); 721 atomic_inc(&parent->refcount);
721 *pobject = parent; 722 *pobject = parent;
722 return 0; 723 return 1;
723 } 724 }
724 725
725 /* allocate display hardware to client */ 726 /* allocate display hardware to client */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 788dd34ccb54..019eacd8a68f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -473,7 +473,8 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
473 priv->base.vblank->enable = nvd0_disp_base_vblank_enable; 473 priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
474 priv->base.vblank->disable = nvd0_disp_base_vblank_disable; 474 priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
475 475
476 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); 476 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
477 &base->ramht);
477} 478}
478 479
479static void 480static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 000000000000..a488c36e40f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nvf0_disp_sclass[] = {
34 { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
35 { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
36 { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
37 { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
38 { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nvf0_disp_base_oclass[] = {
44 { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
45 {}
46};
47
48static int
49nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int heads = nv_rd32(parent, 0x022448);
55 int ret;
56
57 ret = nouveau_disp_create(parent, engine, oclass, heads,
58 "PDISP", "display", &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 nv_engine(priv)->sclass = nvf0_disp_base_oclass;
64 nv_engine(priv)->cclass = &nv50_disp_cclass;
65 nv_subdev(priv)->intr = nvd0_disp_intr;
66 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
67 priv->sclass = nvf0_disp_sclass;
68 priv->head.nr = heads;
69 priv->dac.nr = 3;
70 priv->sor.nr = 4;
71 priv->dac.power = nv50_dac_power;
72 priv->dac.sense = nv50_dac_sense;
73 priv->sor.power = nv50_sor_power;
74 priv->sor.hda_eld = nvd0_hda_eld;
75 priv->sor.hdmi = nvd0_hdmi_ctrl;
76 priv->sor.dp = &nvd0_sor_dp_func;
77 return 0;
78}
79
80struct nouveau_oclass
81nvf0_disp_oclass = {
82 .handle = NV_ENGINE(DISP, 0x92),
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nvf0_disp_ctor,
85 .dtor = _nouveau_disp_dtor,
86 .init = _nouveau_disp_init,
87 .fini = _nouveau_disp_fini,
88 },
89};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index d1528752980c..944e73ac485c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -50,6 +50,9 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
50 case NVE0_DISP_MAST_CLASS: 50 case NVE0_DISP_MAST_CLASS:
51 case NVE0_DISP_SYNC_CLASS: 51 case NVE0_DISP_SYNC_CLASS:
52 case NVE0_DISP_OVLY_CLASS: 52 case NVE0_DISP_OVLY_CLASS:
53 case NVF0_DISP_MAST_CLASS:
54 case NVF0_DISP_SYNC_CLASS:
55 case NVF0_DISP_OVLY_CLASS:
53 break; 56 break;
54 default: 57 default:
55 return -EINVAL; 58 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 7341ebe131fa..d3ec436d9cb5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -91,6 +91,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
91 if (!chan->user) 91 if (!chan->user)
92 return -EFAULT; 92 return -EFAULT;
93 93
94 nouveau_event_trigger(priv->cevent, 0);
95
94 chan->size = size; 96 chan->size = size;
95 return 0; 97 return 0;
96} 98}
@@ -167,6 +169,7 @@ nouveau_fifo_destroy(struct nouveau_fifo *priv)
167{ 169{
168 kfree(priv->channel); 170 kfree(priv->channel);
169 nouveau_event_destroy(&priv->uevent); 171 nouveau_event_destroy(&priv->uevent);
172 nouveau_event_destroy(&priv->cevent);
170 nouveau_engine_destroy(&priv->base); 173 nouveau_engine_destroy(&priv->base);
171} 174}
172 175
@@ -191,6 +194,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
191 if (!priv->channel) 194 if (!priv->channel)
192 return -ENOMEM; 195 return -ENOMEM;
193 196
197 ret = nouveau_event_create(1, &priv->cevent);
198 if (ret)
199 return ret;
200
194 ret = nouveau_event_create(1, &priv->uevent); 201 ret = nouveau_event_create(1, &priv->uevent);
195 if (ret) 202 if (ret)
196 return ret; 203 return ret;
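The new cevent mirrors the lifecycle of the existing uevent: created in nouveau_fifo_create_(), fired from nouveau_fifo_channel_create_(), destroyed in nouveau_fifo_destroy(). A minimal sketch of that pattern, assuming <core/event.h> and using only the nouveau_event_* calls visible in the hunks above (the wrapper function is hypothetical):

    /* hedged sketch: the create/trigger/destroy lifecycle of cevent */
    static int example_cevent_lifecycle(void)
    {
            struct nouveau_event *cevent;
            int ret;

            ret = nouveau_event_create(1, &cevent);  /* one event index */
            if (ret)
                    return ret;

            nouveau_event_trigger(cevent, 0);        /* "a channel was created" */

            nouveau_event_destroy(&cevent);
            return 0;
    }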
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 840af6172788..ddaeb5572903 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -210,7 +210,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
210 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 210 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
211 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 211 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
212 212
213 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 213 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
214 &chan->ramht);
214 if (ret) 215 if (ret)
215 return ret; 216 return ret;
216 217
@@ -263,7 +264,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
263 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 264 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
264 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 265 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
265 266
266 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 267 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
268 &chan->ramht);
267 if (ret) 269 if (ret)
268 return ret; 270 return ret;
269 271
@@ -373,17 +375,17 @@ nv50_fifo_context_ctor(struct nouveau_object *parent,
373 if (ret) 375 if (ret)
374 return ret; 376 return ret;
375 377
376 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000, 378 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
377 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); 379 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
378 if (ret) 380 if (ret)
379 return ret; 381 return ret;
380 382
381 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0, 383 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
382 NVOBJ_FLAG_ZERO_ALLOC, &base->eng); 384 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
383 if (ret) 385 if (ret)
384 return ret; 386 return ret;
385 387
386 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0, 388 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
387 &base->pgd); 389 &base->pgd);
388 if (ret) 390 if (ret)
389 return ret; 391 return ret;
@@ -437,12 +439,12 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
437 if (ret) 439 if (ret)
438 return ret; 440 return ret;
439 441
440 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 442 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
441 &priv->playlist[0]); 443 &priv->playlist[0]);
442 if (ret) 444 if (ret)
443 return ret; 445 return ret;
444 446
445 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 447 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
446 &priv->playlist[1]); 448 &priv->playlist[1]);
447 if (ret) 449 if (ret)
448 return ret; 450 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 094000e87871..35b94bd18808 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -180,7 +180,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
180 if (ret) 180 if (ret)
181 return ret; 181 return ret;
182 182
183 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 183 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
184 &chan->ramht);
184 if (ret) 185 if (ret)
185 return ret; 186 return ret;
186 187
@@ -242,7 +243,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
242 if (ret) 243 if (ret)
243 return ret; 244 return ret;
244 245
245 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht); 246 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
247 &chan->ramht);
246 if (ret) 248 if (ret)
247 return ret; 249 return ret;
248 250
@@ -336,12 +338,12 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
336 if (ret) 338 if (ret)
337 return ret; 339 return ret;
338 340
339 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0, 341 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
340 NVOBJ_FLAG_ZERO_ALLOC, &base->eng); 342 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
341 if (ret) 343 if (ret)
342 return ret; 344 return ret;
343 345
344 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 346 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
345 0, &base->pgd); 347 0, &base->pgd);
346 if (ret) 348 if (ret)
347 return ret; 349 return ret;
@@ -350,13 +352,13 @@ nv84_fifo_context_ctor(struct nouveau_object *parent,
350 if (ret) 352 if (ret)
351 return ret; 353 return ret;
352 354
353 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400, 355 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
354 NVOBJ_FLAG_ZERO_ALLOC, &base->cache); 356 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
355 if (ret) 357 if (ret)
356 return ret; 358 return ret;
357 359
358 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100, 360 ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
359 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); 361 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
360 if (ret) 362 if (ret)
361 return ret; 363 return ret;
362 364
@@ -407,12 +409,12 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
407 if (ret) 409 if (ret)
408 return ret; 410 return ret;
409 411
410 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 412 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
411 &priv->playlist[0]); 413 &priv->playlist[0]);
412 if (ret) 414 if (ret)
413 return ret; 415 return ret;
414 416
415 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0, 417 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
416 &priv->playlist[1]); 418 &priv->playlist[1]);
417 if (ret) 419 if (ret)
418 return ret; 420 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 4f226afb5591..4d4a6b905370 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -292,7 +292,8 @@ nvc0_fifo_context_ctor(struct nouveau_object *parent,
292 if (ret) 292 if (ret)
293 return ret; 293 return ret;
294 294
295 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd); 295 ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
296 &base->pgd);
296 if (ret) 297 if (ret)
297 return ret; 298 return ret;
298 299
@@ -623,17 +624,17 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
623 if (ret) 624 if (ret)
624 return ret; 625 return ret;
625 626
626 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0, 627 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
627 &priv->playlist[0]); 628 &priv->playlist[0]);
628 if (ret) 629 if (ret)
629 return ret; 630 return ret;
630 631
631 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0, 632 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
632 &priv->playlist[1]); 633 &priv->playlist[1]);
633 if (ret) 634 if (ret)
634 return ret; 635 return ret;
635 636
636 ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0, 637 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
637 &priv->user.mem); 638 &priv->user.mem);
638 if (ret) 639 if (ret)
639 return ret; 640 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 4419e40d88e9..9151919fb831 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -96,7 +96,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
96 96
97 cur = engn->playlist[engn->cur_playlist]; 97 cur = engn->playlist[engn->cur_playlist];
98 if (unlikely(cur == NULL)) { 98 if (unlikely(cur == NULL)) {
99 int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL, 99 int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
100 0x8000, 0x1000, 0, &cur); 100 0x8000, 0x1000, 0, &cur);
101 if (ret) { 101 if (ret) {
102 nv_error(priv, "playlist alloc failed\n"); 102 nv_error(priv, "playlist alloc failed\n");
@@ -333,7 +333,8 @@ nve0_fifo_context_ctor(struct nouveau_object *parent,
333 if (ret) 333 if (ret)
334 return ret; 334 return ret;
335 335
336 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd); 336 ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
337 &base->pgd);
337 if (ret) 338 if (ret)
338 return ret; 339 return ret;
339 340
@@ -595,7 +596,7 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
595 if (ret) 596 if (ret)
596 return ret; 597 return ret;
597 598
598 ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000, 599 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
599 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); 600 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
600 if (ret) 601 if (ret)
601 return ret; 602 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 0b7951a85943..4cc6269d4077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -36,7 +36,6 @@ int
36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
37{ 37{
38 struct nouveau_bar *bar = nouveau_bar(priv); 38 struct nouveau_bar *bar = nouveau_bar(priv);
39 struct nouveau_object *parent = nv_object(priv);
40 struct nouveau_gpuobj *chan; 39 struct nouveau_gpuobj *chan;
41 u32 size = (0x80000 + priv->size + 4095) & ~4095; 40 u32 size = (0x80000 + priv->size + 4095) & ~4095;
42 int ret, i; 41 int ret, i;
@@ -44,7 +43,7 @@ nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 44 /* allocate memory for a "channel", which we'll use to generate 43 /* allocate memory for a "channel", which we'll use to generate
45 * the default context values 44 * the default context values
46 */ 45 */
47 ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000, 46 ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000,
48 NVOBJ_FLAG_ZERO_ALLOC, &info->chan); 47 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
49 chan = info->chan; 48 chan = info->chan;
50 if (ret) { 49 if (ret) {
@@ -1399,7 +1398,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1399{ 1398{
1400 int i; 1399 int i;
1401 1400
1402 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) { 1401 for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
1403 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000); 1402 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1404 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000); 1403 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1405 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000); 1404 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1415,7 +1414,7 @@ nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1415 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000); 1414 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
1416 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000); 1415 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
1417 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000); 1416 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
1418 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) { 1417 for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
1419 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000); 1418 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1420 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000); 1419 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1421 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040); 1420 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1615,7 +1614,7 @@ static void
1615nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv) 1614nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
1616{ 1615{
1617 1616
1618 if (nv_device(priv)->chipset == 0xd9) { 1617 if (nv_device(priv)->chipset >= 0xd0) {
1619 nv_wr32(priv, 0x405800, 0x0f8000bf); 1618 nv_wr32(priv, 0x405800, 0x0f8000bf);
1620 nv_wr32(priv, 0x405830, 0x02180218); 1619 nv_wr32(priv, 0x405830, 0x02180218);
1621 nv_wr32(priv, 0x405834, 0x08000000); 1620 nv_wr32(priv, 0x405834, 0x08000000);
@@ -1658,10 +1657,10 @@ nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
1658 nv_wr32(priv, 0x4064ac, 0x00003fff); 1657 nv_wr32(priv, 0x4064ac, 0x00003fff);
1659 nv_wr32(priv, 0x4064b4, 0x00000000); 1658 nv_wr32(priv, 0x4064b4, 0x00000000);
1660 nv_wr32(priv, 0x4064b8, 0x00000000); 1659 nv_wr32(priv, 0x4064b8, 0x00000000);
1661 if (nv_device(priv)->chipset == 0xd9) 1660 if (nv_device(priv)->chipset >= 0xd0)
1662 nv_wr32(priv, 0x4064bc, 0x00000000); 1661 nv_wr32(priv, 0x4064bc, 0x00000000);
1663 if (nv_device(priv)->chipset == 0xc1 || 1662 if (nv_device(priv)->chipset == 0xc1 ||
1664 nv_device(priv)->chipset == 0xd9) { 1663 nv_device(priv)->chipset >= 0xd0) {
1665 nv_wr32(priv, 0x4064c0, 0x80140078); 1664 nv_wr32(priv, 0x4064c0, 0x80140078);
1666 nv_wr32(priv, 0x4064c4, 0x0086ffff); 1665 nv_wr32(priv, 0x4064c4, 0x0086ffff);
1667 } 1666 }
@@ -1701,7 +1700,7 @@ nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
1701 /* ROPC_BROADCAST */ 1700 /* ROPC_BROADCAST */
1702 nv_wr32(priv, 0x408800, 0x02802a3c); 1701 nv_wr32(priv, 0x408800, 0x02802a3c);
1703 nv_wr32(priv, 0x408804, 0x00000040); 1702 nv_wr32(priv, 0x408804, 0x00000040);
1704 if (chipset == 0xd9) { 1703 if (chipset >= 0xd0) {
1705 nv_wr32(priv, 0x408808, 0x1043e005); 1704 nv_wr32(priv, 0x408808, 0x1043e005);
1706 nv_wr32(priv, 0x408900, 0x3080b801); 1705 nv_wr32(priv, 0x408900, 0x3080b801);
1707 nv_wr32(priv, 0x408904, 0x1043e005); 1706 nv_wr32(priv, 0x408904, 0x1043e005);
@@ -1735,7 +1734,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1735 nv_wr32(priv, 0x418408, 0x00000000); 1734 nv_wr32(priv, 0x418408, 0x00000000);
1736 nv_wr32(priv, 0x41840c, 0x00001008); 1735 nv_wr32(priv, 0x41840c, 0x00001008);
1737 nv_wr32(priv, 0x418410, 0x0fff0fff); 1736 nv_wr32(priv, 0x418410, 0x0fff0fff);
1738 nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff); 1737 nv_wr32(priv, 0x418414, chipset < 0xd0 ? 0x00200fff : 0x02200fff);
1739 nv_wr32(priv, 0x418450, 0x00000000); 1738 nv_wr32(priv, 0x418450, 0x00000000);
1740 nv_wr32(priv, 0x418454, 0x00000000); 1739 nv_wr32(priv, 0x418454, 0x00000000);
1741 nv_wr32(priv, 0x418458, 0x00000000); 1740 nv_wr32(priv, 0x418458, 0x00000000);
@@ -1750,14 +1749,14 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1750 nv_wr32(priv, 0x418700, 0x00000002); 1749 nv_wr32(priv, 0x418700, 0x00000002);
1751 nv_wr32(priv, 0x418704, 0x00000080); 1750 nv_wr32(priv, 0x418704, 0x00000080);
1752 nv_wr32(priv, 0x418708, 0x00000000); 1751 nv_wr32(priv, 0x418708, 0x00000000);
1753 nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000); 1752 nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000);
1754 nv_wr32(priv, 0x418710, 0x00000000); 1753 nv_wr32(priv, 0x418710, 0x00000000);
1755 nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a); 1754 nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a);
1756 nv_wr32(priv, 0x418808, 0x00000000); 1755 nv_wr32(priv, 0x418808, 0x00000000);
1757 nv_wr32(priv, 0x41880c, 0x00000000); 1756 nv_wr32(priv, 0x41880c, 0x00000000);
1758 nv_wr32(priv, 0x418810, 0x00000000); 1757 nv_wr32(priv, 0x418810, 0x00000000);
1759 nv_wr32(priv, 0x418828, 0x00008442); 1758 nv_wr32(priv, 0x418828, 0x00008442);
1760 if (chipset == 0xc1 || chipset == 0xd9) 1759 if (chipset == 0xc1 || chipset >= 0xd0)
1761 nv_wr32(priv, 0x418830, 0x10000001); 1760 nv_wr32(priv, 0x418830, 0x10000001);
1762 else 1761 else
1763 nv_wr32(priv, 0x418830, 0x00000001); 1762 nv_wr32(priv, 0x418830, 0x00000001);
@@ -1768,7 +1767,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1768 nv_wr32(priv, 0x4188f0, 0x00000000); 1767 nv_wr32(priv, 0x4188f0, 0x00000000);
1769 nv_wr32(priv, 0x4188f4, 0x00000000); 1768 nv_wr32(priv, 0x4188f4, 0x00000000);
1770 nv_wr32(priv, 0x4188f8, 0x00000000); 1769 nv_wr32(priv, 0x4188f8, 0x00000000);
1771 if (chipset == 0xd9) 1770 if (chipset >= 0xd0)
1772 nv_wr32(priv, 0x4188fc, 0x20100008); 1771 nv_wr32(priv, 0x4188fc, 0x20100008);
1773 else if (chipset == 0xc1) 1772 else if (chipset == 0xc1)
1774 nv_wr32(priv, 0x4188fc, 0x00100018); 1773 nv_wr32(priv, 0x4188fc, 0x00100018);
@@ -1787,7 +1786,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1787 nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000); 1786 nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
1788 nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000); 1787 nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
1789 } 1788 }
1790 nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006); 1789 nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 0x00000000 : 0x00000006);
1791 nv_wr32(priv, 0x418b08, 0x0a418820); 1790 nv_wr32(priv, 0x418b08, 0x0a418820);
1792 nv_wr32(priv, 0x418b0c, 0x062080e6); 1791 nv_wr32(priv, 0x418b0c, 0x062080e6);
1793 nv_wr32(priv, 0x418b10, 0x020398a4); 1792 nv_wr32(priv, 0x418b10, 0x020398a4);
@@ -1804,7 +1803,7 @@ nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1804 nv_wr32(priv, 0x418c24, 0x00000000); 1803 nv_wr32(priv, 0x418c24, 0x00000000);
1805 nv_wr32(priv, 0x418c28, 0x00000000); 1804 nv_wr32(priv, 0x418c28, 0x00000000);
1806 nv_wr32(priv, 0x418c2c, 0x00000000); 1805 nv_wr32(priv, 0x418c2c, 0x00000000);
1807 if (chipset == 0xc1 || chipset == 0xd9) 1806 if (chipset == 0xc1 || chipset >= 0xd0)
1808 nv_wr32(priv, 0x418c6c, 0x00000001); 1807 nv_wr32(priv, 0x418c6c, 0x00000001);
1809 nv_wr32(priv, 0x418c80, 0x20200004); 1808 nv_wr32(priv, 0x418c80, 0x20200004);
1810 nv_wr32(priv, 0x418c8c, 0x00000001); 1809 nv_wr32(priv, 0x418c8c, 0x00000001);
@@ -1823,7 +1822,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1823 nv_wr32(priv, 0x419818, 0x00000000); 1822 nv_wr32(priv, 0x419818, 0x00000000);
1824 nv_wr32(priv, 0x41983c, 0x00038bc7); 1823 nv_wr32(priv, 0x41983c, 0x00038bc7);
1825 nv_wr32(priv, 0x419848, 0x00000000); 1824 nv_wr32(priv, 0x419848, 0x00000000);
1826 if (chipset == 0xc1 || chipset == 0xd9) 1825 if (chipset == 0xc1 || chipset >= 0xd0)
1827 nv_wr32(priv, 0x419864, 0x00000129); 1826 nv_wr32(priv, 0x419864, 0x00000129);
1828 else 1827 else
1829 nv_wr32(priv, 0x419864, 0x0000012a); 1828 nv_wr32(priv, 0x419864, 0x0000012a);
@@ -1836,7 +1835,7 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1836 nv_wr32(priv, 0x419a14, 0x00000200); 1835 nv_wr32(priv, 0x419a14, 0x00000200);
1837 nv_wr32(priv, 0x419a1c, 0x00000000); 1836 nv_wr32(priv, 0x419a1c, 0x00000000);
1838 nv_wr32(priv, 0x419a20, 0x00000800); 1837 nv_wr32(priv, 0x419a20, 0x00000800);
1839 if (chipset == 0xd9) 1838 if (chipset >= 0xd0)
1840 nv_wr32(priv, 0x00419ac4, 0x0017f440); 1839 nv_wr32(priv, 0x00419ac4, 0x0017f440);
1841 else if (chipset != 0xc0 && chipset != 0xc8) 1840 else if (chipset != 0xc0 && chipset != 0xc8)
1842 nv_wr32(priv, 0x00419ac4, 0x0007f440); 1841 nv_wr32(priv, 0x00419ac4, 0x0007f440);
@@ -1847,16 +1846,16 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1847 nv_wr32(priv, 0x419b10, 0x0a418820); 1846 nv_wr32(priv, 0x419b10, 0x0a418820);
1848 nv_wr32(priv, 0x419b14, 0x000000e6); 1847 nv_wr32(priv, 0x419b14, 0x000000e6);
1849 nv_wr32(priv, 0x419bd0, 0x00900103); 1848 nv_wr32(priv, 0x419bd0, 0x00900103);
1850 if (chipset == 0xc1 || chipset == 0xd9) 1849 if (chipset == 0xc1 || chipset >= 0xd0)
1851 nv_wr32(priv, 0x419be0, 0x00400001); 1850 nv_wr32(priv, 0x419be0, 0x00400001);
1852 else 1851 else
1853 nv_wr32(priv, 0x419be0, 0x00000001); 1852 nv_wr32(priv, 0x419be0, 0x00000001);
1854 nv_wr32(priv, 0x419be4, 0x00000000); 1853 nv_wr32(priv, 0x419be4, 0x00000000);
1855 nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a); 1854 nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 0x00000002 : 0x0000000a);
1856 nv_wr32(priv, 0x419c04, 0x00000006); 1855 nv_wr32(priv, 0x419c04, 0x00000006);
1857 nv_wr32(priv, 0x419c08, 0x00000002); 1856 nv_wr32(priv, 0x419c08, 0x00000002);
1858 nv_wr32(priv, 0x419c20, 0x00000000); 1857 nv_wr32(priv, 0x419c20, 0x00000000);
1859 if (nv_device(priv)->chipset == 0xd9) { 1858 if (nv_device(priv)->chipset >= 0xd0) {
1860 nv_wr32(priv, 0x419c24, 0x00084210); 1859 nv_wr32(priv, 0x419c24, 0x00084210);
1861 nv_wr32(priv, 0x419c28, 0x3cf3cf3c); 1860 nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
1862 nv_wr32(priv, 0x419cb0, 0x00020048); 1861 nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1868,12 +1867,12 @@ nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1868 } 1867 }
1869 nv_wr32(priv, 0x419ce8, 0x00000000); 1868 nv_wr32(priv, 0x419ce8, 0x00000000);
1870 nv_wr32(priv, 0x419cf4, 0x00000183); 1869 nv_wr32(priv, 0x419cf4, 0x00000183);
1871 if (chipset == 0xc1 || chipset == 0xd9) 1870 if (chipset == 0xc1 || chipset >= 0xd0)
1872 nv_wr32(priv, 0x419d20, 0x12180000); 1871 nv_wr32(priv, 0x419d20, 0x12180000);
1873 else 1872 else
1874 nv_wr32(priv, 0x419d20, 0x02180000); 1873 nv_wr32(priv, 0x419d20, 0x02180000);
1875 nv_wr32(priv, 0x419d24, 0x00001fff); 1874 nv_wr32(priv, 0x419d24, 0x00001fff);
1876 if (chipset == 0xc1 || chipset == 0xd9) 1875 if (chipset == 0xc1 || chipset >= 0xd0)
1877 nv_wr32(priv, 0x419d44, 0x02180218); 1876 nv_wr32(priv, 0x419d44, 0x02180218);
1878 nv_wr32(priv, 0x419e04, 0x00000000); 1877 nv_wr32(priv, 0x419e04, 0x00000000);
1879 nv_wr32(priv, 0x419e08, 0x00000000); 1878 nv_wr32(priv, 0x419e08, 0x00000000);
@@ -2210,7 +2209,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2210 nv_icmd(priv, 0x00000215, 0x00000040); 2209 nv_icmd(priv, 0x00000215, 0x00000040);
2211 nv_icmd(priv, 0x00000216, 0x00000040); 2210 nv_icmd(priv, 0x00000216, 0x00000040);
2212 nv_icmd(priv, 0x00000217, 0x00000040); 2211 nv_icmd(priv, 0x00000217, 0x00000040);
2213 if (nv_device(priv)->chipset == 0xd9) { 2212 if (nv_device(priv)->chipset >= 0xd0) {
2214 for (i = 0x0400; i <= 0x0417; i++) 2213 for (i = 0x0400; i <= 0x0417; i++)
2215 nv_icmd(priv, i, 0x00000040); 2214 nv_icmd(priv, i, 0x00000040);
2216 } 2215 }
@@ -2222,7 +2221,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2222 nv_icmd(priv, 0x0000021d, 0x0000c080); 2221 nv_icmd(priv, 0x0000021d, 0x0000c080);
2223 nv_icmd(priv, 0x0000021e, 0x0000c080); 2222 nv_icmd(priv, 0x0000021e, 0x0000c080);
2224 nv_icmd(priv, 0x0000021f, 0x0000c080); 2223 nv_icmd(priv, 0x0000021f, 0x0000c080);
2225 if (nv_device(priv)->chipset == 0xd9) { 2224 if (nv_device(priv)->chipset >= 0xd0) {
2226 for (i = 0x0440; i <= 0x0457; i++) 2225 for (i = 0x0440; i <= 0x0457; i++)
2227 nv_icmd(priv, i, 0x0000c080); 2226 nv_icmd(priv, i, 0x0000c080);
2228 } 2227 }
@@ -2789,7 +2788,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2789 nv_icmd(priv, 0x00000585, 0x0000003f); 2788 nv_icmd(priv, 0x00000585, 0x0000003f);
2790 nv_icmd(priv, 0x00000576, 0x00000003); 2789 nv_icmd(priv, 0x00000576, 0x00000003);
2791 if (nv_device(priv)->chipset == 0xc1 || 2790 if (nv_device(priv)->chipset == 0xc1 ||
2792 nv_device(priv)->chipset == 0xd9) 2791 nv_device(priv)->chipset >= 0xd0)
2793 nv_icmd(priv, 0x0000057b, 0x00000059); 2792 nv_icmd(priv, 0x0000057b, 0x00000059);
2794 nv_icmd(priv, 0x00000586, 0x00000040); 2793 nv_icmd(priv, 0x00000586, 0x00000040);
2795 nv_icmd(priv, 0x00000582, 0x00000080); 2794 nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2891,7 +2890,7 @@ nvc0_grctx_generate(struct nvc0_graph_priv *priv)
2891 nv_icmd(priv, 0x00000957, 0x00000003); 2890 nv_icmd(priv, 0x00000957, 0x00000003);
2892 nv_icmd(priv, 0x0000095e, 0x20164010); 2891 nv_icmd(priv, 0x0000095e, 0x20164010);
2893 nv_icmd(priv, 0x0000095f, 0x00000020); 2892 nv_icmd(priv, 0x0000095f, 0x00000020);
2894 if (nv_device(priv)->chipset == 0xd9) 2893 if (nv_device(priv)->chipset >= 0xd0)
2895 nv_icmd(priv, 0x0000097d, 0x00000020); 2894 nv_icmd(priv, 0x0000097d, 0x00000020);
2896 nv_icmd(priv, 0x00000683, 0x00000006); 2895 nv_icmd(priv, 0x00000683, 0x00000006);
2897 nv_icmd(priv, 0x00000685, 0x003fffff); 2896 nv_icmd(priv, 0x00000685, 0x003fffff);
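Every hunk in this file widens a GF119-only test (chipset == 0xd9) into a range check (chipset >= 0xd0) so that GF117 (0xd7), added elsewhere in this series, takes the same context-generation paths. Each rewrite reduces to the predicate below; the helper name is illustrative only:

    /* sketch: the effect of the relaxed chipset tests in this file */
    static bool takes_nvd9_path(u32 chipset)
    {
            return chipset >= 0xd0;   /* was: chipset == 0xd9 */
    }
    /* takes_nvd9_path(0xd9) stays true; takes_nvd9_path(0xd7) is now true too */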
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
index 6d8c63931ee6..ae27dae3fe38 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -2772,10 +2772,15 @@ nve0_grctx_generate(struct nvc0_graph_priv *priv)
2772 for (i = 0; i < 8; i++) 2772 for (i = 0; i < 8; i++)
2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
2774 2774
2775 nv_wr32(priv, 0x405b00, 0x201); 2775 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
2776 nv_wr32(priv, 0x408850, 0x2); 2776 if (priv->gpc_nr == 1) {
2777 nv_wr32(priv, 0x408958, 0x2); 2777 nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
2778 nv_wr32(priv, 0x419f78, 0xa); 2778 nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
2779 } else {
2780 nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
2781 nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
2782 }
2783 nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
2779 2784
2780 nve0_grctx_generate_icmd(priv); 2785 nve0_grctx_generate_icmd(priv);
2781 nve0_grctx_generate_a097(priv); 2786 nve0_grctx_generate_a097(priv);
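The removed constant is recoverable from the new expression: 0x201 is exactly what (tpc_total << 8) | gpc_nr yields for the one-GPC, two-TPC configuration the old code baked in, so the change generalizes the 0x405b00 value rather than altering it for that case. A quick sanity check:

    /* sketch: the generalized 0x405b00 value; the counts are the assumed
     * configuration implied by the old hardcoded 0x201 */
    static u32 grctx_405b00(u32 tpc_total, u32 gpc_nr)
    {
            return (tpc_total << 8) | gpc_nr;   /* (2 << 8) | 1 == 0x201 */
    }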
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index b86cc60dcd56..f7055af0f2a6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -87,6 +87,11 @@ chipsets:
87.b16 #nvd9_gpc_mmio_tail 87.b16 #nvd9_gpc_mmio_tail
88.b16 #nvd9_tpc_mmio_head 88.b16 #nvd9_tpc_mmio_head
89.b16 #nvd9_tpc_mmio_tail 89.b16 #nvd9_tpc_mmio_tail
90.b8 0xd7 0 0 0
91.b16 #nvd9_gpc_mmio_head
92.b16 #nvd9_gpc_mmio_tail
93.b16 #nvd9_tpc_mmio_head
94.b16 #nvd9_tpc_mmio_tail
90.b8 0 0 0 0 95.b8 0 0 0 0
91 96
92// GPC mmio lists 97// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 0bcfa4d447e5..7fbdebb2bafb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -62,6 +62,9 @@ chipsets:
62.b8 0xd9 0 0 0 62.b8 0xd9 0 0 0
63.b16 #nvd9_hub_mmio_head 63.b16 #nvd9_hub_mmio_head
64.b16 #nvd9_hub_mmio_tail 64.b16 #nvd9_hub_mmio_tail
65.b8 0xd7 0 0 0
66.b16 #nvd9_hub_mmio_head
67.b16 #nvd9_hub_mmio_tail
65.b8 0 0 0 0 68.b8 0 0 0 0
66 69
67nvc0_hub_mmio_head: 70nvc0_hub_mmio_head:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 0607b9801748..b24559315903 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -254,7 +254,7 @@ nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
254 if (ret) 254 if (ret)
255 return ret; 255 return ret;
256 256
257 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 257 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
258 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 258 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
259 if (ret) 259 if (ret)
260 return ret; 260 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index b2b650dd8b28..7a80d005a974 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -142,7 +142,7 @@ nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
142 if (ret) 142 if (ret)
143 return ret; 143 return ret;
144 144
145 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 145 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
147 if (ret) 147 if (ret)
148 return ret; 148 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 700462fa0ae0..3e1f32ee43d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -109,7 +109,7 @@ nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
109 if (ret) 109 if (ret)
110 return ret; 110 return ret;
111 111
112 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 112 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
114 if (ret) 114 if (ret)
115 return ret; 115 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index cedadaa92d3f..e451db32e92a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -143,7 +143,7 @@ nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 146 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret) 148 if (ret)
149 return ret; 149 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 273f6320027b..9385ac7b44a4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -143,7 +143,7 @@ nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 146 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret) 148 if (ret)
149 return ret; 149 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index f40ee2116ee1..9ce84b73f86a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -141,7 +141,7 @@ nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
141 if (ret) 141 if (ret)
142 return ret; 142 return ret;
143 143
144 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16, 144 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
146 if (ret) 146 if (ret)
147 return ret; 147 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 17049d5c723d..193a5de1b482 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -46,6 +46,14 @@ struct nv40_graph_chan {
46 struct nouveau_graph_chan base; 46 struct nouveau_graph_chan base;
47}; 47};
48 48
49static u64
50nv40_graph_units(struct nouveau_graph *graph)
51{
52 struct nv40_graph_priv *priv = (void *)graph;
53
54 return nv_rd32(priv, 0x1540);
55}
56
49/******************************************************************************* 57/*******************************************************************************
50 * Graphics object classes 58 * Graphics object classes
51 ******************************************************************************/ 59 ******************************************************************************/
@@ -359,6 +367,8 @@ nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
359 else 367 else
360 nv_engine(priv)->sclass = nv40_graph_sclass; 368 nv_engine(priv)->sclass = nv40_graph_sclass;
361 nv_engine(priv)->tile_prog = nv40_graph_tile_prog; 369 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
370
371 priv->base.units = nv40_graph_units;
362 return 0; 372 return 0;
363} 373}
364 374
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index f2b1a7a124f2..1ac36110ca19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -48,6 +48,14 @@ struct nv50_graph_chan {
48 struct nouveau_graph_chan base; 48 struct nouveau_graph_chan base;
49}; 49};
50 50
51static u64
52nv50_graph_units(struct nouveau_graph *graph)
53{
54 struct nv50_graph_priv *priv = (void *)graph;
55
56 return nv_rd32(priv, 0x1540);
57}
58
51/******************************************************************************* 59/*******************************************************************************
52 * Graphics object classes 60 * Graphics object classes
53 ******************************************************************************/ 61 ******************************************************************************/
@@ -819,6 +827,8 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
819 nv_subdev(priv)->intr = nv50_graph_intr; 827 nv_subdev(priv)->intr = nv50_graph_intr;
820 nv_engine(priv)->cclass = &nv50_graph_cclass; 828 nv_engine(priv)->cclass = &nv50_graph_cclass;
821 829
830 priv->base.units = nv50_graph_units;
831
822 switch (nv_device(priv)->chipset) { 832 switch (nv_device(priv)->chipset) {
823 case 0x50: 833 case 0x50:
824 nv_engine(priv)->sclass = nv50_graph_sclass; 834 nv_engine(priv)->sclass = nv50_graph_sclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 0de0dd724aff..f9b9d82c287f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -60,6 +60,19 @@ nvc8_graph_sclass[] = {
60 {} 60 {}
61}; 61};
62 62
63u64
64nvc0_graph_units(struct nouveau_graph *graph)
65{
66 struct nvc0_graph_priv *priv = (void *)graph;
67 u64 cfg;
68
69 cfg = (u32)priv->gpc_nr;
70 cfg |= (u32)priv->tpc_total << 8;
71 cfg |= (u64)priv->rop_nr << 32;
72
73 return cfg;
74}
75
63/******************************************************************************* 76/*******************************************************************************
64 * PGRAPH context 77 * PGRAPH context
65 ******************************************************************************/ 78 ******************************************************************************/
@@ -89,7 +102,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
89 * fuc to modify some per-context register settings on first load 102 * fuc to modify some per-context register settings on first load
90 * of the context. 103 * of the context.
91 */ 104 */
92 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio); 105 ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
106 &chan->mmio);
93 if (ret) 107 if (ret)
94 return ret; 108 return ret;
95 109
@@ -101,8 +115,8 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
101 115
102 /* allocate buffers referenced by mmio list */ 116 /* allocate buffers referenced by mmio list */
103 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) { 117 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
104 ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align, 118 ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
105 0, &chan->data[i].mem); 119 data->align, 0, &chan->data[i].mem);
106 if (ret) 120 if (ret)
107 return ret; 121 return ret;
108 122
@@ -518,9 +532,10 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
518{ 532{
519 struct nouveau_device *device = nv_device(parent); 533 struct nouveau_device *device = nv_device(parent);
520 struct nvc0_graph_priv *priv; 534 struct nvc0_graph_priv *priv;
535 bool enable = device->chipset != 0xd7;
521 int ret, i; 536 int ret, i;
522 537
523 ret = nouveau_graph_create(parent, engine, oclass, true, &priv); 538 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
524 *pobject = nv_object(priv); 539 *pobject = nv_object(priv);
525 if (ret) 540 if (ret)
526 return ret; 541 return ret;
@@ -529,6 +544,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
529 nv_subdev(priv)->intr = nvc0_graph_intr; 544 nv_subdev(priv)->intr = nvc0_graph_intr;
530 nv_engine(priv)->cclass = &nvc0_graph_cclass; 545 nv_engine(priv)->cclass = &nvc0_graph_cclass;
531 546
547 priv->base.units = nvc0_graph_units;
548
532 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { 549 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
533 nv_info(priv, "using external firmware\n"); 550 nv_info(priv, "using external firmware\n");
534 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || 551 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -551,11 +568,13 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
551 break; 568 break;
552 } 569 }
553 570
554 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4); 571 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
572 &priv->unk4188b4);
555 if (ret) 573 if (ret)
556 return ret; 574 return ret;
557 575
558 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8); 576 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
577 &priv->unk4188b8);
559 if (ret) 578 if (ret)
560 return ret; 579 return ret;
561 580
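For reference, the layout packed by nvc0_graph_units() above can be unpacked as follows. Only the shift positions are given by the code; the 0xff field widths in this sketch are assumptions:

    /* hedged decoder for the u64 built by nvc0_graph_units() */
    static void decode_graph_units(u64 units)
    {
            u32 gpcs = units & 0xff;          /* bits 7:0   */
            u32 tpcs = (units >> 8) & 0xff;   /* bits 15:8  */
            u32 rops = (units >> 32) & 0xff;  /* bits 39:32 */

            pr_info("%u GPCs, %u TPCs, %u ROPs\n", gpcs, tpcs, rops);
    }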
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index a1e78de46456..c870dad0f670 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -118,6 +118,7 @@ nvc0_graph_class(void *obj)
118 return 0x9197; 118 return 0x9197;
119 case 0xc8: 119 case 0xc8:
120 case 0xd9: 120 case 0xd9:
121 case 0xd7:
121 return 0x9297; 122 return 0x9297;
122 case 0xe4: 123 case 0xe4:
123 case 0xe7: 124 case 0xe7:
@@ -169,4 +170,6 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
169 struct nouveau_object **); 170 struct nouveau_object **);
170void nvc0_graph_context_dtor(struct nouveau_object *); 171void nvc0_graph_context_dtor(struct nouveau_object *);
171 172
173u64 nvc0_graph_units(struct nouveau_graph *);
174
172#endif 175#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 4857f913efdd..678c16f63055 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -77,11 +77,207 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
77 nv_wr32(priv, 0x409c20, ustat); 77 nv_wr32(priv, 0x409c20, ustat);
78} 78}
79 79
80static const struct nouveau_enum nve0_mp_warp_error[] = {
81 { 0x00, "NO_ERROR" },
82 { 0x01, "STACK_MISMATCH" },
83 { 0x05, "MISALIGNED_PC" },
84 { 0x08, "MISALIGNED_GPR" },
85 { 0x09, "INVALID_OPCODE" },
86 { 0x0d, "GPR_OUT_OF_BOUNDS" },
87 { 0x0e, "MEM_OUT_OF_BOUNDS" },
88 { 0x0f, "UNALIGNED_MEM_ACCESS" },
89 { 0x11, "INVALID_PARAM" },
90 {}
91};
92
93static const struct nouveau_enum nve0_mp_global_error[] = {
94 { 2, "MULTIPLE_WARP_ERRORS" },
95 { 3, "OUT_OF_STACK_SPACE" },
96 {}
97};
98
99static const struct nouveau_enum nve0_gpc_rop_error[] = {
100 { 1, "RT_PITCH_OVERRUN" },
101 { 4, "RT_WIDTH_OVERRUN" },
102 { 5, "RT_HEIGHT_OVERRUN" },
103 { 7, "ZETA_STORAGE_TYPE_MISMATCH" },
104 { 8, "RT_STORAGE_TYPE_MISMATCH" },
105 { 10, "RT_LINEAR_MISMATCH" },
106 {}
107};
108
109static const struct nouveau_enum nve0_sked_error[] = {
110 { 7, "CONSTANT_BUFFER_SIZE" },
111 { 9, "LOCAL_MEMORY_SIZE_POS" },
112 { 10, "LOCAL_MEMORY_SIZE_NEG" },
113 { 11, "WARP_CSTACK_SIZE" },
114 { 12, "TOTAL_TEMP_SIZE" },
115 { 13, "REGISTER_COUNT" },
116 { 18, "TOTAL_THREADS" },
117 { 20, "PROGRAM_OFFSET" },
118 { 21, "SHARED_MEMORY_SIZE" },
119 { 25, "SHARED_CONFIG_TOO_SMALL" },
120 { 26, "TOTAL_REGISTER_COUNT" },
121 {}
122};
123
124static void
125nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
126{
127 int i;
128 u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648));
129 u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650));
130
131 nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp);
132
133 for (i = 0; i <= 31; ++i) {
134 if (!(gerr & (1 << i)))
135 continue;
136 pr_cont(" ");
137 nouveau_enum_print(nve0_mp_global_error, i);
138 }
139 if (werr) {
140 pr_cont(" ");
141 nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);
142 }
143 pr_cont("\n");
144
145 /* disable MP trap to avoid spam */
146 nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0);
147
148 /* TODO: figure out how to resume after an MP trap */
149}
150
151static void
152nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
153{
154 u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508));
155
156 if (stat & 0x1) {
157 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224));
158 nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n",
159 gpc, tp, trap);
160
161 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
162 stat &= ~0x1;
163 }
164
165 if (stat & 0x2) {
166 nve0_graph_mp_trap(priv, gpc, tp);
167 stat &= ~0x2;
168 }
169
170 if (stat & 0x4) {
171 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084));
172 nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n",
173 gpc, tp, trap);
174
175 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
176 stat &= ~0x4;
177 }
178
179 if (stat & 0x8) {
180 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c));
181 nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n",
182 gpc, tp, trap);
183
184 nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
185 stat &= ~0x8;
186 }
187
188 if (stat) {
189 nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n",
190 gpc, tp, stat);
191 }
192}
193
194static void
195nve0_graph_gpc_trap(struct nvc0_graph_priv *priv)
196{
197 const u32 mask = nv_rd32(priv, 0x400118);
198 int gpc;
199
200 for (gpc = 0; gpc < 4; ++gpc) {
201 u32 stat;
202 int tp;
203
204 if (!(mask & (1 << gpc)))
205 continue;
206 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
207
208 if (stat & 0x0001) {
209 u32 trap[4];
210 int i;
211
212 trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
213 trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
214 trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
215 trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
216
217 nv_error(priv, "GPC%i/PROP trap:", gpc);
218 for (i = 0; i <= 29; ++i) {
219 if (!(trap[0] & (1 << i)))
220 continue;
221 pr_cont(" ");
222 nouveau_enum_print(nve0_gpc_rop_error, i);
223 }
224 pr_cont("\n");
225
226 nv_error(priv, "x = %u, y = %u, "
227 "format = %x, storage type = %x\n",
228 trap[1] & 0xffff,
229 trap[1] >> 16,
230 (trap[2] >> 8) & 0x3f,
231 trap[3] & 0xff);
232
233 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
234 stat &= ~0x0001;
235 }
236
237 if (stat & 0x0002) {
238 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
239 nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc,
240 trap);
241 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
242 stat &= ~0x0002;
243 }
244
245 if (stat & 0x0004) {
246 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
247 nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc,
248 trap);
249 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
250 stat &= ~0x0004;
251 }
252
253 if (stat & 0x0008) {
254 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
 255 nv_error(priv, "GPC%i/ESETUP trap: %08x\n", gpc,
256 trap);
257 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
258 stat &= ~0x0008;
259 }
260
261 for (tp = 0; tp < 8; ++tp) {
262 if (stat & (1 << (16 + tp)))
263 nve0_graph_tp_trap(priv, gpc, tp);
264 }
265 stat &= ~0xff0000;
266
267 if (stat) {
268 nv_error(priv, "GPC%i: unknown stat %08x\n",
269 gpc, stat);
270 }
271 }
272}
273
274
80static void 275static void
81nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst, 276nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
82 struct nouveau_object *engctx) 277 struct nouveau_object *engctx)
83{ 278{
84 u32 trap = nv_rd32(priv, 0x400108); 279 u32 trap = nv_rd32(priv, 0x400108);
280 int i;
85 int rop; 281 int rop;
86 282
87 if (trap & 0x00000001) { 283 if (trap & 0x00000001) {
@@ -102,6 +298,32 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
102 trap &= ~0x00000010; 298 trap &= ~0x00000010;
103 } 299 }
104 300
301 if (trap & 0x00000100) {
302 u32 stat = nv_rd32(priv, 0x407020);
303 nv_error(priv, "SKED ch %d [0x%010llx %s]:",
304 chid, inst, nouveau_client_name(engctx));
305
306 for (i = 0; i <= 29; ++i) {
307 if (!(stat & (1 << i)))
308 continue;
309 pr_cont(" ");
310 nouveau_enum_print(nve0_sked_error, i);
311 }
312 pr_cont("\n");
313
314 if (stat & 0x3fffffff)
315 nv_wr32(priv, 0x407020, 0x40000000);
316 nv_wr32(priv, 0x400108, 0x00000100);
317 trap &= ~0x00000100;
318 }
319
320 if (trap & 0x01000000) {
321 nv_error(priv, "GPC ch %d [0x%010llx %s]:\n",
322 chid, inst, nouveau_client_name(engctx));
323 nve0_graph_gpc_trap(priv);
324 trap &= ~0x01000000;
325 }
326
105 if (trap & 0x02000000) { 327 if (trap & 0x02000000) {
106 for (rop = 0; rop < priv->rop_nr; rop++) { 328 for (rop = 0; rop < priv->rop_nr; rop++) {
107 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); 329 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
@@ -217,6 +439,8 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
217 nv_engine(priv)->cclass = &nve0_graph_cclass; 439 nv_engine(priv)->cclass = &nve0_graph_cclass;
218 nv_engine(priv)->sclass = nve0_graph_sclass; 440 nv_engine(priv)->sclass = nve0_graph_sclass;
219 441
442 priv->base.units = nvc0_graph_units;
443
220 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { 444 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
221 nv_info(priv, "using external firmware\n"); 445 nv_info(priv, "using external firmware\n");
222 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || 446 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
@@ -227,11 +451,13 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
227 priv->firmware = true; 451 priv->firmware = true;
228 } 452 }
229 453
230 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4); 454 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
455 &priv->unk4188b4);
231 if (ret) 456 if (ret)
232 return ret; 457 return ret;
233 458
234 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8); 459 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
460 &priv->unk4188b8);
235 if (ret) 461 if (ret)
236 return ret; 462 return ret;
237 463
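As a worked example of the decode in nve0_graph_mp_trap() above: a raw readout of 0x0009 from TPC_UNIT(gpc, tp, 0x648) selects the 0x09 entry of nve0_mp_warp_error, so the trap line would end in INVALID_OPCODE. The lookup itself reduces to:

    u32 werr = 0x0009;                               /* example raw value */
    nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);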
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index a523eaad47e3..d698e710ddd4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -94,6 +94,32 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
94 return -EINVAL; 94 return -EINVAL;
95} 95}
96 96
97static int
98nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
99 void *args, u32 size)
100{
101 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
102 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
103 u32 data = *(u32 *)args;
104
105 switch (mthd) {
106 case 0x600:
107 nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
108 break;
109 case 0x644:
110 if (data & ~0x1ffffe)
111 return -EINVAL;
112 nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
113 break;
114 case 0x6ac:
115 nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
116 break;
117 default:
118 return -EINVAL;
119 }
120 return 0;
121}
122
97static struct nouveau_omthds 123static struct nouveau_omthds
98nvc0_software_omthds[] = { 124nvc0_software_omthds[] = {
99 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, 125 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
@@ -101,6 +127,9 @@ nvc0_software_omthds[] = {
101 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, 127 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
102 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, 128 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
103 { 0x0500, 0x0500, nvc0_software_mthd_flip }, 129 { 0x0500, 0x0500, nvc0_software_mthd_flip },
130 { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
131 { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
132 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
104 {} 133 {}
105}; 134};
106 135
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 92d3ab11d962..0a393f7f055f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -169,6 +169,7 @@ struct nv04_display_class {
169 * 8570: NVA3_DISP 169 * 8570: NVA3_DISP
170 * 9070: NVD0_DISP 170 * 9070: NVD0_DISP
171 * 9170: NVE0_DISP 171 * 9170: NVE0_DISP
172 * 9270: NVF0_DISP
172 */ 173 */
173 174
174#define NV50_DISP_CLASS 0x00005070 175#define NV50_DISP_CLASS 0x00005070
@@ -178,6 +179,7 @@ struct nv04_display_class {
178#define NVA3_DISP_CLASS 0x00008570 179#define NVA3_DISP_CLASS 0x00008570
179#define NVD0_DISP_CLASS 0x00009070 180#define NVD0_DISP_CLASS 0x00009070
180#define NVE0_DISP_CLASS 0x00009170 181#define NVE0_DISP_CLASS 0x00009170
182#define NVF0_DISP_CLASS 0x00009270
181 183
182#define NV50_DISP_SOR_MTHD 0x00010000 184#define NV50_DISP_SOR_MTHD 0x00010000
183#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 185#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
@@ -246,6 +248,7 @@ struct nv50_display_class {
246 * 857a: NVA3_DISP_CURS 248 * 857a: NVA3_DISP_CURS
247 * 907a: NVD0_DISP_CURS 249 * 907a: NVD0_DISP_CURS
248 * 917a: NVE0_DISP_CURS 250 * 917a: NVE0_DISP_CURS
251 * 927a: NVF0_DISP_CURS
249 */ 252 */
250 253
251#define NV50_DISP_CURS_CLASS 0x0000507a 254#define NV50_DISP_CURS_CLASS 0x0000507a
@@ -255,6 +258,7 @@ struct nv50_display_class {
255#define NVA3_DISP_CURS_CLASS 0x0000857a 258#define NVA3_DISP_CURS_CLASS 0x0000857a
256#define NVD0_DISP_CURS_CLASS 0x0000907a 259#define NVD0_DISP_CURS_CLASS 0x0000907a
257#define NVE0_DISP_CURS_CLASS 0x0000917a 260#define NVE0_DISP_CURS_CLASS 0x0000917a
261#define NVF0_DISP_CURS_CLASS 0x0000927a
258 262
259struct nv50_display_curs_class { 263struct nv50_display_curs_class {
260 u32 head; 264 u32 head;
@@ -267,6 +271,7 @@ struct nv50_display_curs_class {
267 * 857b: NVA3_DISP_OIMM 271 * 857b: NVA3_DISP_OIMM
268 * 907b: NVD0_DISP_OIMM 272 * 907b: NVD0_DISP_OIMM
269 * 917b: NVE0_DISP_OIMM 273 * 917b: NVE0_DISP_OIMM
 274 * 927b: NVF0_DISP_OIMM
270 */ 275 */
271 276
272#define NV50_DISP_OIMM_CLASS 0x0000507b 277#define NV50_DISP_OIMM_CLASS 0x0000507b
@@ -276,6 +281,7 @@ struct nv50_display_curs_class {
276#define NVA3_DISP_OIMM_CLASS 0x0000857b 281#define NVA3_DISP_OIMM_CLASS 0x0000857b
277#define NVD0_DISP_OIMM_CLASS 0x0000907b 282#define NVD0_DISP_OIMM_CLASS 0x0000907b
278#define NVE0_DISP_OIMM_CLASS 0x0000917b 283#define NVE0_DISP_OIMM_CLASS 0x0000917b
284#define NVF0_DISP_OIMM_CLASS 0x0000927b
279 285
280struct nv50_display_oimm_class { 286struct nv50_display_oimm_class {
281 u32 head; 287 u32 head;
@@ -288,6 +294,7 @@ struct nv50_display_oimm_class {
288 * 857c: NVA3_DISP_SYNC 294 * 857c: NVA3_DISP_SYNC
289 * 907c: NVD0_DISP_SYNC 295 * 907c: NVD0_DISP_SYNC
290 * 917c: NVE0_DISP_SYNC 296 * 917c: NVE0_DISP_SYNC
297 * 927c: NVF0_DISP_SYNC
291 */ 298 */
292 299
293#define NV50_DISP_SYNC_CLASS 0x0000507c 300#define NV50_DISP_SYNC_CLASS 0x0000507c
@@ -297,6 +304,7 @@ struct nv50_display_oimm_class {
297#define NVA3_DISP_SYNC_CLASS 0x0000857c 304#define NVA3_DISP_SYNC_CLASS 0x0000857c
298#define NVD0_DISP_SYNC_CLASS 0x0000907c 305#define NVD0_DISP_SYNC_CLASS 0x0000907c
299#define NVE0_DISP_SYNC_CLASS 0x0000917c 306#define NVE0_DISP_SYNC_CLASS 0x0000917c
307#define NVF0_DISP_SYNC_CLASS 0x0000927c
300 308
301struct nv50_display_sync_class { 309struct nv50_display_sync_class {
302 u32 pushbuf; 310 u32 pushbuf;
@@ -310,6 +318,7 @@ struct nv50_display_sync_class {
310 * 857d: NVA3_DISP_MAST 318 * 857d: NVA3_DISP_MAST
311 * 907d: NVD0_DISP_MAST 319 * 907d: NVD0_DISP_MAST
312 * 917d: NVE0_DISP_MAST 320 * 917d: NVE0_DISP_MAST
321 * 927d: NVF0_DISP_MAST
313 */ 322 */
314 323
315#define NV50_DISP_MAST_CLASS 0x0000507d 324#define NV50_DISP_MAST_CLASS 0x0000507d
@@ -319,6 +328,7 @@ struct nv50_display_sync_class {
319#define NVA3_DISP_MAST_CLASS 0x0000857d 328#define NVA3_DISP_MAST_CLASS 0x0000857d
320#define NVD0_DISP_MAST_CLASS 0x0000907d 329#define NVD0_DISP_MAST_CLASS 0x0000907d
321#define NVE0_DISP_MAST_CLASS 0x0000917d 330#define NVE0_DISP_MAST_CLASS 0x0000917d
331#define NVF0_DISP_MAST_CLASS 0x0000927d
322 332
323struct nv50_display_mast_class { 333struct nv50_display_mast_class {
324 u32 pushbuf; 334 u32 pushbuf;
@@ -331,6 +341,7 @@ struct nv50_display_mast_class {
331 * 857e: NVA3_DISP_OVLY 341 * 857e: NVA3_DISP_OVLY
332 * 907e: NVD0_DISP_OVLY 342 * 907e: NVD0_DISP_OVLY
333 * 917e: NVE0_DISP_OVLY 343 * 917e: NVE0_DISP_OVLY
344 * 927e: NVF0_DISP_OVLY
334 */ 345 */
335 346
336#define NV50_DISP_OVLY_CLASS 0x0000507e 347#define NV50_DISP_OVLY_CLASS 0x0000507e
@@ -340,6 +351,7 @@ struct nv50_display_mast_class {
340#define NVA3_DISP_OVLY_CLASS 0x0000857e 351#define NVA3_DISP_OVLY_CLASS 0x0000857e
341#define NVD0_DISP_OVLY_CLASS 0x0000907e 352#define NVD0_DISP_OVLY_CLASS 0x0000907e
342#define NVE0_DISP_OVLY_CLASS 0x0000917e 353#define NVE0_DISP_OVLY_CLASS 0x0000917e
354#define NVF0_DISP_OVLY_CLASS 0x0000927e
343 355
344struct nv50_display_ovly_class { 356struct nv50_display_ovly_class {
345 u32 pushbuf; 357 u32 pushbuf;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index d351a4e5819c..05840f3eee98 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -6,7 +6,7 @@
6#include <core/engine.h> 6#include <core/engine.h>
7 7
8enum nv_subdev_type { 8enum nv_subdev_type {
9 NVDEV_SUBDEV_DEVICE, 9 NVDEV_ENGINE_DEVICE,
10 NVDEV_SUBDEV_VBIOS, 10 NVDEV_SUBDEV_VBIOS,
11 11
12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before 12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
@@ -57,7 +57,7 @@ enum nv_subdev_type {
57}; 57};
58 58
59struct nouveau_device { 59struct nouveau_device {
60 struct nouveau_subdev base; 60 struct nouveau_engine base;
61 struct list_head head; 61 struct list_head head;
62 62
63 struct pci_dev *pdev; 63 struct pci_dev *pdev;
@@ -99,7 +99,7 @@ nv_device(void *obj)
99 99
100#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA 100#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
101 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) || 101 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
102 (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) { 102 (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
103 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x", 103 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
104 nv_hclass(object), nv_hclass(device)); 104 nv_hclass(object), nv_hclass(device));
105 } 105 }
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 31cd852c96df..9f5ea900ff00 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -51,8 +51,8 @@ int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
51void nouveau_parent_destroy(struct nouveau_parent *); 51void nouveau_parent_destroy(struct nouveau_parent *);
52 52
53void _nouveau_parent_dtor(struct nouveau_object *); 53void _nouveau_parent_dtor(struct nouveau_object *);
54#define _nouveau_parent_init _nouveau_object_init 54#define _nouveau_parent_init nouveau_object_init
55#define _nouveau_parent_fini _nouveau_object_fini 55#define _nouveau_parent_fini nouveau_object_fini
56 56
57int nouveau_parent_sclass(struct nouveau_object *, u16 handle, 57int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
58 struct nouveau_object **pengine, 58 struct nouveau_object **pengine,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h
index c9e4c4afa50e..b3dd2c4c2f1e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -18,7 +18,6 @@ int nv50_identify(struct nouveau_device *);
 int nvc0_identify(struct nouveau_device *);
 int nve0_identify(struct nouveau_device *);
 
-extern struct nouveau_oclass nouveau_device_sclass[];
 struct nouveau_device *nouveau_device_find(u64 name);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 28da6772c095..4b21fabfbddb 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -44,5 +44,6 @@ extern struct nouveau_oclass nv94_disp_oclass;
 extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
 extern struct nouveau_oclass nve0_disp_oclass;
+extern struct nouveau_oclass nvf0_disp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b46c197709f3..633c2f806482 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,7 +65,8 @@ struct nouveau_fifo_base {
 struct nouveau_fifo {
 	struct nouveau_engine base;
 
-	struct nouveau_event *uevent;
+	struct nouveau_event *cevent; /* channel creation event */
+	struct nouveau_event *uevent; /* async user trigger */
 
 	struct nouveau_object **channel;
 	spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 6943b40d0817..5d392439f2ac 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -26,6 +26,10 @@ struct nouveau_graph_chan {
 
 struct nouveau_graph {
 	struct nouveau_engine base;
+
+	/* Returns chipset-specific counts of units packed into an u64.
+	 */
+	u64 (*units)(struct nouveau_graph *);
 };
 
 static inline struct nouveau_graph *
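The new ->units() hook lets generic code ask a chipset-specific graph engine how many of each unit it has, without hard-coding register layouts. A minimal sketch of a consumer follows; the bit layout shown is purely an assumption for illustration, since the interface itself only promises "counts packed into a u64":

	/* Hypothetical consumer; the field positions are assumed, not
	 * dictated by the interface. */
	static void
	demo_graph_units(struct nouveau_graph *graph)
	{
		if (graph->units) {
			u64 units = graph->units(graph);
			pr_info("gr units: gpc %llu, tpc %llu\n",
				(unsigned long long)(units & 0xff),
				(unsigned long long)((units >> 8) & 0xff));
		}
	}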
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
index f351f63bc654..a1985ed3d58d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -4,8 +4,15 @@
 #include <core/subdev.h>
 #include <core/device.h>
 
+struct nouveau_mm_node;
+
 struct nouveau_ltcg {
 	struct nouveau_subdev base;
+
+	int  (*tags_alloc)(struct nouveau_ltcg *, u32 count,
+			   struct nouveau_mm_node **);
+	void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
+	void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
 };
 
 static inline struct nouveau_ltcg *
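These three hooks are what lets the nvc0 fb code (later in this diff) hand out compression tag lines for VRAM allocations. A sketch of the expected alloc/clear/free pairing, modelled on nvc0_fb_vram_new()/_del() below; the offset/length fields of nouveau_mm_node are used as they are there:

	struct nouveau_mm_node *tag = NULL;

	ltcg->tags_alloc(ltcg, n, &tag);	/* n tags, or tag stays NULL */
	if (tag)
		ltcg->tags_clear(ltcg, tag->offset, tag->length);
	/* ... memory is used compressed ... */
	if (tag)
		ltcg->tags_free(ltcg, &tag);	/* also NULLs the pointer */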
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index fded97cea500..d5502267c30f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -21,18 +21,22 @@ nouveau_mc(void *obj)
 }
 
 #define nouveau_mc_create(p,e,o,d) \
-	nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master", \
-			       sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p) \
-	nouveau_subdev_destroy(&(p)->base)
-#define nouveau_mc_init(p) \
-	nouveau_subdev_init(&(p)->base)
-#define nouveau_mc_fini(p,s) \
-	nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_mc_dtor _nouveau_subdev_dtor
-#define _nouveau_mc_init _nouveau_subdev_init
-#define _nouveau_mc_fini _nouveau_subdev_fini
+	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({ \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
+})
+#define nouveau_mc_init(p) ({ \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
+})
+#define nouveau_mc_fini(p,s) ({ \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
+})
+
+int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+		       struct nouveau_oclass *, int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int _nouveau_mc_init(struct nouveau_object *);
+int _nouveau_mc_fini(struct nouveau_object *, bool);
 
 extern struct nouveau_oclass nv04_mc_oclass;
 extern struct nouveau_oclass nv44_mc_oclass;
@@ -40,8 +44,6 @@ extern struct nouveau_oclass nv50_mc_oclass;
 extern struct nouveau_oclass nv98_mc_oclass;
 extern struct nouveau_oclass nvc0_mc_oclass;
 
-void nouveau_mc_intr(struct nouveau_subdev *);
-
 extern const struct nouveau_mc_intr nv04_mc_intr[];
 int nv04_mc_init(struct nouveau_object *);
 int nv50_mc_init(struct nouveau_object *);
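Worth noting why the destroy/init/fini macros switch from plain aliases to GNU statement expressions: the new bodies mention their argument in a real function call, and the ({ ... }) form binds it to a local so it is evaluated exactly once. A minimal sketch of the hazard being avoided (demo names, not part of the patch):

	/* A naive body that expanded (p) twice would run any side effects
	 * in the argument twice; binding it first avoids that: */
	#define demo_mc_init(p) ({ \
		struct nouveau_mc *pmc = (p);	/* (p) evaluated once */ \
		_nouveau_mc_init(nv_object(pmc)); \
	})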
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 0b20fc0d19c1..c075998d82e6 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -73,6 +73,7 @@ int _nouveau_therm_fini(struct nouveau_object *, bool);
 
 extern struct nouveau_oclass nv40_therm_oclass;
 extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nv84_therm_oclass;
 extern struct nouveau_oclass nva3_therm_oclass;
 extern struct nouveau_oclass nvd0_therm_oclass;
 
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index eb496033b55c..3bd9be2ab37f 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -17,6 +17,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/reboot.h>
+#include <linux/interrupt.h>
 
 #include <asm/unaligned.h>
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index c3acf5b70d9e..649f1ced1fe0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -122,18 +122,20 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
-				 &priv->mem);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+				 NVOBJ_FLAG_HEAP, &priv->mem);
 	heap = nv_object(priv->mem);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
-				 0x1400 : 0x0200, 0, 0, &priv->pad);
+	ret = nouveau_gpuobj_new(nv_object(priv), heap,
+				 (device->chipset == 0x50) ? 0x1400 : 0x0200,
+				 0, 0, &priv->pad);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
+				 0, &priv->pgd);
 	if (ret)
 		return ret;
 
@@ -145,9 +147,9 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
-				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
-				 &vm->pgt[0].obj[0]);
+	ret = nouveau_gpuobj_new(nv_object(priv), heap,
+				 ((limit-- - start) >> 12) * 8, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
 	vm->pgt[0].refcount[0] = 1;
 	if (ret)
 		return ret;
@@ -157,7 +159,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
 	if (ret)
 		return ret;
 
@@ -182,7 +184,7 @@ nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 77a6fb725d3f..f8a44956dec1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -101,12 +101,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	/* BAR3 */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+				 &priv->bar[0].mem);
 	mem = priv->bar[0].mem;
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+				 &priv->bar[0].pgd);
 	if (ret)
 		return ret;
 
@@ -114,7 +116,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
 				 (pci_resource_len(pdev, 3) >> 12) * 8,
 				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
 				 &vm->pgt[0].obj[0]);
@@ -133,12 +135,14 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
 
 	/* BAR1 */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+				 &priv->bar[1].mem);
 	mem = priv->bar[1].mem;
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+				 &priv->bar[1].pgd);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index e816f06637a7..0e2c1a4f1659 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
 	}
 }
 
+static void
+nouveau_bios_shadow_platform(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	size_t size;
+
+	void __iomem *rom = pci_platform_rom(pdev, &size);
+	if (rom && size) {
+		bios->data = kmalloc(size, GFP_KERNEL);
+		if (bios->data) {
+			memcpy_fromio(bios->data, rom, size);
+			bios->size = size;
+		}
+	}
+}
+
 static int
 nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
 {
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
288 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, 304 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
289 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, 305 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
290 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, 306 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
307 { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
291 {} 308 {}
292 }; 309 };
293 struct methods *mthd, *best; 310 struct methods *mthd, *best;
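The PLATFORM entry slots into the ordered table of VBIOS shadow sources, as a last resort after PROM/ACPI/PCIROM. Roughly how the table is consumed, as I read nouveau_bios_shadow() (simplified sketch; the real loop also honours a user-forced source and keeps the highest-scoring image rather than the first acceptable one):

	/* Illustrative only; field names follow the initialiser rows above. */
	for (mthd = mthds; mthd->shadow; mthd++) {
		mthd->shadow(bios);
		mthd->score = nouveau_bios_score(bios, mthd->rw);
		if (mthd->score)
			break;	/* usable image found */
	}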
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9c41b58d57e2..c300b5e7b670 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -64,27 +64,33 @@ init_exec_force(struct nvbios_init *init, bool exec)
 static inline int
 init_or(struct nvbios_init *init)
 {
-	if (init->outp)
-		return ffs(init->outp->or) - 1;
-	error("script needs OR!!\n");
+	if (init_exec(init)) {
+		if (init->outp)
+			return ffs(init->outp->or) - 1;
+		error("script needs OR!!\n");
+	}
 	return 0;
 }
 
 static inline int
 init_link(struct nvbios_init *init)
 {
-	if (init->outp)
-		return !(init->outp->sorconf.link & 1);
-	error("script needs OR link\n");
+	if (init_exec(init)) {
+		if (init->outp)
+			return !(init->outp->sorconf.link & 1);
+		error("script needs OR link\n");
+	}
 	return 0;
 }
 
 static inline int
 init_crtc(struct nvbios_init *init)
 {
-	if (init->crtc >= 0)
-		return init->crtc;
-	error("script needs crtc\n");
+	if (init_exec(init)) {
+		if (init->crtc >= 0)
+			return init->crtc;
+		error("script needs crtc\n");
+	}
 	return 0;
 }
 
@@ -92,16 +98,21 @@ static u8
 init_conn(struct nvbios_init *init)
 {
 	struct nouveau_bios *bios = init->bios;
+	u8  ver, len;
+	u16 conn;
 
-	if (init->outp) {
-		u8  ver, len;
-		u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
-		if (conn)
-			return nv_ro08(bios, conn);
+	if (init_exec(init)) {
+		if (init->outp) {
+			conn = init->outp->connector;
+			conn = dcb_conn(bios, conn, &ver, &len);
+			if (conn)
+				return nv_ro08(bios, conn);
+		}
+
+		error("script needs connector type\n");
 	}
 
-	error("script needs connector type\n");
-	return 0x00;
+	return 0xff;
 }
 
 static inline u32
@@ -227,7 +238,8 @@ init_i2c(struct nvbios_init *init, int index)
 	} else
 	if (index < 0) {
 		if (!init->outp) {
-			error("script needs output for i2c\n");
+			if (init_exec(init))
+				error("script needs output for i2c\n");
 			return NULL;
 		}
 
@@ -544,7 +556,8 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
 			return 0x6808b0 + dacoffset;
 		}
 
-		error("tmds opcodes need dcb\n");
+		if (init_exec(init))
+			error("tmds opcodes need dcb\n");
 	} else {
 		if (tmds < ARRAY_SIZE(pramdac_table))
 			return pramdac_table[tmds];
@@ -792,7 +805,8 @@ init_dp_condition(struct nvbios_init *init)
 			break;
 		}
 
-		warn("script needs dp output table data\n");
+		if (init_exec(init))
+			warn("script needs dp output table data\n");
 		break;
 	case 5:
 		if (!(init_rdauxr(init, 0x0d) & 1))
@@ -816,7 +830,7 @@ init_io_mask_or(struct nvbios_init *init)
 	u8 or = init_or(init);
 	u8 data;
 
-	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or);
+	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
 	init->offset += 2;
 
 	data = init_rdvgai(init, 0x03d4, index);
@@ -835,7 +849,7 @@ init_io_or(struct nvbios_init *init)
 	u8 or = init_or(init);
 	u8 data;
 
-	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or);
+	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
 	init->offset += 2;
 
 	data = init_rdvgai(init, 0x03d4, index);
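The common thread in these bios/init.c hunks: error()/warn() are now emitted only when init_exec() says the script is actually being executed, because the same code also walks scripts in a non-executing pass (for sizing/validation) where missing display context is expected and not worth logging. The two trace() hunks just add missing "\n" terminators. The distilled pattern, restated as a sketch:

	/* Stay quiet during the non-executing parse pass. */
	if (init_exec(init)) {
		if (!init->outp)
			error("script needs output\n");	/* only when executing */
	}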
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 7606ed15b6fa..86ad59203c8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -23,6 +23,7 @@
  */
 
 #include <subdev/fb.h>
+#include <subdev/ltcg.h>
 #include <subdev/bios.h>
 
 struct nvc0_fb_priv {
@@ -31,34 +32,14 @@ struct nvc0_fb_priv {
 	dma_addr_t r100c10;
 };
 
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
-	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
-	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
-	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
-	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
+extern const u8 nvc0_pte_storage_type_map[256];
+
 
 static bool
 nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
-	return likely((types[memtype] == 1));
+	return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
 static int
130 int type = (memtype & 0x0ff); 111 int type = (memtype & 0x0ff);
131 int back = (memtype & 0x800); 112 int back = (memtype & 0x800);
132 int ret; 113 int ret;
114 const bool comp = nvc0_pte_storage_type_map[type] != type;
133 115
134 size >>= 12; 116 size >>= 12;
135 align >>= 12; 117 align >>= 12;
@@ -142,10 +124,22 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
142 return -ENOMEM; 124 return -ENOMEM;
143 125
144 INIT_LIST_HEAD(&mem->regions); 126 INIT_LIST_HEAD(&mem->regions);
145 mem->memtype = type;
146 mem->size = size; 127 mem->size = size;
147 128
148 mutex_lock(&pfb->base.mutex); 129 mutex_lock(&pfb->base.mutex);
130 if (comp) {
131 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
132
133 /* compression only works with lpages */
134 if (align == (1 << (17 - 12))) {
135 int n = size >> 5;
136 ltcg->tags_alloc(ltcg, n, &mem->tag);
137 }
138 if (unlikely(!mem->tag))
139 type = nvc0_pte_storage_type_map[type];
140 }
141 mem->memtype = type;
142
149 do { 143 do {
150 if (back) 144 if (back)
151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 145 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
@@ -168,6 +162,17 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
168 return 0; 162 return 0;
169} 163}
170 164
165static void
166nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
167{
168 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
169
170 if ((*pmem)->tag)
171 ltcg->tags_free(ltcg, &(*pmem)->tag);
172
173 nv50_fb_vram_del(pfb, pmem);
174}
175
171static int 176static int
172nvc0_fb_init(struct nouveau_object *object) 177nvc0_fb_init(struct nouveau_object *object)
173{ 178{
@@ -178,7 +183,8 @@ nvc0_fb_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+	if (priv->r100c10_page)
+		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
 	return 0;
 }
 
@@ -214,16 +220,16 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->base.memtype_valid = nvc0_fb_memtype_valid;
 	priv->base.ram.init = nvc0_fb_vram_init;
 	priv->base.ram.get = nvc0_fb_vram_new;
-	priv->base.ram.put = nv50_fb_vram_del;
+	priv->base.ram.put = nvc0_fb_vram_del;
 
 	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!priv->r100c10_page)
-		return -ENOMEM;
-
-	priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(device->pdev, priv->r100c10))
-		return -EFAULT;
+	if (priv->r100c10_page) {
+		priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
+					     0, PAGE_SIZE,
+					     PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(device->pdev, priv->r100c10))
+			return -EFAULT;
+	}
 
 	return nouveau_fb_preinit(&priv->base);
 }
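The tag arithmetic in nvc0_fb_vram_new() is easiest to see with numbers. After the ">>= 12", size is in 4 KiB pages, and n = size >> 5 requests one compression tag per 32 pages, i.e. one tag per 128 KiB large page (which is also why the "align == (1 << (17 - 12))" check insists on large-page alignment). A worked example, as I read the code:

	/*
	 *   1 MiB buffer -> size = 0x100000 >> 12 = 256 pages
	 *                -> n    = 256 >> 5       = 8 tags
	 *
	 * If tags_alloc() fails, the allocation silently degrades to the
	 * uncompressed storage type via nvc0_pte_storage_type_map[type].
	 */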
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2e98e8a3f1aa..8ae2625415e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -140,12 +140,8 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
 	}
 
 	/* drop port's i2c subdev refcount, i2c handles this itself */
-	if (ret == 0) {
+	if (ret == 0)
 		list_add_tail(&port->head, &i2c->ports);
-		atomic_dec(&parent->refcount);
-		atomic_dec(&engine->refcount);
-	}
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index f5bbd3834116..795393d7b2f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -93,7 +93,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
 		   u32 size, u32 align, struct nouveau_object **pobject)
 {
 	struct nouveau_object *engine = nv_object(imem);
-	struct nv04_instmem_priv *priv = (void *)(imem);
 	int ret;
 
 	ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
@@ -101,14 +100,6 @@ nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	/* INSTMEM itself creates objects to reserve (and preserve across
-	 * suspend/resume) various fixed data locations, each one of these
-	 * takes a reference on INSTMEM itself, causing it to never be
-	 * freed. We drop all the self-references here to avoid this.
-	 */
-	if (unlikely(!priv->created))
-		atomic_dec(&engine->refcount);
-
 	return 0;
 }
 
@@ -134,27 +125,28 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+				 &priv->vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
 	if (ret)
 		return ret;
 
 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
+				 &priv->ramro);
 	if (ret)
 		return ret;
 
-	priv->created = true;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index 7983d8d9b358..b15b61310236 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -9,7 +9,6 @@
 
 struct nv04_instmem_priv {
 	struct nouveau_instmem base;
-	bool created;
 
 	void __iomem *iomem;
 	struct nouveau_mm heap;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index da64253201ef..716bf41bc3c1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -82,31 +82,33 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+				 &priv->vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
+				&priv->ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
+				 &priv->ramro);
 	if (ret)
 		return ret;
 
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
 	if (ret)
 		return ret;
 
-	priv->created = true;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index 078a2b9d6bd6..e4940fb166e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -23,10 +23,17 @@
  */
 
 #include <subdev/ltcg.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
 
 struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
+	u32 part_nr;
+	u32 part_mask;
 	u32 subp_nr;
+	struct nouveau_mm tags;
+	u32 num_tags;
+	struct nouveau_mm_node *tag_ram;
 };
 
 static void
@@ -62,11 +69,104 @@ nvc0_ltcg_intr(struct nouveau_subdev *subdev)
 }
 
 static int
+nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
+		     struct nouveau_mm_node **pnode)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+	if (ret)
+		*pnode = NULL;
+
+	return ret;
+}
+
+static void
+nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+
+	nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	u32 last = first + count - 1;
+	int p, i;
+
+	BUG_ON((first > last) || (last >= priv->num_tags));
+
+	nv_wr32(priv, 0x17e8cc, first);
+	nv_wr32(priv, 0x17e8d0, last);
+	nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+
+	/* wait until it's finished with clearing */
+	for (p = 0; p < priv->part_nr; ++p) {
+		if (!(priv->part_mask & (1 << p)))
+			continue;
+		for (i = 0; i < priv->subp_nr; ++i)
+			nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
+	}
+}
+
+/* TODO: Figure out tag memory details and drop the over-cautious allocation.
+ */
+static int
+nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
+{
+	u32 tag_size, tag_margin, tag_align;
+	int ret;
+
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+
+	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
+	priv->num_tags = (pfb->ram.size >> 17) / 4;
+	if (priv->num_tags > (1 << 17))
+		priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+
+	tag_align = priv->part_nr * 0x800;
+	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
+
+	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
+	/* 3 part 4 sub: 0x6000 bytes for 168 tags */
+	/*
+	 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
+	 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
+	 *
+	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
+	 */
+	tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin;
+	tag_size += tag_align;
+	tag_size = (tag_size + 0xfff) >> 12; /* round up */
+
+	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+			      &priv->tag_ram);
+	if (ret) {
+		priv->num_tags = 0;
+	} else {
+		u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+
+		tag_base += tag_align - 1;
+		ret = do_div(tag_base, tag_align);
+
+		nv_wr32(priv, 0x17e8d4, tag_base);
+	}
+	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+
+	return ret;
+}
+
+static int
 nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
 	       struct nouveau_object **pobject)
 {
 	struct nvc0_ltcg_priv *priv;
+	struct nouveau_fb *pfb = nouveau_fb(parent);
 	int ret;
 
 	ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
@@ -74,19 +174,44 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
+	priv->part_nr = nv_rd32(priv, 0x022438);
+	priv->part_mask = nv_rd32(priv, 0x022554);
+
+	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+
 	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
 
+	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
+	if (ret)
+		return ret;
+
+	priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
+	priv->base.tags_free = nvc0_ltcg_tags_free;
+	priv->base.tags_clear = nvc0_ltcg_tags_clear;
+
 	nv_subdev(priv)->intr = nvc0_ltcg_intr;
 	return 0;
 }
 
+static void
+nvc0_ltcg_dtor(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
+
+	nouveau_mm_fini(&priv->tags);
+	nouveau_mm_free(&pfb->vram, &priv->tag_ram);
+
+	nouveau_ltcg_destroy(ltcg);
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
-		.dtor = _nouveau_ltcg_dtor,
+		.dtor = nvc0_ltcg_dtor,
 		.init = _nouveau_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
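The sizing in nvc0_ltcg_init_tag_ram() matches its own in-code comment; working it through for 4 GiB of VRAM:

	/*
	 *   num_tags = (4 GiB >> 17) / 4 = 32768 / 4 = 8192   (already 64-aligned)
	 *   tag_size = (8192 / 64) * 0x6000 = 128 * 0x6000 = 0x300000 = 3 MiB
	 *
	 * plus tag_margin and tag_align slack; the final ">> 12" converts
	 * bytes to 4 KiB pages for the nouveau_mm_tail() reservation carved
	 * from the end of VRAM.
	 */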
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8379aafa6e1b..1c0330b8c9a4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -24,10 +24,10 @@
 
 #include <subdev/mc.h>
 
-void
-nouveau_mc_intr(struct nouveau_subdev *subdev)
+static irqreturn_t
+nouveau_mc_intr(int irq, void *arg)
 {
-	struct nouveau_mc *pmc = nouveau_mc(subdev);
+	struct nouveau_mc *pmc = arg;
 	const struct nouveau_mc_intr *map = pmc->intr_map;
 	struct nouveau_subdev *unit;
 	u32 stat, intr;
@@ -35,7 +35,7 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
 	intr = stat = nv_rd32(pmc, 0x000100);
 	while (stat && map->stat) {
 		if (stat & map->stat) {
-			unit = nouveau_subdev(subdev, map->unit);
+			unit = nouveau_subdev(pmc, map->unit);
 			if (unit && unit->intr)
 				unit->intr(unit);
 			intr &= ~map->stat;
@@ -46,4 +46,56 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
 	if (intr) {
 		nv_error(pmc, "unknown intr 0x%08x\n", stat);
 	}
+
+	return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int
+_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_mc *pmc = (void *)object;
+	nv_wr32(pmc, 0x000140, 0x00000000);
+	return nouveau_subdev_fini(&pmc->base, suspend);
+}
+
+int
+_nouveau_mc_init(struct nouveau_object *object)
+{
+	struct nouveau_mc *pmc = (void *)object;
+	int ret = nouveau_subdev_init(&pmc->base);
+	if (ret)
+		return ret;
+	nv_wr32(pmc, 0x000140, 0x00000001);
+	return 0;
+}
+
+void
+_nouveau_mc_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_mc *pmc = (void *)object;
+	free_irq(device->pdev->irq, pmc);
+	nouveau_subdev_destroy(&pmc->base);
+}
+
+int
+nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_mc *pmc;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+				     "master", length, pobject);
+	pmc = *pobject;
+	if (ret)
+		return ret;
+
+	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
+			  IRQF_SHARED, "nouveau", pmc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
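With this change PMC owns the IRQ itself instead of being called from the DRM layer, and because the line is requested with IRQF_SHARED, the handler must follow the shared-interrupt contract: claim only interrupts its own device raised, and return IRQ_NONE otherwise so other handlers on the line get a chance. The diff implements exactly that via the final "stat ? IRQ_HANDLED : IRQ_NONE"; restated as a distilled sketch:

	static irqreturn_t
	demo_isr(int irq, void *arg)
	{
		struct nouveau_mc *pmc = arg;
		u32 stat = nv_rd32(pmc, 0x000100);

		if (!stat)
			return IRQ_NONE;	/* someone else's interrupt */
		/* ... dispatch to per-subdev handlers ... */
		return IRQ_HANDLED;
	}

The 0x000140 writes in _nouveau_mc_init()/_fini() gate master interrupt delivery around suspend/resume, so a shared line can't call into a half-torn-down device.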
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 89da8fa7ea0f..8c769715227b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -55,7 +55,6 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nouveau_mc_intr;
 	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 397d868359ad..51919371810f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -41,7 +41,6 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nouveau_mc_intr;
 	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 5965add6daee..d796924f9930 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -57,7 +57,6 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nouveau_mc_intr;
 	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 3a80b29dce0f..e82fd21b5041 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -59,7 +59,6 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nouveau_mc_intr;
 	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 42bbf72023a8..737bd4b682e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -61,7 +61,6 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nouveau_mc_intr;
 	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index a70d1b7e397b..002e51b3af93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -165,7 +165,7 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
 	return 0;
 }
 
-static void
+void
 nv40_therm_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_therm *therm = nouveau_therm(subdev);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 86632cbd65ce..8cf7597a2182 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -118,145 +118,36 @@ nv50_fan_pwm_clock(struct nouveau_therm *therm)
 	return pwm_clock;
 }
 
-int
-nv50_temp_get(struct nouveau_therm *therm)
-{
-	return nv_rd32(therm, 0x20400);
-}
-
-static void
-nv50_therm_program_alarms(struct nouveau_therm *therm)
-{
-	struct nouveau_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
-	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
-	nv_wr32(therm, 0x20000, 0x000003ff);
-
-	/* shutdown: The computer should be shutdown when reached */
-	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
-	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
-
-	/* THRS_1 : fan boost*/
-	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
-
-	/* THRS_2 : critical */
-	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
-
-	/* THRS_4 : down clock */
-	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
-
-	nv_info(therm,
-		"Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		sensor->thrs_down_clock.temp,
-		sensor->thrs_down_clock.hysteresis,
-		sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
-
-}
-
-/* must be called with alarm_program_lock taken ! */
 static void
-nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
-				    uint32_t thrs_reg, u8 status_bit,
-				    const struct nvbios_therm_threshold *thrs,
-				    enum nouveau_therm_thrs thrs_name)
+nv50_sensor_setup(struct nouveau_therm *therm)
 {
-	enum nouveau_therm_thrs_direction direction;
-	enum nouveau_therm_thrs_state prev_state, new_state;
-	int temp, cur;
-
-	prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
-	temp = nv_rd32(therm, thrs_reg);
-
-	/* program the next threshold */
-	if (temp == thrs->temp) {
-		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
-		new_state = NOUVEAU_THERM_THRS_HIGHER;
-	} else {
-		nv_wr32(therm, thrs_reg, thrs->temp);
-		new_state = NOUVEAU_THERM_THRS_LOWER;
-	}
-
-	/* fix the state (in case someone reprogrammed the alarms) */
-	cur = therm->temp_get(therm);
-	if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
-		new_state = NOUVEAU_THERM_THRS_HIGHER;
-	else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
-		 cur < thrs->temp - thrs->hysteresis)
-		new_state = NOUVEAU_THERM_THRS_LOWER;
-	nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
-
-	/* find the direction */
-	if (prev_state < new_state)
-		direction = NOUVEAU_THERM_THRS_RISING;
-	else if (prev_state > new_state)
-		direction = NOUVEAU_THERM_THRS_FALLING;
-	else
-		return;
-
-	/* advertise a change in direction */
-	nouveau_therm_sensor_event(therm, thrs_name, direction);
+	nv_mask(therm, 0x20010, 0x40000000, 0x0);
+	mdelay(20); /* wait for the temperature to stabilize */
 }
 
-static void
-nv50_therm_intr(struct nouveau_subdev *subdev)
+static int
+nv50_temp_get(struct nouveau_therm *therm)
 {
-	struct nouveau_therm *therm = nouveau_therm(subdev);
 	struct nouveau_therm_priv *priv = (void *)therm;
 	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-	unsigned long flags;
-	uint32_t intr;
-
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
-
-	intr = nv_rd32(therm, 0x20100);
-
-	/* THRS_4: downclock */
-	if (intr & 0x002) {
-		nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
-						    &sensor->thrs_down_clock,
-						    NOUVEAU_THERM_THRS_DOWNCLOCK);
-		intr &= ~0x002;
-	}
+	int core_temp;
 
-	/* shutdown */
-	if (intr & 0x004) {
-		nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
-						    &sensor->thrs_shutdown,
-						    NOUVEAU_THERM_THRS_SHUTDOWN);
-		intr &= ~0x004;
-	}
-
-	/* THRS_1 : fan boost */
-	if (intr & 0x008) {
-		nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
-						    &sensor->thrs_fan_boost,
-						    NOUVEAU_THERM_THRS_FANBOOST);
-		intr &= ~0x008;
-	}
+	core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
 
-	/* THRS_2 : critical */
-	if (intr & 0x010) {
-		nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
-						    &sensor->thrs_critical,
-						    NOUVEAU_THERM_THRS_CRITICAL);
-		intr &= ~0x010;
-	}
+	/* if the slope or the offset is unset, do no use the sensor */
+	if (!sensor->slope_div || !sensor->slope_mult ||
+	    !sensor->offset_num || !sensor->offset_den)
+	    return -ENODEV;
 
-	if (intr)
-		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+	core_temp = core_temp + sensor->offset_constant - 8;
 
-	/* ACK everything */
-	nv_wr32(therm, 0x20100, 0xffffffff);
-	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+	/* reserve negative temperatures for errors */
+	if (core_temp < 0)
+		core_temp = 0;
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	return core_temp;
 }
 
 static int
@@ -278,33 +169,29 @@ nv50_therm_ctor(struct nouveau_object *parent,
 	priv->base.base.pwm_set = nv50_fan_pwm_set;
 	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
 	priv->base.base.temp_get = nv50_temp_get;
-	priv->base.sensor.program_alarms = nv50_therm_program_alarms;
-	nv_subdev(priv)->intr = nv50_therm_intr;
-
-	/* init the thresholds */
-	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-						 NOUVEAU_THERM_THRS_SHUTDOWN,
-						 NOUVEAU_THERM_THRS_LOWER);
-	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-						 NOUVEAU_THERM_THRS_FANBOOST,
-						 NOUVEAU_THERM_THRS_LOWER);
-	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-						 NOUVEAU_THERM_THRS_CRITICAL,
-						 NOUVEAU_THERM_THRS_LOWER);
-	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
-						 NOUVEAU_THERM_THRS_DOWNCLOCK,
-						 NOUVEAU_THERM_THRS_LOWER);
+	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+	nv_subdev(priv)->intr = nv40_therm_intr;
 
 	return nouveau_therm_preinit(&priv->base.base);
 }
 
+static int
+nv50_therm_init(struct nouveau_object *object)
+{
+	struct nouveau_therm *therm = (void *)object;
+
+	nv50_sensor_setup(therm);
+
+	return _nouveau_therm_init(object);
+}
+
 struct nouveau_oclass
 nv50_therm_oclass = {
 	.handle = NV_SUBDEV(THERM, 0x50),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
-		.init = _nouveau_therm_init,
+		.init = nv50_therm_init,
 		.fini = _nouveau_therm_fini,
 	},
 };
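The replacement nv50_temp_get() reads the raw 0x20014 sensor value and scales it with VBIOS-provided coefficients instead of trusting the pre-cooked 0x20400 register (which moves to nv84+). Worked through with hypothetical coefficient values (the formula itself is straight from the diff):

	/*
	 *   temp = raw * slope_mult / slope_div
	 *        + offset_num / offset_den
	 *        + offset_constant - 8
	 *
	 * e.g. raw = 0x1200 = 4608, slope_mult = 1, slope_div = 64,
	 * offsets 0, offset_constant 0:
	 *   4608 * 1 / 64 + 0 + 0 - 8 = 72 - 8 = 64 degrees C
	 */

If any slope/offset coefficient is unset the sensor is treated as unusable (-ENODEV) rather than reporting garbage, and negative results are clamped to 0 so negative values stay reserved for error codes.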
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
new file mode 100644
index 000000000000..42ba633ccff7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ *          Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv84_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+int
+nv84_temp_get(struct nouveau_therm *therm)
+{
+	return nv_rd32(therm, 0x20400);
+}
+
+static void
+nv84_therm_program_alarms(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+	nv_wr32(therm, 0x20000, 0x000003ff);
+
+	/* shutdown: The computer should be shutdown when reached */
+	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+	/* THRS_1 : fan boost*/
+	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+	/* THRS_2 : critical */
+	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+	/* THRS_4 : down clock */
+	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+	nv_debug(therm,
+		 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+		 sensor->thrs_down_clock.temp,
+		 sensor->thrs_down_clock.hysteresis,
+		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock taken ! */
+static void
+nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+				    uint32_t thrs_reg, u8 status_bit,
+				    const struct nvbios_therm_threshold *thrs,
+				    enum nouveau_therm_thrs thrs_name)
+{
+	enum nouveau_therm_thrs_direction direction;
+	enum nouveau_therm_thrs_state prev_state, new_state;
+	int temp, cur;
+
+	prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+	temp = nv_rd32(therm, thrs_reg);
+
+	/* program the next threshold */
+	if (temp == thrs->temp) {
+		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+		new_state = NOUVEAU_THERM_THRS_HIGHER;
+	} else {
+		nv_wr32(therm, thrs_reg, thrs->temp);
+		new_state = NOUVEAU_THERM_THRS_LOWER;
+	}
+
+	/* fix the state (in case someone reprogrammed the alarms) */
+	cur = therm->temp_get(therm);
+	if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+		new_state = NOUVEAU_THERM_THRS_HIGHER;
+	else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+		 cur < thrs->temp - thrs->hysteresis)
+		new_state = NOUVEAU_THERM_THRS_LOWER;
+	nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+	/* find the direction */
+	if (prev_state < new_state)
+		direction = NOUVEAU_THERM_THRS_RISING;
+	else if (prev_state > new_state)
+		direction = NOUVEAU_THERM_THRS_FALLING;
+	else
+		return;
+
+	/* advertise a change in direction */
+	nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv84_therm_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_therm *therm = nouveau_therm(subdev);
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	unsigned long flags;
+	uint32_t intr;
+
+	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+	intr = nv_rd32(therm, 0x20100);
+
+	/* THRS_4: downclock */
+	if (intr & 0x002) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+						    &sensor->thrs_down_clock,
+						    NOUVEAU_THERM_THRS_DOWNCLOCK);
+		intr &= ~0x002;
+	}
+
+	/* shutdown */
+	if (intr & 0x004) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+						    &sensor->thrs_shutdown,
+						    NOUVEAU_THERM_THRS_SHUTDOWN);
+		intr &= ~0x004;
+	}
+
+	/* THRS_1 : fan boost */
+	if (intr & 0x008) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+						    &sensor->thrs_fan_boost,
+						    NOUVEAU_THERM_THRS_FANBOOST);
+		intr &= ~0x008;
+	}
+
+	/* THRS_2 : critical */
+	if (intr & 0x010) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+						    &sensor->thrs_critical,
+						    NOUVEAU_THERM_THRS_CRITICAL);
+		intr &= ~0x010;
+	}
+
+	if (intr)
+		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+	/* ACK everything */
+	nv_wr32(therm, 0x20100, 0xffffffff);
+	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+static int
+nv84_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv84_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nv50_fan_pwm_get;
+	priv->base.base.pwm_set = nv50_fan_pwm_set;
+	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+	priv->base.base.temp_get = nv84_temp_get;
+	priv->base.sensor.program_alarms = nv84_therm_program_alarms;
+	nv_subdev(priv)->intr = nv84_therm_intr;
+
+	/* init the thresholds */
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_SHUTDOWN,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_FANBOOST,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_CRITICAL,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_DOWNCLOCK,
+						 NOUVEAU_THERM_THRS_LOWER);
+
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nv84_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = _nouveau_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
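The interrupt-driven alarm code moves here from nv50.c because only nv84+ has a reliable internal sensor for it. The hysteresis emulation deserves a note, since the hardware only has a single comparator per threshold:

	/* By example: with temp = 90 and hysteresis = 3, the comparator is
	 * first armed at 90.  When it fires (rising), the driver re-arms it
	 * at 87, so the next event can only be the temperature falling back
	 * below 87, which re-arms at 90 again, and so on.  The direction of
	 * each crossing is derived from the state transition; the status_bit
	 * parameter is carried in the signature but unused by the helper as
	 * written here.
	 */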
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 2dcc5437116a..d11a7c400813 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -81,7 +81,7 @@ nva3_therm_ctor(struct nouveau_object *parent,
 	priv->base.base.pwm_get = nv50_fan_pwm_get;
 	priv->base.base.pwm_set = nv50_fan_pwm_set;
 	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = nv50_temp_get;
+	priv->base.base.temp_get = nv84_temp_get;
 	priv->base.base.fan_sense = nva3_therm_fan_sense;
 	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
 	return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index d7d30ee8332e..54c28bdc4204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -135,7 +135,7 @@ nvd0_therm_ctor(struct nouveau_object *parent,
 	priv->base.base.pwm_get = nvd0_fan_pwm_get;
 	priv->base.base.pwm_set = nvd0_fan_pwm_set;
 	priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
-	priv->base.base.temp_get = nv50_temp_get;
+	priv->base.base.temp_get = nv84_temp_get;
 	priv->base.base.fan_sense = nva3_therm_fan_sense;
 	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
 	return nouveau_therm_preinit(&priv->base.base);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 438d9824b774..15ca64e481f1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -134,11 +134,12 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
 				enum nouveau_therm_thrs_direction dir);
 void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
 
+void nv40_therm_intr(struct nouveau_subdev *);
 int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
 int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
 int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
 int nv50_fan_pwm_clock(struct nouveau_therm *);
-int nv50_temp_get(struct nouveau_therm *therm);
+int nv84_temp_get(struct nouveau_therm *therm);
 
 int nva3_therm_fan_sense(struct nouveau_therm *);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 470f6a47b656..dde746c78c8a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -205,13 +205,13 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
 	struct nouveau_therm_priv *priv = (void *)therm;
 	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
 
-	nv_info(therm,
+	nv_debug(therm,
 		"programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
 		sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
 		sensor->thrs_down_clock.temp,
 		sensor->thrs_down_clock.hysteresis,
 		sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
 		sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
 
 	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 8e1bae4f12e8..9469b8275675 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -96,11 +96,16 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
 
 	/* append new alarm to list, in soonest-alarm-first order */
 	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry(list, &priv->alarms, head) {
-		if (list->timestamp > alarm->timestamp)
-			break;
+	if (!time) {
+		if (!list_empty(&alarm->head))
+			list_del(&alarm->head);
+	} else {
+		list_for_each_entry(list, &priv->alarms, head) {
+			if (list->timestamp > alarm->timestamp)
+				break;
+		}
+		list_add_tail(&alarm->head, &list->head);
 	}
-	list_add_tail(&alarm->head, &list->head);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* process pending alarms */
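The nv04_timer_alarm() change above overloads time == 0 to mean "cancel": a pending alarm is unlinked instead of being re-queued, and the sorted insert only happens for a real deadline. A hedged stand-alone sketch of the same soonest-first list discipline (singly linked for brevity; the kernel code uses struct list_head under a spinlock):

#include <stdint.h>
#include <stddef.h>

struct alarm {
	uint64_t timestamp;
	struct alarm *next;
};

/* Insert in soonest-first order, or cancel when time == 0. */
static void alarm_set(struct alarm **list, struct alarm *a, uint64_t time)
{
	struct alarm **p;

	for (p = list; *p; p = &(*p)->next)	/* unlink if already queued */
		if (*p == a) {
			*p = a->next;
			break;
		}

	if (time == 0)				/* time == 0 now means cancel */
		return;

	a->timestamp = time;
	for (p = list; *p && (*p)->timestamp <= time; p = &(*p)->next)
		;
	a->next = *p;
	*p = a;
}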
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
index 6adbbc9cc361..ed45437167f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -110,7 +110,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
 				 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
 				 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
 				 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 9474cfca6e4c..064c76262876 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -119,7 +119,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
 				 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
 				 16, NVOBJ_FLAG_ZERO_ALLOC,
 				 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index aa8131436e3d..fae1f67d5948 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -196,7 +196,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(parent, NULL,
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
 				 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
 				 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
 				 &priv->vm->pgt[0].obj[0]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 30c61e6c2017..4c3b0a23b9d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,12 +28,54 @@
 #include <subdev/timer.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
+#include <subdev/ltcg.h>
 
 struct nvc0_vmmgr_priv {
 	struct nouveau_vmmgr base;
 	spinlock_t lock;
 };
 
+
+/* Map from compressed to corresponding uncompressed storage type.
+ * The value 0xff represents an invalid storage type.
+ */
+const u8 nvc0_pte_storage_type_map[256] =
+{
+	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+	0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+	0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+	0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+	0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+	0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+	0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+	0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+	0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+	0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+	0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+	0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+};
+
+
 static void
 nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
 		struct nouveau_gpuobj *pgt[2])
@@ -68,10 +110,20 @@ static void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
-	u32 next = 1 << (vma->node->type - 8);
+	u64 next = 1 << (vma->node->type - 8);
 
 	phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
 	pte <<= 3;
+
+	if (mem->tag) {
+		struct nouveau_ltcg *ltcg =
+			nouveau_ltcg(vma->vm->vmm->base.base.parent);
+		u32 tag = mem->tag->offset + (delta >> 17);
+		phys |= (u64)tag << (32 + 12);
+		next |= (u64)1 << (32 + 12);
+		ltcg->tags_clear(ltcg, tag, cnt);
+	}
+
 	while (cnt--) {
 		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
 		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
@@ -85,10 +137,12 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+	/* compressed storage types are invalid for system memory */
+	u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
 
 	pte <<= 3;
 	while (cnt--) {
-		u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+		u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
 		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
 		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
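Two things happen in the nvc0 hunks: large-page PTEs for compressed VRAM gain a compression-tag index in the high bits (bit 44 upward), and system-memory mappings are demoted to the equivalent uncompressed storage type via the 256-entry table, since compression tags only exist for VRAM. A small, hedged sketch of the bit-packing; the helper name and values are invented:

#include <stdint.h>
#include <stdio.h>

/* Pack a compression-tag index into a 64-bit PTE at bit 44 (32 + 12),
 * the position used by the nvc0_vm_map() hunk above. */
static uint64_t pte_with_tag(uint64_t phys, uint32_t tag)
{
	return phys | ((uint64_t)tag << (32 + 12));
}

int main(void)
{
	uint64_t delta = 5ull << 17;	/* delta >> 17: one tag per 128 KiB */
	uint32_t tag = 0x10 + (uint32_t)(delta >> 17);

	printf("pte = %#llx\n",
	       (unsigned long long)pte_with_tag(0x100000, tag));
	return 0;
}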
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
new file mode 100644
index 000000000000..ea3f5b8a0f95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -0,0 +1,10 @@
+nouveau-y += dispnv04/arb.o
+nouveau-y += dispnv04/crtc.o
+nouveau-y += dispnv04/cursor.o
+nouveau-y += dispnv04/dac.o
+nouveau-y += dispnv04/dfp.o
+nouveau-y += dispnv04/disp.o
+nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/tvmodesnv17.o
+nouveau-y += dispnv04/tvnv04.o
+nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 6da576445b3d..2e70462883e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -25,7 +25,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 /****************************************************************************\
 *                                                                            *
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6578cd28c556..0782bd2f1e04 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -33,10 +33,10 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 #include "nouveau_fbcon.h"
-#include "nv04_display.h"
+#include "disp.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
@@ -1070,4 +1070,3 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 
 	return 0;
 }
-
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index fe86f0de348f..a810303169de 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -3,7 +3,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 static void
 nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
@@ -68,4 +68,3 @@ nv04_cursor_init(struct nouveau_crtc *crtc)
 	crtc->cursor.show = nv04_cursor_show;
 	return 0;
 }
-
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 64f7020fb605..434b920f6bd4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -31,7 +31,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 
 #include <subdev/bios/gpio.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7e24cdf1cb39..93dd23ff0093 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -32,7 +32,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nvreg.h"
 
 #include <drm/i2c/sil164.h>
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index ad48444c385c..4908d3fd0486 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,7 +30,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..a0a031dad13f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 617a06ffdb46..973056b86207 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -24,7 +24,7 @@
 
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 7dff1021fab4..eeb70d912d99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -24,7 +24,8 @@
 #define __NOUVEAU_HW_H__
 
 #include <drm/drmP.h>
-#include "nv04_display.h"
+#include "disp.h"
+#include "nvreg.h"
 
 #include <subdev/bios/pll.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
index bbfb1a68fb11..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 1cdfe2a5875d..08c6f5e50610 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -29,8 +29,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
 
 char *nv17_tv_norm_names[NUM_TV_NORMS] = {
 	[TV_NORM_PAL] = "PAL",
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 4a69ccdef9b4..bf13db4e8631 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -30,7 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
+#include "hw.h"
 #include <drm/drm_crtc_helper.h>
 
 #include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 977e42be2050..acef48f4a4ea 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -31,8 +31,8 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_hw.h"
-#include "nv17_tv.h"
+#include "hw.h"
+#include "tvnv17.h"
 
 #include <core/device.h>
 
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b331543a41b..7b331543a41b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 3b6dc883e150..1c4c6c9161ac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -30,6 +30,7 @@
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <subdev/instmem.h>
+#include <engine/graph.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
@@ -168,6 +169,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
 	struct drm_nouveau_getparam *getparam = data;
 
 	switch (getparam->param) {
@@ -208,14 +210,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = 1;
 		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
-		/* NV40 and NV50 versions are quite different, but register
-		 * address is the same. User is supposed to know the card
-		 * family anyway... */
-		if (device->chipset >= 0x40) {
-			getparam->value = nv_rd32(device, 0x001540);
-			break;
-		}
-		/* FALLTHRU */
+		getparam->value = graph->units ? graph->units(graph) : 0;
+		break;
 	default:
 		nv_debug(device, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
@@ -391,7 +387,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-	struct nouveau_abi16_chan *chan, *temp;
+	struct nouveau_abi16_chan *chan = NULL, *temp;
 	struct nouveau_abi16_ntfy *ntfy;
 	struct nouveau_object *object;
 	struct nv_dma_class args = {};
@@ -404,10 +400,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
 		return nouveau_abi16_put(abi16, -EINVAL);
 
-	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-		if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
+			chan = temp;
 			break;
-		chan = NULL;
+		}
 	}
 
 	if (!chan)
@@ -459,17 +456,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_gpuobj_free *fini = data;
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-	struct nouveau_abi16_chan *chan, *temp;
+	struct nouveau_abi16_chan *chan = NULL, *temp;
 	struct nouveau_abi16_ntfy *ntfy;
 	int ret;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
-	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-		if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
+			chan = temp;
 			break;
-		chan = NULL;
+		}
 	}
 
 	if (!chan)
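Both abi16 lookups above had the same latent bug: list_for_each_entry_safe() was pressed into service as a search loop, with `chan` reset inside the body, so a non-matching walk could leave it pointing at a bogus entry rather than NULL. The fix walks with a throwaway cursor and records the hit explicitly. The same pattern in stand-alone C, with made-up types:

#include <stddef.h>

struct chan {
	int handle;
	struct chan *next;
};

static struct chan *find_chan(struct chan *head, int handle)
{
	struct chan *found = NULL, *cur;

	for (cur = head; cur; cur = cur->next) {
		if (cur->handle == handle) {
			found = cur;	/* record the match explicitly */
			break;
		}
	}
	return found;			/* NULL when nothing matched */
}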
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5d940302d2aa..2ffad2176b7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -239,6 +239,9 @@ nouveau_backlight_init(struct drm_device *dev)
 	case NV_40:
 		return nv40_backlight_init(connector);
 	case NV_50:
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
 		return nv50_backlight_init(connector);
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 50a6dd02f7c5..6aa2137e093a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -28,7 +28,7 @@
 
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
 
 #include <linux/io-mapping.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 7ccd28f11adf..0067586eb015 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -24,8 +24,6 @@
 #ifndef __NOUVEAU_DISPBIOS_H__
 #define __NOUVEAU_DISPBIOS_H__
 
-#include "nvreg.h"
-
 #define DCB_MAX_NUM_ENTRIES 16
 #define DCB_MAX_NUM_I2C_ENTRIES 16
 #define DCB_MAX_NUM_GPIO_ENTRIES 32
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4dd7ae2ac6c6..4da776f344d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -32,7 +32,7 @@
 
 #include "nouveau_reg.h"
 #include "nouveau_drm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_acpi.h"
 
 #include "nouveau_display.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 4610c3a29bbe..7bf22d4a3d96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -28,7 +28,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "nouveau_fbcon.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
 #include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d1099365bfc1..46c152ff0a80 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -31,13 +31,13 @@
 #include <core/gpuobj.h>
 #include <core/class.h>
 
-#include <subdev/device.h>
-#include <subdev/vm.h>
-
+#include <engine/device.h>
 #include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <subdev/vm.h>
 
 #include "nouveau_drm.h"
-#include "nouveau_irq.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
@@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
 static struct drm_driver driver;
 
 static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+	struct nouveau_drm *drm =
+		container_of(event, struct nouveau_drm, vblank[head]);
+	drm_handle_vblank(drm->dev, head);
+	return NVKM_EVENT_KEEP;
+}
+
+static int
 nouveau_drm_vblank_enable(struct drm_device *dev, int head)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-	nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+
+	if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
+		return -EIO;
+	WARN_ON_ONCE(drm->vblank[head].func);
+	drm->vblank[head].func = nouveau_drm_vblank_handler;
+	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
 	return 0;
 }
 
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-	nouveau_event_put(pdisp->vblank, head, &drm->vblank);
-}
-
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-	struct nouveau_drm *drm =
-		container_of(event, struct nouveau_drm, vblank);
-	drm_handle_vblank(drm->dev, head);
-	return NVKM_EVENT_KEEP;
+	if (drm->vblank[head].func)
+		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
+	else
+		WARN_ON_ONCE(1);
+	drm->vblank[head].func = NULL;
 }
 
 static u64
@@ -156,7 +165,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	u32 arg0, arg1;
 	int ret;
 
-	if (nouveau_noaccel)
+	if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
 		return;
 
 	/* initialise synchronisation routines */
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	dev->dev_private = drm;
 	drm->dev = dev;
-	drm->vblank.func = nouveau_drm_vblank_handler;
 
 	INIT_LIST_HEAD(&drm->clients);
 	spin_lock_init(&drm->tile.lock);
@@ -357,10 +365,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto fail_bios;
 
-	ret = nouveau_irq_init(dev);
-	if (ret)
-		goto fail_irq;
-
 	ret = nouveau_display_create(dev);
 	if (ret)
 		goto fail_dispctor;
@@ -380,8 +384,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 fail_dispinit:
 	nouveau_display_destroy(dev);
 fail_dispctor:
-	nouveau_irq_fini(dev);
-fail_irq:
 	nouveau_bios_takedown(dev);
 fail_bios:
 	nouveau_ttm_fini(drm);
@@ -407,7 +409,6 @@ nouveau_drm_unload(struct drm_device *dev)
 	nouveau_display_fini(dev);
 	nouveau_display_destroy(dev);
 
-	nouveau_irq_fini(dev);
 	nouveau_bios_takedown(dev);
 
 	nouveau_ttm_fini(drm);
@@ -525,7 +526,6 @@ nouveau_do_resume(struct drm_device *dev)
 	nouveau_fence(drm)->resume(drm);
 
 	nouveau_run_vbios_init(dev);
-	nouveau_irq_postinstall(dev);
 	nouveau_pm_resume(dev);
 
 	if (dev->mode_config.num_crtc) {
@@ -661,8 +661,7 @@ static struct drm_driver
 driver = {
 	.driver_features =
 		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
-		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-		DRIVER_MODESET | DRIVER_PRIME,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,
@@ -676,11 +675,6 @@ driver = {
 	.debugfs_cleanup = nouveau_debugfs_takedown,
 #endif
 
-	.irq_preinstall = nouveau_irq_preinstall,
-	.irq_postinstall = nouveau_irq_postinstall,
-	.irq_uninstall = nouveau_irq_uninstall,
-	.irq_handler = nouveau_irq_handler,
-
 	.get_vblank_counter = drm_vblank_count,
 	.enable_vblank = nouveau_drm_vblank_enable,
 	.disable_vblank = nouveau_drm_vblank_disable,
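With one struct nouveau_eventh per head, the handler recovers the owning nouveau_drm with container_of(event, struct nouveau_drm, vblank[head]), which works because the callback is also handed the head index. (As an aside, the new bounds check uses `head > ARRAY_SIZE(drm->vblank)`; `>=` would be the safer comparison, since index 4 is already out of range for a 4-entry array.) A stand-alone sketch of the same pointer arithmetic, with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct eventh { int armed; };

struct drm_dev {
	const char *name;
	struct eventh vblank[4];
};

static void vblank_handler(struct eventh *event, int head)
{
	/* step back to vblank[0], then back to the enclosing struct */
	struct drm_dev *drm = (struct drm_dev *)
		((char *)(event - head) - offsetof(struct drm_dev, vblank));

	printf("%s: vblank on head %d\n", drm->name, head);
}

int main(void)
{
	struct drm_dev d = { .name = "card0" };
	vblank_handler(&d.vblank[2], 2);
	return 0;
}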
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index b25df374c901..f2b30f89dee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,18 @@
 
 #define DRIVER_MAJOR		1
 #define DRIVER_MINOR		1
-#define DRIVER_PATCHLEVEL	0
+#define DRIVER_PATCHLEVEL	1
+
+/*
+ * 1.1.1:
+ * - added support for tiled system memory buffer objects
+ * - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0].
+ * - added support for compressed memory storage types on [nvc0,nve0].
+ * - added support for software methods 0x600,0x644,0x6ac on nvc0
+ *   to control registers on the MPs to enable performance counters,
+ *   and to control the warp error enable mask (OpenGL requires out of
+ *   bounds access to local memory to be silently ignored / return 0).
+ */
 
 #include <core/client.h>
 #include <core/event.h>
@@ -113,7 +124,7 @@ struct nouveau_drm {
 	struct nvbios vbios;
 	struct nouveau_display *display;
 	struct backlight_device *backlight;
-	struct nouveau_eventh vblank;
+	struct nouveau_eventh vblank[4];
 
 	/* power management */
 	struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e24341229d5e..24660c0f713d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,7 +30,7 @@
 #include <subdev/bios/dcb.h>
 
 #include <drm/drm_encoder_slave.h>
-#include "nv04_display.h"
+#include "dispnv04/disp.h"
 
 #define NV_DPMS_CLEARED 0x80
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
deleted file mode 100644
index 1303680affd3..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <subdev/mc.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_irq.h"
-#include "nv50_display.h"
-
-void
-nouveau_irq_preinstall(struct drm_device *dev)
-{
-	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-int
-nouveau_irq_postinstall(struct drm_device *dev)
-{
-	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
-	return 0;
-}
-
-void
-nouveau_irq_uninstall(struct drm_device *dev)
-{
-	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
-}
-
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
-{
-	struct drm_device *dev = arg;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_mc *pmc = nouveau_mc(device);
-	u32 stat;
-
-	stat = nv_rd32(device, 0x000100);
-	if (stat == 0 || stat == ~0)
-		return IRQ_NONE;
-
-	nv_subdev(pmc)->intr(nv_subdev(pmc));
-	return IRQ_HANDLED;
-}
-
-int
-nouveau_irq_init(struct drm_device *dev)
-{
-	return drm_irq_install(dev);
-}
-
-void
-nouveau_irq_fini(struct drm_device *dev)
-{
-	drm_irq_uninstall(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
deleted file mode 100644
index 06714ad857bb..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_irq.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NOUVEAU_IRQ_H__
-#define __NOUVEAU_IRQ_H__
-
-extern int nouveau_irq_init(struct drm_device *);
-extern void nouveau_irq_fini(struct drm_device *);
-extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
-extern void nouveau_irq_preinstall(struct drm_device *);
-extern int nouveau_irq_postinstall(struct drm_device *);
-extern void nouveau_irq_uninstall(struct drm_device *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 9be9cb58e19b..f19a15a3bc03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -35,14 +35,16 @@
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
-	/* nothing to do */
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	man->priv = pfb;
 	return 0;
 }
 
 static int
 nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
 {
-	/* nothing to do */
+	man->priv = NULL;
 	return 0;
 }
 
@@ -104,7 +106,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 static void
 nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 {
-	struct nouveau_mm *mm = man->priv;
+	struct nouveau_fb *pfb = man->priv;
+	struct nouveau_mm *mm = &pfb->vram;
 	struct nouveau_mm_node *r;
 	u32 total = 0, free = 0;
 
@@ -161,6 +164,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_placement *placement,
 			 struct ttm_mem_reg *mem)
 {
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_mem *node;
 
 	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
@@ -171,6 +176,20 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 	node->page_shift = 12;
 
+	switch (nv_device(drm->device)->card_type) {
+	case NV_50:
+		if (nv_device(drm->device)->chipset != 0x50)
+			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
+		break;
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
+		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+		break;
+	default:
+		break;
+	}
+
 	mem->mm_node = node;
 	mem->start = 0;
 	return 0;
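The GART path now mirrors what the VRAM path already did: the storage type ("memtype") is carved out of the buffer object's tile_flags, 7 bits on most NV50-family parts and 8 on Fermi and newer. A compressed sketch of just that extraction, with placeholder enum values:

#include <stdint.h>
#include <stdio.h>

enum card_type { CARD_NV50, CARD_NVC0 };	/* placeholders */

static uint32_t memtype_from_tile_flags(enum card_type t, uint32_t tile_flags)
{
	switch (t) {
	case CARD_NV50:
		return (tile_flags & 0x7f00) >> 8;	/* 7-bit field */
	case CARD_NVC0:
		return (tile_flags & 0xff00) >> 8;	/* 8-bit field */
	}
	return 0;
}

int main(void)
{
	printf("%u\n", memtype_from_tile_flags(CARD_NVC0, 0xfe00));
	return 0;
}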
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 2a0cc9d0614a..27afc0ea28b0 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_pm.h"
 
 #include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3382064c7f33..3af5bcd0b203 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -26,7 +26,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7f0e6c3f37d1..ebf0a683305e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
 {
 	struct nv50_display_flip *flip = data;
 	if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
-					      flip->chan->data);
+					      flip->chan->data)
 		return true;
 	usleep_range(1, 2);
 	return false;
@@ -2174,6 +2174,7 @@ int
 nv50_display_create(struct drm_device *dev)
 {
 	static const u16 oclass[] = {
+		NVF0_DISP_CLASS,
 		NVE0_DISP_CLASS,
 		NVD0_DISP_CLASS,
 		NVA3_DISP_CLASS,
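The first nv50_display.c hunk is a genuine bug fix, not a cleanup: a stray semicolon turned the comparison into an empty statement, so the flip-wait poll returned true on the first iteration regardless of the sync value. A reduced illustration of the hazard:

#include <stdbool.h>
#include <stdio.h>

static bool buggy(int v)
{
	if (v == 42);		/* stray ';' = empty statement */
		return true;	/* always executes, despite the indent */
	return false;		/* unreachable */
}

static bool fixed(int v)
{
	if (v == 42)
		return true;
	return false;
}

int main(void)
{
	printf("buggy(0)=%d fixed(0)=%d\n", buggy(0), fixed(0));
	return 0;
}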
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8bd5d2781baf..69620e39c90c 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "nouveau_drm.h"
 #include "nouveau_bios.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
 #include "nouveau_pm.h"
 #include "nouveau_hwsq.h"
 
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index c451c41a7a7d..912759daf562 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -110,6 +110,11 @@ static enum drm_connector_status omap_connector_detect(
 			ret = connector_status_connected;
 		else
 			ret = connector_status_disconnected;
+	} else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
+		ret = connector_status_connected;
 	} else {
 		ret = connector_status_unknown;
 	}
@@ -189,12 +194,30 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
 	struct omap_video_timings timings = {0};
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *new_mode;
-	int ret = MODE_BAD;
+	int r, ret = MODE_BAD;
 
 	copy_timings_drm_to_omap(&timings, mode);
 	mode->vrefresh = drm_mode_vrefresh(mode);
 
-	if (!dssdrv->check_timings(dssdev, &timings)) {
+	/*
+	 * if the panel driver doesn't have a check_timings, it's most likely
+	 * a fixed-resolution panel; check whether the requested timings match
+	 * the panel's own timings
+	 */
+	if (dssdrv->check_timings) {
+		r = dssdrv->check_timings(dssdev, &timings);
+	} else {
+		struct omap_video_timings t = {0};
+
+		dssdrv->get_timings(dssdev, &t);
+
+		if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+			r = -EINVAL;
+		else
+			r = 0;
+	}
+
+	if (!r) {
 		/* check if vrefresh is still valid */
 		new_mode = drm_mode_duplicate(dev, mode);
 		new_mode->clock = timings.pixel_clock;
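omap_connector_mode_valid() here, and omap_encoder_update() further down, now tolerate panel drivers without a check_timings hook by comparing the requested timings against the panel's fixed timings. The shape of that fallback, with invented minimal types:

#include <string.h>

struct timings { int width, height, pixel_clock; };

struct panel_driver {
	int  (*check_timings)(const struct timings *t);
	void (*get_timings)(struct timings *t);
};

/* 0 on success, negative on mismatch -- same convention as the hunk */
static int validate_timings(const struct panel_driver *drv,
			    const struct timings *req)
{
	struct timings t = {0};

	if (drv->check_timings)
		return drv->check_timings(req);

	/* fixed-resolution panel: only an exact match is acceptable */
	drv->get_timings(&t);
	return memcmp(req, &t, sizeof(t)) ? -1 : 0;
}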
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index bec66a490b8f..79b200aee18a 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -74,6 +74,13 @@ struct omap_crtc {
 	struct work_struct page_flip_work;
 };
 
+uint32_t pipe2vbl(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	return dispc_mgr_get_vsync_irq(omap_crtc->channel);
+}
+
 /*
  * Manager-ops, callbacks from output when they need to configure
  * the upstream part of the video pipe.
@@ -613,7 +620,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 	omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
 	omap_crtc->apply.post_apply = omap_crtc_post_apply;
 
-	omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+	omap_crtc->channel = channel;
+	omap_crtc->plane = plane;
+	omap_crtc->plane->crtc = crtc;
+	omap_crtc->name = channel_names[channel];
+	omap_crtc->pipe = id;
+
+	omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
 	omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
 
 	omap_crtc->error_irq.irqmask =
@@ -621,12 +634,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 	omap_crtc->error_irq.irq = omap_crtc_error_irq;
 	omap_irq_register(dev, &omap_crtc->error_irq);
 
-	omap_crtc->channel = channel;
-	omap_crtc->plane = plane;
-	omap_crtc->plane->crtc = crtc;
-	omap_crtc->name = channel_names[channel];
-	omap_crtc->pipe = id;
-
 	/* temporary: */
 	omap_crtc->mgr.id = channel;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 079c54c6f94c..9c53c25e5201 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -74,54 +74,53 @@ static int get_connector_type(struct omap_dss_device *dssdev)
 	}
 }
 
+static bool channel_used(struct drm_device *dev, enum omap_channel channel)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < priv->num_crtcs; i++) {
+		struct drm_crtc *crtc = priv->crtcs[i];
+
+		if (omap_crtc_channel(crtc) == channel)
+			return true;
+	}
+
+	return false;
+}
+
 static int omap_modeset_init(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_dss_device *dssdev = NULL;
 	int num_ovls = dss_feat_get_num_ovls();
-	int id;
+	int num_mgrs = dss_feat_get_num_mgrs();
+	int num_crtcs;
+	int i, id = 0;
 
 	drm_mode_config_init(dev);
 
 	omap_drm_irq_install(dev);
 
 	/*
-	 * Create private planes and CRTCs for the last NUM_CRTCs overlay
-	 * plus manager:
+	 * We usually don't want to create a CRTC for each manager, at least
+	 * not until we have a way to expose private planes to userspace.
+	 * Otherwise there would not be enough video pipes left for drm planes.
+	 * We use the num_crtc argument to limit the number of crtcs we create.
 	 */
-	for (id = 0; id < min(num_crtc, num_ovls); id++) {
-		struct drm_plane *plane;
-		struct drm_crtc *crtc;
-
-		plane = omap_plane_init(dev, id, true);
-		crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
-
-		BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-		priv->crtcs[id] = crtc;
-		priv->num_crtcs++;
-
-		priv->planes[id] = plane;
-		priv->num_planes++;
-	}
-
-	/*
-	 * Create normal planes for the remaining overlays:
-	 */
-	for (; id < num_ovls; id++) {
-		struct drm_plane *plane = omap_plane_init(dev, id, false);
-
-		BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-		priv->planes[priv->num_planes++] = plane;
-	}
+	num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
+
+	dssdev = NULL;
 
 	for_each_dss_dev(dssdev) {
 		struct drm_connector *connector;
 		struct drm_encoder *encoder;
+		enum omap_channel channel;
 
 		if (!dssdev->driver) {
 			dev_warn(dev->dev, "%s has no driver.. skipping it\n",
 				dssdev->name);
-			return 0;
+			continue;
 		}
 
 		if (!(dssdev->driver->get_timings ||
@@ -129,7 +128,7 @@ static int omap_modeset_init(struct drm_device *dev)
 			dev_warn(dev->dev, "%s driver does not support "
 				"get_timings or read_edid.. skipping it!\n",
 				dssdev->name);
-			return 0;
+			continue;
 		}
 
 		encoder = omap_encoder_init(dev, dssdev);
@@ -157,16 +156,118 @@ static int omap_modeset_init(struct drm_device *dev)
 
 		drm_mode_connector_attach_encoder(connector, encoder);
 
+		/*
+		 * if we have reached the limit of the crtcs we are allowed
+		 * to create, let's not try to look for a crtc for this
+		 * panel/encoder and onwards; we will, of course, populate
+		 * the possible_crtcs field for all the encoders with the
+		 * final set of crtcs we create
+		 */
+		if (id == num_crtcs)
+			continue;
+
+		/*
+		 * get the recommended DISPC channel for this encoder. For
+		 * now, we only try to create a crtc out of the recommended
+		 * one; the other possible channels to which the encoder can
+		 * connect are not considered.
+		 */
+		channel = dssdev->output->dispc_channel;
+
+		/*
+		 * if this channel hasn't already been taken by a previously
+		 * allocated crtc, we create a new crtc for it
+		 */
+		if (!channel_used(dev, channel)) {
+			struct drm_plane *plane;
+			struct drm_crtc *crtc;
+
+			plane = omap_plane_init(dev, id, true);
+			crtc = omap_crtc_init(dev, plane, channel, id);
+
+			BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+			priv->crtcs[id] = crtc;
+			priv->num_crtcs++;
+
+			priv->planes[id] = plane;
+			priv->num_planes++;
+
+			id++;
+		}
+	}
+
+	/*
+	 * we have allocated crtcs according to the need of the
+	 * panels/encoders; add more crtcs here if needed
+	 */
+	for (; id < num_crtcs; id++) {
+
+		/* find a free manager for this crtc */
+		for (i = 0; i < num_mgrs; i++) {
+			if (!channel_used(dev, i)) {
+				struct drm_plane *plane;
+				struct drm_crtc *crtc;
+
+				plane = omap_plane_init(dev, id, true);
+				crtc = omap_crtc_init(dev, plane, i, id);
+
+				BUG_ON(priv->num_crtcs >=
+					ARRAY_SIZE(priv->crtcs));
+
+				priv->crtcs[id] = crtc;
+				priv->num_crtcs++;
+
+				priv->planes[id] = plane;
+				priv->num_planes++;
+
+				break;
+			} else {
+				continue;
+			}
+		}
+
+		if (i == num_mgrs) {
+			/* this shouldn't really happen */
+			dev_err(dev->dev, "no managers left for crtc\n");
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Create normal planes for the remaining overlays:
+	 */
+	for (; id < num_ovls; id++) {
+		struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+		BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+		priv->planes[priv->num_planes++] = plane;
+	}
+
+	for (i = 0; i < priv->num_encoders; i++) {
+		struct drm_encoder *encoder = priv->encoders[i];
+		struct omap_dss_device *dssdev =
+			omap_encoder_get_dssdev(encoder);
+
 		/* figure out which crtc's we can connect the encoder to: */
 		encoder->possible_crtcs = 0;
 		for (id = 0; id < priv->num_crtcs; id++) {
-			enum omap_dss_output_id supported_outputs =
-				dss_feat_get_supported_outputs(pipe2chan(id));
+			struct drm_crtc *crtc = priv->crtcs[id];
+			enum omap_channel crtc_channel;
+			enum omap_dss_output_id supported_outputs;
+
+			crtc_channel = omap_crtc_channel(crtc);
+			supported_outputs =
+				dss_feat_get_supported_outputs(crtc_channel);
+
 			if (supported_outputs & dssdev->output->id)
 				encoder->possible_crtcs |= (1 << id);
 		}
 	}
 
+	DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
+		priv->num_planes, priv->num_crtcs, priv->num_encoders,
+		priv->num_connectors);
+
 	dev->mode_config.min_width = 32;
 	dev->mode_config.min_height = 32;
 
@@ -303,7 +404,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
 	return ret;
 }
 
-struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
 	DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -567,7 +668,7 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
 };
 #endif
 
-struct platform_driver pdev = {
+static struct platform_driver pdev = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
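The rewritten omap_modeset_init() allocates CRTCs in two passes: each encoder first tries to claim its preferred DISPC channel if channel_used() says it is free, and any CRTC quota left over is then backed by whichever managers remain. A compressed, hedged model of that policy with invented minimal state:

#include <stdbool.h>

#define MAX_MGRS 4

static bool used[MAX_MGRS];

/* returns the manager index backing the new crtc, or -1 if none left */
static int alloc_channel(int preferred, int num_mgrs)
{
	int i;

	if (preferred >= 0 && preferred < num_mgrs && !used[preferred]) {
		used[preferred] = true;	/* first pass: preferred channel */
		return preferred;
	}
	for (i = 0; i < num_mgrs; i++) {
		if (!used[i]) {
			used[i] = true;	/* second pass: any free manager */
			return i;
		}
	}
	return -1;
}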
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index d4f997bb4ac0..215a20dd340c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -139,8 +139,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
139int omap_gem_resume(struct device *dev); 139int omap_gem_resume(struct device *dev);
140#endif 140#endif
141 141
142int omap_irq_enable_vblank(struct drm_device *dev, int crtc); 142int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
143void omap_irq_disable_vblank(struct drm_device *dev, int crtc); 143void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
144irqreturn_t omap_irq_handler(DRM_IRQ_ARGS); 144irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
145void omap_irq_preinstall(struct drm_device *dev); 145void omap_irq_preinstall(struct drm_device *dev);
146int omap_irq_postinstall(struct drm_device *dev); 146int omap_irq_postinstall(struct drm_device *dev);
@@ -271,39 +271,9 @@ static inline int align_pitch(int pitch, int width, int bpp)
271 return ALIGN(pitch, 8 * bytespp); 271 return ALIGN(pitch, 8 * bytespp);
272} 272}
273 273
274static inline enum omap_channel pipe2chan(int pipe)
275{
276 int num_mgrs = dss_feat_get_num_mgrs();
277
278 /*
279 * We usually don't want to create a CRTC for each manager,
280 * at least not until we have a way to expose private planes
281 * to userspace. Otherwise there would not be enough video
282 * pipes left for drm planes. The higher #'d managers tend
283 * to have more features so start in reverse order.
284 */
285 return num_mgrs - pipe - 1;
286}
287
288/* map crtc to vblank mask */ 274/* map crtc to vblank mask */
289static inline uint32_t pipe2vbl(int crtc) 275uint32_t pipe2vbl(struct drm_crtc *crtc);
290{ 276struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
291 enum omap_channel channel = pipe2chan(crtc);
292 return dispc_mgr_get_vsync_irq(channel);
293}
294
295static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
296{
297 struct omap_drm_private *priv = dev->dev_private;
298 int i;
299
300 for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
301 if (priv->crtcs[i] == crtc)
302 return i;
303
304 BUG(); /* bogus CRTC ptr */
305 return -1;
306}
307 277
308/* should these be made into common util helpers? 278/* should these be made into common util helpers?
309 */ 279 */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 21d126d0317e..c29451ba65da 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -41,6 +41,13 @@ struct omap_encoder {
41 struct omap_dss_device *dssdev; 41 struct omap_dss_device *dssdev;
42}; 42};
43 43
44struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
45{
46 struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
47
48 return omap_encoder->dssdev;
49}
50
44static void omap_encoder_destroy(struct drm_encoder *encoder) 51static void omap_encoder_destroy(struct drm_encoder *encoder)
45{ 52{
46 struct omap_encoder *omap_encoder = to_omap_encoder(encoder); 53 struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -128,13 +135,26 @@ int omap_encoder_update(struct drm_encoder *encoder,
128 135
129 dssdev->output->manager = mgr; 136 dssdev->output->manager = mgr;
130 137
131 ret = dssdrv->check_timings(dssdev, timings); 138 if (dssdrv->check_timings) {
139 ret = dssdrv->check_timings(dssdev, timings);
140 } else {
141 struct omap_video_timings t = {0};
142
143 dssdrv->get_timings(dssdev, &t);
144
145 if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
146 ret = -EINVAL;
147 else
148 ret = 0;
149 }
150
132 if (ret) { 151 if (ret) {
133 dev_err(dev->dev, "could not set timings: %d\n", ret); 152 dev_err(dev->dev, "could not set timings: %d\n", ret);
134 return ret; 153 return ret;
135 } 154 }
136 155
137 dssdrv->set_timings(dssdev, timings); 156 if (dssdrv->set_timings)
157 dssdrv->set_timings(dssdev, timings);
138 158
139 return 0; 159 return 0;
140} 160}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ac74d1bc67bf..be7cd97a0db0 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -178,7 +178,7 @@ out_unlock:
178 return omap_gem_mmap_obj(obj, vma); 178 return omap_gem_mmap_obj(obj, vma);
179} 179}
180 180
181struct dma_buf_ops omap_dmabuf_ops = { 181static struct dma_buf_ops omap_dmabuf_ops = {
182 .map_dma_buf = omap_gem_map_dma_buf, 182 .map_dma_buf = omap_gem_map_dma_buf,
183 .unmap_dma_buf = omap_gem_unmap_dma_buf, 183 .unmap_dma_buf = omap_gem_unmap_dma_buf,
184 .release = omap_gem_dmabuf_release, 184 .release = omap_gem_dmabuf_release,
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
212 * refcount on gem itself instead of f_count of dmabuf. 212 * refcount on gem itself instead of f_count of dmabuf.
213 */ 213 */
214 drm_gem_object_reference(obj); 214 drm_gem_object_reference(obj);
215 dma_buf_put(buffer);
216 return obj; 215 return obj;
217 } 216 }
218 } 217 }
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index e01303ee00c3..9263db117ff8 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -130,12 +130,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
130 * Zero on success, appropriate errno if the given @crtc's vblank 130 * Zero on success, appropriate errno if the given @crtc's vblank
131 * interrupt cannot be enabled. 131 * interrupt cannot be enabled.
132 */ 132 */
133int omap_irq_enable_vblank(struct drm_device *dev, int crtc) 133int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
134{ 134{
135 struct omap_drm_private *priv = dev->dev_private; 135 struct omap_drm_private *priv = dev->dev_private;
136 struct drm_crtc *crtc = priv->crtcs[crtc_id];
136 unsigned long flags; 137 unsigned long flags;
137 138
138 DBG("dev=%p, crtc=%d", dev, crtc); 139 DBG("dev=%p, crtc=%d", dev, crtc_id);
139 140
140 dispc_runtime_get(); 141 dispc_runtime_get();
141 spin_lock_irqsave(&list_lock, flags); 142 spin_lock_irqsave(&list_lock, flags);
@@ -156,12 +157,13 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
156 * a hardware vblank counter, this routine should be a no-op, since 157 * a hardware vblank counter, this routine should be a no-op, since
157 * interrupts will have to stay on to keep the count accurate. 158 * interrupts will have to stay on to keep the count accurate.
158 */ 159 */
159void omap_irq_disable_vblank(struct drm_device *dev, int crtc) 160void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
160{ 161{
161 struct omap_drm_private *priv = dev->dev_private; 162 struct omap_drm_private *priv = dev->dev_private;
163 struct drm_crtc *crtc = priv->crtcs[crtc_id];
162 unsigned long flags; 164 unsigned long flags;
163 165
164 DBG("dev=%p, crtc=%d", dev, crtc); 166 DBG("dev=%p, crtc=%d", dev, crtc_id);
165 167
166 dispc_runtime_get(); 168 dispc_runtime_get();
167 spin_lock_irqsave(&list_lock, flags); 169 spin_lock_irqsave(&list_lock, flags);
@@ -186,9 +188,12 @@ irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
186 188
187 VERB("irqs: %08x", irqstatus); 189 VERB("irqs: %08x", irqstatus);
188 190
189 for (id = 0; id < priv->num_crtcs; id++) 191 for (id = 0; id < priv->num_crtcs; id++) {
190 if (irqstatus & pipe2vbl(id)) 192 struct drm_crtc *crtc = priv->crtcs[id];
193
194 if (irqstatus & pipe2vbl(crtc))
191 drm_handle_vblank(dev, id); 195 drm_handle_vblank(dev, id);
196 }
192 197
193 spin_lock_irqsave(&list_lock, flags); 198 spin_lock_irqsave(&list_lock, flags);
194 list_for_each_entry_safe(handler, n, &priv->irq_list, node) { 199 list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2882cda6ea19..8d225d7ff4e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -247,6 +247,12 @@ static int omap_plane_update(struct drm_plane *plane,
247{ 247{
248 struct omap_plane *omap_plane = to_omap_plane(plane); 248 struct omap_plane *omap_plane = to_omap_plane(plane);
249 omap_plane->enabled = true; 249 omap_plane->enabled = true;
250
251 if (plane->fb)
252 drm_framebuffer_unreference(plane->fb);
253
254 drm_framebuffer_reference(fb);
255
250 return omap_plane_mode_set(plane, crtc, fb, 256 return omap_plane_mode_set(plane, crtc, fb,
251 crtc_x, crtc_y, crtc_w, crtc_h, 257 crtc_x, crtc_y, crtc_w, crtc_h,
252 src_x, src_y, src_w, src_h, 258 src_x, src_y, src_w, src_h,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 000000000000..2f1a57e11140
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,10 @@
1config DRM_QXL
2 tristate "QXL virtual GPU"
3 depends on DRM && PCI
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER
8 select DRM_TTM
9 help
10 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 000000000000..ea046ba691d2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6
7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
8
9obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 000000000000..f86771481317
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,694 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26/* QXL cmd/ring handling */
27
28#include "qxl_drv.h"
29#include "qxl_object.h"
30
31static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
32
33struct ring {
34 struct qxl_ring_header header;
35 uint8_t elements[0];
36};
37
38struct qxl_ring {
39 struct ring *ring;
40 int element_size;
41 int n_elements;
42 int prod_notify;
43 wait_queue_head_t *push_event;
44 spinlock_t lock;
45};
46
47void qxl_ring_free(struct qxl_ring *ring)
48{
49 kfree(ring);
50}
51
52struct qxl_ring *
53qxl_ring_create(struct qxl_ring_header *header,
54 int element_size,
55 int n_elements,
56 int prod_notify,
57 bool set_prod_notify,
58 wait_queue_head_t *push_event)
59{
60 struct qxl_ring *ring;
61
62 ring = kmalloc(sizeof(*ring), GFP_KERNEL);
63 if (!ring)
64 return NULL;
65
66 ring->ring = (struct ring *)header;
67 ring->element_size = element_size;
68 ring->n_elements = n_elements;
69 ring->prod_notify = prod_notify;
70 ring->push_event = push_event;
71 if (set_prod_notify)
72 header->notify_on_prod = ring->n_elements;
73 spin_lock_init(&ring->lock);
74 return ring;
75}
76
77static int qxl_check_header(struct qxl_ring *ring)
78{
79 int ret;
80 struct qxl_ring_header *header = &(ring->ring->header);
81 unsigned long flags;
82 spin_lock_irqsave(&ring->lock, flags);
83 ret = header->prod - header->cons < header->num_items;
84 if (ret == 0)
85 header->notify_on_cons = header->cons + 1;
86 spin_unlock_irqrestore(&ring->lock, flags);
87 return ret;
88}
89
90static int qxl_check_idle(struct qxl_ring *ring)
91{
92 int ret;
93 struct qxl_ring_header *header = &(ring->ring->header);
94 unsigned long flags;
95 spin_lock_irqsave(&ring->lock, flags);
96 ret = header->prod == header->cons;
97 spin_unlock_irqrestore(&ring->lock, flags);
98 return ret;
99}
100
101int qxl_ring_push(struct qxl_ring *ring,
102 const void *new_elt, bool interruptible)
103{
104 struct qxl_ring_header *header = &(ring->ring->header);
105 uint8_t *elt;
106 int idx, ret;
107 unsigned long flags;
108 spin_lock_irqsave(&ring->lock, flags);
109 if (header->prod - header->cons == header->num_items) {
110 header->notify_on_cons = header->cons + 1;
111 mb();
112 spin_unlock_irqrestore(&ring->lock, flags);
113 if (!drm_can_sleep()) {
114 while (!qxl_check_header(ring))
115 udelay(1);
116 } else {
117 if (interruptible) {
118 ret = wait_event_interruptible(*ring->push_event,
119 qxl_check_header(ring));
120 if (ret)
121 return ret;
122 } else {
123 wait_event(*ring->push_event,
124 qxl_check_header(ring));
125 }
126
127 }
128 spin_lock_irqsave(&ring->lock, flags);
129 }
130
131 idx = header->prod & (ring->n_elements - 1);
132 elt = ring->ring->elements + idx * ring->element_size;
133
134 memcpy((void *)elt, new_elt, ring->element_size);
135
136 header->prod++;
137
138 mb();
139
140 if (header->prod == header->notify_on_prod)
141 outb(0, ring->prod_notify);
142
143 spin_unlock_irqrestore(&ring->lock, flags);
144 return 0;
145}
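/*
 * Editor's aside (hedged, illustrative only -- not part of the patch):
 * qxl_ring_push()/qxl_ring_pop() assume n_elements is a power of two, so
 * "prod & (ring->n_elements - 1)" wraps without a modulo and "prod - cons"
 * remains a valid occupancy count even across uint32_t overflow. A minimal
 * user-space sketch of that invariant:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t n = 8;                 /* power-of-two ring size */
	uint32_t prod = UINT32_MAX - 1; /* producer index about to wrap */
	uint32_t cons = UINT32_MAX - 3;

	assert((prod & (n - 1)) == prod % n); /* masking == modulo */
	assert(prod - cons == 2);             /* occupancy before the wrap */
	prod += 3;                            /* prod wraps past zero */
	assert(prod - cons == 5);             /* still correct after the wrap */
	return 0;
}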
146
147static bool qxl_ring_pop(struct qxl_ring *ring,
148 void *element)
149{
150 volatile struct qxl_ring_header *header = &(ring->ring->header);
151 volatile uint8_t *ring_elt;
152 int idx;
153 unsigned long flags;
154 spin_lock_irqsave(&ring->lock, flags);
155 if (header->cons == header->prod) {
156 header->notify_on_prod = header->cons + 1;
157 spin_unlock_irqrestore(&ring->lock, flags);
158 return false;
159 }
160
161 idx = header->cons & (ring->n_elements - 1);
162 ring_elt = ring->ring->elements + idx * ring->element_size;
163
164 memcpy(element, (void *)ring_elt, ring->element_size);
165
166 header->cons++;
167
168 spin_unlock_irqrestore(&ring->lock, flags);
169 return true;
170}
171
172int
173qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
174 uint32_t type, bool interruptible)
175{
176 struct qxl_command cmd;
177
178 cmd.type = type;
179 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
180
181 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
182}
183
184int
185qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
186 uint32_t type, bool interruptible)
187{
188 struct qxl_command cmd;
189
190 cmd.type = type;
191 cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
192
193 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
194}
195
196bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
197{
198 if (!qxl_check_idle(qdev->release_ring)) {
199 queue_work(qdev->gc_queue, &qdev->gc_work);
200 if (flush)
201 flush_work(&qdev->gc_work);
202 return true;
203 }
204 return false;
205}
206
207int qxl_garbage_collect(struct qxl_device *qdev)
208{
209 struct qxl_release *release;
210 uint64_t id, next_id;
211 int i = 0;
212 int ret;
213 union qxl_release_info *info;
214
215 while (qxl_ring_pop(qdev->release_ring, &id)) {
216 QXL_INFO(qdev, "popped %lld\n", id);
217 while (id) {
218 release = qxl_release_from_id_locked(qdev, id);
219 if (release == NULL)
220 break;
221
222 ret = qxl_release_reserve(qdev, release, false);
223 if (ret) {
224 qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
225 DRM_ERROR("failed to reserve release %lld\n", id);
226 }
227
228 info = qxl_release_map(qdev, release);
229 next_id = info->next;
230 qxl_release_unmap(qdev, release, info);
231
232 qxl_release_unreserve(qdev, release);
233 QXL_INFO(qdev, "popped %lld, next %lld\n", id,
234 next_id);
235
236 switch (release->type) {
237 case QXL_RELEASE_DRAWABLE:
238 case QXL_RELEASE_SURFACE_CMD:
239 case QXL_RELEASE_CURSOR_CMD:
240 break;
241 default:
242 DRM_ERROR("unexpected release type\n");
243 break;
244 }
245 id = next_id;
246
247 qxl_release_free(qdev, release);
248 ++i;
249 }
250 }
251
252	QXL_INFO(qdev, "%s: %d\n", __func__, i);
253
254 return i;
255}
256
257int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
258 struct qxl_bo **_bo)
259{
260 struct qxl_bo *bo;
261 int ret;
262
263 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
264 QXL_GEM_DOMAIN_VRAM, NULL, &bo);
265 if (ret) {
266 DRM_ERROR("failed to allocate VRAM BO\n");
267 return ret;
268 }
269 ret = qxl_bo_reserve(bo, false);
270 if (unlikely(ret != 0))
271 goto out_unref;
272
273 *_bo = bo;
274 return 0;
275out_unref:
276	qxl_bo_unref(&bo);
277	return ret;
278}
279
280static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
281{
282 int irq_num;
283 long addr = qdev->io_base + port;
284 int ret;
285
286 mutex_lock(&qdev->async_io_mutex);
287 irq_num = atomic_read(&qdev->irq_received_io_cmd);
288 if (qdev->last_sent_io_cmd > irq_num) {
289 if (intr)
290 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
291 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
292 else
293 ret = wait_event_timeout(qdev->io_cmd_event,
294 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
295		/* 0 is timeout, just bail; the "hw" has gone away */
296 if (ret <= 0)
297 goto out;
298 irq_num = atomic_read(&qdev->irq_received_io_cmd);
299 }
300 outb(val, addr);
301 qdev->last_sent_io_cmd = irq_num + 1;
302 if (intr)
303 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
304 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
305 else
306 ret = wait_event_timeout(qdev->io_cmd_event,
307 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
308out:
309 if (ret > 0)
310 ret = 0;
311 mutex_unlock(&qdev->async_io_mutex);
312 return ret;
313}
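/*
 * Editor's aside (hedged sketch, placeholder names): wait_for_io_cmd_user()
 * above uses the standard "completion counter" idiom -- snapshot the
 * interrupt count, kick the device, then wait for the count to move past
 * the snapshot. Stripped of the qxl specifics (my_dev, kick_hardware and
 * the wq/completions fields are all hypothetical):
 */
static int wait_one_completion(struct my_dev *dev)
{
	int seen = atomic_read(&dev->completions);	/* snapshot */
	long ret;

	kick_hardware(dev);	/* hypothetical outb() wrapper */
	ret = wait_event_timeout(dev->wq,
				 atomic_read(&dev->completions) > seen,
				 5 * HZ);
	return ret ? 0 : -EBUSY;	/* 0 from the wait means timeout */
}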
314
315static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
316{
317 int ret;
318
319restart:
320 ret = wait_for_io_cmd_user(qdev, val, port, false);
321 if (ret == -ERESTARTSYS)
322 goto restart;
323}
324
325int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
326 const struct qxl_rect *area)
327{
328 int surface_id;
329 uint32_t surface_width, surface_height;
330 int ret;
331
332 if (!surf->hw_surf_alloc)
333 DRM_ERROR("got io update area with no hw surface\n");
334
335 if (surf->is_primary)
336 surface_id = 0;
337 else
338 surface_id = surf->surface_id;
339 surface_width = surf->surf.width;
340 surface_height = surf->surf.height;
341
342 if (area->left < 0 || area->top < 0 ||
343 area->right > surface_width || area->bottom > surface_height) {
344 qxl_io_log(qdev, "%s: not doing area update for "
345 "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
346 area->top, area->right, area->bottom, surface_width, surface_height);
347 return -EINVAL;
348 }
349 mutex_lock(&qdev->update_area_mutex);
350 qdev->ram_header->update_area = *area;
351 qdev->ram_header->update_surface = surface_id;
352 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
353 mutex_unlock(&qdev->update_area_mutex);
354 return ret;
355}
356
357void qxl_io_notify_oom(struct qxl_device *qdev)
358{
359 outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
360}
361
362void qxl_io_flush_release(struct qxl_device *qdev)
363{
364 outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
365}
366
367void qxl_io_flush_surfaces(struct qxl_device *qdev)
368{
369 wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
370}
371
372
373void qxl_io_destroy_primary(struct qxl_device *qdev)
374{
375 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
376}
377
378void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
379 unsigned height, unsigned offset, struct qxl_bo *bo)
380{
381 struct qxl_surface_create *create;
382
383 QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
384 qdev->ram_header);
385 create = &qdev->ram_header->create_surface;
386 create->format = bo->surf.format;
387 create->width = width;
388 create->height = height;
389 create->stride = bo->surf.stride;
390 create->mem = qxl_bo_physical_address(qdev, bo, offset);
391
392 QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
393 bo->kptr);
394
395 create->flags = QXL_SURF_FLAG_KEEP_DATA;
396 create->type = QXL_SURF_TYPE_PRIMARY;
397
398 wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
399}
400
401void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
402{
403 QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
404 wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
405}
406
407void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
408{
409 va_list args;
410
411 va_start(args, fmt);
412 vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
413 va_end(args);
414 /*
415	 * Do not do a DRM output here - this will call printk, which will
416 * call back into qxl for rendering (qxl_fb)
417 */
418 outb(0, qdev->io_base + QXL_IO_LOG);
419}
420
421void qxl_io_reset(struct qxl_device *qdev)
422{
423 outb(0, qdev->io_base + QXL_IO_RESET);
424}
425
426void qxl_io_monitors_config(struct qxl_device *qdev)
427{
428 qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
429 qdev->monitors_config ?
430 qdev->monitors_config->count : -1,
431 qdev->monitors_config && qdev->monitors_config->count ?
432 qdev->monitors_config->heads[0].width : -1,
433 qdev->monitors_config && qdev->monitors_config->count ?
434 qdev->monitors_config->heads[0].height : -1,
435 qdev->monitors_config && qdev->monitors_config->count ?
436 qdev->monitors_config->heads[0].x : -1,
437 qdev->monitors_config && qdev->monitors_config->count ?
438 qdev->monitors_config->heads[0].y : -1
439 );
440
441 wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
442}
443
444int qxl_surface_id_alloc(struct qxl_device *qdev,
445 struct qxl_bo *surf)
446{
447 uint32_t handle;
448 int idr_ret;
449 int count = 0;
450again:
451 idr_preload(GFP_ATOMIC);
452 spin_lock(&qdev->surf_id_idr_lock);
453 idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
454 spin_unlock(&qdev->surf_id_idr_lock);
455 idr_preload_end();
456 if (idr_ret < 0)
457 return idr_ret;
458 handle = idr_ret;
459
460 if (handle >= qdev->rom->n_surfaces) {
461 count++;
462 spin_lock(&qdev->surf_id_idr_lock);
463 idr_remove(&qdev->surf_id_idr, handle);
464 spin_unlock(&qdev->surf_id_idr_lock);
465 qxl_reap_surface_id(qdev, 2);
466 goto again;
467 }
468 surf->surface_id = handle;
469
470 spin_lock(&qdev->surf_id_idr_lock);
471 qdev->last_alloced_surf_id = handle;
472 spin_unlock(&qdev->surf_id_idr_lock);
473 return 0;
474}
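/*
 * Editor's aside (hedged): qxl_surface_id_alloc() above follows the
 * idr_preload()/idr_alloc() pattern for allocating an id while holding a
 * spinlock. The canonical form (my_idr, my_lock and ptr are placeholders;
 * the canonical pattern preloads with GFP_KERNEL, whereas the driver above
 * passes GFP_ATOMIC):
 */
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while we may sleep */
	spin_lock(&my_lock);
	/* ids start at 1; an upper bound of 0 means "no limit" */
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();
	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */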
475
476void qxl_surface_id_dealloc(struct qxl_device *qdev,
477 uint32_t surface_id)
478{
479 spin_lock(&qdev->surf_id_idr_lock);
480 idr_remove(&qdev->surf_id_idr, surface_id);
481 spin_unlock(&qdev->surf_id_idr_lock);
482}
483
484int qxl_hw_surface_alloc(struct qxl_device *qdev,
485 struct qxl_bo *surf,
486 struct ttm_mem_reg *new_mem)
487{
488 struct qxl_surface_cmd *cmd;
489 struct qxl_release *release;
490 int ret;
491
492 if (surf->hw_surf_alloc)
493 return 0;
494
495 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
496 NULL,
497 &release);
498 if (ret)
499 return ret;
500
501 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
502 cmd->type = QXL_SURFACE_CMD_CREATE;
503 cmd->u.surface_create.format = surf->surf.format;
504 cmd->u.surface_create.width = surf->surf.width;
505 cmd->u.surface_create.height = surf->surf.height;
506 cmd->u.surface_create.stride = surf->surf.stride;
507 if (new_mem) {
508 int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
509 struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
510
511 /* TODO - need to hold one of the locks to read tbo.offset */
512 cmd->u.surface_create.data = slot->high_bits;
513
514 cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
515 } else
516 cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
517 cmd->surface_id = surf->surface_id;
518 qxl_release_unmap(qdev, release, &cmd->release_info);
519
520 surf->surf_create = release;
521
522 /* no need to add a release to the fence for this bo,
523 since it is only released when we ask to destroy the surface
524 and it would never signal otherwise */
525 qxl_fence_releaseable(qdev, release);
526
527 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
528
529 qxl_release_unreserve(qdev, release);
530
531 surf->hw_surf_alloc = true;
532 spin_lock(&qdev->surf_id_idr_lock);
533 idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
534 spin_unlock(&qdev->surf_id_idr_lock);
535 return 0;
536}
537
538int qxl_hw_surface_dealloc(struct qxl_device *qdev,
539 struct qxl_bo *surf)
540{
541 struct qxl_surface_cmd *cmd;
542 struct qxl_release *release;
543 int ret;
544 int id;
545
546 if (!surf->hw_surf_alloc)
547 return 0;
548
549 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
550 surf->surf_create,
551 &release);
552 if (ret)
553 return ret;
554
555 surf->surf_create = NULL;
556 /* remove the surface from the idr, but not the surface id yet */
557 spin_lock(&qdev->surf_id_idr_lock);
558 idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
559 spin_unlock(&qdev->surf_id_idr_lock);
560 surf->hw_surf_alloc = false;
561
562 id = surf->surface_id;
563 surf->surface_id = 0;
564
565 release->surface_release_id = id;
566 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
567 cmd->type = QXL_SURFACE_CMD_DESTROY;
568 cmd->surface_id = id;
569 qxl_release_unmap(qdev, release, &cmd->release_info);
570
571 qxl_fence_releaseable(qdev, release);
572
573 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
574
575 qxl_release_unreserve(qdev, release);
576
577
578 return 0;
579}
580
581int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
582{
583 struct qxl_rect rect;
584 int ret;
585
586 /* if we are evicting, we need to make sure the surface is up
587 to date */
588 rect.left = 0;
589 rect.right = surf->surf.width;
590 rect.top = 0;
591 rect.bottom = surf->surf.height;
592retry:
593 ret = qxl_io_update_area(qdev, surf, &rect);
594 if (ret == -ERESTARTSYS)
595 goto retry;
596 return ret;
597}
598
599static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
600{
601 /* no need to update area if we are just freeing the surface normally */
602 if (do_update_area)
603 qxl_update_surface(qdev, surf);
604
605 /* nuke the surface id at the hw */
606 qxl_hw_surface_dealloc(qdev, surf);
607}
608
609void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
610{
611 mutex_lock(&qdev->surf_evict_mutex);
612 qxl_surface_evict_locked(qdev, surf, do_update_area);
613 mutex_unlock(&qdev->surf_evict_mutex);
614}
615
616static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
617{
618 int ret;
619
620 ret = qxl_bo_reserve(surf, false);
621 if (ret == -EBUSY)
622 return -EBUSY;
623
624 if (surf->fence.num_active_releases > 0 && stall == false) {
625 qxl_bo_unreserve(surf);
626 return -EBUSY;
627 }
628
629 if (stall)
630 mutex_unlock(&qdev->surf_evict_mutex);
631
632 spin_lock(&surf->tbo.bdev->fence_lock);
633 ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
634 spin_unlock(&surf->tbo.bdev->fence_lock);
635
636 if (stall)
637 mutex_lock(&qdev->surf_evict_mutex);
638 if (ret == -EBUSY) {
639 qxl_bo_unreserve(surf);
640 return -EBUSY;
641 }
642
643 qxl_surface_evict_locked(qdev, surf, true);
644 qxl_bo_unreserve(surf);
645 return 0;
646}
647
648static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
649{
650 int num_reaped = 0;
651 int i, ret;
652 bool stall = false;
653 int start = 0;
654
655 mutex_lock(&qdev->surf_evict_mutex);
656again:
657
658 spin_lock(&qdev->surf_id_idr_lock);
659 start = qdev->last_alloced_surf_id + 1;
660 spin_unlock(&qdev->surf_id_idr_lock);
661
662 for (i = start; i < start + qdev->rom->n_surfaces; i++) {
663 void *objptr;
664 int surfid = i % qdev->rom->n_surfaces;
665
666		/* this avoids the case where the object is in the
667		   idr but has been evicted half way - it makes
668		   the idr lookup atomic with the eviction */
669 spin_lock(&qdev->surf_id_idr_lock);
670 objptr = idr_find(&qdev->surf_id_idr, surfid);
671 spin_unlock(&qdev->surf_id_idr_lock);
672
673 if (!objptr)
674 continue;
675
676 ret = qxl_reap_surf(qdev, objptr, stall);
677 if (ret == 0)
678 num_reaped++;
679 if (num_reaped >= max_to_reap)
680 break;
681 }
682 if (num_reaped == 0 && stall == false) {
683 stall = true;
684 goto again;
685 }
686
687 mutex_unlock(&qdev->surf_evict_mutex);
688 if (num_reaped) {
689 usleep_range(500, 1000);
690 qxl_queue_garbage_collect(qdev, true);
691 }
692
693 return 0;
694}
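/*
 * Editor's aside (hedged, illustrative): the reap loop above is a wrapped
 * linear scan -- it starts just after the most recently allocated surface
 * id and visits every slot exactly once via the modulo:
 *
 *	for (i = start; i < start + n; i++)
 *		visit(i % n);		// n == qdev->rom->n_surfaces
 *
 * Starting past last_alloced_surf_id means the scan tends to reach the
 * longest-lived surface ids first.
 */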
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 000000000000..c3c2bbdc6674
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Alon Levy <alevy@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "qxl_drv.h"
35#include "qxl_object.h"
36
37
38#if defined(CONFIG_DEBUG_FS)
39static int
40qxl_debugfs_irq_received(struct seq_file *m, void *data)
41{
42 struct drm_info_node *node = (struct drm_info_node *) m->private;
43 struct qxl_device *qdev = node->minor->dev->dev_private;
44
45 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
46 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
47 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
48 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
49 seq_printf(m, "%d\n", qdev->irq_received_error);
50 return 0;
51}
52
53static int
54qxl_debugfs_buffers_info(struct seq_file *m, void *data)
55{
56 struct drm_info_node *node = (struct drm_info_node *) m->private;
57 struct qxl_device *qdev = node->minor->dev->dev_private;
58 struct qxl_bo *bo;
59
60 list_for_each_entry(bo, &qdev->gem.objects, list) {
61 seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
62 (unsigned long)bo->gem_base.size, bo->pin_count,
63 bo->tbo.sync_obj, bo->fence.num_active_releases);
64 }
65 return 0;
66}
67
68static struct drm_info_list qxl_debugfs_list[] = {
69 { "irq_received", qxl_debugfs_irq_received, 0, NULL },
70 { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
71};
72#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
73#endif
74
75int
76qxl_debugfs_init(struct drm_minor *minor)
77{
78#if defined(CONFIG_DEBUG_FS)
79 drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
80 minor->debugfs_root, minor);
81#endif
82 return 0;
83}
84
85void
86qxl_debugfs_takedown(struct drm_minor *minor)
87{
88#if defined(CONFIG_DEBUG_FS)
89 drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
90 minor);
91#endif
92}
93
94int qxl_debugfs_add_files(struct qxl_device *qdev,
95 struct drm_info_list *files,
96 unsigned nfiles)
97{
98 unsigned i;
99
100 for (i = 0; i < qdev->debugfs_count; i++) {
101 if (qdev->debugfs[i].files == files) {
102 /* Already registered */
103 return 0;
104 }
105 }
106
107 i = qdev->debugfs_count + 1;
108 if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
109 DRM_ERROR("Reached maximum number of debugfs components.\n");
110 DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
111 return -EINVAL;
112 }
113 qdev->debugfs[qdev->debugfs_count].files = files;
114 qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
115 qdev->debugfs_count = i;
116#if defined(CONFIG_DEBUG_FS)
117 drm_debugfs_create_files(files, nfiles,
118 qdev->ddev->control->debugfs_root,
119 qdev->ddev->control);
120 drm_debugfs_create_files(files, nfiles,
121 qdev->ddev->primary->debugfs_root,
122 qdev->ddev->primary);
123#endif
124 return 0;
125}
126
127void qxl_debugfs_remove_files(struct qxl_device *qdev)
128{
129#if defined(CONFIG_DEBUG_FS)
130 unsigned i;
131
132 for (i = 0; i < qdev->debugfs_count; i++) {
133 drm_debugfs_remove_files(qdev->debugfs[i].files,
134 qdev->debugfs[i].num_files,
135 qdev->ddev->control);
136 drm_debugfs_remove_files(qdev->debugfs[i].files,
137 qdev->debugfs[i].num_files,
138 qdev->ddev->primary);
139 }
140#endif
141}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 000000000000..94c5aec71920
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,879 @@
1/*
2 Copyright (C) 2009 Red Hat, Inc.
3
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are
6 met:
7
8 * Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10 * Redistributions in binary form must reproduce the above copyright
11 notice, this list of conditions and the following disclaimer in
12 the documentation and/or other materials provided with the
13 distribution.
14 * Neither the name of the copyright holder nor the names of its
15 contributors may be used to endorse or promote products derived
16 from this software without specific prior written permission.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
19 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29*/
30
31
32#ifndef H_QXL_DEV
33#define H_QXL_DEV
34
35#include <linux/types.h>
36
37/*
38 * from spice-protocol
39 * Release 0.10.0
40 */
41
42/* enums.h */
43
44enum SpiceImageType {
45 SPICE_IMAGE_TYPE_BITMAP,
46 SPICE_IMAGE_TYPE_QUIC,
47 SPICE_IMAGE_TYPE_RESERVED,
48 SPICE_IMAGE_TYPE_LZ_PLT = 100,
49 SPICE_IMAGE_TYPE_LZ_RGB,
50 SPICE_IMAGE_TYPE_GLZ_RGB,
51 SPICE_IMAGE_TYPE_FROM_CACHE,
52 SPICE_IMAGE_TYPE_SURFACE,
53 SPICE_IMAGE_TYPE_JPEG,
54 SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
55 SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
56 SPICE_IMAGE_TYPE_JPEG_ALPHA,
57
58 SPICE_IMAGE_TYPE_ENUM_END
59};
60
61enum SpiceBitmapFmt {
62 SPICE_BITMAP_FMT_INVALID,
63 SPICE_BITMAP_FMT_1BIT_LE,
64 SPICE_BITMAP_FMT_1BIT_BE,
65 SPICE_BITMAP_FMT_4BIT_LE,
66 SPICE_BITMAP_FMT_4BIT_BE,
67 SPICE_BITMAP_FMT_8BIT,
68 SPICE_BITMAP_FMT_16BIT,
69 SPICE_BITMAP_FMT_24BIT,
70 SPICE_BITMAP_FMT_32BIT,
71 SPICE_BITMAP_FMT_RGBA,
72
73 SPICE_BITMAP_FMT_ENUM_END
74};
75
76enum SpiceSurfaceFmt {
77 SPICE_SURFACE_FMT_INVALID,
78 SPICE_SURFACE_FMT_1_A,
79 SPICE_SURFACE_FMT_8_A = 8,
80 SPICE_SURFACE_FMT_16_555 = 16,
81 SPICE_SURFACE_FMT_32_xRGB = 32,
82 SPICE_SURFACE_FMT_16_565 = 80,
83 SPICE_SURFACE_FMT_32_ARGB = 96,
84
85 SPICE_SURFACE_FMT_ENUM_END
86};
87
88enum SpiceClipType {
89 SPICE_CLIP_TYPE_NONE,
90 SPICE_CLIP_TYPE_RECTS,
91
92 SPICE_CLIP_TYPE_ENUM_END
93};
94
95enum SpiceRopd {
96 SPICE_ROPD_INVERS_SRC = (1 << 0),
97 SPICE_ROPD_INVERS_BRUSH = (1 << 1),
98 SPICE_ROPD_INVERS_DEST = (1 << 2),
99 SPICE_ROPD_OP_PUT = (1 << 3),
100 SPICE_ROPD_OP_OR = (1 << 4),
101 SPICE_ROPD_OP_AND = (1 << 5),
102 SPICE_ROPD_OP_XOR = (1 << 6),
103 SPICE_ROPD_OP_BLACKNESS = (1 << 7),
104 SPICE_ROPD_OP_WHITENESS = (1 << 8),
105 SPICE_ROPD_OP_INVERS = (1 << 9),
106 SPICE_ROPD_INVERS_RES = (1 << 10),
107
108 SPICE_ROPD_MASK = 0x7ff
109};
110
111enum SpiceBrushType {
112 SPICE_BRUSH_TYPE_NONE,
113 SPICE_BRUSH_TYPE_SOLID,
114 SPICE_BRUSH_TYPE_PATTERN,
115
116 SPICE_BRUSH_TYPE_ENUM_END
117};
118
119enum SpiceCursorType {
120 SPICE_CURSOR_TYPE_ALPHA,
121 SPICE_CURSOR_TYPE_MONO,
122 SPICE_CURSOR_TYPE_COLOR4,
123 SPICE_CURSOR_TYPE_COLOR8,
124 SPICE_CURSOR_TYPE_COLOR16,
125 SPICE_CURSOR_TYPE_COLOR24,
126 SPICE_CURSOR_TYPE_COLOR32,
127
128 SPICE_CURSOR_TYPE_ENUM_END
129};
130
131/* qxl_dev.h */
132
133#pragma pack(push, 1)
134
135#define REDHAT_PCI_VENDOR_ID 0x1b36
136
137/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
138#define QXL_DEVICE_ID_STABLE 0x0100
139
140enum {
141 QXL_REVISION_STABLE_V04 = 0x01,
142 QXL_REVISION_STABLE_V06 = 0x02,
143 QXL_REVISION_STABLE_V10 = 0x03,
144 QXL_REVISION_STABLE_V12 = 0x04,
145};
146
147#define QXL_DEVICE_ID_DEVEL 0x01ff
148#define QXL_REVISION_DEVEL 0x01
149
150#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
151#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
152
153enum {
154 QXL_RAM_RANGE_INDEX,
155 QXL_VRAM_RANGE_INDEX,
156 QXL_ROM_RANGE_INDEX,
157 QXL_IO_RANGE_INDEX,
158
159 QXL_PCI_RANGES
160};
161
162/* qxl-1 compat: append only */
163enum {
164 QXL_IO_NOTIFY_CMD,
165 QXL_IO_NOTIFY_CURSOR,
166 QXL_IO_UPDATE_AREA,
167 QXL_IO_UPDATE_IRQ,
168 QXL_IO_NOTIFY_OOM,
169 QXL_IO_RESET,
170 QXL_IO_SET_MODE, /* qxl-1 */
171 QXL_IO_LOG,
172 /* appended for qxl-2 */
173 QXL_IO_MEMSLOT_ADD,
174 QXL_IO_MEMSLOT_DEL,
175 QXL_IO_DETACH_PRIMARY,
176 QXL_IO_ATTACH_PRIMARY,
177 QXL_IO_CREATE_PRIMARY,
178 QXL_IO_DESTROY_PRIMARY,
179 QXL_IO_DESTROY_SURFACE_WAIT,
180 QXL_IO_DESTROY_ALL_SURFACES,
181 /* appended for qxl-3 */
182 QXL_IO_UPDATE_AREA_ASYNC,
183 QXL_IO_MEMSLOT_ADD_ASYNC,
184 QXL_IO_CREATE_PRIMARY_ASYNC,
185 QXL_IO_DESTROY_PRIMARY_ASYNC,
186 QXL_IO_DESTROY_SURFACE_ASYNC,
187 QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
188 QXL_IO_FLUSH_SURFACES_ASYNC,
189 QXL_IO_FLUSH_RELEASE,
190 /* appended for qxl-4 */
191 QXL_IO_MONITORS_CONFIG_ASYNC,
192
193 QXL_IO_RANGE_SIZE
194};
195
196typedef uint64_t QXLPHYSICAL;
197typedef int32_t QXLFIXED; /* fixed 28.4 */
198
199struct qxl_point_fix {
200 QXLFIXED x;
201 QXLFIXED y;
202};
203
204struct qxl_point {
205 int32_t x;
206 int32_t y;
207};
208
209struct qxl_point_1_6 {
210 int16_t x;
211 int16_t y;
212};
213
214struct qxl_rect {
215 int32_t top;
216 int32_t left;
217 int32_t bottom;
218 int32_t right;
219};
220
221struct qxl_urect {
222 uint32_t top;
223 uint32_t left;
224 uint32_t bottom;
225 uint32_t right;
226};
227
228/* qxl-1 compat: append only */
229struct qxl_rom {
230 uint32_t magic;
231 uint32_t id;
232 uint32_t update_id;
233 uint32_t compression_level;
234 uint32_t log_level;
235 uint32_t mode; /* qxl-1 */
236 uint32_t modes_offset;
237 uint32_t num_io_pages;
238 uint32_t pages_offset; /* qxl-1 */
239 uint32_t draw_area_offset; /* qxl-1 */
240 uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */
241 uint32_t ram_header_offset;
242 uint32_t mm_clock;
243 /* appended for qxl-2 */
244 uint32_t n_surfaces;
245 uint64_t flags;
246 uint8_t slots_start;
247 uint8_t slots_end;
248 uint8_t slot_gen_bits;
249 uint8_t slot_id_bits;
250 uint8_t slot_generation;
251 /* appended for qxl-4 */
252 uint8_t client_present;
253 uint8_t client_capabilities[58];
254 uint32_t client_monitors_config_crc;
255 struct {
256 uint16_t count;
257 uint16_t padding;
258 struct qxl_urect heads[64];
259 } client_monitors_config;
260};
261
262/* qxl-1 compat: fixed */
263struct qxl_mode {
264 uint32_t id;
265 uint32_t x_res;
266 uint32_t y_res;
267 uint32_t bits;
268 uint32_t stride;
269 uint32_t x_mili;
270 uint32_t y_mili;
271 uint32_t orientation;
272};
273
274/* qxl-1 compat: fixed */
275struct qxl_modes {
276 uint32_t n_modes;
277 struct qxl_mode modes[0];
278};
279
280/* qxl-1 compat: append only */
281enum qxl_cmd_type {
282 QXL_CMD_NOP,
283 QXL_CMD_DRAW,
284 QXL_CMD_UPDATE,
285 QXL_CMD_CURSOR,
286 QXL_CMD_MESSAGE,
287 QXL_CMD_SURFACE,
288};
289
290/* qxl-1 compat: fixed */
291struct qxl_command {
292 QXLPHYSICAL data;
293 uint32_t type;
294 uint32_t padding;
295};
296
297#define QXL_COMMAND_FLAG_COMPAT (1<<0)
298#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0)
299
300struct qxl_command_ext {
301 struct qxl_command cmd;
302 uint32_t group_id;
303 uint32_t flags;
304};
305
306struct qxl_mem_slot {
307 uint64_t mem_start;
308 uint64_t mem_end;
309};
310
311#define QXL_SURF_TYPE_PRIMARY 0
312
313#define QXL_SURF_FLAG_KEEP_DATA (1 << 0)
314
315struct qxl_surface_create {
316 uint32_t width;
317 uint32_t height;
318 int32_t stride;
319 uint32_t format;
320 uint32_t position;
321 uint32_t mouse_mode;
322 uint32_t flags;
323 uint32_t type;
324 QXLPHYSICAL mem;
325};
326
327#define QXL_COMMAND_RING_SIZE 32
328#define QXL_CURSOR_RING_SIZE 32
329#define QXL_RELEASE_RING_SIZE 8
330
331#define QXL_LOG_BUF_SIZE 4096
332
333#define QXL_INTERRUPT_DISPLAY (1 << 0)
334#define QXL_INTERRUPT_CURSOR (1 << 1)
335#define QXL_INTERRUPT_IO_CMD (1 << 2)
336#define QXL_INTERRUPT_ERROR (1 << 3)
337#define QXL_INTERRUPT_CLIENT (1 << 4)
338#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5)
339
340struct qxl_ring_header {
341 uint32_t num_items;
342 uint32_t prod;
343 uint32_t notify_on_prod;
344 uint32_t cons;
345 uint32_t notify_on_cons;
346};
347
348/* qxl-1 compat: append only */
349struct qxl_ram_header {
350 uint32_t magic;
351 uint32_t int_pending;
352 uint32_t int_mask;
353 uint8_t log_buf[QXL_LOG_BUF_SIZE];
354 struct qxl_ring_header cmd_ring_hdr;
355 struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE];
356 struct qxl_ring_header cursor_ring_hdr;
357 struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE];
358 struct qxl_ring_header release_ring_hdr;
359 uint64_t release_ring[QXL_RELEASE_RING_SIZE];
360 struct qxl_rect update_area;
361 /* appended for qxl-2 */
362 uint32_t update_surface;
363 struct qxl_mem_slot mem_slot;
364 struct qxl_surface_create create_surface;
365 uint64_t flags;
366
367 /* appended for qxl-4 */
368
369 /* used by QXL_IO_MONITORS_CONFIG_ASYNC */
370 QXLPHYSICAL monitors_config;
371 uint8_t guest_capabilities[64];
372};
373
374union qxl_release_info {
375 uint64_t id; /* in */
376 uint64_t next; /* out */
377};
378
379struct qxl_release_info_ext {
380 union qxl_release_info *info;
381 uint32_t group_id;
382};
383
384struct qxl_data_chunk {
385 uint32_t data_size;
386 QXLPHYSICAL prev_chunk;
387 QXLPHYSICAL next_chunk;
388 uint8_t data[0];
389};
390
391struct qxl_message {
392 union qxl_release_info release_info;
393 uint8_t data[0];
394};
395
396struct qxl_compat_update_cmd {
397 union qxl_release_info release_info;
398 struct qxl_rect area;
399 uint32_t update_id;
400};
401
402struct qxl_update_cmd {
403 union qxl_release_info release_info;
404 struct qxl_rect area;
405 uint32_t update_id;
406 uint32_t surface_id;
407};
408
409struct qxl_cursor_header {
410 uint64_t unique;
411 uint16_t type;
412 uint16_t width;
413 uint16_t height;
414 uint16_t hot_spot_x;
415 uint16_t hot_spot_y;
416};
417
418struct qxl_cursor {
419 struct qxl_cursor_header header;
420 uint32_t data_size;
421 struct qxl_data_chunk chunk;
422};
423
424enum {
425 QXL_CURSOR_SET,
426 QXL_CURSOR_MOVE,
427 QXL_CURSOR_HIDE,
428 QXL_CURSOR_TRAIL,
429};
430
431#define QXL_CURSOR_DEVICE_DATA_SIZE 128
432
433struct qxl_cursor_cmd {
434 union qxl_release_info release_info;
435 uint8_t type;
436 union {
437 struct {
438 struct qxl_point_1_6 position;
439 uint8_t visible;
440 QXLPHYSICAL shape;
441 } set;
442 struct {
443 uint16_t length;
444 uint16_t frequency;
445 } trail;
446 struct qxl_point_1_6 position;
447 } u;
448 /* todo: dynamic size from rom */
449 uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
450};
451
452enum {
453 QXL_DRAW_NOP,
454 QXL_DRAW_FILL,
455 QXL_DRAW_OPAQUE,
456 QXL_DRAW_COPY,
457 QXL_COPY_BITS,
458 QXL_DRAW_BLEND,
459 QXL_DRAW_BLACKNESS,
460 QXL_DRAW_WHITENESS,
461 QXL_DRAW_INVERS,
462 QXL_DRAW_ROP3,
463 QXL_DRAW_STROKE,
464 QXL_DRAW_TEXT,
465 QXL_DRAW_TRANSPARENT,
466 QXL_DRAW_ALPHA_BLEND,
467 QXL_DRAW_COMPOSITE
468};
469
470struct qxl_raster_glyph {
471 struct qxl_point render_pos;
472 struct qxl_point glyph_origin;
473 uint16_t width;
474 uint16_t height;
475 uint8_t data[0];
476};
477
478struct qxl_string {
479 uint32_t data_size;
480 uint16_t length;
481 uint16_t flags;
482 struct qxl_data_chunk chunk;
483};
484
485struct qxl_copy_bits {
486 struct qxl_point src_pos;
487};
488
489enum qxl_effect_type {
490 QXL_EFFECT_BLEND = 0,
491 QXL_EFFECT_OPAQUE = 1,
492 QXL_EFFECT_REVERT_ON_DUP = 2,
493 QXL_EFFECT_BLACKNESS_ON_DUP = 3,
494 QXL_EFFECT_WHITENESS_ON_DUP = 4,
495 QXL_EFFECT_NOP_ON_DUP = 5,
496 QXL_EFFECT_NOP = 6,
497 QXL_EFFECT_OPAQUE_BRUSH = 7
498};
499
500struct qxl_pattern {
501 QXLPHYSICAL pat;
502 struct qxl_point pos;
503};
504
505struct qxl_brush {
506 uint32_t type;
507 union {
508 uint32_t color;
509 struct qxl_pattern pattern;
510 } u;
511};
512
513struct qxl_q_mask {
514 uint8_t flags;
515 struct qxl_point pos;
516 QXLPHYSICAL bitmap;
517};
518
519struct qxl_fill {
520 struct qxl_brush brush;
521 uint16_t rop_descriptor;
522 struct qxl_q_mask mask;
523};
524
525struct qxl_opaque {
526 QXLPHYSICAL src_bitmap;
527 struct qxl_rect src_area;
528 struct qxl_brush brush;
529 uint16_t rop_descriptor;
530 uint8_t scale_mode;
531 struct qxl_q_mask mask;
532};
533
534struct qxl_copy {
535 QXLPHYSICAL src_bitmap;
536 struct qxl_rect src_area;
537 uint16_t rop_descriptor;
538 uint8_t scale_mode;
539 struct qxl_q_mask mask;
540};
541
542struct qxl_transparent {
543 QXLPHYSICAL src_bitmap;
544 struct qxl_rect src_area;
545 uint32_t src_color;
546 uint32_t true_color;
547};
548
549struct qxl_alpha_blend {
550 uint16_t alpha_flags;
551 uint8_t alpha;
552 QXLPHYSICAL src_bitmap;
553 struct qxl_rect src_area;
554};
555
556struct qxl_compat_alpha_blend {
557 uint8_t alpha;
558 QXLPHYSICAL src_bitmap;
559 struct qxl_rect src_area;
560};
561
562struct qxl_rop_3 {
563 QXLPHYSICAL src_bitmap;
564 struct qxl_rect src_area;
565 struct qxl_brush brush;
566 uint8_t rop3;
567 uint8_t scale_mode;
568 struct qxl_q_mask mask;
569};
570
571struct qxl_line_attr {
572 uint8_t flags;
573 uint8_t join_style;
574 uint8_t end_style;
575 uint8_t style_nseg;
576 QXLFIXED width;
577 QXLFIXED miter_limit;
578 QXLPHYSICAL style;
579};
580
581struct qxl_stroke {
582 QXLPHYSICAL path;
583 struct qxl_line_attr attr;
584 struct qxl_brush brush;
585 uint16_t fore_mode;
586 uint16_t back_mode;
587};
588
589struct qxl_text {
590 QXLPHYSICAL str;
591 struct qxl_rect back_area;
592 struct qxl_brush fore_brush;
593 struct qxl_brush back_brush;
594 uint16_t fore_mode;
595 uint16_t back_mode;
596};
597
598struct qxl_mask {
599 struct qxl_q_mask mask;
600};
601
602struct qxl_clip {
603 uint32_t type;
604 QXLPHYSICAL data;
605};
606
607enum qxl_operator {
608 QXL_OP_CLEAR = 0x00,
609 QXL_OP_SOURCE = 0x01,
610 QXL_OP_DST = 0x02,
611 QXL_OP_OVER = 0x03,
612 QXL_OP_OVER_REVERSE = 0x04,
613 QXL_OP_IN = 0x05,
614 QXL_OP_IN_REVERSE = 0x06,
615 QXL_OP_OUT = 0x07,
616 QXL_OP_OUT_REVERSE = 0x08,
617 QXL_OP_ATOP = 0x09,
618 QXL_OP_ATOP_REVERSE = 0x0a,
619 QXL_OP_XOR = 0x0b,
620 QXL_OP_ADD = 0x0c,
621 QXL_OP_SATURATE = 0x0d,
622 /* Note the jump here from 0x0d to 0x30 */
623 QXL_OP_MULTIPLY = 0x30,
624 QXL_OP_SCREEN = 0x31,
625 QXL_OP_OVERLAY = 0x32,
626 QXL_OP_DARKEN = 0x33,
627 QXL_OP_LIGHTEN = 0x34,
628 QXL_OP_COLOR_DODGE = 0x35,
629 QXL_OP_COLOR_BURN = 0x36,
630 QXL_OP_HARD_LIGHT = 0x37,
631 QXL_OP_SOFT_LIGHT = 0x38,
632 QXL_OP_DIFFERENCE = 0x39,
633 QXL_OP_EXCLUSION = 0x3a,
634 QXL_OP_HSL_HUE = 0x3b,
635 QXL_OP_HSL_SATURATION = 0x3c,
636 QXL_OP_HSL_COLOR = 0x3d,
637 QXL_OP_HSL_LUMINOSITY = 0x3e
638};
639
640struct qxl_transform {
641 uint32_t t00;
642 uint32_t t01;
643 uint32_t t02;
644 uint32_t t10;
645 uint32_t t11;
646 uint32_t t12;
647};
648
649/* The flags field has the following bit fields:
650 *
651 * operator: [ 0 - 7 ]
652 * src_filter: [ 8 - 10 ]
653 * mask_filter: [ 11 - 13 ]
654 * src_repeat: [ 14 - 15 ]
655 * mask_repeat: [ 16 - 17 ]
656 * component_alpha: [ 18 - 18 ]
657 * reserved: [ 19 - 31 ]
658 *
659 * The repeat and filter values are those of pixman:
660 * REPEAT_NONE = 0
661 * REPEAT_NORMAL = 1
662 * REPEAT_PAD = 2
663 * REPEAT_REFLECT = 3
664 *
665 * The filter values are:
666 * FILTER_NEAREST = 0
667 * FILTER_BILINEAR = 1
668 */
669struct qxl_composite {
670 uint32_t flags;
671
672 QXLPHYSICAL src;
673 QXLPHYSICAL src_transform; /* May be NULL */
674 QXLPHYSICAL mask; /* May be NULL */
675 QXLPHYSICAL mask_transform; /* May be NULL */
676 struct qxl_point_1_6 src_origin;
677 struct qxl_point_1_6 mask_origin;
678};
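/*
 * Editor's aside (hedged): illustrative accessors for the flags layout
 * documented above -- these macros are not part of the spice protocol
 * headers, they only restate the documented bit ranges:
 */
#define QXL_COMPOSITE_OP(f)		(((f) >>  0) & 0xff)	/* [0-7]   */
#define QXL_COMPOSITE_SRC_FILTER(f)	(((f) >>  8) & 0x07)	/* [8-10]  */
#define QXL_COMPOSITE_MASK_FILTER(f)	(((f) >> 11) & 0x07)	/* [11-13] */
#define QXL_COMPOSITE_SRC_REPEAT(f)	(((f) >> 14) & 0x03)	/* [14-15] */
#define QXL_COMPOSITE_MASK_REPEAT(f)	(((f) >> 16) & 0x03)	/* [16-17] */
#define QXL_COMPOSITE_COMP_ALPHA(f)	(((f) >> 18) & 0x01)	/* [18]    */

/* e.g. OVER with a bilinear source filter and normal source repeat: */
/* flags = QXL_OP_OVER | (1 << 8) | (1 << 14); */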
679
680struct qxl_compat_drawable {
681 union qxl_release_info release_info;
682 uint8_t effect;
683 uint8_t type;
684 uint16_t bitmap_offset;
685 struct qxl_rect bitmap_area;
686 struct qxl_rect bbox;
687 struct qxl_clip clip;
688 uint32_t mm_time;
689 union {
690 struct qxl_fill fill;
691 struct qxl_opaque opaque;
692 struct qxl_copy copy;
693 struct qxl_transparent transparent;
694 struct qxl_compat_alpha_blend alpha_blend;
695 struct qxl_copy_bits copy_bits;
696 struct qxl_copy blend;
697 struct qxl_rop_3 rop3;
698 struct qxl_stroke stroke;
699 struct qxl_text text;
700 struct qxl_mask blackness;
701 struct qxl_mask invers;
702 struct qxl_mask whiteness;
703 } u;
704};
705
706struct qxl_drawable {
707 union qxl_release_info release_info;
708 uint32_t surface_id;
709 uint8_t effect;
710 uint8_t type;
711 uint8_t self_bitmap;
712 struct qxl_rect self_bitmap_area;
713 struct qxl_rect bbox;
714 struct qxl_clip clip;
715 uint32_t mm_time;
716 int32_t surfaces_dest[3];
717 struct qxl_rect surfaces_rects[3];
718 union {
719 struct qxl_fill fill;
720 struct qxl_opaque opaque;
721 struct qxl_copy copy;
722 struct qxl_transparent transparent;
723 struct qxl_alpha_blend alpha_blend;
724 struct qxl_copy_bits copy_bits;
725 struct qxl_copy blend;
726 struct qxl_rop_3 rop3;
727 struct qxl_stroke stroke;
728 struct qxl_text text;
729 struct qxl_mask blackness;
730 struct qxl_mask invers;
731 struct qxl_mask whiteness;
732 struct qxl_composite composite;
733 } u;
734};
735
736enum qxl_surface_cmd_type {
737 QXL_SURFACE_CMD_CREATE,
738 QXL_SURFACE_CMD_DESTROY,
739};
740
741struct qxl_surface {
742 uint32_t format;
743 uint32_t width;
744 uint32_t height;
745 int32_t stride;
746 QXLPHYSICAL data;
747};
748
749struct qxl_surface_cmd {
750 union qxl_release_info release_info;
751 uint32_t surface_id;
752 uint8_t type;
753 uint32_t flags;
754 union {
755 struct qxl_surface surface_create;
756 } u;
757};
758
759struct qxl_clip_rects {
760 uint32_t num_rects;
761 struct qxl_data_chunk chunk;
762};
763
764enum {
765 QXL_PATH_BEGIN = (1 << 0),
766 QXL_PATH_END = (1 << 1),
767 QXL_PATH_CLOSE = (1 << 3),
768 QXL_PATH_BEZIER = (1 << 4),
769};
770
771struct qxl_path_seg {
772 uint32_t flags;
773 uint32_t count;
774 struct qxl_point_fix points[0];
775};
776
777struct qxl_path {
778 uint32_t data_size;
779 struct qxl_data_chunk chunk;
780};
781
782enum {
783 QXL_IMAGE_GROUP_DRIVER,
784 QXL_IMAGE_GROUP_DEVICE,
785 QXL_IMAGE_GROUP_RED,
786 QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
787};
788
789struct qxl_image_id {
790 uint32_t group;
791 uint32_t unique;
792};
793
794union qxl_image_id_union {
795 struct qxl_image_id id;
796 uint64_t value;
797};
798
799enum qxl_image_flags {
800 QXL_IMAGE_CACHE = (1 << 0),
801 QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
802};
803
804enum qxl_bitmap_flags {
805 QXL_BITMAP_DIRECT = (1 << 0),
806 QXL_BITMAP_UNSTABLE = (1 << 1),
807 QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
808};
809
810#define QXL_SET_IMAGE_ID(image, _group, _unique) { \
811 (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \
812}
813
814struct qxl_image_descriptor {
815 uint64_t id;
816 uint8_t type;
817 uint8_t flags;
818 uint32_t width;
819 uint32_t height;
820};
821
822struct qxl_palette {
823 uint64_t unique;
824 uint16_t num_ents;
825 uint32_t ents[0];
826};
827
828struct qxl_bitmap {
829 uint8_t format;
830 uint8_t flags;
831 uint32_t x;
832 uint32_t y;
833 uint32_t stride;
834 QXLPHYSICAL palette;
835 QXLPHYSICAL data; /* data[0] ? */
836};
837
838struct qxl_surface_id {
839 uint32_t surface_id;
840};
841
842struct qxl_encoder_data {
843 uint32_t data_size;
844 uint8_t data[0];
845};
846
847struct qxl_image {
848 struct qxl_image_descriptor descriptor;
849 union { /* variable length */
850 struct qxl_bitmap bitmap;
851 struct qxl_encoder_data quic;
852 struct qxl_surface_id surface_image;
853 } u;
854};
855
856/* A QXLHead is a single monitor output backed by a QXLSurface.
857 * x and y offsets are unsigned since they are used in relation to
858 * the given surface, not the same as the x, y coordinates in the guest
859 * screen reference frame. */
860struct qxl_head {
861 uint32_t id;
862 uint32_t surface_id;
863 uint32_t width;
864 uint32_t height;
865 uint32_t x;
866 uint32_t y;
867 uint32_t flags;
868};
869
870struct qxl_monitors_config {
871 uint16_t count;
872 uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
873 driver */
874 struct qxl_head heads[0];
875};
876
877#pragma pack(pop)
878
879#endif /* H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 000000000000..823d29e926ec
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,973 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include "linux/crc32.h"
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31#include "drm_crtc_helper.h"
32
33static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
34 struct drm_connector *connector,
35 struct qxl_head *head)
36{
37 struct drm_device *dev = connector->dev;
38 struct drm_display_mode *mode, *t;
39 int width = head->width;
40 int height = head->height;
41
42 if (width < 320 || height < 240) {
43		qxl_io_log(qdev, "%s: bad head: %dx%d\n", __func__, width, height);
44 width = 1024;
45 height = 768;
46 }
47 if (width * height * 4 > 16*1024*1024) {
48 width = 1024;
49 height = 768;
50 }
51	/* TODO: go over regular modes and remove preferred? */
52 list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
53 drm_mode_remove(connector, mode);
54 mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
55 mode->type |= DRM_MODE_TYPE_PREFERRED;
56 mode->status = MODE_OK;
57 drm_mode_probed_add(connector, mode);
58 qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
59}
60
61void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
62{
63 struct drm_connector *connector;
64 int i;
65 struct drm_device *dev = qdev->ddev;
66
67 i = 0;
68 qxl_io_log(qdev, "%s: %d, %d\n", __func__,
69 dev->mode_config.num_connector,
70 qdev->monitors_config->count);
71 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
72 if (i >= qdev->monitors_config->count) {
73 /* crtc will be reported as disabled */
74 continue;
75 }
76 qxl_crtc_set_to_mode(qdev, connector,
77 &qdev->monitors_config->heads[i]);
78 ++i;
79 }
80}
81
82void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
83{
84 if (qdev->client_monitors_config &&
85 count > qdev->client_monitors_config->count) {
86 kfree(qdev->client_monitors_config);
87 qdev->client_monitors_config = NULL;
88 }
89 if (!qdev->client_monitors_config) {
90 qdev->client_monitors_config = kzalloc(
91 sizeof(struct qxl_monitors_config) +
92 sizeof(struct qxl_head) * count, GFP_KERNEL);
93 if (!qdev->client_monitors_config) {
94 qxl_io_log(qdev,
95 "%s: allocation failure for %u heads\n",
96 __func__, count);
97 return;
98 }
99 }
100 qdev->client_monitors_config->count = count;
101}
102
103static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
104{
105 int i;
106 int num_monitors;
107 uint32_t crc;
108
109 BUG_ON(!qdev->monitors_config);
110 num_monitors = qdev->rom->client_monitors_config.count;
111 crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
112 sizeof(qdev->rom->client_monitors_config));
113 if (crc != qdev->rom->client_monitors_config_crc) {
114 qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
115 sizeof(qdev->rom->client_monitors_config),
116 qdev->rom->client_monitors_config_crc);
117 return 1;
118 }
119 if (num_monitors > qdev->monitors_config->max_allowed) {
120 DRM_INFO("client monitors list will be truncated: %d < %d\n",
121 qdev->monitors_config->max_allowed, num_monitors);
122 num_monitors = qdev->monitors_config->max_allowed;
123 }
126 qxl_alloc_client_monitors_config(qdev, num_monitors);
127 /* we copy max from the client but it isn't used */
128 qdev->client_monitors_config->max_allowed =
129 qdev->monitors_config->max_allowed;
130 for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
131 struct qxl_urect *c_rect =
132 &qdev->rom->client_monitors_config.heads[i];
133 struct qxl_head *client_head =
134 &qdev->client_monitors_config->heads[i];
135 struct qxl_head *head = &qdev->monitors_config->heads[i];
136 client_head->x = head->x = c_rect->left;
137 client_head->y = head->y = c_rect->top;
138 client_head->width = head->width =
139 c_rect->right - c_rect->left;
140 client_head->height = head->height =
141 c_rect->bottom - c_rect->top;
142 client_head->surface_id = head->surface_id = 0;
143 client_head->id = head->id = i;
144 client_head->flags = head->flags = 0;
145 QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
146 head->x, head->y);
147 }
148 return 0;
149}
150
151void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
152{
153
154 while (qxl_display_copy_rom_client_monitors_config(qdev)) {
155 qxl_io_log(qdev, "failed crc check for client_monitors_config,"
156 " retrying\n");
157 }
158 qxl_crtc_set_from_monitors_config(qdev);
159 /* fire off a uevent and let userspace tell us what to do */
160 qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
161 drm_sysfs_hotplug_event(qdev->ddev);
162}
163
164static int qxl_add_monitors_config_modes(struct drm_connector *connector)
165{
166 struct drm_device *dev = connector->dev;
167 struct qxl_device *qdev = dev->dev_private;
168 struct qxl_output *output = drm_connector_to_qxl_output(connector);
169 int h = output->index;
170 struct drm_display_mode *mode = NULL;
171 struct qxl_head *head;
172
173 if (!qdev->monitors_config)
174 return 0;
175 head = &qdev->monitors_config->heads[h];
176
177 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
178 false);
179 mode->type |= DRM_MODE_TYPE_PREFERRED;
180 drm_mode_probed_add(connector, mode);
181 return 1;
182}
183
184static int qxl_add_common_modes(struct drm_connector *connector)
185{
186 struct drm_device *dev = connector->dev;
187 struct drm_display_mode *mode = NULL;
188 int i;
189 struct mode_size {
190 int w;
191 int h;
192 } common_modes[] = {
193 { 640, 480},
194 { 720, 480},
195 { 800, 600},
196 { 848, 480},
197 {1024, 768},
198 {1152, 768},
199 {1280, 720},
200 {1280, 800},
201 {1280, 854},
202 {1280, 960},
203 {1280, 1024},
204 {1440, 900},
205 {1400, 1050},
206 {1680, 1050},
207 {1600, 1200},
208 {1920, 1080},
209 {1920, 1200}
210 };
211
212 for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
213 if (common_modes[i].w < 320 || common_modes[i].h < 200)
214 continue;
215
216 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
217 60, false, false, false);
218 if (common_modes[i].w == 1024 && common_modes[i].h == 768)
219 mode->type |= DRM_MODE_TYPE_PREFERRED;
220 drm_mode_probed_add(connector, mode);
221 }
222 return i; /* one mode was added per loop iteration */
223}
224
225static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
226 u16 *blue, uint32_t start, uint32_t size)
227{
228 /* TODO */
229}
230
231static void qxl_crtc_destroy(struct drm_crtc *crtc)
232{
233 struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
234
235 drm_crtc_cleanup(crtc);
236 kfree(qxl_crtc);
237}
238
239static void
240qxl_hide_cursor(struct qxl_device *qdev)
241{
242 struct qxl_release *release;
243 struct qxl_cursor_cmd *cmd;
244 int ret;
245
246 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
247 &release, NULL);
248 if (ret)
249 return;
250
249 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
250 cmd->type = QXL_CURSOR_HIDE;
251 qxl_release_unmap(qdev, release, &cmd->release_info);
252
253 qxl_fence_releaseable(qdev, release);
254 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
255 qxl_release_unreserve(qdev, release);
256}
257
258static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
259 struct drm_file *file_priv,
260 uint32_t handle,
261 uint32_t width,
262 uint32_t height)
263{
264 struct drm_device *dev = crtc->dev;
265 struct qxl_device *qdev = dev->dev_private;
266 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
267 struct drm_gem_object *obj;
268 struct qxl_cursor *cursor;
269 struct qxl_cursor_cmd *cmd;
270 struct qxl_bo *cursor_bo, *user_bo;
271 struct qxl_release *release;
272 void *user_ptr;
273 int size = 64*64*4;
274 int ret = 0;
275
276 if (!handle) {
277 qxl_hide_cursor(qdev);
278 return 0;
279 }
280
281 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
282 if (!obj) {
283 DRM_ERROR("cannot find cursor object\n");
284 return -ENOENT;
285 }
286
287 user_bo = gem_to_qxl_bo(obj);
288
289 ret = qxl_bo_reserve(user_bo, false);
290 if (ret)
291 goto out_unref;
292
293 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
294 if (ret)
295 goto out_unreserve;
296
297 ret = qxl_bo_kmap(user_bo, &user_ptr);
298 if (ret)
299 goto out_unpin;
300
301 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
302 QXL_RELEASE_CURSOR_CMD,
303 &release, NULL);
304 if (ret)
305 goto out_kunmap;
306 ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
307 &cursor_bo);
308 if (ret)
309 goto out_free_release;
310 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
311 if (ret)
312 goto out_free_bo;
313
314 cursor->header.unique = 0;
315 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
316 cursor->header.width = 64;
317 cursor->header.height = 64;
318 cursor->header.hot_spot_x = 0;
319 cursor->header.hot_spot_y = 0;
320 cursor->data_size = size;
321 cursor->chunk.next_chunk = 0;
322 cursor->chunk.prev_chunk = 0;
323 cursor->chunk.data_size = size;
324
325 memcpy(cursor->chunk.data, user_ptr, size);
326
327 qxl_bo_kunmap(cursor_bo);
328
329 /* finish with the userspace bo */
330 qxl_bo_kunmap(user_bo);
331 qxl_bo_unpin(user_bo);
332 qxl_bo_unreserve(user_bo);
333 drm_gem_object_unreference_unlocked(obj);
334
335 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
336 cmd->type = QXL_CURSOR_SET;
337 cmd->u.set.position.x = qcrtc->cur_x;
338 cmd->u.set.position.y = qcrtc->cur_y;
339
340 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
341 qxl_release_add_res(qdev, release, cursor_bo);
342
343 cmd->u.set.visible = 1;
344 qxl_release_unmap(qdev, release, &cmd->release_info);
345
346 qxl_fence_releaseable(qdev, release);
347 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
348 qxl_release_unreserve(qdev, release);
349
350 qxl_bo_unreserve(cursor_bo);
351 qxl_bo_unref(&cursor_bo);
352
353 return ret;
354out_free_bo:
355 qxl_bo_unref(&cursor_bo);
356out_free_release:
357 qxl_release_unreserve(qdev, release);
358 qxl_release_free(qdev, release);
359out_kunmap:
360 qxl_bo_kunmap(user_bo);
361out_unpin:
362 qxl_bo_unpin(user_bo);
363out_unreserve:
364 qxl_bo_unreserve(user_bo);
365out_unref:
366 drm_gem_object_unreference_unlocked(obj);
367 return ret;
368}
369
370static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
371 int x, int y)
372{
373 struct drm_device *dev = crtc->dev;
374 struct qxl_device *qdev = dev->dev_private;
375 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
376 struct qxl_release *release;
377 struct qxl_cursor_cmd *cmd;
378 int ret;
379
380 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
381 &release, NULL);
382 if (ret)
383 return ret;
384
383 qcrtc->cur_x = x;
384 qcrtc->cur_y = y;
385
386 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
387 cmd->type = QXL_CURSOR_MOVE;
388 cmd->u.position.x = qcrtc->cur_x;
389 cmd->u.position.y = qcrtc->cur_y;
390 qxl_release_unmap(qdev, release, &cmd->release_info);
391
392 qxl_fence_releaseable(qdev, release);
393 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
394 qxl_release_unreserve(qdev, release);
395 return 0;
396}
397
398
399static const struct drm_crtc_funcs qxl_crtc_funcs = {
400 .cursor_set = qxl_crtc_cursor_set,
401 .cursor_move = qxl_crtc_cursor_move,
402 .gamma_set = qxl_crtc_gamma_set,
403 .set_config = drm_crtc_helper_set_config,
404 .destroy = qxl_crtc_destroy,
405};
406
407static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
408{
409 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
410
411 if (qxl_fb->obj)
412 drm_gem_object_unreference_unlocked(qxl_fb->obj);
413 drm_framebuffer_cleanup(fb);
414 kfree(qxl_fb);
415}
416
417static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
418 struct drm_file *file_priv,
419 unsigned flags, unsigned color,
420 struct drm_clip_rect *clips,
421 unsigned num_clips)
422{
423 /* TODO: vmwgfx where this was cribbed from had locking. Why? */
424 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
425 struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
426 struct drm_clip_rect norect;
427 struct qxl_bo *qobj;
428 int inc = 1;
429
430 qobj = gem_to_qxl_bo(qxl_fb->obj);
431 /* if we aren't primary surface ignore this */
432 if (!qobj->is_primary)
433 return 0;
434
435 if (!num_clips) {
436 num_clips = 1;
437 clips = &norect;
438 norect.x1 = norect.y1 = 0;
439 norect.x2 = fb->width;
440 norect.y2 = fb->height;
441 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
442 num_clips /= 2;
443 inc = 2; /* skip source rects */
444 }
445
446 qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
447 clips, num_clips, inc);
448 return 0;
449}
450
451static const struct drm_framebuffer_funcs qxl_fb_funcs = {
452 .destroy = qxl_user_framebuffer_destroy,
453 .dirty = qxl_framebuffer_surface_dirty,
454/* TODO?
455 * .create_handle = qxl_user_framebuffer_create_handle, */
456};
457
458int
459qxl_framebuffer_init(struct drm_device *dev,
460 struct qxl_framebuffer *qfb,
461 struct drm_mode_fb_cmd2 *mode_cmd,
462 struct drm_gem_object *obj)
463{
464 int ret;
465
466 qfb->obj = obj;
467 ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
468 if (ret) {
469 qfb->obj = NULL;
470 return ret;
471 }
472 drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
473 return 0;
474}
475
476static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
477{
478}
479
480static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
481 const struct drm_display_mode *mode,
482 struct drm_display_mode *adjusted_mode)
483{
484 struct drm_device *dev = crtc->dev;
485 struct qxl_device *qdev = dev->dev_private;
486
487 qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
488 __func__,
489 mode->hdisplay, mode->vdisplay,
490 adjusted_mode->hdisplay,
491 adjusted_mode->vdisplay);
492 return true;
493}
494
495void
496qxl_send_monitors_config(struct qxl_device *qdev)
497{
498 int i;
499
500 BUG_ON(!qdev->ram_header->monitors_config);
501
502 if (qdev->monitors_config->count == 0) {
503 qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
504 return;
505 }
506 for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
507 struct qxl_head *head = &qdev->monitors_config->heads[i];
508
509 if (head->x > 8192 || head->y > 8192 ||
510 head->width > 8192 || head->height > 8192) {
511 DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
512 i, head->width, head->height,
513 head->x, head->y);
514 return;
515 }
516 }
517 qxl_io_monitors_config(qdev);
518}
519
520static void qxl_monitors_config_set_single(struct qxl_device *qdev,
521 unsigned x, unsigned y,
522 unsigned width, unsigned height)
523{
524 DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
525 qdev->monitors_config->count = 1;
526 qdev->monitors_config->heads[0].x = x;
527 qdev->monitors_config->heads[0].y = y;
528 qdev->monitors_config->heads[0].width = width;
529 qdev->monitors_config->heads[0].height = height;
530}
531
532static int qxl_crtc_mode_set(struct drm_crtc *crtc,
533 struct drm_display_mode *mode,
534 struct drm_display_mode *adjusted_mode,
535 int x, int y,
536 struct drm_framebuffer *old_fb)
537{
538 struct drm_device *dev = crtc->dev;
539 struct qxl_device *qdev = dev->dev_private;
540 struct qxl_mode *m = (void *)mode->private;
541 struct qxl_framebuffer *qfb;
542 struct qxl_bo *bo, *old_bo = NULL;
543 uint32_t width, height, base_offset;
544 bool recreate_primary = false;
545 int ret;
546
547 if (!crtc->fb) {
548 DRM_DEBUG_KMS("No FB bound\n");
549 return 0;
550 }
551
552 if (old_fb) {
553 qfb = to_qxl_framebuffer(old_fb);
554 old_bo = gem_to_qxl_bo(qfb->obj);
555 }
556 qfb = to_qxl_framebuffer(crtc->fb);
557 bo = gem_to_qxl_bo(qfb->obj);
558 if (!m)
559 /* and do we care? */
560 DRM_DEBUG("%dx%d: not a native mode\n", x, y);
561 else
562 DRM_DEBUG("%dx%d: qxl id %d\n",
563 mode->hdisplay, mode->vdisplay, m->id);
564 DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
565 x, y,
566 mode->hdisplay, mode->vdisplay,
567 adjusted_mode->hdisplay,
568 adjusted_mode->vdisplay);
569
570 recreate_primary = true;
571
572 width = mode->hdisplay;
573 height = mode->vdisplay;
574 base_offset = 0;
575
576 ret = qxl_bo_reserve(bo, false);
577 if (ret != 0)
578 return ret;
579 ret = qxl_bo_pin(bo, bo->type, NULL);
580 if (ret != 0) {
581 qxl_bo_unreserve(bo);
582 return -EINVAL;
583 }
584 qxl_bo_unreserve(bo);
585 if (recreate_primary) {
586 qxl_io_destroy_primary(qdev);
587 qxl_io_log(qdev,
588 "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
589 width, height, bo->surf.width,
590 bo->surf.height, bo->surf.stride, bo->surf.format);
591 qxl_io_create_primary(qdev, width, height, base_offset, bo);
592 bo->is_primary = true;
593 }
594
595 if (old_bo && old_bo != bo) {
596 old_bo->is_primary = false;
597 ret = qxl_bo_reserve(old_bo, false);
598 qxl_bo_unpin(old_bo);
599 qxl_bo_unreserve(old_bo);
600 }
601
602 if (qdev->monitors_config->count == 0) {
603 qxl_monitors_config_set_single(qdev, x, y,
604 mode->hdisplay,
605 mode->vdisplay);
606 }
607 return 0;
608}
609
610static void qxl_crtc_prepare(struct drm_crtc *crtc)
611{
612 DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
613 crtc->mode.hdisplay, crtc->mode.vdisplay,
614 crtc->x, crtc->y, crtc->enabled);
615}
616
617static void qxl_crtc_commit(struct drm_crtc *crtc)
618{
619 DRM_DEBUG("\n");
620}
621
622static void qxl_crtc_load_lut(struct drm_crtc *crtc)
623{
624 DRM_DEBUG("\n");
625}
626
627static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
628 .dpms = qxl_crtc_dpms,
629 .mode_fixup = qxl_crtc_mode_fixup,
630 .mode_set = qxl_crtc_mode_set,
631 .prepare = qxl_crtc_prepare,
632 .commit = qxl_crtc_commit,
633 .load_lut = qxl_crtc_load_lut,
634};
635
636static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
637{
638 struct qxl_crtc *qxl_crtc;
639
640 qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
641 if (!qxl_crtc)
642 return -ENOMEM;
643
644 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
645
646 drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
647 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
648 return 0;
649}
650
651static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
652{
653 DRM_DEBUG("\n");
654}
655
656static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
657 const struct drm_display_mode *mode,
658 struct drm_display_mode *adjusted_mode)
659{
660 DRM_DEBUG("\n");
661 return true;
662}
663
664static void qxl_enc_prepare(struct drm_encoder *encoder)
665{
666 DRM_DEBUG("\n");
667}
668
669static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
670 struct drm_encoder *encoder)
671{
672 int i;
673 struct qxl_head *head;
674 struct drm_display_mode *mode;
675
676 BUG_ON(!encoder);
677 /* TODO: ugly, do better */
678 for (i = 0; i < 32 && encoder->possible_crtcs != (1 << i); ++i)
679 ;
680 if (i == 32) {
681 DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
682 encoder->possible_crtcs);
683 return;
684 }
685 if (!qdev->monitors_config ||
686 qdev->monitors_config->max_allowed <= i) {
687 DRM_ERROR(
688 "head number too large or missing monitors config: %p, %d",
689 qdev->monitors_config,
690 qdev->monitors_config ?
691 qdev->monitors_config->max_allowed : -1);
692 return;
693 }
694 if (!encoder->crtc) {
695 DRM_ERROR("missing crtc on encoder %p\n", encoder);
696 return;
697 }
698 if (i != 0)
699 DRM_DEBUG("missing for multiple monitors: no head holes\n");
700 head = &qdev->monitors_config->heads[i];
701 head->id = i;
702 head->surface_id = 0;
703 if (encoder->crtc->enabled) {
704 mode = &encoder->crtc->mode;
705 head->width = mode->hdisplay;
706 head->height = mode->vdisplay;
707 head->x = encoder->crtc->x;
708 head->y = encoder->crtc->y;
709 if (qdev->monitors_config->count < i + 1)
710 qdev->monitors_config->count = i + 1;
711 } else {
712 head->width = 0;
713 head->height = 0;
714 head->x = 0;
715 head->y = 0;
716 }
717 DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
718 i, head->x, head->y, head->width, head->height);
719 head->flags = 0;
720 /* TODO - somewhere else to call this for multiple monitors
721 * (config_commit?) */
722 qxl_send_monitors_config(qdev);
723}
724
725static void qxl_enc_commit(struct drm_encoder *encoder)
726{
727 struct qxl_device *qdev = encoder->dev->dev_private;
728
729 qxl_write_monitors_config_for_encoder(qdev, encoder);
730 DRM_DEBUG("\n");
731}
732
733static void qxl_enc_mode_set(struct drm_encoder *encoder,
734 struct drm_display_mode *mode,
735 struct drm_display_mode *adjusted_mode)
736{
737 DRM_DEBUG("\n");
738}
739
740static int qxl_conn_get_modes(struct drm_connector *connector)
741{
742 int ret = 0;
743 struct qxl_device *qdev = connector->dev->dev_private;
744
745 DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
746 /* TODO: what should we do here? only show the configured modes for the
747 * device, or allow the full list, or both? */
748 if (qdev->monitors_config && qdev->monitors_config->count) {
749 ret = qxl_add_monitors_config_modes(connector);
750 if (ret < 0)
751 return ret;
752 }
753 ret += qxl_add_common_modes(connector);
754 return ret;
755}
756
757static int qxl_conn_mode_valid(struct drm_connector *connector,
758 struct drm_display_mode *mode)
759{
760 /* TODO: is this called for user defined modes? (xrandr --add-mode)
761 * TODO: check that the mode fits in the framebuffer */
762 DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
763 mode->vdisplay, mode->status);
764 return MODE_OK;
765}
766
767static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
768{
769 struct qxl_output *qxl_output =
770 drm_connector_to_qxl_output(connector);
771
772 DRM_DEBUG("\n");
773 return &qxl_output->enc;
774}
775
776
777static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
778 .dpms = qxl_enc_dpms,
779 .mode_fixup = qxl_enc_mode_fixup,
780 .prepare = qxl_enc_prepare,
781 .mode_set = qxl_enc_mode_set,
782 .commit = qxl_enc_commit,
783};
784
785static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
786 .get_modes = qxl_conn_get_modes,
787 .mode_valid = qxl_conn_mode_valid,
788 .best_encoder = qxl_best_encoder,
789};
790
791static void qxl_conn_save(struct drm_connector *connector)
792{
793 DRM_DEBUG("\n");
794}
795
796static void qxl_conn_restore(struct drm_connector *connector)
797{
798 DRM_DEBUG("\n");
799}
800
801static enum drm_connector_status qxl_conn_detect(
802 struct drm_connector *connector,
803 bool force)
804{
805 struct qxl_output *output =
806 drm_connector_to_qxl_output(connector);
807 struct drm_device *ddev = connector->dev;
808 struct qxl_device *qdev = ddev->dev_private;
809 int connected;
810
811 /* The first monitor is always connected */
812 connected = (output->index == 0) ||
813 (qdev->monitors_config &&
814 qdev->monitors_config->count > output->index);
815
816 DRM_DEBUG("\n");
817 return connected ? connector_status_connected
818 : connector_status_disconnected;
819}
820
821static int qxl_conn_set_property(struct drm_connector *connector,
822 struct drm_property *property,
823 uint64_t value)
824{
825 DRM_DEBUG("\n");
826 return 0;
827}
828
829static void qxl_conn_destroy(struct drm_connector *connector)
830{
831 struct qxl_output *qxl_output =
832 drm_connector_to_qxl_output(connector);
833
834 drm_sysfs_connector_remove(connector);
835 drm_connector_cleanup(connector);
836 kfree(qxl_output);
837}
838
839static const struct drm_connector_funcs qxl_connector_funcs = {
840 .dpms = drm_helper_connector_dpms,
841 .save = qxl_conn_save,
842 .restore = qxl_conn_restore,
843 .detect = qxl_conn_detect,
844 .fill_modes = drm_helper_probe_single_connector_modes,
845 .set_property = qxl_conn_set_property,
846 .destroy = qxl_conn_destroy,
847};
848
849static void qxl_enc_destroy(struct drm_encoder *encoder)
850{
851 drm_encoder_cleanup(encoder);
852}
853
854static const struct drm_encoder_funcs qxl_enc_funcs = {
855 .destroy = qxl_enc_destroy,
856};
857
858static int qdev_output_init(struct drm_device *dev, int num_output)
859{
860 struct qxl_output *qxl_output;
861 struct drm_connector *connector;
862 struct drm_encoder *encoder;
863
864 qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
865 if (!qxl_output)
866 return -ENOMEM;
867
868 qxl_output->index = num_output;
869
870 connector = &qxl_output->base;
871 encoder = &qxl_output->enc;
872 drm_connector_init(dev, &qxl_output->base,
873 &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
874
875 drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
876 DRM_MODE_ENCODER_VIRTUAL);
877
878 encoder->possible_crtcs = 1 << num_output;
879 drm_mode_connector_attach_encoder(&qxl_output->base,
880 &qxl_output->enc);
881 drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
882 drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
883
884 drm_sysfs_connector_add(connector);
885 return 0;
886}
887
888static struct drm_framebuffer *
889qxl_user_framebuffer_create(struct drm_device *dev,
890 struct drm_file *file_priv,
891 struct drm_mode_fb_cmd2 *mode_cmd)
892{
893 struct drm_gem_object *obj;
894 struct qxl_framebuffer *qxl_fb;
895 int ret;
896
897 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
898 if (!obj)
899 return NULL;
900
901 qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
902 if (qxl_fb == NULL) {
903 drm_gem_object_unreference_unlocked(obj);
904 return NULL;
905 }
902
903 ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
904 if (ret) {
905 kfree(qxl_fb);
906 drm_gem_object_unreference_unlocked(obj);
907 return NULL;
908 }
909
910 return &qxl_fb->base;
911}
912
913static const struct drm_mode_config_funcs qxl_mode_funcs = {
914 .fb_create = qxl_user_framebuffer_create,
915};
916
917int qxl_modeset_init(struct qxl_device *qdev)
918{
919 int i;
920 int ret;
921 struct drm_gem_object *gobj;
922 int max_allowed = QXL_NUM_OUTPUTS;
923 int monitors_config_size = sizeof(struct qxl_monitors_config) +
924 max_allowed * sizeof(struct qxl_head);
925
926 drm_mode_config_init(qdev->ddev);
927 ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
928 QXL_GEM_DOMAIN_VRAM,
929 false, false, NULL, &gobj);
930 if (ret) {
931 DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
932 return -ENOMEM;
933 }
934 qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
935 qxl_bo_kmap(qdev->monitors_config_bo, NULL);
936 qdev->monitors_config = qdev->monitors_config_bo->kptr;
937 qdev->ram_header->monitors_config =
938 qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
939
940 memset(qdev->monitors_config, 0, monitors_config_size);
941 qdev->monitors_config->max_allowed = max_allowed;
942
943 qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
944
945 /* modes will be validated against the framebuffer size */
946 qdev->ddev->mode_config.min_width = 320;
947 qdev->ddev->mode_config.min_height = 200;
948 qdev->ddev->mode_config.max_width = 8192;
949 qdev->ddev->mode_config.max_height = 8192;
950
951 qdev->ddev->mode_config.fb_base = qdev->vram_base;
952 for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
953 qdev_crtc_init(qdev->ddev, i);
954 qdev_output_init(qdev->ddev, i);
955 }
956
957 qdev->mode_info.mode_config_initialized = true;
958
959 /* primary surface must be created by this point, to allow
960 * issuing command queue commands and having them read by
961 * spice server. */
962 qxl_fbdev_init(qdev);
963 return 0;
964}
965
966void qxl_modeset_fini(struct qxl_device *qdev)
967{
968 qxl_fbdev_fini(qdev);
969 if (qdev->mode_info.mode_config_initialized) {
970 drm_mode_config_cleanup(qdev->ddev);
971 qdev->mode_info.mode_config_initialized = false;
972 }
973}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 000000000000..3c8c3dbf9378
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,390 @@
1/*
2 * Copyright 2011 Red Hat, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "qxl_drv.h"
24#include "qxl_object.h"
25
26/* Returns a pointer to the already allocated qxl_rect array inside
27 * the qxl_clip_rects. This is *not* the same as the memory allocated
28 * on the device; it points at qxl_clip_rects.chunk.data within the
29 * kernel mapping of the clips bo. */
29static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
30 struct qxl_drawable *drawable,
31 unsigned num_clips,
32 struct qxl_bo **clips_bo,
33 struct qxl_release *release)
34{
35 struct qxl_clip_rects *dev_clips;
36 int ret;
37 int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
38
39 ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
39 if (ret)
40 return NULL;
41
42 ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
43 if (ret) {
44 qxl_bo_unref(clips_bo);
45 return NULL;
46 }
47 dev_clips->num_rects = num_clips;
48 dev_clips->chunk.next_chunk = 0;
49 dev_clips->chunk.prev_chunk = 0;
50 dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
51 return (struct qxl_rect *)dev_clips->chunk.data;
52}
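The rect array returned above aliases memory inside *clips_bo, so the bo must stay kmapped until every rect has been filled in. A condensed sketch of the calling pattern (qxl_draw_dirty_fb below does the real version):

	rects = drawable_set_clipping(qdev, drawable, num_clips,
				      &clips_bo, release);
	if (rects) {
		for (i = 0; i < num_clips; i++, clips_ptr += inc) {
			rects[i].left = clips_ptr->x1;
			rects[i].right = clips_ptr->x2;
			rects[i].top = clips_ptr->y1;
			rects[i].bottom = clips_ptr->y2;
		}
		qxl_bo_kunmap(clips_bo);
	}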
53
54static int
55make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
56 const struct qxl_rect *rect,
57 struct qxl_release **release)
58{
59 struct qxl_drawable *drawable;
60 int i, ret;
61
62 ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
63 QXL_RELEASE_DRAWABLE, release,
64 NULL);
65 if (ret)
66 return ret;
67
68 drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
69 drawable->type = type;
70
71 drawable->surface_id = surface; /* Only primary for now */
72 drawable->effect = QXL_EFFECT_OPAQUE;
73 drawable->self_bitmap = 0;
74 drawable->self_bitmap_area.top = 0;
75 drawable->self_bitmap_area.left = 0;
76 drawable->self_bitmap_area.bottom = 0;
77 drawable->self_bitmap_area.right = 0;
78 /* FIXME: add clipping */
79 drawable->clip.type = SPICE_CLIP_TYPE_NONE;
80
81 /*
82 * surfaces_dest[i] should apparently be filled out with the
83 * surfaces that we depend on, and surface_rects should be
84 * filled with the rectangles of those surfaces that we
85 * are going to use.
86 */
87 for (i = 0; i < 3; ++i)
88 drawable->surfaces_dest[i] = -1;
89
90 if (rect)
91 drawable->bbox = *rect;
92
93 drawable->mm_time = qdev->rom->mm_clock;
94 qxl_release_unmap(qdev, *release, &drawable->release_info);
95 return 0;
96}
97
98static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
99 const struct qxl_fb_image *qxl_fb_image)
100{
101 struct qxl_device *qdev = qxl_fb_image->qdev;
102 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
103 uint32_t visual = qxl_fb_image->visual;
104 const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
105 struct qxl_palette *pal;
106 int ret;
107 uint32_t fgcolor, bgcolor;
108 static uint64_t unique; /* we make no attempt to actually set this
109 * correctly globally, since that would require
110 * tracking all of our palettes. */
111
112 ret = qxl_alloc_bo_reserved(qdev,
113 sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
114 palette_bo);
115 if (ret)
116 return ret;
117
118 ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
119 if (ret) {
120 qxl_bo_unref(palette_bo);
121 return ret;
122 }
117 pal->num_ents = 2;
118 pal->unique = unique++;
119 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
120 /* NB: this is the only used branch currently. */
121 fgcolor = pseudo_palette[fb_image->fg_color];
122 bgcolor = pseudo_palette[fb_image->bg_color];
123 } else {
124 fgcolor = fb_image->fg_color;
125 bgcolor = fb_image->bg_color;
126 }
127 pal->ents[0] = bgcolor;
128 pal->ents[1] = fgcolor;
129 qxl_bo_kunmap(*palette_bo);
130 return 0;
131}
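As the comment above notes, `unique` is a function-local counter, so palette ids are only unique within this path. If global uniqueness were ever needed, an atomic counter would be the minimal change; a sketch, not part of this patch (assumes <linux/atomic.h>):

	static atomic64_t qxl_palette_unique = ATOMIC64_INIT(0);

	pal->unique = atomic64_inc_return(&qxl_palette_unique);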
132
133void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
134 int stride /* filled in if 0 */)
135{
136 struct qxl_device *qdev = qxl_fb_image->qdev;
137 struct qxl_drawable *drawable;
138 struct qxl_rect rect;
139 const struct fb_image *fb_image = &qxl_fb_image->fb_image;
140 int x = fb_image->dx;
141 int y = fb_image->dy;
142 int width = fb_image->width;
143 int height = fb_image->height;
144 const char *src = fb_image->data;
145 int depth = fb_image->depth;
146 struct qxl_release *release;
147 struct qxl_bo *image_bo;
148 struct qxl_image *image;
149 int ret;
150
151 if (stride == 0)
152 stride = depth * width / 8;
153
154 rect.left = x;
155 rect.right = x + width;
156 rect.top = y;
157 rect.bottom = y + height;
158
159 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
160 if (ret)
161 return;
162
163 ret = qxl_image_create(qdev, release, &image_bo,
164 (const uint8_t *)src, 0, 0,
165 width, height, depth, stride);
166 if (ret) {
167 qxl_release_unreserve(qdev, release);
168 qxl_release_free(qdev, release);
169 return;
170 }
171
172 if (depth == 1) {
173 struct qxl_bo *palette_bo;
174 void *ptr;
175 ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
176 qxl_release_add_res(qdev, release, palette_bo);
177
178 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
179 image = ptr;
180 image->u.bitmap.palette =
181 qxl_bo_physical_address(qdev, palette_bo, 0);
182 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
183 qxl_bo_unreserve(palette_bo);
184 qxl_bo_unref(&palette_bo);
185 }
186
187 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
188
189 drawable->u.copy.src_area.top = 0;
190 drawable->u.copy.src_area.bottom = height;
191 drawable->u.copy.src_area.left = 0;
192 drawable->u.copy.src_area.right = width;
193
194 drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
195 drawable->u.copy.scale_mode = 0;
196 drawable->u.copy.mask.flags = 0;
197 drawable->u.copy.mask.pos.x = 0;
198 drawable->u.copy.mask.pos.y = 0;
199 drawable->u.copy.mask.bitmap = 0;
200
201 drawable->u.copy.src_bitmap =
202 qxl_bo_physical_address(qdev, image_bo, 0);
203 qxl_release_unmap(qdev, release, &drawable->release_info);
204
205 qxl_release_add_res(qdev, release, image_bo);
206 qxl_bo_unreserve(image_bo);
207 qxl_bo_unref(&image_bo);
208
209 qxl_fence_releaseable(qdev, release);
210 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
211 qxl_release_unreserve(qdev, release);
212}
213
214/* push a draw command using the given clipping rectangles as
215 * the sources from the shadow framebuffer.
216 *
217 * Right now this is implemented with a single draw and a clip list.
218 * Clip lists are known to be a performance problem; this could be
219 * solved by treating them differently in the server.
220 */
221void qxl_draw_dirty_fb(struct qxl_device *qdev,
222 struct qxl_framebuffer *qxl_fb,
223 struct qxl_bo *bo,
224 unsigned flags, unsigned color,
225 struct drm_clip_rect *clips,
226 unsigned num_clips, int inc)
227{
228 /*
229 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
230 * send a fill command instead, much cheaper.
231 *
232 * See include/drm/drm_mode.h
233 */
234 struct drm_clip_rect *clips_ptr;
235 int i;
236 int left, right, top, bottom;
237 int width, height;
238 struct qxl_drawable *drawable;
239 struct qxl_rect drawable_rect;
240 struct qxl_rect *rects;
241 int stride = qxl_fb->base.pitches[0];
242 /* depth is not actually interesting, we don't mask with it */
243 int depth = qxl_fb->base.bits_per_pixel;
244 uint8_t *surface_base;
245 struct qxl_release *release;
246 struct qxl_bo *image_bo;
247 struct qxl_bo *clips_bo;
248 int ret;
249
250 left = clips->x1;
251 right = clips->x2;
252 top = clips->y1;
253 bottom = clips->y2;
254
255 /* skip the first clip rect */
256 for (i = 1, clips_ptr = clips + inc;
257 i < num_clips; i++, clips_ptr += inc) {
258 left = min_t(int, left, (int)clips_ptr->x1);
259 right = max_t(int, right, (int)clips_ptr->x2);
260 top = min_t(int, top, (int)clips_ptr->y1);
261 bottom = max_t(int, bottom, (int)clips_ptr->y2);
262 }
263
264 width = right - left;
265 height = bottom - top;
266 drawable_rect.left = left;
267 drawable_rect.right = right;
268 drawable_rect.top = top;
269 drawable_rect.bottom = bottom;
270 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
271 &release);
272 if (ret)
273 return;
274
275 ret = qxl_bo_kmap(bo, (void **)&surface_base);
276 if (ret)
277 goto out_unref;
278
279 ret = qxl_image_create(qdev, release, &image_bo, surface_base,
280 left, top, width, height, depth, stride);
281 qxl_bo_kunmap(bo);
282 if (ret)
283 goto out_unref;
284
285 rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
286 if (!rects) {
287 qxl_bo_unref(&image_bo);
288 goto out_unref;
289 }
290 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
291
292 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
293 drawable->clip.data = qxl_bo_physical_address(qdev,
294 clips_bo, 0);
295 qxl_release_add_res(qdev, release, clips_bo);
296
297 drawable->u.copy.src_area.top = 0;
298 drawable->u.copy.src_area.bottom = height;
299 drawable->u.copy.src_area.left = 0;
300 drawable->u.copy.src_area.right = width;
301
302 drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
303 drawable->u.copy.scale_mode = 0;
304 drawable->u.copy.mask.flags = 0;
305 drawable->u.copy.mask.pos.x = 0;
306 drawable->u.copy.mask.pos.y = 0;
307 drawable->u.copy.mask.bitmap = 0;
308
309 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
310 qxl_release_unmap(qdev, release, &drawable->release_info);
311 qxl_release_add_res(qdev, release, image_bo);
312 qxl_bo_unreserve(image_bo);
313 qxl_bo_unref(&image_bo);
314 clips_ptr = clips;
315 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
316 rects[i].left = clips_ptr->x1;
317 rects[i].right = clips_ptr->x2;
318 rects[i].top = clips_ptr->y1;
319 rects[i].bottom = clips_ptr->y2;
320 }
321 qxl_bo_kunmap(clips_bo);
322 qxl_bo_unreserve(clips_bo);
323 qxl_bo_unref(&clips_bo);
324
325 qxl_fence_releaseable(qdev, release);
326 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
327 qxl_release_unreserve(qdev, release);
328 return;
329
330out_unref:
331 qxl_release_unreserve(qdev, release);
332 qxl_release_free(qdev, release);
333}
334
335void qxl_draw_copyarea(struct qxl_device *qdev,
336 u32 width, u32 height,
337 u32 sx, u32 sy,
338 u32 dx, u32 dy)
339{
340 struct qxl_drawable *drawable;
341 struct qxl_rect rect;
342 struct qxl_release *release;
343 int ret;
344
345 rect.left = dx;
346 rect.top = dy;
347 rect.right = dx + width;
348 rect.bottom = dy + height;
349 ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
350 if (ret)
351 return;
352
353 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
354 drawable->u.copy_bits.src_pos.x = sx;
355 drawable->u.copy_bits.src_pos.y = sy;
356
357 qxl_release_unmap(qdev, release, &drawable->release_info);
358 qxl_fence_releaseable(qdev, release);
359 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
360 qxl_release_unreserve(qdev, release);
361}
362
363void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
364{
365 struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
366 struct qxl_rect rect = qxl_draw_fill_rec->rect;
367 uint32_t color = qxl_draw_fill_rec->color;
368 uint16_t rop = qxl_draw_fill_rec->rop;
369 struct qxl_drawable *drawable;
370 struct qxl_release *release;
371 int ret;
372
373 ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
374 if (ret)
375 return;
376
377 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
378 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
379 drawable->u.fill.brush.u.color = color;
380 drawable->u.fill.rop_descriptor = rop;
381 drawable->u.fill.mask.flags = 0;
382 drawable->u.fill.mask.pos.x = 0;
383 drawable->u.fill.mask.pos.y = 0;
384 drawable->u.fill.mask.bitmap = 0;
385
386 qxl_release_unmap(qdev, release, &drawable->release_info);
387 qxl_fence_releaseable(qdev, release);
388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
389 qxl_release_unreserve(qdev, release);
390}
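For reference, a hedged usage sketch of qxl_draw_fill: the caller packs the device, target rect, color and ROP into a qxl_draw_fill record (the values below are illustrative; SPICE_ROPD_OP_PUT comes from the spice protocol headers, as in the copy paths above):

	struct qxl_draw_fill rec = {
		.qdev = qdev,
		.rect = { .top = 0, .left = 0, .bottom = 480, .right = 640 },
		.color = 0xff000000, /* opaque black */
		.rop = SPICE_ROPD_OP_PUT,
	};

	qxl_draw_fill(&rec);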
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 000000000000..aa291d8a98a2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,145 @@
1/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
2/* qxl_drv.c -- QXL driver -*- linux-c -*-
3 *
4 * Copyright 2011 Red Hat, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Dave Airlie <airlie@redhat.com>
28 * Alon Levy <alevy@redhat.com>
29 */
30
31#include <linux/module.h>
32#include <linux/console.h>
33
34#include "drmP.h"
35#include "drm/drm.h"
36
37#include "qxl_drv.h"
38
39extern int qxl_max_ioctls;
40static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
41 { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
42 0xffff00, 0 },
43 { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
44 0xffff00, 0 },
45 { 0, 0, 0 },
46};
47MODULE_DEVICE_TABLE(pci, pciidlist);
48
49static int qxl_modeset = -1;
50
51MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
52module_param_named(modeset, qxl_modeset, int, 0400);
53
54static struct drm_driver qxl_driver;
55static struct pci_driver qxl_pci_driver;
56
57static int
58qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
59{
60 if (pdev->revision < 4) {
61 DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
62 " use xf86-video-qxl in user mode");
63 return -EINVAL; /* TODO: ENODEV ? */
64 }
65 return drm_get_pci_dev(pdev, ent, &qxl_driver);
66}
67
68static void
69qxl_pci_remove(struct pci_dev *pdev)
70{
71 struct drm_device *dev = pci_get_drvdata(pdev);
72
73 drm_put_dev(dev);
74}
75
76static struct pci_driver qxl_pci_driver = {
77 .name = DRIVER_NAME,
78 .id_table = pciidlist,
79 .probe = qxl_pci_probe,
80 .remove = qxl_pci_remove,
81};
82
83static const struct file_operations qxl_fops = {
84 .owner = THIS_MODULE,
85 .open = drm_open,
86 .release = drm_release,
87 .unlocked_ioctl = drm_ioctl,
88 .poll = drm_poll,
89 .fasync = drm_fasync,
90 .mmap = qxl_mmap,
91};
92
93static struct drm_driver qxl_driver = {
94 .driver_features = DRIVER_GEM | DRIVER_MODESET |
95 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
96 .dev_priv_size = 0,
97 .load = qxl_driver_load,
98 .unload = qxl_driver_unload,
99
100 .dumb_create = qxl_mode_dumb_create,
101 .dumb_map_offset = qxl_mode_dumb_mmap,
102 .dumb_destroy = qxl_mode_dumb_destroy,
103#if defined(CONFIG_DEBUG_FS)
104 .debugfs_init = qxl_debugfs_init,
105 .debugfs_cleanup = qxl_debugfs_takedown,
106#endif
107 .gem_init_object = qxl_gem_object_init,
108 .gem_free_object = qxl_gem_object_free,
109 .gem_open_object = qxl_gem_object_open,
110 .gem_close_object = qxl_gem_object_close,
111 .fops = &qxl_fops,
112 .ioctls = qxl_ioctls,
113 .irq_handler = qxl_irq_handler,
114 .name = DRIVER_NAME,
115 .desc = DRIVER_DESC,
116 .date = DRIVER_DATE,
117 .major = 0,
118 .minor = 1,
119 .patchlevel = 0,
120};
121
122static int __init qxl_init(void)
123{
124#ifdef CONFIG_VGA_CONSOLE
125 if (vgacon_text_force() && qxl_modeset == -1)
126 return -EINVAL;
127#endif
128
129 if (qxl_modeset == 0)
130 return -EINVAL;
131 qxl_driver.num_ioctls = qxl_max_ioctls;
132 return drm_pci_init(&qxl_driver, &qxl_pci_driver);
133}
134
135static void __exit qxl_exit(void)
136{
137 drm_pci_exit(&qxl_driver, &qxl_pci_driver);
138}
139
140module_init(qxl_init);
141module_exit(qxl_exit);
142
143MODULE_AUTHOR(DRIVER_AUTHOR);
144MODULE_DESCRIPTION(DRIVER_DESC);
145MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 000000000000..43d06ab28a21
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,559 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#ifndef QXL_DRV_H
28#define QXL_DRV_H
29
30/*
31 * Definitions taken from spice-protocol, plus kernel driver specific bits.
32 */
33
34#include <linux/workqueue.h>
35#include <linux/firmware.h>
36#include <linux/platform_device.h>
37
38#include "drmP.h"
39#include "drm_crtc.h"
40#include <ttm/ttm_bo_api.h>
41#include <ttm/ttm_bo_driver.h>
42#include <ttm/ttm_placement.h>
43#include <ttm/ttm_module.h>
44
45#include <drm/qxl_drm.h>
46#include "qxl_dev.h"
47
48#define DRIVER_AUTHOR "Dave Airlie"
49
50#define DRIVER_NAME "qxl"
51#define DRIVER_DESC "RH QXL"
52#define DRIVER_DATE "20120117"
53
54#define DRIVER_MAJOR 0
55#define DRIVER_MINOR 1
56#define DRIVER_PATCHLEVEL 0
57
58#define QXL_NUM_OUTPUTS 1
59
60#define QXL_DEBUGFS_MAX_COMPONENTS 32
61
62extern int qxl_log_level;
63
64enum {
65 QXL_INFO_LEVEL = 1,
66 QXL_DEBUG_LEVEL = 2,
67};
68
69#define QXL_INFO(qdev, fmt, ...) do { \
70 if (qxl_log_level >= QXL_INFO_LEVEL) { \
71 qxl_io_log(qdev, fmt, __VA_ARGS__); \
72 } \
73 } while (0)
74#define QXL_DEBUG(qdev, fmt, ...) do { \
75 if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
76 qxl_io_log(qdev, fmt, __VA_ARGS__); \
77 } \
78 } while (0)
79#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
80 static int done; \
81 if (!done) { \
82 done = 1; \
83 QXL_INFO(qdev, fmt, __VA_ARGS__); \
84 } \
85 } while (0)
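/* Usage sketch for the macros above (assumes a valid qdev; the format
 * arguments are illustrative):
 *
 *	QXL_INFO(qdev, "%s: added %d modes\n", __func__, count);
 *	QXL_INFO_ONCE(qdev, "%s: first interrupt\n", __func__);
 */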
86
87#define DRM_FILE_OFFSET 0x100000000ULL
88#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
89
90#define QXL_INTERRUPT_MASK (\
91 QXL_INTERRUPT_DISPLAY |\
92 QXL_INTERRUPT_CURSOR |\
93 QXL_INTERRUPT_IO_CMD |\
94 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
95
96struct qxl_fence {
97 struct qxl_device *qdev;
98 uint32_t num_active_releases;
99 uint32_t *release_ids;
100 struct radix_tree_root tree;
101};
102
103struct qxl_bo {
104 /* Protected by gem.mutex */
105 struct list_head list;
106 /* Protected by tbo.reserved */
107 u32 placements[3];
108 struct ttm_placement placement;
109 struct ttm_buffer_object tbo;
110 struct ttm_bo_kmap_obj kmap;
111 unsigned pin_count;
112 void *kptr;
113 int type;
114 /* Constant after initialization */
115 struct drm_gem_object gem_base;
116 bool is_primary; /* is this now a primary surface */
117 bool hw_surf_alloc;
118 struct qxl_surface surf;
119 uint32_t surface_id;
120 struct qxl_fence fence; /* per bo fence - list of releases */
121 struct qxl_release *surf_create;
122 atomic_t reserve_count;
123};
124#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
125
126struct qxl_gem {
127 struct mutex mutex;
128 struct list_head objects;
129};
130
131struct qxl_bo_list {
132 struct list_head lhead;
133 struct qxl_bo *bo;
134};
135
136struct qxl_reloc_list {
137 struct list_head bos;
138};
139
140struct qxl_crtc {
141 struct drm_crtc base;
142 int cur_x;
143 int cur_y;
144};
145
146struct qxl_output {
147 int index;
148 struct drm_connector base;
149 struct drm_encoder enc;
150};
151
152struct qxl_framebuffer {
153 struct drm_framebuffer base;
154 struct drm_gem_object *obj;
155};
156
157#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
158#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
159#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
160#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
161
162struct qxl_mman {
163 struct ttm_bo_global_ref bo_global_ref;
164 struct drm_global_reference mem_global_ref;
165 bool mem_global_referenced;
166 struct ttm_bo_device bdev;
167};
168
169struct qxl_mode_info {
170 int num_modes;
171 struct qxl_mode *modes;
172 bool mode_config_initialized;
173
174 /* pointer to fbdev info structure */
175 struct qxl_fbdev *qfbdev;
176};
177
178
179struct qxl_memslot {
180 uint8_t generation;
181 uint64_t start_phys_addr;
182 uint64_t end_phys_addr;
183 uint64_t high_bits;
184};
185
186enum {
187 QXL_RELEASE_DRAWABLE,
188 QXL_RELEASE_SURFACE_CMD,
189 QXL_RELEASE_CURSOR_CMD,
190};
191
192/* drm_ prefix to differentiate from qxl_release_info in
193 * spice-protocol/qxl_dev.h */
194#define QXL_MAX_RES 96
195struct qxl_release {
196 int id;
197 int type;
198 int bo_count;
199 uint32_t release_offset;
200 uint32_t surface_release_id;
201 struct qxl_bo *bos[QXL_MAX_RES];
202};
203
204struct qxl_fb_image {
205 struct qxl_device *qdev;
206 uint32_t pseudo_palette[16];
207 struct fb_image fb_image;
208 uint32_t visual;
209};
210
211struct qxl_draw_fill {
212 struct qxl_device *qdev;
213 struct qxl_rect rect;
214 uint32_t color;
215 uint16_t rop;
216};
217
218/*
219 * Debugfs
220 */
221struct qxl_debugfs {
222 struct drm_info_list *files;
223 unsigned num_files;
224};
225
226int qxl_debugfs_add_files(struct qxl_device *rdev,
227 struct drm_info_list *files,
228 unsigned nfiles);
229int qxl_debugfs_fence_init(struct qxl_device *rdev);
230void qxl_debugfs_remove_files(struct qxl_device *qdev);
231
232struct qxl_device;
233
234struct qxl_device {
235 struct device *dev;
236 struct drm_device *ddev;
237 struct pci_dev *pdev;
238 unsigned long flags;
239
240 resource_size_t vram_base, vram_size;
241 resource_size_t surfaceram_base, surfaceram_size;
242 resource_size_t rom_base, rom_size;
243 struct qxl_rom *rom;
244
245 struct qxl_mode *modes;
246 struct qxl_bo *monitors_config_bo;
247 struct qxl_monitors_config *monitors_config;
248
249 /* last received client_monitors_config */
250 struct qxl_monitors_config *client_monitors_config;
251
252 int io_base;
253 void *ram;
254 struct qxl_mman mman;
255 struct qxl_gem gem;
256 struct qxl_mode_info mode_info;
257
258 struct fb_info *fbdev_info;
259 struct qxl_framebuffer *fbdev_qfb;
260 void *ram_physical;
261
262 struct qxl_ring *release_ring;
263 struct qxl_ring *command_ring;
264 struct qxl_ring *cursor_ring;
265
266 struct qxl_ram_header *ram_header;
267
268 bool primary_created;
269
270 struct qxl_memslot *mem_slots;
271 uint8_t n_mem_slots;
272
273 uint8_t main_mem_slot;
274 uint8_t surfaces_mem_slot;
275 uint8_t slot_id_bits;
276 uint8_t slot_gen_bits;
277 uint64_t va_slot_mask;
278
279 struct idr release_idr;
280 spinlock_t release_idr_lock;
281 struct mutex async_io_mutex;
282 unsigned int last_sent_io_cmd;
283
284 /* interrupt handling */
285 atomic_t irq_received;
286 atomic_t irq_received_display;
287 atomic_t irq_received_cursor;
288 atomic_t irq_received_io_cmd;
289 unsigned irq_received_error;
290 wait_queue_head_t display_event;
291 wait_queue_head_t cursor_event;
292 wait_queue_head_t io_cmd_event;
293 struct work_struct client_monitors_config_work;
294
295 /* debugfs */
296 struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
297 unsigned debugfs_count;
298
299 struct mutex update_area_mutex;
300
301 struct idr surf_id_idr;
302 spinlock_t surf_id_idr_lock;
303 int last_alloced_surf_id;
304
305 struct mutex surf_evict_mutex;
306 struct io_mapping *vram_mapping;
307 struct io_mapping *surface_mapping;
308
310 struct mutex release_mutex;
311 struct qxl_bo *current_release_bo[3];
312 int current_release_bo_offset[3];
313
314 struct workqueue_struct *gc_queue;
315 struct work_struct gc_work;
316
317};
318
319/* forward declaration for QXL_INFO_IO */
320void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
321
322extern struct drm_ioctl_desc qxl_ioctls[];
323extern int qxl_max_ioctls;
324
325int qxl_driver_load(struct drm_device *dev, unsigned long flags);
326int qxl_driver_unload(struct drm_device *dev);
327
328int qxl_modeset_init(struct qxl_device *qdev);
329void qxl_modeset_fini(struct qxl_device *qdev);
330
331int qxl_bo_init(struct qxl_device *qdev);
332void qxl_bo_fini(struct qxl_device *qdev);
333
334struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
335 int element_size,
336 int n_elements,
337 int prod_notify,
338 bool set_prod_notify,
339 wait_queue_head_t *push_event);
340void qxl_ring_free(struct qxl_ring *ring);
341
342static inline void *
343qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
344{
345 QXL_INFO(qdev, "not implemented (%lu)\n", physical);
346 return NULL;
347}
348
349static inline uint64_t
350qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
351 unsigned long offset)
352{
353 int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
354 struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
355
356 /* TODO - need to hold one of the locks to read tbo.offset */
357 return slot->high_bits | (bo->tbo.offset + offset);
358}
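/* Worked example (illustrative values): with slot->high_bits ==
 * 0x0002000000000000, bo->tbo.offset == 0x2000 and offset == 0x10,
 * the returned device-physical address is 0x0002000000002010.
 */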
359
360/* qxl_fb.c */
361#define QXLFB_CONN_LIMIT 1
362
363int qxl_fbdev_init(struct qxl_device *qdev);
364void qxl_fbdev_fini(struct qxl_device *qdev);
365int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
366 struct drm_file *file_priv,
367 uint32_t *handle);
368
369/* qxl_display.c */
370int
371qxl_framebuffer_init(struct drm_device *dev,
372 struct qxl_framebuffer *rfb,
373 struct drm_mode_fb_cmd2 *mode_cmd,
374 struct drm_gem_object *obj);
375void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
376void qxl_send_monitors_config(struct qxl_device *qdev);
377
378/* used by qxl_debugfs only */
379void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
380void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
381
382/* qxl_gem.c */
383int qxl_gem_init(struct qxl_device *qdev);
384void qxl_gem_fini(struct qxl_device *qdev);
385int qxl_gem_object_create(struct qxl_device *qdev, int size,
386 int alignment, int initial_domain,
387 bool discardable, bool kernel,
388 struct qxl_surface *surf,
389 struct drm_gem_object **obj);
390int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
391 uint64_t *gpu_addr);
392void qxl_gem_object_unpin(struct drm_gem_object *obj);
393int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
394 struct drm_file *file_priv,
395 u32 domain,
396 size_t size,
397 struct qxl_surface *surf,
398 struct qxl_bo **qobj,
399 uint32_t *handle);
400int qxl_gem_object_init(struct drm_gem_object *obj);
401void qxl_gem_object_free(struct drm_gem_object *gobj);
402int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
403void qxl_gem_object_close(struct drm_gem_object *obj,
404 struct drm_file *file_priv);
405void qxl_bo_force_delete(struct qxl_device *qdev);
406int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
407
408/* qxl_dumb.c */
409int qxl_mode_dumb_create(struct drm_file *file_priv,
410 struct drm_device *dev,
411 struct drm_mode_create_dumb *args);
412int qxl_mode_dumb_destroy(struct drm_file *file_priv,
413 struct drm_device *dev,
414 uint32_t handle);
415int qxl_mode_dumb_mmap(struct drm_file *filp,
416 struct drm_device *dev,
417 uint32_t handle, uint64_t *offset_p);
418
419
420/* qxl ttm */
421int qxl_ttm_init(struct qxl_device *qdev);
422void qxl_ttm_fini(struct qxl_device *qdev);
423int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
424
425/* qxl image */
426
427int qxl_image_create(struct qxl_device *qdev,
428 struct qxl_release *release,
429 struct qxl_bo **image_bo,
430 const uint8_t *data,
431 int x, int y, int width, int height,
432 int depth, int stride);
433void qxl_update_screen(struct qxl_device *qxl);
434
435/* qxl io operations (qxl_cmd.c) */
436
437void qxl_io_create_primary(struct qxl_device *qdev,
438 unsigned width, unsigned height, unsigned offset,
439 struct qxl_bo *bo);
440void qxl_io_destroy_primary(struct qxl_device *qdev);
441void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
442void qxl_io_notify_oom(struct qxl_device *qdev);
443
444int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
445 const struct qxl_rect *area);
446
447void qxl_io_reset(struct qxl_device *qdev);
448void qxl_io_monitors_config(struct qxl_device *qdev);
449int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
450void qxl_io_flush_release(struct qxl_device *qdev);
451void qxl_io_flush_surfaces(struct qxl_device *qdev);
452
453int qxl_release_reserve(struct qxl_device *qdev,
454 struct qxl_release *release, bool no_wait);
455void qxl_release_unreserve(struct qxl_device *qdev,
456 struct qxl_release *release);
457union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
458 struct qxl_release *release);
459void qxl_release_unmap(struct qxl_device *qdev,
460 struct qxl_release *release,
461 union qxl_release_info *info);
462/* qxl_bo_add_resource */
466void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
467
468int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
469 enum qxl_surface_cmd_type surface_cmd_type,
470 struct qxl_release *create_rel,
471 struct qxl_release **release);
472int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
473 int type, struct qxl_release **release,
474 struct qxl_bo **rbo);
475int qxl_fence_releaseable(struct qxl_device *qdev,
476 struct qxl_release *release);
477int
478qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
479 uint32_t type, bool interruptible);
480int
481qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
482 uint32_t type, bool interruptible);
483int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
484 struct qxl_bo **_bo);
485/* qxl drawing commands */
486
487void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
488 int stride /* filled in if 0 */);
489
490void qxl_draw_dirty_fb(struct qxl_device *qdev,
491 struct qxl_framebuffer *qxl_fb,
492 struct qxl_bo *bo,
493 unsigned flags, unsigned color,
494 struct drm_clip_rect *clips,
495 unsigned num_clips, int inc);
496
497void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
498
499void qxl_draw_copyarea(struct qxl_device *qdev,
500 u32 width, u32 height,
501 u32 sx, u32 sy,
502 u32 dx, u32 dy);
503
504uint64_t
505qxl_release_alloc(struct qxl_device *qdev, int type,
506 struct qxl_release **ret);
507
508void qxl_release_free(struct qxl_device *qdev,
509 struct qxl_release *release);
510void qxl_release_add_res(struct qxl_device *qdev,
511 struct qxl_release *release,
512 struct qxl_bo *bo);
513/* used by qxl_debugfs_release */
514struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
515 uint64_t id);
516
517bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
518int qxl_garbage_collect(struct qxl_device *qdev);
519
520/* debugfs */
521
522int qxl_debugfs_init(struct drm_minor *minor);
523void qxl_debugfs_takedown(struct drm_minor *minor);
524
525/* qxl_irq.c */
526int qxl_irq_init(struct qxl_device *qdev);
527irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
528
529/* qxl_fb.c */
530int qxl_fb_init(struct qxl_device *qdev);
531
532int qxl_debugfs_add_files(struct qxl_device *qdev,
533 struct drm_info_list *files,
534 unsigned nfiles);
535
536int qxl_surface_id_alloc(struct qxl_device *qdev,
537 struct qxl_bo *surf);
538void qxl_surface_id_dealloc(struct qxl_device *qdev,
539 uint32_t surface_id);
540int qxl_hw_surface_alloc(struct qxl_device *qdev,
541 struct qxl_bo *surf,
542 struct ttm_mem_reg *mem);
543int qxl_hw_surface_dealloc(struct qxl_device *qdev,
544 struct qxl_bo *surf);
545
546int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
547
548struct qxl_drv_surface *
549qxl_surface_lookup(struct drm_device *dev, int surface_id);
550void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
551int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
552
553/* qxl_fence.c */
554int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
555int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
556int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
557void qxl_fence_fini(struct qxl_fence *qfence);
558
559#endif
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 000000000000..847c4ee798f7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/* dumb ioctls implementation */
30
31int qxl_mode_dumb_create(struct drm_file *file_priv,
32 struct drm_device *dev,
33 struct drm_mode_create_dumb *args)
34{
35 struct qxl_device *qdev = dev->dev_private;
36 struct qxl_bo *qobj;
37 uint32_t handle;
38 int r;
39 struct qxl_surface surf;
40 uint32_t pitch, format;
41 pitch = args->width * ((args->bpp + 1) / 8);
42 args->size = pitch * args->height;
43 args->size = ALIGN(args->size, PAGE_SIZE);
44
45 switch (args->bpp) {
46 case 16:
47 format = SPICE_SURFACE_FMT_16_565;
48 break;
49 case 32:
50 format = SPICE_SURFACE_FMT_32_xRGB;
51 break;
52 default:
53 return -EINVAL;
54 }
55
56 surf.width = args->width;
57 surf.height = args->height;
58 surf.stride = pitch;
59 surf.format = format;
60 r = qxl_gem_object_create_with_handle(qdev, file_priv,
61 QXL_GEM_DOMAIN_VRAM,
62 args->size, &surf, &qobj,
63 &handle);
64 if (r)
65 return r;
66 args->pitch = pitch;
67 args->handle = handle;
68 return 0;
69}
70
71int qxl_mode_dumb_destroy(struct drm_file *file_priv,
72 struct drm_device *dev,
73 uint32_t handle)
74{
75 return drm_gem_handle_delete(file_priv, handle);
76}
77
78int qxl_mode_dumb_mmap(struct drm_file *file_priv,
79 struct drm_device *dev,
80 uint32_t handle, uint64_t *offset_p)
81{
82 struct drm_gem_object *gobj;
83 struct qxl_bo *qobj;
84
85 BUG_ON(!offset_p);
86 gobj = drm_gem_object_lookup(dev, file_priv, handle);
87 if (gobj == NULL)
88 return -ENOENT;
89 qobj = gem_to_qxl_bo(gobj);
90 *offset_p = qxl_bo_mmap_offset(qobj);
91 drm_gem_object_unreference_unlocked(gobj);
92 return 0;
93}
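/*
 * Editorial note - the three entry points above back the generic DRM dumb
 * buffer ioctls, so userspace never calls them by name. A minimal sketch
 * of a client, assuming a qxl node at /dev/dri/card0 and with all error
 * handling trimmed:
 */

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *fb;

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	/* qxl_mode_dumb_create() */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* qxl_mode_dumb_mmap() */
	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, map.offset);
	memset(fb, 0xff, create.size);	/* paint it white */
	return 0;
}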
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644
index 000000000000..b3c51275df5c
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -0,0 +1,567 @@
1/*
2 * Copyright © 2013 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26#include <linux/module.h>
27#include <linux/fb.h>
28
29#include "drmP.h"
30#include "drm/drm.h"
31#include "drm/drm_crtc.h"
32#include "drm/drm_crtc_helper.h"
33#include "qxl_drv.h"
34
35#include "qxl_object.h"
36#include "drm_fb_helper.h"
37
38#define QXL_DIRTY_DELAY (HZ / 30)
39
40struct qxl_fbdev {
41 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb;
43 struct list_head fbdev_list;
44 struct qxl_device *qdev;
45
46 void *shadow;
47 int size;
48
49 /* dirty memory logging */
50 struct {
51 spinlock_t lock;
52 bool active;
53 unsigned x1;
54 unsigned y1;
55 unsigned x2;
56 unsigned y2;
57 } dirty;
58};
59
60static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
61 struct qxl_device *qdev, struct fb_info *info,
62 const struct fb_image *image)
63{
64 qxl_fb_image->qdev = qdev;
65 if (info) {
66 qxl_fb_image->visual = info->fix.visual;
67 if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
68 qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
69 memcpy(&qxl_fb_image->pseudo_palette,
70 info->pseudo_palette,
71 sizeof(qxl_fb_image->pseudo_palette));
72 } else {
73 /* fallback */
74 if (image->depth == 1)
75 qxl_fb_image->visual = FB_VISUAL_MONO10;
76 else
77 qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
78 }
79 if (image) {
80 memcpy(&qxl_fb_image->fb_image, image,
81 sizeof(qxl_fb_image->fb_image));
82 }
83}
84
85static void qxl_fb_dirty_flush(struct fb_info *info)
86{
87 struct qxl_fbdev *qfbdev = info->par;
88 struct qxl_device *qdev = qfbdev->qdev;
89 struct qxl_fb_image qxl_fb_image;
90 struct fb_image *image = &qxl_fb_image.fb_image;
91 u32 x1, x2, y1, y2;
92
93 /* TODO: hard coding 32 bpp */
94 int stride = qfbdev->qfb.base.pitches[0] * 4;
95
96 x1 = qfbdev->dirty.x1;
97 x2 = qfbdev->dirty.x2;
98 y1 = qfbdev->dirty.y1;
99 y2 = qfbdev->dirty.y2;
100 /*
101 * we are using a shadow draw buffer, at qdev->surface0_shadow
102 */
103 qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
104 image->dx = x1;
105 image->dy = y1;
106 image->width = x2 - x1;
107 image->height = y2 - y1;
108 image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
109 warnings */
110 image->bg_color = 0;
111 image->depth = 32; /* TODO: take from somewhere? */
112 image->cmap.start = 0;
113 image->cmap.len = 0;
114 image->cmap.red = NULL;
115 image->cmap.green = NULL;
116 image->cmap.blue = NULL;
117 image->cmap.transp = NULL;
118 image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
119
120 qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
121 qxl_draw_opaque_fb(&qxl_fb_image, stride);
122 qfbdev->dirty.x1 = 0;
123 qfbdev->dirty.x2 = 0;
124 qfbdev->dirty.y1 = 0;
125 qfbdev->dirty.y2 = 0;
126}
127
128static void qxl_deferred_io(struct fb_info *info,
129 struct list_head *pagelist)
130{
131 struct qxl_fbdev *qfbdev = info->par;
132 unsigned long start, end, min, max;
133 struct page *page;
134 int y1, y2;
135
136 min = ULONG_MAX;
137 max = 0;
138 list_for_each_entry(page, pagelist, lru) {
139 start = page->index << PAGE_SHIFT;
140 end = start + PAGE_SIZE - 1;
141 min = min(min, start);
142 max = max(max, end);
143 }
144
145 if (min < max) {
146 y1 = min / info->fix.line_length;
147 y2 = (max / info->fix.line_length) + 1;
148
149 /* TODO: add spin lock? */
150 /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
151 qfbdev->dirty.x1 = 0;
152 qfbdev->dirty.y1 = y1;
153 qfbdev->dirty.x2 = info->var.xres;
154 qfbdev->dirty.y2 = y2;
155 /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
156 }
157
158 qxl_fb_dirty_flush(info);
159}
160
161
162static struct fb_deferred_io qxl_defio = {
163 .delay = QXL_DIRTY_DELAY,
164 .deferred_io = qxl_deferred_io,
165};
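/*
 * Editorial note on the flow wired up here: fbcon and mmap() writes land
 * in the vmalloc'ed shadow buffer (info->screen_base below), not in the
 * bo. fb_deferred_io collects the pages touched through the mapping and,
 * QXL_DIRTY_DELAY (HZ / 30, ~33ms) later, hands them to qxl_deferred_io()
 * above, which widens them into a full-width band [y1, y2) and pushes it
 * to the device via qxl_fb_dirty_flush() -> qxl_draw_opaque_fb().
 */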
166
167static void qxl_fb_fillrect(struct fb_info *info,
168 const struct fb_fillrect *fb_rect)
169{
170 struct qxl_fbdev *qfbdev = info->par;
171 struct qxl_device *qdev = qfbdev->qdev;
172 struct qxl_rect rect;
173 uint32_t color;
174 int x = fb_rect->dx;
175 int y = fb_rect->dy;
176 int width = fb_rect->width;
177 int height = fb_rect->height;
178 uint16_t rop;
179 struct qxl_draw_fill qxl_draw_fill_rec;
180
181 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
182 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
183 color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
184 else
185 color = fb_rect->color;
186 rect.left = x;
187 rect.right = x + width;
188 rect.top = y;
189 rect.bottom = y + height;
190 switch (fb_rect->rop) {
191 case ROP_XOR:
192 rop = SPICE_ROPD_OP_XOR;
193 break;
194 case ROP_COPY:
195 rop = SPICE_ROPD_OP_PUT;
196 break;
197 default:
198 pr_err("qxl_fb_fillrect(): unknown rop, "
199 "defaulting to SPICE_ROPD_OP_PUT\n");
200 rop = SPICE_ROPD_OP_PUT;
201 }
202 qxl_draw_fill_rec.qdev = qdev;
203 qxl_draw_fill_rec.rect = rect;
204 qxl_draw_fill_rec.color = color;
205 qxl_draw_fill_rec.rop = rop;
206 if (!drm_can_sleep()) {
207 qxl_io_log(qdev,
208 "%s: TODO use RCU, mysterious locks with spin_lock\n",
209 __func__);
210 return;
211 }
212 qxl_draw_fill(&qxl_draw_fill_rec);
213}
214
215static void qxl_fb_copyarea(struct fb_info *info,
216 const struct fb_copyarea *region)
217{
218 struct qxl_fbdev *qfbdev = info->par;
219
220 qxl_draw_copyarea(qfbdev->qdev,
221 region->width, region->height,
222 region->sx, region->sy,
223 region->dx, region->dy);
224}
225
226static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
227{
228 qxl_draw_opaque_fb(qxl_fb_image, 0);
229}
230
231static void qxl_fb_imageblit(struct fb_info *info,
232 const struct fb_image *image)
233{
234 struct qxl_fbdev *qfbdev = info->par;
235 struct qxl_device *qdev = qfbdev->qdev;
236 struct qxl_fb_image qxl_fb_image;
237
238 if (!drm_can_sleep()) {
239 /* we cannot do any ttm_bo allocation since that will fail on
240 * ioremap_wc..__get_vm_area_node, so queue the work item
241 * instead. This can happen from printk inside an interrupt
242 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
243 qxl_io_log(qdev,
244 "%s: TODO use RCU, mysterious locks with spin_lock\n",
245 __func__);
246 return;
247 }
248
249 /* ensure proper order of rendering operations - TODO: must do this
250 * for everything. */
251 qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
252 qxl_fb_imageblit_safe(&qxl_fb_image);
253}
254
255int qxl_fb_init(struct qxl_device *qdev)
256{
257 return 0;
258}
259
260static struct fb_ops qxlfb_ops = {
261 .owner = THIS_MODULE,
262 .fb_check_var = drm_fb_helper_check_var,
263 .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
264 .fb_fillrect = qxl_fb_fillrect,
265 .fb_copyarea = qxl_fb_copyarea,
266 .fb_imageblit = qxl_fb_imageblit,
267 .fb_pan_display = drm_fb_helper_pan_display,
268 .fb_blank = drm_fb_helper_blank,
269 .fb_setcmap = drm_fb_helper_setcmap,
270 .fb_debug_enter = drm_fb_helper_debug_enter,
271 .fb_debug_leave = drm_fb_helper_debug_leave,
272};
273
274static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
275{
276 struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
277 int ret;
278
279 ret = qxl_bo_reserve(qbo, false);
280 if (likely(ret == 0)) {
281 qxl_bo_kunmap(qbo);
282 qxl_bo_unpin(qbo);
283 qxl_bo_unreserve(qbo);
284 }
285 drm_gem_object_unreference_unlocked(gobj);
286}
287
288int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
289 struct drm_file *file_priv,
290 uint32_t *handle)
291{
292 int r;
293 struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
294
295 BUG_ON(!gobj);
296 /* drm_gem_handle_create adds a reference - good */
297 r = drm_gem_handle_create(file_priv, gobj, handle);
298 if (r)
299 return r;
300 return 0;
301}
302
303static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
304 struct drm_mode_fb_cmd2 *mode_cmd,
305 struct drm_gem_object **gobj_p)
306{
307 struct qxl_device *qdev = qfbdev->qdev;
308 struct drm_gem_object *gobj = NULL;
309 struct qxl_bo *qbo = NULL;
310 int ret;
311 int aligned_size, size;
312 int height = mode_cmd->height;
313 int bpp;
314 int depth;
315
316 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
317
318 size = mode_cmd->pitches[0] * height;
319 aligned_size = ALIGN(size, PAGE_SIZE);
320 /* TODO: deallocate and reallocate surface0 for real. Hack to just
321 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
322 ret = qxl_gem_object_create(qdev, aligned_size, 0,
323 QXL_GEM_DOMAIN_SURFACE,
324 false, /* is discardable */
325 false, /* is kernel (false means device) */
326 NULL,
327 &gobj);
328 if (ret) {
329 pr_err("failed to allocate framebuffer (%d)\n",
330 aligned_size);
331 return -ENOMEM;
332 }
333 qbo = gem_to_qxl_bo(gobj);
334
335 qbo->surf.width = mode_cmd->width;
336 qbo->surf.height = mode_cmd->height;
337 qbo->surf.stride = mode_cmd->pitches[0];
338 qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
339 ret = qxl_bo_reserve(qbo, false);
340 if (unlikely(ret != 0))
341 goto out_unref;
342 ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
343 if (ret) {
344 qxl_bo_unreserve(qbo);
345 goto out_unref;
346 }
347 ret = qxl_bo_kmap(qbo, NULL);
348 qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
349 if (ret)
350 goto out_unref;
351
352 *gobj_p = gobj;
353 return 0;
354out_unref:
355 qxlfb_destroy_pinned_object(gobj);
356 *gobj_p = NULL;
357 return ret;
358}
359
360static int qxlfb_create(struct qxl_fbdev *qfbdev,
361 struct drm_fb_helper_surface_size *sizes)
362{
363 struct qxl_device *qdev = qfbdev->qdev;
364 struct fb_info *info;
365 struct drm_framebuffer *fb = NULL;
366 struct drm_mode_fb_cmd2 mode_cmd;
367 struct drm_gem_object *gobj = NULL;
368 struct qxl_bo *qbo = NULL;
369 struct device *device = &qdev->pdev->dev;
370 int ret;
371 int size;
372 int bpp = sizes->surface_bpp;
373 int depth = sizes->surface_depth;
374 void *shadow;
375
376 mode_cmd.width = sizes->surface_width;
377 mode_cmd.height = sizes->surface_height;
378
379 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
380 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
381
382 ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
 if (ret)
 return ret;
383 qbo = gem_to_qxl_bo(gobj);
384 QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
385 mode_cmd.height, mode_cmd.pitches[0]);
386
387 shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
388 /* TODO: what's the usual response to memory allocation errors? */
389 BUG_ON(!shadow);
390 QXL_INFO(qdev,
391 "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
392 qxl_bo_gpu_offset(qbo),
393 qxl_bo_mmap_offset(qbo),
394 qbo->kptr,
395 shadow);
396 size = mode_cmd.pitches[0] * mode_cmd.height;
397
398 info = framebuffer_alloc(0, device);
399 if (info == NULL) {
400 ret = -ENOMEM;
401 goto out_unref;
402 }
403
404 info->par = qfbdev;
405
406 qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
407
408 fb = &qfbdev->qfb.base;
409
410 /* setup helper with fb data */
411 qfbdev->helper.fb = fb;
412 qfbdev->helper.fbdev = info;
413 qfbdev->shadow = shadow;
414 strcpy(info->fix.id, "qxldrmfb");
415
416 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
417
418 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
419 info->fbops = &qxlfb_ops;
420
421 /*
422 * TODO: using gobj->size in various places in this function. Not sure
423 * what the difference between the different sizes is.
424 */
425 info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
426 info->fix.smem_len = gobj->size;
427 info->screen_base = qfbdev->shadow;
428 info->screen_size = gobj->size;
429
430 drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
431 sizes->fb_height);
432
433 /* setup aperture base/size for vesafb takeover */
434 info->apertures = alloc_apertures(1);
435 if (!info->apertures) {
436 ret = -ENOMEM;
437 goto out_unref;
438 }
439 info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
440 info->apertures->ranges[0].size = qdev->vram_size;
441
442 info->fix.mmio_start = 0;
443 info->fix.mmio_len = 0;
444
445 if (info->screen_base == NULL) {
446 ret = -ENOSPC;
447 goto out_unref;
448 }
449
450 ret = fb_alloc_cmap(&info->cmap, 256, 0);
451 if (ret) {
452 ret = -ENOMEM;
453 goto out_unref;
454 }
455
456 info->fbdefio = &qxl_defio;
457 fb_deferred_io_init(info);
458
459 qdev->fbdev_info = info;
460 qdev->fbdev_qfb = &qfbdev->qfb;
461 DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
462 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
463 return 0;
464
465out_unref:
466 if (qbo) {
467 ret = qxl_bo_reserve(qbo, false);
468 if (likely(ret == 0)) {
469 qxl_bo_kunmap(qbo);
470 qxl_bo_unpin(qbo);
471 qxl_bo_unreserve(qbo);
472 }
473 }
474 if (fb && ret) {
475 drm_gem_object_unreference(gobj);
476 drm_framebuffer_cleanup(fb);
477 kfree(fb);
478 }
479 drm_gem_object_unreference(gobj);
480 return ret;
481}
482
483static int qxl_fb_find_or_create_single(
484 struct drm_fb_helper *helper,
485 struct drm_fb_helper_surface_size *sizes)
486{
487 struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
488 int new_fb = 0;
489 int ret;
490
491 if (!helper->fb) {
492 ret = qxlfb_create(qfbdev, sizes);
493 if (ret)
494 return ret;
495 new_fb = 1;
496 }
497 return new_fb;
498}
499
500static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
501{
502 struct fb_info *info;
503 struct qxl_framebuffer *qfb = &qfbdev->qfb;
504
505 if (qfbdev->helper.fbdev) {
506 info = qfbdev->helper.fbdev;
507
508 unregister_framebuffer(info);
509 framebuffer_release(info);
510 }
511 if (qfb->obj) {
512 qxlfb_destroy_pinned_object(qfb->obj);
513 qfb->obj = NULL;
514 }
515 drm_fb_helper_fini(&qfbdev->helper);
516 vfree(qfbdev->shadow);
517 drm_framebuffer_cleanup(&qfb->base);
518
519 return 0;
520}
521
522static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
523 /* TODO
524 .gamma_set = qxl_crtc_fb_gamma_set,
525 .gamma_get = qxl_crtc_fb_gamma_get,
526 */
527 .fb_probe = qxl_fb_find_or_create_single,
528};
529
530int qxl_fbdev_init(struct qxl_device *qdev)
531{
532 struct qxl_fbdev *qfbdev;
533 int bpp_sel = 32; /* TODO: parameter from somewhere? */
534 int ret;
535
536 qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
537 if (!qfbdev)
538 return -ENOMEM;
539
540 qfbdev->qdev = qdev;
541 qdev->mode_info.qfbdev = qfbdev;
542 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
543
544 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
545 1 /* num_crtc - QXL supports just 1 */,
546 QXLFB_CONN_LIMIT);
547 if (ret) {
548 kfree(qfbdev);
549 return ret;
550 }
551
552 drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
553 drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
554 return 0;
555}
556
557void qxl_fbdev_fini(struct qxl_device *qdev)
558{
559 if (!qdev->mode_info.qfbdev)
560 return;
561
562 qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
563 kfree(qdev->mode_info.qfbdev);
564 qdev->mode_info.qfbdev = NULL;
565}
566
567
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644
index 000000000000..63c6715ad385
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include "qxl_drv.h"
28
29/* QXL fencing -
30
31 When we submit operations to the GPU we pass a release reference along
32 with them; the release reference is then added to the release ring when
33 the GPU is finished with that particular operation and has removed it from
34 its tree.
35
36 So we can have multiple outstanding non-linear fences per object.
37
38 From a TTM POV we only care whether the object has any outstanding
39 releases on it.
40
41 We wait until all outstanding releases are processed.
42
43 The sync object is just a list of release ids that represent the fences on
44 that buffer.
45
46 We just add new releases onto the sync object attached to the object.
47
48 This currently uses a radix tree to store the list of release ids.
49
50 Every so often the qxl hw fails to release, and then things go wrong.
51*/
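/* Editorial sketch of the lifecycle implied above (not new API):
 *
 *   qxl_fence_init()           - at bo creation, starts with an empty
 *                                radix tree
 *   qxl_fence_add_release()    - a command referencing the bo was pushed,
 *                                tagged with a release id
 *   qxl_fence_remove_release() - the garbage collector saw that id come
 *                                back on the release ring
 *
 * so TTM only has to test num_active_releases == 0 to treat the bo as
 * idle.
 */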
52
53
54int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
55{
56 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
57
58 spin_lock(&bo->tbo.bdev->fence_lock);
59 radix_tree_insert(&qfence->tree, rel_id, qfence);
60 qfence->num_active_releases++;
61 spin_unlock(&bo->tbo.bdev->fence_lock);
62 return 0;
63}
64
65int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
66{
67 void *ret;
68 int retval = 0;
69 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
70
71 spin_lock(&bo->tbo.bdev->fence_lock);
72
73 ret = radix_tree_delete(&qfence->tree, rel_id);
74 if (ret == qfence)
75 qfence->num_active_releases--;
76 else {
77 DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
78 retval = -ENOENT;
79 }
80 spin_unlock(&bo->tbo.bdev->fence_lock);
81 return retval;
82}
83
84
85int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
86{
87 qfence->qdev = qdev;
88 qfence->num_active_releases = 0;
89 INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
90 return 0;
91}
92
93void qxl_fence_fini(struct qxl_fence *qfence)
94{
95 kfree(qfence->release_ids);
96 qfence->num_active_releases = 0;
97}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 000000000000..a235693aabba
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "drmP.h"
27#include "drm/drm.h"
28#include "qxl_drv.h"
29#include "qxl_object.h"
30
31int qxl_gem_object_init(struct drm_gem_object *obj)
32{
33 /* we do nothing here */
34 return 0;
35}
36
37void qxl_gem_object_free(struct drm_gem_object *gobj)
38{
39 struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
40
41 if (qobj)
42 qxl_bo_unref(&qobj);
43}
44
45int qxl_gem_object_create(struct qxl_device *qdev, int size,
46 int alignment, int initial_domain,
47 bool discardable, bool kernel,
48 struct qxl_surface *surf,
49 struct drm_gem_object **obj)
50{
51 struct qxl_bo *qbo;
52 int r;
53
54 *obj = NULL;
55 /* At least align on page size */
56 if (alignment < PAGE_SIZE)
57 alignment = PAGE_SIZE;
58 r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
59 if (r) {
60 if (r != -ERESTARTSYS)
61 DRM_ERROR(
62 "Failed to allocate GEM object (%d, %d, %u, %d)\n",
63 size, initial_domain, alignment, r);
64 return r;
65 }
66 *obj = &qbo->gem_base;
67
68 mutex_lock(&qdev->gem.mutex);
69 list_add_tail(&qbo->list, &qdev->gem.objects);
70 mutex_unlock(&qdev->gem.mutex);
71
72 return 0;
73}
74
75int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
76 struct drm_file *file_priv,
77 u32 domain,
78 size_t size,
79 struct qxl_surface *surf,
80 struct qxl_bo **qobj,
81 uint32_t *handle)
82{
83 struct drm_gem_object *gobj;
84 int r;
85
86 BUG_ON(!qobj);
87 BUG_ON(!handle);
88
89 r = qxl_gem_object_create(qdev, size, 0,
90 domain,
91 false, false, surf,
92 &gobj);
93 if (r)
94 return r;
95 r = drm_gem_handle_create(file_priv, gobj, handle);
96 if (r)
97 return r;
98 /* drop reference from allocate - handle holds it now */
99 *qobj = gem_to_qxl_bo(gobj);
100 drm_gem_object_unreference_unlocked(gobj);
101 return 0;
102}
103
104int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
105 uint64_t *gpu_addr)
106{
107 struct qxl_bo *qobj = obj->driver_private;
108 int r;
109
110 r = qxl_bo_reserve(qobj, false);
111 if (unlikely(r != 0))
112 return r;
113 r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
114 qxl_bo_unreserve(qobj);
115 return r;
116}
117
118void qxl_gem_object_unpin(struct drm_gem_object *obj)
119{
120 struct qxl_bo *qobj = obj->driver_private;
121 int r;
122
123 r = qxl_bo_reserve(qobj, false);
124 if (likely(r == 0)) {
125 qxl_bo_unpin(qobj);
126 qxl_bo_unreserve(qobj);
127 }
128}
129
130int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
131{
132 return 0;
133}
134
135void qxl_gem_object_close(struct drm_gem_object *obj,
136 struct drm_file *file_priv)
137{
138}
139
140int qxl_gem_init(struct qxl_device *qdev)
141{
142 INIT_LIST_HEAD(&qdev->gem.objects);
143 return 0;
144}
145
146void qxl_gem_fini(struct qxl_device *qdev)
147{
148 qxl_bo_force_delete(qdev);
149}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 000000000000..cf856206996b
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <linux/gfp.h>
27#include <linux/slab.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
32static int
33qxl_image_create_helper(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_bo **image_bo,
36 const uint8_t *data,
37 int width, int height,
38 int depth, unsigned int hash,
39 int stride)
40{
41 struct qxl_image *image;
42 struct qxl_data_chunk *chunk;
43 int i;
44 int chunk_stride;
45 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo;
47 int ret;
48 void *ptr;
49 /* Chunk */
50 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */
52 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly
54 first) */
55 ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
56 &chunk_bo);
 if (ret)
 return ret;
57
58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr;
60 chunk->data_size = height * chunk_stride;
61 chunk->prev_chunk = 0;
62 chunk->next_chunk = 0;
63 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
64
65 {
66 void *k_data, *i_data;
67 int remain;
68 int page;
69 int size;
70 if (stride == linesize && chunk_stride == stride) {
71 remain = linesize * height;
72 page = 0;
73 i_data = (void *)data;
74
75 while (remain > 0) {
76 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
77
78 if (page == 0) {
79 chunk = ptr;
80 k_data = chunk->data;
81 size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
82 } else {
83 k_data = ptr;
84 size = PAGE_SIZE;
85 }
86 size = min(size, remain);
87
88 memcpy(k_data, i_data, size);
89
90 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
91 i_data += size;
92 remain -= size;
93 page++;
94 }
95 } else {
96 unsigned page_base, page_offset, out_offset;
97 for (i = 0 ; i < height ; ++i) {
98 i_data = (void *)data + i * stride;
99 remain = linesize;
100 out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
101
102 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain);
107
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
109 k_data = ptr + page_offset;
110 memcpy(k_data, i_data, size);
111 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
112 remain -= size;
113 i_data += size;
114 out_offset += size;
115 }
116 }
117 }
118 }
119
120
121 qxl_bo_kunmap(chunk_bo);
122
123 /* Image */
124 ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
125
126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr;
128
129 image->descriptor.id = 0;
130 image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
131
132 image->descriptor.flags = 0;
133 image->descriptor.width = width;
134 image->descriptor.height = height;
135
136 switch (depth) {
137 case 1:
138 /* TODO: BE? check by arch? */
139 image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
140 break;
141 case 24:
142 image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
143 break;
144 case 32:
145 image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
146 break;
147 default:
148 DRM_ERROR("unsupported image bit depth\n");
149 return -EINVAL; /* TODO: cleanup */
150 }
151 image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
152 image->u.bitmap.x = width;
153 image->u.bitmap.y = height;
154 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
162
163 return 0;
164}
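/*
 * Editorial example of the two copy paths above: a width = 100, depth = 32
 * image has linesize = 100 * 32 / 8 = 400 bytes. With stride = 400 the
 * fast path streams linesize * height bytes into the chunk page by page.
 * With padded rows, say stride = 512, the slow path copies the 400 useful
 * bytes of each row on its own, splitting a row wherever it straddles a
 * page boundary in the chunk bo; rows stay chunk_stride (here 512) bytes
 * apart, matching the image->u.bitmap.stride set above.
 */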
165
166int qxl_image_create(struct qxl_device *qdev,
167 struct qxl_release *release,
168 struct qxl_bo **image_bo,
169 const uint8_t *data,
170 int x, int y, int width, int height,
171 int depth, int stride)
172{
173 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data,
175 width, height, depth, 0, stride);
176}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 000000000000..6db7370373ea
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/*
30 * TODO: we allocate a new gem (in a qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
32 */
33static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 struct drm_file *file_priv)
35{
36 struct qxl_device *qdev = dev->dev_private;
37 struct drm_qxl_alloc *qxl_alloc = data;
38 int ret;
39 struct qxl_bo *qobj;
40 uint32_t handle;
41 u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43 if (qxl_alloc->size == 0) {
44 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45 return -EINVAL;
46 }
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 domain,
49 qxl_alloc->size,
50 NULL,
51 &qobj, &handle);
52 if (ret) {
53 DRM_ERROR("%s: failed to create gem ret=%d\n",
54 __func__, ret);
55 return ret;
56 }
57 qxl_alloc->handle = handle;
58 return 0;
59}
60
61static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file_priv)
63{
64 struct qxl_device *qdev = dev->dev_private;
65 struct drm_qxl_map *qxl_map = data;
66
67 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 &qxl_map->offset);
69}
70
71/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */
76static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
78 struct qxl_bo *src, uint64_t src_off)
79{
80 void *reloc_page;
81
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
84 src, src_off);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
86}
87
88static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
90 struct qxl_bo *src)
91{
92 uint32_t id = 0;
93 void *reloc_page;
94
95 if (src && !src->is_primary)
96 id = src->surface_id;
97
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
101}
102
103/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list)
107{
108 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj;
110 int ret;
111
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) {
114 DRM_ERROR("bad bo handle %lld\n", handle);
115 return NULL;
116 }
117 qobj = gem_to_qxl_bo(gobj);
118
119 ret = qxl_bo_list_add(reloc_list, qobj);
120 if (ret)
121 return NULL;
122
123 return qobj;
124}
125
126/*
127 * Usage of execbuffer:
128 * Relocations need to take into account the full QXLDrawable size.
129 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes)
131 */
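/* Editorial sketch of one command bo as assembled below:
 *
 *   release_offset -> +---------------------------+
 *                     | union qxl_release_info    | filled in by the
 *                     +---------------------------+ release machinery
 *                     | user command bytes        | copied from
 *                     | (user_cmd.command_size)   | user_cmd.command
 *                     +---------------------------+
 *
 * reloc.dst_offset is relative to release_offset, i.e. it spans the whole
 * drawable including the release info header - which is what the comment
 * above means by "the full QXLDrawable size".
 */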
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv)
134{
135 struct qxl_device *qdev = dev->dev_private;
136 struct drm_qxl_execbuffer *execbuffer = data;
137 struct drm_qxl_command user_cmd;
138 int cmd_num;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd;
143 int i, ret;
144 struct qxl_reloc_list reloc_list;
145 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
150 struct qxl_release *release;
151 struct qxl_bo *cmd_bo;
152 int release_type;
153 struct drm_qxl_command *commands =
154 (struct drm_qxl_command *)execbuffer->commands;
155
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
157 sizeof(user_cmd)))
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
169 }
170
171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
172 return -EINVAL;
173
174 ret = qxl_alloc_release_reserved(qdev,
175 sizeof(union qxl_release_info) +
176 user_cmd.command_size,
177 release_type,
178 &release,
179 &cmd_bo);
180 if (ret)
181 return ret;
182
183 /* TODO copy slow path code from i915 */
184 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
185 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
186 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
187 if (unwritten) {
188 DRM_ERROR("got unwritten %d\n", unwritten);
189 qxl_release_unreserve(qdev, release);
190 qxl_release_free(qdev, release);
191 return -EFAULT;
192 }
193
194 for (i = 0 ; i < user_cmd.relocs_num; ++i) {
195 if (DRM_COPY_FROM_USER(&reloc,
196 &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
197 sizeof(reloc))) {
198 qxl_bo_list_unreserve(&reloc_list, true);
199 qxl_release_unreserve(qdev, release);
200 qxl_release_free(qdev, release);
201 return -EFAULT;
202 }
203
204 /* add the bos to the list of bos to validate -
205 need to validate first then process relocs? */
206 if (reloc.dst_handle) {
207 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
208 reloc.dst_handle, &reloc_list);
209 if (!reloc_dst_bo) {
210 qxl_bo_list_unreserve(&reloc_list, true);
211 qxl_release_unreserve(qdev, release);
212 qxl_release_free(qdev, release);
213 return -EINVAL;
214 }
215 reloc_dst_offset = 0;
216 } else {
217 reloc_dst_bo = cmd_bo;
218 reloc_dst_offset = release->release_offset;
219 }
220
221 /* reserve and validate the reloc dst bo */
222 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
223 reloc_src_bo =
224 qxlhw_handle_to_bo(qdev, file_priv,
225 reloc.src_handle, &reloc_list);
226 if (!reloc_src_bo) {
227 if (reloc_dst_bo != cmd_bo)
228 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
229 qxl_bo_list_unreserve(&reloc_list, true);
230 qxl_release_unreserve(qdev, release);
231 qxl_release_free(qdev, release);
232 return -EINVAL;
233 }
234 } else
235 reloc_src_bo = NULL;
236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
237 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
238 reloc_src_bo, reloc.src_offset);
239 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
240 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
241 } else {
242 DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
243 return -EINVAL;
244 }
245
246 if (reloc_src_bo && reloc_src_bo != cmd_bo) {
247 qxl_release_add_res(qdev, release, reloc_src_bo);
248 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
249 }
250
251 if (reloc_dst_bo != cmd_bo)
252 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
253 }
254 qxl_fence_releaseable(qdev, release);
255
256 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
257 if (ret == -ERESTARTSYS) {
258 qxl_release_unreserve(qdev, release);
259 qxl_release_free(qdev, release);
260 qxl_bo_list_unreserve(&reloc_list, true);
261 return ret;
262 }
263 qxl_release_unreserve(qdev, release);
264 }
265 qxl_bo_list_unreserve(&reloc_list, false);
266 return 0;
267}
268
269static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *file)
271{
272 struct qxl_device *qdev = dev->dev_private;
273 struct drm_qxl_update_area *update_area = data;
274 struct qxl_rect area = {.left = update_area->left,
275 .top = update_area->top,
276 .right = update_area->right,
277 .bottom = update_area->bottom};
278 int ret;
279 struct drm_gem_object *gobj = NULL;
280 struct qxl_bo *qobj = NULL;
281
282 if (update_area->left >= update_area->right ||
283 update_area->top >= update_area->bottom)
284 return -EINVAL;
285
286 gobj = drm_gem_object_lookup(dev, file, update_area->handle);
287 if (gobj == NULL)
288 return -ENOENT;
289
290 qobj = gem_to_qxl_bo(gobj);
291
292 ret = qxl_bo_reserve(qobj, false);
293 if (ret)
294 goto out;
295
296 if (!qobj->pin_count) {
297 qxl_ttm_placement_from_domain(qobj, qobj->type);
298 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
299 true, false);
300 if (unlikely(ret))
301 goto out;
302 }
303
304 ret = qxl_bo_check_id(qdev, qobj);
305 if (ret)
306 goto out2;
307 if (!qobj->surface_id)
308 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
309 ret = qxl_io_update_area(qdev, qobj, &area);
310
311out2:
312 qxl_bo_unreserve(qobj);
313
314out:
315 drm_gem_object_unreference_unlocked(gobj);
316 return ret;
317}
318
319static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
320 struct drm_file *file_priv)
321{
322 struct qxl_device *qdev = dev->dev_private;
323 struct drm_qxl_getparam *param = data;
324
325 switch (param->param) {
326 case QXL_PARAM_NUM_SURFACES:
327 param->value = qdev->rom->n_surfaces;
328 break;
329 case QXL_PARAM_MAX_RELOCS:
330 param->value = QXL_MAX_RES;
331 break;
332 default:
333 return -EINVAL;
334 }
335 return 0;
336}
337
338static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
339 struct drm_file *file_priv)
340{
341 struct qxl_device *qdev = dev->dev_private;
342 struct drm_qxl_clientcap *param = data;
343 int byte, idx;
344
345 byte = param->index / 8;
346 idx = param->index % 8;
347
348 if (qdev->pdev->revision < 4)
349 return -ENOSYS;
350
351 if (byte >= 58)
352 return -ENOSYS;
353
354 if (qdev->rom->client_capabilities[byte] & (1 << idx))
355 return 0;
356 return -ENOSYS;
357}
358
359static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
360 struct drm_file *file)
361{
362 struct qxl_device *qdev = dev->dev_private;
363 struct drm_qxl_alloc_surf *param = data;
364 struct qxl_bo *qobj;
365 int handle;
366 int ret;
367 int size, actual_stride;
368 struct qxl_surface surf;
369
370 /* work out the size, then allocate a bo with a handle */
371 actual_stride = param->stride < 0 ? -param->stride : param->stride;
372 size = actual_stride * param->height + actual_stride;
373
374 surf.format = param->format;
375 surf.width = param->width;
376 surf.height = param->height;
377 surf.stride = param->stride;
378 surf.data = 0;
379
380 ret = qxl_gem_object_create_with_handle(qdev, file,
381 QXL_GEM_DOMAIN_SURFACE,
382 size,
383 &surf,
384 &qobj, &handle);
385 if (ret) {
386 DRM_ERROR("%s: failed to create gem ret=%d\n",
387 __func__, ret);
388 return ret;
389 }
390 param->handle = handle;
391 return 0;
392}
393
394struct drm_ioctl_desc qxl_ioctls[] = {
395 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
396
397 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
398
399 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
400 DRM_AUTH|DRM_UNLOCKED),
401 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
402 DRM_AUTH|DRM_UNLOCKED),
403 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
404 DRM_AUTH|DRM_UNLOCKED),
405 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
406 DRM_AUTH|DRM_UNLOCKED),
407
408 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
409 DRM_AUTH|DRM_UNLOCKED),
410};
411
412int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
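/*
 * Editorial note - userspace reaches these private ioctls through the
 * libdrm command helpers. A minimal hedged sketch, assuming the
 * DRM_QXL_GETPARAM index and the struct drm_qxl_getparam layout from the
 * uapi header qxl_drm.h, plus a qxl node at /dev/dri/card0:
 */

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/qxl_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_qxl_getparam gp = { .param = QXL_PARAM_NUM_SURFACES };

	/* routed to qxl_getparam_ioctl() above */
	if (drmCommandWriteRead(fd, DRM_QXL_GETPARAM, &gp, sizeof(gp)) == 0)
		printf("surfaces: %llu\n", (unsigned long long)gp.value);
	return 0;
}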
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 000000000000..21393dc4700a
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27
28irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
29{
30 struct drm_device *dev = (struct drm_device *) arg;
31 struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
32 uint32_t pending;
33
34 pending = xchg(&qdev->ram_header->int_pending, 0);
35
36 atomic_inc(&qdev->irq_received);
37
38 if (pending & QXL_INTERRUPT_DISPLAY) {
39 atomic_inc(&qdev->irq_received_display);
40 wake_up_all(&qdev->display_event);
41 qxl_queue_garbage_collect(qdev, false);
42 }
43 if (pending & QXL_INTERRUPT_CURSOR) {
44 atomic_inc(&qdev->irq_received_cursor);
45 wake_up_all(&qdev->cursor_event);
46 }
47 if (pending & QXL_INTERRUPT_IO_CMD) {
48 atomic_inc(&qdev->irq_received_io_cmd);
49 wake_up_all(&qdev->io_cmd_event);
50 }
51 if (pending & QXL_INTERRUPT_ERROR) {
52 /* TODO: log it, reset device (only way to exit this condition)
53 * (do it a certain number of times, afterwards admit defeat,
54 * to avoid endless loops).
55 */
56 qdev->irq_received_error++;
57 qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
58 }
59 if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
60 qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
61 schedule_work(&qdev->client_monitors_config_work);
62 }
63 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
64 outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
65 return IRQ_HANDLED;
66}
67
68static void qxl_client_monitors_config_work_func(struct work_struct *work)
69{
70 struct qxl_device *qdev = container_of(work, struct qxl_device,
71 client_monitors_config_work);
72
73 qxl_display_read_client_monitors_config(qdev);
74}
75
76int qxl_irq_init(struct qxl_device *qdev)
77{
78 int ret;
79
80 init_waitqueue_head(&qdev->display_event);
81 init_waitqueue_head(&qdev->cursor_event);
82 init_waitqueue_head(&qdev->io_cmd_event);
83 INIT_WORK(&qdev->client_monitors_config_work,
84 qxl_client_monitors_config_work_func);
85 atomic_set(&qdev->irq_received, 0);
86 atomic_set(&qdev->irq_received_display, 0);
87 atomic_set(&qdev->irq_received_cursor, 0);
88 atomic_set(&qdev->irq_received_io_cmd, 0);
89 qdev->irq_received_error = 0;
90 ret = drm_irq_install(qdev->ddev);
91 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
92 if (unlikely(ret != 0)) {
93 DRM_ERROR("Failed installing irq: %d\n", ret);
94 return ret;
95 }
96 return 0;
97}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 000000000000..85127ed24cfd
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,302 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29#include <linux/io-mapping.h>
30
31int qxl_log_level;
32
33static void qxl_dump_mode(struct qxl_device *qdev, void *p)
34{
35 struct qxl_mode *m = p;
36 DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
37 m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
38 m->y_mili, m->orientation);
39}
40
41static bool qxl_check_device(struct qxl_device *qdev)
42{
43 struct qxl_rom *rom = qdev->rom;
44 int mode_offset;
45 int i;
46
47 if (rom->magic != 0x4f525851) {
48 DRM_ERROR("bad rom signature %x\n", rom->magic);
49 return false;
50 }
51
52 DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
53 DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
54 rom->log_level);
55 DRM_INFO("Currently using mode #%d, list at 0x%x\n",
56 rom->mode, rom->modes_offset);
57 DRM_INFO("%d io pages at offset 0x%x\n",
58 rom->num_io_pages, rom->pages_offset);
59 DRM_INFO("%d byte draw area at offset 0x%x\n",
60 rom->surface0_area_size, rom->draw_area_offset);
61
62 qdev->vram_size = rom->surface0_area_size;
63 DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
64
65 mode_offset = rom->modes_offset / 4;
66 qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
67 DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
68 qdev->mode_info.num_modes);
69 qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
70 for (i = 0; i < qdev->mode_info.num_modes; i++)
71 qxl_dump_mode(qdev, qdev->mode_info.modes + i);
72 return true;
73}
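/*
 * Editorial note - the rom mode list parsed above is a bare count followed
 * by the array:
 *
 *   rom + modes_offset     : u32 num_modes
 *   rom + modes_offset + 4 : struct qxl_mode modes[num_modes]
 *
 * hence num_modes is read modes_offset / 4 u32s into the rom and the array
 * pointer starts one u32 later.
 */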
74
75static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
76 unsigned long start_phys_addr, unsigned long end_phys_addr)
77{
78 uint64_t high_bits;
79 struct qxl_memslot *slot;
80 uint8_t slot_index;
81 struct qxl_ram_header *ram_header = qdev->ram_header;
82
83 slot_index = qdev->rom->slots_start + slot_index_offset;
84 slot = &qdev->mem_slots[slot_index];
85 slot->start_phys_addr = start_phys_addr;
86 slot->end_phys_addr = end_phys_addr;
87 ram_header->mem_slot.mem_start = slot->start_phys_addr;
88 ram_header->mem_slot.mem_end = slot->end_phys_addr;
89 qxl_io_memslot_add(qdev, slot_index);
90 slot->generation = qdev->rom->slot_generation;
91 high_bits = slot_index << qdev->slot_gen_bits;
92 high_bits |= slot->generation;
93 high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
94 slot->high_bits = high_bits;
95 return slot_index;
96}
97
98static void qxl_gc_work(struct work_struct *work)
99{
100 struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
101 qxl_garbage_collect(qdev);
102}
103
104int qxl_device_init(struct qxl_device *qdev,
105 struct drm_device *ddev,
106 struct pci_dev *pdev,
107 unsigned long flags)
108{
109 int r;
110
111 qdev->dev = &pdev->dev;
112 qdev->ddev = ddev;
113 qdev->pdev = pdev;
114 qdev->flags = flags;
115
116 mutex_init(&qdev->gem.mutex);
117 mutex_init(&qdev->update_area_mutex);
118 mutex_init(&qdev->release_mutex);
119 mutex_init(&qdev->surf_evict_mutex);
120 INIT_LIST_HEAD(&qdev->gem.objects);
121
122 qdev->rom_base = pci_resource_start(pdev, 2);
123 qdev->rom_size = pci_resource_len(pdev, 2);
124 qdev->vram_base = pci_resource_start(pdev, 0);
125 qdev->surfaceram_base = pci_resource_start(pdev, 1);
126 qdev->surfaceram_size = pci_resource_len(pdev, 1);
127 qdev->io_base = pci_resource_start(pdev, 3);
128
129 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
130 qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
131 DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
132 (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
133 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
134 (int)pci_resource_len(pdev, 0) / 1024,
135 (void *)qdev->surfaceram_base,
136 (void *)pci_resource_end(pdev, 1),
137 (int)qdev->surfaceram_size / 1024 / 1024,
138 (int)qdev->surfaceram_size / 1024);
139
140 qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
141 if (!qdev->rom) {
142 pr_err("Unable to ioremap ROM\n");
143 return -ENOMEM;
144 }
145
146 qxl_check_device(qdev);
147
148 r = qxl_bo_init(qdev);
149 if (r) {
150 DRM_ERROR("bo init failed %d\n", r);
151 return r;
152 }
153
154 qdev->ram_header = ioremap(qdev->vram_base +
155 qdev->rom->ram_header_offset,
156 sizeof(*qdev->ram_header));
157
158 qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
159 sizeof(struct qxl_command),
160 QXL_COMMAND_RING_SIZE,
161 qdev->io_base + QXL_IO_NOTIFY_CMD,
162 false,
163 &qdev->display_event);
164
165 qdev->cursor_ring = qxl_ring_create(
166 &(qdev->ram_header->cursor_ring_hdr),
167 sizeof(struct qxl_command),
168 QXL_CURSOR_RING_SIZE,
169 qdev->io_base + QXL_IO_NOTIFY_CMD,
170 false,
171 &qdev->cursor_event);
172
173 qdev->release_ring = qxl_ring_create(
174 &(qdev->ram_header->release_ring_hdr),
175 sizeof(uint64_t),
176 QXL_RELEASE_RING_SIZE, 0, true,
177 NULL);
178
179	/* TODO - slot initialization should happen on reset; where is our
180	 * reset handler? */
181 qdev->n_mem_slots = qdev->rom->slots_end;
182 qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
183 qdev->slot_id_bits = qdev->rom->slot_id_bits;
184 qdev->va_slot_mask =
185 (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
186
187 qdev->mem_slots =
188 kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
189 GFP_KERNEL);
190
191 idr_init(&qdev->release_idr);
192 spin_lock_init(&qdev->release_idr_lock);
193
194 idr_init(&qdev->surf_id_idr);
195 spin_lock_init(&qdev->surf_id_idr_lock);
196
197 mutex_init(&qdev->async_io_mutex);
198
199 /* reset the device into a known state - no memslots, no primary
200 * created, no surfaces. */
201 qxl_io_reset(qdev);
202
203 /* must initialize irq before first async io - slot creation */
204 r = qxl_irq_init(qdev);
205 if (r)
206 return r;
207
208 /*
209 * Note that virtual is surface0. We rely on the single ioremap done
210 * before.
211 */
212 qdev->main_mem_slot = setup_slot(qdev, 0,
213 (unsigned long)qdev->vram_base,
214 (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
215 qdev->surfaces_mem_slot = setup_slot(qdev, 1,
216 (unsigned long)qdev->surfaceram_base,
217 (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
218 DRM_INFO("main mem slot %d [%lx,%x)\n",
219 qdev->main_mem_slot,
220 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
221
222
223 qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
224 INIT_WORK(&qdev->gc_work, qxl_gc_work);
225
226 r = qxl_fb_init(qdev);
227 if (r)
228 return r;
229
230 return 0;
231}
232
233static void qxl_device_fini(struct qxl_device *qdev)
234{
235 if (qdev->current_release_bo[0])
236 qxl_bo_unref(&qdev->current_release_bo[0]);
237 if (qdev->current_release_bo[1])
238 qxl_bo_unref(&qdev->current_release_bo[1]);
239 flush_workqueue(qdev->gc_queue);
240 destroy_workqueue(qdev->gc_queue);
241 qdev->gc_queue = NULL;
242
243 qxl_ring_free(qdev->command_ring);
244 qxl_ring_free(qdev->cursor_ring);
245 qxl_ring_free(qdev->release_ring);
246 qxl_bo_fini(qdev);
247 io_mapping_free(qdev->surface_mapping);
248 io_mapping_free(qdev->vram_mapping);
249 iounmap(qdev->ram_header);
250 iounmap(qdev->rom);
251 qdev->rom = NULL;
252 qdev->mode_info.modes = NULL;
253 qdev->mode_info.num_modes = 0;
254 qxl_debugfs_remove_files(qdev);
255}
256
257int qxl_driver_unload(struct drm_device *dev)
258{
259 struct qxl_device *qdev = dev->dev_private;
260
261 if (qdev == NULL)
262 return 0;
263 qxl_modeset_fini(qdev);
264 qxl_device_fini(qdev);
265
266 kfree(qdev);
267 dev->dev_private = NULL;
268 return 0;
269}
270
271int qxl_driver_load(struct drm_device *dev, unsigned long flags)
272{
273 struct qxl_device *qdev;
274 int r;
275
276 /* require kms */
277 if (!drm_core_check_feature(dev, DRIVER_MODESET))
278 return -ENODEV;
279
280 qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
281 if (qdev == NULL)
282 return -ENOMEM;
283
284 dev->dev_private = qdev;
285
286 r = qxl_device_init(qdev, dev, dev->pdev, flags);
287 if (r)
288 goto out;
289
290 r = qxl_modeset_init(qdev);
291 if (r) {
292 qxl_driver_unload(dev);
293 goto out;
294 }
295
296 return 0;
297out:
298 kfree(qdev);
299 return r;
300}
301
302
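Before the object code below, it is worth unpacking the memslot addressing that qxl_device_init() just set up: setup_slot() stores the slot id and generation in the top bits of every device-visible address, and va_slot_mask keeps the low bits for the offset into the slot. A standalone sketch of that arithmetic, with made-up bit widths rather than values read from the ROM:

#include <stdint.h>
#include <stdio.h>

/* Same packing as setup_slot(): id in the top slot_id_bits, generation in
 * the next slot_gen_bits, the in-slot offset in whatever remains. */
static uint64_t pack_slot_high_bits(uint64_t slot_index, uint64_t generation,
				    unsigned slot_gen_bits,
				    unsigned slot_id_bits)
{
	uint64_t high_bits = slot_index << slot_gen_bits;

	high_bits |= generation;
	high_bits <<= 64 - (slot_gen_bits + slot_id_bits);
	return high_bits;
}

int main(void)
{
	/* hypothetical widths; the driver reads them from qdev->rom */
	unsigned gen_bits = 8, id_bits = 8;
	uint64_t va_slot_mask = ~(uint64_t)0 >> (id_bits + gen_bits);
	uint64_t hb = pack_slot_high_bits(1, 3, gen_bits, id_bits);

	/* a device address is high_bits | (offset & va_slot_mask) */
	printf("high_bits 0x%016llx, va_slot_mask 0x%016llx\n",
	       (unsigned long long)hb, (unsigned long long)va_slot_mask);
	return 0;
}
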
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 000000000000..d9b12e7bc6e1
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29#include <linux/io-mapping.h>
30static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
31{
32 struct qxl_bo *bo;
33 struct qxl_device *qdev;
34
35 bo = container_of(tbo, struct qxl_bo, tbo);
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
37
38 qxl_surface_evict(qdev, bo, false);
39 qxl_fence_fini(&bo->fence);
40 mutex_lock(&qdev->gem.mutex);
41 list_del_init(&bo->list);
42 mutex_unlock(&qdev->gem.mutex);
43 drm_gem_object_release(&bo->gem_base);
44 kfree(bo);
45}
46
47bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
48{
49 if (bo->destroy == &qxl_ttm_bo_destroy)
50 return true;
51 return false;
52}
53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
55{
56 u32 c = 0;
57
58 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
64 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
66 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
68 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c;
71 qbo->placement.num_busy_placement = c;
72}
73
74
75int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain,
77 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr)
79{
80 struct qxl_bo *bo;
81 enum ttm_bo_type type;
82 int r;
83
84 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
85 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
86 if (kernel)
87 type = ttm_bo_type_kernel;
88 else
89 type = ttm_bo_type_device;
90 *bo_ptr = NULL;
91 bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
92 if (bo == NULL)
93 return -ENOMEM;
94 size = roundup(size, PAGE_SIZE);
95 r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
96 if (unlikely(r)) {
97 kfree(bo);
98 return r;
99 }
100 bo->gem_base.driver_private = NULL;
101 bo->type = domain;
102 bo->pin_count = 0;
103 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0);
107 if (surf)
108 bo->surf = *surf;
109
110 qxl_ttm_placement_from_domain(bo, domain);
111
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size,
114 NULL, &qxl_ttm_bo_destroy);
115 if (unlikely(r != 0)) {
116 if (r != -ERESTARTSYS)
117 dev_err(qdev->dev,
118 "object_init failed for (%lu, 0x%08X)\n",
119 size, domain);
120 return r;
121 }
122 *bo_ptr = bo;
123 return 0;
124}
125
126int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
127{
128 bool is_iomem;
129 int r;
130
131 if (bo->kptr) {
132 if (ptr)
133 *ptr = bo->kptr;
134 return 0;
135 }
136 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
137 if (r)
138 return r;
139 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
140 if (ptr)
141 *ptr = bo->kptr;
142 return 0;
143}
144
145void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
146 struct qxl_bo *bo, int page_offset)
147{
148 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
149 void *rptr;
150 int ret;
151 struct io_mapping *map;
152
153 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
154 map = qdev->vram_mapping;
155 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
156 map = qdev->surface_mapping;
157 else
158 goto fallback;
159
160 (void) ttm_mem_io_lock(man, false);
161 ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
162 ttm_mem_io_unlock(man);
163
164 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
165fallback:
166 if (bo->kptr) {
167 rptr = bo->kptr + (page_offset * PAGE_SIZE);
168 return rptr;
169 }
170
171 ret = qxl_bo_kmap(bo, &rptr);
172 if (ret)
173 return NULL;
174
175 rptr += page_offset * PAGE_SIZE;
176 return rptr;
177}
178
179void qxl_bo_kunmap(struct qxl_bo *bo)
180{
181 if (bo->kptr == NULL)
182 return;
183 bo->kptr = NULL;
184 ttm_bo_kunmap(&bo->kmap);
185}
186
187void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
188 struct qxl_bo *bo, void *pmap)
189{
190 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
191 struct io_mapping *map;
192
193 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
194 map = qdev->vram_mapping;
195 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
196 map = qdev->surface_mapping;
197 else
198 goto fallback;
199
200 io_mapping_unmap_atomic(pmap);
201
202 (void) ttm_mem_io_lock(man, false);
203 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
204 ttm_mem_io_unlock(man);
205 return ;
206 fallback:
207 qxl_bo_kunmap(bo);
208}
209
210void qxl_bo_unref(struct qxl_bo **bo)
211{
212 struct ttm_buffer_object *tbo;
213
214 if ((*bo) == NULL)
215 return;
216 tbo = &((*bo)->tbo);
217 ttm_bo_unref(&tbo);
218 if (tbo == NULL)
219 *bo = NULL;
220}
221
222struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
223{
224 ttm_bo_reference(&bo->tbo);
225 return bo;
226}
227
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i;
232
233 if (bo->pin_count) {
234 bo->pin_count++;
235 if (gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0;
238 }
239 qxl_ttm_placement_from_domain(bo, domain);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) {
244 bo->pin_count = 1;
245 if (gpu_addr != NULL)
246 *gpu_addr = qxl_bo_gpu_offset(bo);
247 }
248 if (unlikely(r != 0))
249 dev_err(qdev->dev, "%p pin failed\n", bo);
250 return r;
251}
252
253int qxl_bo_unpin(struct qxl_bo *bo)
254{
255 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
256 int r, i;
257
258 if (!bo->pin_count) {
259 dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
260 return 0;
261 }
262 bo->pin_count--;
263 if (bo->pin_count)
264 return 0;
265 for (i = 0; i < bo->placement.num_placement; i++)
266 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
267 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
268 if (unlikely(r != 0))
269 dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
270 return r;
271}
272
273void qxl_bo_force_delete(struct qxl_device *qdev)
274{
275 struct qxl_bo *bo, *n;
276
277 if (list_empty(&qdev->gem.objects))
278 return;
279	dev_err(qdev->dev, "Userspace still has active objects!\n");
280 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
281 mutex_lock(&qdev->ddev->struct_mutex);
282 dev_err(qdev->dev, "%p %p %lu %lu force free\n",
283 &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
284 *((unsigned long *)&bo->gem_base.refcount));
285 mutex_lock(&qdev->gem.mutex);
286 list_del_init(&bo->list);
287 mutex_unlock(&qdev->gem.mutex);
288 /* this should unref the ttm bo */
289 drm_gem_object_unreference(&bo->gem_base);
290 mutex_unlock(&qdev->ddev->struct_mutex);
291 }
292}
293
294int qxl_bo_init(struct qxl_device *qdev)
295{
296 return qxl_ttm_init(qdev);
297}
298
299void qxl_bo_fini(struct qxl_device *qdev)
300{
301 qxl_ttm_fini(qdev);
302}
303
304int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
305{
306 int ret;
307 if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
308 /* allocate a surface id for this surface now */
309 ret = qxl_surface_id_alloc(qdev, bo);
310 if (ret)
311 return ret;
312
313 ret = qxl_hw_surface_alloc(qdev, bo, NULL);
314 if (ret)
315 return ret;
316 }
317 return 0;
318}
319
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
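
Taken together, the helpers above give the usual TTM object lifecycle. A hedged sketch of one round trip through them, not part of the patch (assumes an initialized qdev; reservation around the kmap and most error paths are elided for brevity):

/* Sketch only: allocate a kernel-owned BO in VRAM, CPU-map it, clear it,
 * then drop the mapping and the reference. */
static int example_bo_roundtrip(struct qxl_device *qdev)
{
	struct qxl_bo *bo;
	void *ptr;
	int r;

	r = qxl_bo_create(qdev, PAGE_SIZE, true /* kernel */,
			  QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = qxl_bo_kmap(bo, &ptr);
	if (r) {
		qxl_bo_unref(&bo);
		return r;
	}

	memset(ptr, 0, PAGE_SIZE);
	qxl_bo_kunmap(bo);

	/* last unref triggers qxl_ttm_bo_destroy(), which releases the
	 * GEM base and frees the bo */
	qxl_bo_unref(&bo);
	return 0;
}
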
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 000000000000..b4fd89fbd8b7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25#ifndef QXL_OBJECT_H
26#define QXL_OBJECT_H
27
28#include "qxl_drv.h"
29
30static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
31{
32 int r;
33
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
35 if (unlikely(r != 0)) {
36 if (r != -ERESTARTSYS) {
37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
38 dev_err(qdev->dev, "%p reserve failed\n", bo);
39 }
40 return r;
41 }
42 return 0;
43}
44
45static inline void qxl_bo_unreserve(struct qxl_bo *bo)
46{
47 ttm_bo_unreserve(&bo->tbo);
48}
49
50static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
51{
52 return bo->tbo.offset;
53}
54
55static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
56{
57 return bo->tbo.num_pages << PAGE_SHIFT;
58}
59
60static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
61{
62 return !!atomic_read(&bo->tbo.reserved);
63}
64
65static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
66{
67 return bo->tbo.addr_space_offset;
68}
69
70static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
71 bool no_wait)
72{
73 int r;
74
75 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
76 if (unlikely(r != 0)) {
77 if (r != -ERESTARTSYS) {
78 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
79 dev_err(qdev->dev, "%p reserve failed for wait\n",
80 bo);
81 }
82 return r;
83 }
84 spin_lock(&bo->tbo.bdev->fence_lock);
85 if (mem_type)
86 *mem_type = bo->tbo.mem.mem_type;
87 if (bo->tbo.sync_obj)
88 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
89 spin_unlock(&bo->tbo.bdev->fence_lock);
90 ttm_bo_unreserve(&bo->tbo);
91 return r;
92}
93
94extern int qxl_bo_create(struct qxl_device *qdev,
95 unsigned long size,
96 bool kernel, u32 domain,
97 struct qxl_surface *surf,
98 struct qxl_bo **bo_ptr);
99extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
100extern void qxl_bo_kunmap(struct qxl_bo *bo);
101void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
102void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
103extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
104extern void qxl_bo_unref(struct qxl_bo **bo);
105extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
106extern int qxl_bo_unpin(struct qxl_bo *bo);
107extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
108extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
109
110extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
111extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
112#endif
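
The inline helpers above supply the reserve/unreserve bracket that every placement change needs. A minimal caller sketch mirroring the qxl_bo_pin() pattern (hypothetical function, not in the patch):

/* Sketch: bracket a validation with reserve/unreserve. */
static int example_move_to_vram(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);	/* false: wait for the lock */
	if (r)
		return r;

	qxl_ttm_placement_from_domain(bo, QXL_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);

	qxl_bo_unreserve(bo);
	return r;
}
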
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 000000000000..b443d6751d5f
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,304 @@
1/*
2 * Copyright 2011 Red Hat, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "qxl_drv.h"
23#include "qxl_object.h"
24
25/*
26 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
27 * into 256-byte chunks for now - gives 16 cmds per page.
28 *
29 * use an ida to index into the chunks?
30 */
31/* manage releasables */
32/* stack them 16 high for now - a drawable object is 191 bytes */
33#define RELEASE_SIZE 256
34#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
35/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
36#define SURFACE_RELEASE_SIZE 128
37#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret)
44{
45 struct qxl_release *release;
46 int handle;
47 size_t size = sizeof(*release);
48 int idr_ret;
49
50 release = kmalloc(size, GFP_KERNEL);
51 if (!release) {
52 DRM_ERROR("Out of memory\n");
53 return 0;
54 }
55 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0;
58 release->surface_release_id = 0;
59
60 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock);
62 idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
63 spin_unlock(&qdev->release_idr_lock);
64 idr_preload_end();
65 handle = idr_ret;
66 if (idr_ret < 0)
67 goto release_fail;
68 *ret = release;
69	QXL_INFO(qdev, "allocated release %d\n", handle);
70 release->id = handle;
71release_fail:
72
73 return handle;
74}
75
76void
77qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release)
79{
80 int i;
81
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
83 release->type, release->bo_count);
84
85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87
88 for (i = 0 ; i < release->bo_count; ++i) {
89 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset
91 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id);
93 qxl_bo_unref(&release->bos[i]);
94 }
95 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id);
97 spin_unlock(&qdev->release_idr_lock);
98 kfree(release);
99}
100
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo)
119{
120 int ret;
121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
122 bo);
123 return ret;
124}
125
126int qxl_release_reserve(struct qxl_device *qdev,
127 struct qxl_release *release, bool no_wait)
128{
129 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
131 ret = qxl_bo_reserve(release->bos[0], no_wait);
132 if (ret)
133 return ret;
134 }
135 return 0;
136}
137
138void qxl_release_unreserve(struct qxl_device *qdev,
139 struct qxl_release *release)
140{
141 if (atomic_dec_and_test(&release->bos[0]->reserve_count))
142 qxl_bo_unreserve(release->bos[0]);
143}
144
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel,
148 struct qxl_release **release)
149{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret;
154 struct qxl_bo *bo;
155 union qxl_release_info *info;
156
157 /* stash the release after the create command */
158 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
159 bo = qxl_bo_ref(create_rel->bos[0]);
160
161 (*release)->release_offset = create_rel->release_offset + 64;
162
163 qxl_release_add_res(qdev, *release, bo);
164
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info);
173
174
175out_unref:
176 qxl_bo_unref(&bo);
177 return ret;
178 }
179
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
181 QXL_RELEASE_SURFACE_CMD, release, NULL);
182}
183
184int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
185 int type, struct qxl_release **release,
186 struct qxl_bo **rbo)
187{
188 struct qxl_bo *bo;
189 int idr_ret;
190 int ret;
191 union qxl_release_info *info;
192 int cur_idx;
193
194 if (type == QXL_RELEASE_DRAWABLE)
195 cur_idx = 0;
196 else if (type == QXL_RELEASE_SURFACE_CMD)
197 cur_idx = 1;
198 else if (type == QXL_RELEASE_CURSOR_CMD)
199 cur_idx = 2;
200 else {
201 DRM_ERROR("got illegal type: %d\n", type);
202 return -EINVAL;
203 }
204
205 idr_ret = qxl_release_alloc(qdev, type, release);
206
207 mutex_lock(&qdev->release_mutex);
208 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
209 qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
210 qdev->current_release_bo_offset[cur_idx] = 0;
211 qdev->current_release_bo[cur_idx] = NULL;
212 }
213 if (!qdev->current_release_bo[cur_idx]) {
214 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
215 if (ret) {
216 mutex_unlock(&qdev->release_mutex);
217 return ret;
218 }
219
220		/* pin release BOs; they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 }
225
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
227
228 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
229 qdev->current_release_bo_offset[cur_idx]++;
230
231 if (rbo)
232 *rbo = bo;
233
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex);
238 if (ret)
239 goto out_unref;
240
241 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info);
244
245out_unref:
246 qxl_bo_unref(&bo);
247 return ret;
248}
249
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id)
266{
267 struct qxl_release *release;
268
269 spin_lock(&qdev->release_idr_lock);
270 release = idr_find(&qdev->release_idr, id);
271 spin_unlock(&qdev->release_idr_lock);
272 if (!release) {
273 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL;
275 }
276 if (release->bo_count < 1) {
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release;
281}
282
283union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
284 struct qxl_release *release)
285{
286 void *ptr;
287 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0];
289
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
291 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info;
293}
294
295void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release,
297 union qxl_release_info *info)
298{
299 struct qxl_bo *bo = release->bos[0];
300 void *ptr;
301
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304}
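
The suballocation arithmetic above checks out by hand: RELEASE_SIZE is 256 and each release BO is a single 4096-byte page, so release_offset advances in 256-byte steps and, because of the "+ 1" in the rollover test, never exceeds 3584 before a fresh BO is allocated. That is also why the PAGE_SIZE masking in qxl_release_map()/qxl_release_unmap() works in practice: offsets stay strictly below PAGE_SIZE, so "offset & PAGE_SIZE" is always 0 and "offset & ~PAGE_SIZE" is the offset itself. A standalone sketch of the same bookkeeping:

#include <assert.h>

#define PAGE_SZ           4096	/* stand-in for PAGE_SIZE */
#define RELEASE_SZ         256
#define RELEASES_PER_PAGE (PAGE_SZ / RELEASE_SZ)

/* Sketch of the per-type cursor kept in qdev->current_release_bo_offset. */
struct release_cursor {
	unsigned bo_offset;	/* index of the next free chunk in the BO */
};

/* Returns the byte offset of the next release inside its one-page BO. */
static unsigned next_release_offset(struct release_cursor *c)
{
	if (c->bo_offset + 1 >= RELEASES_PER_PAGE)
		c->bo_offset = 0;	/* a fresh BO is allocated here */
	return c->bo_offset++ * RELEASE_SZ;
}

int main(void)
{
	struct release_cursor c = { 0 };

	for (int i = 0; i < 40; i++) {
		unsigned off = next_release_offset(&c);

		assert(off < PAGE_SZ);		/* never crosses a page */
		assert((off & PAGE_SZ) == 0);	/* why the masking works */
	}
	return 0;
}
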
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 000000000000..489cb8cece4d
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,581 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <ttm/ttm_bo_api.h>
27#include <ttm/ttm_bo_driver.h>
28#include <ttm/ttm_placement.h>
29#include <ttm/ttm_page_alloc.h>
30#include <ttm/ttm_module.h>
31#include <drm/drmP.h>
32#include <drm/drm.h>
33#include <drm/qxl_drm.h>
34#include "qxl_drv.h"
35#include "qxl_object.h"
36
37#include <linux/delay.h>
38static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
39
40static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
41{
42 struct qxl_mman *mman;
43 struct qxl_device *qdev;
44
45 mman = container_of(bdev, struct qxl_mman, bdev);
46 qdev = container_of(mman, struct qxl_device, mman);
47 return qdev;
48}
49
50static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
51{
52 return ttm_mem_global_init(ref->object);
53}
54
55static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
56{
57 ttm_mem_global_release(ref->object);
58}
59
60static int qxl_ttm_global_init(struct qxl_device *qdev)
61{
62 struct drm_global_reference *global_ref;
63 int r;
64
65 qdev->mman.mem_global_referenced = false;
66 global_ref = &qdev->mman.mem_global_ref;
67 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
68 global_ref->size = sizeof(struct ttm_mem_global);
69 global_ref->init = &qxl_ttm_mem_global_init;
70 global_ref->release = &qxl_ttm_mem_global_release;
71
72 r = drm_global_item_ref(global_ref);
73 if (r != 0) {
74 DRM_ERROR("Failed setting up TTM memory accounting "
75 "subsystem.\n");
76 return r;
77 }
78
79 qdev->mman.bo_global_ref.mem_glob =
80 qdev->mman.mem_global_ref.object;
81 global_ref = &qdev->mman.bo_global_ref.ref;
82 global_ref->global_type = DRM_GLOBAL_TTM_BO;
83 global_ref->size = sizeof(struct ttm_bo_global);
84 global_ref->init = &ttm_bo_global_init;
85 global_ref->release = &ttm_bo_global_release;
86 r = drm_global_item_ref(global_ref);
87 if (r != 0) {
88 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
89 drm_global_item_unref(&qdev->mman.mem_global_ref);
90 return r;
91 }
92
93 qdev->mman.mem_global_referenced = true;
94 return 0;
95}
96
97static void qxl_ttm_global_fini(struct qxl_device *qdev)
98{
99 if (qdev->mman.mem_global_referenced) {
100 drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
101 drm_global_item_unref(&qdev->mman.mem_global_ref);
102 qdev->mman.mem_global_referenced = false;
103 }
104}
105
106static struct vm_operations_struct qxl_ttm_vm_ops;
107static const struct vm_operations_struct *ttm_vm_ops;
108
109static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
110{
111 struct ttm_buffer_object *bo;
112 struct qxl_device *qdev;
113 int r;
114
115 bo = (struct ttm_buffer_object *)vma->vm_private_data;
116 if (bo == NULL)
117 return VM_FAULT_NOPAGE;
118 qdev = qxl_get_qdev(bo->bdev);
119 r = ttm_vm_ops->fault(vma, vmf);
120 return r;
121}
122
123int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
124{
125 struct drm_file *file_priv;
126 struct qxl_device *qdev;
127 int r;
128
129 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
130		pr_info("%s: vma->vm_pgoff (%lu) < DRM_FILE_PAGE_OFFSET\n",
131 __func__, vma->vm_pgoff);
132 return drm_mmap(filp, vma);
133 }
134
135 file_priv = filp->private_data;
136 qdev = file_priv->minor->dev->dev_private;
137 if (qdev == NULL) {
138 DRM_ERROR(
139 "filp->private_data->minor->dev->dev_private == NULL\n");
140 return -EINVAL;
141 }
142 QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
143 __func__, filp->private_data, vma->vm_pgoff);
144
145 r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
146 if (unlikely(r != 0))
147 return r;
148 if (unlikely(ttm_vm_ops == NULL)) {
149 ttm_vm_ops = vma->vm_ops;
150 qxl_ttm_vm_ops = *ttm_vm_ops;
151 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
152 }
153 vma->vm_ops = &qxl_ttm_vm_ops;
154 return 0;
155}
156
157static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
158{
159 return 0;
160}
161
162static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
163 struct ttm_mem_type_manager *man)
164{
165 struct qxl_device *qdev;
166
167 qdev = qxl_get_qdev(bdev);
168
169 switch (type) {
170 case TTM_PL_SYSTEM:
171 /* System memory */
172 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
173 man->available_caching = TTM_PL_MASK_CACHING;
174 man->default_caching = TTM_PL_FLAG_CACHED;
175 break;
176 case TTM_PL_VRAM:
177 case TTM_PL_PRIV0:
178 /* "On-card" video ram */
179 man->func = &ttm_bo_manager_func;
180 man->gpu_offset = 0;
181 man->flags = TTM_MEMTYPE_FLAG_FIXED |
182 TTM_MEMTYPE_FLAG_MAPPABLE;
183 man->available_caching = TTM_PL_MASK_CACHING;
184 man->default_caching = TTM_PL_FLAG_CACHED;
185 break;
186 default:
187 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
188 return -EINVAL;
189 }
190 return 0;
191}
192
193static void qxl_evict_flags(struct ttm_buffer_object *bo,
194 struct ttm_placement *placement)
195{
196 struct qxl_bo *qbo;
197 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
198
199 if (!qxl_ttm_bo_is_qxl_bo(bo)) {
200 placement->fpfn = 0;
201 placement->lpfn = 0;
202 placement->placement = &placements;
203 placement->busy_placement = &placements;
204 placement->num_placement = 1;
205 placement->num_busy_placement = 1;
206 return;
207 }
208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
210 *placement = qbo->placement;
211}
212
213static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
214{
215 return 0;
216}
217
218static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
219 struct ttm_mem_reg *mem)
220{
221 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
222 struct qxl_device *qdev = qxl_get_qdev(bdev);
223
224 mem->bus.addr = NULL;
225 mem->bus.offset = 0;
226 mem->bus.size = mem->num_pages << PAGE_SHIFT;
227 mem->bus.base = 0;
228 mem->bus.is_iomem = false;
229 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
230 return -EINVAL;
231 switch (mem->mem_type) {
232 case TTM_PL_SYSTEM:
233 /* system memory */
234 return 0;
235 case TTM_PL_VRAM:
236 mem->bus.is_iomem = true;
237 mem->bus.base = qdev->vram_base;
238 mem->bus.offset = mem->start << PAGE_SHIFT;
239 break;
240 case TTM_PL_PRIV0:
241 mem->bus.is_iomem = true;
242 mem->bus.base = qdev->surfaceram_base;
243 mem->bus.offset = mem->start << PAGE_SHIFT;
244 break;
245 default:
246 return -EINVAL;
247 }
248 return 0;
249}
250
251static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
252 struct ttm_mem_reg *mem)
253{
254}
255
256/*
257 * TTM backend functions.
258 */
259struct qxl_ttm_tt {
260 struct ttm_dma_tt ttm;
261 struct qxl_device *qdev;
262 u64 offset;
263};
264
265static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
266 struct ttm_mem_reg *bo_mem)
267{
268 struct qxl_ttm_tt *gtt = (void *)ttm;
269
270 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
271 if (!ttm->num_pages) {
272 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
273 ttm->num_pages, bo_mem, ttm);
274 }
275 /* Not implemented */
276 return -1;
277}
278
279static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
280{
281 /* Not implemented */
282 return -1;
283}
284
285static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
286{
287 struct qxl_ttm_tt *gtt = (void *)ttm;
288
289 ttm_dma_tt_fini(&gtt->ttm);
290 kfree(gtt);
291}
292
293static struct ttm_backend_func qxl_backend_func = {
294 .bind = &qxl_ttm_backend_bind,
295 .unbind = &qxl_ttm_backend_unbind,
296 .destroy = &qxl_ttm_backend_destroy,
297};
298
299static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
300{
301 int r;
302
303 if (ttm->state != tt_unpopulated)
304 return 0;
305
306 r = ttm_pool_populate(ttm);
307 if (r)
308 return r;
309
310 return 0;
311}
312
313static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
314{
315 ttm_pool_unpopulate(ttm);
316}
317
318static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
319 unsigned long size, uint32_t page_flags,
320 struct page *dummy_read_page)
321{
322 struct qxl_device *qdev;
323 struct qxl_ttm_tt *gtt;
324
325 qdev = qxl_get_qdev(bdev);
326 gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
327 if (gtt == NULL)
328 return NULL;
329 gtt->ttm.ttm.func = &qxl_backend_func;
330 gtt->qdev = qdev;
331 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
332 dummy_read_page)) {
333 kfree(gtt);
334 return NULL;
335 }
336 return &gtt->ttm.ttm;
337}
338
339static void qxl_move_null(struct ttm_buffer_object *bo,
340 struct ttm_mem_reg *new_mem)
341{
342 struct ttm_mem_reg *old_mem = &bo->mem;
343
344 BUG_ON(old_mem->mm_node != NULL);
345 *old_mem = *new_mem;
346 new_mem->mm_node = NULL;
347}
348
349static int qxl_bo_move(struct ttm_buffer_object *bo,
350 bool evict, bool interruptible,
351 bool no_wait_gpu,
352 struct ttm_mem_reg *new_mem)
353{
354 struct ttm_mem_reg *old_mem = &bo->mem;
355 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
356 qxl_move_null(bo, new_mem);
357 return 0;
358 }
359 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
360}
361
362
363static int qxl_sync_obj_wait(void *sync_obj,
364 bool lazy, bool interruptible)
365{
366 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
367 int count = 0, sc = 0;
368 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
369
370 if (qfence->num_active_releases == 0)
371 return 0;
372
373retry:
374 if (sc == 0) {
375 if (bo->type == QXL_GEM_DOMAIN_SURFACE)
376 qxl_update_surface(qfence->qdev, bo);
377 } else if (sc >= 1) {
378 qxl_io_notify_oom(qfence->qdev);
379 }
380
381 sc++;
382
383 for (count = 0; count < 10; count++) {
384 bool ret;
385 ret = qxl_queue_garbage_collect(qfence->qdev, true);
386 if (ret == false)
387 break;
388
389 if (qfence->num_active_releases == 0)
390 return 0;
391 }
392
393 if (qfence->num_active_releases) {
394 bool have_drawable_releases = false;
395 void **slot;
396 struct radix_tree_iter iter;
397 int release_id;
398
399 radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
400 struct qxl_release *release;
401
402 release_id = iter.index;
403 release = qxl_release_from_id_locked(qfence->qdev, release_id);
404 if (release == NULL)
405 continue;
406
407 if (release->type == QXL_RELEASE_DRAWABLE)
408 have_drawable_releases = true;
409 }
410
411 qxl_queue_garbage_collect(qfence->qdev, true);
412
413 if (have_drawable_releases || sc < 4) {
414 if (sc > 2)
415 /* back off */
416 usleep_range(500, 1000);
417 if (have_drawable_releases && sc > 300) {
418			WARN(1, "sync obj %d still has outstanding releases %d %d %d %lu %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
419 return -EBUSY;
420 }
421 goto retry;
422 }
423 }
424 return 0;
425}
426
427static int qxl_sync_obj_flush(void *sync_obj)
428{
429 return 0;
430}
431
432static void qxl_sync_obj_unref(void **sync_obj)
433{
434}
435
436static void *qxl_sync_obj_ref(void *sync_obj)
437{
438 return sync_obj;
439}
440
441static bool qxl_sync_obj_signaled(void *sync_obj)
442{
443 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
444 return (qfence->num_active_releases == 0);
445}
446
447static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
448 struct ttm_mem_reg *new_mem)
449{
450 struct qxl_bo *qbo;
451 struct qxl_device *qdev;
452
453 if (!qxl_ttm_bo_is_qxl_bo(bo))
454 return;
455 qbo = container_of(bo, struct qxl_bo, tbo);
456 qdev = qbo->gem_base.dev->dev_private;
457
458 if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
459 qxl_surface_evict(qdev, qbo, new_mem ? true : false);
460}
461
462static struct ttm_bo_driver qxl_bo_driver = {
463 .ttm_tt_create = &qxl_ttm_tt_create,
464 .ttm_tt_populate = &qxl_ttm_tt_populate,
465 .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
466 .invalidate_caches = &qxl_invalidate_caches,
467 .init_mem_type = &qxl_init_mem_type,
468 .evict_flags = &qxl_evict_flags,
469 .move = &qxl_bo_move,
470 .verify_access = &qxl_verify_access,
471 .io_mem_reserve = &qxl_ttm_io_mem_reserve,
472 .io_mem_free = &qxl_ttm_io_mem_free,
473 .sync_obj_signaled = &qxl_sync_obj_signaled,
474 .sync_obj_wait = &qxl_sync_obj_wait,
475 .sync_obj_flush = &qxl_sync_obj_flush,
476 .sync_obj_unref = &qxl_sync_obj_unref,
477 .sync_obj_ref = &qxl_sync_obj_ref,
478 .move_notify = &qxl_bo_move_notify,
479};
480
481
482
483int qxl_ttm_init(struct qxl_device *qdev)
484{
485 int r;
486 int num_io_pages; /* != rom->num_io_pages, we include surface0 */
487
488 r = qxl_ttm_global_init(qdev);
489 if (r)
490 return r;
491	/* No other user of the address space, so set it to 0 */
492 r = ttm_bo_device_init(&qdev->mman.bdev,
493 qdev->mman.bo_global_ref.ref.object,
494 &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
495 if (r) {
496 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
497 return r;
498 }
499 /* NOTE: this includes the framebuffer (aka surface 0) */
500 num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
501 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
502 num_io_pages);
503 if (r) {
504 DRM_ERROR("Failed initializing VRAM heap.\n");
505 return r;
506 }
507 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
508 qdev->surfaceram_size / PAGE_SIZE);
509 if (r) {
510 DRM_ERROR("Failed initializing Surfaces heap.\n");
511 return r;
512 }
513 DRM_INFO("qxl: %uM of VRAM memory size\n",
514 (unsigned)qdev->vram_size / (1024 * 1024));
515 DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
516 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
517 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
518 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
519 r = qxl_ttm_debugfs_init(qdev);
520 if (r) {
521 DRM_ERROR("Failed to init debugfs\n");
522 return r;
523 }
524 return 0;
525}
526
527void qxl_ttm_fini(struct qxl_device *qdev)
528{
529 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
530 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
531 ttm_bo_device_release(&qdev->mman.bdev);
532 qxl_ttm_global_fini(qdev);
533 DRM_INFO("qxl: ttm finalized\n");
534}
535
536
537#define QXL_DEBUGFS_MEM_TYPES 2
538
539#if defined(CONFIG_DEBUG_FS)
540static int qxl_mm_dump_table(struct seq_file *m, void *data)
541{
542 struct drm_info_node *node = (struct drm_info_node *)m->private;
543 struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
544 struct drm_device *dev = node->minor->dev;
545 struct qxl_device *rdev = dev->dev_private;
546 int ret;
547 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
548
549 spin_lock(&glob->lru_lock);
550 ret = drm_mm_dump_table(m, mm);
551 spin_unlock(&glob->lru_lock);
552 return ret;
553}
554#endif
555
556static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
557{
558#if defined(CONFIG_DEBUG_FS)
559 static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
560 static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
561 unsigned i;
562
563 for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
564 if (i == 0)
565 sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
566 else
567 sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
568 qxl_mem_types_list[i].name = qxl_mem_types_names[i];
569 qxl_mem_types_list[i].show = &qxl_mm_dump_table;
570 qxl_mem_types_list[i].driver_features = 0;
571 if (i == 0)
572 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
573 else
574 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
575
576 }
577 return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
578#else
579 return 0;
580#endif
581}
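
qxl_ttm_io_mem_reserve() above is pure address arithmetic: a page of a VRAM or surfaces BO becomes CPU-visible at the PCI BAR base plus its byte offset inside the heap, which is exactly what the atomic kmap path in qxl_object.c relies on. A standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_SZ 12	/* stand-in for PAGE_SHIFT */

/* Sketch of the math in qxl_ttm_io_mem_reserve(): mem->start is a page
 * index inside the heap, bus.base is the PCI BAR base (BAR 0 for VRAM,
 * BAR 1 for the surfaces heap). */
static uint64_t cpu_addr_of(uint64_t bar_base, uint64_t page_index,
			    uint64_t page_offset)
{
	uint64_t bus_offset = page_index << PAGE_SHIFT_SZ;

	return bar_base + bus_offset + page_offset;
}

int main(void)
{
	/* hypothetical: VRAM BAR at 0xf0000000, BO at heap page 5 */
	printf("0x%llx\n",
	       (unsigned long long)cpu_addr_of(0xf0000000ull, 5, 0x40));
	return 0;
}
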
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index bf172522ea68..86c5e3611892 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
 	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
 	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-	si_blit_shaders.o radeon_prime.o
+	si_blit_shaders.o radeon_prime.o radeon_uvd.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c3772850..fb441a790f3d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
 		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
 		DRM_DEBUG("atom firmware requested %08x %dkb\n",
-			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
 
-		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
 	}
 	ctx->scratch_size_bytes = 0;
 	if (usage_bytes == 0)
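
The atom.c hunk above is a straight endianness fix: ATOM BIOS tables are stored little-endian, so multi-byte fields have to be read through le16_to_cpu()/le32_to_cpu() to come out right on big-endian hosts (the ulClockParams unions added below give BE code a whole-ULONG view of the same bytes). A standalone illustration of the idea, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 16-bit read, the moral equivalent of
 * le16_to_cpu() applied to a packed BIOS field.  Reading through a
 * struct cast instead would silently return byte-swapped values on a
 * big-endian CPU. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* usFirmwareUseInKb = 20 KiB, stored little-endian in the table */
	const uint8_t table[2] = { 0x14, 0x00 };
	uint32_t usage_bytes = read_le16(table) * 1024;

	printf("%u bytes reserved\n", usage_bytes);	/* prints 20480 */
	return 0;
}
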
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 4b04ba3828e8..0ee573743de9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c6ab9c..6d6fdb3ba0d0 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		/* use frac fb div on APUs */
 		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		/* use frac fb div on RS780/RS880 */
+		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	} else {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4552d4aff317..44a7da66e081 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2150,13 +2150,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
-		r600_hdmi_enable(encoder);
-		if (ASIC_IS_DCE6(rdev))
-			; /* TODO (use pointers instead of if-s?) */
-		else if (ASIC_IS_DCE4(rdev))
-			evergreen_hdmi_setmode(encoder, adjusted_mode);
-		else
-			r600_hdmi_setmode(encoder, adjusted_mode);
+		if (rdev->asic->display.hdmi_enable)
+			radeon_hdmi_enable(rdev, encoder, true);
+		if (rdev->asic->display.hdmi_setmode)
+			radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
 	}
 }
 
@@ -2413,8 +2410,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 
 disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
-		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-			r600_hdmi_disable(encoder);
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+			if (rdev->asic->display.hdmi_enable)
+				radeon_hdmi_enable(rdev, encoder, false);
+		}
 		dig = radeon_encoder->enc_priv;
 		dig->dig_encoder = -1;
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657bf215..105bafb6c29d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -53,6 +53,864 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 				     int ring, u32 cp_int_cntl);
 
56static const u32 evergreen_golden_registers[] =
57{
58 0x3f90, 0xffff0000, 0xff000000,
59 0x9148, 0xffff0000, 0xff000000,
60 0x3f94, 0xffff0000, 0xff000000,
61 0x914c, 0xffff0000, 0xff000000,
62 0x9b7c, 0xffffffff, 0x00000000,
63 0x8a14, 0xffffffff, 0x00000007,
64 0x8b10, 0xffffffff, 0x00000000,
65 0x960c, 0xffffffff, 0x54763210,
66 0x88c4, 0xffffffff, 0x000000c2,
67 0x88d4, 0xffffffff, 0x00000010,
68 0x8974, 0xffffffff, 0x00000000,
69 0xc78, 0x00000080, 0x00000080,
70 0x5eb4, 0xffffffff, 0x00000002,
71 0x5e78, 0xffffffff, 0x001000f0,
72 0x6104, 0x01000300, 0x00000000,
73 0x5bc0, 0x00300000, 0x00000000,
74 0x7030, 0xffffffff, 0x00000011,
75 0x7c30, 0xffffffff, 0x00000011,
76 0x10830, 0xffffffff, 0x00000011,
77 0x11430, 0xffffffff, 0x00000011,
78 0x12030, 0xffffffff, 0x00000011,
79 0x12c30, 0xffffffff, 0x00000011,
80 0xd02c, 0xffffffff, 0x08421000,
81 0x240c, 0xffffffff, 0x00000380,
82 0x8b24, 0xffffffff, 0x00ff0fff,
83 0x28a4c, 0x06000000, 0x06000000,
84 0x10c, 0x00000001, 0x00000001,
85 0x8d00, 0xffffffff, 0x100e4848,
86 0x8d04, 0xffffffff, 0x00164745,
87 0x8c00, 0xffffffff, 0xe4000003,
88 0x8c04, 0xffffffff, 0x40600060,
89 0x8c08, 0xffffffff, 0x001c001c,
90 0x8cf0, 0xffffffff, 0x08e00620,
91 0x8c20, 0xffffffff, 0x00800080,
92 0x8c24, 0xffffffff, 0x00800080,
93 0x8c18, 0xffffffff, 0x20202078,
94 0x8c1c, 0xffffffff, 0x00001010,
95 0x28350, 0xffffffff, 0x00000000,
96 0xa008, 0xffffffff, 0x00010000,
97 0x5cc, 0xffffffff, 0x00000001,
98 0x9508, 0xffffffff, 0x00000002,
99 0x913c, 0x0000000f, 0x0000000a
100};
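/*
 * These golden-register tables are flat (offset, and-mask, or-value)
 * triples: clear the masked bits of the register, then OR in the value.
 * A sketch of the kind of loop that consumes them (hypothetical helper,
 * not part of this hunk):
 */
static void example_program_register_sequence(struct radeon_device *rdev,
					      const u32 *regs,
					      unsigned array_size)
{
	unsigned i;

	for (i = 0; i < array_size; i += 3) {
		u32 reg = regs[i + 0];
		u32 and_mask = regs[i + 1];
		u32 or_mask = regs[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff) {
			tmp = or_mask;		/* full overwrite */
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}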
101
102static const u32 evergreen_golden_registers2[] =
103{
104 0x2f4c, 0xffffffff, 0x00000000,
105 0x54f4, 0xffffffff, 0x00000000,
106 0x54f0, 0xffffffff, 0x00000000,
107 0x5498, 0xffffffff, 0x00000000,
108 0x549c, 0xffffffff, 0x00000000,
109 0x5494, 0xffffffff, 0x00000000,
110 0x53cc, 0xffffffff, 0x00000000,
111 0x53c8, 0xffffffff, 0x00000000,
112 0x53c4, 0xffffffff, 0x00000000,
113 0x53c0, 0xffffffff, 0x00000000,
114 0x53bc, 0xffffffff, 0x00000000,
115 0x53b8, 0xffffffff, 0x00000000,
116 0x53b4, 0xffffffff, 0x00000000,
117 0x53b0, 0xffffffff, 0x00000000
118};
119
120static const u32 cypress_mgcg_init[] =
121{
122 0x802c, 0xffffffff, 0xc0000000,
123 0x5448, 0xffffffff, 0x00000100,
124 0x55e4, 0xffffffff, 0x00000100,
125 0x160c, 0xffffffff, 0x00000100,
126 0x5644, 0xffffffff, 0x00000100,
127 0xc164, 0xffffffff, 0x00000100,
128 0x8a18, 0xffffffff, 0x00000100,
129 0x897c, 0xffffffff, 0x06000100,
130 0x8b28, 0xffffffff, 0x00000100,
131 0x9144, 0xffffffff, 0x00000100,
132 0x9a60, 0xffffffff, 0x00000100,
133 0x9868, 0xffffffff, 0x00000100,
134 0x8d58, 0xffffffff, 0x00000100,
135 0x9510, 0xffffffff, 0x00000100,
136 0x949c, 0xffffffff, 0x00000100,
137 0x9654, 0xffffffff, 0x00000100,
138 0x9030, 0xffffffff, 0x00000100,
139 0x9034, 0xffffffff, 0x00000100,
140 0x9038, 0xffffffff, 0x00000100,
141 0x903c, 0xffffffff, 0x00000100,
142 0x9040, 0xffffffff, 0x00000100,
143 0xa200, 0xffffffff, 0x00000100,
144 0xa204, 0xffffffff, 0x00000100,
145 0xa208, 0xffffffff, 0x00000100,
146 0xa20c, 0xffffffff, 0x00000100,
147 0x971c, 0xffffffff, 0x00000100,
148 0x977c, 0xffffffff, 0x00000100,
149 0x3f80, 0xffffffff, 0x00000100,
150 0xa210, 0xffffffff, 0x00000100,
151 0xa214, 0xffffffff, 0x00000100,
152 0x4d8, 0xffffffff, 0x00000100,
153 0x9784, 0xffffffff, 0x00000100,
154 0x9698, 0xffffffff, 0x00000100,
155 0x4d4, 0xffffffff, 0x00000200,
156 0x30cc, 0xffffffff, 0x00000100,
157 0xd0c0, 0xffffffff, 0xff000100,
158 0x802c, 0xffffffff, 0x40000000,
159 0x915c, 0xffffffff, 0x00010000,
160 0x9160, 0xffffffff, 0x00030002,
161 0x9178, 0xffffffff, 0x00070000,
162 0x917c, 0xffffffff, 0x00030002,
163 0x9180, 0xffffffff, 0x00050004,
164 0x918c, 0xffffffff, 0x00010006,
165 0x9190, 0xffffffff, 0x00090008,
166 0x9194, 0xffffffff, 0x00070000,
167 0x9198, 0xffffffff, 0x00030002,
168 0x919c, 0xffffffff, 0x00050004,
169 0x91a8, 0xffffffff, 0x00010006,
170 0x91ac, 0xffffffff, 0x00090008,
171 0x91b0, 0xffffffff, 0x00070000,
172 0x91b4, 0xffffffff, 0x00030002,
173 0x91b8, 0xffffffff, 0x00050004,
174 0x91c4, 0xffffffff, 0x00010006,
175 0x91c8, 0xffffffff, 0x00090008,
176 0x91cc, 0xffffffff, 0x00070000,
177 0x91d0, 0xffffffff, 0x00030002,
178 0x91d4, 0xffffffff, 0x00050004,
179 0x91e0, 0xffffffff, 0x00010006,
180 0x91e4, 0xffffffff, 0x00090008,
181 0x91e8, 0xffffffff, 0x00000000,
182 0x91ec, 0xffffffff, 0x00070000,
183 0x91f0, 0xffffffff, 0x00030002,
184 0x91f4, 0xffffffff, 0x00050004,
185 0x9200, 0xffffffff, 0x00010006,
186 0x9204, 0xffffffff, 0x00090008,
187 0x9208, 0xffffffff, 0x00070000,
188 0x920c, 0xffffffff, 0x00030002,
189 0x9210, 0xffffffff, 0x00050004,
190 0x921c, 0xffffffff, 0x00010006,
191 0x9220, 0xffffffff, 0x00090008,
192 0x9224, 0xffffffff, 0x00070000,
193 0x9228, 0xffffffff, 0x00030002,
194 0x922c, 0xffffffff, 0x00050004,
195 0x9238, 0xffffffff, 0x00010006,
196 0x923c, 0xffffffff, 0x00090008,
197 0x9240, 0xffffffff, 0x00070000,
198 0x9244, 0xffffffff, 0x00030002,
199 0x9248, 0xffffffff, 0x00050004,
200 0x9254, 0xffffffff, 0x00010006,
201 0x9258, 0xffffffff, 0x00090008,
202 0x925c, 0xffffffff, 0x00070000,
203 0x9260, 0xffffffff, 0x00030002,
204 0x9264, 0xffffffff, 0x00050004,
205 0x9270, 0xffffffff, 0x00010006,
206 0x9274, 0xffffffff, 0x00090008,
207 0x9278, 0xffffffff, 0x00070000,
208 0x927c, 0xffffffff, 0x00030002,
209 0x9280, 0xffffffff, 0x00050004,
210 0x928c, 0xffffffff, 0x00010006,
211 0x9290, 0xffffffff, 0x00090008,
212 0x9294, 0xffffffff, 0x00000000,
213 0x929c, 0xffffffff, 0x00000001,
214 0x802c, 0xffffffff, 0x40010000,
215 0x915c, 0xffffffff, 0x00010000,
216 0x9160, 0xffffffff, 0x00030002,
217 0x9178, 0xffffffff, 0x00070000,
218 0x917c, 0xffffffff, 0x00030002,
219 0x9180, 0xffffffff, 0x00050004,
220 0x918c, 0xffffffff, 0x00010006,
221 0x9190, 0xffffffff, 0x00090008,
222 0x9194, 0xffffffff, 0x00070000,
223 0x9198, 0xffffffff, 0x00030002,
224 0x919c, 0xffffffff, 0x00050004,
225 0x91a8, 0xffffffff, 0x00010006,
226 0x91ac, 0xffffffff, 0x00090008,
227 0x91b0, 0xffffffff, 0x00070000,
228 0x91b4, 0xffffffff, 0x00030002,
229 0x91b8, 0xffffffff, 0x00050004,
230 0x91c4, 0xffffffff, 0x00010006,
231 0x91c8, 0xffffffff, 0x00090008,
232 0x91cc, 0xffffffff, 0x00070000,
233 0x91d0, 0xffffffff, 0x00030002,
234 0x91d4, 0xffffffff, 0x00050004,
235 0x91e0, 0xffffffff, 0x00010006,
236 0x91e4, 0xffffffff, 0x00090008,
237 0x91e8, 0xffffffff, 0x00000000,
238 0x91ec, 0xffffffff, 0x00070000,
239 0x91f0, 0xffffffff, 0x00030002,
240 0x91f4, 0xffffffff, 0x00050004,
241 0x9200, 0xffffffff, 0x00010006,
242 0x9204, 0xffffffff, 0x00090008,
243 0x9208, 0xffffffff, 0x00070000,
244 0x920c, 0xffffffff, 0x00030002,
245 0x9210, 0xffffffff, 0x00050004,
246 0x921c, 0xffffffff, 0x00010006,
247 0x9220, 0xffffffff, 0x00090008,
248 0x9224, 0xffffffff, 0x00070000,
249 0x9228, 0xffffffff, 0x00030002,
250 0x922c, 0xffffffff, 0x00050004,
251 0x9238, 0xffffffff, 0x00010006,
252 0x923c, 0xffffffff, 0x00090008,
253 0x9240, 0xffffffff, 0x00070000,
254 0x9244, 0xffffffff, 0x00030002,
255 0x9248, 0xffffffff, 0x00050004,
256 0x9254, 0xffffffff, 0x00010006,
257 0x9258, 0xffffffff, 0x00090008,
258 0x925c, 0xffffffff, 0x00070000,
259 0x9260, 0xffffffff, 0x00030002,
260 0x9264, 0xffffffff, 0x00050004,
261 0x9270, 0xffffffff, 0x00010006,
262 0x9274, 0xffffffff, 0x00090008,
263 0x9278, 0xffffffff, 0x00070000,
264 0x927c, 0xffffffff, 0x00030002,
265 0x9280, 0xffffffff, 0x00050004,
266 0x928c, 0xffffffff, 0x00010006,
267 0x9290, 0xffffffff, 0x00090008,
268 0x9294, 0xffffffff, 0x00000000,
269 0x929c, 0xffffffff, 0x00000001,
270 0x802c, 0xffffffff, 0xc0000000
271};
272
273static const u32 redwood_mgcg_init[] =
274{
275 0x802c, 0xffffffff, 0xc0000000,
276 0x5448, 0xffffffff, 0x00000100,
277 0x55e4, 0xffffffff, 0x00000100,
278 0x160c, 0xffffffff, 0x00000100,
279 0x5644, 0xffffffff, 0x00000100,
280 0xc164, 0xffffffff, 0x00000100,
281 0x8a18, 0xffffffff, 0x00000100,
282 0x897c, 0xffffffff, 0x06000100,
283 0x8b28, 0xffffffff, 0x00000100,
284 0x9144, 0xffffffff, 0x00000100,
285 0x9a60, 0xffffffff, 0x00000100,
286 0x9868, 0xffffffff, 0x00000100,
287 0x8d58, 0xffffffff, 0x00000100,
288 0x9510, 0xffffffff, 0x00000100,
289 0x949c, 0xffffffff, 0x00000100,
290 0x9654, 0xffffffff, 0x00000100,
291 0x9030, 0xffffffff, 0x00000100,
292 0x9034, 0xffffffff, 0x00000100,
293 0x9038, 0xffffffff, 0x00000100,
294 0x903c, 0xffffffff, 0x00000100,
295 0x9040, 0xffffffff, 0x00000100,
296 0xa200, 0xffffffff, 0x00000100,
297 0xa204, 0xffffffff, 0x00000100,
298 0xa208, 0xffffffff, 0x00000100,
299 0xa20c, 0xffffffff, 0x00000100,
300 0x971c, 0xffffffff, 0x00000100,
301 0x977c, 0xffffffff, 0x00000100,
302 0x3f80, 0xffffffff, 0x00000100,
303 0xa210, 0xffffffff, 0x00000100,
304 0xa214, 0xffffffff, 0x00000100,
305 0x4d8, 0xffffffff, 0x00000100,
306 0x9784, 0xffffffff, 0x00000100,
307 0x9698, 0xffffffff, 0x00000100,
308 0x4d4, 0xffffffff, 0x00000200,
309 0x30cc, 0xffffffff, 0x00000100,
310 0xd0c0, 0xffffffff, 0xff000100,
311 0x802c, 0xffffffff, 0x40000000,
312 0x915c, 0xffffffff, 0x00010000,
313 0x9160, 0xffffffff, 0x00030002,
314 0x9178, 0xffffffff, 0x00070000,
315 0x917c, 0xffffffff, 0x00030002,
316 0x9180, 0xffffffff, 0x00050004,
317 0x918c, 0xffffffff, 0x00010006,
318 0x9190, 0xffffffff, 0x00090008,
319 0x9194, 0xffffffff, 0x00070000,
320 0x9198, 0xffffffff, 0x00030002,
321 0x919c, 0xffffffff, 0x00050004,
322 0x91a8, 0xffffffff, 0x00010006,
323 0x91ac, 0xffffffff, 0x00090008,
324 0x91b0, 0xffffffff, 0x00070000,
325 0x91b4, 0xffffffff, 0x00030002,
326 0x91b8, 0xffffffff, 0x00050004,
327 0x91c4, 0xffffffff, 0x00010006,
328 0x91c8, 0xffffffff, 0x00090008,
329 0x91cc, 0xffffffff, 0x00070000,
330 0x91d0, 0xffffffff, 0x00030002,
331 0x91d4, 0xffffffff, 0x00050004,
332 0x91e0, 0xffffffff, 0x00010006,
333 0x91e4, 0xffffffff, 0x00090008,
334 0x91e8, 0xffffffff, 0x00000000,
335 0x91ec, 0xffffffff, 0x00070000,
336 0x91f0, 0xffffffff, 0x00030002,
337 0x91f4, 0xffffffff, 0x00050004,
338 0x9200, 0xffffffff, 0x00010006,
339 0x9204, 0xffffffff, 0x00090008,
340 0x9294, 0xffffffff, 0x00000000,
341 0x929c, 0xffffffff, 0x00000001,
342 0x802c, 0xffffffff, 0xc0000000
343};
344
345static const u32 cedar_golden_registers[] =
346{
347 0x3f90, 0xffff0000, 0xff000000,
348 0x9148, 0xffff0000, 0xff000000,
349 0x3f94, 0xffff0000, 0xff000000,
350 0x914c, 0xffff0000, 0xff000000,
351 0x9b7c, 0xffffffff, 0x00000000,
352 0x8a14, 0xffffffff, 0x00000007,
353 0x8b10, 0xffffffff, 0x00000000,
354 0x960c, 0xffffffff, 0x54763210,
355 0x88c4, 0xffffffff, 0x000000c2,
356 0x88d4, 0xffffffff, 0x00000000,
357 0x8974, 0xffffffff, 0x00000000,
358 0xc78, 0x00000080, 0x00000080,
359 0x5eb4, 0xffffffff, 0x00000002,
360 0x5e78, 0xffffffff, 0x001000f0,
361 0x6104, 0x01000300, 0x00000000,
362 0x5bc0, 0x00300000, 0x00000000,
363 0x7030, 0xffffffff, 0x00000011,
364 0x7c30, 0xffffffff, 0x00000011,
365 0x10830, 0xffffffff, 0x00000011,
366 0x11430, 0xffffffff, 0x00000011,
367 0xd02c, 0xffffffff, 0x08421000,
368 0x240c, 0xffffffff, 0x00000380,
369 0x8b24, 0xffffffff, 0x00ff0fff,
370 0x28a4c, 0x06000000, 0x06000000,
371 0x10c, 0x00000001, 0x00000001,
372 0x8d00, 0xffffffff, 0x100e4848,
373 0x8d04, 0xffffffff, 0x00164745,
374 0x8c00, 0xffffffff, 0xe4000003,
375 0x8c04, 0xffffffff, 0x40600060,
376 0x8c08, 0xffffffff, 0x001c001c,
377 0x8cf0, 0xffffffff, 0x08e00410,
378 0x8c20, 0xffffffff, 0x00800080,
379 0x8c24, 0xffffffff, 0x00800080,
380 0x8c18, 0xffffffff, 0x20202078,
381 0x8c1c, 0xffffffff, 0x00001010,
382 0x28350, 0xffffffff, 0x00000000,
383 0xa008, 0xffffffff, 0x00010000,
384 0x5cc, 0xffffffff, 0x00000001,
385 0x9508, 0xffffffff, 0x00000002
386};
387
388static const u32 cedar_mgcg_init[] =
389{
390 0x802c, 0xffffffff, 0xc0000000,
391 0x5448, 0xffffffff, 0x00000100,
392 0x55e4, 0xffffffff, 0x00000100,
393 0x160c, 0xffffffff, 0x00000100,
394 0x5644, 0xffffffff, 0x00000100,
395 0xc164, 0xffffffff, 0x00000100,
396 0x8a18, 0xffffffff, 0x00000100,
397 0x897c, 0xffffffff, 0x06000100,
398 0x8b28, 0xffffffff, 0x00000100,
399 0x9144, 0xffffffff, 0x00000100,
400 0x9a60, 0xffffffff, 0x00000100,
401 0x9868, 0xffffffff, 0x00000100,
402 0x8d58, 0xffffffff, 0x00000100,
403 0x9510, 0xffffffff, 0x00000100,
404 0x949c, 0xffffffff, 0x00000100,
405 0x9654, 0xffffffff, 0x00000100,
406 0x9030, 0xffffffff, 0x00000100,
407 0x9034, 0xffffffff, 0x00000100,
408 0x9038, 0xffffffff, 0x00000100,
409 0x903c, 0xffffffff, 0x00000100,
410 0x9040, 0xffffffff, 0x00000100,
411 0xa200, 0xffffffff, 0x00000100,
412 0xa204, 0xffffffff, 0x00000100,
413 0xa208, 0xffffffff, 0x00000100,
414 0xa20c, 0xffffffff, 0x00000100,
415 0x971c, 0xffffffff, 0x00000100,
416 0x977c, 0xffffffff, 0x00000100,
417 0x3f80, 0xffffffff, 0x00000100,
418 0xa210, 0xffffffff, 0x00000100,
419 0xa214, 0xffffffff, 0x00000100,
420 0x4d8, 0xffffffff, 0x00000100,
421 0x9784, 0xffffffff, 0x00000100,
422 0x9698, 0xffffffff, 0x00000100,
423 0x4d4, 0xffffffff, 0x00000200,
424 0x30cc, 0xffffffff, 0x00000100,
425 0xd0c0, 0xffffffff, 0xff000100,
426 0x802c, 0xffffffff, 0x40000000,
427 0x915c, 0xffffffff, 0x00010000,
428 0x9178, 0xffffffff, 0x00050000,
429 0x917c, 0xffffffff, 0x00030002,
430 0x918c, 0xffffffff, 0x00010004,
431 0x9190, 0xffffffff, 0x00070006,
432 0x9194, 0xffffffff, 0x00050000,
433 0x9198, 0xffffffff, 0x00030002,
434 0x91a8, 0xffffffff, 0x00010004,
435 0x91ac, 0xffffffff, 0x00070006,
436 0x91e8, 0xffffffff, 0x00000000,
437 0x9294, 0xffffffff, 0x00000000,
438 0x929c, 0xffffffff, 0x00000001,
439 0x802c, 0xffffffff, 0xc0000000
440};
441
442static const u32 juniper_mgcg_init[] =
443{
444 0x802c, 0xffffffff, 0xc0000000,
445 0x5448, 0xffffffff, 0x00000100,
446 0x55e4, 0xffffffff, 0x00000100,
447 0x160c, 0xffffffff, 0x00000100,
448 0x5644, 0xffffffff, 0x00000100,
449 0xc164, 0xffffffff, 0x00000100,
450 0x8a18, 0xffffffff, 0x00000100,
451 0x897c, 0xffffffff, 0x06000100,
452 0x8b28, 0xffffffff, 0x00000100,
453 0x9144, 0xffffffff, 0x00000100,
454 0x9a60, 0xffffffff, 0x00000100,
455 0x9868, 0xffffffff, 0x00000100,
456 0x8d58, 0xffffffff, 0x00000100,
457 0x9510, 0xffffffff, 0x00000100,
458 0x949c, 0xffffffff, 0x00000100,
459 0x9654, 0xffffffff, 0x00000100,
460 0x9030, 0xffffffff, 0x00000100,
461 0x9034, 0xffffffff, 0x00000100,
462 0x9038, 0xffffffff, 0x00000100,
463 0x903c, 0xffffffff, 0x00000100,
464 0x9040, 0xffffffff, 0x00000100,
465 0xa200, 0xffffffff, 0x00000100,
466 0xa204, 0xffffffff, 0x00000100,
467 0xa208, 0xffffffff, 0x00000100,
468 0xa20c, 0xffffffff, 0x00000100,
469 0x971c, 0xffffffff, 0x00000100,
470 0xd0c0, 0xffffffff, 0xff000100,
471 0x802c, 0xffffffff, 0x40000000,
472 0x915c, 0xffffffff, 0x00010000,
473 0x9160, 0xffffffff, 0x00030002,
474 0x9178, 0xffffffff, 0x00070000,
475 0x917c, 0xffffffff, 0x00030002,
476 0x9180, 0xffffffff, 0x00050004,
477 0x918c, 0xffffffff, 0x00010006,
478 0x9190, 0xffffffff, 0x00090008,
479 0x9194, 0xffffffff, 0x00070000,
480 0x9198, 0xffffffff, 0x00030002,
481 0x919c, 0xffffffff, 0x00050004,
482 0x91a8, 0xffffffff, 0x00010006,
483 0x91ac, 0xffffffff, 0x00090008,
484 0x91b0, 0xffffffff, 0x00070000,
485 0x91b4, 0xffffffff, 0x00030002,
486 0x91b8, 0xffffffff, 0x00050004,
487 0x91c4, 0xffffffff, 0x00010006,
488 0x91c8, 0xffffffff, 0x00090008,
489 0x91cc, 0xffffffff, 0x00070000,
490 0x91d0, 0xffffffff, 0x00030002,
491 0x91d4, 0xffffffff, 0x00050004,
492 0x91e0, 0xffffffff, 0x00010006,
493 0x91e4, 0xffffffff, 0x00090008,
494 0x91e8, 0xffffffff, 0x00000000,
495 0x91ec, 0xffffffff, 0x00070000,
496 0x91f0, 0xffffffff, 0x00030002,
497 0x91f4, 0xffffffff, 0x00050004,
498 0x9200, 0xffffffff, 0x00010006,
499 0x9204, 0xffffffff, 0x00090008,
500 0x9208, 0xffffffff, 0x00070000,
501 0x920c, 0xffffffff, 0x00030002,
502 0x9210, 0xffffffff, 0x00050004,
503 0x921c, 0xffffffff, 0x00010006,
504 0x9220, 0xffffffff, 0x00090008,
505 0x9224, 0xffffffff, 0x00070000,
506 0x9228, 0xffffffff, 0x00030002,
507 0x922c, 0xffffffff, 0x00050004,
508 0x9238, 0xffffffff, 0x00010006,
509 0x923c, 0xffffffff, 0x00090008,
510 0x9240, 0xffffffff, 0x00070000,
511 0x9244, 0xffffffff, 0x00030002,
512 0x9248, 0xffffffff, 0x00050004,
513 0x9254, 0xffffffff, 0x00010006,
514 0x9258, 0xffffffff, 0x00090008,
515 0x925c, 0xffffffff, 0x00070000,
516 0x9260, 0xffffffff, 0x00030002,
517 0x9264, 0xffffffff, 0x00050004,
518 0x9270, 0xffffffff, 0x00010006,
519 0x9274, 0xffffffff, 0x00090008,
520 0x9278, 0xffffffff, 0x00070000,
521 0x927c, 0xffffffff, 0x00030002,
522 0x9280, 0xffffffff, 0x00050004,
523 0x928c, 0xffffffff, 0x00010006,
524 0x9290, 0xffffffff, 0x00090008,
525 0x9294, 0xffffffff, 0x00000000,
526 0x929c, 0xffffffff, 0x00000001,
527 0x802c, 0xffffffff, 0xc0000000,
528 0x977c, 0xffffffff, 0x00000100,
529 0x3f80, 0xffffffff, 0x00000100,
530 0xa210, 0xffffffff, 0x00000100,
531 0xa214, 0xffffffff, 0x00000100,
532 0x4d8, 0xffffffff, 0x00000100,
533 0x9784, 0xffffffff, 0x00000100,
534 0x9698, 0xffffffff, 0x00000100,
535 0x4d4, 0xffffffff, 0x00000200,
536 0x30cc, 0xffffffff, 0x00000100,
537 0x802c, 0xffffffff, 0xc0000000
538};
539
540static const u32 supersumo_golden_registers[] =
541{
542 0x5eb4, 0xffffffff, 0x00000002,
543 0x5cc, 0xffffffff, 0x00000001,
544 0x7030, 0xffffffff, 0x00000011,
545 0x7c30, 0xffffffff, 0x00000011,
546 0x6104, 0x01000300, 0x00000000,
547 0x5bc0, 0x00300000, 0x00000000,
548 0x8c04, 0xffffffff, 0x40600060,
549 0x8c08, 0xffffffff, 0x001c001c,
550 0x8c20, 0xffffffff, 0x00800080,
551 0x8c24, 0xffffffff, 0x00800080,
552 0x8c18, 0xffffffff, 0x20202078,
553 0x8c1c, 0xffffffff, 0x00001010,
554 0x918c, 0xffffffff, 0x00010006,
555 0x91a8, 0xffffffff, 0x00010006,
556 0x91c4, 0xffffffff, 0x00010006,
557 0x91e0, 0xffffffff, 0x00010006,
558 0x9200, 0xffffffff, 0x00010006,
559 0x9150, 0xffffffff, 0x6e944040,
560 0x917c, 0xffffffff, 0x00030002,
561 0x9180, 0xffffffff, 0x00050004,
562 0x9198, 0xffffffff, 0x00030002,
563 0x919c, 0xffffffff, 0x00050004,
564 0x91b4, 0xffffffff, 0x00030002,
565 0x91b8, 0xffffffff, 0x00050004,
566 0x91d0, 0xffffffff, 0x00030002,
567 0x91d4, 0xffffffff, 0x00050004,
568 0x91f0, 0xffffffff, 0x00030002,
569 0x91f4, 0xffffffff, 0x00050004,
570 0x915c, 0xffffffff, 0x00010000,
571 0x9160, 0xffffffff, 0x00030002,
572 0x3f90, 0xffff0000, 0xff000000,
573 0x9178, 0xffffffff, 0x00070000,
574 0x9194, 0xffffffff, 0x00070000,
575 0x91b0, 0xffffffff, 0x00070000,
576 0x91cc, 0xffffffff, 0x00070000,
577 0x91ec, 0xffffffff, 0x00070000,
578 0x9148, 0xffff0000, 0xff000000,
579 0x9190, 0xffffffff, 0x00090008,
580 0x91ac, 0xffffffff, 0x00090008,
581 0x91c8, 0xffffffff, 0x00090008,
582 0x91e4, 0xffffffff, 0x00090008,
583 0x9204, 0xffffffff, 0x00090008,
584 0x3f94, 0xffff0000, 0xff000000,
585 0x914c, 0xffff0000, 0xff000000,
586 0x929c, 0xffffffff, 0x00000001,
587 0x8a18, 0xffffffff, 0x00000100,
588 0x8b28, 0xffffffff, 0x00000100,
589 0x9144, 0xffffffff, 0x00000100,
590 0x5644, 0xffffffff, 0x00000100,
591 0x9b7c, 0xffffffff, 0x00000000,
592 0x8030, 0xffffffff, 0x0000100a,
593 0x8a14, 0xffffffff, 0x00000007,
594 0x8b24, 0xffffffff, 0x00ff0fff,
595 0x8b10, 0xffffffff, 0x00000000,
596 0x28a4c, 0x06000000, 0x06000000,
597 0x4d8, 0xffffffff, 0x00000100,
598 0x913c, 0xffff000f, 0x0100000a,
599 0x960c, 0xffffffff, 0x54763210,
600 0x88c4, 0xffffffff, 0x000000c2,
601 0x88d4, 0xffffffff, 0x00000010,
602 0x8974, 0xffffffff, 0x00000000,
603 0xc78, 0x00000080, 0x00000080,
604 0x5e78, 0xffffffff, 0x001000f0,
605 0xd02c, 0xffffffff, 0x08421000,
606 0xa008, 0xffffffff, 0x00010000,
607 0x8d00, 0xffffffff, 0x100e4848,
608 0x8d04, 0xffffffff, 0x00164745,
609 0x8c00, 0xffffffff, 0xe4000003,
610 0x8cf0, 0x1fffffff, 0x08e00620,
611 0x28350, 0xffffffff, 0x00000000,
612 0x9508, 0xffffffff, 0x00000002
613};
614
615static const u32 sumo_golden_registers[] =
616{
617 0x900c, 0x00ffffff, 0x0017071f,
618 0x8c18, 0xffffffff, 0x10101060,
619 0x8c1c, 0xffffffff, 0x00001010,
620 0x8c30, 0x0000000f, 0x00000005,
621 0x9688, 0x0000000f, 0x00000007
622};
623
624static const u32 wrestler_golden_registers[] =
625{
626 0x5eb4, 0xffffffff, 0x00000002,
627 0x5cc, 0xffffffff, 0x00000001,
628 0x7030, 0xffffffff, 0x00000011,
629 0x7c30, 0xffffffff, 0x00000011,
630 0x6104, 0x01000300, 0x00000000,
631 0x5bc0, 0x00300000, 0x00000000,
632 0x918c, 0xffffffff, 0x00010006,
633 0x91a8, 0xffffffff, 0x00010006,
634 0x9150, 0xffffffff, 0x6e944040,
635 0x917c, 0xffffffff, 0x00030002,
636 0x9198, 0xffffffff, 0x00030002,
637 0x915c, 0xffffffff, 0x00010000,
638 0x3f90, 0xffff0000, 0xff000000,
639 0x9178, 0xffffffff, 0x00070000,
640 0x9194, 0xffffffff, 0x00070000,
641 0x9148, 0xffff0000, 0xff000000,
642 0x9190, 0xffffffff, 0x00090008,
643 0x91ac, 0xffffffff, 0x00090008,
644 0x3f94, 0xffff0000, 0xff000000,
645 0x914c, 0xffff0000, 0xff000000,
646 0x929c, 0xffffffff, 0x00000001,
647 0x8a18, 0xffffffff, 0x00000100,
648 0x8b28, 0xffffffff, 0x00000100,
649 0x9144, 0xffffffff, 0x00000100,
650 0x9b7c, 0xffffffff, 0x00000000,
651 0x8030, 0xffffffff, 0x0000100a,
652 0x8a14, 0xffffffff, 0x00000001,
653 0x8b24, 0xffffffff, 0x00ff0fff,
654 0x8b10, 0xffffffff, 0x00000000,
655 0x28a4c, 0x06000000, 0x06000000,
656 0x4d8, 0xffffffff, 0x00000100,
657 0x913c, 0xffff000f, 0x0100000a,
658 0x960c, 0xffffffff, 0x54763210,
659 0x88c4, 0xffffffff, 0x000000c2,
660 0x88d4, 0xffffffff, 0x00000010,
661 0x8974, 0xffffffff, 0x00000000,
662 0xc78, 0x00000080, 0x00000080,
663 0x5e78, 0xffffffff, 0x001000f0,
664 0xd02c, 0xffffffff, 0x08421000,
665 0xa008, 0xffffffff, 0x00010000,
666 0x8d00, 0xffffffff, 0x100e4848,
667 0x8d04, 0xffffffff, 0x00164745,
668 0x8c00, 0xffffffff, 0xe4000003,
669 0x8cf0, 0x1fffffff, 0x08e00410,
670 0x28350, 0xffffffff, 0x00000000,
671 0x9508, 0xffffffff, 0x00000002,
672 0x900c, 0xffffffff, 0x0017071f,
673 0x8c18, 0xffffffff, 0x10101060,
674 0x8c1c, 0xffffffff, 0x00001010
675};
676
677static const u32 barts_golden_registers[] =
678{
679 0x5eb4, 0xffffffff, 0x00000002,
680 0x5e78, 0x8f311ff1, 0x001000f0,
681 0x3f90, 0xffff0000, 0xff000000,
682 0x9148, 0xffff0000, 0xff000000,
683 0x3f94, 0xffff0000, 0xff000000,
684 0x914c, 0xffff0000, 0xff000000,
685 0xc78, 0x00000080, 0x00000080,
686 0xbd4, 0x70073777, 0x00010001,
687 0xd02c, 0xbfffff1f, 0x08421000,
688 0xd0b8, 0x03773777, 0x02011003,
689 0x5bc0, 0x00200000, 0x50100000,
690 0x98f8, 0x33773777, 0x02011003,
691 0x98fc, 0xffffffff, 0x76543210,
692 0x7030, 0x31000311, 0x00000011,
693 0x2f48, 0x00000007, 0x02011003,
694 0x6b28, 0x00000010, 0x00000012,
695 0x7728, 0x00000010, 0x00000012,
696 0x10328, 0x00000010, 0x00000012,
697 0x10f28, 0x00000010, 0x00000012,
698 0x11b28, 0x00000010, 0x00000012,
699 0x12728, 0x00000010, 0x00000012,
700 0x240c, 0x000007ff, 0x00000380,
701 0x8a14, 0xf000001f, 0x00000007,
702 0x8b24, 0x3fff3fff, 0x00ff0fff,
703 0x8b10, 0x0000ff0f, 0x00000000,
704 0x28a4c, 0x07ffffff, 0x06000000,
705 0x10c, 0x00000001, 0x00010003,
706 0xa02c, 0xffffffff, 0x0000009b,
707 0x913c, 0x0000000f, 0x0100000a,
708 0x8d00, 0xffff7f7f, 0x100e4848,
709 0x8d04, 0x00ffffff, 0x00164745,
710 0x8c00, 0xfffc0003, 0xe4000003,
711 0x8c04, 0xf8ff00ff, 0x40600060,
712 0x8c08, 0x00ff00ff, 0x001c001c,
713 0x8cf0, 0x1fff1fff, 0x08e00620,
714 0x8c20, 0x0fff0fff, 0x00800080,
715 0x8c24, 0x0fff0fff, 0x00800080,
716 0x8c18, 0xffffffff, 0x20202078,
717 0x8c1c, 0x0000ffff, 0x00001010,
718 0x28350, 0x00000f01, 0x00000000,
719 0x9508, 0x3700001f, 0x00000002,
720 0x960c, 0xffffffff, 0x54763210,
721 0x88c4, 0x001f3ae3, 0x000000c2,
722 0x88d4, 0x0000001f, 0x00000010,
723 0x8974, 0xffffffff, 0x00000000
724};
725
726static const u32 turks_golden_registers[] =
727{
728 0x5eb4, 0xffffffff, 0x00000002,
729 0x5e78, 0x8f311ff1, 0x001000f0,
730 0x8c8, 0x00003000, 0x00001070,
731 0x8cc, 0x000fffff, 0x00040035,
732 0x3f90, 0xffff0000, 0xfff00000,
733 0x9148, 0xffff0000, 0xfff00000,
734 0x3f94, 0xffff0000, 0xfff00000,
735 0x914c, 0xffff0000, 0xfff00000,
736 0xc78, 0x00000080, 0x00000080,
737 0xbd4, 0x00073007, 0x00010002,
738 0xd02c, 0xbfffff1f, 0x08421000,
739 0xd0b8, 0x03773777, 0x02010002,
740 0x5bc0, 0x00200000, 0x50100000,
741 0x98f8, 0x33773777, 0x00010002,
742 0x98fc, 0xffffffff, 0x33221100,
743 0x7030, 0x31000311, 0x00000011,
744 0x2f48, 0x33773777, 0x00010002,
745 0x6b28, 0x00000010, 0x00000012,
746 0x7728, 0x00000010, 0x00000012,
747 0x10328, 0x00000010, 0x00000012,
748 0x10f28, 0x00000010, 0x00000012,
749 0x11b28, 0x00000010, 0x00000012,
750 0x12728, 0x00000010, 0x00000012,
751 0x240c, 0x000007ff, 0x00000380,
752 0x8a14, 0xf000001f, 0x00000007,
753 0x8b24, 0x3fff3fff, 0x00ff0fff,
754 0x8b10, 0x0000ff0f, 0x00000000,
755 0x28a4c, 0x07ffffff, 0x06000000,
756 0x10c, 0x00000001, 0x00010003,
757 0xa02c, 0xffffffff, 0x0000009b,
758 0x913c, 0x0000000f, 0x0100000a,
759 0x8d00, 0xffff7f7f, 0x100e4848,
760 0x8d04, 0x00ffffff, 0x00164745,
761 0x8c00, 0xfffc0003, 0xe4000003,
762 0x8c04, 0xf8ff00ff, 0x40600060,
763 0x8c08, 0x00ff00ff, 0x001c001c,
764 0x8cf0, 0x1fff1fff, 0x08e00410,
765 0x8c20, 0x0fff0fff, 0x00800080,
766 0x8c24, 0x0fff0fff, 0x00800080,
767 0x8c18, 0xffffffff, 0x20202078,
768 0x8c1c, 0x0000ffff, 0x00001010,
769 0x28350, 0x00000f01, 0x00000000,
770 0x9508, 0x3700001f, 0x00000002,
771 0x960c, 0xffffffff, 0x54763210,
772 0x88c4, 0x001f3ae3, 0x000000c2,
773 0x88d4, 0x0000001f, 0x00000010,
774 0x8974, 0xffffffff, 0x00000000
775};
776
777static const u32 caicos_golden_registers[] =
778{
779 0x5eb4, 0xffffffff, 0x00000002,
780 0x5e78, 0x8f311ff1, 0x001000f0,
781 0x8c8, 0x00003420, 0x00001450,
782 0x8cc, 0x000fffff, 0x00040035,
783 0x3f90, 0xffff0000, 0xfffc0000,
784 0x9148, 0xffff0000, 0xfffc0000,
785 0x3f94, 0xffff0000, 0xfffc0000,
786 0x914c, 0xffff0000, 0xfffc0000,
787 0xc78, 0x00000080, 0x00000080,
788 0xbd4, 0x00073007, 0x00010001,
789 0xd02c, 0xbfffff1f, 0x08421000,
790 0xd0b8, 0x03773777, 0x02010001,
791 0x5bc0, 0x00200000, 0x50100000,
792 0x98f8, 0x33773777, 0x02010001,
793 0x98fc, 0xffffffff, 0x33221100,
794 0x7030, 0x31000311, 0x00000011,
795 0x2f48, 0x33773777, 0x02010001,
796 0x6b28, 0x00000010, 0x00000012,
797 0x7728, 0x00000010, 0x00000012,
798 0x10328, 0x00000010, 0x00000012,
799 0x10f28, 0x00000010, 0x00000012,
800 0x11b28, 0x00000010, 0x00000012,
801 0x12728, 0x00000010, 0x00000012,
802 0x240c, 0x000007ff, 0x00000380,
803 0x8a14, 0xf000001f, 0x00000001,
804 0x8b24, 0x3fff3fff, 0x00ff0fff,
805 0x8b10, 0x0000ff0f, 0x00000000,
806 0x28a4c, 0x07ffffff, 0x06000000,
807 0x10c, 0x00000001, 0x00010003,
808 0xa02c, 0xffffffff, 0x0000009b,
809 0x913c, 0x0000000f, 0x0100000a,
810 0x8d00, 0xffff7f7f, 0x100e4848,
811 0x8d04, 0x00ffffff, 0x00164745,
812 0x8c00, 0xfffc0003, 0xe4000003,
813 0x8c04, 0xf8ff00ff, 0x40600060,
814 0x8c08, 0x00ff00ff, 0x001c001c,
815 0x8cf0, 0x1fff1fff, 0x08e00410,
816 0x8c20, 0x0fff0fff, 0x00800080,
817 0x8c24, 0x0fff0fff, 0x00800080,
818 0x8c18, 0xffffffff, 0x20202078,
819 0x8c1c, 0x0000ffff, 0x00001010,
820 0x28350, 0x00000f01, 0x00000000,
821 0x9508, 0x3700001f, 0x00000002,
822 0x960c, 0xffffffff, 0x54763210,
823 0x88c4, 0x001f3ae3, 0x000000c2,
824 0x88d4, 0x0000001f, 0x00000010,
825 0x8974, 0xffffffff, 0x00000000
826};
827
828static void evergreen_init_golden_registers(struct radeon_device *rdev)
829{
830 switch (rdev->family) {
831 case CHIP_CYPRESS:
832 case CHIP_HEMLOCK:
833 radeon_program_register_sequence(rdev,
834 evergreen_golden_registers,
835 (const u32)ARRAY_SIZE(evergreen_golden_registers));
836 radeon_program_register_sequence(rdev,
837 evergreen_golden_registers2,
838 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
839 radeon_program_register_sequence(rdev,
840 cypress_mgcg_init,
841 (const u32)ARRAY_SIZE(cypress_mgcg_init));
842 break;
843 case CHIP_JUNIPER:
844 radeon_program_register_sequence(rdev,
845 evergreen_golden_registers,
846 (const u32)ARRAY_SIZE(evergreen_golden_registers));
847 radeon_program_register_sequence(rdev,
848 evergreen_golden_registers2,
849 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
850 radeon_program_register_sequence(rdev,
851 juniper_mgcg_init,
852 (const u32)ARRAY_SIZE(juniper_mgcg_init));
853 break;
854 case CHIP_REDWOOD:
855 radeon_program_register_sequence(rdev,
856 evergreen_golden_registers,
857 (const u32)ARRAY_SIZE(evergreen_golden_registers));
858 radeon_program_register_sequence(rdev,
859 evergreen_golden_registers2,
860 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
861 radeon_program_register_sequence(rdev,
862 redwood_mgcg_init,
863 (const u32)ARRAY_SIZE(redwood_mgcg_init));
864 break;
865 case CHIP_CEDAR:
866 radeon_program_register_sequence(rdev,
867 cedar_golden_registers,
868 (const u32)ARRAY_SIZE(cedar_golden_registers));
869 radeon_program_register_sequence(rdev,
870 evergreen_golden_registers2,
871 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
872 radeon_program_register_sequence(rdev,
873 cedar_mgcg_init,
874 (const u32)ARRAY_SIZE(cedar_mgcg_init));
875 break;
876 case CHIP_PALM:
877 radeon_program_register_sequence(rdev,
878 wrestler_golden_registers,
879 (const u32)ARRAY_SIZE(wrestler_golden_registers));
880 break;
881 case CHIP_SUMO:
882 radeon_program_register_sequence(rdev,
883 supersumo_golden_registers,
884 (const u32)ARRAY_SIZE(supersumo_golden_registers));
885 break;
886 case CHIP_SUMO2:
887 radeon_program_register_sequence(rdev,
888 supersumo_golden_registers,
889 (const u32)ARRAY_SIZE(supersumo_golden_registers));
890 radeon_program_register_sequence(rdev,
891 sumo_golden_registers,
892 (const u32)ARRAY_SIZE(sumo_golden_registers));
893 break;
894 case CHIP_BARTS:
895 radeon_program_register_sequence(rdev,
896 barts_golden_registers,
897 (const u32)ARRAY_SIZE(barts_golden_registers));
898 break;
899 case CHIP_TURKS:
900 radeon_program_register_sequence(rdev,
901 turks_golden_registers,
902 (const u32)ARRAY_SIZE(turks_golden_registers));
903 break;
904 case CHIP_CAICOS:
905 radeon_program_register_sequence(rdev,
906 caicos_golden_registers,
907 (const u32)ARRAY_SIZE(caicos_golden_registers));
908 break;
909 default:
910 break;
911 }
912}
913
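Each table above is a flat array of (offset, and-mask, or-value) triples, sized only by ARRAY_SIZE. For reference, the radeon_program_register_sequence() helper that consumes them lives in radeon_device.c rather than in this hunk; a sketch of its shape around this kernel version (details may differ) is:

	void radeon_program_register_sequence(struct radeon_device *rdev,
	                                      const u32 *registers,
	                                      const u32 array_size)
	{
		u32 tmp, reg, and_mask, or_mask;
		int i;

		/* tables are (reg, and_mask, or_mask) triples */
		if (array_size % 3)
			return;

		for (i = 0; i < array_size; i += 3) {
			reg = registers[i + 0];
			and_mask = registers[i + 1];
			or_mask = registers[i + 2];

			if (and_mask == 0xffffffff) {
				/* full-width mask: plain overwrite */
				tmp = or_mask;
			} else {
				/* masked read-modify-write, preserving the other bits */
				tmp = RREG32(reg);
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}

This is why entries like "0x28a4c, 0x06000000, 0x06000000" only touch the two bits named by the mask, while 0xffffffff entries overwrite the whole register.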
56 914 void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
57 915 	unsigned *bankh, unsigned *mtaspect,
58 916 	unsigned *tile_split)
@@ -84,6 +942,142 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
84 942 	}
85 943 }
86 944
945static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
946 u32 cntl_reg, u32 status_reg)
947{
948 int r, i;
949 struct atom_clock_dividers dividers;
950
951 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
952 clock, false, &dividers);
953 if (r)
954 return r;
955
956 WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
957
958 for (i = 0; i < 100; i++) {
959 if (RREG32(status_reg) & DCLK_STATUS)
960 break;
961 mdelay(10);
962 }
963 if (i == 100)
964 return -ETIMEDOUT;
965
966 return 0;
967}
968
969int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
970{
971 int r = 0;
972 u32 cg_scratch = RREG32(CG_SCRATCH1);
973
974 r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
975 if (r)
976 goto done;
977 cg_scratch &= 0xffff0000;
978 	cg_scratch |= vclk / 100; /* MHz */
979
980 r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
981 if (r)
982 goto done;
983 cg_scratch &= 0x0000ffff;
984 	cg_scratch |= (dclk / 100) << 16; /* MHz */
985
986done:
987 WREG32(CG_SCRATCH1, cg_scratch);
988
989 return r;
990}
991
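A quick worked example of the CG_SCRATCH1 bookkeeping above: the clock arguments follow the driver's usual 10 kHz units, which is why vclk / 100 yields MHz as the comment says. With vclk = 54000 (540 MHz) and dclk = 40000 (400 MHz), the function leaves cg_scratch = (400 << 16) | 540 = 0x0190021c, caching the currently programmed UVD clocks in the scratch register, presumably so they can be read back later without re-deriving them from the PLL state.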
992int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
993{
994 /* start off with something large */
995 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
996 int r;
997
998 /* bypass vclk and dclk with bclk */
999 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1000 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
1001 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1002
1003 /* put PLL in bypass mode */
1004 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
1005
1006 if (!vclk || !dclk) {
1007 /* keep the Bypass mode, put PLL to sleep */
1008 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
1009 return 0;
1010 }
1011
1012 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
1013 16384, 0x03FFFFFF, 0, 128, 5,
1014 &fb_div, &vclk_div, &dclk_div);
1015 if (r)
1016 return r;
1017
1018 /* set VCO_MODE to 1 */
1019 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
1020
1021 /* toggle UPLL_SLEEP to 1 then back to 0 */
1022 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
1023 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
1024
1025 /* deassert UPLL_RESET */
1026 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1027
1028 mdelay(1);
1029
1030 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1031 if (r)
1032 return r;
1033
1034 /* assert UPLL_RESET again */
1035 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
1036
1037 /* disable spread spectrum. */
1038 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1039
1040 /* set feedback divider */
1041 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
1042
1043 /* set ref divider to 0 */
1044 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
1045
1046 if (fb_div < 307200)
1047 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
1048 else
1049 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
1050
1051 /* set PDIV_A and PDIV_B */
1052 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1053 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
1054 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
1055
1056 /* give the PLL some time to settle */
1057 mdelay(15);
1058
1059 /* deassert PLL_RESET */
1060 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1061
1062 mdelay(15);
1063
1064 /* switch from bypass mode to normal mode */
1065 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
1066
1067 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1068 if (r)
1069 return r;
1070
1071 /* switch VCLK and DCLK selection */
1072 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1073 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
1074 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1075
1076 mdelay(100);
1077
1078 return 0;
1079}
1080
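The UPLL sequence above leans on the WREG32_P(reg, val, mask) read-modify-write macro from radeon.h, whose third argument is the set of bits to keep — hence the ~FIELD_MASK spelling at every call site. Roughly, as defined around this kernel version:

	#define WREG32_P(reg, val, mask)			\
		do {						\
			uint32_t tmp_ = RREG32(reg);		\
			tmp_ &= (mask);		/* keep these bits   */	\
			tmp_ |= ((val) & ~(mask)); /* replace the rest */	\
			WREG32(reg, tmp_);			\
		} while (0)

So WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK) clears only the reset bit, and WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK) sets only the sleep bit, leaving the rest of the register untouched.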
87 1081 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
88 1082 {
89 1083 	u16 ctl, v;
@@ -105,6 +1099,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
105 1099 	}
106 1100 }
107 1101
1102static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
1103{
1104 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1105 return true;
1106 else
1107 return false;
1108}
1109
1110static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
1111{
1112 u32 pos1, pos2;
1113
1114 pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1115 pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1116
1117 if (pos1 != pos2)
1118 return true;
1119 else
1120 return false;
1121}
1122
108 1123 /**
109 1124  * dce4_wait_for_vblank - vblank wait asic callback.
110 1125  *
@@ -115,21 +1130,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
115 1130  */
116 1131 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
117 1132 {
118 	int i;
1133 	unsigned i = 0;
119 1134
120 1135 	if (crtc >= rdev->num_crtc)
121 1136 		return;
122 1137
123 	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
1138 	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
124 		for (i = 0; i < rdev->usec_timeout; i++) {
1139 		return;
125 			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
1140
1141 	/* depending on when we hit vblank, we may be close to active; if so,
1142 	 * wait for another frame.
1143 	 */
1144 	while (dce4_is_in_vblank(rdev, crtc)) {
1145 		if (i++ % 100 == 0) {
1146 			if (!dce4_is_counter_moving(rdev, crtc))
126 1147 				break;
127 			udelay(1);
128 1148 		}
129 		for (i = 0; i < rdev->usec_timeout; i++) {
1149 	}
130 			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1150
1151 	while (!dce4_is_in_vblank(rdev, crtc)) {
1152 		if (i++ % 100 == 0) {
1153 			if (!dce4_is_counter_moving(rdev, crtc))
131 1154 				break;
132 			udelay(1);
133 1155 		}
134 1156 	}
135 1157 }
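Note the change in termination strategy here: the old loops were bounded by rdev->usec_timeout with a udelay(1) per iteration, which could give up mid-vblank on modes with long blanking periods. The new loops spin until the vblank state actually changes, but every 100 iterations they check whether the CRTC's position counter is still advancing and bail out if it has frozen (e.g. the CRTC was disabled while we were waiting), so the wait can no longer wedge on a dead pipe.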
@@ -608,6 +1630,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
608 1630
609 1631 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
610 1632 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1633
1634 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
1635 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
1636 			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
1637 			 * aux dp channel on iMacs; this helps (but does not completely fix)
1638 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
1639 			 * and also avoids interrupt storms during dpms.
1640 			 */
1641 continue;
1642 }
611 1643 		switch (radeon_connector->hpd.hpd) {
612 1644 		case RADEON_HPD_1:
613 1645 			WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +2357,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1325 2357 			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1326 2358 			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1327 2359 				radeon_wait_for_vblank(rdev, i);
1328 				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1329 2360 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2361 				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1330 2362 				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1331 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1332 2363 			}
1333 2364 		} else {
1334 2365 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1335 2366 			if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1336 2367 				radeon_wait_for_vblank(rdev, i);
1337 				tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1338 2368 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2369 				tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1339 2370 				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1340 2371 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1341 2372 			}
@@ -1347,6 +2378,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1347 2378 				break;
1348 2379 			udelay(1);
1349 2380 		}
2381
2382 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2383 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2384 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2385 tmp &= ~EVERGREEN_CRTC_MASTER_EN;
2386 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2387 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2388 save->crtc_enabled[i] = false;
2389 /* ***** */
1350 2390 		} else {
1351 2391 			save->crtc_enabled[i] = false;
1352 2392 		}
@@ -1364,6 +2404,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1364 2404 	}
1365 2405 	/* wait for the MC to settle */
1366 2406 	udelay(100);
2407
2408 /* lock double buffered regs */
2409 for (i = 0; i < rdev->num_crtc; i++) {
2410 if (save->crtc_enabled[i]) {
2411 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2412 if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
2413 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
2414 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2415 }
2416 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2417 if (!(tmp & 1)) {
2418 tmp |= 1;
2419 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2420 }
2421 }
2422 }
1367 2423 }
1368 2424
1369 2425 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +2441,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1385 2441 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1386 2442 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1387 2443
2444 /* unlock regs and wait for update */
2445 for (i = 0; i < rdev->num_crtc; i++) {
2446 if (save->crtc_enabled[i]) {
2447 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2448 if ((tmp & 0x3) != 0) {
2449 tmp &= ~0x3;
2450 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2451 }
2452 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2453 if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
2454 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
2455 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2456 }
2457 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2458 if (tmp & 1) {
2459 tmp &= ~1;
2460 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2461 }
2462 for (j = 0; j < rdev->usec_timeout; j++) {
2463 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2464 if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
2465 break;
2466 udelay(1);
2467 }
2468 }
2469 }
2470
1388 2471 	/* unblackout the MC */
1389 2472 	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1390 2473 	tmp &= ~BLACKOUT_MODE_MASK;
@@ -2050,6 +3133,14 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2050 3133 	}
2051 3134 	/* enabled rb are just the one not disabled :) */
2052 3135 	disabled_rb_mask = tmp;
3136 tmp = 0;
3137 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3138 tmp |= (1 << i);
3139 /* if all the backends are disabled, fix it up here */
3140 if ((disabled_rb_mask & tmp) == tmp) {
3141 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3142 disabled_rb_mask &= ~(1 << i);
3143 }
2053 3144
2054 3145 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2055 3146 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -2058,6 +3149,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2058 3149 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2059 3150 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2060 3151 	WREG32(DMA_TILING_CONFIG, gb_addr_config);
3152 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3153 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3154 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
2061 3155
2062 3156 	if ((rdev->config.evergreen.max_backends == 1) &&
2063 3157 	    (rdev->flags & RADEON_IS_IGP)) {
@@ -3360,6 +4454,9 @@ restart_ih:
3360 4454 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3361 4455 			break;
3362 4456 		}
4457 case 124: /* UVD */
4458 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
4459 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
3363 4460 		break;
3364 4461 	case 146:
3365 4462 	case 147:
@@ -3571,7 +4668,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
3571 4668
3572 4669 static int evergreen_startup(struct radeon_device *rdev)
3573 4670 {
3574 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
4671 	struct radeon_ring *ring;
3575 4672 	int r;
3576 4673
3577 4674 	/* enable pcie gen2 link */
@@ -3638,6 +4735,17 @@ static int evergreen_startup(struct radeon_device *rdev)
3638 4735 		return r;
3639 4736 	}
3640 4737
4738 r = rv770_uvd_resume(rdev);
4739 if (!r) {
4740 r = radeon_fence_driver_start_ring(rdev,
4741 R600_RING_TYPE_UVD_INDEX);
4742 if (r)
4743 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
4744 }
4745
4746 if (r)
4747 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
4748
3641 4749 	/* Enable IRQ */
3642 4750 	r = r600_irq_init(rdev);
3643 4751 	if (r) {
@@ -3647,6 +4755,7 @@ static int evergreen_startup(struct radeon_device *rdev)
3647 4755 	}
3648 4756 	evergreen_irq_set(rdev);
3649 4757
4758 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3650 4759 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3651 4760 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3652 4761 			     0, 0xfffff, RADEON_CP_PACKET2);
@@ -3670,6 +4779,19 @@ static int evergreen_startup(struct radeon_device *rdev)
3670 4779 	if (r)
3671 4780 		return r;
3672 4781
4782 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
4783 if (ring->ring_size) {
4784 r = radeon_ring_init(rdev, ring, ring->ring_size,
4785 R600_WB_UVD_RPTR_OFFSET,
4786 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
4787 0, 0xfffff, RADEON_CP_PACKET2);
4788 if (!r)
4789 r = r600_uvd_init(rdev);
4790
4791 if (r)
4792 DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
4793 }
4794
3673 4795 	r = radeon_ib_pool_init(rdev);
3674 4796 	if (r) {
3675 4797 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3701,6 +4823,9 @@ int evergreen_resume(struct radeon_device *rdev)
3701 4823 	/* post card */
3702 4824 	atom_asic_init(rdev->mode_info.atom_context);
3703 4825
4826 /* init golden registers */
4827 evergreen_init_golden_registers(rdev);
4828
3704 4829 	rdev->accel_working = true;
3705 4830 	r = evergreen_startup(rdev);
3706 4831 	if (r) {
@@ -3716,8 +4841,10 @@ int evergreen_resume(struct radeon_device *rdev)
3716 4841 int evergreen_suspend(struct radeon_device *rdev)
3717 4842 {
3718 4843 	r600_audio_fini(rdev);
4844 	radeon_uvd_suspend(rdev);
3719 4845 	r700_cp_stop(rdev);
3720 4846 	r600_dma_stop(rdev);
4847 	r600_uvd_rbc_stop(rdev);
3721 4848 	evergreen_irq_suspend(rdev);
3722 4849 	radeon_wb_disable(rdev);
3723 4850 	evergreen_pcie_gart_disable(rdev);
@@ -3762,6 +4889,8 @@ int evergreen_init(struct radeon_device *rdev)
3762 4889 		DRM_INFO("GPU not posted. posting now...\n");
3763 4890 		atom_asic_init(rdev->mode_info.atom_context);
3764 4891 	}
4892 /* init golden registers */
4893 evergreen_init_golden_registers(rdev);
3765 4894 	/* Initialize scratch registers */
3766 4895 	r600_scratch_init(rdev);
3767 4896 	/* Initialize surface registers */
@@ -3797,6 +4926,13 @@ int evergreen_init(struct radeon_device *rdev)
3797 4926 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3798 4927 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3799 4928
4929 r = radeon_uvd_init(rdev);
4930 if (!r) {
4931 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
4932 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
4933 4096);
4934 }
4935
3800 4936 	rdev->ih.ring_obj = NULL;
3801 4937 	r600_ih_ring_init(rdev, 64 * 1024);
3802 4938
@@ -3843,6 +4979,7 @@ void evergreen_fini(struct radeon_device *rdev)
3843 4979 	radeon_ib_pool_fini(rdev);
3844 4980 	radeon_irq_kms_fini(rdev);
3845 4981 	evergreen_pcie_gart_fini(rdev);
4982 radeon_uvd_fini(rdev);
3846 4983 	r600_vram_scratch_fini(rdev);
3847 4984 	radeon_gem_fini(rdev);
3848 4985 	radeon_fence_driver_fini(rdev);
@@ -3878,7 +5015,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3878 5015 	if (!(mask & DRM_PCIE_SPEED_50))
3879 5016 		return;
3880 5017
3881 	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
5018 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3882 5019 	if (speed_cntl & LC_CURRENT_DATA_RATE) {
3883 5020 		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3884 5021 		return;
@@ -3889,33 +5026,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3889 5026 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3890 5027 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3891 5028
3892 		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
5029 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
3893 5030 		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3894 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
5031 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3895 5032
3896 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
5033 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3897 5034 		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3898 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
5035 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3899 5036
3900 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
5037 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3901 5038 		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3902 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
5039 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3903 5040
3904 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
5041 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3905 5042 		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3906 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
5043 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3907 5044
3908 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
5045 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3909 5046 		speed_cntl |= LC_GEN2_EN_STRAP;
3910 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
5047 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
3911 5048
3912 5049 	} else {
3913 		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
5050 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
3914 5051 		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3915 5052 		if (1)
3916 5053 			link_width_cntl |= LC_UPCONFIGURE_DIS;
3917 5054 		else
3918 5055 			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3919 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
5056 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3920 5057 	}
3921 5058 }
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 4fdecc2b4040..b4ab8ceb1654 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -54,6 +54,68 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
54 54 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
55 55 }
56 56
57static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
58{
59 struct radeon_device *rdev = encoder->dev->dev_private;
60 struct drm_connector *connector;
61 struct radeon_connector *radeon_connector = NULL;
62 struct cea_sad *sads;
63 int i, sad_count;
64
65 static const u16 eld_reg_to_type[][2] = {
66 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
67 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
68 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
69 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
70 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
71 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
72 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
73 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
74 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
75 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
76 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
77 { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
78 };
79
80 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
81 if (connector->encoder == encoder)
82 radeon_connector = to_radeon_connector(connector);
83 }
84
85 if (!radeon_connector) {
86 DRM_ERROR("Couldn't find encoder's connector\n");
87 return;
88 }
89
90 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
91 if (sad_count < 0) {
92 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
93 return;
94 }
95 BUG_ON(!sads);
96
97 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
98 u32 value = 0;
99 int j;
100
101 for (j = 0; j < sad_count; j++) {
102 struct cea_sad *sad = &sads[j];
103
104 if (sad->format == eld_reg_to_type[i][1]) {
105 value = MAX_CHANNELS(sad->channels) |
106 DESCRIPTOR_BYTE_2(sad->byte2) |
107 SUPPORTED_FREQUENCIES(sad->freq);
108 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
109 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
110 break;
111 }
112 }
113 WREG32(eld_reg_to_type[i][0], value);
114 }
115
116 kfree(sads);
117}
118
57 119 /*
58 120  * build a HDMI Video Info Frame
59 121  */
@@ -85,6 +147,30 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
85 147 		frame[0xC] | (frame[0xD] << 8));
86 148 }
87 149
150static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
151{
152 struct drm_device *dev = encoder->dev;
153 struct radeon_device *rdev = dev->dev_private;
154 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
155 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
156 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
157 u32 base_rate = 48000;
158
159 if (!dig || !dig->afmt)
160 return;
161
162 /* XXX: properly calculate this */
163 /* XXX two dtos; generally use dto0 for hdmi */
164 	/* Express [24MHz / target pixel clock] as an exact rational
165 	 * number (quotient of two integers). DCCG_AUDIO_DTOx_PHASE
166 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
167 */
168 WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
169 WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
170 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
171}
172
173
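To make the PHASE/MODULE encoding above concrete: mode->clock is in kHz, so both writes end up in units of 10 Hz. For a 74.25 MHz pixel clock (mode->clock == 74250), DCCG_AUDIO_DTO0_PHASE = 48000 * 50 = 2400000 (24 MHz) and DCCG_AUDIO_DTO0_MODULE = 74250 * 100 = 7425000 (74.25 MHz), so PHASE/MODULE is exactly the [24MHz / target pixel clock] ratio the comment describes; the hardware presumably scales the display clock by that fraction to derive the audio reference.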
88 174 /*
89 175  * update the info frames with the data from the current display mode
90 176  */
@@ -104,33 +190,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
104 190 		return;
105 191 	offset = dig->afmt->offset;
106 192
107 	r600_audio_set_clock(encoder, mode->clock);
193 	evergreen_audio_set_dto(encoder, mode->clock);
108 194
109 195 	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
110 196 	       HDMI_NULL_SEND); /* send null packets when required */
111 197
112 198 	WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
113 199
114 	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
115 	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
116 	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
117
118 	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
119 	       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
120 	       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
121
122 	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
123 	       HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
124 	       HDMI_ACR_SOURCE); /* select SW CTS value */
125
126 200 	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
127 201 	       HDMI_NULL_SEND | /* send null packets when required */
128 202 	       HDMI_GC_SEND | /* send general control packets */
129 203 	       HDMI_GC_CONT); /* send general control packets every frame */
130 204
131 205 	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
132 	       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
133 	       HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
134 206 	       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
135 207 	       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
136 208
@@ -138,11 +210,47 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
138 210 	       AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
139 211
140 212 	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
141 	       HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
142 213 	       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
143 214
144 215 	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
145 216
217 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
218 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
219 	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
220
221 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
222 AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
223
224 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
225
226 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
227 HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
228 HDMI_ACR_SOURCE); /* select SW CTS value */
229
230 evergreen_hdmi_update_ACR(encoder, mode->clock);
231
232 WREG32(AFMT_60958_0 + offset,
233 AFMT_60958_CS_CHANNEL_NUMBER_L(1));
234
235 WREG32(AFMT_60958_1 + offset,
236 AFMT_60958_CS_CHANNEL_NUMBER_R(2));
237
238 WREG32(AFMT_60958_2 + offset,
239 AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
240 AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
241 AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
242 AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
243 AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
244 AFMT_60958_CS_CHANNEL_NUMBER_7(8));
245
246 /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
247
248 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
249 AFMT_AUDIO_CHANNEL_ENABLE(0xff));
250
251 /* fglrx sets 0x40 in 0x5f80 here */
252 evergreen_hdmi_write_sad_regs(encoder);
253
146 254 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
147 255 	if (err < 0) {
148 256 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
@@ -156,7 +264,17 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
156 264 	}
157 265
158 266 	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
159 	evergreen_hdmi_update_ACR(encoder, mode->clock);
267
268 WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
269 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
270 HDMI_AVI_INFO_CONT); /* send AVI info frames every frame/field */
271
272 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
273 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
274 ~HDMI_AVI_INFO_LINE_MASK);
275
276 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
277 AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
160 278
161 /* it's unknown what these bits do exactly, but they're quite useful for debugging */ 279
162 WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); 280 WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
@@ -164,3 +282,20 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
164 WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); 282 WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
165 WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); 283 WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
166} 284}
285
286void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
287{
288 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
289 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
290
291 /* Stay silent here; r600_hdmi_enable will raise a WARN for us */
292 if (enable && dig->afmt->enabled)
293 return;
294 if (!enable && !dig->afmt->enabled)
295 return;
296
297 dig->afmt->enabled = enable;
298
299 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
300 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
301}
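
For reference, WREG32_OR() and WREG32_P(), which the hunks above lean on, are masked read-modify-write wrappers around the plain register accessors. A minimal sketch of their usual shape (the authoritative definitions live in radeon.h and may differ in detail):

	/* sketch: write (val) into the bits cleared by (mask), keep the rest */
	#define WREG32_P(reg, val, mask)                    \
		do {                                        \
			uint32_t tmp_ = RREG32(reg);        \
			tmp_ &= (mask);                     \
			tmp_ |= ((val) & ~(mask));          \
			WREG32(reg, tmp_);                  \
		} while (0)

	/* sketch: set bits without disturbing their neighbours */
	#define WREG32_OR(reg, or) WREG32_P(reg, (or), ~(or))

So the WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, ..., ~HDMI_AVI_INFO_LINE_MASK) call above rewrites only the 6-bit AVI line field and leaves the audio and MPEG line fields untouched.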
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f585be16e2d5..881aba23c477 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -226,6 +226,8 @@
226#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 226#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
227#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 227#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
228#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 228#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
229#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
230#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
229 231
230#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 232#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
231#define EVERGREEN_DC_GPIO_HPD_A 0x64b4 233#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 982d25ad9af3..75c05631146d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -53,6 +53,43 @@
53#define RCU_IND_INDEX 0x100 53#define RCU_IND_INDEX 0x100
54#define RCU_IND_DATA 0x104 54#define RCU_IND_DATA 0x104
55 55
56/* discrete uvd clocks */
57#define CG_UPLL_FUNC_CNTL 0x718
58# define UPLL_RESET_MASK 0x00000001
59# define UPLL_SLEEP_MASK 0x00000002
60# define UPLL_BYPASS_EN_MASK 0x00000004
61# define UPLL_CTLREQ_MASK 0x00000008
62# define UPLL_REF_DIV_MASK 0x003F0000
63# define UPLL_VCO_MODE_MASK 0x00000200
64# define UPLL_CTLACK_MASK 0x40000000
65# define UPLL_CTLACK2_MASK 0x80000000
66#define CG_UPLL_FUNC_CNTL_2 0x71c
67# define UPLL_PDIV_A(x) ((x) << 0)
68# define UPLL_PDIV_A_MASK 0x0000007F
69# define UPLL_PDIV_B(x) ((x) << 8)
70# define UPLL_PDIV_B_MASK 0x00007F00
71# define VCLK_SRC_SEL(x) ((x) << 20)
72# define VCLK_SRC_SEL_MASK 0x01F00000
73# define DCLK_SRC_SEL(x) ((x) << 25)
74# define DCLK_SRC_SEL_MASK 0x3E000000
75#define CG_UPLL_FUNC_CNTL_3 0x720
76# define UPLL_FB_DIV(x) ((x) << 0)
77# define UPLL_FB_DIV_MASK 0x01FFFFFF
78#define CG_UPLL_FUNC_CNTL_4 0x854
79# define UPLL_SPARE_ISPARE9 0x00020000
80#define CG_UPLL_SPREAD_SPECTRUM 0x79c
81# define SSEN_MASK 0x00000001
82
83/* fusion uvd clocks */
84#define CG_DCLK_CNTL 0x610
85# define DCLK_DIVIDER_MASK 0x7f
86# define DCLK_DIR_CNTL_EN (1 << 8)
87#define CG_DCLK_STATUS 0x614
88# define DCLK_STATUS (1 << 0)
89#define CG_VCLK_CNTL 0x618
90#define CG_VCLK_STATUS 0x61c
91#define CG_SCRATCH1 0x820
92
56#define GRBM_GFX_INDEX 0x802C 93#define GRBM_GFX_INDEX 0x802C
57#define INSTANCE_INDEX(x) ((x) << 0) 94#define INSTANCE_INDEX(x) ((x) << 0)
58#define SE_INDEX(x) ((x) << 16) 95#define SE_INDEX(x) ((x) << 16)
@@ -197,6 +234,7 @@
197# define HDMI_MPEG_INFO_CONT (1 << 9) 234# define HDMI_MPEG_INFO_CONT (1 << 9)
198#define HDMI_INFOFRAME_CONTROL1 0x7048 235#define HDMI_INFOFRAME_CONTROL1 0x7048
199# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) 236# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
237# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
200# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) 238# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
201# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) 239# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
202#define HDMI_GENERIC_PACKET_CONTROL 0x704c 240#define HDMI_GENERIC_PACKET_CONTROL 0x704c
@@ -992,6 +1030,16 @@
992# define TARGET_LINK_SPEED_MASK (0xf << 0) 1030# define TARGET_LINK_SPEED_MASK (0xf << 0)
993# define SELECTABLE_DEEMPHASIS (1 << 6) 1031# define SELECTABLE_DEEMPHASIS (1 << 6)
994 1032
1033
1034/*
1035 * UVD
1036 */
1037#define UVD_UDEC_ADDR_CONFIG 0xef4c
1038#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
1039#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
1040#define UVD_RBC_RB_RPTR 0xf690
1041#define UVD_RBC_RB_WPTR 0xf694
1042
995/* 1043/*
996 * PM4 1044 * PM4
997 */ 1045 */
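
As a usage sketch, the new UPLL bitfields decode with the usual mask-and-shift pattern; the helpers below are hypothetical and only illustrate the layout of the defines added above:

	/* hypothetical decoders for the discrete UVD clock registers */
	static u32 uvd_upll_fb_div(struct radeon_device *rdev)
	{
		/* feedback divider occupies the low 25 bits of CG_UPLL_FUNC_CNTL_3 */
		return RREG32(CG_UPLL_FUNC_CNTL_3) & UPLL_FB_DIV_MASK;
	}

	static u32 uvd_upll_ref_div(struct radeon_device *rdev)
	{
		/* reference divider sits in bits 16..21 of CG_UPLL_FUNC_CNTL */
		return (RREG32(CG_UPLL_FUNC_CNTL) & UPLL_REF_DIV_MASK) >> 16;
	}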
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 27769e724b6d..7969c0c8ec20 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -78,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
78MODULE_FIRMWARE("radeon/ARUBA_me.bin"); 78MODULE_FIRMWARE("radeon/ARUBA_me.bin");
79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); 79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
80 80
81
82static const u32 cayman_golden_registers2[] =
83{
84 0x3e5c, 0xffffffff, 0x00000000,
85 0x3e48, 0xffffffff, 0x00000000,
86 0x3e4c, 0xffffffff, 0x00000000,
87 0x3e64, 0xffffffff, 0x00000000,
88 0x3e50, 0xffffffff, 0x00000000,
89 0x3e60, 0xffffffff, 0x00000000
90};
91
92static const u32 cayman_golden_registers[] =
93{
94 0x5eb4, 0xffffffff, 0x00000002,
95 0x5e78, 0x8f311ff1, 0x001000f0,
96 0x3f90, 0xffff0000, 0xff000000,
97 0x9148, 0xffff0000, 0xff000000,
98 0x3f94, 0xffff0000, 0xff000000,
99 0x914c, 0xffff0000, 0xff000000,
100 0xc78, 0x00000080, 0x00000080,
101 0xbd4, 0x70073777, 0x00011003,
102 0xd02c, 0xbfffff1f, 0x08421000,
103 0xd0b8, 0x73773777, 0x02011003,
104 0x5bc0, 0x00200000, 0x50100000,
105 0x98f8, 0x33773777, 0x02011003,
106 0x98fc, 0xffffffff, 0x76541032,
107 0x7030, 0x31000311, 0x00000011,
108 0x2f48, 0x33773777, 0x42010001,
109 0x6b28, 0x00000010, 0x00000012,
110 0x7728, 0x00000010, 0x00000012,
111 0x10328, 0x00000010, 0x00000012,
112 0x10f28, 0x00000010, 0x00000012,
113 0x11b28, 0x00000010, 0x00000012,
114 0x12728, 0x00000010, 0x00000012,
115 0x240c, 0x000007ff, 0x00000000,
116 0x8a14, 0xf000001f, 0x00000007,
117 0x8b24, 0x3fff3fff, 0x00ff0fff,
118 0x8b10, 0x0000ff0f, 0x00000000,
119 0x28a4c, 0x07ffffff, 0x06000000,
120 0x10c, 0x00000001, 0x00010003,
121 0xa02c, 0xffffffff, 0x0000009b,
122 0x913c, 0x0000010f, 0x01000100,
123 0x8c04, 0xf8ff00ff, 0x40600060,
124 0x28350, 0x00000f01, 0x00000000,
125 0x9508, 0x3700001f, 0x00000002,
126 0x960c, 0xffffffff, 0x54763210,
127 0x88c4, 0x001f3ae3, 0x00000082,
128 0x88d0, 0xffffffff, 0x0f40df40,
129 0x88d4, 0x0000001f, 0x00000010,
130 0x8974, 0xffffffff, 0x00000000
131};
132
133static const u32 dvst_golden_registers2[] =
134{
135 0x8f8, 0xffffffff, 0,
136 0x8fc, 0x00380000, 0,
137 0x8f8, 0xffffffff, 1,
138 0x8fc, 0x0e000000, 0
139};
140
141static const u32 dvst_golden_registers[] =
142{
143 0x690, 0x3fff3fff, 0x20c00033,
144 0x918c, 0x0fff0fff, 0x00010006,
145 0x91a8, 0x0fff0fff, 0x00010006,
146 0x9150, 0xffffdfff, 0x6e944040,
147 0x917c, 0x0fff0fff, 0x00030002,
148 0x9198, 0x0fff0fff, 0x00030002,
149 0x915c, 0x0fff0fff, 0x00010000,
150 0x3f90, 0xffff0001, 0xff000000,
151 0x9178, 0x0fff0fff, 0x00070000,
152 0x9194, 0x0fff0fff, 0x00070000,
153 0x9148, 0xffff0001, 0xff000000,
154 0x9190, 0x0fff0fff, 0x00090008,
155 0x91ac, 0x0fff0fff, 0x00090008,
156 0x3f94, 0xffff0000, 0xff000000,
157 0x914c, 0xffff0000, 0xff000000,
158 0x929c, 0x00000fff, 0x00000001,
159 0x55e4, 0xff607fff, 0xfc000100,
160 0x8a18, 0xff000fff, 0x00000100,
161 0x8b28, 0xff000fff, 0x00000100,
162 0x9144, 0xfffc0fff, 0x00000100,
163 0x6ed8, 0x00010101, 0x00010000,
164 0x9830, 0xffffffff, 0x00000000,
165 0x9834, 0xf00fffff, 0x00000400,
166 0x9838, 0xfffffffe, 0x00000000,
167 0xd0c0, 0xff000fff, 0x00000100,
168 0xd02c, 0xbfffff1f, 0x08421000,
169 0xd0b8, 0x73773777, 0x12010001,
170 0x5bb0, 0x000000f0, 0x00000070,
171 0x98f8, 0x73773777, 0x12010001,
172 0x98fc, 0xffffffff, 0x00000010,
173 0x9b7c, 0x00ff0000, 0x00fc0000,
174 0x8030, 0x00001f0f, 0x0000100a,
175 0x2f48, 0x73773777, 0x12010001,
176 0x2408, 0x00030000, 0x000c007f,
177 0x8a14, 0xf000003f, 0x00000007,
178 0x8b24, 0x3fff3fff, 0x00ff0fff,
179 0x8b10, 0x0000ff0f, 0x00000000,
180 0x28a4c, 0x07ffffff, 0x06000000,
181 0x4d8, 0x00000fff, 0x00000100,
182 0xa008, 0xffffffff, 0x00010000,
183 0x913c, 0xffff03ff, 0x01000100,
184 0x8c00, 0x000000ff, 0x00000003,
185 0x8c04, 0xf8ff00ff, 0x40600060,
186 0x8cf0, 0x1fff1fff, 0x08e00410,
187 0x28350, 0x00000f01, 0x00000000,
188 0x9508, 0xf700071f, 0x00000002,
189 0x960c, 0xffffffff, 0x54763210,
190 0x20ef8, 0x01ff01ff, 0x00000002,
191 0x20e98, 0xfffffbff, 0x00200000,
192 0x2015c, 0xffffffff, 0x00000f40,
193 0x88c4, 0x001f3ae3, 0x00000082,
194 0x8978, 0x3fffffff, 0x04050140,
195 0x88d4, 0x0000001f, 0x00000010,
196 0x8974, 0xffffffff, 0x00000000
197};
198
199static const u32 scrapper_golden_registers[] =
200{
201 0x690, 0x3fff3fff, 0x20c00033,
202 0x918c, 0x0fff0fff, 0x00010006,
203 0x918c, 0x0fff0fff, 0x00010006,
204 0x91a8, 0x0fff0fff, 0x00010006,
205 0x91a8, 0x0fff0fff, 0x00010006,
206 0x9150, 0xffffdfff, 0x6e944040,
207 0x9150, 0xffffdfff, 0x6e944040,
208 0x917c, 0x0fff0fff, 0x00030002,
209 0x917c, 0x0fff0fff, 0x00030002,
210 0x9198, 0x0fff0fff, 0x00030002,
211 0x9198, 0x0fff0fff, 0x00030002,
212 0x915c, 0x0fff0fff, 0x00010000,
213 0x915c, 0x0fff0fff, 0x00010000,
214 0x3f90, 0xffff0001, 0xff000000,
215 0x3f90, 0xffff0001, 0xff000000,
216 0x9178, 0x0fff0fff, 0x00070000,
217 0x9178, 0x0fff0fff, 0x00070000,
218 0x9194, 0x0fff0fff, 0x00070000,
219 0x9194, 0x0fff0fff, 0x00070000,
220 0x9148, 0xffff0001, 0xff000000,
221 0x9148, 0xffff0001, 0xff000000,
222 0x9190, 0x0fff0fff, 0x00090008,
223 0x9190, 0x0fff0fff, 0x00090008,
224 0x91ac, 0x0fff0fff, 0x00090008,
225 0x91ac, 0x0fff0fff, 0x00090008,
226 0x3f94, 0xffff0000, 0xff000000,
227 0x3f94, 0xffff0000, 0xff000000,
228 0x914c, 0xffff0000, 0xff000000,
229 0x914c, 0xffff0000, 0xff000000,
230 0x929c, 0x00000fff, 0x00000001,
231 0x929c, 0x00000fff, 0x00000001,
232 0x55e4, 0xff607fff, 0xfc000100,
233 0x8a18, 0xff000fff, 0x00000100,
234 0x8a18, 0xff000fff, 0x00000100,
235 0x8b28, 0xff000fff, 0x00000100,
236 0x8b28, 0xff000fff, 0x00000100,
237 0x9144, 0xfffc0fff, 0x00000100,
238 0x9144, 0xfffc0fff, 0x00000100,
239 0x6ed8, 0x00010101, 0x00010000,
240 0x9830, 0xffffffff, 0x00000000,
241 0x9830, 0xffffffff, 0x00000000,
242 0x9834, 0xf00fffff, 0x00000400,
243 0x9834, 0xf00fffff, 0x00000400,
244 0x9838, 0xfffffffe, 0x00000000,
245 0x9838, 0xfffffffe, 0x00000000,
246 0xd0c0, 0xff000fff, 0x00000100,
247 0xd02c, 0xbfffff1f, 0x08421000,
248 0xd02c, 0xbfffff1f, 0x08421000,
249 0xd0b8, 0x73773777, 0x12010001,
250 0xd0b8, 0x73773777, 0x12010001,
251 0x5bb0, 0x000000f0, 0x00000070,
252 0x98f8, 0x73773777, 0x12010001,
253 0x98f8, 0x73773777, 0x12010001,
254 0x98fc, 0xffffffff, 0x00000010,
255 0x98fc, 0xffffffff, 0x00000010,
256 0x9b7c, 0x00ff0000, 0x00fc0000,
257 0x9b7c, 0x00ff0000, 0x00fc0000,
258 0x8030, 0x00001f0f, 0x0000100a,
259 0x8030, 0x00001f0f, 0x0000100a,
260 0x2f48, 0x73773777, 0x12010001,
261 0x2f48, 0x73773777, 0x12010001,
262 0x2408, 0x00030000, 0x000c007f,
263 0x8a14, 0xf000003f, 0x00000007,
264 0x8a14, 0xf000003f, 0x00000007,
265 0x8b24, 0x3fff3fff, 0x00ff0fff,
266 0x8b24, 0x3fff3fff, 0x00ff0fff,
267 0x8b10, 0x0000ff0f, 0x00000000,
268 0x8b10, 0x0000ff0f, 0x00000000,
269 0x28a4c, 0x07ffffff, 0x06000000,
270 0x28a4c, 0x07ffffff, 0x06000000,
271 0x4d8, 0x00000fff, 0x00000100,
272 0x4d8, 0x00000fff, 0x00000100,
273 0xa008, 0xffffffff, 0x00010000,
274 0xa008, 0xffffffff, 0x00010000,
275 0x913c, 0xffff03ff, 0x01000100,
276 0x913c, 0xffff03ff, 0x01000100,
277 0x90e8, 0x001fffff, 0x010400c0,
278 0x8c00, 0x000000ff, 0x00000003,
279 0x8c00, 0x000000ff, 0x00000003,
280 0x8c04, 0xf8ff00ff, 0x40600060,
281 0x8c04, 0xf8ff00ff, 0x40600060,
282 0x8c30, 0x0000000f, 0x00040005,
283 0x8cf0, 0x1fff1fff, 0x08e00410,
284 0x8cf0, 0x1fff1fff, 0x08e00410,
285 0x900c, 0x00ffffff, 0x0017071f,
286 0x28350, 0x00000f01, 0x00000000,
287 0x28350, 0x00000f01, 0x00000000,
288 0x9508, 0xf700071f, 0x00000002,
289 0x9508, 0xf700071f, 0x00000002,
290 0x9688, 0x00300000, 0x0017000f,
291 0x960c, 0xffffffff, 0x54763210,
292 0x960c, 0xffffffff, 0x54763210,
293 0x20ef8, 0x01ff01ff, 0x00000002,
294 0x20e98, 0xfffffbff, 0x00200000,
295 0x2015c, 0xffffffff, 0x00000f40,
296 0x88c4, 0x001f3ae3, 0x00000082,
297 0x88c4, 0x001f3ae3, 0x00000082,
298 0x8978, 0x3fffffff, 0x04050140,
299 0x8978, 0x3fffffff, 0x04050140,
300 0x88d4, 0x0000001f, 0x00000010,
301 0x88d4, 0x0000001f, 0x00000010,
302 0x8974, 0xffffffff, 0x00000000,
303 0x8974, 0xffffffff, 0x00000000
304};
305
306static void ni_init_golden_registers(struct radeon_device *rdev)
307{
308 switch (rdev->family) {
309 case CHIP_CAYMAN:
310 radeon_program_register_sequence(rdev,
311 cayman_golden_registers,
312 (const u32)ARRAY_SIZE(cayman_golden_registers));
313 radeon_program_register_sequence(rdev,
314 cayman_golden_registers2,
315 (const u32)ARRAY_SIZE(cayman_golden_registers2));
316 break;
317 case CHIP_ARUBA:
318 if ((rdev->pdev->device == 0x9900) ||
319 (rdev->pdev->device == 0x9901) ||
320 (rdev->pdev->device == 0x9903) ||
321 (rdev->pdev->device == 0x9904) ||
322 (rdev->pdev->device == 0x9905) ||
323 (rdev->pdev->device == 0x9906) ||
324 (rdev->pdev->device == 0x9907) ||
325 (rdev->pdev->device == 0x9908) ||
326 (rdev->pdev->device == 0x9909) ||
327 (rdev->pdev->device == 0x990A) ||
328 (rdev->pdev->device == 0x990B) ||
329 (rdev->pdev->device == 0x990C) ||
330 (rdev->pdev->device == 0x990D) ||
331 (rdev->pdev->device == 0x990E) ||
332 (rdev->pdev->device == 0x990F) ||
333 (rdev->pdev->device == 0x9910) ||
334 (rdev->pdev->device == 0x9913) ||
335 (rdev->pdev->device == 0x9917) ||
336 (rdev->pdev->device == 0x9918)) {
337 radeon_program_register_sequence(rdev,
338 dvst_golden_registers,
339 (const u32)ARRAY_SIZE(dvst_golden_registers));
340 radeon_program_register_sequence(rdev,
341 dvst_golden_registers2,
342 (const u32)ARRAY_SIZE(dvst_golden_registers2));
343 } else {
344 radeon_program_register_sequence(rdev,
345 scrapper_golden_registers,
346 (const u32)ARRAY_SIZE(scrapper_golden_registers));
347 radeon_program_register_sequence(rdev,
348 dvst_golden_registers2,
349 (const u32)ARRAY_SIZE(dvst_golden_registers2));
350 }
351 break;
352 default:
353 break;
354 }
355}
356
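
radeon_program_register_sequence() itself is not part of this diff; presumably it walks the arrays above as (reg, and_mask, or_mask) triplets and applies a read-modify-write, roughly like this sketch:

	/* sketch of the consumer of the golden-register triplets above */
	for (i = 0; i < array_size; i += 3) {
		u32 reg      = registers[i + 0];
		u32 and_mask = registers[i + 1];
		u32 or_mask  = registers[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff) {
			tmp = or_mask;          /* full overwrite */
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;       /* clear only the masked bits */
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}

Under this scheme the duplicated rows in scrapper_golden_registers[] are harmless, since the read-modify-write is idempotent.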
81#define BTC_IO_MC_REGS_SIZE 29 357#define BTC_IO_MC_REGS_SIZE 29
82 358
83static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { 359static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -473,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
473 (rdev->pdev->device == 0x990F) || 749 (rdev->pdev->device == 0x990F) ||
474 (rdev->pdev->device == 0x9910) || 750 (rdev->pdev->device == 0x9910) ||
475 (rdev->pdev->device == 0x9917) || 751 (rdev->pdev->device == 0x9917) ||
476 (rdev->pdev->device == 0x9999)) { 752 (rdev->pdev->device == 0x9999) ||
753 (rdev->pdev->device == 0x999C)) {
477 rdev->config.cayman.max_simds_per_se = 6; 754 rdev->config.cayman.max_simds_per_se = 6;
478 rdev->config.cayman.max_backends_per_se = 2; 755 rdev->config.cayman.max_backends_per_se = 2;
479 } else if ((rdev->pdev->device == 0x9903) || 756 } else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
482 (rdev->pdev->device == 0x990D) || 759 (rdev->pdev->device == 0x990D) ||
483 (rdev->pdev->device == 0x990E) || 760 (rdev->pdev->device == 0x990E) ||
484 (rdev->pdev->device == 0x9913) || 761 (rdev->pdev->device == 0x9913) ||
485 (rdev->pdev->device == 0x9918)) { 762 (rdev->pdev->device == 0x9918) ||
763 (rdev->pdev->device == 0x999D)) {
486 rdev->config.cayman.max_simds_per_se = 4; 764 rdev->config.cayman.max_simds_per_se = 4;
487 rdev->config.cayman.max_backends_per_se = 2; 765 rdev->config.cayman.max_backends_per_se = 2;
488 } else if ((rdev->pdev->device == 0x9919) || 766 } else if ((rdev->pdev->device == 0x9919) ||
@@ -615,15 +893,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
615 } 893 }
616 /* enabled RBs are just the ones not disabled :) */ 894
617 disabled_rb_mask = tmp; 895 disabled_rb_mask = tmp;
896 tmp = 0;
897 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
898 tmp |= (1 << i);
899 /* if all the backends are disabled, fix it up here */
900 if ((disabled_rb_mask & tmp) == tmp) {
901 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
902 disabled_rb_mask &= ~(1 << i);
903 }
618 904
619 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 905 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
620 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); 906 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
621 907
622 WREG32(GB_ADDR_CONFIG, gb_addr_config); 908 WREG32(GB_ADDR_CONFIG, gb_addr_config);
623 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 909 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
910 if (ASIC_IS_DCE6(rdev))
911 WREG32(DMIF_ADDR_CALC, gb_addr_config);
624 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 912 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
625 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 913 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
626 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 914 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
915 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
916 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
917 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
627 918
628 if ((rdev->config.cayman.max_backends_per_se == 1) && 919 if ((rdev->config.cayman.max_backends_per_se == 1) &&
629 (rdev->flags & RADEON_IS_IGP)) { 920 (rdev->flags & RADEON_IS_IGP)) {
@@ -931,6 +1222,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
931 radeon_ring_write(ring, 10); /* poll interval */ 1222 radeon_ring_write(ring, 10); /* poll interval */
932} 1223}
933 1224
1225void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
1226 struct radeon_ring *ring,
1227 struct radeon_semaphore *semaphore,
1228 bool emit_wait)
1229{
1230 uint64_t addr = semaphore->gpu_addr;
1231
1232 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
1233 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
1234
1235 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
1236 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
1237
1238 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
1239 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
1240}
1241
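
The semaphore address is split across two 20-bit register fields, skipping the low three bits (the address is 8-byte aligned). Illustrative arithmetic only:

	/* example: splitting a hypothetical semaphore->gpu_addr of 0x12345678 */
	uint64_t addr = 0x12345678ull;
	uint32_t low  = (addr >> 3)  & 0x000FFFFF;   /* 0x68ACF -> UVD_SEMA_ADDR_LOW  */
	uint32_t high = (addr >> 23) & 0x000FFFFF;   /* 0x00024 -> UVD_SEMA_ADDR_HIGH */

	/* the hardware can then rebuild the aligned address:
	 * ((uint64_t)high << 23) | ((uint64_t)low << 3) == 0x12345678
	 */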
934static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1242static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
935{ 1243{
936 if (enable) 1244 if (enable)
@@ -1682,6 +1990,16 @@ static int cayman_startup(struct radeon_device *rdev)
1682 return r; 1990 return r;
1683 } 1991 }
1684 1992
1993 r = rv770_uvd_resume(rdev);
1994 if (!r) {
1995 r = radeon_fence_driver_start_ring(rdev,
1996 R600_RING_TYPE_UVD_INDEX);
1997 if (r)
1998 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
1999 }
2000 if (r)
2001 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
2002
1685 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); 2003 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1686 if (r) { 2004 if (r) {
1687 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 2005 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1748,6 +2066,18 @@ static int cayman_startup(struct radeon_device *rdev)
1748 if (r) 2066 if (r)
1749 return r; 2067 return r;
1750 2068
2069 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2070 if (ring->ring_size) {
2071 r = radeon_ring_init(rdev, ring, ring->ring_size,
2072 R600_WB_UVD_RPTR_OFFSET,
2073 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
2074 0, 0xfffff, RADEON_CP_PACKET2);
2075 if (!r)
2076 r = r600_uvd_init(rdev);
2077 if (r)
2078 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2079 }
2080
1751 r = radeon_ib_pool_init(rdev); 2081 r = radeon_ib_pool_init(rdev);
1752 if (r) { 2082 if (r) {
1753 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2083 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1778,6 +2108,9 @@ int cayman_resume(struct radeon_device *rdev)
1778 /* post card */ 2108 /* post card */
1779 atom_asic_init(rdev->mode_info.atom_context); 2109 atom_asic_init(rdev->mode_info.atom_context);
1780 2110
2111 /* init golden registers */
2112 ni_init_golden_registers(rdev);
2113
1781 rdev->accel_working = true; 2114 rdev->accel_working = true;
1782 r = cayman_startup(rdev); 2115 r = cayman_startup(rdev);
1783 if (r) { 2116 if (r) {
@@ -1794,6 +2127,8 @@ int cayman_suspend(struct radeon_device *rdev)
1794 radeon_vm_manager_fini(rdev); 2127 radeon_vm_manager_fini(rdev);
1795 cayman_cp_enable(rdev, false); 2128 cayman_cp_enable(rdev, false);
1796 cayman_dma_stop(rdev); 2129 cayman_dma_stop(rdev);
2130 r600_uvd_rbc_stop(rdev);
2131 radeon_uvd_suspend(rdev);
1797 evergreen_irq_suspend(rdev); 2132 evergreen_irq_suspend(rdev);
1798 radeon_wb_disable(rdev); 2133 radeon_wb_disable(rdev);
1799 cayman_pcie_gart_disable(rdev); 2134 cayman_pcie_gart_disable(rdev);
@@ -1834,6 +2169,8 @@ int cayman_init(struct radeon_device *rdev)
1834 DRM_INFO("GPU not posted. posting now...\n"); 2169 DRM_INFO("GPU not posted. posting now...\n");
1835 atom_asic_init(rdev->mode_info.atom_context); 2170 atom_asic_init(rdev->mode_info.atom_context);
1836 } 2171 }
2172 /* init golden registers */
2173 ni_init_golden_registers(rdev);
1837 /* Initialize scratch registers */ 2174 /* Initialize scratch registers */
1838 r600_scratch_init(rdev); 2175 r600_scratch_init(rdev);
1839 /* Initialize surface registers */ 2176 /* Initialize surface registers */
@@ -1868,6 +2205,13 @@ int cayman_init(struct radeon_device *rdev)
1868 ring->ring_obj = NULL; 2205 ring->ring_obj = NULL;
1869 r600_ring_init(rdev, ring, 64 * 1024); 2206 r600_ring_init(rdev, ring, 64 * 1024);
1870 2207
2208 r = radeon_uvd_init(rdev);
2209 if (!r) {
2210 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2211 ring->ring_obj = NULL;
2212 r600_ring_init(rdev, ring, 4096);
2213 }
2214
1871 rdev->ih.ring_obj = NULL; 2215 rdev->ih.ring_obj = NULL;
1872 r600_ih_ring_init(rdev, 64 * 1024); 2216 r600_ih_ring_init(rdev, 64 * 1024);
1873 2217
@@ -1919,6 +2263,7 @@ void cayman_fini(struct radeon_device *rdev)
1919 radeon_vm_manager_fini(rdev); 2263 radeon_vm_manager_fini(rdev);
1920 radeon_ib_pool_fini(rdev); 2264 radeon_ib_pool_fini(rdev);
1921 radeon_irq_kms_fini(rdev); 2265 radeon_irq_kms_fini(rdev);
2266 radeon_uvd_fini(rdev);
1922 cayman_pcie_gart_fini(rdev); 2267 cayman_pcie_gart_fini(rdev);
1923 r600_vram_scratch_fini(rdev); 2268 r600_vram_scratch_fini(rdev);
1924 radeon_gem_fini(rdev); 2269 radeon_gem_fini(rdev);
@@ -2017,28 +2362,57 @@ void cayman_vm_set_page(struct radeon_device *rdev,
2017 } 2362 }
2018 } 2363 }
2019 } else { 2364 } else {
2020 while (count) { 2365 if ((flags & RADEON_VM_PAGE_SYSTEM) ||
2021 ndw = count * 2; 2366 (count == 1)) {
2022 if (ndw > 0xFFFFE) 2367 while (count) {
2023 ndw = 0xFFFFE; 2368 ndw = count * 2;
2369 if (ndw > 0xFFFFE)
2370 ndw = 0xFFFFE;
2371
2372 /* for non-physically contiguous pages (system) */
2373 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2374 ib->ptr[ib->length_dw++] = pe;
2375 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2376 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2377 if (flags & RADEON_VM_PAGE_SYSTEM) {
2378 value = radeon_vm_map_gart(rdev, addr);
2379 value &= 0xFFFFFFFFFFFFF000ULL;
2380 } else if (flags & RADEON_VM_PAGE_VALID) {
2381 value = addr;
2382 } else {
2383 value = 0;
2384 }
2385 addr += incr;
2386 value |= r600_flags;
2387 ib->ptr[ib->length_dw++] = value;
2388 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2389 }
2390 }
2391 while (ib->length_dw & 0x7)
2392 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
2393 } else {
2394 while (count) {
2395 ndw = count * 2;
2396 if (ndw > 0xFFFFE)
2397 ndw = 0xFFFFE;
2024 2398
2025 /* for non-physically contiguous pages (system) */ 2399 if (flags & RADEON_VM_PAGE_VALID)
2026 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2027 ib->ptr[ib->length_dw++] = pe;
2028 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2029 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2030 if (flags & RADEON_VM_PAGE_SYSTEM) {
2031 value = radeon_vm_map_gart(rdev, addr);
2032 value &= 0xFFFFFFFFFFFFF000ULL;
2033 } else if (flags & RADEON_VM_PAGE_VALID) {
2034 value = addr; 2400 value = addr;
2035 } else { 2401 else
2036 value = 0; 2402 value = 0;
2037 } 2403 /* for physically contiguous pages (vram) */
2038 addr += incr; 2404 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
2039 value |= r600_flags; 2405 ib->ptr[ib->length_dw++] = pe; /* dst addr */
2040 ib->ptr[ib->length_dw++] = value; 2406 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2407 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
2408 ib->ptr[ib->length_dw++] = 0;
2409 ib->ptr[ib->length_dw++] = value; /* value */
2041 ib->ptr[ib->length_dw++] = upper_32_bits(value); 2410 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2411 ib->ptr[ib->length_dw++] = incr; /* increment size */
2412 ib->ptr[ib->length_dw++] = 0;
2413 pe += ndw * 4;
2414 addr += (ndw / 2) * incr;
2415 count -= ndw / 2;
2042 } 2416 }
2043 } 2417 }
2044 while (ib->length_dw & 0x7) 2418 while (ib->length_dw & 0x7)
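
The payoff of the new contiguous-VRAM branch is packet density: one fixed 10-dword DMA_PTE_PDE_PACKET describes up to ndw/2 page table entries (base, mask, value, increment), while the old DMA_PACKET_WRITE path spells out every PTE. A rough dword budget, illustrative only:

	/* dword cost of mapping `count` contiguous pages in one packet */
	unsigned count        = 0x7FFFF;            /* max PTEs per packet (ndw = 0xFFFFE) */
	unsigned write_path   = 3 + 2 * count;      /* header + dst addr, 2 dwords per PTE */
	unsigned pte_pde_path = 10;                 /* fixed-size packet, hw derives the PTEs */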
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 079dee202a9e..e226faf16fea 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
46 46
47#define DMIF_ADDR_CONFIG 0xBD4 47#define DMIF_ADDR_CONFIG 0xBD4
48
49/* DCE6 only */
50#define DMIF_ADDR_CALC 0xC00
51
48#define SRBM_GFX_CNTL 0x0E44 52#define SRBM_GFX_CNTL 0x0E44
49#define RINGID(x) (((x) & 0x3) << 0) 53#define RINGID(x) (((x) & 0x3) << 0)
50#define VMID(x) (((x) & 0x7) << 0) 54#define VMID(x) (((x) & 0x7) << 0)
@@ -486,6 +490,18 @@
486# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) 490# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
487 491
488/* 492/*
493 * UVD
494 */
495#define UVD_SEMA_ADDR_LOW 0xEF00
496#define UVD_SEMA_ADDR_HIGH 0xEF04
497#define UVD_SEMA_CMD 0xEF08
498#define UVD_UDEC_ADDR_CONFIG 0xEF4C
499#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
500#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
501#define UVD_RBC_RB_RPTR 0xF690
502#define UVD_RBC_RB_WPTR 0xF694
503
504/*
489 * PM4 505 * PM4
490 */ 506 */
491#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 507#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
@@ -668,6 +684,11 @@
668 (((vmid) & 0xF) << 20) | \ 684 (((vmid) & 0xF) << 20) | \
669 (((n) & 0xFFFFF) << 0)) 685 (((n) & 0xFFFFF) << 0))
670 686
687#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
688 (1 << 26) | \
689 (1 << 21) | \
690 (((n) & 0xFFFFF) << 0))
691
671/* async DMA Packet types */ 692/* async DMA Packet types */
672#define DMA_PACKET_WRITE 0x2 693#define DMA_PACKET_WRITE 0x2
673#define DMA_PACKET_COPY 0x3 694#define DMA_PACKET_COPY 0x3
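
Expanding the new macro by hand makes the packet header layout concrete; for example with n == 8:

	/* worked expansion of DMA_PTE_PDE_PACKET(8):
	 *   (2 << 28) | (1 << 26) | (1 << 21) | 8
	 * = 0x20000000 | 0x04000000 | 0x00200000 | 0x8
	 * = 0x24200008
	 */
	u32 header = DMA_PTE_PDE_PACKET(8);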
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9db58530be37..4973bff37fec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
69 * and others in some cases. 69 * and others in some cases.
70 */ 70 */
71 71
72static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
73{
74 if (crtc == 0) {
75 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
76 return true;
77 else
78 return false;
79 } else {
80 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
81 return true;
82 else
83 return false;
84 }
85}
86
87static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
88{
89 u32 vline1, vline2;
90
91 if (crtc == 0) {
92 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
93 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
94 } else {
95 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
96 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
97 }
98 if (vline1 != vline2)
99 return true;
100 else
101 return false;
102}
103
72/** 104/**
73 * r100_wait_for_vblank - vblank wait asic callback. 105 * r100_wait_for_vblank - vblank wait asic callback.
74 * 106 *
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
79 */ 111 */
80void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) 112void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
81{ 113{
82 int i; 114 unsigned i = 0;
83 115
84 if (crtc >= rdev->num_crtc) 116 if (crtc >= rdev->num_crtc)
85 return; 117 return;
86 118
87 if (crtc == 0) { 119 if (crtc == 0) {
88 if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { 120 if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
89 for (i = 0; i < rdev->usec_timeout; i++) { 121 return;
90 if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
91 break;
92 udelay(1);
93 }
94 for (i = 0; i < rdev->usec_timeout; i++) {
95 if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
96 break;
97 udelay(1);
98 }
99 }
100 } else { 122 } else {
101 if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) { 123 if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
102 for (i = 0; i < rdev->usec_timeout; i++) { 124 return;
103 if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)) 125 }
104 break; 126
105 udelay(1); 127 /* depending on when we hit vblank, we may be close to active; if so,
106 } 128 * wait for another frame.
107 for (i = 0; i < rdev->usec_timeout; i++) { 129 */
108 if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) 130 while (r100_is_in_vblank(rdev, crtc)) {
109 break; 131 if (i++ % 100 == 0) {
110 udelay(1); 132 if (!r100_is_counter_moving(rdev, crtc))
111 } 133 break;
134 }
135 }
136
137 while (!r100_is_in_vblank(rdev, crtc)) {
138 if (i++ % 100 == 0) {
139 if (!r100_is_counter_moving(rdev, crtc))
140 break;
112 } 141 }
113 } 142 }
114} 143}
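
A hedged usage note: callers are expected to invoke the wait unconditionally, and the new stall detection keeps that safe, e.g.:

	/* hypothetical caller: wait out the active scanout before touching the CRTC */
	r100_wait_for_vblank(rdev, radeon_crtc->crtc_id);
	/* returns immediately if the CRTC is disabled, and bails out of either
	 * loop if two reads of the vline counter come back identical, i.e. the
	 * timing generator is not actually running.
	 */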
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 865e2c9980db..60170ea5e3a2 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); 75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
76 76
77 for (i = 0; i < nr; ++i) { 77 for (i = 0; i < nr; ++i) {
78 if (DRM_COPY_FROM_USER_UNCHECKED 78 if (DRM_COPY_FROM_USER
79 (&box, &cmdbuf->boxes[n + i], sizeof(box))) { 79 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
80 DRM_ERROR("copy cliprect faulted\n"); 80 DRM_ERROR("copy cliprect faulted\n");
81 return -EFAULT; 81 return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index c0dc8d3ba0bb..1dd0d32993d5 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -358,7 +358,9 @@
358#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac 358#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
359#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 359#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
360 360
361#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
361#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 362#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
363#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
362 364
363/* master controls */ 365/* master controls */
364#define AVIVO_DC_CRTC_MASTER_EN 0x60f8 366#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0740db3fcd22..1a08008c978b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1145,7 +1145,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
1145 } 1145 }
1146 if (rdev->flags & RADEON_IS_AGP) { 1146 if (rdev->flags & RADEON_IS_AGP) {
1147 size_bf = mc->gtt_start; 1147 size_bf = mc->gtt_start;
1148 size_af = 0xFFFFFFFF - mc->gtt_end; 1148 size_af = mc->mc_mask - mc->gtt_end;
1149 if (size_bf > size_af) { 1149 if (size_bf > size_af) {
1150 if (mc->mc_vram_size > size_bf) { 1150 if (mc->mc_vram_size > size_bf) {
1151 dev_warn(rdev->dev, "limiting VRAM\n"); 1151 dev_warn(rdev->dev, "limiting VRAM\n");
@@ -2552,6 +2552,193 @@ void r600_dma_fini(struct radeon_device *rdev)
2552} 2552}
2553 2553
2554/* 2554/*
2555 * UVD
2556 */
2557int r600_uvd_rbc_start(struct radeon_device *rdev)
2558{
2559 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2560 uint64_t rptr_addr;
2561 uint32_t rb_bufsz, tmp;
2562 int r;
2563
2564 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
2565
2566 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
2567 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
2568 return -EINVAL;
2569 }
2570
2571 /* force RBC into idle state */
2572 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2573
2574 /* Set the write pointer delay */
2575 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
2576
2577 /* set the wb address */
2578 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
2579
2580 /* program the 4GB memory segment for rptr and ring buffer */
2581 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
2582 (0x7 << 16) | (0x1 << 31));
2583
2584 /* Initialize the ring buffer's read and write pointers */
2585 WREG32(UVD_RBC_RB_RPTR, 0x0);
2586
2587 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
2588 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
2589
2590 /* set the ring address */
2591 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2592
2593 /* Set ring buffer size */
2594 rb_bufsz = drm_order(ring->ring_size);
2595 rb_bufsz = (0x1 << 8) | rb_bufsz;
2596 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2597
2598 ring->ready = true;
2599 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
2600 if (r) {
2601 ring->ready = false;
2602 return r;
2603 }
2604
2605 r = radeon_ring_lock(rdev, ring, 10);
2606 if (r) {
2607 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
2608 return r;
2609 }
2610
2611 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
2612 radeon_ring_write(ring, tmp);
2613 radeon_ring_write(ring, 0xFFFFF);
2614
2615 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
2616 radeon_ring_write(ring, tmp);
2617 radeon_ring_write(ring, 0xFFFFF);
2618
2619 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
2620 radeon_ring_write(ring, tmp);
2621 radeon_ring_write(ring, 0xFFFFF);
2622
2623 /* Clear timeout status bits */
2624 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
2625 radeon_ring_write(ring, 0x8);
2626
2627 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
2628 radeon_ring_write(ring, 3);
2629
2630 radeon_ring_unlock_commit(rdev, ring);
2631
2632 return 0;
2633}
2634
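
For the 4 KiB UVD ring created in cayman_init() later in this patch (r600_ring_init(rdev, ring, 4096)), the RB_CNTL programming works out as follows, assuming drm_order() is a ceiling log2:

	/* worked example: ring->ring_size == 4096 */
	rb_bufsz = drm_order(ring->ring_size);  /* 12 */
	rb_bufsz = (0x1 << 8) | rb_bufsz;       /* 0x10C: bit 8 set, size field = log2(size) */
	WREG32(UVD_RBC_RB_CNTL, rb_bufsz);      /* replaces the 0x11010101 idle value */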
2635void r600_uvd_rbc_stop(struct radeon_device *rdev)
2636{
2637 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2638
2639 /* force RBC into idle state */
2640 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2641 ring->ready = false;
2642}
2643
2644int r600_uvd_init(struct radeon_device *rdev)
2645{
2646 int i, j, r;
2647
2648 /* raise clocks while booting up the VCPU */
2649 radeon_set_uvd_clocks(rdev, 53300, 40000);
2650
2651 /* disable clock gating */
2652 WREG32(UVD_CGC_GATE, 0);
2653
2654 /* disable interrupts */
2655 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2656
2657 /* put LMI, VCPU, RBC etc... into reset */
2658 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2659 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
2660 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
2661 mdelay(5);
2662
2663 /* take UVD block out of reset */
2664 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
2665 mdelay(5);
2666
2667 /* initialize UVD memory controller */
2668 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2669 (1 << 21) | (1 << 9) | (1 << 20));
2670
2671 /* disable byte swapping */
2672 WREG32(UVD_LMI_SWAP_CNTL, 0);
2673 WREG32(UVD_MP_SWAP_CNTL, 0);
2674
2675 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2676 WREG32(UVD_MPC_SET_MUXA1, 0x0);
2677 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
2678 WREG32(UVD_MPC_SET_MUXB1, 0x0);
2679 WREG32(UVD_MPC_SET_ALU, 0);
2680 WREG32(UVD_MPC_SET_MUX, 0x88);
2681
2682 /* Stall UMC */
2683 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2684 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2685
2686 /* take all subblocks out of reset, except VCPU */
2687 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2688 mdelay(5);
2689
2690 /* enable VCPU clock */
2691 WREG32(UVD_VCPU_CNTL, 1 << 9);
2692
2693 /* enable UMC */
2694 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2695
2696 /* boot up the VCPU */
2697 WREG32(UVD_SOFT_RESET, 0);
2698 mdelay(10);
2699
2700 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2701
2702 for (i = 0; i < 10; ++i) {
2703 uint32_t status;
2704 for (j = 0; j < 100; ++j) {
2705 status = RREG32(UVD_STATUS);
2706 if (status & 2)
2707 break;
2708 mdelay(10);
2709 }
2710 r = 0;
2711 if (status & 2)
2712 break;
2713
2714 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
2715 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
2716 mdelay(10);
2717 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
2718 mdelay(10);
2719 r = -1;
2720 }
2721
2722 if (r) {
2723 DRM_ERROR("UVD not responding, giving up!!!\n");
2724 radeon_set_uvd_clocks(rdev, 0, 0);
2725 return r;
2726 }
2727
2728 /* enable interrupts */
2729 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
2730
2731 r = r600_uvd_rbc_start(rdev);
2732 if (!r)
2733 DRM_INFO("UVD initialized successfully.\n");
2734
2735 /* lower clocks again */
2736 radeon_set_uvd_clocks(rdev, 0, 0);
2737
2738 return r;
2739}
2740
2741/*
2555 * GPU scratch registers helpers function. 2742 * GPU scratch registers helpers function.
2556 */ 2743 */
2557void r600_scratch_init(struct radeon_device *rdev) 2744void r600_scratch_init(struct radeon_device *rdev)
@@ -2660,6 +2847,40 @@ int r600_dma_ring_test(struct radeon_device *rdev,
2660 return r; 2847 return r;
2661} 2848}
2662 2849
2850int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2851{
2852 uint32_t tmp = 0;
2853 unsigned i;
2854 int r;
2855
2856 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
2857 r = radeon_ring_lock(rdev, ring, 3);
2858 if (r) {
2859 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
2860 ring->idx, r);
2861 return r;
2862 }
2863 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2864 radeon_ring_write(ring, 0xDEADBEEF);
2865 radeon_ring_unlock_commit(rdev, ring);
2866 for (i = 0; i < rdev->usec_timeout; i++) {
2867 tmp = RREG32(UVD_CONTEXT_ID);
2868 if (tmp == 0xDEADBEEF)
2869 break;
2870 DRM_UDELAY(1);
2871 }
2872
2873 if (i < rdev->usec_timeout) {
2874 DRM_INFO("ring test on %d succeeded in %d usecs\n",
2875 ring->idx, i);
2876 } else {
2877 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2878 ring->idx, tmp);
2879 r = -EINVAL;
2880 }
2881 return r;
2882}
2883
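
A sketch of how this test is presumably reached, mirroring the CP and DMA ring tests (the actual wiring happens via the asic function tables, which are not shown in this diff):

	/* hypothetical caller: validate the UVD ring right after starting it */
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	int r;

	ring->ready = true;
	r = r600_uvd_ring_test(rdev, ring);
	if (r)
		ring->ready = false;    /* keep the ring fenced off on failure */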
2663/* 2884/*
2664 * CP fences/semaphores 2885 * CP fences/semaphores
2665 */ 2886 */
@@ -2711,6 +2932,30 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2711 } 2932 }
2712} 2933}
2713 2934
2935void r600_uvd_fence_emit(struct radeon_device *rdev,
2936 struct radeon_fence *fence)
2937{
2938 struct radeon_ring *ring = &rdev->ring[fence->ring];
2939 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
2940
2941 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2942 radeon_ring_write(ring, fence->seq);
2943 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2944 radeon_ring_write(ring, addr & 0xffffffff);
2945 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2946 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2947 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2948 radeon_ring_write(ring, 0);
2949
2950 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2951 radeon_ring_write(ring, 0);
2952 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2953 radeon_ring_write(ring, 0);
2954 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2955 radeon_ring_write(ring, 2);
2956 return;
2957}
2958
2714void r600_semaphore_ring_emit(struct radeon_device *rdev, 2959void r600_semaphore_ring_emit(struct radeon_device *rdev,
2715 struct radeon_ring *ring, 2960 struct radeon_ring *ring,
2716 struct radeon_semaphore *semaphore, 2961 struct radeon_semaphore *semaphore,
@@ -2780,6 +3025,23 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
2780 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 3025 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2781} 3026}
2782 3027
3028void r600_uvd_semaphore_emit(struct radeon_device *rdev,
3029 struct radeon_ring *ring,
3030 struct radeon_semaphore *semaphore,
3031 bool emit_wait)
3032{
3033 uint64_t addr = semaphore->gpu_addr;
3034
3035 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
3036 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
3037
3038 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
3039 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
3040
3041 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
3042 radeon_ring_write(ring, emit_wait ? 1 : 0);
3043}
3044
2783int r600_copy_blit(struct radeon_device *rdev, 3045int r600_copy_blit(struct radeon_device *rdev,
2784 uint64_t src_offset, 3046 uint64_t src_offset,
2785 uint64_t dst_offset, 3047 uint64_t dst_offset,
@@ -3183,6 +3445,16 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3183 radeon_ring_write(ring, ib->length_dw); 3445 radeon_ring_write(ring, ib->length_dw);
3184} 3446}
3185 3447
3448void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3449{
3450 struct radeon_ring *ring = &rdev->ring[ib->ring];
3451
3452 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
3453 radeon_ring_write(ring, ib->gpu_addr);
3454 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
3455 radeon_ring_write(ring, ib->length_dw);
3456}
3457
3186int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3458int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3187{ 3459{
3188 struct radeon_ib ib; 3460 struct radeon_ib ib;
@@ -3300,6 +3572,41 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3300 return r; 3572 return r;
3301} 3573}
3302 3574
3575int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3576{
3577 struct radeon_fence *fence = NULL;
3578 int r;
3579
3580 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
3581 if (r) {
3582 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
3583 return r;
3584 }
3585
3586 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
3587 if (r) {
3588 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
3589 goto error;
3590 }
3591
3592 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
3593 if (r) {
3594 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
3595 goto error;
3596 }
3597
3598 r = radeon_fence_wait(fence, false);
3599 if (r) {
3600 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3601 goto error;
3602 }
3603 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
3604error:
3605 radeon_fence_unref(&fence);
3606 radeon_set_uvd_clocks(rdev, 0, 0);
3607 return r;
3608}
3609
3303/** 3610/**
3304 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine 3611 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3305 * 3612 *
@@ -4232,7 +4539,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4232 4539
4233void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4540void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4234{ 4541{
4235 u32 link_width_cntl, mask, target_reg; 4542 u32 link_width_cntl, mask;
4236 4543
4237 if (rdev->flags & RADEON_IS_IGP) 4544 if (rdev->flags & RADEON_IS_IGP)
4238 return; 4545 return;
@@ -4244,7 +4551,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4244 if (ASIC_IS_X2(rdev)) 4551 if (ASIC_IS_X2(rdev))
4245 return; 4552 return;
4246 4553
4247 /* FIXME wait for idle */ 4554 radeon_gui_idle(rdev);
4248 4555
4249 switch (lanes) { 4556 switch (lanes) {
4250 case 0: 4557 case 0:
@@ -4263,53 +4570,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4263 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4570 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4264 break; 4571 break;
4265 case 12: 4572 case 12:
4573 /* not actually supported */
4266 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4574 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4267 break; 4575 break;
4268 case 16: 4576 case 16:
4269 default:
4270 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4577 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4271 break; 4578 break;
4272 } 4579 default:
4273 4580 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4274 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4275
4276 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
4277 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
4278 return;
4279
4280 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
4281 return; 4581 return;
4582 }
4282 4583
4283 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | 4584 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4284 RADEON_PCIE_LC_RECONFIG_NOW | 4585 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4285 R600_PCIE_LC_RENEGOTIATE_EN | 4586 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4286 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4587 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4287 link_width_cntl |= mask; 4588 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4288
4289 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4290
4291 /* some northbridges can renegotiate the link rather than requiring
4292 * a complete re-config.
4293 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
4294 */
4295 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
4296 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
4297 else
4298 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
4299
4300 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
4301 RADEON_PCIE_LC_RECONFIG_NOW));
4302
4303 if (rdev->family >= CHIP_RV770)
4304 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
4305 else
4306 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
4307
4308 /* wait for lane set to complete */
4309 link_width_cntl = RREG32(target_reg);
4310 while (link_width_cntl == 0xffffffff)
4311 link_width_cntl = RREG32(target_reg);
4312 4589
4590 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4313} 4591}
4314 4592
4315int r600_get_pcie_lanes(struct radeon_device *rdev) 4593int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4326,13 +4604,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
4326 if (ASIC_IS_X2(rdev)) 4604 if (ASIC_IS_X2(rdev))
4327 return 0; 4605 return 0;
4328 4606
4329 /* FIXME wait for idle */ 4607 radeon_gui_idle(rdev);
4330 4608
4331 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4609 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4332 4610
4333 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4611 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4334 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4335 return 0;
4336 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4612 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4337 return 1; 4613 return 1;
4338 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4614 case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4341,6 +4617,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
4341 return 4; 4617 return 4;
4342 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4618 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4343 return 8; 4619 return 8;
4620 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4621 /* not actually supported */
4622 return 12;
4623 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4344 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4624 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4345 default: 4625 default:
4346 return 16; 4626 return 16;
@@ -4378,7 +4658,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4378 if (!(mask & DRM_PCIE_SPEED_50)) 4658 if (!(mask & DRM_PCIE_SPEED_50))
4379 return; 4659 return;
4380 4660
4381 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4661 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4382 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4662 if (speed_cntl & LC_CURRENT_DATA_RATE) {
4383 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 4663 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4384 return; 4664 return;
@@ -4391,23 +4671,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4391 (rdev->family == CHIP_RV620) || 4671 (rdev->family == CHIP_RV620) ||
4392 (rdev->family == CHIP_RV635)) { 4672 (rdev->family == CHIP_RV635)) {
4393 /* advertise upconfig capability */ 4673 /* advertise upconfig capability */
4394 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4674 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4395 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4675 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4396 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4676 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4397 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4677 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4398 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4678 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4399 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4679 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4400 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4680 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4401 LC_RECONFIG_ARC_MISSING_ESCAPE); 4681 LC_RECONFIG_ARC_MISSING_ESCAPE);
4402 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4682 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4403 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4683 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4404 } else { 4684 } else {
4405 link_width_cntl |= LC_UPCONFIGURE_DIS; 4685 link_width_cntl |= LC_UPCONFIGURE_DIS;
4406 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4686 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4407 } 4687 }
4408 } 4688 }
4409 4689
4410 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4690 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4411 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4691 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4412 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4692 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4413 4693
@@ -4428,7 +4708,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4428 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4708 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4429 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4709 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4430 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4710 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4431 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4711 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4432 4712
4433 tmp = RREG32(0x541c); 4713 tmp = RREG32(0x541c);
4434 WREG32(0x541c, tmp | 0x8); 4714 WREG32(0x541c, tmp | 0x8);
@@ -4442,27 +4722,27 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4442 if ((rdev->family == CHIP_RV670) || 4722 if ((rdev->family == CHIP_RV670) ||
4443 (rdev->family == CHIP_RV620) || 4723 (rdev->family == CHIP_RV620) ||
4444 (rdev->family == CHIP_RV635)) { 4724 (rdev->family == CHIP_RV635)) {
4445 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); 4725 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4446 training_cntl &= ~LC_POINT_7_PLUS_EN; 4726 training_cntl &= ~LC_POINT_7_PLUS_EN;
4447 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); 4727 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4448 } else { 4728 } else {
4449 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4729 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4450 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4730 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4451 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4731 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4452 } 4732 }
4453 4733
4454 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4734 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4455 speed_cntl |= LC_GEN2_EN_STRAP; 4735 speed_cntl |= LC_GEN2_EN_STRAP;
4456 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4736 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4457 4737
4458 } else { 4738 } else {
4459 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4739 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4460 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4740 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4461 if (1) 4741 if (1)
4462 link_width_cntl |= LC_UPCONFIGURE_DIS; 4742 link_width_cntl |= LC_UPCONFIGURE_DIS;
4463 else 4743 else
4464 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4744 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4465 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4745 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4466 } 4746 }
4467} 4747}
4468 4748
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index cb03fe22b0ab..c92eb86a8e55 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev)) 60 return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
61 || rdev->family == CHIP_RS600
62 || rdev->family == CHIP_RS690
63 || rdev->family == CHIP_RS740;
64} 61}
65 62
66struct r600_audio r600_audio_status(struct radeon_device *rdev) 63struct r600_audio r600_audio_status(struct radeon_device *rdev)
@@ -184,65 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
184} 181}
185 182
186/* 183/*
187 * atach the audio codec to the clock source of the encoder
188 */
189void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
190{
191 struct drm_device *dev = encoder->dev;
192 struct radeon_device *rdev = dev->dev_private;
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
195 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
196 int base_rate = 48000;
197
198 switch (radeon_encoder->encoder_id) {
199 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
200 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
201 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
202 break;
203 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
204 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
205 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
206 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
207 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
208 break;
209 default:
210 dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
211 radeon_encoder->encoder_id);
212 return;
213 }
214
215 if (ASIC_IS_DCE4(rdev)) {
216 /* TODO: other PLLs? */
217 WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
218 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
219 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
220
221 /* Select DTO source */
222 WREG32(0x5ac, radeon_crtc->crtc_id);
223 } else {
224 switch (dig->dig_encoder) {
225 case 0:
226 WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
227 WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
228 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
229 break;
230
231 case 1:
232 WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
233 WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
234 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
235 break;
236 default:
237 dev_err(rdev->dev,
238 "Unsupported DIG on encoder 0x%02X\n",
239 radeon_encoder->encoder_id);
240 return;
241 }
242 }
243}
244
245/*
246 * release the audio timer 184 * release the audio timer
247 * TODO: How to do this correctly on SMP systems? 185 * TODO: How to do this correctly on SMP systems?
248 */ 186 */
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21ecc0e12dc4..47f180a79352 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,6 +226,39 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
226 value, ~HDMI0_AUDIO_TEST_EN); 226 value, ~HDMI0_AUDIO_TEST_EN);
227} 227}
228 228
229void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
230{
231 struct drm_device *dev = encoder->dev;
232 struct radeon_device *rdev = dev->dev_private;
233 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
234 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
235 u32 base_rate = 48000;
236
237 if (!dig || !dig->afmt)
238 return;
239
240 /* there are two DTOs, selected by DCCG_AUDIO_DTO_SELECT;
241 * it doesn't matter which one you use. Just use the first one.
242 */
243 /* XXX: properly calculate this */
244 /* XXX two dtos; generally use dto0 for hdmi */
245 /* Express [24MHz / target pixel clock] as an exact rational
246 * number (quotient of two integers). DCCG_AUDIO_DTOx_PHASE
247 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
248 */
249 if (ASIC_IS_DCE3(rdev)) {
250 /* according to the reg specs, this should be DCE3.2 only, but in
251 * practice it seems to cover DCE3.0 as well.
252 */
253 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
254 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
255 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
256 } else {
257 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
258 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
259 AUDIO_DTO_MODULE(clock * 100));
260 }
261}
229 262
230/* 263/*
231 * update the info frames with the data from the current display mode 264 * update the info frames with the data from the current display mode
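For reference, with mode->clock in kHz (the DRM convention), both register pairs written in r600_audio_set_dto() above encode the same ratio: PHASE/MODULE = (48000 * 50) / (clock * 100) = 24000 / clock, i.e. 24 MHz over the target pixel clock. A minimal standalone sketch of the arithmetic (not part of the patch):

	u32 clock = 148500;		/* pixel clock in kHz, e.g. 1080p60 */
	u32 base_rate = 48000;		/* audio base rate, as above */
	u32 phase  = base_rate * 50;	/* 2400000 */
	u32 module = clock * 100;	/* 14850000 */
	/* phase / module == 24000 / clock == 24 MHz / pixel clock (~0.1616 here) */
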
@@ -246,7 +279,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
246 return; 279 return;
247 offset = dig->afmt->offset; 280 offset = dig->afmt->offset;
248 281
249 r600_audio_set_clock(encoder, mode->clock); 282 r600_audio_set_dto(encoder, mode->clock);
250 283
251 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 284 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
252 HDMI0_NULL_SEND); /* send null packets when required */ 285 HDMI0_NULL_SEND); /* send null packets when required */
@@ -415,114 +448,73 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
415/* 448/*
416 * enable the HDMI engine 449 * enable the HDMI engine
417 */ 450 */
418void r600_hdmi_enable(struct drm_encoder *encoder) 451void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
419{ 452{
420 struct drm_device *dev = encoder->dev; 453 struct drm_device *dev = encoder->dev;
421 struct radeon_device *rdev = dev->dev_private; 454 struct radeon_device *rdev = dev->dev_private;
422 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 455 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
423 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 456 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
424 uint32_t offset; 457 u32 hdmi = HDMI0_ERROR_ACK;
425 u32 hdmi;
426
427 if (ASIC_IS_DCE6(rdev))
428 return;
429 458
430 /* Silent, r600_hdmi_enable will raise WARN for us */ 459 /* Silent, r600_hdmi_enable will raise WARN for us */
431 if (dig->afmt->enabled) 460 if (enable && dig->afmt->enabled)
461 return;
462 if (!enable && !dig->afmt->enabled)
432 return; 463 return;
433 offset = dig->afmt->offset;
434 464
435 /* Older chipsets require setting HDMI and routing manually */ 465 /* Older chipsets require setting HDMI and routing manually */
436 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 466 if (!ASIC_IS_DCE3(rdev)) {
437 hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE; 467 if (enable)
468 hdmi |= HDMI0_ENABLE;
438 switch (radeon_encoder->encoder_id) { 469 switch (radeon_encoder->encoder_id) {
439 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
440 WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN, 471 if (enable) {
441 ~AVIVO_TMDSA_CNTL_HDMI_EN); 472 WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
442 hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA); 473 hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
474 } else {
475 WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
476 }
443 break; 477 break;
444 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 478 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
445 WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN, 479 if (enable) {
446 ~AVIVO_LVTMA_CNTL_HDMI_EN); 480 WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
447 hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA); 481 hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
482 } else {
483 WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
484 }
448 break; 485 break;
449 case ENCODER_OBJECT_ID_INTERNAL_DDI: 486 case ENCODER_OBJECT_ID_INTERNAL_DDI:
450 WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN); 487 if (enable) {
451 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA); 488 WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
489 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
490 } else {
491 WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
492 }
452 break; 493 break;
453 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 494 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
454 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA); 495 if (enable)
496 hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
455 break; 497 break;
456 default: 498 default:
457 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n", 499 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
458 radeon_encoder->encoder_id); 500 radeon_encoder->encoder_id);
459 break; 501 break;
460 } 502 }
461 WREG32(HDMI0_CONTROL + offset, hdmi); 503 WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
462 } 504 }
463 505
464 if (rdev->irq.installed) { 506 if (rdev->irq.installed) {
465 /* if irq is available use it */ 507 /* if irq is available use it */
466 radeon_irq_kms_enable_afmt(rdev, dig->afmt->id); 508 /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
509 if (enable)
510 radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
511 else
512 radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
467 } 513 }
468 514
469 dig->afmt->enabled = true; 515 dig->afmt->enabled = enable;
470 516
471 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", 517 DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
472 offset, radeon_encoder->encoder_id); 518 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
473} 519}
474 520
475/*
476 * disable the HDMI engine
477 */
478void r600_hdmi_disable(struct drm_encoder *encoder)
479{
480 struct drm_device *dev = encoder->dev;
481 struct radeon_device *rdev = dev->dev_private;
482 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
484 uint32_t offset;
485
486 if (ASIC_IS_DCE6(rdev))
487 return;
488
489 /* Called for ATOM_ENCODER_MODE_HDMI only */
490 if (!dig || !dig->afmt) {
491 return;
492 }
493 if (!dig->afmt->enabled)
494 return;
495 offset = dig->afmt->offset;
496
497 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
498 offset, radeon_encoder->encoder_id);
499
500 /* disable irq */
501 radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
502
503 /* Older chipsets not handled by AtomBIOS */
504 if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
505 switch (radeon_encoder->encoder_id) {
506 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
507 WREG32_P(AVIVO_TMDSA_CNTL, 0,
508 ~AVIVO_TMDSA_CNTL_HDMI_EN);
509 break;
510 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
511 WREG32_P(AVIVO_LVTMA_CNTL, 0,
512 ~AVIVO_LVTMA_CNTL_HDMI_EN);
513 break;
514 case ENCODER_OBJECT_ID_INTERNAL_DDI:
515 WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
516 break;
517 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
518 break;
519 default:
520 dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
521 radeon_encoder->encoder_id);
522 break;
523 }
524 WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
525 }
526
527 dig->afmt->enabled = false;
528}
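The separate disable path above is folded into r600_hdmi_enable(encoder, enable), and callers are expected to reach it through the new per-ASIC display hooks rather than calling it directly. A sketch of the resulting call-site shape, using the radeon_hdmi_enable() wrapper this series adds to radeon.h (the real call sites live in the encoder code, which is not part of this diff):

	radeon_hdmi_enable(rdev, encoder, true);	/* was r600_hdmi_enable(encoder) */
	radeon_hdmi_enable(rdev, encoder, false);	/* was r600_hdmi_disable(encoder) */
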
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a42ba11a3bed..acb146c06973 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -691,6 +691,7 @@
691#define SRBM_SOFT_RESET 0xe60 691#define SRBM_SOFT_RESET 0xe60
692# define SOFT_RESET_DMA (1 << 12) 692# define SOFT_RESET_DMA (1 << 12)
693# define SOFT_RESET_RLC (1 << 13) 693# define SOFT_RESET_RLC (1 << 13)
694# define SOFT_RESET_UVD (1 << 18)
694# define RV770_SOFT_RESET_DMA (1 << 20) 695# define RV770_SOFT_RESET_DMA (1 << 20)
695 696
696#define CP_INT_CNTL 0xc124 697#define CP_INT_CNTL 0xc124
@@ -909,7 +910,12 @@
909# define TARGET_LINK_SPEED_MASK (0xf << 0) 910# define TARGET_LINK_SPEED_MASK (0xf << 0)
910# define SELECTABLE_DEEMPHASIS (1 << 6) 911# define SELECTABLE_DEEMPHASIS (1 << 6)
911 912
912/* Audio clocks */ 913/* Audio clocks DCE 2.0/3.0 */
914#define AUDIO_DTO 0x7340
915# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
916# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
917
918/* Audio clocks DCE 3.2 */
913#define DCCG_AUDIO_DTO0_PHASE 0x0514 919#define DCCG_AUDIO_DTO0_PHASE 0x0514
914#define DCCG_AUDIO_DTO0_MODULE 0x0518 920#define DCCG_AUDIO_DTO0_MODULE 0x0518
915#define DCCG_AUDIO_DTO0_LOAD 0x051c 921#define DCCG_AUDIO_DTO0_LOAD 0x051c
@@ -1143,6 +1149,70 @@
1143# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) 1149# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
1144 1150
1145/* 1151/*
1152 * UVD
1153 */
1154#define UVD_SEMA_ADDR_LOW 0xef00
1155#define UVD_SEMA_ADDR_HIGH 0xef04
1156#define UVD_SEMA_CMD 0xef08
1157
1158#define UVD_GPCOM_VCPU_CMD 0xef0c
1159#define UVD_GPCOM_VCPU_DATA0 0xef10
1160#define UVD_GPCOM_VCPU_DATA1 0xef14
1161#define UVD_ENGINE_CNTL 0xef18
1162
1163#define UVD_SEMA_CNTL 0xf400
1164#define UVD_RB_ARB_CTRL 0xf480
1165
1166#define UVD_LMI_EXT40_ADDR 0xf498
1167#define UVD_CGC_GATE 0xf4a8
1168#define UVD_LMI_CTRL2 0xf4f4
1169#define UVD_MASTINT_EN 0xf500
1170#define UVD_LMI_ADDR_EXT 0xf594
1171#define UVD_LMI_CTRL 0xf598
1172#define UVD_LMI_SWAP_CNTL 0xf5b4
1173#define UVD_MP_SWAP_CNTL 0xf5bC
1174#define UVD_MPC_CNTL 0xf5dC
1175#define UVD_MPC_SET_MUXA0 0xf5e4
1176#define UVD_MPC_SET_MUXA1 0xf5e8
1177#define UVD_MPC_SET_MUXB0 0xf5eC
1178#define UVD_MPC_SET_MUXB1 0xf5f0
1179#define UVD_MPC_SET_MUX 0xf5f4
1180#define UVD_MPC_SET_ALU 0xf5f8
1181
1182#define UVD_VCPU_CNTL 0xf660
1183#define UVD_SOFT_RESET 0xf680
1184#define RBC_SOFT_RESET (1<<0)
1185#define LBSI_SOFT_RESET (1<<1)
1186#define LMI_SOFT_RESET (1<<2)
1187#define VCPU_SOFT_RESET (1<<3)
1188#define CSM_SOFT_RESET (1<<5)
1189#define CXW_SOFT_RESET (1<<6)
1190#define TAP_SOFT_RESET (1<<7)
1191#define LMI_UMC_SOFT_RESET (1<<13)
1192#define UVD_RBC_IB_BASE 0xf684
1193#define UVD_RBC_IB_SIZE 0xf688
1194#define UVD_RBC_RB_BASE 0xf68c
1195#define UVD_RBC_RB_RPTR 0xf690
1196#define UVD_RBC_RB_WPTR 0xf694
1197#define UVD_RBC_RB_WPTR_CNTL 0xf698
1198
1199#define UVD_STATUS 0xf6bc
1200
1201#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
1202#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
1203#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
1204#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
1205
1206#define UVD_RBC_RB_CNTL 0xf6a4
1207#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
1208
1209#define UVD_CONTEXT_ID 0xf6f4
1210
1211# define UPLL_CTLREQ_MASK 0x00000008
1212# define UPLL_CTLACK_MASK 0x40000000
1213# define UPLL_CTLACK2_MASK 0x80000000
1214
1215/*
1146 * PM4 1216 * PM4
1147 */ 1217 */
1148#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 1218#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8263af3fd832..1442ce765d48 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -95,6 +95,7 @@ extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2; 95extern int radeon_pcie_gen2;
96extern int radeon_msi; 96extern int radeon_msi;
97extern int radeon_lockup_timeout; 97extern int radeon_lockup_timeout;
98extern int radeon_fastfb;
98 99
99/* 100/*
100 * Copy from radeon_drv.h so we don't have to include both and have conflicting 101 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -109,24 +110,27 @@ extern int radeon_lockup_timeout;
109#define RADEON_BIOS_NUM_SCRATCH 8 110#define RADEON_BIOS_NUM_SCRATCH 8
110 111
111/* max number of rings */ 112/* max number of rings */
112#define RADEON_NUM_RINGS 5 113#define RADEON_NUM_RINGS 6
113 114
114/* fence seq are set to this number when signaled */ 115/* fence seq are set to this number when signaled */
115#define RADEON_FENCE_SIGNALED_SEQ 0LL 116#define RADEON_FENCE_SIGNALED_SEQ 0LL
116 117
117/* internal ring indices */ 118/* internal ring indices */
118/* r1xx+ has gfx CP ring */ 119/* r1xx+ has gfx CP ring */
119#define RADEON_RING_TYPE_GFX_INDEX 0 120#define RADEON_RING_TYPE_GFX_INDEX 0
120 121
121/* cayman has 2 compute CP rings */ 122/* cayman has 2 compute CP rings */
122#define CAYMAN_RING_TYPE_CP1_INDEX 1 123#define CAYMAN_RING_TYPE_CP1_INDEX 1
123#define CAYMAN_RING_TYPE_CP2_INDEX 2 124#define CAYMAN_RING_TYPE_CP2_INDEX 2
124 125
125/* R600+ has an async dma ring */ 126/* R600+ has an async dma ring */
126#define R600_RING_TYPE_DMA_INDEX 3 127#define R600_RING_TYPE_DMA_INDEX 3
127/* cayman add a second async dma ring */ 128/* cayman add a second async dma ring */
128#define CAYMAN_RING_TYPE_DMA1_INDEX 4 129#define CAYMAN_RING_TYPE_DMA1_INDEX 4
129 130
131/* R600+ */
132#define R600_RING_TYPE_UVD_INDEX 5
133
130/* hardcode those limits for now */ 134
131#define RADEON_VA_IB_OFFSET (1 << 20) 135#define RADEON_VA_IB_OFFSET (1 << 20)
132#define RADEON_VA_RESERVED_SIZE (8 << 20) 136#define RADEON_VA_RESERVED_SIZE (8 << 20)
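The new index slots UVD submissions into the same per-ring dispatch tables as the CP and DMA rings. Assuming the rdev->ring[] array declared further down in radeon.h, looking up the UVD ring is then just:

	/* sketch: the UVD ring is addressed like any other ring */
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
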
@@ -202,6 +206,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
202void radeon_pm_resume(struct radeon_device *rdev); 206void radeon_pm_resume(struct radeon_device *rdev);
203void radeon_combios_get_power_modes(struct radeon_device *rdev); 207void radeon_combios_get_power_modes(struct radeon_device *rdev);
204void radeon_atombios_get_power_modes(struct radeon_device *rdev); 208void radeon_atombios_get_power_modes(struct radeon_device *rdev);
209int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
210 u8 clock_type,
211 u32 clock,
212 bool strobe_mode,
213 struct atom_clock_dividers *dividers);
205void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 214void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
206void rs690_pm_info(struct radeon_device *rdev); 215void rs690_pm_info(struct radeon_device *rdev);
207extern int rv6xx_get_temp(struct radeon_device *rdev); 216extern int rv6xx_get_temp(struct radeon_device *rdev);
@@ -349,7 +358,8 @@ struct radeon_bo {
349 struct radeon_device *rdev; 358 struct radeon_device *rdev;
350 struct drm_gem_object gem_base; 359 struct drm_gem_object gem_base;
351 360
352 struct ttm_bo_kmap_obj dma_buf_vmap; 361 struct ttm_bo_kmap_obj dma_buf_vmap;
362 pid_t pid;
353}; 363};
354#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 364#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
355 365
@@ -357,11 +367,14 @@ struct radeon_bo_list {
357 struct ttm_validate_buffer tv; 367 struct ttm_validate_buffer tv;
358 struct radeon_bo *bo; 368 struct radeon_bo *bo;
359 uint64_t gpu_offset; 369 uint64_t gpu_offset;
360 unsigned rdomain; 370 bool written;
361 unsigned wdomain; 371 unsigned domain;
372 unsigned alt_domain;
362 u32 tiling_flags; 373 u32 tiling_flags;
363}; 374};
364 375
376int radeon_gem_debugfs_init(struct radeon_device *rdev);
377
365/* sub-allocation manager, it has to be protected by another lock. 378/* sub-allocation manager, it has to be protected by another lock.
366 * By conception this is a helper for other parts of the driver 379
367 * like the indirect buffer or semaphore, which both have their 380 * like the indirect buffer or semaphore, which both have their
@@ -517,6 +530,7 @@ struct radeon_mc {
517 bool vram_is_ddr; 530 bool vram_is_ddr;
518 bool igp_sideport_enabled; 531 bool igp_sideport_enabled;
519 u64 gtt_base_align; 532 u64 gtt_base_align;
533 u64 mc_mask;
520}; 534};
521 535
522bool radeon_combios_sideport_present(struct radeon_device *rdev); 536bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -918,6 +932,7 @@ struct radeon_wb {
918#define R600_WB_DMA_RPTR_OFFSET 1792 932#define R600_WB_DMA_RPTR_OFFSET 1792
919#define R600_WB_IH_WPTR_OFFSET 2048 933#define R600_WB_IH_WPTR_OFFSET 2048
920#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 934#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
935#define R600_WB_UVD_RPTR_OFFSET 2560
921#define R600_WB_EVENT_OFFSET 3072 936#define R600_WB_EVENT_OFFSET 3072
922 937
923/** 938/**
@@ -1118,6 +1133,46 @@ struct radeon_pm {
1118int radeon_pm_get_type_index(struct radeon_device *rdev, 1133int radeon_pm_get_type_index(struct radeon_device *rdev,
1119 enum radeon_pm_state_type ps_type, 1134 enum radeon_pm_state_type ps_type,
1120 int instance); 1135 int instance);
1136/*
1137 * UVD
1138 */
1139#define RADEON_MAX_UVD_HANDLES 10
1140#define RADEON_UVD_STACK_SIZE (1024*1024)
1141#define RADEON_UVD_HEAP_SIZE (1024*1024)
1142
1143struct radeon_uvd {
1144 struct radeon_bo *vcpu_bo;
1145 void *cpu_addr;
1146 uint64_t gpu_addr;
1147 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1148 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1149 struct delayed_work idle_work;
1150};
1151
1152int radeon_uvd_init(struct radeon_device *rdev);
1153void radeon_uvd_fini(struct radeon_device *rdev);
1154int radeon_uvd_suspend(struct radeon_device *rdev);
1155int radeon_uvd_resume(struct radeon_device *rdev);
1156int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1157 uint32_t handle, struct radeon_fence **fence);
1158int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1159 uint32_t handle, struct radeon_fence **fence);
1160void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
1161void radeon_uvd_free_handles(struct radeon_device *rdev,
1162 struct drm_file *filp);
1163int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1164void radeon_uvd_note_usage(struct radeon_device *rdev);
1165int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1166 unsigned vclk, unsigned dclk,
1167 unsigned vco_min, unsigned vco_max,
1168 unsigned fb_factor, unsigned fb_mask,
1169 unsigned pd_min, unsigned pd_max,
1170 unsigned pd_even,
1171 unsigned *optimal_fb_div,
1172 unsigned *optimal_vclk_div,
1173 unsigned *optimal_dclk_div);
1174int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1175 unsigned cg_upll_func_cntl);
1121 1176
1122struct r600_audio { 1177struct r600_audio {
1123 int channels; 1178 int channels;
@@ -1229,6 +1284,9 @@ struct radeon_asic {
1229 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level); 1284 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1230 /* get backlight level */ 1285 /* get backlight level */
1231 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder); 1286 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1287 /* audio callbacks */
1288 void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
1289 void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
1232 } display; 1290 } display;
1233 /* copy functions for bo handling */ 1291 /* copy functions for bo handling */
1234 struct { 1292 struct {
@@ -1281,6 +1339,7 @@ struct radeon_asic {
1281 int (*get_pcie_lanes)(struct radeon_device *rdev); 1339 int (*get_pcie_lanes)(struct radeon_device *rdev);
1282 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 1340 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1283 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 1341 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1342 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1284 } pm; 1343 } pm;
1285 /* pageflipping */ 1344 /* pageflipping */
1286 struct { 1345 struct {
@@ -1443,6 +1502,7 @@ struct si_asic {
1443 unsigned multi_gpu_tile_size; 1502 unsigned multi_gpu_tile_size;
1444 1503
1445 unsigned tile_config; 1504 unsigned tile_config;
1505 uint32_t tile_mode_array[32];
1446}; 1506};
1447 1507
1448union radeon_asic_config { 1508union radeon_asic_config {
@@ -1608,6 +1668,7 @@ struct radeon_device {
1608 struct radeon_asic *asic; 1668 struct radeon_asic *asic;
1609 struct radeon_gem gem; 1669 struct radeon_gem gem;
1610 struct radeon_pm pm; 1670 struct radeon_pm pm;
1671 struct radeon_uvd uvd;
1611 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1672 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1612 struct radeon_wb wb; 1673 struct radeon_wb wb;
1613 struct radeon_dummy_page dummy_page; 1674 struct radeon_dummy_page dummy_page;
@@ -1615,12 +1676,14 @@ struct radeon_device {
1615 bool suspend; 1676 bool suspend;
1616 bool need_dma32; 1677 bool need_dma32;
1617 bool accel_working; 1678 bool accel_working;
1679 bool fastfb_working; /* IGP feature */
1618 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 1680 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
1619 const struct firmware *me_fw; /* all family ME firmware */ 1681 const struct firmware *me_fw; /* all family ME firmware */
1620 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1682 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1621 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1683 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1622 const struct firmware *mc_fw; /* NI MC firmware */ 1684 const struct firmware *mc_fw; /* NI MC firmware */
1623 const struct firmware *ce_fw; /* SI CE firmware */ 1685 const struct firmware *ce_fw; /* SI CE firmware */
1686 const struct firmware *uvd_fw; /* UVD firmware */
1624 struct r600_blit r600_blit; 1687 struct r600_blit r600_blit;
1625 struct r600_vram_scratch vram_scratch; 1688 struct r600_vram_scratch vram_scratch;
1626 int msi_enabled; /* msi enabled */ 1689 int msi_enabled; /* msi enabled */
@@ -1688,8 +1751,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1688#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1751#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
1689#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1752#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
1690#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1753#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1691#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) 1754#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
1692#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 1755#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
1693#define WREG32_P(reg, val, mask) \ 1756#define WREG32_P(reg, val, mask) \
1694 do { \ 1757 do { \
1695 uint32_t tmp_ = RREG32(reg); \ 1758 uint32_t tmp_ = RREG32(reg); \
@@ -1697,6 +1760,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1697 tmp_ |= ((val) & ~(mask)); \ 1760 tmp_ |= ((val) & ~(mask)); \
1698 WREG32(reg, tmp_); \ 1761 WREG32(reg, tmp_); \
1699 } while (0) 1762 } while (0)
1763#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1764#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
1700#define WREG32_PLL_P(reg, val, mask) \ 1765#define WREG32_PLL_P(reg, val, mask) \
1701 do { \ 1766 do { \
1702 uint32_t tmp_ = RREG32_PLL(reg); \ 1767 uint32_t tmp_ = RREG32_PLL(reg); \
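Expanding WREG32_P() as defined above, the two new helpers are plain read-modify-write set/clear operations: WREG32_OR(reg, bits) writes back RREG32(reg) | bits, and WREG32_AND(reg, mask) writes back RREG32(reg) & mask. That is what lets r600_hdmi_enable() replace its WREG32_P() pairs, e.g.:

	WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);		/* set HDMI_EN */
	WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);	/* clear HDMI_EN */
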
@@ -1830,6 +1895,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1830#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1895#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
1831#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) 1896#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
1832#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) 1897#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
1898#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
1899#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
1833#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 1900#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1834#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 1901#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
1835#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 1902#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1845,6 +1912,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1845#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) 1912#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
1846#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) 1913#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1847#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) 1914#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
1915#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
1848#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) 1916#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1849#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) 1917#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
1850#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) 1918#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1892,6 +1960,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
1892extern int radeon_resume_kms(struct drm_device *dev); 1960extern int radeon_resume_kms(struct drm_device *dev);
1893extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1961extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1894extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1962extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1963extern void radeon_program_register_sequence(struct radeon_device *rdev,
1964 const u32 *registers,
1965 const u32 array_size);
1895 1966
1896/* 1967/*
1897 * vm 1968 * vm
@@ -1964,9 +2035,6 @@ struct radeon_hdmi_acr {
1964 2035
1965extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); 2036extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1966 2037
1967extern void r600_hdmi_enable(struct drm_encoder *encoder);
1968extern void r600_hdmi_disable(struct drm_encoder *encoder);
1969extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1970extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, 2038extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1971 u32 tiling_pipe_num, 2039 u32 tiling_pipe_num,
1972 u32 max_rb_num, 2040 u32 max_rb_num,
@@ -1977,8 +2045,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1977 * evergreen functions used by radeon_encoder.c 2045 * evergreen functions used by radeon_encoder.c
1978 */ 2046 */
1979 2047
1980extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1981
1982extern int ni_init_microcode(struct radeon_device *rdev); 2048extern int ni_init_microcode(struct radeon_device *rdev);
1983extern int ni_mc_load_microcode(struct radeon_device *rdev); 2049extern int ni_mc_load_microcode(struct radeon_device *rdev);
1984 2050
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index aba0a893ea98..6417132c50cf 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -656,6 +656,8 @@ static struct radeon_asic rs600_asic = {
656 .wait_for_vblank = &avivo_wait_for_vblank, 656 .wait_for_vblank = &avivo_wait_for_vblank,
657 .set_backlight_level = &atombios_set_backlight_level, 657 .set_backlight_level = &atombios_set_backlight_level,
658 .get_backlight_level = &atombios_get_backlight_level, 658 .get_backlight_level = &atombios_get_backlight_level,
659 .hdmi_enable = &r600_hdmi_enable,
660 .hdmi_setmode = &r600_hdmi_setmode,
659 }, 661 },
660 .copy = { 662 .copy = {
661 .blit = &r100_copy_blit, 663 .blit = &r100_copy_blit,
@@ -732,6 +734,8 @@ static struct radeon_asic rs690_asic = {
732 .wait_for_vblank = &avivo_wait_for_vblank, 734 .wait_for_vblank = &avivo_wait_for_vblank,
733 .set_backlight_level = &atombios_set_backlight_level, 735 .set_backlight_level = &atombios_set_backlight_level,
734 .get_backlight_level = &atombios_get_backlight_level, 736 .get_backlight_level = &atombios_get_backlight_level,
737 .hdmi_enable = &r600_hdmi_enable,
738 .hdmi_setmode = &r600_hdmi_setmode,
735 }, 739 },
736 .copy = { 740 .copy = {
737 .blit = &r100_copy_blit, 741 .blit = &r100_copy_blit,
@@ -970,6 +974,8 @@ static struct radeon_asic r600_asic = {
970 .wait_for_vblank = &avivo_wait_for_vblank, 974 .wait_for_vblank = &avivo_wait_for_vblank,
971 .set_backlight_level = &atombios_set_backlight_level, 975 .set_backlight_level = &atombios_set_backlight_level,
972 .get_backlight_level = &atombios_get_backlight_level, 976 .get_backlight_level = &atombios_get_backlight_level,
977 .hdmi_enable = &r600_hdmi_enable,
978 .hdmi_setmode = &r600_hdmi_setmode,
973 }, 979 },
974 .copy = { 980 .copy = {
975 .blit = &r600_copy_blit, 981 .blit = &r600_copy_blit,
@@ -1056,6 +1062,8 @@ static struct radeon_asic rs780_asic = {
1056 .wait_for_vblank = &avivo_wait_for_vblank, 1062 .wait_for_vblank = &avivo_wait_for_vblank,
1057 .set_backlight_level = &atombios_set_backlight_level, 1063 .set_backlight_level = &atombios_set_backlight_level,
1058 .get_backlight_level = &atombios_get_backlight_level, 1064 .get_backlight_level = &atombios_get_backlight_level,
1065 .hdmi_enable = &r600_hdmi_enable,
1066 .hdmi_setmode = &r600_hdmi_setmode,
1059 }, 1067 },
1060 .copy = { 1068 .copy = {
1061 .blit = &r600_copy_blit, 1069 .blit = &r600_copy_blit,
@@ -1130,6 +1138,15 @@ static struct radeon_asic rv770_asic = {
1130 .ring_test = &r600_dma_ring_test, 1138 .ring_test = &r600_dma_ring_test,
1131 .ib_test = &r600_dma_ib_test, 1139 .ib_test = &r600_dma_ib_test,
1132 .is_lockup = &r600_dma_is_lockup, 1140 .is_lockup = &r600_dma_is_lockup,
1141 },
1142 [R600_RING_TYPE_UVD_INDEX] = {
1143 .ib_execute = &r600_uvd_ib_execute,
1144 .emit_fence = &r600_uvd_fence_emit,
1145 .emit_semaphore = &r600_uvd_semaphore_emit,
1146 .cs_parse = &radeon_uvd_cs_parse,
1147 .ring_test = &r600_uvd_ring_test,
1148 .ib_test = &r600_uvd_ib_test,
1149 .is_lockup = &radeon_ring_test_lockup,
1133 } 1150 }
1134 }, 1151 },
1135 .irq = { 1152 .irq = {
@@ -1142,6 +1159,8 @@ static struct radeon_asic rv770_asic = {
1142 .wait_for_vblank = &avivo_wait_for_vblank, 1159 .wait_for_vblank = &avivo_wait_for_vblank,
1143 .set_backlight_level = &atombios_set_backlight_level, 1160 .set_backlight_level = &atombios_set_backlight_level,
1144 .get_backlight_level = &atombios_get_backlight_level, 1161 .get_backlight_level = &atombios_get_backlight_level,
1162 .hdmi_enable = &r600_hdmi_enable,
1163 .hdmi_setmode = &r600_hdmi_setmode,
1145 }, 1164 },
1146 .copy = { 1165 .copy = {
1147 .blit = &r600_copy_blit, 1166 .blit = &r600_copy_blit,
@@ -1174,6 +1193,7 @@ static struct radeon_asic rv770_asic = {
1174 .get_pcie_lanes = &r600_get_pcie_lanes, 1193 .get_pcie_lanes = &r600_get_pcie_lanes,
1175 .set_pcie_lanes = &r600_set_pcie_lanes, 1194 .set_pcie_lanes = &r600_set_pcie_lanes,
1176 .set_clock_gating = &radeon_atom_set_clock_gating, 1195 .set_clock_gating = &radeon_atom_set_clock_gating,
1196 .set_uvd_clocks = &rv770_set_uvd_clocks,
1177 }, 1197 },
1178 .pflip = { 1198 .pflip = {
1179 .pre_page_flip = &rs600_pre_page_flip, 1199 .pre_page_flip = &rs600_pre_page_flip,
@@ -1216,6 +1236,15 @@ static struct radeon_asic evergreen_asic = {
1216 .ring_test = &r600_dma_ring_test, 1236 .ring_test = &r600_dma_ring_test,
1217 .ib_test = &r600_dma_ib_test, 1237 .ib_test = &r600_dma_ib_test,
1218 .is_lockup = &evergreen_dma_is_lockup, 1238 .is_lockup = &evergreen_dma_is_lockup,
1239 },
1240 [R600_RING_TYPE_UVD_INDEX] = {
1241 .ib_execute = &r600_uvd_ib_execute,
1242 .emit_fence = &r600_uvd_fence_emit,
1243 .emit_semaphore = &r600_uvd_semaphore_emit,
1244 .cs_parse = &radeon_uvd_cs_parse,
1245 .ring_test = &r600_uvd_ring_test,
1246 .ib_test = &r600_uvd_ib_test,
1247 .is_lockup = &radeon_ring_test_lockup,
1219 } 1248 }
1220 }, 1249 },
1221 .irq = { 1250 .irq = {
@@ -1228,6 +1257,8 @@ static struct radeon_asic evergreen_asic = {
1228 .wait_for_vblank = &dce4_wait_for_vblank, 1257 .wait_for_vblank = &dce4_wait_for_vblank,
1229 .set_backlight_level = &atombios_set_backlight_level, 1258 .set_backlight_level = &atombios_set_backlight_level,
1230 .get_backlight_level = &atombios_get_backlight_level, 1259 .get_backlight_level = &atombios_get_backlight_level,
1260 .hdmi_enable = &evergreen_hdmi_enable,
1261 .hdmi_setmode = &evergreen_hdmi_setmode,
1231 }, 1262 },
1232 .copy = { 1263 .copy = {
1233 .blit = &r600_copy_blit, 1264 .blit = &r600_copy_blit,
@@ -1260,6 +1291,7 @@ static struct radeon_asic evergreen_asic = {
1260 .get_pcie_lanes = &r600_get_pcie_lanes, 1291 .get_pcie_lanes = &r600_get_pcie_lanes,
1261 .set_pcie_lanes = &r600_set_pcie_lanes, 1292 .set_pcie_lanes = &r600_set_pcie_lanes,
1262 .set_clock_gating = NULL, 1293 .set_clock_gating = NULL,
1294 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1263 }, 1295 },
1264 .pflip = { 1296 .pflip = {
1265 .pre_page_flip = &evergreen_pre_page_flip, 1297 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1302,6 +1334,15 @@ static struct radeon_asic sumo_asic = {
1302 .ring_test = &r600_dma_ring_test, 1334 .ring_test = &r600_dma_ring_test,
1303 .ib_test = &r600_dma_ib_test, 1335 .ib_test = &r600_dma_ib_test,
1304 .is_lockup = &evergreen_dma_is_lockup, 1336 .is_lockup = &evergreen_dma_is_lockup,
1337 },
1338 [R600_RING_TYPE_UVD_INDEX] = {
1339 .ib_execute = &r600_uvd_ib_execute,
1340 .emit_fence = &r600_uvd_fence_emit,
1341 .emit_semaphore = &r600_uvd_semaphore_emit,
1342 .cs_parse = &radeon_uvd_cs_parse,
1343 .ring_test = &r600_uvd_ring_test,
1344 .ib_test = &r600_uvd_ib_test,
1345 .is_lockup = &radeon_ring_test_lockup,
1305 } 1346 }
1306 }, 1347 },
1307 .irq = { 1348 .irq = {
@@ -1314,6 +1355,8 @@ static struct radeon_asic sumo_asic = {
1314 .wait_for_vblank = &dce4_wait_for_vblank, 1355 .wait_for_vblank = &dce4_wait_for_vblank,
1315 .set_backlight_level = &atombios_set_backlight_level, 1356 .set_backlight_level = &atombios_set_backlight_level,
1316 .get_backlight_level = &atombios_get_backlight_level, 1357 .get_backlight_level = &atombios_get_backlight_level,
1358 .hdmi_enable = &evergreen_hdmi_enable,
1359 .hdmi_setmode = &evergreen_hdmi_setmode,
1317 }, 1360 },
1318 .copy = { 1361 .copy = {
1319 .blit = &r600_copy_blit, 1362 .blit = &r600_copy_blit,
@@ -1346,6 +1389,7 @@ static struct radeon_asic sumo_asic = {
1346 .get_pcie_lanes = NULL, 1389 .get_pcie_lanes = NULL,
1347 .set_pcie_lanes = NULL, 1390 .set_pcie_lanes = NULL,
1348 .set_clock_gating = NULL, 1391 .set_clock_gating = NULL,
1392 .set_uvd_clocks = &sumo_set_uvd_clocks,
1349 }, 1393 },
1350 .pflip = { 1394 .pflip = {
1351 .pre_page_flip = &evergreen_pre_page_flip, 1395 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1388,6 +1432,15 @@ static struct radeon_asic btc_asic = {
1388 .ring_test = &r600_dma_ring_test, 1432 .ring_test = &r600_dma_ring_test,
1389 .ib_test = &r600_dma_ib_test, 1433 .ib_test = &r600_dma_ib_test,
1390 .is_lockup = &evergreen_dma_is_lockup, 1434 .is_lockup = &evergreen_dma_is_lockup,
1435 },
1436 [R600_RING_TYPE_UVD_INDEX] = {
1437 .ib_execute = &r600_uvd_ib_execute,
1438 .emit_fence = &r600_uvd_fence_emit,
1439 .emit_semaphore = &r600_uvd_semaphore_emit,
1440 .cs_parse = &radeon_uvd_cs_parse,
1441 .ring_test = &r600_uvd_ring_test,
1442 .ib_test = &r600_uvd_ib_test,
1443 .is_lockup = &radeon_ring_test_lockup,
1391 } 1444 }
1392 }, 1445 },
1393 .irq = { 1446 .irq = {
@@ -1400,6 +1453,8 @@ static struct radeon_asic btc_asic = {
1400 .wait_for_vblank = &dce4_wait_for_vblank, 1453 .wait_for_vblank = &dce4_wait_for_vblank,
1401 .set_backlight_level = &atombios_set_backlight_level, 1454 .set_backlight_level = &atombios_set_backlight_level,
1402 .get_backlight_level = &atombios_get_backlight_level, 1455 .get_backlight_level = &atombios_get_backlight_level,
1456 .hdmi_enable = &evergreen_hdmi_enable,
1457 .hdmi_setmode = &evergreen_hdmi_setmode,
1403 }, 1458 },
1404 .copy = { 1459 .copy = {
1405 .blit = &r600_copy_blit, 1460 .blit = &r600_copy_blit,
@@ -1429,9 +1484,10 @@ static struct radeon_asic btc_asic = {
1429 .set_engine_clock = &radeon_atom_set_engine_clock, 1484 .set_engine_clock = &radeon_atom_set_engine_clock,
1430 .get_memory_clock = &radeon_atom_get_memory_clock, 1485 .get_memory_clock = &radeon_atom_get_memory_clock,
1431 .set_memory_clock = &radeon_atom_set_memory_clock, 1486 .set_memory_clock = &radeon_atom_set_memory_clock,
1432 .get_pcie_lanes = NULL, 1487 .get_pcie_lanes = &r600_get_pcie_lanes,
1433 .set_pcie_lanes = NULL, 1488 .set_pcie_lanes = &r600_set_pcie_lanes,
1434 .set_clock_gating = NULL, 1489 .set_clock_gating = NULL,
1490 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1435 }, 1491 },
1436 .pflip = { 1492 .pflip = {
1437 .pre_page_flip = &evergreen_pre_page_flip, 1493 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1517,6 +1573,15 @@ static struct radeon_asic cayman_asic = {
1517 .ib_test = &r600_dma_ib_test, 1573 .ib_test = &r600_dma_ib_test,
1518 .is_lockup = &cayman_dma_is_lockup, 1574 .is_lockup = &cayman_dma_is_lockup,
1519 .vm_flush = &cayman_dma_vm_flush, 1575 .vm_flush = &cayman_dma_vm_flush,
1576 },
1577 [R600_RING_TYPE_UVD_INDEX] = {
1578 .ib_execute = &r600_uvd_ib_execute,
1579 .emit_fence = &r600_uvd_fence_emit,
1580 .emit_semaphore = &cayman_uvd_semaphore_emit,
1581 .cs_parse = &radeon_uvd_cs_parse,
1582 .ring_test = &r600_uvd_ring_test,
1583 .ib_test = &r600_uvd_ib_test,
1584 .is_lockup = &radeon_ring_test_lockup,
1520 } 1585 }
1521 }, 1586 },
1522 .irq = { 1587 .irq = {
@@ -1529,6 +1594,8 @@ static struct radeon_asic cayman_asic = {
1529 .wait_for_vblank = &dce4_wait_for_vblank, 1594 .wait_for_vblank = &dce4_wait_for_vblank,
1530 .set_backlight_level = &atombios_set_backlight_level, 1595 .set_backlight_level = &atombios_set_backlight_level,
1531 .get_backlight_level = &atombios_get_backlight_level, 1596 .get_backlight_level = &atombios_get_backlight_level,
1597 .hdmi_enable = &evergreen_hdmi_enable,
1598 .hdmi_setmode = &evergreen_hdmi_setmode,
1532 }, 1599 },
1533 .copy = { 1600 .copy = {
1534 .blit = &r600_copy_blit, 1601 .blit = &r600_copy_blit,
@@ -1558,9 +1625,10 @@ static struct radeon_asic cayman_asic = {
1558 .set_engine_clock = &radeon_atom_set_engine_clock, 1625 .set_engine_clock = &radeon_atom_set_engine_clock,
1559 .get_memory_clock = &radeon_atom_get_memory_clock, 1626 .get_memory_clock = &radeon_atom_get_memory_clock,
1560 .set_memory_clock = &radeon_atom_set_memory_clock, 1627 .set_memory_clock = &radeon_atom_set_memory_clock,
1561 .get_pcie_lanes = NULL, 1628 .get_pcie_lanes = &r600_get_pcie_lanes,
1562 .set_pcie_lanes = NULL, 1629 .set_pcie_lanes = &r600_set_pcie_lanes,
1563 .set_clock_gating = NULL, 1630 .set_clock_gating = NULL,
1631 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1564 }, 1632 },
1565 .pflip = { 1633 .pflip = {
1566 .pre_page_flip = &evergreen_pre_page_flip, 1634 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1646,6 +1714,15 @@ static struct radeon_asic trinity_asic = {
1646 .ib_test = &r600_dma_ib_test, 1714 .ib_test = &r600_dma_ib_test,
1647 .is_lockup = &cayman_dma_is_lockup, 1715 .is_lockup = &cayman_dma_is_lockup,
1648 .vm_flush = &cayman_dma_vm_flush, 1716 .vm_flush = &cayman_dma_vm_flush,
1717 },
1718 [R600_RING_TYPE_UVD_INDEX] = {
1719 .ib_execute = &r600_uvd_ib_execute,
1720 .emit_fence = &r600_uvd_fence_emit,
1721 .emit_semaphore = &cayman_uvd_semaphore_emit,
1722 .cs_parse = &radeon_uvd_cs_parse,
1723 .ring_test = &r600_uvd_ring_test,
1724 .ib_test = &r600_uvd_ib_test,
1725 .is_lockup = &radeon_ring_test_lockup,
1649 } 1726 }
1650 }, 1727 },
1651 .irq = { 1728 .irq = {
@@ -1690,6 +1767,7 @@ static struct radeon_asic trinity_asic = {
1690 .get_pcie_lanes = NULL, 1767 .get_pcie_lanes = NULL,
1691 .set_pcie_lanes = NULL, 1768 .set_pcie_lanes = NULL,
1692 .set_clock_gating = NULL, 1769 .set_clock_gating = NULL,
1770 .set_uvd_clocks = &sumo_set_uvd_clocks,
1693 }, 1771 },
1694 .pflip = { 1772 .pflip = {
1695 .pre_page_flip = &evergreen_pre_page_flip, 1773 .pre_page_flip = &evergreen_pre_page_flip,
@@ -1775,6 +1853,15 @@ static struct radeon_asic si_asic = {
1775 .ib_test = &r600_dma_ib_test, 1853 .ib_test = &r600_dma_ib_test,
1776 .is_lockup = &si_dma_is_lockup, 1854 .is_lockup = &si_dma_is_lockup,
1777 .vm_flush = &si_dma_vm_flush, 1855 .vm_flush = &si_dma_vm_flush,
1856 },
1857 [R600_RING_TYPE_UVD_INDEX] = {
1858 .ib_execute = &r600_uvd_ib_execute,
1859 .emit_fence = &r600_uvd_fence_emit,
1860 .emit_semaphore = &cayman_uvd_semaphore_emit,
1861 .cs_parse = &radeon_uvd_cs_parse,
1862 .ring_test = &r600_uvd_ring_test,
1863 .ib_test = &r600_uvd_ib_test,
1864 .is_lockup = &radeon_ring_test_lockup,
1778 } 1865 }
1779 }, 1866 },
1780 .irq = { 1867 .irq = {
@@ -1816,9 +1903,10 @@ static struct radeon_asic si_asic = {
1816 .set_engine_clock = &radeon_atom_set_engine_clock, 1903 .set_engine_clock = &radeon_atom_set_engine_clock,
1817 .get_memory_clock = &radeon_atom_get_memory_clock, 1904 .get_memory_clock = &radeon_atom_get_memory_clock,
1818 .set_memory_clock = &radeon_atom_set_memory_clock, 1905 .set_memory_clock = &radeon_atom_set_memory_clock,
1819 .get_pcie_lanes = NULL, 1906 .get_pcie_lanes = &r600_get_pcie_lanes,
1820 .set_pcie_lanes = NULL, 1907 .set_pcie_lanes = &r600_set_pcie_lanes,
1821 .set_clock_gating = NULL, 1908 .set_clock_gating = NULL,
1909 .set_uvd_clocks = &si_set_uvd_clocks,
1822 }, 1910 },
1823 .pflip = { 1911 .pflip = {
1824 .pre_page_flip = &evergreen_pre_page_flip, 1912 .pre_page_flip = &evergreen_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3535f73ad3e2..2c87365d345f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
333int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
333int r600_copy_blit(struct radeon_device *rdev, 334int r600_copy_blit(struct radeon_device *rdev,
334 uint64_t src_offset, uint64_t dst_offset, 335 uint64_t src_offset, uint64_t dst_offset,
335 unsigned num_gpu_pages, struct radeon_fence **fence); 336 unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -373,11 +374,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
373void r600_rlc_stop(struct radeon_device *rdev); 374void r600_rlc_stop(struct radeon_device *rdev);
374/* r600 audio */ 375/* r600 audio */
375int r600_audio_init(struct radeon_device *rdev); 376int r600_audio_init(struct radeon_device *rdev);
376void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
377struct r600_audio r600_audio_status(struct radeon_device *rdev); 377struct r600_audio r600_audio_status(struct radeon_device *rdev);
378void r600_audio_fini(struct radeon_device *rdev); 378void r600_audio_fini(struct radeon_device *rdev);
379int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 379int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
380void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 380void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
381void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
382void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
381/* r600 blit */ 383/* r600 blit */
382int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, 384int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
383 struct radeon_fence **fence, struct radeon_sa_bo **vb, 385 struct radeon_fence **fence, struct radeon_sa_bo **vb,
@@ -392,6 +394,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
392u32 r600_get_xclk(struct radeon_device *rdev); 394u32 r600_get_xclk(struct radeon_device *rdev);
393uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 395uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
394 396
397/* uvd */
398int r600_uvd_init(struct radeon_device *rdev);
399int r600_uvd_rbc_start(struct radeon_device *rdev);
400void r600_uvd_rbc_stop(struct radeon_device *rdev);
401int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
402void r600_uvd_fence_emit(struct radeon_device *rdev,
403 struct radeon_fence *fence);
404void r600_uvd_semaphore_emit(struct radeon_device *rdev,
405 struct radeon_ring *ring,
406 struct radeon_semaphore *semaphore,
407 bool emit_wait);
408void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
409
395/* 410/*
396 * rv770,rv730,rv710,rv740 411 * rv770,rv730,rv710,rv740
397 */ 412 */
@@ -409,6 +424,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
409 unsigned num_gpu_pages, 424 unsigned num_gpu_pages,
410 struct radeon_fence **fence); 425 struct radeon_fence **fence);
411u32 rv770_get_xclk(struct radeon_device *rdev); 426u32 rv770_get_xclk(struct radeon_device *rdev);
427int rv770_uvd_resume(struct radeon_device *rdev);
428int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
412 429
413/* 430/*
414 * evergreen 431 * evergreen
@@ -444,6 +461,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
444extern void evergreen_pm_finish(struct radeon_device *rdev); 461extern void evergreen_pm_finish(struct radeon_device *rdev);
445extern void sumo_pm_init_profile(struct radeon_device *rdev); 462extern void sumo_pm_init_profile(struct radeon_device *rdev);
446extern void btc_pm_init_profile(struct radeon_device *rdev); 463extern void btc_pm_init_profile(struct radeon_device *rdev);
464int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
465int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
447extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 466extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
448extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 467extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
449extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 468extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -459,12 +478,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
459 uint64_t src_offset, uint64_t dst_offset, 478 uint64_t src_offset, uint64_t dst_offset,
460 unsigned num_gpu_pages, 479 unsigned num_gpu_pages,
461 struct radeon_fence **fence); 480 struct radeon_fence **fence);
481void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
482void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
462 483
463/* 484/*
464 * cayman 485 * cayman
465 */ 486 */
466void cayman_fence_ring_emit(struct radeon_device *rdev, 487void cayman_fence_ring_emit(struct radeon_device *rdev,
467 struct radeon_fence *fence); 488 struct radeon_fence *fence);
489void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
490 struct radeon_ring *ring,
491 struct radeon_semaphore *semaphore,
492 bool emit_wait);
468void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); 493void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
469int cayman_init(struct radeon_device *rdev); 494int cayman_init(struct radeon_device *rdev);
470void cayman_fini(struct radeon_device *rdev); 495void cayman_fini(struct radeon_device *rdev);
@@ -524,5 +549,6 @@ int si_copy_dma(struct radeon_device *rdev,
524void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 549void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
525u32 si_get_xclk(struct radeon_device *rdev); 550u32 si_get_xclk(struct radeon_device *rdev);
526uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); 551uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
552int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
527 553
528#endif 554#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb5713528..dea6f63c9724 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2028 num_modes = power_info->info.ucNumOfPowerModeEntries; 2028 num_modes = power_info->info.ucNumOfPowerModeEntries;
2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
2031 if (num_modes == 0)
2032 return state_index;
2031 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); 2033 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
2032 if (!rdev->pm.power_state) 2034 if (!rdev->pm.power_state)
2033 return state_index; 2035 return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2307 rdev->pm.default_power_state_index = state_index; 2309 rdev->pm.default_power_state_index = state_index;
2308 rdev->pm.power_state[state_index].default_clock_mode = 2310 rdev->pm.power_state[state_index].default_clock_mode =
2309 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2311 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2310 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2312 if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
2311 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2313 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2312 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2314 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2313 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2315 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2345 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; 2347 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
2346 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2348 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2347 } 2349 }
2348 } else if (ASIC_IS_DCE6(rdev)) { 2350 } else if (rdev->family >= CHIP_TAHITI) {
2349 sclk = le16_to_cpu(clock_info->si.usEngineClockLow); 2351 sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
2350 sclk |= clock_info->si.ucEngineClockHigh << 16; 2352 sclk |= clock_info->si.ucEngineClockHigh << 16;
2351 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); 2353 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2358 le16_to_cpu(clock_info->si.usVDDC); 2360 le16_to_cpu(clock_info->si.usVDDC);
2359 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = 2361 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
2360 le16_to_cpu(clock_info->si.usVDDCI); 2362 le16_to_cpu(clock_info->si.usVDDCI);
2361 } else if (ASIC_IS_DCE4(rdev)) { 2363 } else if (rdev->family >= CHIP_CEDAR) {
2362 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); 2364 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2363 sclk |= clock_info->evergreen.ucEngineClockHigh << 16; 2365 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2364 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); 2366 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2432 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2434 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2433 2435
2434 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2436 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2437 if (power_info->pplib.ucNumStates == 0)
2438 return state_index;
2435 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2439 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2436 power_info->pplib.ucNumStates, GFP_KERNEL); 2440 power_info->pplib.ucNumStates, GFP_KERNEL);
2437 if (!rdev->pm.power_state) 2441 if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2514 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2518 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2515 u16 data_offset; 2519 u16 data_offset;
2516 u8 frev, crev; 2520 u8 frev, crev;
2521 u8 *power_state_offset;
2517 2522
2518 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 2523 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2519 &frev, &crev, &data_offset)) 2524 &frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2530 non_clock_info_array = (struct _NonClockInfoArray *) 2535 non_clock_info_array = (struct _NonClockInfoArray *)
2531 (mode_info->atom_context->bios + data_offset + 2536 (mode_info->atom_context->bios + data_offset +
2532 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2537 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2538 if (state_array->ucNumEntries == 0)
2539 return state_index;
2533 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2540 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2534 state_array->ucNumEntries, GFP_KERNEL); 2541 state_array->ucNumEntries, GFP_KERNEL);
2535 if (!rdev->pm.power_state) 2542 if (!rdev->pm.power_state)
2536 return state_index; 2543 return state_index;
2544 power_state_offset = (u8 *)state_array->states;
2537 for (i = 0; i < state_array->ucNumEntries; i++) { 2545 for (i = 0; i < state_array->ucNumEntries; i++) {
2538 mode_index = 0; 2546 mode_index = 0;
2539 power_state = (union pplib_power_state *)&state_array->states[i]; 2547 power_state = (union pplib_power_state *)power_state_offset;
2540 /* XXX this might be an inagua bug... */ 2548 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2541 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2542 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2549 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2543 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2550 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2544 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * 2551 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2550 if (power_state->v2.ucNumDPMLevels) { 2557 if (power_state->v2.ucNumDPMLevels) {
2551 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2558 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2552 clock_array_index = power_state->v2.clockInfoIndex[j]; 2559 clock_array_index = power_state->v2.clockInfoIndex[j];
2553 /* XXX this might be an inagua bug... */
2554 if (clock_array_index >= clock_info_array->ucNumEntries)
2555 continue;
2556 clock_info = (union pplib_clock_info *) 2560 clock_info = (union pplib_clock_info *)
2557 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2561 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2558 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2562 valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2574 non_clock_info); 2578 non_clock_info);
2575 state_index++; 2579 state_index++;
2576 } 2580 }
2581 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2577 } 2582 }
2578 /* if multiple clock modes, mark the lowest as no display */ 2583 /* if multiple clock modes, mark the lowest as no display */
2579 for (i = 0; i < state_index; i++) { 2584 for (i = 0; i < state_index; i++) {
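
The hunk above stops indexing state_array->states[] as if entries were fixed-size and instead walks a byte cursor, because each v6 power state is variable-length: a 2-byte header followed by ucNumDPMLevels one-byte clock-info indices, which is exactly the stride power_state_offset advances by. A minimal, self-contained sketch of that walk; the struct here is a stand-in, not the real AtomBIOS definition:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the variable-length power state: 2-byte header
     * followed by ucNumDPMLevels one-byte clock-info indices */
    struct pplib_state_v2 {
        uint8_t ucNumDPMLevels;
        uint8_t nonClockInfoIndex;
        uint8_t clockInfoIndex[];   /* variable length */
    };

    int main(void)
    {
        /* packed array: state 0 has 2 levels, state 1 has 3 */
        uint8_t states[] = { 2, 0, 10, 11,   3, 1, 20, 21, 22 };
        uint8_t *cursor = states;
        unsigned i, j;

        for (i = 0; i < 2; i++) {
            struct pplib_state_v2 *s = (struct pplib_state_v2 *)cursor;

            printf("state %u: non-clock idx %d, levels:", i,
                   s->nonClockInfoIndex);
            for (j = 0; j < s->ucNumDPMLevels; j++)
                printf(" %d", s->clockInfoIndex[j]);
            printf("\n");

            /* same stride rule as the patch: header + level bytes */
            cursor += 2 + s->ucNumDPMLevels;
        }
        return 0;
    }
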
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2620 default: 2625 default:
2621 break; 2626 break;
2622 } 2627 }
2623 } else { 2628 }
2629
2630 if (state_index == 0) {
2624 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2631 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2625 if (rdev->pm.power_state) { 2632 if (rdev->pm.power_state) {
2626 rdev->pm.power_state[0].clock_info = 2633 rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2654 rdev->pm.current_vddc = 0; 2661 rdev->pm.current_vddc = 0;
2655} 2662}
2656 2663
2664union get_clock_dividers {
2665 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
2666 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
2667 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
2668 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
2669 struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
2670};
2671
2672int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2673 u8 clock_type,
2674 u32 clock,
2675 bool strobe_mode,
2676 struct atom_clock_dividers *dividers)
2677{
2678 union get_clock_dividers args;
2679 int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
2680 u8 frev, crev;
2681
2682 memset(&args, 0, sizeof(args));
2683 memset(dividers, 0, sizeof(struct atom_clock_dividers));
2684
2685 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2686 return -EINVAL;
2687
2688 switch (crev) {
2689 case 1:
2690 /* r4xx, r5xx */
2691 args.v1.ucAction = clock_type;
2692 args.v1.ulClock = cpu_to_le32(clock); /* 10 kHz */
2693
2694 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2695
2696 dividers->post_div = args.v1.ucPostDiv;
2697 dividers->fb_div = args.v1.ucFbDiv;
2698 dividers->enable_post_div = true;
2699 break;
2700 case 2:
2701 case 3:
2702 /* r6xx, r7xx, evergreen, ni */
2703 if (rdev->family <= CHIP_RV770) {
2704 args.v2.ucAction = clock_type;
2705 args.v2.ulClock = cpu_to_le32(clock); /* 10 kHz */
2706
2707 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2708
2709 dividers->post_div = args.v2.ucPostDiv;
2710 dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
2711 dividers->ref_div = args.v2.ucAction;
2712 if (rdev->family == CHIP_RV770) {
2713 dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
2714 true : false;
2715 dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
2716 } else
2717 dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
2718 } else {
2719 if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
2720 args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
2721
2722 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2723
2724 dividers->post_div = args.v3.ucPostDiv;
2725 dividers->enable_post_div = (args.v3.ucCntlFlag &
2726 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2727 dividers->enable_dithen = (args.v3.ucCntlFlag &
2728 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2729 dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
2730 dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
2731 dividers->ref_div = args.v3.ucRefDiv;
2732 dividers->vco_mode = (args.v3.ucCntlFlag &
2733 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
2734 } else {
2735 args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
2736 if (strobe_mode)
2737 args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
2738
2739 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2740
2741 dividers->post_div = args.v5.ucPostDiv;
2742 dividers->enable_post_div = (args.v5.ucCntlFlag &
2743 ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
2744 dividers->enable_dithen = (args.v5.ucCntlFlag &
2745 ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
2746 dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
2747 dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
2748 dividers->ref_div = args.v5.ucRefDiv;
2749 dividers->vco_mode = (args.v5.ucCntlFlag &
2750 ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
2751 }
2752 }
2753 break;
2754 case 4:
2755 /* fusion */
2756 args.v4.ulClock = cpu_to_le32(clock); /* 10 kHz */
2757
2758 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2759
2760 dividers->post_div = args.v4.ucPostDiv;
2761 dividers->real_clock = le32_to_cpu(args.v4.ulClock);
2762 break;
2763 default:
2764 return -EINVAL;
2765 }
2766 return 0;
2767}
2768
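
In the crev 2 branch of the function above, the command table writes status bits back into ulClock on RV770: bit 24 reports whether the post divider was enabled and bit 25 the VCO mode, which is what the (1 << 24)/(1 << 25) tests extract. A small sketch of that decode, assuming only the bit positions the code above relies on:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* example readback value, already converted to CPU byte order */
        uint32_t ulClock = (1u << 24) | 30000;

        bool enable_post_div = (ulClock & (1u << 24)) != 0;
        uint32_t vco_mode = (ulClock & (1u << 25)) ? 1 : 0;

        printf("post_div enabled: %d, vco_mode: %u\n",
               enable_post_div, vco_mode);
        return 0;
    }
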
2657void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) 2769void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2658{ 2770{
2659 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; 2771 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b8015913d382..fa3c56fba294 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev)
99 return true; 99 return true;
100} 100}
101 101
102static bool radeon_read_platform_bios(struct radeon_device *rdev)
103{
104 uint8_t __iomem *bios;
105 size_t size;
106
107 rdev->bios = NULL;
108
109 bios = pci_platform_rom(rdev->pdev, &size);
110 if (!bios) {
111 return false;
112 }
113
114 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
115 return false;
116 }
117 rdev->bios = kmemdup(bios, size, GFP_KERNEL);
118 if (rdev->bios == NULL) {
119 return false;
120 }
121
122 return true;
123}
124
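
The new platform ROM fallback above only accepts an image that begins with the PCI expansion-ROM signature bytes 0x55 0xAA. A self-contained sketch of the same validation:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true if the buffer starts like a PCI expansion ROM image */
    static bool rom_signature_ok(const uint8_t *bios, size_t size)
    {
        return size >= 2 && bios[0] == 0x55 && bios[1] == 0xaa;
    }

    int main(void)
    {
        uint8_t good[] = { 0x55, 0xaa, 0x00 };
        uint8_t bad[]  = { 0x12, 0x34 };

        printf("good: %d, bad: %d\n",
               rom_signature_ok(good, sizeof(good)),
               rom_signature_ok(bad, sizeof(bad)));
        return 0;
    }
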
102#ifdef CONFIG_ACPI 125#ifdef CONFIG_ACPI
103/* ATRM is used to get the BIOS on the discrete cards in 126/* ATRM is used to get the BIOS on the discrete cards in
104 * dual-gpu systems. 127 * dual-gpu systems.
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev)
620 if (r == false) { 643 if (r == false) {
621 r = radeon_read_disabled_bios(rdev); 644 r = radeon_read_disabled_bios(rdev);
622 } 645 }
646 if (r == false) {
647 r = radeon_read_platform_bios(rdev);
648 }
623 if (r == false || rdev->bios == NULL) { 649 if (r == false || rdev->bios == NULL) {
624 DRM_ERROR("Unable to locate a BIOS ROM\n"); 650 DRM_ERROR("Unable to locate a BIOS ROM\n");
625 rdev->bios = NULL; 651 rdev->bios = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70d38241b083..7e265a58141f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -63,30 +63,50 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
63 break; 63 break;
64 } 64 }
65 } 65 }
66 if (!duplicate) { 66 if (duplicate) {
67 p->relocs[i].gobj = drm_gem_object_lookup(ddev,
68 p->filp,
69 r->handle);
70 if (p->relocs[i].gobj == NULL) {
71 DRM_ERROR("gem object lookup failed 0x%x\n",
72 r->handle);
73 return -ENOENT;
74 }
75 p->relocs_ptr[i] = &p->relocs[i];
76 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
77 p->relocs[i].lobj.bo = p->relocs[i].robj;
78 p->relocs[i].lobj.wdomain = r->write_domain;
79 p->relocs[i].lobj.rdomain = r->read_domains;
80 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
81 p->relocs[i].handle = r->handle;
82 p->relocs[i].flags = r->flags;
83 radeon_bo_list_add_object(&p->relocs[i].lobj,
84 &p->validated);
85
86 } else
87 p->relocs[i].handle = 0; 67 p->relocs[i].handle = 0;
68 continue;
69 }
70
71 p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
72 r->handle);
73 if (p->relocs[i].gobj == NULL) {
74 DRM_ERROR("gem object lookup failed 0x%x\n",
75 r->handle);
76 return -ENOENT;
77 }
78 p->relocs_ptr[i] = &p->relocs[i];
79 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
80 p->relocs[i].lobj.bo = p->relocs[i].robj;
81 p->relocs[i].lobj.written = !!r->write_domain;
82
83 /* the first reloc of an UVD job is the
84 msg and that must be in VRAM */
85 if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
86 /* TODO: is this still needed for NI+ ? */
87 p->relocs[i].lobj.domain =
88 RADEON_GEM_DOMAIN_VRAM;
89
90 p->relocs[i].lobj.alt_domain =
91 RADEON_GEM_DOMAIN_VRAM;
92
93 } else {
94 uint32_t domain = r->write_domain ?
95 r->write_domain : r->read_domains;
96
97 p->relocs[i].lobj.domain = domain;
98 if (domain == RADEON_GEM_DOMAIN_VRAM)
99 domain |= RADEON_GEM_DOMAIN_GTT;
100 p->relocs[i].lobj.alt_domain = domain;
101 }
102
103 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
104 p->relocs[i].handle = r->handle;
105
106 radeon_bo_list_add_object(&p->relocs[i].lobj,
107 &p->validated);
88 } 108 }
89 return radeon_bo_list_validate(&p->validated); 109 return radeon_bo_list_validate(&p->validated, p->ring);
90} 110}
91 111
92static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) 112static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
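
The rewritten reloc loop above picks a preferred placement (the write domain if one is set, otherwise the read domains) plus an alternative to retry with: VRAM additionally allows GTT, while the first reloc of a UVD job is pinned to VRAM with no fallback. A compact sketch of that decision; the DOMAIN_* constants mirror the RADEON_GEM_DOMAIN_* bit values:

    #include <stdint.h>
    #include <stdio.h>

    #define DOMAIN_CPU  0x1
    #define DOMAIN_GTT  0x2
    #define DOMAIN_VRAM 0x4

    static void pick_domains(uint32_t write_domain, uint32_t read_domains,
                             int uvd_msg, uint32_t *domain, uint32_t *alt)
    {
        if (uvd_msg) {
            /* UVD messages must stay in VRAM, no fallback */
            *domain = DOMAIN_VRAM;
            *alt = DOMAIN_VRAM;
            return;
        }
        *domain = write_domain ? write_domain : read_domains;
        *alt = *domain;
        if (*domain == DOMAIN_VRAM)
            *alt |= DOMAIN_GTT;   /* allow falling back to GTT on retry */
    }

    int main(void)
    {
        uint32_t d, a;

        pick_domains(DOMAIN_VRAM, 0, 0, &d, &a);
        printf("write to VRAM: domain=0x%x alt=0x%x\n", d, a);
        pick_domains(0, DOMAIN_GTT, 0, &d, &a);
        printf("read from GTT: domain=0x%x alt=0x%x\n", d, a);
        return 0;
    }
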
@@ -121,6 +141,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
121 return -EINVAL; 141 return -EINVAL;
122 } 142 }
123 break; 143 break;
144 case RADEON_CS_RING_UVD:
145 p->ring = R600_RING_TYPE_UVD_INDEX;
146 break;
124 } 147 }
125 return 0; 148 return 0;
126} 149}
@@ -241,15 +264,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
241 return -EINVAL; 264 return -EINVAL;
242 } 265 }
243 266
244 /* we only support VM on SI+ */ 267 if (radeon_cs_get_ring(p, ring, priority))
245 if ((p->rdev->family >= CHIP_TAHITI) &&
246 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
247 DRM_ERROR("VM required on SI+!\n");
248 return -EINVAL; 268 return -EINVAL;
249 }
250 269
251 if (radeon_cs_get_ring(p, ring, priority)) 270 /* we only support VM on some SI+ rings */
271 if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
272 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
273 DRM_ERROR("Ring %d requires VM!\n", p->ring);
252 return -EINVAL; 274 return -EINVAL;
275 }
253 } 276 }
254 277
255 /* deal with non-vm */ 278 /* deal with non-vm */
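
The check above replaces the blanket "SI+ requires VM" rule with a per-ring one: if the ring's asic table has no non-VM cs_parse callback, only VM submissions are accepted on it. A tiny sketch of that dispatch-table test, with a hypothetical cut-down struct in place of the radeon asic tables:

    #include <stdio.h>

    /* hypothetical, cut-down view of a per-ring asic function table */
    struct ring_funcs {
        int (*cs_parse)(void *parser);  /* NULL: non-VM parsing unsupported */
    };

    static int check_vm_flags(const struct ring_funcs *ring, int use_vm)
    {
        if (ring->cs_parse == NULL && !use_vm) {
            fprintf(stderr, "this ring requires VM!\n");
            return -1;
        }
        return 0;
    }

    static int dummy_parse(void *parser) { (void)parser; return 0; }

    int main(void)
    {
        struct ring_funcs gfx = { .cs_parse = dummy_parse };
        struct ring_funcs vm_only = { .cs_parse = NULL };

        printf("gfx, no VM:     %d\n", check_vm_flags(&gfx, 0));
        printf("vm-only, no VM: %d\n", check_vm_flags(&vm_only, 0));
        printf("vm-only, VM:    %d\n", check_vm_flags(&vm_only, 1));
        return 0;
    }
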
@@ -526,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
526 r = radeon_cs_handle_lockup(rdev, r); 549 r = radeon_cs_handle_lockup(rdev, r);
527 return r; 550 return r;
528 } 551 }
552
553 if (parser.ring == R600_RING_TYPE_UVD_INDEX)
554 radeon_uvd_note_usage(rdev);
555
529 r = radeon_cs_ib_chunk(rdev, &parser); 556 r = radeon_cs_ib_chunk(rdev, &parser);
530 if (r) { 557 if (r) {
531 goto out; 558 goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 44b8034a400d..a8f608903989 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,6 +98,42 @@ static const char radeon_family_name[][16] = {
98}; 98};
99 99
100/** 100/**
101 * radeon_program_register_sequence - program an array of registers.
102 *
103 * @rdev: radeon_device pointer
104 * @registers: pointer to the register array
105 * @array_size: size of the register array
106 *
107 * Programs an array of registers with AND and OR masks.
108 * This is a helper for setting golden registers.
109 */
110void radeon_program_register_sequence(struct radeon_device *rdev,
111 const u32 *registers,
112 const u32 array_size)
113{
114 u32 tmp, reg, and_mask, or_mask;
115 int i;
116
117 if (array_size % 3)
118 return;
119
120 for (i = 0; i < array_size; i += 3) {
121 reg = registers[i + 0];
122 and_mask = registers[i + 1];
123 or_mask = registers[i + 2];
124
125 if (and_mask == 0xffffffff) {
126 tmp = or_mask;
127 } else {
128 tmp = RREG32(reg);
129 tmp &= ~and_mask;
130 tmp |= or_mask;
131 }
132 WREG32(reg, tmp);
133 }
134}
135
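
The helper added above consumes (reg, and_mask, or_mask) triples: an all-ones AND mask means "write or_mask verbatim", anything else is a read-modify-write. A self-contained sketch of the same loop against an in-memory register file standing in for RREG32/WREG32:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[4];   /* toy register file in place of MMIO */

    static uint32_t rreg32(uint32_t reg) { return regs[reg]; }
    static void wreg32(uint32_t reg, uint32_t v) { regs[reg] = v; }

    static void program_register_sequence(const uint32_t *r, uint32_t n)
    {
        uint32_t i, reg, and_mask, or_mask, tmp;

        if (n % 3)   /* malformed table: must be whole triples */
            return;

        for (i = 0; i < n; i += 3) {
            reg = r[i + 0];
            and_mask = r[i + 1];
            or_mask = r[i + 2];

            if (and_mask == 0xffffffff) {
                tmp = or_mask;          /* full overwrite */
            } else {
                tmp = rreg32(reg);
                tmp &= ~and_mask;       /* clear the masked bits */
                tmp |= or_mask;         /* then set the new ones */
            }
            wreg32(reg, tmp);
        }
    }

    int main(void)
    {
        static const uint32_t golden[] = {
            0, 0xffffffff, 0x1234,   /* reg 0: plain overwrite */
            1, 0x000000f0, 0x0050,   /* reg 1: rmw of bits 4-7 */
        };

        regs[1] = 0xabcd;
        program_register_sequence(golden, 6);
        printf("reg0=0x%x reg1=0x%x\n", regs[0], regs[1]);   /* 0x1234 0xab5d */
        return 0;
    }
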
136/**
101 * radeon_surface_init - Clear GPU surface registers. 137 * radeon_surface_init - Clear GPU surface registers.
102 * 138 *
103 * @rdev: radeon_device pointer 139 * @rdev: radeon_device pointer
@@ -359,7 +395,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
359 uint64_t limit = (uint64_t)radeon_vram_limit << 20; 395 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
360 396
361 mc->vram_start = base; 397 mc->vram_start = base;
362 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 398 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
363 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 399 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
364 mc->real_vram_size = mc->aper_size; 400 mc->real_vram_size = mc->aper_size;
365 mc->mc_vram_size = mc->aper_size; 401 mc->mc_vram_size = mc->aper_size;
@@ -394,7 +430,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
394{ 430{
395 u64 size_af, size_bf; 431 u64 size_af, size_bf;
396 432
397 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 433 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
398 size_bf = mc->vram_start & ~mc->gtt_base_align; 434 size_bf = mc->vram_start & ~mc->gtt_base_align;
399 if (size_bf > size_af) { 435 if (size_bf > size_af) {
400 if (mc->gtt_size > size_bf) { 436 if (mc->gtt_size > size_bf) {
@@ -1068,6 +1104,17 @@ int radeon_device_init(struct radeon_device *rdev,
1068 radeon_agp_disable(rdev); 1104 radeon_agp_disable(rdev);
1069 } 1105 }
1070 1106
1107 /* Set the internal MC address mask
1108 * This is the max address of the GPU's
1109 * internal address space.
1110 */
1111 if (rdev->family >= CHIP_CAYMAN)
1112 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1113 else if (rdev->family >= CHIP_CEDAR)
1114 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1115 else
1116 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1117
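
Replacing the hard-coded 0xFFFFFFFF with rdev->mc.mc_mask lets the VRAM/GTT placement checks use the GPU's real internal address width: 40 bits on Cayman+, 36 on Evergreen, 32 before that. A sketch of the same "does it still fit above base" test with those three masks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same test as radeon_vram_location(): VRAM of the given size,
     * placed at base, must end within the MC's address space */
    static bool vram_fits(uint64_t mc_mask, uint64_t base, uint64_t size)
    {
        return size <= mc_mask - base + 1;
    }

    int main(void)
    {
        uint64_t base = 0x800000000ULL;   /* 32 GiB */
        uint64_t vram = 0x900000000ULL;   /* 36 GiB of VRAM */

        printf("40-bit MC: %d\n", vram_fits(0xffffffffffULL, base, vram));
        printf("36-bit MC: %d\n", vram_fits(0xfffffffffULL, base, vram));
        /* a 32-bit MC cannot even address that base; place at 0 instead */
        printf("32-bit MC: %d\n", vram_fits(0xffffffffULL, 0, vram));
        return 0;
    }
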
1071 /* set DMA mask + need_dma32 flags. 1118 /* set DMA mask + need_dma32 flags.
1072 * PCIE - can handle 40-bits. 1119 * PCIE - can handle 40-bits.
1073 * IGP - can handle 40-bits 1120 * IGP - can handle 40-bits
@@ -1131,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
1131 if (r) 1178 if (r)
1132 DRM_ERROR("ib ring test failed (%d).\n", r); 1179 DRM_ERROR("ib ring test failed (%d).\n", r);
1133 1180
1181 r = radeon_gem_debugfs_init(rdev);
1182 if (r) {
1183 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1184 }
1185
1134 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1186 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1135 /* Acceleration not working on AGP card try again 1187 /* Acceleration not working on AGP card try again
1136 * with fallback to PCI or PCIE GART 1188 * with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66a7f0fd9620..094e7e5ea39e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -71,9 +71,12 @@
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers 72 * 2.29.0 - R500 FP16 color clear registers
73 * 2.30.0 - fix for FMASK texturing 73 * 2.30.0 - fix for FMASK texturing
74 * 2.31.0 - Add fastfb support for rs690
75 * 2.32.0 - new info request for rings working
76 * 2.33.0 - Add SI tiling mode array query
74 */ 77 */
75#define KMS_DRIVER_MAJOR 2 78#define KMS_DRIVER_MAJOR 2
76#define KMS_DRIVER_MINOR 30 79#define KMS_DRIVER_MINOR 33
77#define KMS_DRIVER_PATCHLEVEL 0 80#define KMS_DRIVER_PATCHLEVEL 0
78int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 81int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
79int radeon_driver_unload_kms(struct drm_device *dev); 82int radeon_driver_unload_kms(struct drm_device *dev);
@@ -144,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
144#endif 147#endif
145 148
146int radeon_no_wb; 149int radeon_no_wb;
147int radeon_modeset = 1; 150int radeon_modeset = -1;
148int radeon_dynclks = -1; 151int radeon_dynclks = -1;
149int radeon_r4xx_atom = 0; 152int radeon_r4xx_atom = 0;
150int radeon_agpmode = 0; 153int radeon_agpmode = 0;
@@ -160,6 +163,7 @@ int radeon_hw_i2c = 0;
160int radeon_pcie_gen2 = -1; 163int radeon_pcie_gen2 = -1;
161int radeon_msi = -1; 164int radeon_msi = -1;
162int radeon_lockup_timeout = 10000; 165int radeon_lockup_timeout = 10000;
166int radeon_fastfb = 0;
163 167
164MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 168MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
165module_param_named(no_wb, radeon_no_wb, int, 0444); 169module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -212,6 +216,9 @@ module_param_named(msi, radeon_msi, int, 0444);
212MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)"); 216
213module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444); 217module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
214 218
219MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
220module_param_named(fastfb, radeon_fastfb, int, 0444);
221
215static struct pci_device_id pciidlist[] = { 222static struct pci_device_id pciidlist[] = {
216 radeon_PCI_IDS 223 radeon_PCI_IDS
217}; 224};
@@ -449,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
449 456
450static int __init radeon_init(void) 457static int __init radeon_init(void)
451{ 458{
459#ifdef CONFIG_VGA_CONSOLE
460 if (vgacon_text_force() && radeon_modeset == -1) {
461 DRM_INFO("VGACON detected, disabling radeon kernel modesetting.\n");
462 radeon_modeset = 0;
463 }
464#endif
465 /* enable modesetting by default unless nomodeset was requested */
466 if (radeon_modeset == -1)
467 radeon_modeset = 1;
468
452 if (radeon_modeset == 1) { 469 if (radeon_modeset == 1) {
453 DRM_INFO("radeon kernel modesetting enabled.\n"); 470 DRM_INFO("radeon kernel modesetting enabled.\n");
454 driver = &kms_driver; 471 driver = &kms_driver;
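
Changing the default from 1 to -1 turns radeon_modeset into a tri-state: -1 means "auto", which vgacon_text_force() may veto before the value finally collapses to 1. A sketch of that resolution order, with a stub in place of the vgacon check:

    #include <stdbool.h>
    #include <stdio.h>

    static bool vgacon_text_force_stub(void)
    {
        return false;   /* pretend the user did not boot with nomodeset */
    }

    int main(void)
    {
        int modeset = -1;   /* module default: auto-detect */

        if (vgacon_text_force_stub() && modeset == -1)
            modeset = 0;    /* text console requested: keep KMS off */

        if (modeset == -1)
            modeset = 1;    /* nothing objected: enable KMS */

        printf("radeon_modeset resolved to %d\n", modeset);
        return 0;
    }
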
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 34356252567a..5b937dfe6f65 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -31,9 +31,9 @@
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/list.h>
35#include <linux/kref.h> 34#include <linux/kref.h>
36#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/firmware.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include "radeon_reg.h" 38#include "radeon_reg.h"
39#include "radeon.h" 39#include "radeon.h"
@@ -768,7 +768,19 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { 769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
770 rdev->fence_drv[ring].scratch_reg = 0; 770 rdev->fence_drv[ring].scratch_reg = 0;
771 index = R600_WB_EVENT_OFFSET + ring * 4; 771 if (ring != R600_RING_TYPE_UVD_INDEX) {
772 index = R600_WB_EVENT_OFFSET + ring * 4;
773 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
774 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
775 index;
776
777 } else {
778 /* put fence directly behind firmware */
779 index = ALIGN(rdev->uvd_fw->size, 8);
780 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
781 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
782 }
783
772 } else { 784 } else {
773 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); 785 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
774 if (r) { 786 if (r) {
@@ -778,9 +790,9 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
778 index = RADEON_WB_SCRATCH_OFFSET + 790 index = RADEON_WB_SCRATCH_OFFSET +
779 rdev->fence_drv[ring].scratch_reg - 791 rdev->fence_drv[ring].scratch_reg -
780 rdev->scratch.reg_base; 792 rdev->scratch.reg_base;
793 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
794 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
781 } 795 }
782 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
783 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
784 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); 796 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
785 rdev->fence_drv[ring].initialized = true; 797 rdev->fence_drv[ring].initialized = true;
786 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", 798 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
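
UVD has no writeback slot, so the hunk above places its fence inside the UVD buffer object itself, 8-byte aligned directly behind the firmware image. A sketch of that offset computation, with an ALIGN_UP macro matching the kernel's ALIGN():

    #include <stdint.h>
    #include <stdio.h>

    /* round x up to the next multiple of a (a is a power of two) */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t uvd_gpu_addr = 0x100000;   /* example BO placement */
        uint64_t fw_size = 123451;          /* example firmware size */

        uint64_t index = ALIGN_UP(fw_size, 8);
        printf("firmware ends at 0x%llx, fence lives at 0x%llx\n",
               (unsigned long long)(uvd_gpu_addr + fw_size),
               (unsigned long long)(uvd_gpu_addr + index));
        return 0;
    }
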
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6b7957..aa796031ab65 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -84,6 +84,7 @@ retry:
84 return r; 84 return r;
85 } 85 }
86 *obj = &robj->gem_base; 86 *obj = &robj->gem_base;
87 robj->pid = task_pid_nr(current);
87 88
88 mutex_lock(&rdev->gem.mutex); 89 mutex_lock(&rdev->gem.mutex);
89 list_add_tail(&robj->list, &rdev->gem.objects); 90 list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
575{ 576{
576 return drm_gem_handle_delete(file_priv, handle); 577 return drm_gem_handle_delete(file_priv, handle);
577} 578}
579
580#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{
583 struct drm_info_node *node = (struct drm_info_node *)m->private;
584 struct drm_device *dev = node->minor->dev;
585 struct radeon_device *rdev = dev->dev_private;
586 struct radeon_bo *rbo;
587 unsigned i = 0;
588
589 mutex_lock(&rdev->gem.mutex);
590 list_for_each_entry(rbo, &rdev->gem.objects, list) {
591 unsigned domain;
592 const char *placement;
593
594 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
595 switch (domain) {
596 case RADEON_GEM_DOMAIN_VRAM:
597 placement = "VRAM";
598 break;
599 case RADEON_GEM_DOMAIN_GTT:
600 placement = " GTT";
601 break;
602 case RADEON_GEM_DOMAIN_CPU:
603 default:
604 placement = " CPU";
605 break;
606 }
607 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
608 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
609 placement, (unsigned long)rbo->pid);
610 i++;
611 }
612 mutex_unlock(&rdev->gem.mutex);
613 return 0;
614}
615
616static struct drm_info_list radeon_debugfs_gem_list[] = {
617 {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
618};
619#endif
620
621int radeon_gem_debugfs_init(struct radeon_device *rdev)
622{
623#if defined(CONFIG_DEBUG_FS)
624 return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
625#endif
626 return 0;
627}
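
The radeon_gem_info file added above classifies each BO by its current TTM memory type, with CPU and anything unknown sharing the " CPU" bucket. A sketch of that mapping; the DOMAIN_* constants again mirror the RADEON_GEM_DOMAIN_* bits:

    #include <stdio.h>

    #define DOMAIN_CPU  0x1
    #define DOMAIN_GTT  0x2
    #define DOMAIN_VRAM 0x4

    static const char *placement_name(unsigned domain)
    {
        switch (domain) {
        case DOMAIN_VRAM: return "VRAM";
        case DOMAIN_GTT:  return " GTT";
        case DOMAIN_CPU:
        default:          return " CPU";   /* unknown falls back to CPU */
        }
    }

    int main(void)
    {
        printf("[%s] [%s] [%s]\n", placement_name(DOMAIN_VRAM),
               placement_name(DOMAIN_GTT), placement_name(0));
        return 0;
    }
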
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 48f80cd42d8f..5a99d433fc35 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -270,7 +270,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
270} 270}
271 271
272/** 272/**
273 * radeon_irq_kms_fini - tear down driver interrrupt info 273 * radeon_irq_kms_fini - tear down driver interrupt info
274 * 274 *
275 * @rdev: radeon device pointer 275 * @rdev: radeon device pointer
276 * 276 *
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c75cb2c6ba71..4f2d4f4c1dab 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
50 50
51 if (rdev == NULL) 51 if (rdev == NULL)
52 return 0; 52 return 0;
53 if (rdev->rmmio == NULL)
54 goto done_free;
53 radeon_acpi_fini(rdev); 55 radeon_acpi_fini(rdev);
54 radeon_modeset_fini(rdev); 56 radeon_modeset_fini(rdev);
55 radeon_device_fini(rdev); 57 radeon_device_fini(rdev);
58
59done_free:
56 kfree(rdev); 60 kfree(rdev);
57 dev->dev_private = NULL; 61 dev->dev_private = NULL;
58 return 0; 62 return 0;
@@ -176,80 +180,65 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
176 struct radeon_device *rdev = dev->dev_private; 180 struct radeon_device *rdev = dev->dev_private;
177 struct drm_radeon_info *info = data; 181 struct drm_radeon_info *info = data;
178 struct radeon_mode_info *minfo = &rdev->mode_info; 182 struct radeon_mode_info *minfo = &rdev->mode_info;
179 uint32_t value, *value_ptr; 183 uint32_t *value, value_tmp, *value_ptr, value_size;
180 uint64_t value64, *value_ptr64; 184 uint64_t value64;
181 struct drm_crtc *crtc; 185 struct drm_crtc *crtc;
182 int i, found; 186 int i, found;
183 187
184 /* TIMESTAMP is a 64-bit value, needs special handling. */
185 if (info->request == RADEON_INFO_TIMESTAMP) {
186 if (rdev->family >= CHIP_R600) {
187 value_ptr64 = (uint64_t*)((unsigned long)info->value);
188 value64 = radeon_get_gpu_clock_counter(rdev);
189
190 if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
191 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
192 return -EFAULT;
193 }
194 return 0;
195 } else {
196 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
197 return -EINVAL;
198 }
199 }
200
201 value_ptr = (uint32_t *)((unsigned long)info->value); 188 value_ptr = (uint32_t *)((unsigned long)info->value);
202 if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { 189 value = &value_tmp;
203 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 190 value_size = sizeof(uint32_t);
204 return -EFAULT;
205 }
206 191
207 switch (info->request) { 192 switch (info->request) {
208 case RADEON_INFO_DEVICE_ID: 193 case RADEON_INFO_DEVICE_ID:
209 value = dev->pci_device; 194 *value = dev->pci_device;
210 break; 195 break;
211 case RADEON_INFO_NUM_GB_PIPES: 196 case RADEON_INFO_NUM_GB_PIPES:
212 value = rdev->num_gb_pipes; 197 *value = rdev->num_gb_pipes;
213 break; 198 break;
214 case RADEON_INFO_NUM_Z_PIPES: 199 case RADEON_INFO_NUM_Z_PIPES:
215 value = rdev->num_z_pipes; 200 *value = rdev->num_z_pipes;
216 break; 201 break;
217 case RADEON_INFO_ACCEL_WORKING: 202 case RADEON_INFO_ACCEL_WORKING:
218 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ 203 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
219 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) 204 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
220 value = false; 205 *value = false;
221 else 206 else
222 value = rdev->accel_working; 207 *value = rdev->accel_working;
223 break; 208 break;
224 case RADEON_INFO_CRTC_FROM_ID: 209 case RADEON_INFO_CRTC_FROM_ID:
210 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
211 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
212 return -EFAULT;
213 }
225 for (i = 0, found = 0; i < rdev->num_crtc; i++) { 214 for (i = 0, found = 0; i < rdev->num_crtc; i++) {
226 crtc = (struct drm_crtc *)minfo->crtcs[i]; 215 crtc = (struct drm_crtc *)minfo->crtcs[i];
227 if (crtc && crtc->base.id == value) { 216 if (crtc && crtc->base.id == *value) {
228 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 217 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
229 value = radeon_crtc->crtc_id; 218 *value = radeon_crtc->crtc_id;
230 found = 1; 219 found = 1;
231 break; 220 break;
232 } 221 }
233 } 222 }
234 if (!found) { 223 if (!found) {
235 DRM_DEBUG_KMS("unknown crtc id %d\n", value); 224 DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
236 return -EINVAL; 225 return -EINVAL;
237 } 226 }
238 break; 227 break;
239 case RADEON_INFO_ACCEL_WORKING2: 228 case RADEON_INFO_ACCEL_WORKING2:
240 value = rdev->accel_working; 229 *value = rdev->accel_working;
241 break; 230 break;
242 case RADEON_INFO_TILING_CONFIG: 231 case RADEON_INFO_TILING_CONFIG:
243 if (rdev->family >= CHIP_TAHITI) 232 if (rdev->family >= CHIP_TAHITI)
244 value = rdev->config.si.tile_config; 233 *value = rdev->config.si.tile_config;
245 else if (rdev->family >= CHIP_CAYMAN) 234 else if (rdev->family >= CHIP_CAYMAN)
246 value = rdev->config.cayman.tile_config; 235 *value = rdev->config.cayman.tile_config;
247 else if (rdev->family >= CHIP_CEDAR) 236 else if (rdev->family >= CHIP_CEDAR)
248 value = rdev->config.evergreen.tile_config; 237 *value = rdev->config.evergreen.tile_config;
249 else if (rdev->family >= CHIP_RV770) 238 else if (rdev->family >= CHIP_RV770)
250 value = rdev->config.rv770.tile_config; 239 *value = rdev->config.rv770.tile_config;
251 else if (rdev->family >= CHIP_R600) 240 else if (rdev->family >= CHIP_R600)
252 value = rdev->config.r600.tile_config; 241 *value = rdev->config.r600.tile_config;
253 else { 242 else {
254 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 243 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
255 return -EINVAL; 244 return -EINVAL;
@@ -262,73 +251,81 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
262 * 251 *
263 * When returning, the value is 1 if filp owns hyper-z access, 252 * When returning, the value is 1 if filp owns hyper-z access,
264 * 0 otherwise. */ 253 * 0 otherwise. */
265 if (value >= 2) { 254 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
266 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); 255 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
256 return -EFAULT;
257 }
258 if (*value >= 2) {
259 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
267 return -EINVAL; 260 return -EINVAL;
268 } 261 }
269 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); 262 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
270 break; 263 break;
271 case RADEON_INFO_WANT_CMASK: 264 case RADEON_INFO_WANT_CMASK:
272 /* The same logic as Hyper-Z. */ 265 /* The same logic as Hyper-Z. */
273 if (value >= 2) { 266 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
274 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); 267 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
268 return -EFAULT;
269 }
270 if (*value >= 2) {
271 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
275 return -EINVAL; 272 return -EINVAL;
276 } 273 }
277 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); 274 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
278 break; 275 break;
279 case RADEON_INFO_CLOCK_CRYSTAL_FREQ: 276 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
280 /* return clock value in KHz */ 277 /* return clock value in KHz */
281 if (rdev->asic->get_xclk) 278 if (rdev->asic->get_xclk)
282 value = radeon_get_xclk(rdev) * 10; 279 *value = radeon_get_xclk(rdev) * 10;
283 else 280 else
284 value = rdev->clock.spll.reference_freq * 10; 281 *value = rdev->clock.spll.reference_freq * 10;
285 break; 282 break;
286 case RADEON_INFO_NUM_BACKENDS: 283 case RADEON_INFO_NUM_BACKENDS:
287 if (rdev->family >= CHIP_TAHITI) 284 if (rdev->family >= CHIP_TAHITI)
288 value = rdev->config.si.max_backends_per_se * 285 *value = rdev->config.si.max_backends_per_se *
289 rdev->config.si.max_shader_engines; 286 rdev->config.si.max_shader_engines;
290 else if (rdev->family >= CHIP_CAYMAN) 287 else if (rdev->family >= CHIP_CAYMAN)
291 value = rdev->config.cayman.max_backends_per_se * 288 *value = rdev->config.cayman.max_backends_per_se *
292 rdev->config.cayman.max_shader_engines; 289 rdev->config.cayman.max_shader_engines;
293 else if (rdev->family >= CHIP_CEDAR) 290 else if (rdev->family >= CHIP_CEDAR)
294 value = rdev->config.evergreen.max_backends; 291 *value = rdev->config.evergreen.max_backends;
295 else if (rdev->family >= CHIP_RV770) 292 else if (rdev->family >= CHIP_RV770)
296 value = rdev->config.rv770.max_backends; 293 *value = rdev->config.rv770.max_backends;
297 else if (rdev->family >= CHIP_R600) 294 else if (rdev->family >= CHIP_R600)
298 value = rdev->config.r600.max_backends; 295 *value = rdev->config.r600.max_backends;
299 else { 296 else {
300 return -EINVAL; 297 return -EINVAL;
301 } 298 }
302 break; 299 break;
303 case RADEON_INFO_NUM_TILE_PIPES: 300 case RADEON_INFO_NUM_TILE_PIPES:
304 if (rdev->family >= CHIP_TAHITI) 301 if (rdev->family >= CHIP_TAHITI)
305 value = rdev->config.si.max_tile_pipes; 302 *value = rdev->config.si.max_tile_pipes;
306 else if (rdev->family >= CHIP_CAYMAN) 303 else if (rdev->family >= CHIP_CAYMAN)
307 value = rdev->config.cayman.max_tile_pipes; 304 *value = rdev->config.cayman.max_tile_pipes;
308 else if (rdev->family >= CHIP_CEDAR) 305 else if (rdev->family >= CHIP_CEDAR)
309 value = rdev->config.evergreen.max_tile_pipes; 306 *value = rdev->config.evergreen.max_tile_pipes;
310 else if (rdev->family >= CHIP_RV770) 307 else if (rdev->family >= CHIP_RV770)
311 value = rdev->config.rv770.max_tile_pipes; 308 *value = rdev->config.rv770.max_tile_pipes;
312 else if (rdev->family >= CHIP_R600) 309 else if (rdev->family >= CHIP_R600)
313 value = rdev->config.r600.max_tile_pipes; 310 *value = rdev->config.r600.max_tile_pipes;
314 else { 311 else {
315 return -EINVAL; 312 return -EINVAL;
316 } 313 }
317 break; 314 break;
318 case RADEON_INFO_FUSION_GART_WORKING: 315 case RADEON_INFO_FUSION_GART_WORKING:
319 value = 1; 316 *value = 1;
320 break; 317 break;
321 case RADEON_INFO_BACKEND_MAP: 318 case RADEON_INFO_BACKEND_MAP:
322 if (rdev->family >= CHIP_TAHITI) 319 if (rdev->family >= CHIP_TAHITI)
323 value = rdev->config.si.backend_map; 320 *value = rdev->config.si.backend_map;
324 else if (rdev->family >= CHIP_CAYMAN) 321 else if (rdev->family >= CHIP_CAYMAN)
325 value = rdev->config.cayman.backend_map; 322 *value = rdev->config.cayman.backend_map;
326 else if (rdev->family >= CHIP_CEDAR) 323 else if (rdev->family >= CHIP_CEDAR)
327 value = rdev->config.evergreen.backend_map; 324 *value = rdev->config.evergreen.backend_map;
328 else if (rdev->family >= CHIP_RV770) 325 else if (rdev->family >= CHIP_RV770)
329 value = rdev->config.rv770.backend_map; 326 *value = rdev->config.rv770.backend_map;
330 else if (rdev->family >= CHIP_R600) 327 else if (rdev->family >= CHIP_R600)
331 value = rdev->config.r600.backend_map; 328 *value = rdev->config.r600.backend_map;
332 else { 329 else {
333 return -EINVAL; 330 return -EINVAL;
334 } 331 }
@@ -337,50 +334,91 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
337 /* this is where we report if vm is supported or not */ 334 /* this is where we report if vm is supported or not */
338 if (rdev->family < CHIP_CAYMAN) 335 if (rdev->family < CHIP_CAYMAN)
339 return -EINVAL; 336 return -EINVAL;
340 value = RADEON_VA_RESERVED_SIZE; 337 *value = RADEON_VA_RESERVED_SIZE;
341 break; 338 break;
342 case RADEON_INFO_IB_VM_MAX_SIZE: 339 case RADEON_INFO_IB_VM_MAX_SIZE:
343 /* this is where we report if vm is supported or not */ 340 /* this is where we report if vm is supported or not */
344 if (rdev->family < CHIP_CAYMAN) 341 if (rdev->family < CHIP_CAYMAN)
345 return -EINVAL; 342 return -EINVAL;
346 value = RADEON_IB_VM_MAX_SIZE; 343 *value = RADEON_IB_VM_MAX_SIZE;
347 break; 344 break;
348 case RADEON_INFO_MAX_PIPES: 345 case RADEON_INFO_MAX_PIPES:
349 if (rdev->family >= CHIP_TAHITI) 346 if (rdev->family >= CHIP_TAHITI)
350 value = rdev->config.si.max_cu_per_sh; 347 *value = rdev->config.si.max_cu_per_sh;
351 else if (rdev->family >= CHIP_CAYMAN) 348 else if (rdev->family >= CHIP_CAYMAN)
352 value = rdev->config.cayman.max_pipes_per_simd; 349 *value = rdev->config.cayman.max_pipes_per_simd;
353 else if (rdev->family >= CHIP_CEDAR) 350 else if (rdev->family >= CHIP_CEDAR)
354 value = rdev->config.evergreen.max_pipes; 351 *value = rdev->config.evergreen.max_pipes;
355 else if (rdev->family >= CHIP_RV770) 352 else if (rdev->family >= CHIP_RV770)
356 value = rdev->config.rv770.max_pipes; 353 *value = rdev->config.rv770.max_pipes;
357 else if (rdev->family >= CHIP_R600) 354 else if (rdev->family >= CHIP_R600)
358 value = rdev->config.r600.max_pipes; 355 *value = rdev->config.r600.max_pipes;
359 else { 356 else {
360 return -EINVAL; 357 return -EINVAL;
361 } 358 }
362 break; 359 break;
360 case RADEON_INFO_TIMESTAMP:
361 if (rdev->family < CHIP_R600) {
362 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
363 return -EINVAL;
364 }
365 value = (uint32_t*)&value64;
366 value_size = sizeof(uint64_t);
367 value64 = radeon_get_gpu_clock_counter(rdev);
368 break;
363 case RADEON_INFO_MAX_SE: 369 case RADEON_INFO_MAX_SE:
364 if (rdev->family >= CHIP_TAHITI) 370 if (rdev->family >= CHIP_TAHITI)
365 value = rdev->config.si.max_shader_engines; 371 *value = rdev->config.si.max_shader_engines;
366 else if (rdev->family >= CHIP_CAYMAN) 372 else if (rdev->family >= CHIP_CAYMAN)
367 value = rdev->config.cayman.max_shader_engines; 373 *value = rdev->config.cayman.max_shader_engines;
368 else if (rdev->family >= CHIP_CEDAR) 374 else if (rdev->family >= CHIP_CEDAR)
369 value = rdev->config.evergreen.num_ses; 375 *value = rdev->config.evergreen.num_ses;
370 else 376 else
371 value = 1; 377 *value = 1;
372 break; 378 break;
373 case RADEON_INFO_MAX_SH_PER_SE: 379 case RADEON_INFO_MAX_SH_PER_SE:
374 if (rdev->family >= CHIP_TAHITI) 380 if (rdev->family >= CHIP_TAHITI)
375 value = rdev->config.si.max_sh_per_se; 381 *value = rdev->config.si.max_sh_per_se;
376 else 382 else
377 return -EINVAL; 383 return -EINVAL;
378 break; 384 break;
385 case RADEON_INFO_FASTFB_WORKING:
386 *value = rdev->fastfb_working;
387 break;
388 case RADEON_INFO_RING_WORKING:
389 if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
390 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
391 return -EFAULT;
392 }
393 switch (*value) {
394 case RADEON_CS_RING_GFX:
395 case RADEON_CS_RING_COMPUTE:
396 *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
397 break;
398 case RADEON_CS_RING_DMA:
399 *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
400 *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
401 break;
402 case RADEON_CS_RING_UVD:
403 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
404 break;
405 default:
406 return -EINVAL;
407 }
408 break;
409 case RADEON_INFO_SI_TILE_MODE_ARRAY:
410 if (rdev->family < CHIP_TAHITI) {
411 DRM_DEBUG_KMS("tile mode array is si only!\n");
412 return -EINVAL;
413 }
414 value = rdev->config.si.tile_mode_array;
415 value_size = sizeof(uint32_t)*32;
416 break;
379 default: 417 default:
380 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 418 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
381 return -EINVAL; 419 return -EINVAL;
382 } 420 }
383 if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { 421 if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
384 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); 422 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
385 return -EFAULT; 423 return -EFAULT;
386 } 424 }
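
The ioctl rework above makes every request fill a buffer through the value pointer and then performs a single copy-out of value_size bytes, so 32-bit scalars, the 64-bit timestamp and the 32-entry SI tile-mode array all share one exit path; requests that also take input do their own copy-in first. A userspace sketch of the pattern, with memcpy standing in for DRM_COPY_TO_USER:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t user_buf[sizeof(uint64_t)];   /* stands in for value_ptr */
        uint32_t value_tmp;
        uint64_t value64;
        void *value;
        size_t value_size;
        int request = 1;   /* 0: 32-bit scalar, 1: 64-bit timestamp */

        /* defaults, as in the patch */
        value = &value_tmp;
        value_size = sizeof(uint32_t);

        if (request == 0) {
            value_tmp = 42;
        } else {
            /* a wide request just redirects value at a bigger object */
            value = &value64;
            value_size = sizeof(uint64_t);
            value64 = 0x123456789abcULL;
        }

        memcpy(user_buf, value, value_size);   /* one shared exit copy */
        printf("copied %zu bytes\n", value_size);
        return 0;
    }
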
@@ -513,6 +551,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
513 rdev->hyperz_filp = NULL; 551 rdev->hyperz_filp = NULL;
514 if (rdev->cmask_filp == file_priv) 552 if (rdev->cmask_filp == file_priv)
515 rdev->cmask_filp = NULL; 553 rdev->cmask_filp = NULL;
554 radeon_uvd_free_handles(rdev, file_priv);
516} 555}
517 556
518/* 557/*
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4003f5a68c09..44e579e75fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -492,6 +492,29 @@ struct radeon_framebuffer {
492#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ 492#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
493 ((em) == ATOM_ENCODER_MODE_DP_MST)) 493 ((em) == ATOM_ENCODER_MODE_DP_MST))
494 494
495struct atom_clock_dividers {
496 u32 post_div;
497 union {
498 struct {
499#ifdef __BIG_ENDIAN
500 u32 reserved : 6;
501 u32 whole_fb_div : 12;
502 u32 frac_fb_div : 14;
503#else
504 u32 frac_fb_div : 14;
505 u32 whole_fb_div : 12;
506 u32 reserved : 6;
507#endif
508 };
509 u32 fb_div;
510 };
511 u32 ref_div;
512 bool enable_post_div;
513 bool enable_dithen;
514 u32 vco_mode;
515 u32 real_clock;
516};
517
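
The struct above overlays fb_div with whole/fractional bitfields so callers can read either view; since C bitfield layout is implementation-defined, the header carries an explicit __BIG_ENDIAN variant. A little-endian sketch of the equivalent mask-and-shift decode, which is the portable way to read the same 14/12/6 layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* layout from the struct: bits 0-13 fractional feedback divider,
         * bits 14-25 whole divider, bits 26-31 reserved */
        uint32_t fb_div = (100u << 14) | 8192u;   /* whole 100, frac 0.5 */

        uint32_t frac  = fb_div & 0x3fff;
        uint32_t whole = (fb_div >> 14) & 0xfff;

        printf("whole=%u frac=%u/16384\n", whole, frac);
        return 0;
    }
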
495extern enum radeon_tv_std 518extern enum radeon_tv_std
496radeon_combios_get_tv_info(struct radeon_device *rdev); 519radeon_combios_get_tv_info(struct radeon_device *rdev);
497extern enum radeon_tv_std 520extern enum radeon_tv_std
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3aface2d12d..1424ccde2377 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -321,8 +321,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
321int radeon_bo_init(struct radeon_device *rdev) 321int radeon_bo_init(struct radeon_device *rdev)
322{ 322{
323 /* Add an MTRR for the VRAM */ 323 /* Add an MTRR for the VRAM */
324 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 324 if (!rdev->fastfb_working) {
325 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
325 MTRR_TYPE_WRCOMB, 1); 326 MTRR_TYPE_WRCOMB, 1);
327 }
326 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 328 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
327 rdev->mc.mc_vram_size >> 20, 329 rdev->mc.mc_vram_size >> 20,
328 (unsigned long long)rdev->mc.aper_size >> 20); 330 (unsigned long long)rdev->mc.aper_size >> 20);
@@ -339,14 +341,14 @@ void radeon_bo_fini(struct radeon_device *rdev)
339void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 341void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
340 struct list_head *head) 342 struct list_head *head)
341{ 343{
342 if (lobj->wdomain) { 344 if (lobj->written) {
343 list_add(&lobj->tv.head, head); 345 list_add(&lobj->tv.head, head);
344 } else { 346 } else {
345 list_add_tail(&lobj->tv.head, head); 347 list_add_tail(&lobj->tv.head, head);
346 } 348 }
347} 349}
348 350
349int radeon_bo_list_validate(struct list_head *head) 351int radeon_bo_list_validate(struct list_head *head, int ring)
350{ 352{
351 struct radeon_bo_list *lobj; 353 struct radeon_bo_list *lobj;
352 struct radeon_bo *bo; 354 struct radeon_bo *bo;
@@ -360,15 +362,17 @@ int radeon_bo_list_validate(struct list_head *head)
360 list_for_each_entry(lobj, head, tv.head) { 362 list_for_each_entry(lobj, head, tv.head) {
361 bo = lobj->bo; 363 bo = lobj->bo;
362 if (!bo->pin_count) { 364 if (!bo->pin_count) {
363 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; 365 domain = lobj->domain;
364 366
365 retry: 367 retry:
366 radeon_ttm_placement_from_domain(bo, domain); 368 radeon_ttm_placement_from_domain(bo, domain);
369 if (ring == R600_RING_TYPE_UVD_INDEX)
370 radeon_uvd_force_into_uvd_segment(bo);
367 r = ttm_bo_validate(&bo->tbo, &bo->placement, 371 r = ttm_bo_validate(&bo->tbo, &bo->placement,
368 true, false); 372 true, false);
369 if (unlikely(r)) { 373 if (unlikely(r)) {
370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 374 if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
371 domain |= RADEON_GEM_DOMAIN_GTT; 375 domain = lobj->alt_domain;
372 goto retry; 376 goto retry;
373 } 377 }
374 return r; 378 return r;
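
Validation above now retries with a precomputed alt_domain instead of hard-coding "VRAM, then VRAM|GTT" (and, as before, never retries after -ERESTARTSYS). A sketch of that retry shape with a stub validator that rejects the first placement:

    #include <stdio.h>

    #define DOMAIN_GTT  0x2
    #define DOMAIN_VRAM 0x4

    /* stub validator: pretend VRAM is full, anything else succeeds */
    static int validate(unsigned domain)
    {
        return domain == DOMAIN_VRAM ? -12 /* -ENOMEM */ : 0;
    }

    int main(void)
    {
        unsigned domain = DOMAIN_VRAM;
        unsigned alt_domain = DOMAIN_VRAM | DOMAIN_GTT;
        int r;

    retry:
        r = validate(domain);
        if (r) {
            if (domain != alt_domain) {
                domain = alt_domain;   /* one fallback, then give up */
                goto retry;
            }
            printf("validation failed: %d\n", r);
            return 1;
        }
        printf("validated in domain 0x%x\n", domain);
        return 0;
    }
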
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5fc86b03043b..e2cb80a96b51 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,7 +128,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
128extern void radeon_bo_fini(struct radeon_device *rdev); 128extern void radeon_bo_fini(struct radeon_device *rdev);
129extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 129extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
130 struct list_head *head); 130 struct list_head *head);
131extern int radeon_bo_list_validate(struct list_head *head); 131extern int radeon_bo_list_validate(struct list_head *head, int ring);
132extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 132extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
133 struct vm_area_struct *vma); 133 struct vm_area_struct *vma);
134extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 134extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a74e87..788c64cb4b47 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
843 struct radeon_device *rdev = dev->dev_private; 843 struct radeon_device *rdev = dev->dev_private;
844 844
845 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 845 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
846 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 846 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
847 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
848 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
849 else
850 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
847 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 851 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
848 if (rdev->asic->pm.get_memory_clock) 852 if (rdev->asic->pm.get_memory_clock)
849 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 853 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d58e268ff6d..e17faa7cf732 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
180 radeon_semaphore_free(rdev, &ib->semaphore, NULL); 180 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
181 } 181 }
182 /* if we can't remember our last VM flush then flush now! */ 182 /* if we can't remember our last VM flush then flush now! */
183 if (ib->vm && !ib->vm->last_flush) { 183 /* XXX figure out why we have to flush for every IB */
184 if (ib->vm /*&& !ib->vm->last_flush*/) {
184 radeon_ring_vm_flush(rdev, ib->ring, ib->vm); 185 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
185 } 186 }
186 if (const_ib) { 187 if (const_ib) {
@@ -368,7 +369,7 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
368{ 369{
369 u32 rptr; 370 u32 rptr;
370 371
371 if (rdev->wb.enabled) 372 if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
372 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); 373 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
373 else 374 else
374 rptr = RREG32(ring->rptr_reg); 375 rptr = RREG32(ring->rptr_reg);
@@ -821,18 +822,20 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
821 return 0; 822 return 0;
822} 823}
823 824
824static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; 825static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
825static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; 826static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
826static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; 827static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
827static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX; 828static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
828static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; 829static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
830static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
829 831
830static struct drm_info_list radeon_debugfs_ring_info_list[] = { 832static struct drm_info_list radeon_debugfs_ring_info_list[] = {
831 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index}, 833 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
832 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index}, 834 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
833 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, 835 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
834 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index}, 836 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
835 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index}, 837 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
838 {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
836}; 839};
837 840
838static int radeon_debugfs_sa_info(struct seq_file *m, void *data) 841static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index cb800995d4f9..0abe5a9431bb 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
64 } 64 }
65 65
66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
67 RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo); 67 domain, NULL, &sa_manager->bo);
68 if (r) { 68 if (r) {
69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
70 return r; 70 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index fda09c9ea689..bbed4af8d0bc 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -252,6 +252,36 @@ void radeon_test_moves(struct radeon_device *rdev)
252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT); 252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
253} 253}
254 254
255static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
256 struct radeon_ring *ring,
257 struct radeon_fence **fence)
258{
259 int r;
260
261 if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
262 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
263 if (r) {
264 DRM_ERROR("Failed to get dummy create msg\n");
265 return r;
266 }
267
268 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
269 if (r) {
270 DRM_ERROR("Failed to get dummy destroy msg\n");
271 return r;
272 }
273 } else {
274 r = radeon_ring_lock(rdev, ring, 64);
275 if (r) {
276 DRM_ERROR("Failed to lock ring %d\n", ring->idx);
277 return r;
278 }
279 radeon_fence_emit(rdev, fence, ring->idx);
280 radeon_ring_unlock_commit(rdev, ring);
281 }
282 return 0;
283}
284
255void radeon_test_ring_sync(struct radeon_device *rdev, 285void radeon_test_ring_sync(struct radeon_device *rdev,
256 struct radeon_ring *ringA, 286 struct radeon_ring *ringA,
257 struct radeon_ring *ringB) 287 struct radeon_ring *ringB)
@@ -272,21 +302,24 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
272 goto out_cleanup; 302 goto out_cleanup;
273 } 303 }
274 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 304 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
275 r = radeon_fence_emit(rdev, &fence1, ringA->idx); 305 radeon_ring_unlock_commit(rdev, ringA);
276 if (r) { 306
277 DRM_ERROR("Failed to emit fence 1\n"); 307 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
278 radeon_ring_unlock_undo(rdev, ringA); 308 if (r)
279 goto out_cleanup; 309 goto out_cleanup;
280 } 310
281 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 311 r = radeon_ring_lock(rdev, ringA, 64);
282 r = radeon_fence_emit(rdev, &fence2, ringA->idx);
283 if (r) { 312 if (r) {
284 DRM_ERROR("Failed to emit fence 2\n"); 313 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
285 radeon_ring_unlock_undo(rdev, ringA);
286 goto out_cleanup; 314 goto out_cleanup;
287 } 315 }
316 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
288 radeon_ring_unlock_commit(rdev, ringA); 317 radeon_ring_unlock_commit(rdev, ringA);
289 318
319 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
320 if (r)
321 goto out_cleanup;
322
290 mdelay(1000); 323 mdelay(1000);
291 324
292 if (radeon_fence_signaled(fence1)) { 325 if (radeon_fence_signaled(fence1)) {
@@ -364,27 +397,22 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
364 goto out_cleanup; 397 goto out_cleanup;
365 } 398 }
366 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 399 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
367 r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
368 if (r) {
369 DRM_ERROR("Failed to emit sync fence 1\n");
370 radeon_ring_unlock_undo(rdev, ringA);
371 goto out_cleanup;
372 }
373 radeon_ring_unlock_commit(rdev, ringA); 400 radeon_ring_unlock_commit(rdev, ringA);
374 401
402 r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
403 if (r)
404 goto out_cleanup;
405
375 r = radeon_ring_lock(rdev, ringB, 64); 406 r = radeon_ring_lock(rdev, ringB, 64);
376 if (r) { 407 if (r) {
377 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); 408 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
378 goto out_cleanup; 409 goto out_cleanup;
379 } 410 }
380 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); 411 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
381 r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
382 if (r) {
383 DRM_ERROR("Failed to create sync fence 2\n");
384 radeon_ring_unlock_undo(rdev, ringB);
385 goto out_cleanup;
386 }
387 radeon_ring_unlock_commit(rdev, ringB); 412 radeon_ring_unlock_commit(rdev, ringB);
413 r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
414 if (r)
415 goto out_cleanup;
388 416
389 mdelay(1000); 417 mdelay(1000);
390 418
@@ -393,7 +421,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
393 goto out_cleanup; 421 goto out_cleanup;
394 } 422 }
395 if (radeon_fence_signaled(fenceB)) { 423 if (radeon_fence_signaled(fenceB)) {
396 DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); 424 DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
397 goto out_cleanup; 425 goto out_cleanup;
398 } 426 }
399 427
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 000000000000..906e5c0ca3b9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,831 @@
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <deathsimple@vodafone.de>
29 */
30
31#include <linux/firmware.h>
32#include <linux/module.h>
33#include <drm/drmP.h>
34#include <drm/drm.h>
35
36#include "radeon.h"
37#include "r600d.h"
38
39/* 1 second timeout */
40#define UVD_IDLE_TIMEOUT_MS 1000
41
42/* Firmware Names */
43#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
44#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
45#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
46#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
47
48MODULE_FIRMWARE(FIRMWARE_RV710);
49MODULE_FIRMWARE(FIRMWARE_CYPRESS);
50MODULE_FIRMWARE(FIRMWARE_SUMO);
51MODULE_FIRMWARE(FIRMWARE_TAHITI);
52
53static void radeon_uvd_idle_work_handler(struct work_struct *work);
54
55int radeon_uvd_init(struct radeon_device *rdev)
56{
57 struct platform_device *pdev;
58 unsigned long bo_size;
59 const char *fw_name;
60 int i, r;
61
62 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
63
64 pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
65 r = IS_ERR(pdev);
66 if (r) {
67 dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
68 return -EINVAL;
69 }
70
71 switch (rdev->family) {
72 case CHIP_RV710:
73 case CHIP_RV730:
74 case CHIP_RV740:
75 fw_name = FIRMWARE_RV710;
76 break;
77
78 case CHIP_CYPRESS:
79 case CHIP_HEMLOCK:
80 case CHIP_JUNIPER:
81 case CHIP_REDWOOD:
82 case CHIP_CEDAR:
83 fw_name = FIRMWARE_CYPRESS;
84 break;
85
86 case CHIP_SUMO:
87 case CHIP_SUMO2:
88 case CHIP_PALM:
89 case CHIP_CAYMAN:
90 case CHIP_BARTS:
91 case CHIP_TURKS:
92 case CHIP_CAICOS:
93 fw_name = FIRMWARE_SUMO;
94 break;
95
96 case CHIP_TAHITI:
97 case CHIP_VERDE:
98 case CHIP_PITCAIRN:
99 case CHIP_ARUBA:
100 fw_name = FIRMWARE_TAHITI;
101 break;
102
103 default:
104 return -EINVAL;
105 }
106
107 r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
108 if (r) {
109 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
110 fw_name);
111 platform_device_unregister(pdev);
112 return r;
113 }
114
115 platform_device_unregister(pdev);
116
117 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
118 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
119 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
120 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
121 if (r) {
122 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
123 return r;
124 }
125
126 r = radeon_uvd_resume(rdev);
127 if (r)
128 return r;
129
130 memset(rdev->uvd.cpu_addr, 0, bo_size);
131 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
132
133 r = radeon_uvd_suspend(rdev);
134 if (r)
135 return r;
136
137 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
138 atomic_set(&rdev->uvd.handles[i], 0);
139 rdev->uvd.filp[i] = NULL;
140 }
141
142 return 0;
143}
144
145void radeon_uvd_fini(struct radeon_device *rdev)
146{
147 radeon_uvd_suspend(rdev);
148 radeon_bo_unref(&rdev->uvd.vcpu_bo);
149}
150
151int radeon_uvd_suspend(struct radeon_device *rdev)
152{
153 int r;
154
155 if (rdev->uvd.vcpu_bo == NULL)
156 return 0;
157
158 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
159 if (!r) {
160 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
161 radeon_bo_unpin(rdev->uvd.vcpu_bo);
162 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
163 }
164 return r;
165}
166
167int radeon_uvd_resume(struct radeon_device *rdev)
168{
169 int r;
170
171 if (rdev->uvd.vcpu_bo == NULL)
172 return -EINVAL;
173
174 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
175 if (r) {
176 radeon_bo_unref(&rdev->uvd.vcpu_bo);
177 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
178 return r;
179 }
180
181 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
182 &rdev->uvd.gpu_addr);
183 if (r) {
184 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
185 radeon_bo_unref(&rdev->uvd.vcpu_bo);
186 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
187 return r;
188 }
189
190 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
191 if (r) {
192 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
193 return r;
194 }
195
196 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
197
198 return 0;
199}
200
201void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
202{
203 rbo->placement.fpfn = 0 >> PAGE_SHIFT;
204 rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
205}
206
207void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
208{
209 int i, r;
210 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
211 if (rdev->uvd.filp[i] == filp) {
212 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
213 struct radeon_fence *fence;
214
215 r = radeon_uvd_get_destroy_msg(rdev,
216 R600_RING_TYPE_UVD_INDEX, handle, &fence);
217 if (r) {
218 DRM_ERROR("Error destroying UVD (%d)!\n", r);
219 continue;
220 }
221
222 radeon_fence_wait(fence, false);
223 radeon_fence_unref(&fence);
224
225 rdev->uvd.filp[i] = NULL;
226 atomic_set(&rdev->uvd.handles[i], 0);
227 }
228 }
229}
230
231static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
232{
233 unsigned stream_type = msg[4];
234 unsigned width = msg[6];
235 unsigned height = msg[7];
236 unsigned dpb_size = msg[9];
237 unsigned pitch = msg[28];
238
239 unsigned width_in_mb = width / 16;
240 unsigned height_in_mb = ALIGN(height / 16, 2);
241
242 unsigned image_size, tmp, min_dpb_size;
243
244 image_size = width * height;
245 image_size += image_size / 2;
246 image_size = ALIGN(image_size, 1024);
247
248 switch (stream_type) {
249 case 0: /* H264 */
250
251 /* reference picture buffer */
252 min_dpb_size = image_size * 17;
253
254 /* macroblock context buffer */
255 min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
256
257 /* IT surface buffer */
258 min_dpb_size += width_in_mb * height_in_mb * 32;
259 break;
260
261 case 1: /* VC1 */
262
263 /* reference picture buffer */
264 min_dpb_size = image_size * 3;
265
266 /* CONTEXT_BUFFER */
267 min_dpb_size += width_in_mb * height_in_mb * 128;
268
269 /* IT surface buffer */
270 min_dpb_size += width_in_mb * 64;
271
272 /* DB surface buffer */
273 min_dpb_size += width_in_mb * 128;
274
275 /* BP */
276 tmp = max(width_in_mb, height_in_mb);
277 min_dpb_size += ALIGN(tmp * 7 * 16, 64);
278 break;
279
280 case 3: /* MPEG2 */
281
282 /* reference picture buffer */
283 min_dpb_size = image_size * 3;
284 break;
285
286 case 4: /* MPEG4 */
287
288 /* reference picture buffer */
289 min_dpb_size = image_size * 3;
290
291 /* CM */
292 min_dpb_size += width_in_mb * height_in_mb * 64;
293
294 /* IT surface buffer */
295 min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
296 break;
297
298 default:
299 DRM_ERROR("UVD codec not handled %d!\n", stream_type);
300 return -EINVAL;
301 }
302
303 if (width > pitch) {
304 DRM_ERROR("Invalid UVD decoding target pitch!\n");
305 return -EINVAL;
306 }
307
308 if (dpb_size < min_dpb_size) {
309 DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
310 dpb_size, min_dpb_size);
311 return -EINVAL;
312 }
313
314 buf_sizes[0x1] = dpb_size;
315 buf_sizes[0x2] = image_size;
316 return 0;
317}
318
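To see what the decode-message checker above expects in practice, here is a standalone userspace sketch of the H264 branch of the sizing math; the resolution is a made-up sample, and the print-out is the minimum DPB the parser would demand:

/* illustrative only: H264 branch of the UVD DPB sizing, userspace build */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned width = 1920, height = 1088;		/* sample decode target */
	unsigned width_in_mb = width / 16;		/* 120 */
	unsigned height_in_mb = ALIGN(height / 16, 2);	/* 68 */
	unsigned image_size, min_dpb_size;

	image_size = width * height;			/* luma plane */
	image_size += image_size / 2;			/* 4:2:0 chroma */
	image_size = ALIGN(image_size, 1024);		/* 3133440 */

	min_dpb_size = image_size * 17;				/* reference pictures */
	min_dpb_size += width_in_mb * height_in_mb * 17 * 192;	/* MB context */
	min_dpb_size += width_in_mb * height_in_mb * 32;	/* IT surface */

	printf("min_dpb_size = %u bytes (~%u MiB)\n",
	       min_dpb_size, min_dpb_size >> 20);
	return 0;
}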
319static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
320 unsigned offset, unsigned buf_sizes[])
321{
322 int32_t *msg, msg_type, handle;
323 void *ptr;
324
325 int i, r;
326
327 if (offset & 0x3F) {
328 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
329 return -EINVAL;
330 }
331
332 r = radeon_bo_kmap(bo, &ptr);
333 if (r)
334 return r;
335
336 msg = ptr + offset;
337
338 msg_type = msg[1];
339 handle = msg[2];
340
341	if (handle == 0) {
342		DRM_ERROR("Invalid UVD handle!\n");
343		return -EINVAL;
344	}
345
346 if (msg_type == 1) {
347 /* it's a decode msg, calc buffer sizes */
348 r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
349 radeon_bo_kunmap(bo);
350 if (r)
351 return r;
352
353 } else if (msg_type == 2) {
354 /* it's a destroy msg, free the handle */
355 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
356 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
357 radeon_bo_kunmap(bo);
358 return 0;
359 } else {
360 /* it's a create msg, no special handling needed */
361 radeon_bo_kunmap(bo);
362 }
363
364 /* create or decode, validate the handle */
365 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
366 if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
367 return 0;
368 }
369
370 /* handle not found try to alloc a new one */
371 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
372 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
373 p->rdev->uvd.filp[i] = p->filp;
374 return 0;
375 }
376 }
377
378 DRM_ERROR("No more free UVD handles!\n");
379 return -EINVAL;
380}
381
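The handle table above is managed without a lock: a free slot holds 0 and atomic_cmpxchg claims it. A minimal userspace sketch of the same pattern using C11 atomics (MAX_HANDLES and claim_handle are hypothetical stand-ins):

/* illustrative only: lock-free slot claim a la the UVD handle table */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 10	/* hypothetical stand-in for RADEON_MAX_UVD_HANDLES */

static _Atomic uint32_t handles[MAX_HANDLES];	/* 0 == slot is free */

static int claim_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		/* succeeds only if the slot still holds 0, i.e. is free */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;
	}
	return -1;	/* table full; mirrors the -EINVAL path above */
}

int main(void)
{
	printf("slot %d\n", claim_handle(0xdead));	/* 0 */
	printf("slot %d\n", claim_handle(0xbeef));	/* 1 */
	return 0;
}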
382static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
383 int data0, int data1,
384 unsigned buf_sizes[])
385{
386 struct radeon_cs_chunk *relocs_chunk;
387 struct radeon_cs_reloc *reloc;
388 unsigned idx, cmd, offset;
389 uint64_t start, end;
390 int r;
391
392 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
393 offset = radeon_get_ib_value(p, data0);
394 idx = radeon_get_ib_value(p, data1);
395 if (idx >= relocs_chunk->length_dw) {
396 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
397 idx, relocs_chunk->length_dw);
398 return -EINVAL;
399 }
400
401 reloc = p->relocs_ptr[(idx / 4)];
402 start = reloc->lobj.gpu_offset;
403 end = start + radeon_bo_size(reloc->robj);
404 start += offset;
405
406 p->ib.ptr[data0] = start & 0xFFFFFFFF;
407 p->ib.ptr[data1] = start >> 32;
408
409 cmd = radeon_get_ib_value(p, p->idx) >> 1;
410
411 if (cmd < 0x4) {
412 if ((end - start) < buf_sizes[cmd]) {
413			DRM_ERROR("buffer too small (%d / %d)!\n",
414 (unsigned)(end - start), buf_sizes[cmd]);
415 return -EINVAL;
416 }
417
418 } else if (cmd != 0x100) {
419 DRM_ERROR("invalid UVD command %X!\n", cmd);
420 return -EINVAL;
421 }
422
423 if ((start >> 28) != (end >> 28)) {
424 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
425 start, end);
426 return -EINVAL;
427 }
428
429 /* TODO: is this still necessary on NI+ ? */
430 if ((cmd == 0 || cmd == 0x3) &&
431 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
432 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
433 start, end);
434 return -EINVAL;
435 }
436
437 if (cmd == 0) {
438 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
439 if (r)
440 return r;
441 }
442
443 return 0;
444}
445
446static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
447 struct radeon_cs_packet *pkt,
448 int *data0, int *data1,
449 unsigned buf_sizes[])
450{
451 int i, r;
452
453 p->idx++;
454 for (i = 0; i <= pkt->count; ++i) {
455 switch (pkt->reg + i*4) {
456 case UVD_GPCOM_VCPU_DATA0:
457 *data0 = p->idx;
458 break;
459 case UVD_GPCOM_VCPU_DATA1:
460 *data1 = p->idx;
461 break;
462 case UVD_GPCOM_VCPU_CMD:
463 r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
464 if (r)
465 return r;
466 break;
467 case UVD_ENGINE_CNTL:
468 break;
469 default:
470 DRM_ERROR("Invalid reg 0x%X!\n",
471 pkt->reg + i*4);
472 return -EINVAL;
473 }
474 p->idx++;
475 }
476 return 0;
477}
478
479int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
480{
481 struct radeon_cs_packet pkt;
482 int r, data0 = 0, data1 = 0;
483
484 /* minimum buffer sizes */
485 unsigned buf_sizes[] = {
486 [0x00000000] = 2048,
487 [0x00000001] = 32 * 1024 * 1024,
488 [0x00000002] = 2048 * 1152 * 3,
489 [0x00000003] = 2048,
490 };
491
492 if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
493 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
494 p->chunks[p->chunk_ib_idx].length_dw);
495 return -EINVAL;
496 }
497
498 if (p->chunk_relocs_idx == -1) {
499 DRM_ERROR("No relocation chunk !\n");
500 return -EINVAL;
501 }
502
503
504 do {
505 r = radeon_cs_packet_parse(p, &pkt, p->idx);
506 if (r)
507 return r;
508 switch (pkt.type) {
509 case RADEON_PACKET_TYPE0:
510 r = radeon_uvd_cs_reg(p, &pkt, &data0,
511 &data1, buf_sizes);
512 if (r)
513 return r;
514 break;
515 case RADEON_PACKET_TYPE2:
516 p->idx += pkt.count + 2;
517 break;
518 default:
519 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
520 return -EINVAL;
521 }
522 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
523 return 0;
524}
525
526static int radeon_uvd_send_msg(struct radeon_device *rdev,
527 int ring, struct radeon_bo *bo,
528 struct radeon_fence **fence)
529{
530 struct ttm_validate_buffer tv;
531 struct list_head head;
532 struct radeon_ib ib;
533 uint64_t addr;
534 int i, r;
535
536 memset(&tv, 0, sizeof(tv));
537 tv.bo = &bo->tbo;
538
539 INIT_LIST_HEAD(&head);
540 list_add(&tv.head, &head);
541
542 r = ttm_eu_reserve_buffers(&head);
543 if (r)
544 return r;
545
546 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
547 radeon_uvd_force_into_uvd_segment(bo);
548
549 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
550 if (r) {
551 ttm_eu_backoff_reservation(&head);
552 return r;
553 }
554
555 r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
556 if (r) {
557 ttm_eu_backoff_reservation(&head);
558 return r;
559 }
560
561 addr = radeon_bo_gpu_offset(bo);
562 ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
563 ib.ptr[1] = addr;
564 ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
565 ib.ptr[3] = addr >> 32;
566 ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
567 ib.ptr[5] = 0;
568 for (i = 6; i < 16; ++i)
569 ib.ptr[i] = PACKET2(0);
570 ib.length_dw = 16;
571
572 r = radeon_ib_schedule(rdev, &ib, NULL);
573 if (r) {
574 ttm_eu_backoff_reservation(&head);
575 return r;
576 }
577 ttm_eu_fence_buffer_objects(&head, ib.fence);
578
579 if (fence)
580 *fence = radeon_fence_ref(ib.fence);
581
582 radeon_ib_free(rdev, &ib);
583 radeon_bo_unref(&bo);
584 return 0;
585}
586
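A small aside on the IB above: the 64-bit GPU offset of the message buffer is split across the two data-register writes. A trivial standalone sketch of that split (the address is invented):

/* illustrative only: splitting the 64-bit message address over two dwords */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x123456789ULL;		/* invented GPU offset */
	uint32_t lo = (uint32_t)addr;		/* dword after UVD_GPCOM_VCPU_DATA0 */
	uint32_t hi = (uint32_t)(addr >> 32);	/* dword after UVD_GPCOM_VCPU_DATA1 */

	printf("lo = 0x%08x, hi = 0x%08x\n", lo, hi);
	return 0;
}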
587/* multiple fence commands without any stream commands in between can
588 crash the vcpu so just try to emit a dummy create/destroy msg to
589 avoid this */
590int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
591 uint32_t handle, struct radeon_fence **fence)
592{
593 struct radeon_bo *bo;
594 uint32_t *msg;
595 int r, i;
596
597 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
598 RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
599 if (r)
600 return r;
601
602 r = radeon_bo_reserve(bo, false);
603 if (r) {
604 radeon_bo_unref(&bo);
605 return r;
606 }
607
608 r = radeon_bo_kmap(bo, (void **)&msg);
609 if (r) {
610 radeon_bo_unreserve(bo);
611 radeon_bo_unref(&bo);
612 return r;
613 }
614
615	/* stitch together a UVD create msg */
616 msg[0] = 0x00000de4;
617 msg[1] = 0x00000000;
618 msg[2] = handle;
619 msg[3] = 0x00000000;
620 msg[4] = 0x00000000;
621 msg[5] = 0x00000000;
622 msg[6] = 0x00000000;
623 msg[7] = 0x00000780;
624 msg[8] = 0x00000440;
625 msg[9] = 0x00000000;
626 msg[10] = 0x01b37000;
627 for (i = 11; i < 1024; ++i)
628 msg[i] = 0x0;
629
630 radeon_bo_kunmap(bo);
631 radeon_bo_unreserve(bo);
632
633 return radeon_uvd_send_msg(rdev, ring, bo, fence);
634}
635
636int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
637 uint32_t handle, struct radeon_fence **fence)
638{
639 struct radeon_bo *bo;
640 uint32_t *msg;
641 int r, i;
642
643 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
644 RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
645 if (r)
646 return r;
647
648 r = radeon_bo_reserve(bo, false);
649 if (r) {
650 radeon_bo_unref(&bo);
651 return r;
652 }
653
654 r = radeon_bo_kmap(bo, (void **)&msg);
655 if (r) {
656 radeon_bo_unreserve(bo);
657 radeon_bo_unref(&bo);
658 return r;
659 }
660
661	/* stitch together a UVD destroy msg */
662 msg[0] = 0x00000de4;
663 msg[1] = 0x00000002;
664 msg[2] = handle;
665 msg[3] = 0x00000000;
666 for (i = 4; i < 1024; ++i)
667 msg[i] = 0x0;
668
669 radeon_bo_kunmap(bo);
670 radeon_bo_unreserve(bo);
671
672 return radeon_uvd_send_msg(rdev, ring, bo, fence);
673}
674
675static void radeon_uvd_idle_work_handler(struct work_struct *work)
676{
677 struct radeon_device *rdev =
678 container_of(work, struct radeon_device, uvd.idle_work.work);
679
680 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
681 radeon_set_uvd_clocks(rdev, 0, 0);
682 else
683 schedule_delayed_work(&rdev->uvd.idle_work,
684 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
685}
686
687void radeon_uvd_note_usage(struct radeon_device *rdev)
688{
689 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
690 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
691 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
692 if (set_clocks)
693 radeon_set_uvd_clocks(rdev, 53300, 40000);
694}
695
696static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
697 unsigned target_freq,
698 unsigned pd_min,
699 unsigned pd_even)
700{
701 unsigned post_div = vco_freq / target_freq;
702
703 /* adjust to post divider minimum value */
704 if (post_div < pd_min)
705 post_div = pd_min;
706
707	/* we always need a frequency less than or equal to the target */
708 if ((vco_freq / post_div) > target_freq)
709 post_div += 1;
710
711 /* post dividers above a certain value must be even */
712 if (post_div > pd_even && post_div % 2)
713 post_div += 1;
714
715 return post_div;
716}
717
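The three rules above (clamp to pd_min, never overshoot the target, force large dividers even) can be exercised standalone; a sketch with sample frequencies in the driver's 10 kHz units (all values invented):

/* illustrative only: the UPLL post-divider rules, userspace build */
#include <stdio.h>

static unsigned calc_post_div(unsigned vco, unsigned target,
			      unsigned pd_min, unsigned pd_even)
{
	unsigned post_div = vco / target;

	if (post_div < pd_min)			/* clamp to hardware minimum */
		post_div = pd_min;
	if ((vco / post_div) > target)		/* never overshoot the target */
		post_div += 1;
	if (post_div > pd_even && post_div % 2)	/* large dividers must be even */
		post_div += 1;
	return post_div;
}

int main(void)
{
	/* 80000/53300 = 1, but 80000/1 > 53300, so the divider is bumped to 2 */
	printf("post_div = %u\n", calc_post_div(80000, 53300, 1, 2));
	return 0;
}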
718/**
719 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
720 *
721 * @rdev: radeon_device pointer
722 * @vclk: wanted VCLK
723 * @dclk: wanted DCLK
724 * @vco_min: minimum VCO frequency
725 * @vco_max: maximum VCO frequency
726 * @fb_factor: factor to multiply vco freq with
727 * @fb_mask: limit and bitmask for feedback divider
728 * @pd_min: post divider minimum
729 * @pd_max: post divider maximum
730 * @pd_even: post divider must be even above this value
731 * @optimal_fb_div: resulting feedback divider
732 * @optimal_vclk_div: resulting vclk post divider
733 * @optimal_dclk_div: resulting dclk post divider
734 *
735 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
736 * Returns zero on success -EINVAL on error.
737 */
738int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
739 unsigned vclk, unsigned dclk,
740 unsigned vco_min, unsigned vco_max,
741 unsigned fb_factor, unsigned fb_mask,
742 unsigned pd_min, unsigned pd_max,
743 unsigned pd_even,
744 unsigned *optimal_fb_div,
745 unsigned *optimal_vclk_div,
746 unsigned *optimal_dclk_div)
747{
748 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
749
750 /* start off with something large */
751 unsigned optimal_score = ~0;
752
753 /* loop through vco from low to high */
754 vco_min = max(max(vco_min, vclk), dclk);
755 for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
756
757 uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
758 unsigned vclk_div, dclk_div, score;
759
760 do_div(fb_div, ref_freq);
761
762 /* fb div out of range ? */
763 if (fb_div > fb_mask)
764			break; /* it can only get worse */
765
766 fb_div &= fb_mask;
767
768 /* calc vclk divider with current vco freq */
769 vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
770 pd_min, pd_even);
771 if (vclk_div > pd_max)
772 break; /* vco is too big, it has to stop */
773
774 /* calc dclk divider with current vco freq */
775 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
776 pd_min, pd_even);
777		if (dclk_div > pd_max)
778 break; /* vco is too big, it has to stop */
779
780 /* calc score with current vco freq */
781 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
782
783 /* determine if this vco setting is better than current optimal settings */
784 if (score < optimal_score) {
785 *optimal_fb_div = fb_div;
786 *optimal_vclk_div = vclk_div;
787 *optimal_dclk_div = dclk_div;
788 optimal_score = score;
789 if (optimal_score == 0)
790 break; /* it can't get better than this */
791 }
792 }
793
794	/* did we find a valid setup? */
795 if (optimal_score == ~0)
796 return -EINVAL;
797
798 return 0;
799}
800
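For one candidate VCO frequency, the search above computes a feedback divider and scores the total undershoot of both clocks. A standalone sketch of a single iteration (ref_freq and the requested clocks are invented; fb_factor matches the rv770 caller later in this patch):

/* illustrative only: one iteration of the UPLL divider search */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned ref_freq = 2700;		/* invented reference clock, 10 kHz units */
	unsigned vclk = 53300, dclk = 40000;	/* requested clocks */
	unsigned vco_freq = 80000;		/* one candidate from the loop */
	unsigned fb_factor = 43663;		/* as passed by rv770_set_uvd_clocks */
	unsigned vclk_div = 2, dclk_div = 2;	/* from the post-div helper */

	uint64_t fb_div = (uint64_t)vco_freq * fb_factor / ref_freq;

	/* score is the total undershoot of both clocks; 0 is a perfect hit */
	unsigned score = vclk - vco_freq / vclk_div + dclk - vco_freq / dclk_div;

	printf("fb_div = %llu, score = %u\n",
	       (unsigned long long)fb_div, score);
	return 0;
}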
801int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
802 unsigned cg_upll_func_cntl)
803{
804 unsigned i;
805
806 /* make sure UPLL_CTLREQ is deasserted */
807 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
808
809 mdelay(10);
810
811 /* assert UPLL_CTLREQ */
812 WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
813
814 /* wait for CTLACK and CTLACK2 to get asserted */
815 for (i = 0; i < 100; ++i) {
816 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
817 if ((RREG32(cg_upll_func_cntl) & mask) == mask)
818 break;
819 mdelay(10);
820 }
821
822 /* deassert UPLL_CTLREQ */
823 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
824
825 if (i == 100) {
826 DRM_ERROR("Timeout setting UVD clocks!\n");
827 return -ETIMEDOUT;
828 }
829
830 return 0;
831}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74c2ba6..46fa1b07c560 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
 	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
 };
 
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
 void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	int i;
+	unsigned i = 0;
 
 	if (crtc >= rdev->num_crtc)
 		return;
 
-	if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
-		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
-				break;
-			udelay(1);
-		}
-		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
-				break;
-			udelay(1);
-		}
-	}
+	if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
 }
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5706d2ac75ab..ab4c86cfd552 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -148,6 +148,8 @@ void rs690_pm_info(struct radeon_device *rdev)
 static void rs690_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
+	uint32_t h_addr, l_addr;
+	unsigned long long k8_addr;
 
 	rs400_gart_adjust_size(rdev);
 	rdev->mc.vram_is_ddr = true;
@@ -160,6 +162,27 @@ static void rs690_mc_init(struct radeon_device *rdev)
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+	/* Use K8 direct mapping for fast fb access. */
+	rdev->fastfb_working = false;
+	h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+	l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+	k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+	if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+	{
+		/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+		 * memory is present.
+		 */
+		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+				 (unsigned long long)rdev->mc.aper_base, k8_addr);
+			rdev->mc.aper_base = (resource_size_t)k8_addr;
+			rdev->fastfb_working = true;
+		}
+	}
+
 	rs690_pm_info(rdev);
 	radeon_vram_location(rdev, &rdev->mc, base);
 	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
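The high and low register halves above combine into a single 40-bit K8 address. A standalone sketch of the assembly and the 32-bit non-PAE guard (register values invented):

/* illustrative only: assembling the 40-bit K8 aperture address */
#include <stdio.h>

int main(void)
{
	unsigned h_addr = 0x01;		/* invented K8_ADDR_EXT field (bits 32-39) */
	unsigned l_addr = 0xd0000000;	/* invented K8_FB_LOCATION (bits 0-31) */
	unsigned long long k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
	unsigned long long visible_vram = 256ULL << 20;

	/* the 32-bit non-PAE guard: remap only if the aperture fits below 4 GiB */
	printf("k8_addr = 0x%llx, fits below 4G: %d\n", k8_addr,
	       k8_addr + visible_vram < 0x100000000ULL);
	return 0;
}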
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 36e6398a98ae..8af3ccf20cc0 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -29,6 +29,9 @@
 #define __RS690D_H__
 
 /* Registers */
+#define R_00001E_K8_FB_LOCATION                      0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL                    0x00005F
+#define G_00005F_K8_ADDR_EXT(x)                      (((x) >> 0) & 0xFF)
 #define R_000078_MC_INDEX                            0x000078
 #define S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
 #define G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed3551364..ffcba730c57c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 		tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
 		if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
 			radeon_wait_for_vblank(rdev, i);
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
 			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 		}
 		/* wait for the next frame */
 		frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 				break;
 			udelay(1);
 		}
+
+		/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+		WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+		tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+		tmp &= ~AVIVO_CRTC_EN;
+		WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+		WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+		save->crtc_enabled[i] = false;
+		/* ***** */
 	} else {
 		save->crtc_enabled[i] = false;
 	}
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 	}
 	/* wait for the MC to settle */
 	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	/* update crtc base addresses */
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (rdev->family >= CHIP_RV770) {
-			if (i == 1) {
+			if (i == 0) {
 				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
 				       upper_32_bits(rdev->mc.vram_start));
 				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	}
 	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
 
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x3) != 0) {
+				tmp &= ~0x3;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
 	if (rdev->family >= CHIP_R600) {
 		/* unblackout the MC */
 		if (rdev->family >= CHIP_RV770)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d63fe1d0f53f..83f612a9500b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,739 @@
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
45int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
46
47int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
48{
49 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
50 int r;
51
52 /* RV740 uses evergreen uvd clk programming */
53 if (rdev->family == CHIP_RV740)
54 return evergreen_set_uvd_clocks(rdev, vclk, dclk);
55
56 /* bypass vclk and dclk with bclk */
57 WREG32_P(CG_UPLL_FUNC_CNTL_2,
58 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
59 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
60
61 if (!vclk || !dclk) {
62 /* keep the Bypass mode, put PLL to sleep */
63 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
64 return 0;
65 }
66
67 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
68 43663, 0x03FFFFFE, 1, 30, ~0,
69 &fb_div, &vclk_div, &dclk_div);
70 if (r)
71 return r;
72
73 fb_div |= 1;
74 vclk_div -= 1;
75 dclk_div -= 1;
76
77 /* set UPLL_FB_DIV to 0x50000 */
78 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
79
80 /* deassert UPLL_RESET and UPLL_SLEEP */
81 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
82
83 /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
84 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
85 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
86
87 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
88 if (r)
89 return r;
90
91 /* assert PLL_RESET */
92 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
93
94	/* set the required FB_DIV, REF_DIV, post divider values */
95 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
96 WREG32_P(CG_UPLL_FUNC_CNTL_2,
97 UPLL_SW_HILEN(vclk_div >> 1) |
98 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
99 UPLL_SW_HILEN2(dclk_div >> 1) |
100 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
101 ~UPLL_SW_MASK);
102
103 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
104 ~UPLL_FB_DIV_MASK);
105
106 /* give the PLL some time to settle */
107 mdelay(15);
108
109 /* deassert PLL_RESET */
110 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
111
112 mdelay(15);
113
114 /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
115 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
116 WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
117
118 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
119 if (r)
120 return r;
121
122 /* switch VCLK and DCLK selection */
123 WREG32_P(CG_UPLL_FUNC_CNTL_2,
124 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
125 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
126
127 mdelay(100);
128
129 return 0;
130}
131
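The UPLL_SW_HILEN/LOLEN writes above split each post divider into high and low half-periods, with the odd remainder going to the low half. A standalone sketch of that split (pure arithmetic, invented range):

/* illustrative only: HILEN/LOLEN split of the post dividers */
#include <stdio.h>

int main(void)
{
	for (unsigned div = 2; div <= 5; ++div) {
		unsigned hilen = div >> 1;			/* high half-period */
		unsigned lolen = (div >> 1) + (div & 1);	/* odd remainder goes low */
		printf("div=%u -> hilen=%u lolen=%u\n", div, hilen, lolen);
	}
	return 0;
}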
132static const u32 r7xx_golden_registers[] =
133{
134 0x8d00, 0xffffffff, 0x0e0e0074,
135 0x8d04, 0xffffffff, 0x013a2b34,
136 0x9508, 0xffffffff, 0x00000002,
137 0x8b20, 0xffffffff, 0,
138 0x88c4, 0xffffffff, 0x000000c2,
139 0x28350, 0xffffffff, 0,
140 0x9058, 0xffffffff, 0x0fffc40f,
141 0x240c, 0xffffffff, 0x00000380,
142 0x733c, 0xffffffff, 0x00000002,
143 0x2650, 0x00040000, 0,
144 0x20bc, 0x00040000, 0,
145 0x7300, 0xffffffff, 0x001000f0
146};
147
148static const u32 r7xx_golden_dyn_gpr_registers[] =
149{
150 0x8db0, 0xffffffff, 0x98989898,
151 0x8db4, 0xffffffff, 0x98989898,
152 0x8db8, 0xffffffff, 0x98989898,
153 0x8dbc, 0xffffffff, 0x98989898,
154 0x8dc0, 0xffffffff, 0x98989898,
155 0x8dc4, 0xffffffff, 0x98989898,
156 0x8dc8, 0xffffffff, 0x98989898,
157 0x8dcc, 0xffffffff, 0x98989898,
158 0x88c4, 0xffffffff, 0x00000082
159};
160
161static const u32 rv770_golden_registers[] =
162{
163 0x562c, 0xffffffff, 0,
164 0x3f90, 0xffffffff, 0,
165 0x9148, 0xffffffff, 0,
166 0x3f94, 0xffffffff, 0,
167 0x914c, 0xffffffff, 0,
168 0x9698, 0x18000000, 0x18000000
169};
170
171static const u32 rv770ce_golden_registers[] =
172{
173 0x562c, 0xffffffff, 0,
174 0x3f90, 0xffffffff, 0x00cc0000,
175 0x9148, 0xffffffff, 0x00cc0000,
176 0x3f94, 0xffffffff, 0x00cc0000,
177 0x914c, 0xffffffff, 0x00cc0000,
178 0x9b7c, 0xffffffff, 0x00fa0000,
179 0x3f8c, 0xffffffff, 0x00fa0000,
180 0x9698, 0x18000000, 0x18000000
181};
182
183static const u32 rv770_mgcg_init[] =
184{
185 0x8bcc, 0xffffffff, 0x130300f9,
186 0x5448, 0xffffffff, 0x100,
187 0x55e4, 0xffffffff, 0x100,
188 0x160c, 0xffffffff, 0x100,
189 0x5644, 0xffffffff, 0x100,
190 0xc164, 0xffffffff, 0x100,
191 0x8a18, 0xffffffff, 0x100,
192 0x897c, 0xffffffff, 0x8000100,
193 0x8b28, 0xffffffff, 0x3c000100,
194 0x9144, 0xffffffff, 0x100,
195 0x9a1c, 0xffffffff, 0x10000,
196 0x9a50, 0xffffffff, 0x100,
197 0x9a1c, 0xffffffff, 0x10001,
198 0x9a50, 0xffffffff, 0x100,
199 0x9a1c, 0xffffffff, 0x10002,
200 0x9a50, 0xffffffff, 0x100,
201 0x9a1c, 0xffffffff, 0x10003,
202 0x9a50, 0xffffffff, 0x100,
203 0x9a1c, 0xffffffff, 0x0,
204 0x9870, 0xffffffff, 0x100,
205 0x8d58, 0xffffffff, 0x100,
206 0x9500, 0xffffffff, 0x0,
207 0x9510, 0xffffffff, 0x100,
208 0x9500, 0xffffffff, 0x1,
209 0x9510, 0xffffffff, 0x100,
210 0x9500, 0xffffffff, 0x2,
211 0x9510, 0xffffffff, 0x100,
212 0x9500, 0xffffffff, 0x3,
213 0x9510, 0xffffffff, 0x100,
214 0x9500, 0xffffffff, 0x4,
215 0x9510, 0xffffffff, 0x100,
216 0x9500, 0xffffffff, 0x5,
217 0x9510, 0xffffffff, 0x100,
218 0x9500, 0xffffffff, 0x6,
219 0x9510, 0xffffffff, 0x100,
220 0x9500, 0xffffffff, 0x7,
221 0x9510, 0xffffffff, 0x100,
222 0x9500, 0xffffffff, 0x8,
223 0x9510, 0xffffffff, 0x100,
224 0x9500, 0xffffffff, 0x9,
225 0x9510, 0xffffffff, 0x100,
226 0x9500, 0xffffffff, 0x8000,
227 0x9490, 0xffffffff, 0x0,
228 0x949c, 0xffffffff, 0x100,
229 0x9490, 0xffffffff, 0x1,
230 0x949c, 0xffffffff, 0x100,
231 0x9490, 0xffffffff, 0x2,
232 0x949c, 0xffffffff, 0x100,
233 0x9490, 0xffffffff, 0x3,
234 0x949c, 0xffffffff, 0x100,
235 0x9490, 0xffffffff, 0x4,
236 0x949c, 0xffffffff, 0x100,
237 0x9490, 0xffffffff, 0x5,
238 0x949c, 0xffffffff, 0x100,
239 0x9490, 0xffffffff, 0x6,
240 0x949c, 0xffffffff, 0x100,
241 0x9490, 0xffffffff, 0x7,
242 0x949c, 0xffffffff, 0x100,
243 0x9490, 0xffffffff, 0x8,
244 0x949c, 0xffffffff, 0x100,
245 0x9490, 0xffffffff, 0x9,
246 0x949c, 0xffffffff, 0x100,
247 0x9490, 0xffffffff, 0x8000,
248 0x9604, 0xffffffff, 0x0,
249 0x9654, 0xffffffff, 0x100,
250 0x9604, 0xffffffff, 0x1,
251 0x9654, 0xffffffff, 0x100,
252 0x9604, 0xffffffff, 0x2,
253 0x9654, 0xffffffff, 0x100,
254 0x9604, 0xffffffff, 0x3,
255 0x9654, 0xffffffff, 0x100,
256 0x9604, 0xffffffff, 0x4,
257 0x9654, 0xffffffff, 0x100,
258 0x9604, 0xffffffff, 0x5,
259 0x9654, 0xffffffff, 0x100,
260 0x9604, 0xffffffff, 0x6,
261 0x9654, 0xffffffff, 0x100,
262 0x9604, 0xffffffff, 0x7,
263 0x9654, 0xffffffff, 0x100,
264 0x9604, 0xffffffff, 0x8,
265 0x9654, 0xffffffff, 0x100,
266 0x9604, 0xffffffff, 0x9,
267 0x9654, 0xffffffff, 0x100,
268 0x9604, 0xffffffff, 0x80000000,
269 0x9030, 0xffffffff, 0x100,
270 0x9034, 0xffffffff, 0x100,
271 0x9038, 0xffffffff, 0x100,
272 0x903c, 0xffffffff, 0x100,
273 0x9040, 0xffffffff, 0x100,
274 0xa200, 0xffffffff, 0x100,
275 0xa204, 0xffffffff, 0x100,
276 0xa208, 0xffffffff, 0x100,
277 0xa20c, 0xffffffff, 0x100,
278 0x971c, 0xffffffff, 0x100,
279 0x915c, 0xffffffff, 0x00020001,
280 0x9160, 0xffffffff, 0x00040003,
281 0x916c, 0xffffffff, 0x00060005,
282 0x9170, 0xffffffff, 0x00080007,
283 0x9174, 0xffffffff, 0x000a0009,
284 0x9178, 0xffffffff, 0x000c000b,
285 0x917c, 0xffffffff, 0x000e000d,
286 0x9180, 0xffffffff, 0x0010000f,
287 0x918c, 0xffffffff, 0x00120011,
288 0x9190, 0xffffffff, 0x00140013,
289 0x9194, 0xffffffff, 0x00020001,
290 0x9198, 0xffffffff, 0x00040003,
291 0x919c, 0xffffffff, 0x00060005,
292 0x91a8, 0xffffffff, 0x00080007,
293 0x91ac, 0xffffffff, 0x000a0009,
294 0x91b0, 0xffffffff, 0x000c000b,
295 0x91b4, 0xffffffff, 0x000e000d,
296 0x91b8, 0xffffffff, 0x0010000f,
297 0x91c4, 0xffffffff, 0x00120011,
298 0x91c8, 0xffffffff, 0x00140013,
299 0x91cc, 0xffffffff, 0x00020001,
300 0x91d0, 0xffffffff, 0x00040003,
301 0x91d4, 0xffffffff, 0x00060005,
302 0x91e0, 0xffffffff, 0x00080007,
303 0x91e4, 0xffffffff, 0x000a0009,
304 0x91e8, 0xffffffff, 0x000c000b,
305 0x91ec, 0xffffffff, 0x00020001,
306 0x91f0, 0xffffffff, 0x00040003,
307 0x91f4, 0xffffffff, 0x00060005,
308 0x9200, 0xffffffff, 0x00080007,
309 0x9204, 0xffffffff, 0x000a0009,
310 0x9208, 0xffffffff, 0x000c000b,
311 0x920c, 0xffffffff, 0x000e000d,
312 0x9210, 0xffffffff, 0x0010000f,
313 0x921c, 0xffffffff, 0x00120011,
314 0x9220, 0xffffffff, 0x00140013,
315 0x9224, 0xffffffff, 0x00020001,
316 0x9228, 0xffffffff, 0x00040003,
317 0x922c, 0xffffffff, 0x00060005,
318 0x9238, 0xffffffff, 0x00080007,
319 0x923c, 0xffffffff, 0x000a0009,
320 0x9240, 0xffffffff, 0x000c000b,
321 0x9244, 0xffffffff, 0x000e000d,
322 0x9248, 0xffffffff, 0x0010000f,
323 0x9254, 0xffffffff, 0x00120011,
324 0x9258, 0xffffffff, 0x00140013,
325 0x925c, 0xffffffff, 0x00020001,
326 0x9260, 0xffffffff, 0x00040003,
327 0x9264, 0xffffffff, 0x00060005,
328 0x9270, 0xffffffff, 0x00080007,
329 0x9274, 0xffffffff, 0x000a0009,
330 0x9278, 0xffffffff, 0x000c000b,
331 0x927c, 0xffffffff, 0x000e000d,
332 0x9280, 0xffffffff, 0x0010000f,
333 0x928c, 0xffffffff, 0x00120011,
334 0x9290, 0xffffffff, 0x00140013,
335 0x9294, 0xffffffff, 0x00020001,
336 0x929c, 0xffffffff, 0x00040003,
337 0x92a0, 0xffffffff, 0x00060005,
338 0x92a4, 0xffffffff, 0x00080007
339};
340
341static const u32 rv710_golden_registers[] =
342{
343 0x3f90, 0x00ff0000, 0x00fc0000,
344 0x9148, 0x00ff0000, 0x00fc0000,
345 0x3f94, 0x00ff0000, 0x00fc0000,
346 0x914c, 0x00ff0000, 0x00fc0000,
347 0xb4c, 0x00000020, 0x00000020,
348 0xa180, 0xffffffff, 0x00003f3f
349};
350
351static const u32 rv710_mgcg_init[] =
352{
353 0x8bcc, 0xffffffff, 0x13030040,
354 0x5448, 0xffffffff, 0x100,
355 0x55e4, 0xffffffff, 0x100,
356 0x160c, 0xffffffff, 0x100,
357 0x5644, 0xffffffff, 0x100,
358 0xc164, 0xffffffff, 0x100,
359 0x8a18, 0xffffffff, 0x100,
360 0x897c, 0xffffffff, 0x8000100,
361 0x8b28, 0xffffffff, 0x3c000100,
362 0x9144, 0xffffffff, 0x100,
363 0x9a1c, 0xffffffff, 0x10000,
364 0x9a50, 0xffffffff, 0x100,
365 0x9a1c, 0xffffffff, 0x0,
366 0x9870, 0xffffffff, 0x100,
367 0x8d58, 0xffffffff, 0x100,
368 0x9500, 0xffffffff, 0x0,
369 0x9510, 0xffffffff, 0x100,
370 0x9500, 0xffffffff, 0x1,
371 0x9510, 0xffffffff, 0x100,
372 0x9500, 0xffffffff, 0x8000,
373 0x9490, 0xffffffff, 0x0,
374 0x949c, 0xffffffff, 0x100,
375 0x9490, 0xffffffff, 0x1,
376 0x949c, 0xffffffff, 0x100,
377 0x9490, 0xffffffff, 0x8000,
378 0x9604, 0xffffffff, 0x0,
379 0x9654, 0xffffffff, 0x100,
380 0x9604, 0xffffffff, 0x1,
381 0x9654, 0xffffffff, 0x100,
382 0x9604, 0xffffffff, 0x80000000,
383 0x9030, 0xffffffff, 0x100,
384 0x9034, 0xffffffff, 0x100,
385 0x9038, 0xffffffff, 0x100,
386 0x903c, 0xffffffff, 0x100,
387 0x9040, 0xffffffff, 0x100,
388 0xa200, 0xffffffff, 0x100,
389 0xa204, 0xffffffff, 0x100,
390 0xa208, 0xffffffff, 0x100,
391 0xa20c, 0xffffffff, 0x100,
392 0x971c, 0xffffffff, 0x100,
393 0x915c, 0xffffffff, 0x00020001,
394 0x9174, 0xffffffff, 0x00000003,
395 0x9178, 0xffffffff, 0x00050001,
396 0x917c, 0xffffffff, 0x00030002,
397 0x918c, 0xffffffff, 0x00000004,
398 0x9190, 0xffffffff, 0x00070006,
399 0x9194, 0xffffffff, 0x00050001,
400 0x9198, 0xffffffff, 0x00030002,
401 0x91a8, 0xffffffff, 0x00000004,
402 0x91ac, 0xffffffff, 0x00070006,
403 0x91e8, 0xffffffff, 0x00000001,
404 0x9294, 0xffffffff, 0x00000001,
405 0x929c, 0xffffffff, 0x00000002,
406 0x92a0, 0xffffffff, 0x00040003,
407 0x9150, 0xffffffff, 0x4d940000
408};
409
410static const u32 rv730_golden_registers[] =
411{
412 0x3f90, 0x00ff0000, 0x00f00000,
413 0x9148, 0x00ff0000, 0x00f00000,
414 0x3f94, 0x00ff0000, 0x00f00000,
415 0x914c, 0x00ff0000, 0x00f00000,
416 0x900c, 0xffffffff, 0x003b033f,
417 0xb4c, 0x00000020, 0x00000020,
418 0xa180, 0xffffffff, 0x00003f3f
419};
420
421static const u32 rv730_mgcg_init[] =
422{
423 0x8bcc, 0xffffffff, 0x130300f9,
424 0x5448, 0xffffffff, 0x100,
425 0x55e4, 0xffffffff, 0x100,
426 0x160c, 0xffffffff, 0x100,
427 0x5644, 0xffffffff, 0x100,
428 0xc164, 0xffffffff, 0x100,
429 0x8a18, 0xffffffff, 0x100,
430 0x897c, 0xffffffff, 0x8000100,
431 0x8b28, 0xffffffff, 0x3c000100,
432 0x9144, 0xffffffff, 0x100,
433 0x9a1c, 0xffffffff, 0x10000,
434 0x9a50, 0xffffffff, 0x100,
435 0x9a1c, 0xffffffff, 0x10001,
436 0x9a50, 0xffffffff, 0x100,
437 0x9a1c, 0xffffffff, 0x0,
438 0x9870, 0xffffffff, 0x100,
439 0x8d58, 0xffffffff, 0x100,
440 0x9500, 0xffffffff, 0x0,
441 0x9510, 0xffffffff, 0x100,
442 0x9500, 0xffffffff, 0x1,
443 0x9510, 0xffffffff, 0x100,
444 0x9500, 0xffffffff, 0x2,
445 0x9510, 0xffffffff, 0x100,
446 0x9500, 0xffffffff, 0x3,
447 0x9510, 0xffffffff, 0x100,
448 0x9500, 0xffffffff, 0x4,
449 0x9510, 0xffffffff, 0x100,
450 0x9500, 0xffffffff, 0x5,
451 0x9510, 0xffffffff, 0x100,
452 0x9500, 0xffffffff, 0x6,
453 0x9510, 0xffffffff, 0x100,
454 0x9500, 0xffffffff, 0x7,
455 0x9510, 0xffffffff, 0x100,
456 0x9500, 0xffffffff, 0x8000,
457 0x9490, 0xffffffff, 0x0,
458 0x949c, 0xffffffff, 0x100,
459 0x9490, 0xffffffff, 0x1,
460 0x949c, 0xffffffff, 0x100,
461 0x9490, 0xffffffff, 0x2,
462 0x949c, 0xffffffff, 0x100,
463 0x9490, 0xffffffff, 0x3,
464 0x949c, 0xffffffff, 0x100,
465 0x9490, 0xffffffff, 0x4,
466 0x949c, 0xffffffff, 0x100,
467 0x9490, 0xffffffff, 0x5,
468 0x949c, 0xffffffff, 0x100,
469 0x9490, 0xffffffff, 0x6,
470 0x949c, 0xffffffff, 0x100,
471 0x9490, 0xffffffff, 0x7,
472 0x949c, 0xffffffff, 0x100,
473 0x9490, 0xffffffff, 0x8000,
474 0x9604, 0xffffffff, 0x0,
475 0x9654, 0xffffffff, 0x100,
476 0x9604, 0xffffffff, 0x1,
477 0x9654, 0xffffffff, 0x100,
478 0x9604, 0xffffffff, 0x2,
479 0x9654, 0xffffffff, 0x100,
480 0x9604, 0xffffffff, 0x3,
481 0x9654, 0xffffffff, 0x100,
482 0x9604, 0xffffffff, 0x4,
483 0x9654, 0xffffffff, 0x100,
484 0x9604, 0xffffffff, 0x5,
485 0x9654, 0xffffffff, 0x100,
486 0x9604, 0xffffffff, 0x6,
487 0x9654, 0xffffffff, 0x100,
488 0x9604, 0xffffffff, 0x7,
489 0x9654, 0xffffffff, 0x100,
490 0x9604, 0xffffffff, 0x80000000,
491 0x9030, 0xffffffff, 0x100,
492 0x9034, 0xffffffff, 0x100,
493 0x9038, 0xffffffff, 0x100,
494 0x903c, 0xffffffff, 0x100,
495 0x9040, 0xffffffff, 0x100,
496 0xa200, 0xffffffff, 0x100,
497 0xa204, 0xffffffff, 0x100,
498 0xa208, 0xffffffff, 0x100,
499 0xa20c, 0xffffffff, 0x100,
500 0x971c, 0xffffffff, 0x100,
501 0x915c, 0xffffffff, 0x00020001,
502 0x916c, 0xffffffff, 0x00040003,
503 0x9170, 0xffffffff, 0x00000005,
504 0x9178, 0xffffffff, 0x00050001,
505 0x917c, 0xffffffff, 0x00030002,
506 0x918c, 0xffffffff, 0x00000004,
507 0x9190, 0xffffffff, 0x00070006,
508 0x9194, 0xffffffff, 0x00050001,
509 0x9198, 0xffffffff, 0x00030002,
510 0x91a8, 0xffffffff, 0x00000004,
511 0x91ac, 0xffffffff, 0x00070006,
512 0x91b0, 0xffffffff, 0x00050001,
513 0x91b4, 0xffffffff, 0x00030002,
514 0x91c4, 0xffffffff, 0x00000004,
515 0x91c8, 0xffffffff, 0x00070006,
516 0x91cc, 0xffffffff, 0x00050001,
517 0x91d0, 0xffffffff, 0x00030002,
518 0x91e0, 0xffffffff, 0x00000004,
519 0x91e4, 0xffffffff, 0x00070006,
520 0x91e8, 0xffffffff, 0x00000001,
521 0x91ec, 0xffffffff, 0x00050001,
522 0x91f0, 0xffffffff, 0x00030002,
523 0x9200, 0xffffffff, 0x00000004,
524 0x9204, 0xffffffff, 0x00070006,
525 0x9208, 0xffffffff, 0x00050001,
526 0x920c, 0xffffffff, 0x00030002,
527 0x921c, 0xffffffff, 0x00000004,
528 0x9220, 0xffffffff, 0x00070006,
529 0x9224, 0xffffffff, 0x00050001,
530 0x9228, 0xffffffff, 0x00030002,
531 0x9238, 0xffffffff, 0x00000004,
532 0x923c, 0xffffffff, 0x00070006,
533 0x9240, 0xffffffff, 0x00050001,
534 0x9244, 0xffffffff, 0x00030002,
535 0x9254, 0xffffffff, 0x00000004,
536 0x9258, 0xffffffff, 0x00070006,
537 0x9294, 0xffffffff, 0x00000001,
538 0x929c, 0xffffffff, 0x00000002,
539 0x92a0, 0xffffffff, 0x00040003,
540 0x92a4, 0xffffffff, 0x00000005
541};
542
543static const u32 rv740_golden_registers[] =
544{
545 0x88c4, 0xffffffff, 0x00000082,
546 0x28a50, 0xfffffffc, 0x00000004,
547 0x2650, 0x00040000, 0,
548 0x20bc, 0x00040000, 0,
549 0x733c, 0xffffffff, 0x00000002,
550 0x7300, 0xffffffff, 0x001000f0,
551 0x3f90, 0x00ff0000, 0,
552 0x9148, 0x00ff0000, 0,
553 0x3f94, 0x00ff0000, 0,
554 0x914c, 0x00ff0000, 0,
555 0x240c, 0xffffffff, 0x00000380,
556 0x8a14, 0x00000007, 0x00000007,
557 0x8b24, 0xffffffff, 0x00ff0fff,
558 0x28a4c, 0xffffffff, 0x00004000,
559 0xa180, 0xffffffff, 0x00003f3f,
560 0x8d00, 0xffffffff, 0x0e0e003a,
561 0x8d04, 0xffffffff, 0x013a0e2a,
562 0x8c00, 0xffffffff, 0xe400000f,
563 0x8db0, 0xffffffff, 0x98989898,
564 0x8db4, 0xffffffff, 0x98989898,
565 0x8db8, 0xffffffff, 0x98989898,
566 0x8dbc, 0xffffffff, 0x98989898,
567 0x8dc0, 0xffffffff, 0x98989898,
568 0x8dc4, 0xffffffff, 0x98989898,
569 0x8dc8, 0xffffffff, 0x98989898,
570 0x8dcc, 0xffffffff, 0x98989898,
571 0x9058, 0xffffffff, 0x0fffc40f,
572 0x900c, 0xffffffff, 0x003b033f,
573 0x28350, 0xffffffff, 0,
574 0x8cf0, 0x1fffffff, 0x08e00420,
575 0x9508, 0xffffffff, 0x00000002,
576 0x88c4, 0xffffffff, 0x000000c2,
577 0x9698, 0x18000000, 0x18000000
578};
579
580static const u32 rv740_mgcg_init[] =
581{
582 0x8bcc, 0xffffffff, 0x13030100,
583 0x5448, 0xffffffff, 0x100,
584 0x55e4, 0xffffffff, 0x100,
585 0x160c, 0xffffffff, 0x100,
586 0x5644, 0xffffffff, 0x100,
587 0xc164, 0xffffffff, 0x100,
588 0x8a18, 0xffffffff, 0x100,
589 0x897c, 0xffffffff, 0x100,
590 0x8b28, 0xffffffff, 0x100,
591 0x9144, 0xffffffff, 0x100,
592 0x9a1c, 0xffffffff, 0x10000,
593 0x9a50, 0xffffffff, 0x100,
594 0x9a1c, 0xffffffff, 0x10001,
595 0x9a50, 0xffffffff, 0x100,
596 0x9a1c, 0xffffffff, 0x10002,
597 0x9a50, 0xffffffff, 0x100,
598 0x9a1c, 0xffffffff, 0x10003,
599 0x9a50, 0xffffffff, 0x100,
600 0x9a1c, 0xffffffff, 0x0,
601 0x9870, 0xffffffff, 0x100,
602 0x8d58, 0xffffffff, 0x100,
603 0x9500, 0xffffffff, 0x0,
604 0x9510, 0xffffffff, 0x100,
605 0x9500, 0xffffffff, 0x1,
606 0x9510, 0xffffffff, 0x100,
607 0x9500, 0xffffffff, 0x2,
608 0x9510, 0xffffffff, 0x100,
609 0x9500, 0xffffffff, 0x3,
610 0x9510, 0xffffffff, 0x100,
611 0x9500, 0xffffffff, 0x4,
612 0x9510, 0xffffffff, 0x100,
613 0x9500, 0xffffffff, 0x5,
614 0x9510, 0xffffffff, 0x100,
615 0x9500, 0xffffffff, 0x6,
616 0x9510, 0xffffffff, 0x100,
617 0x9500, 0xffffffff, 0x7,
618 0x9510, 0xffffffff, 0x100,
619 0x9500, 0xffffffff, 0x8000,
620 0x9490, 0xffffffff, 0x0,
621 0x949c, 0xffffffff, 0x100,
622 0x9490, 0xffffffff, 0x1,
623 0x949c, 0xffffffff, 0x100,
624 0x9490, 0xffffffff, 0x2,
625 0x949c, 0xffffffff, 0x100,
626 0x9490, 0xffffffff, 0x3,
627 0x949c, 0xffffffff, 0x100,
628 0x9490, 0xffffffff, 0x4,
629 0x949c, 0xffffffff, 0x100,
630 0x9490, 0xffffffff, 0x5,
631 0x949c, 0xffffffff, 0x100,
632 0x9490, 0xffffffff, 0x6,
633 0x949c, 0xffffffff, 0x100,
634 0x9490, 0xffffffff, 0x7,
635 0x949c, 0xffffffff, 0x100,
636 0x9490, 0xffffffff, 0x8000,
637 0x9604, 0xffffffff, 0x0,
638 0x9654, 0xffffffff, 0x100,
639 0x9604, 0xffffffff, 0x1,
640 0x9654, 0xffffffff, 0x100,
641 0x9604, 0xffffffff, 0x2,
642 0x9654, 0xffffffff, 0x100,
643 0x9604, 0xffffffff, 0x3,
644 0x9654, 0xffffffff, 0x100,
645 0x9604, 0xffffffff, 0x4,
646 0x9654, 0xffffffff, 0x100,
647 0x9604, 0xffffffff, 0x5,
648 0x9654, 0xffffffff, 0x100,
649 0x9604, 0xffffffff, 0x6,
650 0x9654, 0xffffffff, 0x100,
651 0x9604, 0xffffffff, 0x7,
652 0x9654, 0xffffffff, 0x100,
653 0x9604, 0xffffffff, 0x80000000,
654 0x9030, 0xffffffff, 0x100,
655 0x9034, 0xffffffff, 0x100,
656 0x9038, 0xffffffff, 0x100,
657 0x903c, 0xffffffff, 0x100,
658 0x9040, 0xffffffff, 0x100,
659 0xa200, 0xffffffff, 0x100,
660 0xa204, 0xffffffff, 0x100,
661 0xa208, 0xffffffff, 0x100,
662 0xa20c, 0xffffffff, 0x100,
663 0x971c, 0xffffffff, 0x100,
664 0x915c, 0xffffffff, 0x00020001,
665 0x9160, 0xffffffff, 0x00040003,
666 0x916c, 0xffffffff, 0x00060005,
667 0x9170, 0xffffffff, 0x00080007,
668 0x9174, 0xffffffff, 0x000a0009,
669 0x9178, 0xffffffff, 0x000c000b,
670 0x917c, 0xffffffff, 0x000e000d,
671 0x9180, 0xffffffff, 0x0010000f,
672 0x918c, 0xffffffff, 0x00120011,
673 0x9190, 0xffffffff, 0x00140013,
674 0x9194, 0xffffffff, 0x00020001,
675 0x9198, 0xffffffff, 0x00040003,
676 0x919c, 0xffffffff, 0x00060005,
677 0x91a8, 0xffffffff, 0x00080007,
678 0x91ac, 0xffffffff, 0x000a0009,
679 0x91b0, 0xffffffff, 0x000c000b,
680 0x91b4, 0xffffffff, 0x000e000d,
681 0x91b8, 0xffffffff, 0x0010000f,
682 0x91c4, 0xffffffff, 0x00120011,
683 0x91c8, 0xffffffff, 0x00140013,
684 0x91cc, 0xffffffff, 0x00020001,
685 0x91d0, 0xffffffff, 0x00040003,
686 0x91d4, 0xffffffff, 0x00060005,
687 0x91e0, 0xffffffff, 0x00080007,
688 0x91e4, 0xffffffff, 0x000a0009,
689 0x91e8, 0xffffffff, 0x000c000b,
690 0x91ec, 0xffffffff, 0x00020001,
691 0x91f0, 0xffffffff, 0x00040003,
692 0x91f4, 0xffffffff, 0x00060005,
693 0x9200, 0xffffffff, 0x00080007,
694 0x9204, 0xffffffff, 0x000a0009,
695 0x9208, 0xffffffff, 0x000c000b,
696 0x920c, 0xffffffff, 0x000e000d,
697 0x9210, 0xffffffff, 0x0010000f,
698 0x921c, 0xffffffff, 0x00120011,
699 0x9220, 0xffffffff, 0x00140013,
700 0x9224, 0xffffffff, 0x00020001,
701 0x9228, 0xffffffff, 0x00040003,
702 0x922c, 0xffffffff, 0x00060005,
703 0x9238, 0xffffffff, 0x00080007,
704 0x923c, 0xffffffff, 0x000a0009,
705 0x9240, 0xffffffff, 0x000c000b,
706 0x9244, 0xffffffff, 0x000e000d,
707 0x9248, 0xffffffff, 0x0010000f,
708 0x9254, 0xffffffff, 0x00120011,
709 0x9258, 0xffffffff, 0x00140013,
710 0x9294, 0xffffffff, 0x00020001,
711 0x929c, 0xffffffff, 0x00040003,
712 0x92a0, 0xffffffff, 0x00060005,
713 0x92a4, 0xffffffff, 0x00080007
714};
715
716static void rv770_init_golden_registers(struct radeon_device *rdev)
717{
718 switch (rdev->family) {
719 case CHIP_RV770:
720 radeon_program_register_sequence(rdev,
721 r7xx_golden_registers,
722 (const u32)ARRAY_SIZE(r7xx_golden_registers));
723 radeon_program_register_sequence(rdev,
724 r7xx_golden_dyn_gpr_registers,
725 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
726 if (rdev->pdev->device == 0x994e)
727 radeon_program_register_sequence(rdev,
728 rv770ce_golden_registers,
729 (const u32)ARRAY_SIZE(rv770ce_golden_registers));
730 else
731 radeon_program_register_sequence(rdev,
732 rv770_golden_registers,
733 (const u32)ARRAY_SIZE(rv770_golden_registers));
734 radeon_program_register_sequence(rdev,
735 rv770_mgcg_init,
736 (const u32)ARRAY_SIZE(rv770_mgcg_init));
737 break;
738 case CHIP_RV730:
739 radeon_program_register_sequence(rdev,
740 r7xx_golden_registers,
741 (const u32)ARRAY_SIZE(r7xx_golden_registers));
742 radeon_program_register_sequence(rdev,
743 r7xx_golden_dyn_gpr_registers,
744 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
745 radeon_program_register_sequence(rdev,
746 rv730_golden_registers,
747						 (const u32)ARRAY_SIZE(rv730_golden_registers));
748 radeon_program_register_sequence(rdev,
749 rv730_mgcg_init,
750						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
751 break;
752 case CHIP_RV710:
753 radeon_program_register_sequence(rdev,
754 r7xx_golden_registers,
755 (const u32)ARRAY_SIZE(r7xx_golden_registers));
756 radeon_program_register_sequence(rdev,
757 r7xx_golden_dyn_gpr_registers,
758 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
759 radeon_program_register_sequence(rdev,
760 rv710_golden_registers,
761						 (const u32)ARRAY_SIZE(rv710_golden_registers));
762 radeon_program_register_sequence(rdev,
763 rv710_mgcg_init,
764						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
765 break;
766 case CHIP_RV740:
767 radeon_program_register_sequence(rdev,
768 rv740_golden_registers,
769						 (const u32)ARRAY_SIZE(rv740_golden_registers));
770 radeon_program_register_sequence(rdev,
771 rv740_mgcg_init,
772						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
773 break;
774 default:
775 break;
776 }
777}
 
 #define PCIE_BUS_CLK                                10000
 #define TCLK                                        (PCIE_BUS_CLK / 10)
@@ -68,6 +801,105 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
 	return reference_clock;
 }
 
804int rv770_uvd_resume(struct radeon_device *rdev)
805{
806 uint64_t addr;
807 uint32_t chip_id, size;
808 int r;
809
810 r = radeon_uvd_resume(rdev);
811 if (r)
812 return r;
813
814	/* program the VCPU memory controller bits 0-27 */
815 addr = rdev->uvd.gpu_addr >> 3;
816 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
817 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
818 WREG32(UVD_VCPU_CACHE_SIZE0, size);
819
820 addr += size;
821 size = RADEON_UVD_STACK_SIZE >> 3;
822 WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
823 WREG32(UVD_VCPU_CACHE_SIZE1, size);
824
825 addr += size;
826 size = RADEON_UVD_HEAP_SIZE >> 3;
827 WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
828 WREG32(UVD_VCPU_CACHE_SIZE2, size);
829
830 /* bits 28-31 */
831 addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
832 WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
833
834 /* bits 32-39 */
835 addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
836 WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
837
838 /* tell firmware which hardware it is running on */
839 switch (rdev->family) {
840 default:
841 return -EINVAL;
842 case CHIP_RV710:
843 chip_id = 0x01000005;
844 break;
845 case CHIP_RV730:
846 chip_id = 0x01000006;
847 break;
848 case CHIP_RV740:
849 chip_id = 0x01000007;
850 break;
851 case CHIP_CYPRESS:
852 case CHIP_HEMLOCK:
853 chip_id = 0x01000008;
854 break;
855 case CHIP_JUNIPER:
856 chip_id = 0x01000009;
857 break;
858 case CHIP_REDWOOD:
859 chip_id = 0x0100000a;
860 break;
861 case CHIP_CEDAR:
862 chip_id = 0x0100000b;
863 break;
864 case CHIP_SUMO:
865 chip_id = 0x0100000c;
866 break;
867 case CHIP_SUMO2:
868 chip_id = 0x0100000d;
869 break;
870 case CHIP_PALM:
871 chip_id = 0x0100000e;
872 break;
873 case CHIP_CAYMAN:
874 chip_id = 0x0100000f;
875 break;
876 case CHIP_BARTS:
877 chip_id = 0x01000010;
878 break;
879 case CHIP_TURKS:
880 chip_id = 0x01000011;
881 break;
882 case CHIP_CAICOS:
883 chip_id = 0x01000012;
884 break;
885 case CHIP_TAHITI:
886 chip_id = 0x01000014;
887 break;
888 case CHIP_VERDE:
889 chip_id = 0x01000015;
890 break;
891 case CHIP_PITCAIRN:
892 chip_id = 0x01000016;
893 break;
894 case CHIP_ARUBA:
895 chip_id = 0x01000017;
896 break;
897 }
898 WREG32(UVD_VCPU_CHIP_ID, chip_id);
899
900 return 0;
901}
902
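[Editor's note] The register writes above pack the firmware image, stack and heap back to back into the VCPU cache windows, with every offset and size expressed in 8-byte units (hence the ">> 3"). A sketch of the resulting layout, using only names visible in the function above:

	/*
	 * VCPU window layout (editor's sketch, extents in bytes):
	 *
	 *   [0, fw_size)                        firmware image -> OFFSET0/SIZE0
	 *   [fw_size, +RADEON_UVD_STACK_SIZE)   stack          -> OFFSET1/SIZE1
	 *   [..., +RADEON_UVD_HEAP_SIZE)        heap           -> OFFSET2/SIZE2
	 *
	 * where fw_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4).
	 */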
71u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 903u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
72{ 904{
73 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 905 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -611,6 +1443,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
611 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1443 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
612 WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1444 WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
613 WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff)); 1445 WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
1446 if (rdev->family == CHIP_RV730) {
1447 WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
1448 WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
1449 WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
1450 }
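[Editor's aside] The RV730-only writes above mirror the tiling setup into the UVD decoder; this reads as a consistency requirement rather than anything UVD-specific:

	/*
	 * Editor's sketch of the intent: the UDEC accesses tiled surfaces,
	 * so it is handed the same gb_tiling_config as the GB/HDP/DMA
	 * clients; per rv770d.h these three registers exist only on RV730.
	 */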
614 1451
615 WREG32(CGTS_SYS_TCC_DISABLE, 0); 1452 WREG32(CGTS_SYS_TCC_DISABLE, 0);
616 WREG32(CGTS_TCC_DISABLE, 0); 1453 WREG32(CGTS_TCC_DISABLE, 0);
@@ -840,7 +1677,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
840 } 1677 }
841 if (rdev->flags & RADEON_IS_AGP) { 1678 if (rdev->flags & RADEON_IS_AGP) {
842 size_bf = mc->gtt_start; 1679 size_bf = mc->gtt_start;
843 size_af = 0xFFFFFFFF - mc->gtt_end; 1680 size_af = mc->mc_mask - mc->gtt_end;
844 if (size_bf > size_af) { 1681 if (size_bf > size_af) {
845 if (mc->mc_vram_size > size_bf) { 1682 if (mc->mc_vram_size > size_bf) {
846 dev_warn(rdev->dev, "limiting VRAM\n"); 1683 dev_warn(rdev->dev, "limiting VRAM\n");
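[Editor's aside] The one-line change in this hunk widens the "space after GTT" calculation from a hard-coded 32-bit ceiling to the per-ASIC MC mask:

	/*
	 * Editor's sketch: mc->mc_mask is the highest address the memory
	 * controller can decode (e.g. 0xFFFFFFFFFFULL on ASICs with a
	 * 40-bit MC), so size_af now reflects the real headroom above the
	 * GTT instead of clamping VRAM placement to 4GB.
	 */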
@@ -1040,6 +1877,17 @@ static int rv770_startup(struct radeon_device *rdev)
1040 return r; 1877 return r;
1041 } 1878 }
1042 1879
1880 r = rv770_uvd_resume(rdev);
1881 if (!r) {
1882 r = radeon_fence_driver_start_ring(rdev,
1883 R600_RING_TYPE_UVD_INDEX);
1884 if (r)
1885 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
1886 }
1887
1888 if (r)
1889 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
1890
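[Editor's note] A failed UVD bring-up is deliberately non-fatal here:

	/*
	 * ring_size == 0 doubles as the "UVD disabled" flag: the ring-init
	 * block later in this function only touches the UVD ring when
	 * ring->ring_size is non-zero, so the rest of the ASIC still
	 * starts when the UVD firmware is missing or resume fails.
	 */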
1043 /* Enable IRQ */ 1891 /* Enable IRQ */
1044 r = r600_irq_init(rdev); 1892 r = r600_irq_init(rdev);
1045 if (r) { 1893 if (r) {
@@ -1074,6 +1922,19 @@ static int rv770_startup(struct radeon_device *rdev)
1074 if (r) 1922 if (r)
1075 return r; 1923 return r;
1076 1924
1925 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1926 if (ring->ring_size) {
1927 r = radeon_ring_init(rdev, ring, ring->ring_size,
1928 R600_WB_UVD_RPTR_OFFSET,
1929 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
1930 0, 0xfffff, RADEON_CP_PACKET2);
1931 if (!r)
1932 r = r600_uvd_init(rdev);
1933
1934 if (r)
1935 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
1936 }
1937
1077 r = radeon_ib_pool_init(rdev); 1938 r = radeon_ib_pool_init(rdev);
1078 if (r) { 1939 if (r) {
1079 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1940 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1100,6 +1961,9 @@ int rv770_resume(struct radeon_device *rdev)
1100 /* post card */ 1961 /* post card */
1101 atom_asic_init(rdev->mode_info.atom_context); 1962 atom_asic_init(rdev->mode_info.atom_context);
1102 1963
1964 /* init golden registers */
1965 rv770_init_golden_registers(rdev);
1966
1103 rdev->accel_working = true; 1967 rdev->accel_working = true;
1104 r = rv770_startup(rdev); 1968 r = rv770_startup(rdev);
1105 if (r) { 1969 if (r) {
@@ -1115,6 +1979,7 @@ int rv770_resume(struct radeon_device *rdev)
1115int rv770_suspend(struct radeon_device *rdev) 1979int rv770_suspend(struct radeon_device *rdev)
1116{ 1980{
1117 r600_audio_fini(rdev); 1981 r600_audio_fini(rdev);
1982 radeon_uvd_suspend(rdev);
1118 r700_cp_stop(rdev); 1983 r700_cp_stop(rdev);
1119 r600_dma_stop(rdev); 1984 r600_dma_stop(rdev);
1120 r600_irq_suspend(rdev); 1985 r600_irq_suspend(rdev);
@@ -1156,6 +2021,8 @@ int rv770_init(struct radeon_device *rdev)
1156 DRM_INFO("GPU not posted. posting now...\n"); 2021 DRM_INFO("GPU not posted. posting now...\n");
1157 atom_asic_init(rdev->mode_info.atom_context); 2022 atom_asic_init(rdev->mode_info.atom_context);
1158 } 2023 }
2024 /* init golden registers */
2025 rv770_init_golden_registers(rdev);
1159 /* Initialize scratch registers */ 2026 /* Initialize scratch registers */
1160 r600_scratch_init(rdev); 2027 r600_scratch_init(rdev);
1161 /* Initialize surface registers */ 2028 /* Initialize surface registers */
@@ -1190,6 +2057,13 @@ int rv770_init(struct radeon_device *rdev)
1190 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; 2057 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
1191 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); 2058 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
1192 2059
2060 r = radeon_uvd_init(rdev);
2061 if (!r) {
2062 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
2063 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
2064 4096);
2065 }
2066
1193 rdev->ih.ring_obj = NULL; 2067 rdev->ih.ring_obj = NULL;
1194 r600_ih_ring_init(rdev, 64 * 1024); 2068 r600_ih_ring_init(rdev, 64 * 1024);
1195 2069
@@ -1224,6 +2098,7 @@ void rv770_fini(struct radeon_device *rdev)
1224 radeon_ib_pool_fini(rdev); 2098 radeon_ib_pool_fini(rdev);
1225 radeon_irq_kms_fini(rdev); 2099 radeon_irq_kms_fini(rdev);
1226 rv770_pcie_gart_fini(rdev); 2100 rv770_pcie_gart_fini(rdev);
2101 radeon_uvd_fini(rdev);
1227 r600_vram_scratch_fini(rdev); 2102 r600_vram_scratch_fini(rdev);
1228 radeon_gem_fini(rdev); 2103 radeon_gem_fini(rdev);
1229 radeon_fence_driver_fini(rdev); 2104 radeon_fence_driver_fini(rdev);
@@ -1264,23 +2139,23 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1264 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 2139 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
1265 2140
1266 /* advertise upconfig capability */ 2141 /* advertise upconfig capability */
1267 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 2142 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1268 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 2143 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1269 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 2144 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1270 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 2145 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1271 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 2146 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
1272 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 2147 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
1273 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 2148 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
1274 LC_RECONFIG_ARC_MISSING_ESCAPE); 2149 LC_RECONFIG_ARC_MISSING_ESCAPE);
1275 link_width_cntl |= lanes | LC_RECONFIG_NOW | 2150 link_width_cntl |= lanes | LC_RECONFIG_NOW |
1276 LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; 2151 LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
1277 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 2152 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1278 } else { 2153 } else {
1279 link_width_cntl |= LC_UPCONFIGURE_DIS; 2154 link_width_cntl |= LC_UPCONFIGURE_DIS;
1280 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 2155 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1281 } 2156 }
1282 2157
1283 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 2158 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1284 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 2159 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1285 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 2160 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1286 2161
@@ -1293,29 +2168,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1293 WREG16(0x4088, link_cntl2); 2168 WREG16(0x4088, link_cntl2);
1294 WREG32(MM_CFGREGS_CNTL, 0); 2169 WREG32(MM_CFGREGS_CNTL, 0);
1295 2170
1296 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 2171 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1297 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 2172 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
1298 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 2173 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1299 2174
1300 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 2175 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1301 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; 2176 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
1302 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 2177 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1303 2178
1304 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 2179 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1305 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; 2180 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
1306 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 2181 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1307 2182
1308 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 2183 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1309 speed_cntl |= LC_GEN2_EN_STRAP; 2184 speed_cntl |= LC_GEN2_EN_STRAP;
1310 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 2185 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
1311 2186
1312 } else { 2187 } else {
1313 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 2188 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1314 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 2189 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
1315 if (1) 2190 if (1)
1316 link_width_cntl |= LC_UPCONFIGURE_DIS; 2191 link_width_cntl |= LC_UPCONFIGURE_DIS;
1317 else 2192 else
1318 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 2193 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1319 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 2194 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1320 } 2195 }
1321} 2196}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index c55f950a4af7..85b16266f748 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -38,6 +38,30 @@
38#define R7XX_MAX_PIPES 8 38#define R7XX_MAX_PIPES 8
39#define R7XX_MAX_PIPES_MASK 0xff 39#define R7XX_MAX_PIPES_MASK 0xff
40 40
41/* discrete uvd clocks */
42#define CG_UPLL_FUNC_CNTL 0x718
43# define UPLL_RESET_MASK 0x00000001
44# define UPLL_SLEEP_MASK 0x00000002
45# define UPLL_BYPASS_EN_MASK 0x00000004
46# define UPLL_CTLREQ_MASK 0x00000008
47# define UPLL_REF_DIV(x) ((x) << 16)
48# define UPLL_REF_DIV_MASK 0x003F0000
49# define UPLL_CTLACK_MASK 0x40000000
50# define UPLL_CTLACK2_MASK 0x80000000
51#define CG_UPLL_FUNC_CNTL_2 0x71c
52# define UPLL_SW_HILEN(x) ((x) << 0)
53# define UPLL_SW_LOLEN(x) ((x) << 4)
54# define UPLL_SW_HILEN2(x) ((x) << 8)
55# define UPLL_SW_LOLEN2(x) ((x) << 12)
56# define UPLL_SW_MASK 0x0000FFFF
57# define VCLK_SRC_SEL(x) ((x) << 20)
58# define VCLK_SRC_SEL_MASK 0x01F00000
59# define DCLK_SRC_SEL(x) ((x) << 25)
60# define DCLK_SRC_SEL_MASK 0x3E000000
61#define CG_UPLL_FUNC_CNTL_3 0x720
62# define UPLL_FB_DIV(x) ((x) << 0)
63# define UPLL_FB_DIV_MASK 0x01FFFFFF
64
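[Editor's note] The field helpers above follow the driver's usual shift/mask pattern and are meant for read-modify-write access; an illustrative use, assuming the standard radeon WREG32_P(reg, val, mask) macro (the matching sequence appears in si.c further down):

	/* route both UVD clocks to the bypass source -- illustration only */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));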
41/* Registers */ 65/* Registers */
42#define CB_COLOR0_BASE 0x28040 66#define CB_COLOR0_BASE 0x28040
43#define CB_COLOR1_BASE 0x28044 67#define CB_COLOR1_BASE 0x28044
@@ -112,6 +136,11 @@
112#define DMA_TILING_CONFIG 0x3ec8 136#define DMA_TILING_CONFIG 0x3ec8
113#define DMA_TILING_CONFIG2 0xd0b8 137#define DMA_TILING_CONFIG2 0xd0b8
114 138
139/* RV730 only */
140#define UVD_UDEC_TILING_CONFIG 0xef40
141#define UVD_UDEC_DB_TILING_CONFIG 0xef44
142#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
143
115#define GC_USER_SHADER_PIPE_CONFIG 0x8954 144#define GC_USER_SHADER_PIPE_CONFIG 0x8954
116#define INACTIVE_QD_PIPES(x) ((x) << 8) 145#define INACTIVE_QD_PIPES(x) ((x) << 8)
117#define INACTIVE_QD_PIPES_MASK 0x0000FF00 146#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -671,4 +700,18 @@
671# define TARGET_LINK_SPEED_MASK (0xf << 0) 700# define TARGET_LINK_SPEED_MASK (0xf << 0)
672# define SELECTABLE_DEEMPHASIS (1 << 6) 701# define SELECTABLE_DEEMPHASIS (1 << 6)
673 702
703/* UVD */
704#define UVD_LMI_EXT40_ADDR 0xf498
705#define UVD_VCPU_CHIP_ID 0xf4d4
706#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
707#define UVD_VCPU_CACHE_SIZE0 0xf4dc
708#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
709#define UVD_VCPU_CACHE_SIZE1 0xf4e4
710#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
711#define UVD_VCPU_CACHE_SIZE2 0xf4ec
712#define UVD_LMI_ADDR_EXT 0xf594
713
714#define UVD_RBC_RB_RPTR 0xf690
715#define UVD_RBC_RB_WPTR 0xf694
716
674#endif 717#endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bafbe3216952..f0b6c2f87c4d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -70,6 +70,794 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
70extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 70extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
71extern bool evergreen_is_display_hung(struct radeon_device *rdev); 71extern bool evergreen_is_display_hung(struct radeon_device *rdev);
72 72
73static const u32 tahiti_golden_rlc_registers[] =
74{
75 0xc424, 0xffffffff, 0x00601005,
76 0xc47c, 0xffffffff, 0x10104040,
77 0xc488, 0xffffffff, 0x0100000a,
78 0xc314, 0xffffffff, 0x00000800,
79 0xc30c, 0xffffffff, 0x800000f4,
80 0xf4a8, 0xffffffff, 0x00000000
81};
82
83static const u32 tahiti_golden_registers[] =
84{
85 0x9a10, 0x00010000, 0x00018208,
86 0x9830, 0xffffffff, 0x00000000,
87 0x9834, 0xf00fffff, 0x00000400,
88 0x9838, 0x0002021c, 0x00020200,
89 0xc78, 0x00000080, 0x00000000,
90 0xd030, 0x000300c0, 0x00800040,
91 0xd830, 0x000300c0, 0x00800040,
92 0x5bb0, 0x000000f0, 0x00000070,
93 0x5bc0, 0x00200000, 0x50100000,
94 0x7030, 0x31000311, 0x00000011,
95 0x277c, 0x00000003, 0x000007ff,
96 0x240c, 0x000007ff, 0x00000000,
97 0x8a14, 0xf000001f, 0x00000007,
98 0x8b24, 0xffffffff, 0x00ffffff,
99 0x8b10, 0x0000ff0f, 0x00000000,
100 0x28a4c, 0x07ffffff, 0x4e000000,
101 0x28350, 0x3f3f3fff, 0x2a00126a,
102 0x30, 0x000000ff, 0x0040,
103 0x34, 0x00000040, 0x00004040,
104 0x9100, 0x07ffffff, 0x03000000,
105 0x8e88, 0x01ff1f3f, 0x00000000,
106 0x8e84, 0x01ff1f3f, 0x00000000,
107 0x9060, 0x0000007f, 0x00000020,
108 0x9508, 0x00010000, 0x00010000,
109 0xac14, 0x00000200, 0x000002fb,
110 0xac10, 0xffffffff, 0x0000543b,
111 0xac0c, 0xffffffff, 0xa9210876,
112 0x88d0, 0xffffffff, 0x000fff40,
113 0x88d4, 0x0000001f, 0x00000010,
114 0x1410, 0x20000000, 0x20fffed8,
115 0x15c0, 0x000c0fc0, 0x000c0400
116};
117
118static const u32 tahiti_golden_registers2[] =
119{
120 0xc64, 0x00000001, 0x00000001
121};
122
123static const u32 pitcairn_golden_rlc_registers[] =
124{
125 0xc424, 0xffffffff, 0x00601004,
126 0xc47c, 0xffffffff, 0x10102020,
127 0xc488, 0xffffffff, 0x01000020,
128 0xc314, 0xffffffff, 0x00000800,
129 0xc30c, 0xffffffff, 0x800000a4
130};
131
132static const u32 pitcairn_golden_registers[] =
133{
134 0x9a10, 0x00010000, 0x00018208,
135 0x9830, 0xffffffff, 0x00000000,
136 0x9834, 0xf00fffff, 0x00000400,
137 0x9838, 0x0002021c, 0x00020200,
138 0xc78, 0x00000080, 0x00000000,
139 0xd030, 0x000300c0, 0x00800040,
140 0xd830, 0x000300c0, 0x00800040,
141 0x5bb0, 0x000000f0, 0x00000070,
142 0x5bc0, 0x00200000, 0x50100000,
143 0x7030, 0x31000311, 0x00000011,
144 0x2ae4, 0x00073ffe, 0x000022a2,
145 0x240c, 0x000007ff, 0x00000000,
146 0x8a14, 0xf000001f, 0x00000007,
147 0x8b24, 0xffffffff, 0x00ffffff,
148 0x8b10, 0x0000ff0f, 0x00000000,
149 0x28a4c, 0x07ffffff, 0x4e000000,
150 0x28350, 0x3f3f3fff, 0x2a00126a,
151 0x30, 0x000000ff, 0x0040,
152 0x34, 0x00000040, 0x00004040,
153 0x9100, 0x07ffffff, 0x03000000,
154 0x9060, 0x0000007f, 0x00000020,
155 0x9508, 0x00010000, 0x00010000,
156 0xac14, 0x000003ff, 0x000000f7,
157 0xac10, 0xffffffff, 0x00000000,
158 0xac0c, 0xffffffff, 0x32761054,
159 0x88d4, 0x0000001f, 0x00000010,
160 0x15c0, 0x000c0fc0, 0x000c0400
161};
162
163static const u32 verde_golden_rlc_registers[] =
164{
165 0xc424, 0xffffffff, 0x033f1005,
166 0xc47c, 0xffffffff, 0x10808020,
167 0xc488, 0xffffffff, 0x00800008,
168 0xc314, 0xffffffff, 0x00001000,
169 0xc30c, 0xffffffff, 0x80010014
170};
171
172static const u32 verde_golden_registers[] =
173{
174 0x9a10, 0x00010000, 0x00018208,
175 0x9830, 0xffffffff, 0x00000000,
176 0x9834, 0xf00fffff, 0x00000400,
177 0x9838, 0x0002021c, 0x00020200,
178 0xc78, 0x00000080, 0x00000000,
179 0xd030, 0x000300c0, 0x00800040,
180 0xd030, 0x000300c0, 0x00800040,
181 0xd830, 0x000300c0, 0x00800040,
182 0xd830, 0x000300c0, 0x00800040,
183 0x5bb0, 0x000000f0, 0x00000070,
184 0x5bc0, 0x00200000, 0x50100000,
185 0x7030, 0x31000311, 0x00000011,
186 0x2ae4, 0x00073ffe, 0x000022a2,
187 0x2ae4, 0x00073ffe, 0x000022a2,
188 0x2ae4, 0x00073ffe, 0x000022a2,
189 0x240c, 0x000007ff, 0x00000000,
190 0x240c, 0x000007ff, 0x00000000,
191 0x240c, 0x000007ff, 0x00000000,
192 0x8a14, 0xf000001f, 0x00000007,
193 0x8a14, 0xf000001f, 0x00000007,
194 0x8a14, 0xf000001f, 0x00000007,
195 0x8b24, 0xffffffff, 0x00ffffff,
196 0x8b10, 0x0000ff0f, 0x00000000,
197 0x28a4c, 0x07ffffff, 0x4e000000,
198 0x28350, 0x3f3f3fff, 0x0000124a,
199 0x28350, 0x3f3f3fff, 0x0000124a,
200 0x28350, 0x3f3f3fff, 0x0000124a,
201 0x30, 0x000000ff, 0x0040,
202 0x34, 0x00000040, 0x00004040,
203 0x9100, 0x07ffffff, 0x03000000,
204 0x9100, 0x07ffffff, 0x03000000,
205 0x8e88, 0x01ff1f3f, 0x00000000,
206 0x8e88, 0x01ff1f3f, 0x00000000,
207 0x8e88, 0x01ff1f3f, 0x00000000,
208 0x8e84, 0x01ff1f3f, 0x00000000,
209 0x8e84, 0x01ff1f3f, 0x00000000,
210 0x8e84, 0x01ff1f3f, 0x00000000,
211 0x9060, 0x0000007f, 0x00000020,
212 0x9508, 0x00010000, 0x00010000,
213 0xac14, 0x000003ff, 0x00000003,
214 0xac14, 0x000003ff, 0x00000003,
215 0xac14, 0x000003ff, 0x00000003,
216 0xac10, 0xffffffff, 0x00000000,
217 0xac10, 0xffffffff, 0x00000000,
218 0xac10, 0xffffffff, 0x00000000,
219 0xac0c, 0xffffffff, 0x00001032,
220 0xac0c, 0xffffffff, 0x00001032,
221 0xac0c, 0xffffffff, 0x00001032,
222 0x88d4, 0x0000001f, 0x00000010,
223 0x88d4, 0x0000001f, 0x00000010,
224 0x88d4, 0x0000001f, 0x00000010,
225 0x15c0, 0x000c0fc0, 0x000c0400
226};
227
228static const u32 oland_golden_rlc_registers[] =
229{
230 0xc424, 0xffffffff, 0x00601005,
231 0xc47c, 0xffffffff, 0x10104040,
232 0xc488, 0xffffffff, 0x0100000a,
233 0xc314, 0xffffffff, 0x00000800,
234 0xc30c, 0xffffffff, 0x800000f4
235};
236
237static const u32 oland_golden_registers[] =
238{
239 0x9a10, 0x00010000, 0x00018208,
240 0x9830, 0xffffffff, 0x00000000,
241 0x9834, 0xf00fffff, 0x00000400,
242 0x9838, 0x0002021c, 0x00020200,
243 0xc78, 0x00000080, 0x00000000,
244 0xd030, 0x000300c0, 0x00800040,
245 0xd830, 0x000300c0, 0x00800040,
246 0x5bb0, 0x000000f0, 0x00000070,
247 0x5bc0, 0x00200000, 0x50100000,
248 0x7030, 0x31000311, 0x00000011,
249 0x2ae4, 0x00073ffe, 0x000022a2,
250 0x240c, 0x000007ff, 0x00000000,
251 0x8a14, 0xf000001f, 0x00000007,
252 0x8b24, 0xffffffff, 0x00ffffff,
253 0x8b10, 0x0000ff0f, 0x00000000,
254 0x28a4c, 0x07ffffff, 0x4e000000,
255 0x28350, 0x3f3f3fff, 0x00000082,
256 0x30, 0x000000ff, 0x0040,
257 0x34, 0x00000040, 0x00004040,
258 0x9100, 0x07ffffff, 0x03000000,
259 0x9060, 0x0000007f, 0x00000020,
260 0x9508, 0x00010000, 0x00010000,
261 0xac14, 0x000003ff, 0x000000f3,
262 0xac10, 0xffffffff, 0x00000000,
263 0xac0c, 0xffffffff, 0x00003210,
264 0x88d4, 0x0000001f, 0x00000010,
265 0x15c0, 0x000c0fc0, 0x000c0400
266};
267
268static const u32 tahiti_mgcg_cgcg_init[] =
269{
270 0xc400, 0xffffffff, 0xfffffffc,
271 0x802c, 0xffffffff, 0xe0000000,
272 0x9a60, 0xffffffff, 0x00000100,
273 0x92a4, 0xffffffff, 0x00000100,
274 0xc164, 0xffffffff, 0x00000100,
275 0x9774, 0xffffffff, 0x00000100,
276 0x8984, 0xffffffff, 0x06000100,
277 0x8a18, 0xffffffff, 0x00000100,
278 0x92a0, 0xffffffff, 0x00000100,
279 0xc380, 0xffffffff, 0x00000100,
280 0x8b28, 0xffffffff, 0x00000100,
281 0x9144, 0xffffffff, 0x00000100,
282 0x8d88, 0xffffffff, 0x00000100,
283 0x8d8c, 0xffffffff, 0x00000100,
284 0x9030, 0xffffffff, 0x00000100,
285 0x9034, 0xffffffff, 0x00000100,
286 0x9038, 0xffffffff, 0x00000100,
287 0x903c, 0xffffffff, 0x00000100,
288 0xad80, 0xffffffff, 0x00000100,
289 0xac54, 0xffffffff, 0x00000100,
290 0x897c, 0xffffffff, 0x06000100,
291 0x9868, 0xffffffff, 0x00000100,
292 0x9510, 0xffffffff, 0x00000100,
293 0xaf04, 0xffffffff, 0x00000100,
294 0xae04, 0xffffffff, 0x00000100,
295 0x949c, 0xffffffff, 0x00000100,
296 0x802c, 0xffffffff, 0xe0000000,
297 0x9160, 0xffffffff, 0x00010000,
298 0x9164, 0xffffffff, 0x00030002,
299 0x9168, 0xffffffff, 0x00040007,
300 0x916c, 0xffffffff, 0x00060005,
301 0x9170, 0xffffffff, 0x00090008,
302 0x9174, 0xffffffff, 0x00020001,
303 0x9178, 0xffffffff, 0x00040003,
304 0x917c, 0xffffffff, 0x00000007,
305 0x9180, 0xffffffff, 0x00060005,
306 0x9184, 0xffffffff, 0x00090008,
307 0x9188, 0xffffffff, 0x00030002,
308 0x918c, 0xffffffff, 0x00050004,
309 0x9190, 0xffffffff, 0x00000008,
310 0x9194, 0xffffffff, 0x00070006,
311 0x9198, 0xffffffff, 0x000a0009,
312 0x919c, 0xffffffff, 0x00040003,
313 0x91a0, 0xffffffff, 0x00060005,
314 0x91a4, 0xffffffff, 0x00000009,
315 0x91a8, 0xffffffff, 0x00080007,
316 0x91ac, 0xffffffff, 0x000b000a,
317 0x91b0, 0xffffffff, 0x00050004,
318 0x91b4, 0xffffffff, 0x00070006,
319 0x91b8, 0xffffffff, 0x0008000b,
320 0x91bc, 0xffffffff, 0x000a0009,
321 0x91c0, 0xffffffff, 0x000d000c,
322 0x91c4, 0xffffffff, 0x00060005,
323 0x91c8, 0xffffffff, 0x00080007,
324 0x91cc, 0xffffffff, 0x0000000b,
325 0x91d0, 0xffffffff, 0x000a0009,
326 0x91d4, 0xffffffff, 0x000d000c,
327 0x91d8, 0xffffffff, 0x00070006,
328 0x91dc, 0xffffffff, 0x00090008,
329 0x91e0, 0xffffffff, 0x0000000c,
330 0x91e4, 0xffffffff, 0x000b000a,
331 0x91e8, 0xffffffff, 0x000e000d,
332 0x91ec, 0xffffffff, 0x00080007,
333 0x91f0, 0xffffffff, 0x000a0009,
334 0x91f4, 0xffffffff, 0x0000000d,
335 0x91f8, 0xffffffff, 0x000c000b,
336 0x91fc, 0xffffffff, 0x000f000e,
337 0x9200, 0xffffffff, 0x00090008,
338 0x9204, 0xffffffff, 0x000b000a,
339 0x9208, 0xffffffff, 0x000c000f,
340 0x920c, 0xffffffff, 0x000e000d,
341 0x9210, 0xffffffff, 0x00110010,
342 0x9214, 0xffffffff, 0x000a0009,
343 0x9218, 0xffffffff, 0x000c000b,
344 0x921c, 0xffffffff, 0x0000000f,
345 0x9220, 0xffffffff, 0x000e000d,
346 0x9224, 0xffffffff, 0x00110010,
347 0x9228, 0xffffffff, 0x000b000a,
348 0x922c, 0xffffffff, 0x000d000c,
349 0x9230, 0xffffffff, 0x00000010,
350 0x9234, 0xffffffff, 0x000f000e,
351 0x9238, 0xffffffff, 0x00120011,
352 0x923c, 0xffffffff, 0x000c000b,
353 0x9240, 0xffffffff, 0x000e000d,
354 0x9244, 0xffffffff, 0x00000011,
355 0x9248, 0xffffffff, 0x0010000f,
356 0x924c, 0xffffffff, 0x00130012,
357 0x9250, 0xffffffff, 0x000d000c,
358 0x9254, 0xffffffff, 0x000f000e,
359 0x9258, 0xffffffff, 0x00100013,
360 0x925c, 0xffffffff, 0x00120011,
361 0x9260, 0xffffffff, 0x00150014,
362 0x9264, 0xffffffff, 0x000e000d,
363 0x9268, 0xffffffff, 0x0010000f,
364 0x926c, 0xffffffff, 0x00000013,
365 0x9270, 0xffffffff, 0x00120011,
366 0x9274, 0xffffffff, 0x00150014,
367 0x9278, 0xffffffff, 0x000f000e,
368 0x927c, 0xffffffff, 0x00110010,
369 0x9280, 0xffffffff, 0x00000014,
370 0x9284, 0xffffffff, 0x00130012,
371 0x9288, 0xffffffff, 0x00160015,
372 0x928c, 0xffffffff, 0x0010000f,
373 0x9290, 0xffffffff, 0x00120011,
374 0x9294, 0xffffffff, 0x00000015,
375 0x9298, 0xffffffff, 0x00140013,
376 0x929c, 0xffffffff, 0x00170016,
377 0x9150, 0xffffffff, 0x96940200,
378 0x8708, 0xffffffff, 0x00900100,
379 0xc478, 0xffffffff, 0x00000080,
380 0xc404, 0xffffffff, 0x0020003f,
381 0x30, 0xffffffff, 0x0000001c,
382 0x34, 0x000f0000, 0x000f0000,
383 0x160c, 0xffffffff, 0x00000100,
384 0x1024, 0xffffffff, 0x00000100,
385 0x102c, 0x00000101, 0x00000000,
386 0x20a8, 0xffffffff, 0x00000104,
387 0x264c, 0x000c0000, 0x000c0000,
388 0x2648, 0x000c0000, 0x000c0000,
389 0x55e4, 0xff000fff, 0x00000100,
390 0x55e8, 0x00000001, 0x00000001,
391 0x2f50, 0x00000001, 0x00000001,
392 0x30cc, 0xc0000fff, 0x00000104,
393 0xc1e4, 0x00000001, 0x00000001,
394 0xd0c0, 0xfffffff0, 0x00000100,
395 0xd8c0, 0xfffffff0, 0x00000100
396};
397
398static const u32 pitcairn_mgcg_cgcg_init[] =
399{
400 0xc400, 0xffffffff, 0xfffffffc,
401 0x802c, 0xffffffff, 0xe0000000,
402 0x9a60, 0xffffffff, 0x00000100,
403 0x92a4, 0xffffffff, 0x00000100,
404 0xc164, 0xffffffff, 0x00000100,
405 0x9774, 0xffffffff, 0x00000100,
406 0x8984, 0xffffffff, 0x06000100,
407 0x8a18, 0xffffffff, 0x00000100,
408 0x92a0, 0xffffffff, 0x00000100,
409 0xc380, 0xffffffff, 0x00000100,
410 0x8b28, 0xffffffff, 0x00000100,
411 0x9144, 0xffffffff, 0x00000100,
412 0x8d88, 0xffffffff, 0x00000100,
413 0x8d8c, 0xffffffff, 0x00000100,
414 0x9030, 0xffffffff, 0x00000100,
415 0x9034, 0xffffffff, 0x00000100,
416 0x9038, 0xffffffff, 0x00000100,
417 0x903c, 0xffffffff, 0x00000100,
418 0xad80, 0xffffffff, 0x00000100,
419 0xac54, 0xffffffff, 0x00000100,
420 0x897c, 0xffffffff, 0x06000100,
421 0x9868, 0xffffffff, 0x00000100,
422 0x9510, 0xffffffff, 0x00000100,
423 0xaf04, 0xffffffff, 0x00000100,
424 0xae04, 0xffffffff, 0x00000100,
425 0x949c, 0xffffffff, 0x00000100,
426 0x802c, 0xffffffff, 0xe0000000,
427 0x9160, 0xffffffff, 0x00010000,
428 0x9164, 0xffffffff, 0x00030002,
429 0x9168, 0xffffffff, 0x00040007,
430 0x916c, 0xffffffff, 0x00060005,
431 0x9170, 0xffffffff, 0x00090008,
432 0x9174, 0xffffffff, 0x00020001,
433 0x9178, 0xffffffff, 0x00040003,
434 0x917c, 0xffffffff, 0x00000007,
435 0x9180, 0xffffffff, 0x00060005,
436 0x9184, 0xffffffff, 0x00090008,
437 0x9188, 0xffffffff, 0x00030002,
438 0x918c, 0xffffffff, 0x00050004,
439 0x9190, 0xffffffff, 0x00000008,
440 0x9194, 0xffffffff, 0x00070006,
441 0x9198, 0xffffffff, 0x000a0009,
442 0x919c, 0xffffffff, 0x00040003,
443 0x91a0, 0xffffffff, 0x00060005,
444 0x91a4, 0xffffffff, 0x00000009,
445 0x91a8, 0xffffffff, 0x00080007,
446 0x91ac, 0xffffffff, 0x000b000a,
447 0x91b0, 0xffffffff, 0x00050004,
448 0x91b4, 0xffffffff, 0x00070006,
449 0x91b8, 0xffffffff, 0x0008000b,
450 0x91bc, 0xffffffff, 0x000a0009,
451 0x91c0, 0xffffffff, 0x000d000c,
452 0x9200, 0xffffffff, 0x00090008,
453 0x9204, 0xffffffff, 0x000b000a,
454 0x9208, 0xffffffff, 0x000c000f,
455 0x920c, 0xffffffff, 0x000e000d,
456 0x9210, 0xffffffff, 0x00110010,
457 0x9214, 0xffffffff, 0x000a0009,
458 0x9218, 0xffffffff, 0x000c000b,
459 0x921c, 0xffffffff, 0x0000000f,
460 0x9220, 0xffffffff, 0x000e000d,
461 0x9224, 0xffffffff, 0x00110010,
462 0x9228, 0xffffffff, 0x000b000a,
463 0x922c, 0xffffffff, 0x000d000c,
464 0x9230, 0xffffffff, 0x00000010,
465 0x9234, 0xffffffff, 0x000f000e,
466 0x9238, 0xffffffff, 0x00120011,
467 0x923c, 0xffffffff, 0x000c000b,
468 0x9240, 0xffffffff, 0x000e000d,
469 0x9244, 0xffffffff, 0x00000011,
470 0x9248, 0xffffffff, 0x0010000f,
471 0x924c, 0xffffffff, 0x00130012,
472 0x9250, 0xffffffff, 0x000d000c,
473 0x9254, 0xffffffff, 0x000f000e,
474 0x9258, 0xffffffff, 0x00100013,
475 0x925c, 0xffffffff, 0x00120011,
476 0x9260, 0xffffffff, 0x00150014,
477 0x9150, 0xffffffff, 0x96940200,
478 0x8708, 0xffffffff, 0x00900100,
479 0xc478, 0xffffffff, 0x00000080,
480 0xc404, 0xffffffff, 0x0020003f,
481 0x30, 0xffffffff, 0x0000001c,
482 0x34, 0x000f0000, 0x000f0000,
483 0x160c, 0xffffffff, 0x00000100,
484 0x1024, 0xffffffff, 0x00000100,
485 0x102c, 0x00000101, 0x00000000,
486 0x20a8, 0xffffffff, 0x00000104,
487 0x55e4, 0xff000fff, 0x00000100,
488 0x55e8, 0x00000001, 0x00000001,
489 0x2f50, 0x00000001, 0x00000001,
490 0x30cc, 0xc0000fff, 0x00000104,
491 0xc1e4, 0x00000001, 0x00000001,
492 0xd0c0, 0xfffffff0, 0x00000100,
493 0xd8c0, 0xfffffff0, 0x00000100
494};
495
496static const u32 verde_mgcg_cgcg_init[] =
497{
498 0xc400, 0xffffffff, 0xfffffffc,
499 0x802c, 0xffffffff, 0xe0000000,
500 0x9a60, 0xffffffff, 0x00000100,
501 0x92a4, 0xffffffff, 0x00000100,
502 0xc164, 0xffffffff, 0x00000100,
503 0x9774, 0xffffffff, 0x00000100,
504 0x8984, 0xffffffff, 0x06000100,
505 0x8a18, 0xffffffff, 0x00000100,
506 0x92a0, 0xffffffff, 0x00000100,
507 0xc380, 0xffffffff, 0x00000100,
508 0x8b28, 0xffffffff, 0x00000100,
509 0x9144, 0xffffffff, 0x00000100,
510 0x8d88, 0xffffffff, 0x00000100,
511 0x8d8c, 0xffffffff, 0x00000100,
512 0x9030, 0xffffffff, 0x00000100,
513 0x9034, 0xffffffff, 0x00000100,
514 0x9038, 0xffffffff, 0x00000100,
515 0x903c, 0xffffffff, 0x00000100,
516 0xad80, 0xffffffff, 0x00000100,
517 0xac54, 0xffffffff, 0x00000100,
518 0x897c, 0xffffffff, 0x06000100,
519 0x9868, 0xffffffff, 0x00000100,
520 0x9510, 0xffffffff, 0x00000100,
521 0xaf04, 0xffffffff, 0x00000100,
522 0xae04, 0xffffffff, 0x00000100,
523 0x949c, 0xffffffff, 0x00000100,
524 0x802c, 0xffffffff, 0xe0000000,
525 0x9160, 0xffffffff, 0x00010000,
526 0x9164, 0xffffffff, 0x00030002,
527 0x9168, 0xffffffff, 0x00040007,
528 0x916c, 0xffffffff, 0x00060005,
529 0x9170, 0xffffffff, 0x00090008,
530 0x9174, 0xffffffff, 0x00020001,
531 0x9178, 0xffffffff, 0x00040003,
532 0x917c, 0xffffffff, 0x00000007,
533 0x9180, 0xffffffff, 0x00060005,
534 0x9184, 0xffffffff, 0x00090008,
535 0x9188, 0xffffffff, 0x00030002,
536 0x918c, 0xffffffff, 0x00050004,
537 0x9190, 0xffffffff, 0x00000008,
538 0x9194, 0xffffffff, 0x00070006,
539 0x9198, 0xffffffff, 0x000a0009,
540 0x919c, 0xffffffff, 0x00040003,
541 0x91a0, 0xffffffff, 0x00060005,
542 0x91a4, 0xffffffff, 0x00000009,
543 0x91a8, 0xffffffff, 0x00080007,
544 0x91ac, 0xffffffff, 0x000b000a,
545 0x91b0, 0xffffffff, 0x00050004,
546 0x91b4, 0xffffffff, 0x00070006,
547 0x91b8, 0xffffffff, 0x0008000b,
548 0x91bc, 0xffffffff, 0x000a0009,
549 0x91c0, 0xffffffff, 0x000d000c,
550 0x9200, 0xffffffff, 0x00090008,
551 0x9204, 0xffffffff, 0x000b000a,
552 0x9208, 0xffffffff, 0x000c000f,
553 0x920c, 0xffffffff, 0x000e000d,
554 0x9210, 0xffffffff, 0x00110010,
555 0x9214, 0xffffffff, 0x000a0009,
556 0x9218, 0xffffffff, 0x000c000b,
557 0x921c, 0xffffffff, 0x0000000f,
558 0x9220, 0xffffffff, 0x000e000d,
559 0x9224, 0xffffffff, 0x00110010,
560 0x9228, 0xffffffff, 0x000b000a,
561 0x922c, 0xffffffff, 0x000d000c,
562 0x9230, 0xffffffff, 0x00000010,
563 0x9234, 0xffffffff, 0x000f000e,
564 0x9238, 0xffffffff, 0x00120011,
565 0x923c, 0xffffffff, 0x000c000b,
566 0x9240, 0xffffffff, 0x000e000d,
567 0x9244, 0xffffffff, 0x00000011,
568 0x9248, 0xffffffff, 0x0010000f,
569 0x924c, 0xffffffff, 0x00130012,
570 0x9250, 0xffffffff, 0x000d000c,
571 0x9254, 0xffffffff, 0x000f000e,
572 0x9258, 0xffffffff, 0x00100013,
573 0x925c, 0xffffffff, 0x00120011,
574 0x9260, 0xffffffff, 0x00150014,
575 0x9150, 0xffffffff, 0x96940200,
576 0x8708, 0xffffffff, 0x00900100,
577 0xc478, 0xffffffff, 0x00000080,
578 0xc404, 0xffffffff, 0x0020003f,
579 0x30, 0xffffffff, 0x0000001c,
580 0x34, 0x000f0000, 0x000f0000,
581 0x160c, 0xffffffff, 0x00000100,
582 0x1024, 0xffffffff, 0x00000100,
583 0x102c, 0x00000101, 0x00000000,
584 0x20a8, 0xffffffff, 0x00000104,
585 0x264c, 0x000c0000, 0x000c0000,
586 0x2648, 0x000c0000, 0x000c0000,
587 0x55e4, 0xff000fff, 0x00000100,
588 0x55e8, 0x00000001, 0x00000001,
589 0x2f50, 0x00000001, 0x00000001,
590 0x30cc, 0xc0000fff, 0x00000104,
591 0xc1e4, 0x00000001, 0x00000001,
592 0xd0c0, 0xfffffff0, 0x00000100,
593 0xd8c0, 0xfffffff0, 0x00000100
594};
595
596static const u32 oland_mgcg_cgcg_init[] =
597{
598 0xc400, 0xffffffff, 0xfffffffc,
599 0x802c, 0xffffffff, 0xe0000000,
600 0x9a60, 0xffffffff, 0x00000100,
601 0x92a4, 0xffffffff, 0x00000100,
602 0xc164, 0xffffffff, 0x00000100,
603 0x9774, 0xffffffff, 0x00000100,
604 0x8984, 0xffffffff, 0x06000100,
605 0x8a18, 0xffffffff, 0x00000100,
606 0x92a0, 0xffffffff, 0x00000100,
607 0xc380, 0xffffffff, 0x00000100,
608 0x8b28, 0xffffffff, 0x00000100,
609 0x9144, 0xffffffff, 0x00000100,
610 0x8d88, 0xffffffff, 0x00000100,
611 0x8d8c, 0xffffffff, 0x00000100,
612 0x9030, 0xffffffff, 0x00000100,
613 0x9034, 0xffffffff, 0x00000100,
614 0x9038, 0xffffffff, 0x00000100,
615 0x903c, 0xffffffff, 0x00000100,
616 0xad80, 0xffffffff, 0x00000100,
617 0xac54, 0xffffffff, 0x00000100,
618 0x897c, 0xffffffff, 0x06000100,
619 0x9868, 0xffffffff, 0x00000100,
620 0x9510, 0xffffffff, 0x00000100,
621 0xaf04, 0xffffffff, 0x00000100,
622 0xae04, 0xffffffff, 0x00000100,
623 0x949c, 0xffffffff, 0x00000100,
624 0x802c, 0xffffffff, 0xe0000000,
625 0x9160, 0xffffffff, 0x00010000,
626 0x9164, 0xffffffff, 0x00030002,
627 0x9168, 0xffffffff, 0x00040007,
628 0x916c, 0xffffffff, 0x00060005,
629 0x9170, 0xffffffff, 0x00090008,
630 0x9174, 0xffffffff, 0x00020001,
631 0x9178, 0xffffffff, 0x00040003,
632 0x917c, 0xffffffff, 0x00000007,
633 0x9180, 0xffffffff, 0x00060005,
634 0x9184, 0xffffffff, 0x00090008,
635 0x9188, 0xffffffff, 0x00030002,
636 0x918c, 0xffffffff, 0x00050004,
637 0x9190, 0xffffffff, 0x00000008,
638 0x9194, 0xffffffff, 0x00070006,
639 0x9198, 0xffffffff, 0x000a0009,
640 0x919c, 0xffffffff, 0x00040003,
641 0x91a0, 0xffffffff, 0x00060005,
642 0x91a4, 0xffffffff, 0x00000009,
643 0x91a8, 0xffffffff, 0x00080007,
644 0x91ac, 0xffffffff, 0x000b000a,
645 0x91b0, 0xffffffff, 0x00050004,
646 0x91b4, 0xffffffff, 0x00070006,
647 0x91b8, 0xffffffff, 0x0008000b,
648 0x91bc, 0xffffffff, 0x000a0009,
649 0x91c0, 0xffffffff, 0x000d000c,
650 0x91c4, 0xffffffff, 0x00060005,
651 0x91c8, 0xffffffff, 0x00080007,
652 0x91cc, 0xffffffff, 0x0000000b,
653 0x91d0, 0xffffffff, 0x000a0009,
654 0x91d4, 0xffffffff, 0x000d000c,
655 0x9150, 0xffffffff, 0x96940200,
656 0x8708, 0xffffffff, 0x00900100,
657 0xc478, 0xffffffff, 0x00000080,
658 0xc404, 0xffffffff, 0x0020003f,
659 0x30, 0xffffffff, 0x0000001c,
660 0x34, 0x000f0000, 0x000f0000,
661 0x160c, 0xffffffff, 0x00000100,
662 0x1024, 0xffffffff, 0x00000100,
663 0x102c, 0x00000101, 0x00000000,
664 0x20a8, 0xffffffff, 0x00000104,
665 0x264c, 0x000c0000, 0x000c0000,
666 0x2648, 0x000c0000, 0x000c0000,
667 0x55e4, 0xff000fff, 0x00000100,
668 0x55e8, 0x00000001, 0x00000001,
669 0x2f50, 0x00000001, 0x00000001,
670 0x30cc, 0xc0000fff, 0x00000104,
671 0xc1e4, 0x00000001, 0x00000001,
672 0xd0c0, 0xfffffff0, 0x00000100,
673 0xd8c0, 0xfffffff0, 0x00000100
674};
675
676static const u32 verde_pg_init[] =
677{
678 0x353c, 0xffffffff, 0x40000,
679 0x3538, 0xffffffff, 0x200010ff,
680 0x353c, 0xffffffff, 0x0,
681 0x353c, 0xffffffff, 0x0,
682 0x353c, 0xffffffff, 0x0,
683 0x353c, 0xffffffff, 0x0,
684 0x353c, 0xffffffff, 0x0,
685 0x353c, 0xffffffff, 0x7007,
686 0x3538, 0xffffffff, 0x300010ff,
687 0x353c, 0xffffffff, 0x0,
688 0x353c, 0xffffffff, 0x0,
689 0x353c, 0xffffffff, 0x0,
690 0x353c, 0xffffffff, 0x0,
691 0x353c, 0xffffffff, 0x0,
692 0x353c, 0xffffffff, 0x400000,
693 0x3538, 0xffffffff, 0x100010ff,
694 0x353c, 0xffffffff, 0x0,
695 0x353c, 0xffffffff, 0x0,
696 0x353c, 0xffffffff, 0x0,
697 0x353c, 0xffffffff, 0x0,
698 0x353c, 0xffffffff, 0x0,
699 0x353c, 0xffffffff, 0x120200,
700 0x3538, 0xffffffff, 0x500010ff,
701 0x353c, 0xffffffff, 0x0,
702 0x353c, 0xffffffff, 0x0,
703 0x353c, 0xffffffff, 0x0,
704 0x353c, 0xffffffff, 0x0,
705 0x353c, 0xffffffff, 0x0,
706 0x353c, 0xffffffff, 0x1e1e16,
707 0x3538, 0xffffffff, 0x600010ff,
708 0x353c, 0xffffffff, 0x0,
709 0x353c, 0xffffffff, 0x0,
710 0x353c, 0xffffffff, 0x0,
711 0x353c, 0xffffffff, 0x0,
712 0x353c, 0xffffffff, 0x0,
713 0x353c, 0xffffffff, 0x171f1e,
714 0x3538, 0xffffffff, 0x700010ff,
715 0x353c, 0xffffffff, 0x0,
716 0x353c, 0xffffffff, 0x0,
717 0x353c, 0xffffffff, 0x0,
718 0x353c, 0xffffffff, 0x0,
719 0x353c, 0xffffffff, 0x0,
720 0x353c, 0xffffffff, 0x0,
721 0x3538, 0xffffffff, 0x9ff,
722 0x3500, 0xffffffff, 0x0,
723 0x3504, 0xffffffff, 0x10000800,
724 0x3504, 0xffffffff, 0xf,
725 0x3504, 0xffffffff, 0xf,
726 0x3500, 0xffffffff, 0x4,
727 0x3504, 0xffffffff, 0x1000051e,
728 0x3504, 0xffffffff, 0xffff,
729 0x3504, 0xffffffff, 0xffff,
730 0x3500, 0xffffffff, 0x8,
731 0x3504, 0xffffffff, 0x80500,
732 0x3500, 0xffffffff, 0x12,
733 0x3504, 0xffffffff, 0x9050c,
734 0x3500, 0xffffffff, 0x1d,
735 0x3504, 0xffffffff, 0xb052c,
736 0x3500, 0xffffffff, 0x2a,
737 0x3504, 0xffffffff, 0x1053e,
738 0x3500, 0xffffffff, 0x2d,
739 0x3504, 0xffffffff, 0x10546,
740 0x3500, 0xffffffff, 0x30,
741 0x3504, 0xffffffff, 0xa054e,
742 0x3500, 0xffffffff, 0x3c,
743 0x3504, 0xffffffff, 0x1055f,
744 0x3500, 0xffffffff, 0x3f,
745 0x3504, 0xffffffff, 0x10567,
746 0x3500, 0xffffffff, 0x42,
747 0x3504, 0xffffffff, 0x1056f,
748 0x3500, 0xffffffff, 0x45,
749 0x3504, 0xffffffff, 0x10572,
750 0x3500, 0xffffffff, 0x48,
751 0x3504, 0xffffffff, 0x20575,
752 0x3500, 0xffffffff, 0x4c,
753 0x3504, 0xffffffff, 0x190801,
754 0x3500, 0xffffffff, 0x67,
755 0x3504, 0xffffffff, 0x1082a,
756 0x3500, 0xffffffff, 0x6a,
757 0x3504, 0xffffffff, 0x1b082d,
758 0x3500, 0xffffffff, 0x87,
759 0x3504, 0xffffffff, 0x310851,
760 0x3500, 0xffffffff, 0xba,
761 0x3504, 0xffffffff, 0x891,
762 0x3500, 0xffffffff, 0xbc,
763 0x3504, 0xffffffff, 0x893,
764 0x3500, 0xffffffff, 0xbe,
765 0x3504, 0xffffffff, 0x20895,
766 0x3500, 0xffffffff, 0xc2,
767 0x3504, 0xffffffff, 0x20899,
768 0x3500, 0xffffffff, 0xc6,
769 0x3504, 0xffffffff, 0x2089d,
770 0x3500, 0xffffffff, 0xca,
771 0x3504, 0xffffffff, 0x8a1,
772 0x3500, 0xffffffff, 0xcc,
773 0x3504, 0xffffffff, 0x8a3,
774 0x3500, 0xffffffff, 0xce,
775 0x3504, 0xffffffff, 0x308a5,
776 0x3500, 0xffffffff, 0xd3,
777 0x3504, 0xffffffff, 0x6d08cd,
778 0x3500, 0xffffffff, 0x142,
779 0x3504, 0xffffffff, 0x2000095a,
780 0x3504, 0xffffffff, 0x1,
781 0x3500, 0xffffffff, 0x144,
782 0x3504, 0xffffffff, 0x301f095b,
783 0x3500, 0xffffffff, 0x165,
784 0x3504, 0xffffffff, 0xc094d,
785 0x3500, 0xffffffff, 0x173,
786 0x3504, 0xffffffff, 0xf096d,
787 0x3500, 0xffffffff, 0x184,
788 0x3504, 0xffffffff, 0x15097f,
789 0x3500, 0xffffffff, 0x19b,
790 0x3504, 0xffffffff, 0xc0998,
791 0x3500, 0xffffffff, 0x1a9,
792 0x3504, 0xffffffff, 0x409a7,
793 0x3500, 0xffffffff, 0x1af,
794 0x3504, 0xffffffff, 0xcdc,
795 0x3500, 0xffffffff, 0x1b1,
796 0x3504, 0xffffffff, 0x800,
797 0x3508, 0xffffffff, 0x6c9b2000,
798 0x3510, 0xfc00, 0x2000,
799 0x3544, 0xffffffff, 0xfc0,
800 0x28d4, 0x00000100, 0x100
801};
802
803static void si_init_golden_registers(struct radeon_device *rdev)
804{
805 switch (rdev->family) {
806 case CHIP_TAHITI:
807 radeon_program_register_sequence(rdev,
808 tahiti_golden_registers,
809 (const u32)ARRAY_SIZE(tahiti_golden_registers));
810 radeon_program_register_sequence(rdev,
811 tahiti_golden_rlc_registers,
812 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
813 radeon_program_register_sequence(rdev,
814 tahiti_mgcg_cgcg_init,
815 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
816 radeon_program_register_sequence(rdev,
817 tahiti_golden_registers2,
818 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
819 break;
820 case CHIP_PITCAIRN:
821 radeon_program_register_sequence(rdev,
822 pitcairn_golden_registers,
823 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
824 radeon_program_register_sequence(rdev,
825 pitcairn_golden_rlc_registers,
826 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
827 radeon_program_register_sequence(rdev,
828 pitcairn_mgcg_cgcg_init,
829 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
830 break;
831 case CHIP_VERDE:
832 radeon_program_register_sequence(rdev,
833 verde_golden_registers,
834 (const u32)ARRAY_SIZE(verde_golden_registers));
835 radeon_program_register_sequence(rdev,
836 verde_golden_rlc_registers,
837 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
838 radeon_program_register_sequence(rdev,
839 verde_mgcg_cgcg_init,
840 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
841 radeon_program_register_sequence(rdev,
842 verde_pg_init,
843 (const u32)ARRAY_SIZE(verde_pg_init));
844 break;
845 case CHIP_OLAND:
846 radeon_program_register_sequence(rdev,
847 oland_golden_registers,
848 (const u32)ARRAY_SIZE(oland_golden_registers));
849 radeon_program_register_sequence(rdev,
850 oland_golden_rlc_registers,
851 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
852 radeon_program_register_sequence(rdev,
853 oland_mgcg_cgcg_init,
854 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
855 break;
856 default:
857 break;
858 }
859}
860
73#define PCIE_BUS_CLK 10000 861#define PCIE_BUS_CLK 10000
74#define TCLK (PCIE_BUS_CLK / 10) 862#define TCLK (PCIE_BUS_CLK / 10)
75 863
@@ -1211,6 +1999,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1211 gb_tile_moden = 0; 1999 gb_tile_moden = 0;
1212 break; 2000 break;
1213 } 2001 }
2002 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
1214 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2003 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1215 } 2004 }
1216 } else if ((rdev->family == CHIP_VERDE) || 2005 } else if ((rdev->family == CHIP_VERDE) ||
@@ -1451,6 +2240,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1451 gb_tile_moden = 0; 2240 gb_tile_moden = 0;
1452 break; 2241 break;
1453 } 2242 }
2243 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
1454 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2244 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1455 } 2245 }
1456 } else 2246 } else
@@ -1463,7 +2253,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
1463 u32 data = INSTANCE_BROADCAST_WRITES; 2253 u32 data = INSTANCE_BROADCAST_WRITES;
1464 2254
1465 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) 2255 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1466 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; 2256 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1467 else if (se_num == 0xffffffff) 2257 else if (se_num == 0xffffffff)
1468 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); 2258 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1469 else if (sh_num == 0xffffffff) 2259 else if (sh_num == 0xffffffff)
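[Editor's note] The "|=" fix in this hunk is subtle but real:

	/*
	 * data starts out as INSTANCE_BROADCAST_WRITES; the old plain
	 * assignment clobbered that bit in the broadcast-to-everything
	 * case, while "|=" keeps instance broadcast enabled alongside
	 * the SE/SH broadcast bits.
	 */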
@@ -1765,9 +2555,13 @@ static void si_gpu_init(struct radeon_device *rdev)
1765 2555
1766 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2556 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1767 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2557 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2558 WREG32(DMIF_ADDR_CALC, gb_addr_config);
1768 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2559 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1769 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 2560 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1770 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 2561 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
2562 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
2563 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2564 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
1771 2565
1772 si_tiling_mode_table_init(rdev); 2566 si_tiling_mode_table_init(rdev);
1773 2567
@@ -2538,46 +3332,6 @@ static void si_mc_program(struct radeon_device *rdev)
2538 rv515_vga_render_disable(rdev); 3332 rv515_vga_render_disable(rdev);
2539} 3333}
2540 3334
2541/* SI MC address space is 40 bits */
2542static void si_vram_location(struct radeon_device *rdev,
2543 struct radeon_mc *mc, u64 base)
2544{
2545 mc->vram_start = base;
2546 if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
2547 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
2548 mc->real_vram_size = mc->aper_size;
2549 mc->mc_vram_size = mc->aper_size;
2550 }
2551 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2552 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
2553 mc->mc_vram_size >> 20, mc->vram_start,
2554 mc->vram_end, mc->real_vram_size >> 20);
2555}
2556
2557static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2558{
2559 u64 size_af, size_bf;
2560
2561 size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
2562 size_bf = mc->vram_start & ~mc->gtt_base_align;
2563 if (size_bf > size_af) {
2564 if (mc->gtt_size > size_bf) {
2565 dev_warn(rdev->dev, "limiting GTT\n");
2566 mc->gtt_size = size_bf;
2567 }
2568 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
2569 } else {
2570 if (mc->gtt_size > size_af) {
2571 dev_warn(rdev->dev, "limiting GTT\n");
2572 mc->gtt_size = size_af;
2573 }
2574 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
2575 }
2576 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
2577 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
2578 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
2579}
2580
2581static void si_vram_gtt_location(struct radeon_device *rdev, 3335static void si_vram_gtt_location(struct radeon_device *rdev,
2582 struct radeon_mc *mc) 3336 struct radeon_mc *mc)
2583{ 3337{
@@ -2587,9 +3341,9 @@ static void si_vram_gtt_location(struct radeon_device *rdev,
2587 mc->real_vram_size = 0xFFC0000000ULL; 3341 mc->real_vram_size = 0xFFC0000000ULL;
2588 mc->mc_vram_size = 0xFFC0000000ULL; 3342 mc->mc_vram_size = 0xFFC0000000ULL;
2589 } 3343 }
2590 si_vram_location(rdev, &rdev->mc, 0); 3344 radeon_vram_location(rdev, &rdev->mc, 0);
2591 rdev->mc.gtt_base_align = 0; 3345 rdev->mc.gtt_base_align = 0;
2592 si_gtt_location(rdev, mc); 3346 radeon_gtt_location(rdev, mc);
2593} 3347}
2594 3348
2595static int si_mc_init(struct radeon_device *rdev) 3349static int si_mc_init(struct radeon_device *rdev)
@@ -4322,14 +5076,6 @@ static int si_startup(struct radeon_device *rdev)
4322 return r; 5076 return r;
4323 si_gpu_init(rdev); 5077 si_gpu_init(rdev);
4324 5078
4325#if 0
4326 r = evergreen_blit_init(rdev);
4327 if (r) {
4328 r600_blit_fini(rdev);
4329 rdev->asic->copy = NULL;
4330 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
4331 }
4332#endif
4333 /* allocate rlc buffers */ 5079 /* allocate rlc buffers */
4334 r = si_rlc_init(rdev); 5080 r = si_rlc_init(rdev);
4335 if (r) { 5081 if (r) {
@@ -4372,6 +5118,16 @@ static int si_startup(struct radeon_device *rdev)
4372 return r; 5118 return r;
4373 } 5119 }
4374 5120
5121 r = rv770_uvd_resume(rdev);
5122 if (!r) {
5123 r = radeon_fence_driver_start_ring(rdev,
5124 R600_RING_TYPE_UVD_INDEX);
5125 if (r)
5126 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
5127 }
5128 if (r)
5129 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
5130
4375 /* Enable IRQ */ 5131 /* Enable IRQ */
4376 r = si_irq_init(rdev); 5132 r = si_irq_init(rdev);
4377 if (r) { 5133 if (r) {
@@ -4429,6 +5185,18 @@ static int si_startup(struct radeon_device *rdev)
4429 if (r) 5185 if (r)
4430 return r; 5186 return r;
4431 5187
5188 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5189 if (ring->ring_size) {
5190 r = radeon_ring_init(rdev, ring, ring->ring_size,
5191 R600_WB_UVD_RPTR_OFFSET,
5192 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
5193 0, 0xfffff, RADEON_CP_PACKET2);
5194 if (!r)
5195 r = r600_uvd_init(rdev);
5196 if (r)
5197 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
5198 }
5199
4432 r = radeon_ib_pool_init(rdev); 5200 r = radeon_ib_pool_init(rdev);
4433 if (r) { 5201 if (r) {
4434 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 5202 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -4455,6 +5223,9 @@ int si_resume(struct radeon_device *rdev)
4455 /* post card */ 5223 /* post card */
4456 atom_asic_init(rdev->mode_info.atom_context); 5224 atom_asic_init(rdev->mode_info.atom_context);
4457 5225
5226 /* init golden registers */
5227 si_init_golden_registers(rdev);
5228
4458 rdev->accel_working = true; 5229 rdev->accel_working = true;
4459 r = si_startup(rdev); 5230 r = si_startup(rdev);
4460 if (r) { 5231 if (r) {
@@ -4472,6 +5243,8 @@ int si_suspend(struct radeon_device *rdev)
4472 radeon_vm_manager_fini(rdev); 5243 radeon_vm_manager_fini(rdev);
4473 si_cp_enable(rdev, false); 5244 si_cp_enable(rdev, false);
4474 cayman_dma_stop(rdev); 5245 cayman_dma_stop(rdev);
5246 r600_uvd_rbc_stop(rdev);
5247 radeon_uvd_suspend(rdev);
4475 si_irq_suspend(rdev); 5248 si_irq_suspend(rdev);
4476 radeon_wb_disable(rdev); 5249 radeon_wb_disable(rdev);
4477 si_pcie_gart_disable(rdev); 5250 si_pcie_gart_disable(rdev);
@@ -4512,6 +5285,8 @@ int si_init(struct radeon_device *rdev)
4512 DRM_INFO("GPU not posted. posting now...\n"); 5285 DRM_INFO("GPU not posted. posting now...\n");
4513 atom_asic_init(rdev->mode_info.atom_context); 5286 atom_asic_init(rdev->mode_info.atom_context);
4514 } 5287 }
5288 /* init golden registers */
5289 si_init_golden_registers(rdev);
4515 /* Initialize scratch registers */ 5290 /* Initialize scratch registers */
4516 si_scratch_init(rdev); 5291 si_scratch_init(rdev);
4517 /* Initialize surface registers */ 5292 /* Initialize surface registers */
@@ -4557,6 +5332,13 @@ int si_init(struct radeon_device *rdev)
4557 ring->ring_obj = NULL; 5332 ring->ring_obj = NULL;
4558 r600_ring_init(rdev, ring, 64 * 1024); 5333 r600_ring_init(rdev, ring, 64 * 1024);
4559 5334
5335 r = radeon_uvd_init(rdev);
5336 if (!r) {
5337 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5338 ring->ring_obj = NULL;
5339 r600_ring_init(rdev, ring, 4096);
5340 }
5341
4560 rdev->ih.ring_obj = NULL; 5342 rdev->ih.ring_obj = NULL;
4561 r600_ih_ring_init(rdev, 64 * 1024); 5343 r600_ih_ring_init(rdev, 64 * 1024);
4562 5344
@@ -4594,9 +5376,6 @@ int si_init(struct radeon_device *rdev)
4594 5376
4595void si_fini(struct radeon_device *rdev) 5377void si_fini(struct radeon_device *rdev)
4596{ 5378{
4597#if 0
4598 r600_blit_fini(rdev);
4599#endif
4600 si_cp_fini(rdev); 5379 si_cp_fini(rdev);
4601 cayman_dma_fini(rdev); 5380 cayman_dma_fini(rdev);
4602 si_irq_fini(rdev); 5381 si_irq_fini(rdev);
@@ -4605,6 +5384,7 @@ void si_fini(struct radeon_device *rdev)
4605 radeon_vm_manager_fini(rdev); 5384 radeon_vm_manager_fini(rdev);
4606 radeon_ib_pool_fini(rdev); 5385 radeon_ib_pool_fini(rdev);
4607 radeon_irq_kms_fini(rdev); 5386 radeon_irq_kms_fini(rdev);
5387 radeon_uvd_fini(rdev);
4608 si_pcie_gart_fini(rdev); 5388 si_pcie_gart_fini(rdev);
4609 r600_vram_scratch_fini(rdev); 5389 r600_vram_scratch_fini(rdev);
4610 radeon_gem_fini(rdev); 5390 radeon_gem_fini(rdev);
@@ -4634,3 +5414,94 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
4634 mutex_unlock(&rdev->gpu_clock_mutex); 5414 mutex_unlock(&rdev->gpu_clock_mutex);
4635 return clock; 5415 return clock;
4636} 5416}
5417
5418int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5419{
5420 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
5421 int r;
5422
5423 /* bypass vclk and dclk with bclk */
5424 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5425 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
5426 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
5427
5428 /* put PLL in bypass mode */
5429 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
5430
5431 if (!vclk || !dclk) {
5432		/* keep the bypass mode, put PLL to sleep */
5433 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
5434 return 0;
5435 }
5436
5437 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
5438 16384, 0x03FFFFFF, 0, 128, 5,
5439 &fb_div, &vclk_div, &dclk_div);
5440 if (r)
5441 return r;
5442
5443 /* set RESET_ANTI_MUX to 0 */
5444 WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
5445
5446 /* set VCO_MODE to 1 */
5447 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
5448
5449 /* toggle UPLL_SLEEP to 1 then back to 0 */
5450 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
5451 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
5452
5453 /* deassert UPLL_RESET */
5454 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
5455
5456 mdelay(1);
5457
5458 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5459 if (r)
5460 return r;
5461
5462 /* assert UPLL_RESET again */
5463 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
5464
5465	/* disable spread spectrum */
5466 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
5467
5468 /* set feedback divider */
5469 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
5470
5471 /* set ref divider to 0 */
5472 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
5473
5474 if (fb_div < 307200)
5475 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
5476 else
5477 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
5478
5479 /* set PDIV_A and PDIV_B */
5480 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5481 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
5482 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
5483
5484 /* give the PLL some time to settle */
5485 mdelay(15);
5486
5487 /* deassert PLL_RESET */
5488 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
5489
5490 mdelay(15);
5491
5492 /* switch from bypass mode to normal mode */
5493 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
5494
5495 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5496 if (r)
5497 return r;
5498
5499 /* switch VCLK and DCLK selection */
5500 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5501 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
5502 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
5503
5504 mdelay(100);
5505
5506 return 0;
5507}
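[Editor's aside] For readers tracing the sequence above: given the limits passed to radeon_uvd_calc_upll_dividers() (VCO range 1.25-2.5 GHz in the driver's 10 kHz units, a feedback divider scaled by 16384, post dividers up to 128), the chosen dividers should satisfy, roughly:

	/*
	 * Editor's reconstruction of the relationship (the actual search
	 * lives in radeon_uvd.c):
	 *
	 *   vco_freq = ref_freq * fb_div / 16384;  fb_div is fixed point
	 *                                          with 14 fractional bits
	 *   vclk     = vco_freq / vclk_div;
	 *   dclk     = vco_freq / dclk_div;
	 */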
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 23fc08fc8e7f..222877ba6cf5 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -29,6 +29,35 @@
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31 31
32/* discrete uvd clocks */
33#define CG_UPLL_FUNC_CNTL 0x634
34# define UPLL_RESET_MASK 0x00000001
35# define UPLL_SLEEP_MASK 0x00000002
36# define UPLL_BYPASS_EN_MASK 0x00000004
37# define UPLL_CTLREQ_MASK 0x00000008
38# define UPLL_VCO_MODE_MASK 0x00000600
39# define UPLL_REF_DIV_MASK 0x003F0000
40# define UPLL_CTLACK_MASK 0x40000000
41# define UPLL_CTLACK2_MASK 0x80000000
42#define CG_UPLL_FUNC_CNTL_2 0x638
43# define UPLL_PDIV_A(x) ((x) << 0)
44# define UPLL_PDIV_A_MASK 0x0000007F
45# define UPLL_PDIV_B(x) ((x) << 8)
46# define UPLL_PDIV_B_MASK 0x00007F00
47# define VCLK_SRC_SEL(x) ((x) << 20)
48# define VCLK_SRC_SEL_MASK 0x01F00000
49# define DCLK_SRC_SEL(x) ((x) << 25)
50# define DCLK_SRC_SEL_MASK 0x3E000000
51#define CG_UPLL_FUNC_CNTL_3 0x63C
52# define UPLL_FB_DIV(x) ((x) << 0)
53# define UPLL_FB_DIV_MASK 0x01FFFFFF
54#define CG_UPLL_FUNC_CNTL_4 0x644
55# define UPLL_SPARE_ISPARE9 0x00020000
56#define CG_UPLL_FUNC_CNTL_5 0x648
57# define RESET_ANTI_MUX_MASK 0x00000200
58#define CG_UPLL_SPREAD_SPECTRUM 0x650
59# define SSEN_MASK 0x00000001
60
32#define CG_MULT_THERMAL_STATUS 0x714 61#define CG_MULT_THERMAL_STATUS 0x714
33#define ASIC_MAX_TEMP(x) ((x) << 0) 62#define ASIC_MAX_TEMP(x) ((x) << 0)
34#define ASIC_MAX_TEMP_MASK 0x000001ff 63#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -65,6 +94,8 @@
65 94
66#define DMIF_ADDR_CONFIG 0xBD4 95#define DMIF_ADDR_CONFIG 0xBD4
67 96
97#define DMIF_ADDR_CALC 0xC00
98
68#define SRBM_STATUS 0xE50 99#define SRBM_STATUS 0xE50
69#define GRBM_RQ_PENDING (1 << 5) 100#define GRBM_RQ_PENDING (1 << 5)
70#define VMC_BUSY (1 << 8) 101#define VMC_BUSY (1 << 8)
@@ -798,6 +829,15 @@
798# define THREAD_TRACE_FINISH (55 << 0) 829# define THREAD_TRACE_FINISH (55 << 0)
799 830
800/* 831/*
832 * UVD
833 */
834#define UVD_UDEC_ADDR_CONFIG 0xEF4C
835#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
836#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
837#define UVD_RBC_RB_RPTR 0xF690
838#define UVD_RBC_RB_WPTR 0xF694
839
840/*
801 * PM4 841 * PM4
802 */ 842 */
803#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ 843#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index d917a411ca85..7dff49ed66e7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -494,10 +494,10 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
494 494
495 if (event) { 495 if (event) {
496 event->pipe = 0; 496 event->pipe = 0;
497 drm_vblank_get(dev, 0);
497 spin_lock_irqsave(&dev->event_lock, flags); 498 spin_lock_irqsave(&dev->event_lock, flags);
498 scrtc->event = event; 499 scrtc->event = event;
499 spin_unlock_irqrestore(&dev->event_lock, flags); 500 spin_unlock_irqrestore(&dev->event_lock, flags);
500 drm_vblank_get(dev, 0);
501 } 501 }
502 502
503 return 0; 503 return 0;
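[Editor's note] Moving drm_vblank_get() ahead of publishing the event is the whole fix:

	/*
	 * Once scrtc->event is visible under event_lock, the vblank IRQ
	 * handler may complete the flip and call drm_vblank_put(); taking
	 * the vblank reference first keeps the get/put pairing balanced.
	 */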
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
deleted file mode 100644
index 80f73d1315d0..000000000000
--- a/drivers/gpu/drm/tegra/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1ccflags-y := -Iinclude/drm
2ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
3
4tegra-drm-y := drm.o fb.o dc.o host1x.o
5tegra-drm-y += output.o rgb.o hdmi.o
6
7obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
deleted file mode 100644
index 9d452df5bcad..000000000000
--- a/drivers/gpu/drm/tegra/drm.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include "drm.h"
18
19#define DRIVER_NAME "tegra"
20#define DRIVER_DESC "NVIDIA Tegra graphics"
21#define DRIVER_DATE "20120330"
22#define DRIVER_MAJOR 0
23#define DRIVER_MINOR 0
24#define DRIVER_PATCHLEVEL 0
25
26static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
27{
28 struct device *dev = drm->dev;
29 struct host1x *host1x;
30 int err;
31
32 host1x = dev_get_drvdata(dev);
33 drm->dev_private = host1x;
34 host1x->drm = drm;
35
36 drm_mode_config_init(drm);
37
38 err = host1x_drm_init(host1x, drm);
39 if (err < 0)
40 return err;
41
42 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
43 if (err < 0)
44 return err;
45
46 err = tegra_drm_fb_init(drm);
47 if (err < 0)
48 return err;
49
50 drm_kms_helper_poll_init(drm);
51
52 return 0;
53}
54
55static int tegra_drm_unload(struct drm_device *drm)
56{
57 drm_kms_helper_poll_fini(drm);
58 tegra_drm_fb_exit(drm);
59
60 drm_mode_config_cleanup(drm);
61
62 return 0;
63}
64
65static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
66{
67 return 0;
68}
69
70static void tegra_drm_lastclose(struct drm_device *drm)
71{
72 struct host1x *host1x = drm->dev_private;
73
74 drm_fbdev_cma_restore_mode(host1x->fbdev);
75}
76
77static struct drm_ioctl_desc tegra_drm_ioctls[] = {
78};
79
80static const struct file_operations tegra_drm_fops = {
81 .owner = THIS_MODULE,
82 .open = drm_open,
83 .release = drm_release,
84 .unlocked_ioctl = drm_ioctl,
85 .mmap = drm_gem_cma_mmap,
86 .poll = drm_poll,
87 .fasync = drm_fasync,
88 .read = drm_read,
89#ifdef CONFIG_COMPAT
90 .compat_ioctl = drm_compat_ioctl,
91#endif
92 .llseek = noop_llseek,
93};
94
95static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
96{
97 struct drm_crtc *crtc;
98
99 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
100 struct tegra_dc *dc = to_tegra_dc(crtc);
101
102 if (dc->pipe == pipe)
103 return crtc;
104 }
105
106 return NULL;
107}
108
109static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
110{
111 /* TODO: implement real hardware counter using syncpoints */
112 return drm_vblank_count(dev, crtc);
113}
114
115static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
116{
117 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
118 struct tegra_dc *dc = to_tegra_dc(crtc);
119
120 if (!crtc)
121 return -ENODEV;
122
123 tegra_dc_enable_vblank(dc);
124
125 return 0;
126}
127
128static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
129{
130 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
131 struct tegra_dc *dc = to_tegra_dc(crtc);
132
133 if (crtc)
134 tegra_dc_disable_vblank(dc);
135}
136
137static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
138{
139 struct drm_crtc *crtc;
140
141 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
142 tegra_dc_cancel_page_flip(crtc, file);
143}
144
145#ifdef CONFIG_DEBUG_FS
146static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
147{
148 struct drm_info_node *node = (struct drm_info_node *)s->private;
149 struct drm_device *drm = node->minor->dev;
150 struct drm_framebuffer *fb;
151
152 mutex_lock(&drm->mode_config.fb_lock);
153
154 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
155 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
156 fb->base.id, fb->width, fb->height, fb->depth,
157 fb->bits_per_pixel,
158 atomic_read(&fb->refcount.refcount));
159 }
160
161 mutex_unlock(&drm->mode_config.fb_lock);
162
163 return 0;
164}
165
166static struct drm_info_list tegra_debugfs_list[] = {
167 { "framebuffers", tegra_debugfs_framebuffers, 0 },
168};
169
170static int tegra_debugfs_init(struct drm_minor *minor)
171{
172 return drm_debugfs_create_files(tegra_debugfs_list,
173 ARRAY_SIZE(tegra_debugfs_list),
174 minor->debugfs_root, minor);
175}
176
177static void tegra_debugfs_cleanup(struct drm_minor *minor)
178{
179 drm_debugfs_remove_files(tegra_debugfs_list,
180 ARRAY_SIZE(tegra_debugfs_list), minor);
181}
182#endif
183
184struct drm_driver tegra_drm_driver = {
185 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
186 .load = tegra_drm_load,
187 .unload = tegra_drm_unload,
188 .open = tegra_drm_open,
189 .preclose = tegra_drm_preclose,
190 .lastclose = tegra_drm_lastclose,
191
192 .get_vblank_counter = tegra_drm_get_vblank_counter,
193 .enable_vblank = tegra_drm_enable_vblank,
194 .disable_vblank = tegra_drm_disable_vblank,
195
196#if defined(CONFIG_DEBUG_FS)
197 .debugfs_init = tegra_debugfs_init,
198 .debugfs_cleanup = tegra_debugfs_cleanup,
199#endif
200
201 .gem_free_object = drm_gem_cma_free_object,
202 .gem_vm_ops = &drm_gem_cma_vm_ops,
203 .dumb_create = drm_gem_cma_dumb_create,
204 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
205 .dumb_destroy = drm_gem_cma_dumb_destroy,
206
207 .ioctls = tegra_drm_ioctls,
208 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
209 .fops = &tegra_drm_fops,
210
211 .name = DRIVER_NAME,
212 .desc = DRIVER_DESC,
213 .date = DRIVER_DATE,
214 .major = DRIVER_MAJOR,
215 .minor = DRIVER_MINOR,
216 .patchlevel = DRIVER_PATCHLEVEL,
217};
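One detail of the removed code worth flagging: tegra_drm_enable_vblank() and tegra_drm_disable_vblank() run to_tegra_dc() on the crtc pointer before checking it for NULL. container_of() does not dereference, so this is latent rather than an active bug, but the tidier ordering would be (a sketch using the same names):

    static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
    {
            struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
            struct tegra_dc *dc;

            if (!crtc)                    /* check before converting */
                    return -ENODEV;

            dc = to_tegra_dc(crtc);
            tegra_dc_enable_vblank(dc);

            return 0;
    }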
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
deleted file mode 100644
index 03914953cb1c..000000000000
--- a/drivers/gpu/drm/tegra/fb.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "drm.h"
11
12static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
13{
14 struct host1x *host1x = drm->dev_private;
15
16 drm_fbdev_cma_hotplug_event(host1x->fbdev);
17}
18
19static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
20 .fb_create = drm_fb_cma_create,
21 .output_poll_changed = tegra_drm_fb_output_poll_changed,
22};
23
24int tegra_drm_fb_init(struct drm_device *drm)
25{
26 struct host1x *host1x = drm->dev_private;
27 struct drm_fbdev_cma *fbdev;
28
29 drm->mode_config.min_width = 0;
30 drm->mode_config.min_height = 0;
31
32 drm->mode_config.max_width = 4096;
33 drm->mode_config.max_height = 4096;
34
35 drm->mode_config.funcs = &tegra_drm_mode_funcs;
36
37 fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
38 drm->mode_config.num_connector);
39 if (IS_ERR(fbdev))
40 return PTR_ERR(fbdev);
41
42 host1x->fbdev = fbdev;
43
44 return 0;
45}
46
47void tegra_drm_fb_exit(struct drm_device *drm)
48{
49 struct host1x *host1x = drm->dev_private;
50
51 drm_fbdev_cma_fini(host1x->fbdev);
52}
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
deleted file mode 100644
index 92e25a7e00ea..000000000000
--- a/drivers/gpu/drm/tegra/host1x.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include "drm.h"
17
18struct host1x_drm_client {
19 struct host1x_client *client;
20 struct device_node *np;
21 struct list_head list;
22};
23
24static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
25{
26 struct host1x_drm_client *client;
27
28 client = kzalloc(sizeof(*client), GFP_KERNEL);
29 if (!client)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&client->list);
33 client->np = of_node_get(np);
34
35 list_add_tail(&client->list, &host1x->drm_clients);
36
37 return 0;
38}
39
40static int host1x_activate_drm_client(struct host1x *host1x,
41 struct host1x_drm_client *drm,
42 struct host1x_client *client)
43{
44 mutex_lock(&host1x->drm_clients_lock);
45 list_del_init(&drm->list);
46 list_add_tail(&drm->list, &host1x->drm_active);
47 drm->client = client;
48 mutex_unlock(&host1x->drm_clients_lock);
49
50 return 0;
51}
52
53static int host1x_remove_drm_client(struct host1x *host1x,
54 struct host1x_drm_client *client)
55{
56 mutex_lock(&host1x->drm_clients_lock);
57 list_del_init(&client->list);
58 mutex_unlock(&host1x->drm_clients_lock);
59
60 of_node_put(client->np);
61 kfree(client);
62
63 return 0;
64}
65
66static int host1x_parse_dt(struct host1x *host1x)
67{
68 static const char * const compat[] = {
69 "nvidia,tegra20-dc",
70 "nvidia,tegra20-hdmi",
71 "nvidia,tegra30-dc",
72 "nvidia,tegra30-hdmi",
73 };
74 unsigned int i;
75 int err;
76
77 for (i = 0; i < ARRAY_SIZE(compat); i++) {
78 struct device_node *np;
79
80 for_each_child_of_node(host1x->dev->of_node, np) {
81 if (of_device_is_compatible(np, compat[i]) &&
82 of_device_is_available(np)) {
83 err = host1x_add_drm_client(host1x, np);
84 if (err < 0)
85 return err;
86 }
87 }
88 }
89
90 return 0;
91}
92
93static int tegra_host1x_probe(struct platform_device *pdev)
94{
95 struct host1x *host1x;
96 struct resource *regs;
97 int err;
98
99 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
100 if (!host1x)
101 return -ENOMEM;
102
103 mutex_init(&host1x->drm_clients_lock);
104 INIT_LIST_HEAD(&host1x->drm_clients);
105 INIT_LIST_HEAD(&host1x->drm_active);
106 mutex_init(&host1x->clients_lock);
107 INIT_LIST_HEAD(&host1x->clients);
108 host1x->dev = &pdev->dev;
109
110 err = host1x_parse_dt(host1x);
111 if (err < 0) {
112 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
113 return err;
114 }
115
116 host1x->clk = devm_clk_get(&pdev->dev, NULL);
117 if (IS_ERR(host1x->clk))
118 return PTR_ERR(host1x->clk);
119
120 err = clk_prepare_enable(host1x->clk);
121 if (err < 0)
122 return err;
123
124 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
125 if (!regs) {
126 err = -ENXIO;
127 goto err;
128 }
129
130 err = platform_get_irq(pdev, 0);
131 if (err < 0)
132 goto err;
133
134 host1x->syncpt = err;
135
136 err = platform_get_irq(pdev, 1);
137 if (err < 0)
138 goto err;
139
140 host1x->irq = err;
141
142 host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
143 if (IS_ERR(host1x->regs)) {
144 err = PTR_ERR(host1x->regs);
145 goto err;
146 }
147
148 platform_set_drvdata(pdev, host1x);
149
150 return 0;
151
152err:
153 clk_disable_unprepare(host1x->clk);
154 return err;
155}
156
157static int tegra_host1x_remove(struct platform_device *pdev)
158{
159 struct host1x *host1x = platform_get_drvdata(pdev);
160
161 clk_disable_unprepare(host1x->clk);
162
163 return 0;
164}
165
166int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
167{
168 struct host1x_client *client;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_init) {
174 int err = client->ops->drm_init(client, drm);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM setup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 return err;
180 }
181 }
182 }
183
184 mutex_unlock(&host1x->clients_lock);
185
186 return 0;
187}
188
189int host1x_drm_exit(struct host1x *host1x)
190{
191 struct platform_device *pdev = to_platform_device(host1x->dev);
192 struct host1x_client *client;
193
194 if (!host1x->drm)
195 return 0;
196
197 mutex_lock(&host1x->clients_lock);
198
199 list_for_each_entry_reverse(client, &host1x->clients, list) {
200 if (client->ops && client->ops->drm_exit) {
201 int err = client->ops->drm_exit(client);
202 if (err < 0) {
203 dev_err(host1x->dev,
204 "DRM cleanup failed for %s: %d\n",
205 dev_name(client->dev), err);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&host1x->clients_lock);
212
213 drm_platform_exit(&tegra_drm_driver, pdev);
214 host1x->drm = NULL;
215
216 return 0;
217}
218
219int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 mutex_lock(&host1x->clients_lock);
225 list_add_tail(&client->list, &host1x->clients);
226 mutex_unlock(&host1x->clients_lock);
227
228 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
229 if (drm->np == client->dev->of_node)
230 host1x_activate_drm_client(host1x, drm, client);
231
232 if (list_empty(&host1x->drm_clients)) {
233 struct platform_device *pdev = to_platform_device(host1x->dev);
234
235 err = drm_platform_init(&tegra_drm_driver, pdev);
236 if (err < 0) {
237 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
238 return err;
239 }
240 }
241
242 client->host1x = host1x;
243
244 return 0;
245}
246
247int host1x_unregister_client(struct host1x *host1x,
248 struct host1x_client *client)
249{
250 struct host1x_drm_client *drm, *tmp;
251 int err;
252
253 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
254 if (drm->client == client) {
255 err = host1x_drm_exit(host1x);
256 if (err < 0) {
257 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
258 err);
259 return err;
260 }
261
262 host1x_remove_drm_client(host1x, drm);
263 break;
264 }
265 }
266
267 mutex_lock(&host1x->clients_lock);
268 list_del_init(&client->list);
269 mutex_unlock(&host1x->clients_lock);
270
271 return 0;
272}
273
274static struct of_device_id tegra_host1x_of_match[] = {
275 { .compatible = "nvidia,tegra30-host1x", },
276 { .compatible = "nvidia,tegra20-host1x", },
277 { },
278};
279MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
280
281struct platform_driver tegra_host1x_driver = {
282 .driver = {
283 .name = "tegra-host1x",
284 .owner = THIS_MODULE,
285 .of_match_table = tegra_host1x_of_match,
286 },
287 .probe = tegra_host1x_probe,
288 .remove = tegra_host1x_remove,
289};
290
291static int __init tegra_host1x_init(void)
292{
293 int err;
294
295 err = platform_driver_register(&tegra_host1x_driver);
296 if (err < 0)
297 return err;
298
299 err = platform_driver_register(&tegra_dc_driver);
300 if (err < 0)
301 goto unregister_host1x;
302
303 err = platform_driver_register(&tegra_hdmi_driver);
304 if (err < 0)
305 goto unregister_dc;
306
307 return 0;
308
309unregister_dc:
310 platform_driver_unregister(&tegra_dc_driver);
311unregister_host1x:
312 platform_driver_unregister(&tegra_host1x_driver);
313 return err;
314}
315module_init(tegra_host1x_init);
316
317static void __exit tegra_host1x_exit(void)
318{
319 platform_driver_unregister(&tegra_hdmi_driver);
320 platform_driver_unregister(&tegra_dc_driver);
321 platform_driver_unregister(&tegra_host1x_driver);
322}
323module_exit(tegra_host1x_exit);
324
325MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
326MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
327MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index d24d04013476..e461e9972455 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -4,8 +4,7 @@ config DRM_TILCDC
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER 5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select OF_VIDEOMODE 7 select VIDEOMODE_HELPERS
8 select OF_DISPLAY_TIMING
9 select BACKLIGHT_CLASS_DEVICE 8 select BACKLIGHT_CLASS_DEVICE
10 help 9 help
11	  Choose this option if you have a TI SoC with LCDC display	10	  Choose this option if you have a TI SoC with LCDC display
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index deda656b10e7..7d2eefe94bf7 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,4 +1,7 @@
1ccflags-y := -Iinclude/drm -Werror 1ccflags-y := -Iinclude/drm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
2 5
3tilcdc-y := \ 6tilcdc-y := \
4 tilcdc_crtc.o \ 7 tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c5b592dc1970..2b5461bcd9fb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev)
75 mod->funcs->modeset_init(mod, dev); 75 mod->funcs->modeset_init(mod, dev);
76 } 76 }
77 77
78	if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {	78	if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
79 /* oh nos! */ 79 /* oh nos! */
80 dev_err(dev->dev, "no encoders/connectors found\n"); 80 dev_err(dev->dev, "no encoders/connectors found\n");
81 return -ENXIO; 81 return -ENXIO;
@@ -299,11 +299,10 @@ static int tilcdc_irq_postinstall(struct drm_device *dev)
299 struct tilcdc_drm_private *priv = dev->dev_private; 299 struct tilcdc_drm_private *priv = dev->dev_private;
300 300
301 /* enable FIFO underflow irq: */ 301 /* enable FIFO underflow irq: */
302 if (priv->rev == 1) { 302 if (priv->rev == 1)
303 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA); 303 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
304 } else { 304 else
305 tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA); 305 tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
306 }
307 306
308 return 0; 307 return 0;
309} 308}
@@ -363,7 +362,7 @@ static const struct {
363 uint8_t rev; 362 uint8_t rev;
364 uint8_t save; 363 uint8_t save;
365 uint32_t reg; 364 uint32_t reg;
366} registers[] = { 365} registers[] = {
367#define REG(rev, save, reg) { #reg, rev, save, reg } 366#define REG(rev, save, reg) { #reg, rev, save, reg }
368 /* exists in revision 1: */ 367 /* exists in revision 1: */
369 REG(1, false, LCDC_PID_REG), 368 REG(1, false, LCDC_PID_REG),
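The REG() macro above uses the preprocessor's # (stringize) operator so each table entry records the register's symbolic name next to its numeric value. A small self-contained illustration of the same technique (hypothetical register values, not from tilcdc):

    #include <stdio.h>
    #include <stdint.h>

    #define LCDC_PID_REG  0x00    /* hypothetical offsets for illustration */
    #define LCDC_CTRL_REG 0x04

    struct reg_entry { const char *name; uint32_t reg; };
    #define REG(reg) { #reg, reg }        /* #reg expands to "LCDC_PID_REG" etc. */

    static const struct reg_entry registers[] = {
            REG(LCDC_PID_REG),
            REG(LCDC_CTRL_REG),
    };

    int main(void)
    {
            for (size_t i = 0; i < sizeof(registers) / sizeof(registers[0]); i++)
                    printf("%s = 0x%02x\n", registers[i].name, registers[i].reg);
            return 0;
    }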
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 580b74e2022b..09176654fddb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -173,7 +173,7 @@ static int panel_connector_get_modes(struct drm_connector *connector)
173 struct drm_display_mode *mode = drm_mode_create(dev); 173 struct drm_display_mode *mode = drm_mode_create(dev);
174 struct videomode vm; 174 struct videomode vm;
175 175
176 if (videomode_from_timing(timings, &vm, i)) 176 if (videomode_from_timings(timings, &vm, i))
177 break; 177 break;
178 178
179 drm_display_mode_from_videomode(&vm, mode); 179 drm_display_mode_from_videomode(&vm, mode);
@@ -305,7 +305,7 @@ static const struct tilcdc_module_ops panel_module_ops = {
305 */ 305 */
306 306
307/* maybe move this somewhere common if it is needed by other outputs? */ 307/* maybe move this somewhere common if it is needed by other outputs? */
308static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np) 308static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
309{ 309{
310 struct device_node *info_np; 310 struct device_node *info_np;
311 struct tilcdc_panel_info *info; 311 struct tilcdc_panel_info *info;
@@ -413,7 +413,6 @@ static struct of_device_id panel_of_match[] = {
413 { .compatible = "ti,tilcdc,panel", }, 413 { .compatible = "ti,tilcdc,panel", },
414 { }, 414 { },
415}; 415};
416MODULE_DEVICE_TABLE(of, panel_of_match);
417 416
418struct platform_driver panel_driver = { 417struct platform_driver panel_driver = {
419 .probe = panel_probe, 418 .probe = panel_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 568dc1c08e6c..db1d2fc9dfb5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -353,7 +353,6 @@ static struct of_device_id slave_of_match[] = {
353 { .compatible = "ti,tilcdc,slave", }, 353 { .compatible = "ti,tilcdc,slave", },
354 { }, 354 { },
355}; 355};
356MODULE_DEVICE_TABLE(of, slave_of_match);
357 356
358struct platform_driver slave_driver = { 357struct platform_driver slave_driver = {
359 .probe = slave_probe, 358 .probe = slave_probe,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 58d487ba2414..a36788fbcd98 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -396,7 +396,6 @@ static struct of_device_id tfp410_of_match[] = {
396 { .compatible = "ti,tilcdc,tfp410", }, 396 { .compatible = "ti,tilcdc,tfp410", },
397 { }, 397 { },
398}; 398};
399MODULE_DEVICE_TABLE(of, tfp410_of_match);
400 399
401struct platform_driver tfp410_driver = { 400struct platform_driver tfp410_driver = {
402 .probe = tfp410_probe, 401 .probe = tfp410_probe,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 8be35c809c7b..af894584dd90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
86 mutex_lock(&man->io_reserve_mutex); 86 mutex_lock(&man->io_reserve_mutex);
87 return 0; 87 return 0;
88} 88}
89EXPORT_SYMBOL(ttm_mem_io_lock);
89 90
90void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) 91void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
91{ 92{
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
94 95
95 mutex_unlock(&man->io_reserve_mutex); 96 mutex_unlock(&man->io_reserve_mutex);
96} 97}
98EXPORT_SYMBOL(ttm_mem_io_unlock);
97 99
98static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) 100static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
99{ 101{
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
111 return 0; 113 return 0;
112} 114}
113 115
114static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, 116
115 struct ttm_mem_reg *mem) 117int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
118 struct ttm_mem_reg *mem)
116{ 119{
117 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 120 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
118 int ret = 0; 121 int ret = 0;
@@ -134,9 +137,10 @@ retry:
134 } 137 }
135 return ret; 138 return ret;
136} 139}
140EXPORT_SYMBOL(ttm_mem_io_reserve);
137 141
138static void ttm_mem_io_free(struct ttm_bo_device *bdev, 142void ttm_mem_io_free(struct ttm_bo_device *bdev,
139 struct ttm_mem_reg *mem) 143 struct ttm_mem_reg *mem)
140{ 144{
141 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 145 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
142 146
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
149 bdev->driver->io_mem_free(bdev, mem); 153 bdev->driver->io_mem_free(bdev, mem);
150 154
151} 155}
156EXPORT_SYMBOL(ttm_mem_io_free);
152 157
153int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) 158int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
154{ 159{
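The hunks above promote ttm_mem_io_lock/unlock and ttm_mem_io_reserve/free from static helpers to exported symbols, so TTM-based drivers can bracket I/O-space access themselves. A hedged sketch of the pairing a driver-side caller would follow (inferred from the in-tree callers, not a documented contract):

    int ret;

    ret = ttm_mem_io_lock(man, true);     /* interruptible lock */
    if (ret)
            return ret;

    ret = ttm_mem_io_reserve(bdev, mem);  /* claim I/O space for mem */
    if (!ret) {
            /* ... access the mapping described by mem->bus ... */
            ttm_mem_io_free(bdev, mem);   /* release the reservation */
    }

    ttm_mem_io_unlock(man);
    return ret;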
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 74705f329d99..3df9f16b041c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -147,7 +147,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
147 147
148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 148 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
149 bo->vm_node->start - vma->vm_pgoff; 149 bo->vm_node->start - vma->vm_pgoff;
150 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + 150 page_last = vma_pages(vma) +
151 bo->vm_node->start - vma->vm_pgoff; 151 bo->vm_node->start - vma->vm_pgoff;
152 152
153 if (unlikely(page_offset >= bo->num_pages)) { 153 if (unlikely(page_offset >= bo->num_pages)) {
@@ -258,7 +258,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
258 258
259 read_lock(&bdev->vm_lock); 259 read_lock(&bdev->vm_lock);
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); 261 vma_pages(vma));
262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref)) 262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 bo = NULL; 263 bo = NULL;
264 read_unlock(&bdev->vm_lock); 264 read_unlock(&bdev->vm_lock);
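The vma_pages() substitutions are behaviour-preserving: the helper in <linux/mm.h> is defined as exactly the shift expression it replaces:

    /* include/linux/mm.h */
    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
            return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }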
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index fe5cdbcf2636..b44d548c56f8 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
61 int ret; 61 int ret;
62 62
63 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
64 if (!edid) {
65 drm_mode_connector_update_edid_property(connector, NULL);
66 return 0;
67 }
64 68
65 /* 69 /*
66 * We only read the main block, but if the monitor reports extension 70 * We only read the main block, but if the monitor reports extension
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 9f4be3d4a02e..dc0c065f8d39 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -482,7 +482,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
482 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper; 482 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
483 struct drm_device *dev = ufbdev->helper.dev; 483 struct drm_device *dev = ufbdev->helper.dev;
484 struct fb_info *info; 484 struct fb_info *info;
485 struct device *device = &dev->usbdev->dev; 485 struct device *device = dev->dev;
486 struct drm_framebuffer *fb; 486 struct drm_framebuffer *fb;
487 struct drm_mode_fb_cmd2 mode_cmd; 487 struct drm_mode_fb_cmd2 mode_cmd;
488 struct udl_gem_object *obj; 488 struct udl_gem_object *obj;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3816270ba49b..ef034fa3e6f5 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
303 if (IS_ERR(attach)) 303 if (IS_ERR(attach))
304 return ERR_CAST(attach); 304 return ERR_CAST(attach);
305 305
306 get_dma_buf(dma_buf);
307
306 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 308 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
307 if (IS_ERR(sg)) { 309 if (IS_ERR(sg)) {
308 ret = PTR_ERR(sg); 310 ret = PTR_ERR(sg);
@@ -322,5 +324,7 @@ fail_unmap:
322 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); 324 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
323fail_detach: 325fail_detach:
324 dma_buf_detach(dma_buf, attach); 326 dma_buf_detach(dma_buf, attach);
327 dma_buf_put(dma_buf);
328
325 return ERR_PTR(ret); 329 return ERR_PTR(ret);
326} 330}
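The added get_dma_buf()/dma_buf_put() pair fixes the import's reference counting: the importer must hold its own reference on the dma-buf for as long as the attachment lives, dropping it only on the failure path (on success the reference stays with the imported object). Condensed, the pattern is:

    attach = dma_buf_attach(dma_buf, dev->dev);
    if (IS_ERR(attach))
            return ERR_CAST(attach);

    get_dma_buf(dma_buf);                  /* reference owned by the import */

    sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sg)) {
            dma_buf_detach(dma_buf, attach);
            dma_buf_put(dma_buf);          /* balance the reference on error */
            return ERR_CAST(sg);
    }

    /* ... build the GEM object; keep the reference until it is freed ... */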
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
new file mode 100644
index 000000000000..ccfd42b23606
--- /dev/null
+++ b/drivers/gpu/host1x/Kconfig
@@ -0,0 +1,24 @@
1config TEGRA_HOST1X
2 tristate "NVIDIA Tegra host1x driver"
3 depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
4 help
5 Driver for the NVIDIA Tegra host1x hardware.
6
7 The Tegra host1x module is the DMA engine for register access to
8 Tegra's graphics- and multimedia-related modules. The modules served
9 by host1x are referred to as clients. host1x includes some other
10 functionality, such as synchronization.
11
12if TEGRA_HOST1X
13
14config TEGRA_HOST1X_FIREWALL
15 bool "Enable HOST1X security firewall"
16 default y
17 help
18	  Say yes if the kernel should protect command streams from tampering.
19
20 If unsure, choose Y.
21
22source "drivers/gpu/host1x/drm/Kconfig"
23
24endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
new file mode 100644
index 000000000000..3b037b6e0298
--- /dev/null
+++ b/drivers/gpu/host1x/Makefile
@@ -0,0 +1,20 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-y = \
4 syncpt.o \
5 dev.o \
6 intr.o \
7 cdma.o \
8 channel.o \
9 job.o \
10 debug.o \
11 hw/host1x01.o
12
13ccflags-y += -Iinclude/drm
14ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
15
16host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
17host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
18host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
19host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
20obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
new file mode 100644
index 000000000000..de72172d3b5f
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.c
@@ -0,0 +1,491 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19
20#include <asm/cacheflush.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/kfifo.h>
26#include <linux/slab.h>
27#include <trace/events/host1x.h>
28
29#include "cdma.h"
30#include "channel.h"
31#include "dev.h"
32#include "debug.h"
33#include "host1x_bo.h"
34#include "job.h"
35
36/*
37 * push_buffer
38 *
39 * The push buffer is a circular array of words to be fetched by command DMA.
40 * Note that it works slightly differently to the sync queue; fence == pos
41 * means that the push buffer is full, not empty.
42 */
43
44#define HOST1X_PUSHBUFFER_SLOTS 512
45
46/*
47 * Clean up push buffer resources
48 */
49static void host1x_pushbuffer_destroy(struct push_buffer *pb)
50{
51 struct host1x_cdma *cdma = pb_to_cdma(pb);
52 struct host1x *host1x = cdma_to_host1x(cdma);
53
54 if (pb->phys != 0)
55 dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
56 pb->mapped, pb->phys);
57
58 pb->mapped = NULL;
59 pb->phys = 0;
60}
61
62/*
63 * Init push buffer resources
64 */
65static int host1x_pushbuffer_init(struct push_buffer *pb)
66{
67 struct host1x_cdma *cdma = pb_to_cdma(pb);
68 struct host1x *host1x = cdma_to_host1x(cdma);
69
70 pb->mapped = NULL;
71 pb->phys = 0;
72 pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
73
74 /* initialize buffer pointers */
75 pb->fence = pb->size_bytes - 8;
76 pb->pos = 0;
77
78 /* allocate and map pushbuffer memory */
79 pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
80 &pb->phys, GFP_KERNEL);
81 if (!pb->mapped)
82 goto fail;
83
84 host1x_hw_pushbuffer_init(host1x, pb);
85
86 return 0;
87
88fail:
89 host1x_pushbuffer_destroy(pb);
90 return -ENOMEM;
91}
92
93/*
94 * Push two words to the push buffer
95 * Caller must ensure push buffer is not full
96 */
97static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
98{
99 u32 pos = pb->pos;
100 u32 *p = (u32 *)((u32)pb->mapped + pos);
101 WARN_ON(pos == pb->fence);
102 *(p++) = op1;
103 *(p++) = op2;
104 pb->pos = (pos + 8) & (pb->size_bytes - 1);
105}
106
107/*
108 * Pop a number of two word slots from the push buffer
109 * Caller must ensure push buffer is not empty
110 */
111static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
112{
113 /* Advance the next write position */
114 pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
115}
116
117/*
118 * Return the number of two word slots free in the push buffer
119 */
120static u32 host1x_pushbuffer_space(struct push_buffer *pb)
121{
122 return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
123}
124
125/*
126 * Sleep (if necessary) until the requested event happens
127 * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
128 * - Returns 1
129 * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
130 * - Return the amount of space (> 0)
131 * Must be called with the cdma lock held.
132 */
133unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
134 enum cdma_event event)
135{
136 for (;;) {
137 unsigned int space;
138
139 if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
140 space = list_empty(&cdma->sync_queue) ? 1 : 0;
141 else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
142 struct push_buffer *pb = &cdma->push_buffer;
143 space = host1x_pushbuffer_space(pb);
144 } else {
145 WARN_ON(1);
146 return -EINVAL;
147 }
148
149 if (space)
150 return space;
151
152 trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
153 event);
154
155 /* If somebody has managed to already start waiting, yield */
156 if (cdma->event != CDMA_EVENT_NONE) {
157 mutex_unlock(&cdma->lock);
158 schedule();
159 mutex_lock(&cdma->lock);
160 continue;
161 }
162 cdma->event = event;
163
164 mutex_unlock(&cdma->lock);
165 down(&cdma->sem);
166 mutex_lock(&cdma->lock);
167 }
168 return 0;
169}
170
171/*
172 * Start timer that tracks the time spent by the job.
173 * Must be called with the cdma lock held.
174 */
175static void cdma_start_timer_locked(struct host1x_cdma *cdma,
176 struct host1x_job *job)
177{
178 struct host1x *host = cdma_to_host1x(cdma);
179
180 if (cdma->timeout.client) {
181 /* timer already started */
182 return;
183 }
184
185 cdma->timeout.client = job->client;
186 cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
187 cdma->timeout.syncpt_val = job->syncpt_end;
188 cdma->timeout.start_ktime = ktime_get();
189
190 schedule_delayed_work(&cdma->timeout.wq,
191 msecs_to_jiffies(job->timeout));
192}
193
194/*
195 * Stop timer when a buffer submission completes.
196 * Must be called with the cdma lock held.
197 */
198static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
199{
200 cancel_delayed_work(&cdma->timeout.wq);
201 cdma->timeout.client = 0;
202}
203
204/*
205 * For all sync queue entries that have already finished according to the
206 * current sync point registers:
207 * - unpin & unref their mems
208 * - pop their push buffer slots
209 * - remove them from the sync queue
210 * This is normally called from the host code's worker thread, but can be
211 * called manually if necessary.
212 * Must be called with the cdma lock held.
213 */
214static void update_cdma_locked(struct host1x_cdma *cdma)
215{
216 bool signal = false;
217 struct host1x *host1x = cdma_to_host1x(cdma);
218 struct host1x_job *job, *n;
219
220 /* If CDMA is stopped, queue is cleared and we can return */
221 if (!cdma->running)
222 return;
223
224 /*
225 * Walk the sync queue, reading the sync point registers as necessary,
226 * to consume as many sync queue entries as possible without blocking
227 */
228 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
229 struct host1x_syncpt *sp =
230 host1x_syncpt_get(host1x, job->syncpt_id);
231
232 /* Check whether this syncpt has completed, and bail if not */
233 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
234 /* Start timer on next pending syncpt */
235 if (job->timeout)
236 cdma_start_timer_locked(cdma, job);
237 break;
238 }
239
240 /* Cancel timeout, when a buffer completes */
241 if (cdma->timeout.client)
242 stop_cdma_timer_locked(cdma);
243
244 /* Unpin the memory */
245 host1x_job_unpin(job);
246
247 /* Pop push buffer slots */
248 if (job->num_slots) {
249 struct push_buffer *pb = &cdma->push_buffer;
250 host1x_pushbuffer_pop(pb, job->num_slots);
251 if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
252 signal = true;
253 }
254
255 list_del(&job->list);
256 host1x_job_put(job);
257 }
258
259 if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
260 list_empty(&cdma->sync_queue))
261 signal = true;
262
263 if (signal) {
264 cdma->event = CDMA_EVENT_NONE;
265 up(&cdma->sem);
266 }
267}
268
269void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
270 struct device *dev)
271{
272 u32 restart_addr;
273 u32 syncpt_incrs;
274 struct host1x_job *job = NULL;
275 u32 syncpt_val;
276 struct host1x *host1x = cdma_to_host1x(cdma);
277
278 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
279
280 dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
281 __func__, syncpt_val);
282
283 /*
284 * Move the sync_queue read pointer to the first entry that hasn't
285 * completed based on the current HW syncpt value. It's likely there
286 * won't be any (i.e. we're still at the head), but covers the case
287 * where a syncpt incr happens just prior/during the teardown.
288 */
289
290 dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
291 __func__);
292
293 list_for_each_entry(job, &cdma->sync_queue, list) {
294 if (syncpt_val < job->syncpt_end)
295 break;
296
297 host1x_job_dump(dev, job);
298 }
299
300 /*
301 * Walk the sync_queue, first incrementing with the CPU syncpts that
302 * are partially executed (the first buffer) or fully skipped while
303 * still in the current context (slots are also NOP-ed).
304 *
305 * At the point contexts are interleaved, syncpt increments must be
306 * done inline with the pushbuffer from a GATHER buffer to maintain
307 * the order (slots are modified to be a GATHER of syncpt incrs).
308 *
309 * Note: save in restart_addr the location where the timed out buffer
310 * started in the PB, so we can start the refetch from there (with the
311 * modified NOP-ed PB slots). This lets things appear to have completed
312 * properly for this buffer and resources are freed.
313 */
314
315 dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
316 __func__);
317
318 if (!list_empty(&cdma->sync_queue))
319 restart_addr = job->first_get;
320 else
321 restart_addr = cdma->last_pos;
322
323 /* do CPU increments as long as this context continues */
324 list_for_each_entry_from(job, &cdma->sync_queue, list) {
325 /* different context, gets us out of this loop */
326 if (job->client != cdma->timeout.client)
327 break;
328
329 /* won't need a timeout when replayed */
330 job->timeout = 0;
331
332 syncpt_incrs = job->syncpt_end - syncpt_val;
333 dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
334
335 host1x_job_dump(dev, job);
336
337 /* safe to use CPU to incr syncpts */
338 host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
339 syncpt_incrs, job->syncpt_end,
340 job->num_slots);
341
342 syncpt_val += syncpt_incrs;
343 }
344
345	/* The following submits from the same client may be dependent on the
346 * failed submit and therefore they may fail. Force a small timeout
347 * to make the queue cleanup faster */
348
349 list_for_each_entry_from(job, &cdma->sync_queue, list)
350 if (job->client == cdma->timeout.client)
351 job->timeout = min_t(unsigned int, job->timeout, 500);
352
353 dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
354
355 /* roll back DMAGET and start up channel again */
356 host1x_hw_cdma_resume(host1x, cdma, restart_addr);
357}
358
359/*
360 * Create a cdma
361 */
362int host1x_cdma_init(struct host1x_cdma *cdma)
363{
364 int err;
365
366 mutex_init(&cdma->lock);
367 sema_init(&cdma->sem, 0);
368
369 INIT_LIST_HEAD(&cdma->sync_queue);
370
371 cdma->event = CDMA_EVENT_NONE;
372 cdma->running = false;
373 cdma->torndown = false;
374
375 err = host1x_pushbuffer_init(&cdma->push_buffer);
376 if (err)
377 return err;
378 return 0;
379}
380
381/*
382 * Destroy a cdma
383 */
384int host1x_cdma_deinit(struct host1x_cdma *cdma)
385{
386 struct push_buffer *pb = &cdma->push_buffer;
387 struct host1x *host1x = cdma_to_host1x(cdma);
388
389 if (cdma->running) {
390 pr_warn("%s: CDMA still running\n", __func__);
391 return -EBUSY;
392 }
393
394 host1x_pushbuffer_destroy(pb);
395 host1x_hw_cdma_timeout_destroy(host1x, cdma);
396
397 return 0;
398}
399
400/*
401 * Begin a cdma submit
402 */
403int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
404{
405 struct host1x *host1x = cdma_to_host1x(cdma);
406
407 mutex_lock(&cdma->lock);
408
409 if (job->timeout) {
410 /* init state on first submit with timeout value */
411 if (!cdma->timeout.initialized) {
412 int err;
413 err = host1x_hw_cdma_timeout_init(host1x, cdma,
414 job->syncpt_id);
415 if (err) {
416 mutex_unlock(&cdma->lock);
417 return err;
418 }
419 }
420 }
421 if (!cdma->running)
422 host1x_hw_cdma_start(host1x, cdma);
423
424 cdma->slots_free = 0;
425 cdma->slots_used = 0;
426 cdma->first_get = cdma->push_buffer.pos;
427
428 trace_host1x_cdma_begin(dev_name(job->channel->dev));
429 return 0;
430}
431
432/*
433 * Push two words into a push buffer slot
434 * Blocks as necessary if the push buffer is full.
435 */
436void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
437{
438 struct host1x *host1x = cdma_to_host1x(cdma);
439 struct push_buffer *pb = &cdma->push_buffer;
440 u32 slots_free = cdma->slots_free;
441
442 if (host1x_debug_trace_cmdbuf)
443 trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
444 op1, op2);
445
446 if (slots_free == 0) {
447 host1x_hw_cdma_flush(host1x, cdma);
448 slots_free = host1x_cdma_wait_locked(cdma,
449 CDMA_EVENT_PUSH_BUFFER_SPACE);
450 }
451 cdma->slots_free = slots_free - 1;
452 cdma->slots_used++;
453 host1x_pushbuffer_push(pb, op1, op2);
454}
455
456/*
457 * End a cdma submit
458 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
459 * from the pushbuffer. The handles for a submit must all be pinned at the same
460 * time, but they can be unpinned in smaller chunks.
461 */
462void host1x_cdma_end(struct host1x_cdma *cdma,
463 struct host1x_job *job)
464{
465 struct host1x *host1x = cdma_to_host1x(cdma);
466 bool idle = list_empty(&cdma->sync_queue);
467
468 host1x_hw_cdma_flush(host1x, cdma);
469
470 job->first_get = cdma->first_get;
471 job->num_slots = cdma->slots_used;
472 host1x_job_get(job);
473 list_add_tail(&job->list, &cdma->sync_queue);
474
475 /* start timer on idle -> active transitions */
476 if (job->timeout && idle)
477 cdma_start_timer_locked(cdma, job);
478
479 trace_host1x_cdma_end(dev_name(job->channel->dev));
480 mutex_unlock(&cdma->lock);
481}
482
483/*
484 * Update cdma state according to current sync point values
485 */
486void host1x_cdma_update(struct host1x_cdma *cdma)
487{
488 mutex_lock(&cdma->lock);
489 update_cdma_locked(cdma);
490 mutex_unlock(&cdma->lock);
491}
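The fence == pos convention described at the top of the file falls out of the arithmetic in host1x_pushbuffer_space(): one 8-byte slot is sacrificed so that a full buffer and an empty buffer remain distinguishable. A small worked example with the values from host1x_pushbuffer_init() (512 slots of 8 bytes):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size_bytes = 512 * 8;   /* HOST1X_PUSHBUFFER_SLOTS * 8 */
            uint32_t fence = size_bytes - 8; /* as set in pushbuffer_init() */
            uint32_t pos = 0;

            /* same expression as host1x_pushbuffer_space() */
            printf("empty: %u free slots\n",
                   ((fence - pos) & (size_bytes - 1)) / 8);    /* 511 */

            pos = fence;                     /* after 511 pushes of 8 bytes */
            printf("full:  %u free slots\n",
                   ((fence - pos) & (size_bytes - 1)) / 8);    /* 0 */
            return 0;
    }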
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
new file mode 100644
index 000000000000..313c4b784348
--- /dev/null
+++ b/drivers/gpu/host1x/cdma.h
@@ -0,0 +1,100 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CDMA_H
20#define __HOST1X_CDMA_H
21
22#include <linux/sched.h>
23#include <linux/semaphore.h>
24#include <linux/list.h>
25
26struct host1x_syncpt;
27struct host1x_userctx_timeout;
28struct host1x_job;
29
30/*
31 * cdma
32 *
33 * This is in charge of a host command DMA channel.
34 * Sends ops to a push buffer, and takes responsibility for unpinning
35 * (& possibly freeing) of memory after those ops have completed.
36 * Producer:
37 * begin
38 * push - send ops to the push buffer
39 * end - start command DMA and enqueue handles to be unpinned
40 * Consumer:
41 * update - call to update sync queue and push buffer, unpin memory
42 */
43
44struct push_buffer {
45 u32 *mapped; /* mapped pushbuffer memory */
46 dma_addr_t phys; /* physical address of pushbuffer */
47 u32 fence; /* index we've written */
48 u32 pos; /* index to write to */
49 u32 size_bytes;
50};
51
52struct buffer_timeout {
53 struct delayed_work wq; /* work queue */
54 bool initialized; /* timer one-time setup flag */
55 struct host1x_syncpt *syncpt; /* buffer completion syncpt */
56 u32 syncpt_val; /* syncpt value when completed */
57 ktime_t start_ktime; /* starting time */
58 /* context timeout information */
59 int client;
60};
61
62enum cdma_event {
63 CDMA_EVENT_NONE, /* not waiting for any event */
64 CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
65 CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
66};
67
68struct host1x_cdma {
69 struct mutex lock; /* controls access to shared state */
70 struct semaphore sem; /* signalled when event occurs */
71 enum cdma_event event; /* event that sem is waiting for */
72 unsigned int slots_used; /* pb slots used in current submit */
73 unsigned int slots_free; /* pb slots free in current submit */
74 unsigned int first_get; /* DMAGET value, where submit begins */
75 unsigned int last_pos; /* last value written to DMAPUT */
76 struct push_buffer push_buffer; /* channel's push buffer */
77 struct list_head sync_queue; /* job queue */
78 struct buffer_timeout timeout; /* channel's timeout state/wq */
79 bool running;
80 bool torndown;
81};
82
83#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
84#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
85#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
86
87int host1x_cdma_init(struct host1x_cdma *cdma);
88int host1x_cdma_deinit(struct host1x_cdma *cdma);
89void host1x_cdma_stop(struct host1x_cdma *cdma);
90int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
91void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
92void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
93void host1x_cdma_update(struct host1x_cdma *cdma);
94void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
95 u32 *out);
96unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
97 enum cdma_event event);
98void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
99 struct device *dev);
100#endif
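The producer side described in the header comment maps directly onto the exported functions; a hedged sketch of one submit (job construction and the opcode words are placeholders):

    /* begin takes cdma->lock; end kicks DMA, queues the job and drops it. */
    err = host1x_cdma_begin(cdma, job);
    if (err)
            return err;

    host1x_cdma_push(cdma, op1, op2);   /* blocks if the push buffer is full */
    /* ... further opcode pairs ... */

    host1x_cdma_end(cdma, job);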
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
new file mode 100644
index 000000000000..83ea51b9f0fc
--- /dev/null
+++ b/drivers/gpu/host1x/channel.c
@@ -0,0 +1,126 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21
22#include "channel.h"
23#include "dev.h"
24#include "job.h"
25
26/* Constructor for the host1x device list */
27int host1x_channel_list_init(struct host1x *host)
28{
29 INIT_LIST_HEAD(&host->chlist.list);
30 mutex_init(&host->chlist_mutex);
31
32 if (host->info->nb_channels > BITS_PER_LONG) {
33 WARN(1, "host1x hardware has more channels than supported by the driver\n");
34 return -ENOSYS;
35 }
36
37 return 0;
38}
39
40int host1x_job_submit(struct host1x_job *job)
41{
42 struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
43
44 return host1x_hw_channel_submit(host, job);
45}
46
47struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
48{
49 int err = 0;
50
51 mutex_lock(&channel->reflock);
52
53 if (channel->refcount == 0)
54 err = host1x_cdma_init(&channel->cdma);
55
56 if (!err)
57 channel->refcount++;
58
59 mutex_unlock(&channel->reflock);
60
61 return err ? NULL : channel;
62}
63
64void host1x_channel_put(struct host1x_channel *channel)
65{
66 mutex_lock(&channel->reflock);
67
68 if (channel->refcount == 1) {
69 struct host1x *host = dev_get_drvdata(channel->dev->parent);
70
71 host1x_hw_cdma_stop(host, &channel->cdma);
72 host1x_cdma_deinit(&channel->cdma);
73 }
74
75 channel->refcount--;
76
77 mutex_unlock(&channel->reflock);
78}
79
80struct host1x_channel *host1x_channel_request(struct device *dev)
81{
82 struct host1x *host = dev_get_drvdata(dev->parent);
83 int max_channels = host->info->nb_channels;
84 struct host1x_channel *channel = NULL;
85 int index, err;
86
87 mutex_lock(&host->chlist_mutex);
88
89 index = find_first_zero_bit(&host->allocated_channels, max_channels);
90 if (index >= max_channels)
91 goto fail;
92
93 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
94 if (!channel)
95 goto fail;
96
97 err = host1x_hw_channel_init(host, channel, index);
98 if (err < 0)
99 goto fail;
100
101 /* Link device to host1x_channel */
102 channel->dev = dev;
103
104 /* Add to channel list */
105 list_add_tail(&channel->list, &host->chlist.list);
106
107 host->allocated_channels |= BIT(index);
108
109 mutex_unlock(&host->chlist_mutex);
110 return channel;
111
112fail:
113 dev_err(dev, "failed to init channel\n");
114 kfree(channel);
115 mutex_unlock(&host->chlist_mutex);
116 return NULL;
117}
118
119void host1x_channel_free(struct host1x_channel *channel)
120{
121 struct host1x *host = dev_get_drvdata(channel->dev->parent);
122
123 host->allocated_channels &= ~BIT(channel->id);
124 list_del(&channel->list);
125 kfree(channel);
126}
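Taken together, the functions above define the channel lifetime: request allocates a channel and marks its index used, the first get initializes the CDMA, the last put stops it, and free releases the index. A hedged sketch of the expected call sequence:

    struct host1x_channel *ch = host1x_channel_request(dev);
    if (!ch)
            return -EBUSY;                /* no free channel index */

    if (!host1x_channel_get(ch)) {        /* refcount 0 -> 1 inits the CDMA */
            host1x_channel_free(ch);
            return -ENOMEM;
    }

    /* ... build jobs and call host1x_job_submit() ... */

    host1x_channel_put(ch);               /* refcount 1 -> 0 tears down CDMA */
    host1x_channel_free(ch);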
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
new file mode 100644
index 000000000000..48723b8eea42
--- /dev/null
+++ b/drivers/gpu/host1x/channel.h
@@ -0,0 +1,52 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_CHANNEL_H
20#define __HOST1X_CHANNEL_H
21
22#include <linux/io.h>
23
24#include "cdma.h"
25
26struct host1x;
27
28struct host1x_channel {
29 struct list_head list;
30
31 unsigned int refcount;
32 unsigned int id;
33 struct mutex reflock;
34 struct mutex submitlock;
35 void __iomem *regs;
36 struct device *dev;
37 struct host1x_cdma cdma;
38};
39
40/* channel list operations */
41int host1x_channel_list_init(struct host1x *host);
42
43struct host1x_channel *host1x_channel_request(struct device *dev);
44void host1x_channel_free(struct host1x_channel *channel);
45struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
46void host1x_channel_put(struct host1x_channel *channel);
47int host1x_job_submit(struct host1x_job *job);
48
49#define host1x_for_each_channel(host, channel) \
50 list_for_each_entry(channel, &host->chlist.list, list)
51
52#endif
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
new file mode 100644
index 000000000000..3ec7d77de24d
--- /dev/null
+++ b/drivers/gpu/host1x/debug.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/uaccess.h>
21
22#include <linux/io.h>
23
24#include "dev.h"
25#include "debug.h"
26#include "channel.h"
27
28unsigned int host1x_debug_trace_cmdbuf;
29
30static pid_t host1x_debug_force_timeout_pid;
31static u32 host1x_debug_force_timeout_val;
32static u32 host1x_debug_force_timeout_channel;
33
34void host1x_debug_output(struct output *o, const char *fmt, ...)
35{
36 va_list args;
37 int len;
38
39 va_start(args, fmt);
40 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
41 va_end(args);
42 o->fn(o->ctx, o->buf, len);
43}
44
45static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
46{
47 struct host1x *m = dev_get_drvdata(ch->dev->parent);
48 struct output *o = data;
49
50 mutex_lock(&ch->reflock);
51 if (ch->refcount) {
52 mutex_lock(&ch->cdma.lock);
53 if (show_fifo)
54 host1x_hw_show_channel_fifo(m, ch, o);
55 host1x_hw_show_channel_cdma(m, ch, o);
56 mutex_unlock(&ch->cdma.lock);
57 }
58 mutex_unlock(&ch->reflock);
59
60 return 0;
61}
62
63static void show_syncpts(struct host1x *m, struct output *o)
64{
65 int i;
66 host1x_debug_output(o, "---- syncpts ----\n");
67 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
68 u32 max = host1x_syncpt_read_max(m->syncpt + i);
69 u32 min = host1x_syncpt_load(m->syncpt + i);
70 if (!min && !max)
71 continue;
72 host1x_debug_output(o, "id %d (%s) min %d max %d\n",
73 i, m->syncpt[i].name, min, max);
74 }
75
76 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
77 u32 base_val;
78 base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
79 if (base_val)
80 host1x_debug_output(o, "waitbase id %d val %d\n", i,
81 base_val);
82 }
83
84 host1x_debug_output(o, "\n");
85}
86
87static void show_all(struct host1x *m, struct output *o)
88{
89 struct host1x_channel *ch;
90
91 host1x_hw_show_mlocks(m, o);
92 show_syncpts(m, o);
93 host1x_debug_output(o, "---- channels ----\n");
94
95 host1x_for_each_channel(m, ch)
96 show_channels(ch, o, true);
97}
98
99#ifdef CONFIG_DEBUG_FS
100static void show_all_no_fifo(struct host1x *host1x, struct output *o)
101{
102 struct host1x_channel *ch;
103
104 host1x_hw_show_mlocks(host1x, o);
105 show_syncpts(host1x, o);
106 host1x_debug_output(o, "---- channels ----\n");
107
108 host1x_for_each_channel(host1x, ch)
109 show_channels(ch, o, false);
110}
111
112static int host1x_debug_show_all(struct seq_file *s, void *unused)
113{
114 struct output o = {
115 .fn = write_to_seqfile,
116 .ctx = s
117 };
118 show_all(s->private, &o);
119 return 0;
120}
121
122static int host1x_debug_show(struct seq_file *s, void *unused)
123{
124 struct output o = {
125 .fn = write_to_seqfile,
126 .ctx = s
127 };
128 show_all_no_fifo(s->private, &o);
129 return 0;
130}
131
132static int host1x_debug_open_all(struct inode *inode, struct file *file)
133{
134 return single_open(file, host1x_debug_show_all, inode->i_private);
135}
136
137static const struct file_operations host1x_debug_all_fops = {
138 .open = host1x_debug_open_all,
139 .read = seq_read,
140 .llseek = seq_lseek,
141 .release = single_release,
142};
143
144static int host1x_debug_open(struct inode *inode, struct file *file)
145{
146 return single_open(file, host1x_debug_show, inode->i_private);
147}
148
149static const struct file_operations host1x_debug_fops = {
150 .open = host1x_debug_open,
151 .read = seq_read,
152 .llseek = seq_lseek,
153 .release = single_release,
154};
155
156void host1x_debug_init(struct host1x *host1x)
157{
158 struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
159
160 if (!de)
161 return;
162
163 /* Store the created entry */
164 host1x->debugfs = de;
165
166 debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
167 debugfs_create_file("status_all", S_IRUGO, de, host1x,
168 &host1x_debug_all_fops);
169
170 debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
171 &host1x_debug_trace_cmdbuf);
172
173 host1x_hw_debug_init(host1x, de);
174
175 debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
176 &host1x_debug_force_timeout_pid);
177 debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
178 &host1x_debug_force_timeout_val);
179 debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
180 &host1x_debug_force_timeout_channel);
181}
182
183void host1x_debug_deinit(struct host1x *host1x)
184{
185 debugfs_remove_recursive(host1x->debugfs);
186}
187#else
188void host1x_debug_init(struct host1x *host1x)
189{
190}
191void host1x_debug_deinit(struct host1x *host1x)
192{
193}
194#endif
195
196void host1x_debug_dump(struct host1x *host1x)
197{
198 struct output o = {
199 .fn = write_to_printk
200 };
201 show_all(host1x, &o);
202}
203
204void host1x_debug_dump_syncpts(struct host1x *host1x)
205{
206 struct output o = {
207 .fn = write_to_printk
208 };
209 show_syncpts(host1x, &o);
210}
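
A note on the CONFIG_DEBUG_FS split above: the real host1x_debug_init()/host1x_debug_deinit() are only compiled when debugfs is enabled, and the #else branch supplies empty stubs so that callers such as host1x_probe() never need their own #ifdef. A minimal userspace sketch of that idiom follows; MODEL_DEBUG_FS, struct host and debug_init() are illustrative names invented here, not kernel APIs.

#include <stdio.h>

/* Toggle to model CONFIG_DEBUG_FS being enabled or disabled. */
#define MODEL_DEBUG_FS 1

struct host { const char *name; };

#if MODEL_DEBUG_FS
static void debug_init(struct host *h)
{
	printf("registering debug files for %s\n", h->name);
}
#else
/* Empty stub: callers link against the same symbol either way. */
static void debug_init(struct host *h) { (void)h; }
#endif

int main(void)
{
	struct host h = { "host1x" };

	/* No #ifdef needed at the call site -- that is the point. */
	debug_init(&h);
	return 0;
}
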
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
new file mode 100644
index 000000000000..4595b2e0799f
--- /dev/null
+++ b/drivers/gpu/host1x/debug.h
@@ -0,0 +1,51 @@
1/*
2 * Tegra host1x Debug
3 *
4 * Copyright (c) 2011-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __HOST1X_DEBUG_H
19#define __HOST1X_DEBUG_H
20
21#include <linux/debugfs.h>
22#include <linux/seq_file.h>
23
24struct host1x;
25
26struct output {
27 void (*fn)(void *ctx, const char *str, size_t len);
28 void *ctx;
29 char buf[256];
30};
31
32static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
33{
34 seq_write((struct seq_file *)ctx, str, len);
35}
36
37static inline void write_to_printk(void *ctx, const char *str, size_t len)
38{
39 pr_info("%s", str);
40}
41
42void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
43
44extern unsigned int host1x_debug_trace_cmdbuf;
45
46void host1x_debug_init(struct host1x *host1x);
47void host1x_debug_deinit(struct host1x *host1x);
48void host1x_debug_dump(struct host1x *host1x);
49void host1x_debug_dump_syncpts(struct host1x *host1x);
50
51#endif
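
The struct output defined above is what lets one set of formatting routines serve two sinks: write_to_seqfile() for the debugfs files and write_to_printk() for host1x_debug_dump(). Here is a small self-contained C sketch of the same callback pattern, assuming nothing from the kernel: write_to_stream(), write_to_console() and debug_output() are stand-ins for seq_write(), pr_info() and host1x_debug_output(), not real APIs.

#include <stdarg.h>
#include <stdio.h>

struct output {
	void (*fn)(void *ctx, const char *str, size_t len);
	void *ctx;
	char buf[256];
};

/* Sink 1: append to a caller-provided FILE *, like seq_write(). */
static void write_to_stream(void *ctx, const char *str, size_t len)
{
	fwrite(str, 1, len, (FILE *)ctx);
}

/* Sink 2: unconditional console output, like pr_info(). */
static void write_to_console(void *ctx, const char *str, size_t len)
{
	(void)ctx;
	fwrite(str, 1, len, stderr);
}

/* Format once into the scratch buffer, then hand off to the sink. */
static void debug_output(struct output *o, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
	va_end(args);

	if (len > 0)
		o->fn(o->ctx, o->buf, (size_t)len);
}

int main(void)
{
	struct output file_out = { .fn = write_to_stream, .ctx = stdout };
	struct output console_out = { .fn = write_to_console };

	/* The same call site works against either sink. */
	debug_output(&file_out, "id %d min %d max %d\n", 3, 10, 12);
	debug_output(&console_out, "id %d min %d max %d\n", 3, 10, 12);
	return 0;
}
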
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
new file mode 100644
index 000000000000..28e28a23d444
--- /dev/null
+++ b/drivers/gpu/host1x/dev.c
@@ -0,0 +1,246 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/list.h>
21#include <linux/slab.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26
27#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h>
29
30#include "dev.h"
31#include "intr.h"
32#include "channel.h"
33#include "debug.h"
34#include "hw/host1x01.h"
35#include "host1x_client.h"
36
37void host1x_set_drm_data(struct device *dev, void *data)
38{
39 struct host1x *host1x = dev_get_drvdata(dev);
40 host1x->drm_data = data;
41}
42
43void *host1x_get_drm_data(struct device *dev)
44{
45 struct host1x *host1x = dev_get_drvdata(dev);
46 return host1x->drm_data;
47}
48
49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
50{
51 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
52
53 writel(v, sync_regs + r);
54}
55
56u32 host1x_sync_readl(struct host1x *host1x, u32 r)
57{
58 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
59
60 return readl(sync_regs + r);
61}
62
63void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
64{
65 writel(v, ch->regs + r);
66}
67
68u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
69{
70 return readl(ch->regs + r);
71}
72
73static const struct host1x_info host1x01_info = {
74 .nb_channels = 8,
75 .nb_pts = 32,
76 .nb_mlocks = 16,
77 .nb_bases = 8,
78 .init = host1x01_init,
79 .sync_offset = 0x3000,
80};
81
82static const struct of_device_id host1x_of_match[] = {
83 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
84 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
85 { },
86};
87MODULE_DEVICE_TABLE(of, host1x_of_match);
88
89static int host1x_probe(struct platform_device *pdev)
90{
91 const struct of_device_id *id;
92 struct host1x *host;
93 struct resource *regs;
94 int syncpt_irq;
95 int err;
96
97 id = of_match_device(host1x_of_match, &pdev->dev);
98 if (!id)
99 return -EINVAL;
100
101 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 if (!regs) {
103 dev_err(&pdev->dev, "failed to get registers\n");
104 return -ENXIO;
105 }
106
107 syncpt_irq = platform_get_irq(pdev, 0);
108 if (syncpt_irq < 0) {
109 dev_err(&pdev->dev, "failed to get IRQ\n");
110 return -ENXIO;
111 }
112
113 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
114 if (!host)
115 return -ENOMEM;
116
117 host->dev = &pdev->dev;
118 host->info = id->data;
119
120 /* set common host1x device data */
121 platform_set_drvdata(pdev, host);
122
123 host->regs = devm_ioremap_resource(&pdev->dev, regs);
124 if (IS_ERR(host->regs))
125 return PTR_ERR(host->regs);
126
127 if (host->info->init) {
128 err = host->info->init(host);
129 if (err)
130 return err;
131 }
132
133 host->clk = devm_clk_get(&pdev->dev, NULL);
134 if (IS_ERR(host->clk)) {
135 dev_err(&pdev->dev, "failed to get clock\n");
136 err = PTR_ERR(host->clk);
137 return err;
138 }
139
140 err = host1x_channel_list_init(host);
141 if (err) {
142 dev_err(&pdev->dev, "failed to initialize channel list\n");
143 return err;
144 }
145
146 err = clk_prepare_enable(host->clk);
147 if (err < 0) {
148 dev_err(&pdev->dev, "failed to enable clock\n");
149 return err;
150 }
151
152 err = host1x_syncpt_init(host);
153 if (err) {
154 dev_err(&pdev->dev, "failed to initialize syncpts\n");
155 goto fail_unprepare_clk;
156 }
157
158 err = host1x_intr_init(host, syncpt_irq);
159 if (err) {
160 dev_err(&pdev->dev, "failed to initialize interrupts\n");
161 goto fail_deinit_syncpt;
162 }
163
164 host1x_debug_init(host);
165 host1x_drm_alloc(pdev);
166 return 0;
167
168fail_deinit_syncpt:
169 host1x_syncpt_deinit(host);
170fail_unprepare_clk:
171 clk_disable_unprepare(host->clk);
172 return err;
173}
174
175static int __exit host1x_remove(struct platform_device *pdev)
176{
177 struct host1x *host = platform_get_drvdata(pdev);
178
179 host1x_intr_deinit(host);
180 host1x_syncpt_deinit(host);
181 clk_disable_unprepare(host->clk);
182
183 return 0;
184}
185
186static struct platform_driver tegra_host1x_driver = {
187 .probe = host1x_probe,
188 .remove = __exit_p(host1x_remove),
189 .driver = {
190 .owner = THIS_MODULE,
191 .name = "tegra-host1x",
192 .of_match_table = host1x_of_match,
193 },
194};
195
196static int __init tegra_host1x_init(void)
197{
198 int err;
199
200 err = platform_driver_register(&tegra_host1x_driver);
201 if (err < 0)
202 return err;
203
204#ifdef CONFIG_DRM_TEGRA
205 err = platform_driver_register(&tegra_dc_driver);
206 if (err < 0)
207 goto unregister_host1x;
208
209 err = platform_driver_register(&tegra_hdmi_driver);
210 if (err < 0)
211 goto unregister_dc;
212
213 err = platform_driver_register(&tegra_gr2d_driver);
214 if (err < 0)
215 goto unregister_hdmi;
216#endif
217
218 return 0;
219
220#ifdef CONFIG_DRM_TEGRA
221unregister_hdmi:
222 platform_driver_unregister(&tegra_hdmi_driver);
223unregister_dc:
224 platform_driver_unregister(&tegra_dc_driver);
225unregister_host1x:
226 platform_driver_unregister(&tegra_host1x_driver);
227 return err;
228#endif
229}
230module_init(tegra_host1x_init);
231
232static void __exit tegra_host1x_exit(void)
233{
234#ifdef CONFIG_DRM_TEGRA
235 platform_driver_unregister(&tegra_gr2d_driver);
236 platform_driver_unregister(&tegra_hdmi_driver);
237 platform_driver_unregister(&tegra_dc_driver);
238#endif
239 platform_driver_unregister(&tegra_host1x_driver);
240}
241module_exit(tegra_host1x_exit);
242
243MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
244MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
245MODULE_DESCRIPTION("Host1x driver for Tegra products");
246MODULE_LICENSE("GPL");
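
host1x_probe() above leans on devm_* helpers for memory and MMIO, so only the resources with side effects (the enabled clock, the syncpoints, the interrupts) need explicit unwinding, in reverse acquisition order. Below is a minimal userspace model of that goto-unwind shape; acquire(), release() and probe() are illustrative stand-ins for the kernel calls, not real functions.

#include <stdio.h>

/* Toy resources standing in for the clock, syncpoints and interrupts. */
static int acquire(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

/* Models the probe unwind: a later failure releases earlier resources. */
static int probe(int fail_interrupts)
{
	if (acquire("clock", 0))
		return -1;
	if (acquire("syncpoints", 0))
		goto fail_clock;
	if (acquire("interrupts", fail_interrupts))
		goto fail_syncpoints;
	return 0;

fail_syncpoints:
	release("syncpoints");
fail_clock:
	release("clock");
	return -1;
}

int main(void)
{
	probe(1);	/* exercises the unwind path */
	return probe(0) ? 1 : 0;
}
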
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
new file mode 100644
index 000000000000..a1607d6e135b
--- /dev/null
+++ b/drivers/gpu/host1x/dev.h
@@ -0,0 +1,308 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_DEV_H
18#define HOST1X_DEV_H
19
20#include <linux/platform_device.h>
21#include <linux/device.h>
22
23#include "channel.h"
24#include "syncpt.h"
25#include "intr.h"
26#include "cdma.h"
27#include "job.h"
28
29struct host1x_syncpt;
30struct host1x_channel;
31struct host1x_cdma;
32struct host1x_job;
33struct push_buffer;
34struct output;
35struct dentry;
36
37struct host1x_channel_ops {
38 int (*init)(struct host1x_channel *channel, struct host1x *host,
39 unsigned int id);
40 int (*submit)(struct host1x_job *job);
41};
42
43struct host1x_cdma_ops {
44 void (*start)(struct host1x_cdma *cdma);
45 void (*stop)(struct host1x_cdma *cdma);
46 void (*flush)(struct host1x_cdma *cdma);
47 int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
48 void (*timeout_destroy)(struct host1x_cdma *cdma);
49 void (*freeze)(struct host1x_cdma *cdma);
50 void (*resume)(struct host1x_cdma *cdma, u32 getptr);
51 void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
52 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
53};
54
55struct host1x_pushbuffer_ops {
56 void (*init)(struct push_buffer *pb);
57};
58
59struct host1x_debug_ops {
60 void (*debug_init)(struct dentry *de);
61 void (*show_channel_cdma)(struct host1x *host,
62 struct host1x_channel *ch,
63 struct output *o);
64 void (*show_channel_fifo)(struct host1x *host,
65 struct host1x_channel *ch,
66 struct output *o);
67 void (*show_mlocks)(struct host1x *host, struct output *output);
68
69};
70
71struct host1x_syncpt_ops {
72 void (*restore)(struct host1x_syncpt *syncpt);
73 void (*restore_wait_base)(struct host1x_syncpt *syncpt);
74 void (*load_wait_base)(struct host1x_syncpt *syncpt);
75 u32 (*load)(struct host1x_syncpt *syncpt);
76 void (*cpu_incr)(struct host1x_syncpt *syncpt);
77 int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
78};
79
80struct host1x_intr_ops {
81 int (*init_host_sync)(struct host1x *host, u32 cpm,
82 void (*syncpt_thresh_work)(struct work_struct *work));
83 void (*set_syncpt_threshold)(
84 struct host1x *host, u32 id, u32 thresh);
85 void (*enable_syncpt_intr)(struct host1x *host, u32 id);
86 void (*disable_syncpt_intr)(struct host1x *host, u32 id);
87 void (*disable_all_syncpt_intrs)(struct host1x *host);
88 int (*free_syncpt_irq)(struct host1x *host);
89};
90
91struct host1x_info {
92 int nb_channels; /* host1x: num channels supported */
93 int nb_pts; /* host1x: num syncpoints supported */
94 int nb_bases; /* host1x: num wait bases supported */
95 int nb_mlocks; /* host1x: number of mlocks */
96 int (*init)(struct host1x *); /* initialize per SoC ops */
97 int sync_offset;
98};
99
100struct host1x {
101 const struct host1x_info *info;
102
103 void __iomem *regs;
104 struct host1x_syncpt *syncpt;
105 struct device *dev;
106 struct clk *clk;
107
108 struct mutex intr_mutex;
109 struct workqueue_struct *intr_wq;
110 int intr_syncpt_irq;
111
112 const struct host1x_syncpt_ops *syncpt_op;
113 const struct host1x_intr_ops *intr_op;
114 const struct host1x_channel_ops *channel_op;
115 const struct host1x_cdma_ops *cdma_op;
116 const struct host1x_pushbuffer_ops *cdma_pb_op;
117 const struct host1x_debug_ops *debug_op;
118
119 struct host1x_syncpt *nop_sp;
120
121 struct mutex chlist_mutex;
122 struct host1x_channel chlist;
123 unsigned long allocated_channels;
124 unsigned int num_allocated_channels;
125
126 struct dentry *debugfs;
127
128 void *drm_data;
129};
130
131void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
132u32 host1x_sync_readl(struct host1x *host1x, u32 r);
133void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
134u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
135
136static inline void host1x_hw_syncpt_restore(struct host1x *host,
137 struct host1x_syncpt *sp)
138{
139 host->syncpt_op->restore(sp);
140}
141
142static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
143 struct host1x_syncpt *sp)
144{
145 host->syncpt_op->restore_wait_base(sp);
146}
147
148static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
149 struct host1x_syncpt *sp)
150{
151 host->syncpt_op->load_wait_base(sp);
152}
153
154static inline u32 host1x_hw_syncpt_load(struct host1x *host,
155 struct host1x_syncpt *sp)
156{
157 return host->syncpt_op->load(sp);
158}
159
160static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
161 struct host1x_syncpt *sp)
162{
163 host->syncpt_op->cpu_incr(sp);
164}
165
166static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
167 struct host1x_syncpt *sp,
168 void *patch_addr)
169{
170 return host->syncpt_op->patch_wait(sp, patch_addr);
171}
172
173static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
174 void (*syncpt_thresh_work)(struct work_struct *))
175{
176 return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
177}
178
179static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
180 u32 id, u32 thresh)
181{
182 host->intr_op->set_syncpt_threshold(host, id, thresh);
183}
184
185static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
186 u32 id)
187{
188 host->intr_op->enable_syncpt_intr(host, id);
189}
190
191static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
192 u32 id)
193{
194 host->intr_op->disable_syncpt_intr(host, id);
195}
196
197static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
198{
199 host->intr_op->disable_all_syncpt_intrs(host);
200}
201
202static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
203{
204 return host->intr_op->free_syncpt_irq(host);
205}
206
207static inline int host1x_hw_channel_init(struct host1x *host,
208 struct host1x_channel *channel,
209 int chid)
210{
211 return host->channel_op->init(channel, host, chid);
212}
213
214static inline int host1x_hw_channel_submit(struct host1x *host,
215 struct host1x_job *job)
216{
217 return host->channel_op->submit(job);
218}
219
220static inline void host1x_hw_cdma_start(struct host1x *host,
221 struct host1x_cdma *cdma)
222{
223 host->cdma_op->start(cdma);
224}
225
226static inline void host1x_hw_cdma_stop(struct host1x *host,
227 struct host1x_cdma *cdma)
228{
229 host->cdma_op->stop(cdma);
230}
231
232static inline void host1x_hw_cdma_flush(struct host1x *host,
233 struct host1x_cdma *cdma)
234{
235 host->cdma_op->flush(cdma);
236}
237
238static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
239 struct host1x_cdma *cdma,
240 u32 syncpt_id)
241{
242 return host->cdma_op->timeout_init(cdma, syncpt_id);
243}
244
245static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
246 struct host1x_cdma *cdma)
247{
248 host->cdma_op->timeout_destroy(cdma);
249}
250
251static inline void host1x_hw_cdma_freeze(struct host1x *host,
252 struct host1x_cdma *cdma)
253{
254 host->cdma_op->freeze(cdma);
255}
256
257static inline void host1x_hw_cdma_resume(struct host1x *host,
258 struct host1x_cdma *cdma, u32 getptr)
259{
260 host->cdma_op->resume(cdma, getptr);
261}
262
263static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
264 struct host1x_cdma *cdma,
265 u32 getptr,
266 u32 syncpt_incrs,
267 u32 syncval, u32 nr_slots)
268{
269 host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
270 nr_slots);
271}
272
273static inline void host1x_hw_pushbuffer_init(struct host1x *host,
274 struct push_buffer *pb)
275{
276 host->cdma_pb_op->init(pb);
277}
278
279static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
280{
281 if (host->debug_op && host->debug_op->debug_init)
282 host->debug_op->debug_init(de);
283}
284
285static inline void host1x_hw_show_channel_cdma(struct host1x *host,
286 struct host1x_channel *channel,
287 struct output *o)
288{
289 host->debug_op->show_channel_cdma(host, channel, o);
290}
291
292static inline void host1x_hw_show_channel_fifo(struct host1x *host,
293 struct host1x_channel *channel,
294 struct output *o)
295{
296 host->debug_op->show_channel_fifo(host, channel, o);
297}
298
299static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
300{
301 host->debug_op->show_mlocks(host, o);
302}
303
304extern struct platform_driver tegra_hdmi_driver;
305extern struct platform_driver tegra_dc_driver;
306extern struct platform_driver tegra_gr2d_driver;
307
308#endif
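
Nearly all of dev.h is the same indirection: struct host1x carries per-SoC ops tables (syncpt_op, intr_op, cdma_op, ...) filled in by host1x01_init(), and thin host1x_hw_*() inline wrappers dispatch through them, so Tegra20 and Tegra30 share one core driver. A compact userspace sketch of that pattern follows, using invented names (struct syncpt_ops, gen1_ops, hw_syncpt_load()) rather than the real kernel types.

#include <stdio.h>

struct syncpt { unsigned int value; };

/* Per-SoC operations table, in the spirit of host1x_syncpt_ops. */
struct syncpt_ops {
	unsigned int (*load)(struct syncpt *sp);
	void (*cpu_incr)(struct syncpt *sp);
};

struct host {
	const struct syncpt_ops *syncpt_op;
	struct syncpt sp;
};

/* One "hardware generation" backend. */
static unsigned int gen1_load(struct syncpt *sp) { return sp->value; }
static void gen1_cpu_incr(struct syncpt *sp) { sp->value++; }

static const struct syncpt_ops gen1_ops = {
	.load = gen1_load,
	.cpu_incr = gen1_cpu_incr,
};

/* Thin wrappers, like the host1x_hw_syncpt_*() inlines in dev.h. */
static inline unsigned int hw_syncpt_load(struct host *h)
{
	return h->syncpt_op->load(&h->sp);
}

static inline void hw_syncpt_cpu_incr(struct host *h)
{
	h->syncpt_op->cpu_incr(&h->sp);
}

int main(void)
{
	/* Selecting gen1_ops here plays the role of host1x01_init(). */
	struct host h = { .syncpt_op = &gen1_ops, .sp = { 0 } };

	hw_syncpt_cpu_incr(&h);
	printf("syncpt value: %u\n", hw_syncpt_load(&h));
	return 0;
}
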
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index be1daf7344d3..69853a4de40a 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -1,12 +1,10 @@
 config DRM_TEGRA
-	tristate "NVIDIA Tegra DRM"
-	depends on DRM && OF && ARCH_TEGRA
+	bool "NVIDIA Tegra DRM"
+	depends on DRM
 	select DRM_KMS_HELPER
-	select DRM_GEM_CMA_HELPER
-	select DRM_KMS_CMA_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
 	help
 	  Choose this option if you have an NVIDIA Tegra SoC.
 
@@ -15,6 +13,14 @@ config DRM_TEGRA
 
 if DRM_TEGRA
 
+config DRM_TEGRA_STAGING
+	bool "Enable HOST1X interface"
+	depends on STAGING
+	help
+	  Say yes if HOST1X should be available for userspace DRM users.
+
+	  If unsure, choose N.
+
 config DRM_TEGRA_DEBUG
 	bool "NVIDIA Tegra DRM debug support"
 	help
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/host1x/drm/dc.c
index de94707b9dbe..8c04943f82e3 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -14,8 +14,10 @@
 #include <linux/platform_device.h>
 #include <linux/clk/tegra.h>
 
-#include "drm.h"
+#include "host1x_client.h"
 #include "dc.h"
+#include "drm.h"
+#include "gem.h"
 
 struct tegra_plane {
 	struct drm_plane base;
@@ -51,9 +53,9 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	window.bits_per_pixel = fb->bits_per_pixel;
 
 	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
-		struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
 
-		window.base[i] = gem->paddr + fb->offsets[i];
+		window.base[i] = bo->paddr + fb->offsets[i];
 
 		/*
 		 * Tegra doesn't support different strides for U and V planes
@@ -103,7 +105,9 @@ static const struct drm_plane_funcs tegra_plane_funcs = {
 };
 
 static const uint32_t plane_formats[] = {
+	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_RGB565,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_YUV420,
 	DRM_FORMAT_YUV422,
@@ -136,7 +140,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
 static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 			     struct drm_framebuffer *fb)
 {
-	struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
 	unsigned long value;
 
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -144,7 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	value = fb->offsets[0] + y * fb->pitches[0] +
 		x * fb->bits_per_pixel / 8;
 
-	tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+	tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
 
 	value = GENERAL_UPDATE | WIN_A_UPDATE;
@@ -186,20 +190,20 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 {
 	struct drm_device *drm = dc->base.dev;
 	struct drm_crtc *crtc = &dc->base;
-	struct drm_gem_cma_object *gem;
 	unsigned long flags, base;
+	struct tegra_bo *bo;
 
 	if (!dc->event)
 		return;
 
-	gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+	bo = tegra_fb_get_plane(crtc->fb, 0);
 
 	/* check if new start address has been latched */
 	tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
 	base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
 
-	if (base == gem->paddr + crtc->fb->offsets[0]) {
+	if (base == bo->paddr + crtc->fb->offsets[0]) {
 		spin_lock_irqsave(&drm->event_lock, flags);
 		drm_send_vblank_event(drm, dc->pipe, dc->event);
 		drm_vblank_put(drm, dc->pipe);
@@ -541,6 +545,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 unsigned int tegra_dc_format(uint32_t format)
 {
 	switch (format) {
+	case DRM_FORMAT_XBGR8888:
+		return WIN_COLOR_DEPTH_R8G8B8A8;
+
 	case DRM_FORMAT_XRGB8888:
 		return WIN_COLOR_DEPTH_B8G8R8A8;
 
@@ -569,7 +576,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *adjusted,
 			       int x, int y, struct drm_framebuffer *old_fb)
 {
-	struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+	struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct tegra_dc_window window;
 	unsigned long div, value;
@@ -616,7 +623,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
 	window.format = tegra_dc_format(crtc->fb->pixel_format);
 	window.bits_per_pixel = crtc->fb->bits_per_pixel;
 	window.stride[0] = crtc->fb->pitches[0];
-	window.base[0] = gem->paddr;
+	window.base[0] = bo->paddr;
 
 	err = tegra_dc_setup_window(dc, 0, &window);
 	if (err < 0)
@@ -1097,7 +1104,7 @@ static const struct host1x_client_ops dc_client_ops = {
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct resource *regs;
 	struct tegra_dc *dc;
 	int err;
@@ -1121,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
 		return err;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!regs) {
-		dev_err(&pdev->dev, "failed to get registers\n");
-		return -ENXIO;
-	}
-
 	dc->regs = devm_ioremap_resource(&pdev->dev, regs);
 	if (IS_ERR(dc->regs))
 		return PTR_ERR(dc->regs);
@@ -1160,7 +1162,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
 
 static int tegra_dc_remove(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_dc *dc = platform_get_drvdata(pdev);
 	int err;
 
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/host1x/drm/dc.h
index 79eaec9aac77..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/host1x/drm/dc.h
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
new file mode 100644
index 000000000000..2b561c9118c6
--- /dev/null
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -0,0 +1,640 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include <drm/drm.h>
18#include <drm/drmP.h>
19
20#include "host1x_client.h"
21#include "dev.h"
22#include "drm.h"
23#include "gem.h"
24#include "syncpt.h"
25
26#define DRIVER_NAME "tegra"
27#define DRIVER_DESC "NVIDIA Tegra graphics"
28#define DRIVER_DATE "20120330"
29#define DRIVER_MAJOR 0
30#define DRIVER_MINOR 0
31#define DRIVER_PATCHLEVEL 0
32
33struct host1x_drm_client {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39static int host1x_add_drm_client(struct host1x_drm *host1x,
40 struct device_node *np)
41{
42 struct host1x_drm_client *client;
43
44 client = kzalloc(sizeof(*client), GFP_KERNEL);
45 if (!client)
46 return -ENOMEM;
47
48 INIT_LIST_HEAD(&client->list);
49 client->np = of_node_get(np);
50
51 list_add_tail(&client->list, &host1x->drm_clients);
52
53 return 0;
54}
55
56static int host1x_activate_drm_client(struct host1x_drm *host1x,
57 struct host1x_drm_client *drm,
58 struct host1x_client *client)
59{
60 mutex_lock(&host1x->drm_clients_lock);
61 /* move the client from the pending list to the active list */
62 list_move_tail(&drm->list, &host1x->drm_active);
63 drm->client = client;
64 mutex_unlock(&host1x->drm_clients_lock);
65
66 return 0;
67}
68
69static int host1x_remove_drm_client(struct host1x_drm *host1x,
70 struct host1x_drm_client *client)
71{
72 mutex_lock(&host1x->drm_clients_lock);
73 list_del_init(&client->list);
74 mutex_unlock(&host1x->drm_clients_lock);
75
76 of_node_put(client->np);
77 kfree(client);
78
79 return 0;
80}
81
82static int host1x_parse_dt(struct host1x_drm *host1x)
83{
84 static const char * const compat[] = {
85 "nvidia,tegra20-dc",
86 "nvidia,tegra20-hdmi",
87 "nvidia,tegra20-gr2d",
88 "nvidia,tegra30-dc",
89 "nvidia,tegra30-hdmi",
90 "nvidia,tegra30-gr2d",
91 };
92 unsigned int i;
93 int err;
94
95 for (i = 0; i < ARRAY_SIZE(compat); i++) {
96 struct device_node *np;
97
98 for_each_child_of_node(host1x->dev->of_node, np) {
99 if (of_device_is_compatible(np, compat[i]) &&
100 of_device_is_available(np)) {
101 err = host1x_add_drm_client(host1x, np);
102 if (err < 0)
103 return err;
104 }
105 }
106 }
107
108 return 0;
109}
110
111int host1x_drm_alloc(struct platform_device *pdev)
112{
113 struct host1x_drm *host1x;
114 int err;
115
116 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
117 if (!host1x)
118 return -ENOMEM;
119
120 mutex_init(&host1x->drm_clients_lock);
121 INIT_LIST_HEAD(&host1x->drm_clients);
122 INIT_LIST_HEAD(&host1x->drm_active);
123 mutex_init(&host1x->clients_lock);
124 INIT_LIST_HEAD(&host1x->clients);
125 host1x->dev = &pdev->dev;
126
127 err = host1x_parse_dt(host1x);
128 if (err < 0) {
129 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
130 return err;
131 }
132
133 host1x_set_drm_data(&pdev->dev, host1x);
134
135 return 0;
136}
137
138int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
139{
140 struct host1x_client *client;
141
142 mutex_lock(&host1x->clients_lock);
143
144 list_for_each_entry(client, &host1x->clients, list) {
145 if (client->ops && client->ops->drm_init) {
146 int err = client->ops->drm_init(client, drm);
147 if (err < 0) {
148 dev_err(host1x->dev, "DRM setup failed for %s: %d\n",
149 dev_name(client->dev), err);
150 mutex_unlock(&host1x->clients_lock);
151 return err;
152 }
153 }
154 }
155
156 mutex_unlock(&host1x->clients_lock);
157
158 return 0;
159}
160
161int host1x_drm_exit(struct host1x_drm *host1x)
162{
163 struct platform_device *pdev = to_platform_device(host1x->dev);
164 struct host1x_client *client;
165
166 if (!host1x->drm)
167 return 0;
168
169 mutex_lock(&host1x->clients_lock);
170
171 list_for_each_entry_reverse(client, &host1x->clients, list) {
172 if (client->ops && client->ops->drm_exit) {
173 int err = client->ops->drm_exit(client);
174 if (err < 0) {
175 dev_err(host1x->dev, "DRM cleanup failed for %s: %d\n",
176 dev_name(client->dev), err);
177 mutex_unlock(&host1x->clients_lock);
178 return err;
179 }
180 }
181 }
182
183 mutex_unlock(&host1x->clients_lock);
184
185 drm_platform_exit(&tegra_drm_driver, pdev);
186 host1x->drm = NULL;
187
188 return 0;
189}
190
191int host1x_register_client(struct host1x_drm *host1x,
192 struct host1x_client *client)
193{
194 struct host1x_drm_client *drm, *tmp;
195 int err;
196
197 mutex_lock(&host1x->clients_lock);
198 list_add_tail(&client->list, &host1x->clients);
199 mutex_unlock(&host1x->clients_lock);
200
201 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
202 if (drm->np == client->dev->of_node)
203 host1x_activate_drm_client(host1x, drm, client);
204
205 if (list_empty(&host1x->drm_clients)) {
206 struct platform_device *pdev = to_platform_device(host1x->dev);
207
208 err = drm_platform_init(&tegra_drm_driver, pdev);
209 if (err < 0) {
210 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
211 return err;
212 }
213 }
214
215 return 0;
216}
217
218int host1x_unregister_client(struct host1x_drm *host1x,
219 struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
225 if (drm->client == client) {
226 err = host1x_drm_exit(host1x);
227 if (err < 0) {
228 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
229 err);
230 return err;
231 }
232
233 host1x_remove_drm_client(host1x, drm);
234 break;
235 }
236 }
237
238 mutex_lock(&host1x->clients_lock);
239 list_del_init(&client->list);
240 mutex_unlock(&host1x->clients_lock);
241
242 return 0;
243}
244
245static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
246{
247 struct host1x_drm *host1x;
248 int err;
249
250 host1x = host1x_get_drm_data(drm->dev);
251 drm->dev_private = host1x;
252 host1x->drm = drm;
253
254 drm_mode_config_init(drm);
255
256 err = host1x_drm_init(host1x, drm);
257 if (err < 0)
258 return err;
259
260 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
261 if (err < 0)
262 return err;
263
264 err = tegra_drm_fb_init(drm);
265 if (err < 0)
266 return err;
267
268 drm_kms_helper_poll_init(drm);
269
270 return 0;
271}
272
273static int tegra_drm_unload(struct drm_device *drm)
274{
275 drm_kms_helper_poll_fini(drm);
276 tegra_drm_fb_exit(drm);
277
278 drm_mode_config_cleanup(drm);
279
280 return 0;
281}
282
283static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
284{
285 struct host1x_drm_file *fpriv;
286
287 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
288 if (!fpriv)
289 return -ENOMEM;
290
291 INIT_LIST_HEAD(&fpriv->contexts);
292 filp->driver_priv = fpriv;
293
294 return 0;
295}
296
297static void host1x_drm_context_free(struct host1x_drm_context *context)
298{
299 context->client->ops->close_channel(context);
300 kfree(context);
301}
302
303static void tegra_drm_lastclose(struct drm_device *drm)
304{
305 struct host1x_drm *host1x = drm->dev_private;
306
307 tegra_fbdev_restore_mode(host1x->fbdev);
308}
309
310#ifdef CONFIG_DRM_TEGRA_STAGING
311static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
312 struct host1x_drm_context *context)
313{
314 struct host1x_drm_context *ctx;
315
316 list_for_each_entry(ctx, &file->contexts, list)
317 if (ctx == context)
318 return true;
319
320 return false;
321}
322
323static int tegra_gem_create(struct drm_device *drm, void *data,
324 struct drm_file *file)
325{
326 struct drm_tegra_gem_create *args = data;
327 struct tegra_bo *bo;
328
329 bo = tegra_bo_create_with_handle(file, drm, args->size,
330 &args->handle);
331 if (IS_ERR(bo))
332 return PTR_ERR(bo);
333
334 return 0;
335}
336
337static int tegra_gem_mmap(struct drm_device *drm, void *data,
338 struct drm_file *file)
339{
340 struct drm_tegra_gem_mmap *args = data;
341 struct drm_gem_object *gem;
342 struct tegra_bo *bo;
343
344 gem = drm_gem_object_lookup(drm, file, args->handle);
345 if (!gem)
346 return -EINVAL;
347
348 bo = to_tegra_bo(gem);
349
350 args->offset = tegra_bo_get_mmap_offset(bo);
351
352 drm_gem_object_unreference(gem);
353
354 return 0;
355}
356
357static int tegra_syncpt_read(struct drm_device *drm, void *data,
358 struct drm_file *file)
359{
360 struct drm_tegra_syncpt_read *args = data;
361 struct host1x *host = dev_get_drvdata(drm->dev);
362 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
363
364 if (!sp)
365 return -EINVAL;
366
367 args->value = host1x_syncpt_read_min(sp);
368 return 0;
369}
370
371static int tegra_syncpt_incr(struct drm_device *drm, void *data,
372 struct drm_file *file)
373{
374 struct drm_tegra_syncpt_incr *args = data;
375 struct host1x *host = dev_get_drvdata(drm->dev);
376 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
377
378 if (!sp)
379 return -EINVAL;
380
381 host1x_syncpt_incr(sp);
382 return 0;
383}
384
385static int tegra_syncpt_wait(struct drm_device *drm, void *data,
386 struct drm_file *file)
387{
388 struct drm_tegra_syncpt_wait *args = data;
389 struct host1x *host = dev_get_drvdata(drm->dev);
390 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
391
392 if (!sp)
393 return -EINVAL;
394
395 return host1x_syncpt_wait(sp, args->thresh, args->timeout,
396 &args->value);
397}
398
399static int tegra_open_channel(struct drm_device *drm, void *data,
400 struct drm_file *file)
401{
402 struct drm_tegra_open_channel *args = data;
403 struct host1x_client *client;
404 struct host1x_drm_context *context;
405 struct host1x_drm_file *fpriv = file->driver_priv;
406 struct host1x_drm *host1x = drm->dev_private;
407 int err = -ENODEV;
408
409 context = kzalloc(sizeof(*context), GFP_KERNEL);
410 if (!context)
411 return -ENOMEM;
412
413 list_for_each_entry(client, &host1x->clients, list)
414 if (client->class == args->client) {
415 err = client->ops->open_channel(client, context);
416 if (err)
417 break;
418
419 context->client = client;
420 list_add(&context->list, &fpriv->contexts);
421 args->context = (uintptr_t)context;
422 return 0;
423 }
424
425 kfree(context);
426 return err;
427}
428
429static int tegra_close_channel(struct drm_device *drm, void *data,
430 struct drm_file *file)
431{
432 struct drm_tegra_close_channel *args = data;
433 struct host1x_drm_file *fpriv = file->driver_priv;
434 struct host1x_drm_context *context =
435 (struct host1x_drm_context *)(uintptr_t)args->context;
436
437 if (!host1x_drm_file_owns_context(fpriv, context))
438 return -EINVAL;
439
440 list_del(&context->list);
441 host1x_drm_context_free(context);
442
443 return 0;
444}
445
446static int tegra_get_syncpt(struct drm_device *drm, void *data,
447 struct drm_file *file)
448{
449 struct drm_tegra_get_syncpt *args = data;
450 struct host1x_drm_file *fpriv = file->driver_priv;
451 struct host1x_drm_context *context =
452 (struct host1x_drm_context *)(uintptr_t)args->context;
453 struct host1x_syncpt *syncpt;
454
455 if (!host1x_drm_file_owns_context(fpriv, context))
456 return -ENODEV;
457
458 if (args->index >= context->client->num_syncpts)
459 return -EINVAL;
460
461 syncpt = context->client->syncpts[args->index];
462 args->id = host1x_syncpt_id(syncpt);
463
464 return 0;
465}
466
467static int tegra_submit(struct drm_device *drm, void *data,
468 struct drm_file *file)
469{
470 struct drm_tegra_submit *args = data;
471 struct host1x_drm_file *fpriv = file->driver_priv;
472 struct host1x_drm_context *context =
473 (struct host1x_drm_context *)(uintptr_t)args->context;
474
475 if (!host1x_drm_file_owns_context(fpriv, context))
476 return -ENODEV;
477
478 return context->client->ops->submit(context, args, drm, file);
479}
480#endif
481
482static struct drm_ioctl_desc tegra_drm_ioctls[] = {
483#ifdef CONFIG_DRM_TEGRA_STAGING
484 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
485 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
486 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
487 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
488 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
489 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
490 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
491 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
492 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
493#endif
494};
495
496static const struct file_operations tegra_drm_fops = {
497 .owner = THIS_MODULE,
498 .open = drm_open,
499 .release = drm_release,
500 .unlocked_ioctl = drm_ioctl,
501 .mmap = tegra_drm_mmap,
502 .poll = drm_poll,
503 .fasync = drm_fasync,
504 .read = drm_read,
505#ifdef CONFIG_COMPAT
506 .compat_ioctl = drm_compat_ioctl,
507#endif
508 .llseek = noop_llseek,
509};
510
511static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
512{
513 struct drm_crtc *crtc;
514
515 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
516 struct tegra_dc *dc = to_tegra_dc(crtc);
517
518 if (dc->pipe == pipe)
519 return crtc;
520 }
521
522 return NULL;
523}
524
525static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
526{
527 /* TODO: implement real hardware counter using syncpoints */
528 return drm_vblank_count(dev, crtc);
529}
530
531static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
532{
533 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
534
535 if (!crtc)
536 return -ENODEV;
537
538 /* convert to a tegra_dc only once the CRTC is known to exist */
539 tegra_dc_enable_vblank(to_tegra_dc(crtc));
540
541 return 0;
542}
543
544static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
545{
546 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
547
548 /* a pipe without a CRTC has no vblank to disable */
549 if (crtc)
550 tegra_dc_disable_vblank(to_tegra_dc(crtc));
551}
552
553static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
554{
555 struct host1x_drm_file *fpriv = file->driver_priv;
556 struct host1x_drm_context *context, *tmp;
557 struct drm_crtc *crtc;
558
559 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
560 tegra_dc_cancel_page_flip(crtc, file);
561
562 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
563 host1x_drm_context_free(context);
564
565 kfree(fpriv);
566}
567
568#ifdef CONFIG_DEBUG_FS
569static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
570{
571 struct drm_info_node *node = (struct drm_info_node *)s->private;
572 struct drm_device *drm = node->minor->dev;
573 struct drm_framebuffer *fb;
574
575 mutex_lock(&drm->mode_config.fb_lock);
576
577 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
578 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
579 fb->base.id, fb->width, fb->height, fb->depth,
580 fb->bits_per_pixel,
581 atomic_read(&fb->refcount.refcount));
582 }
583
584 mutex_unlock(&drm->mode_config.fb_lock);
585
586 return 0;
587}
588
589static struct drm_info_list tegra_debugfs_list[] = {
590 { "framebuffers", tegra_debugfs_framebuffers, 0 },
591};
592
593static int tegra_debugfs_init(struct drm_minor *minor)
594{
595 return drm_debugfs_create_files(tegra_debugfs_list,
596 ARRAY_SIZE(tegra_debugfs_list),
597 minor->debugfs_root, minor);
598}
599
600static void tegra_debugfs_cleanup(struct drm_minor *minor)
601{
602 drm_debugfs_remove_files(tegra_debugfs_list,
603 ARRAY_SIZE(tegra_debugfs_list), minor);
604}
605#endif
606
607struct drm_driver tegra_drm_driver = {
608 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
609 .load = tegra_drm_load,
610 .unload = tegra_drm_unload,
611 .open = tegra_drm_open,
612 .preclose = tegra_drm_preclose,
613 .lastclose = tegra_drm_lastclose,
614
615 .get_vblank_counter = tegra_drm_get_vblank_counter,
616 .enable_vblank = tegra_drm_enable_vblank,
617 .disable_vblank = tegra_drm_disable_vblank,
618
619#if defined(CONFIG_DEBUG_FS)
620 .debugfs_init = tegra_debugfs_init,
621 .debugfs_cleanup = tegra_debugfs_cleanup,
622#endif
623
624 .gem_free_object = tegra_bo_free_object,
625 .gem_vm_ops = &tegra_bo_vm_ops,
626 .dumb_create = tegra_bo_dumb_create,
627 .dumb_map_offset = tegra_bo_dumb_map_offset,
628 .dumb_destroy = tegra_bo_dumb_destroy,
629
630 .ioctls = tegra_drm_ioctls,
631 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
632 .fops = &tegra_drm_fops,
633
634 .name = DRIVER_NAME,
635 .desc = DRIVER_DESC,
636 .date = DRIVER_DATE,
637 .major = DRIVER_MAJOR,
638 .minor = DRIVER_MINOR,
639 .patchlevel = DRIVER_PATCHLEVEL,
640};
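
One detail in drm.c worth calling out: tegra_close_channel(), tegra_get_syncpt() and tegra_submit() all receive a raw context value from userspace and refuse it unless host1x_drm_file_owns_context() finds it on that file's own context list, so one client can never operate on another's channel. Here is a self-contained sketch of that validation, using a plain singly linked list instead of the kernel's list_head; all names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct context {
	int id;
	struct context *next;
};

/* Per-open-file private data, like host1x_drm_file. */
struct file_priv {
	struct context *contexts;
};

/*
 * Never trust a context pointer coming in from userspace: accept it only
 * if it is actually on this file's list, as host1x_drm_file_owns_context()
 * does with a list_for_each_entry() walk.
 */
static bool file_owns_context(struct file_priv *f, struct context *ctx)
{
	struct context *c;

	for (c = f->contexts; c; c = c->next)
		if (c == ctx)
			return true;

	return false;
}

int main(void)
{
	struct file_priv f = { NULL };
	struct context *mine = calloc(1, sizeof(*mine));
	struct context stray = { 99, NULL };

	if (!mine)
		return 1;

	mine->id = 1;
	mine->next = f.contexts;
	f.contexts = mine;

	printf("owns mine:  %d\n", file_owns_context(&f, mine));
	printf("owns stray: %d\n", file_owns_context(&f, &stray));

	free(mine);
	return 0;
}
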
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/host1x/drm/drm.h
index 6dd75a2600eb..02ce020f2575 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/host1x/drm/drm.h
@@ -1,24 +1,36 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-#ifndef TEGRA_DRM_H
-#define TEGRA_DRM_H 1
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fixed.h>
+#include <uapi/drm/tegra_drm.h>
 
-struct host1x {
+#include "host1x.h"
+
+struct tegra_fb {
+	struct drm_framebuffer base;
+	struct tegra_bo **planes;
+	unsigned int num_planes;
+};
+
+struct tegra_fbdev {
+	struct drm_fb_helper base;
+	struct tegra_fb *fb;
+};
+
+struct host1x_drm {
 	struct drm_device *drm;
 	struct device *dev;
 	void __iomem *regs;
@@ -33,31 +45,53 @@ struct host1x {
 	struct mutex clients_lock;
 	struct list_head clients;
 
-	struct drm_fbdev_cma *fbdev;
+	struct tegra_fbdev *fbdev;
 };
 
 struct host1x_client;
 
+struct host1x_drm_context {
+	struct host1x_client *client;
+	struct host1x_channel *channel;
+	struct list_head list;
+};
+
 struct host1x_client_ops {
 	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
 	int (*drm_exit)(struct host1x_client *client);
+	int (*open_channel)(struct host1x_client *client,
+			    struct host1x_drm_context *context);
+	void (*close_channel)(struct host1x_drm_context *context);
+	int (*submit)(struct host1x_drm_context *context,
+		      struct drm_tegra_submit *args, struct drm_device *drm,
+		      struct drm_file *file);
+};
+
+struct host1x_drm_file {
+	struct list_head contexts;
 };
 
 struct host1x_client {
-	struct host1x *host1x;
+	struct host1x_drm *host1x;
 	struct device *dev;
 
 	const struct host1x_client_ops *ops;
 
+	enum host1x_class class;
+	struct host1x_channel *channel;
+
+	struct host1x_syncpt **syncpts;
+	unsigned int num_syncpts;
+
 	struct list_head list;
 };
 
-extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x *host1x);
+extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x_drm *host1x);
 
-extern int host1x_register_client(struct host1x *host1x,
+extern int host1x_register_client(struct host1x_drm *host1x,
 				  struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x *host1x,
+extern int host1x_unregister_client(struct host1x_drm *host1x,
 				    struct host1x_client *client);
 
 struct tegra_output;
@@ -66,7 +100,7 @@ struct tegra_dc {
 	struct host1x_client client;
 	spinlock_t lock;
 
-	struct host1x *host1x;
+	struct host1x_drm *host1x;
 	struct device *dev;
 
 	struct drm_crtc base;
@@ -226,12 +260,12 @@ extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+				    unsigned int index);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
+extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct platform_driver tegra_host1x_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_dc_driver;
 extern struct drm_driver tegra_drm_driver;
 
-#endif /* TEGRA_DRM_H */
+#endif /* HOST1X_DRM_H */
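
The host1x_drm_context added to drm.h above is what crosses the ioctl boundary: tegra_open_channel() stores the kernel pointer into a 64-bit args field via uintptr_t, and the other ioctls cast it back before validating ownership. A tiny userspace model of that pointer/u64 round trip follows; struct ioctl_args is an invented stand-in for drm_tegra_submit and friends, not a real UAPI struct.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct context { int id; };

/* 64-bit ioctl argument field, as in the drm_tegra_* structs. */
struct ioctl_args { uint64_t context; };

int main(void)
{
	struct context *ctx = malloc(sizeof(*ctx));
	struct ioctl_args args;
	struct context *back;

	if (!ctx)
		return 1;
	ctx->id = 7;

	/* Store: pointer -> uintptr_t -> u64, as tegra_open_channel() does. */
	args.context = (uintptr_t)ctx;

	/* Load: u64 -> uintptr_t -> pointer, as tegra_close_channel() does. */
	back = (struct context *)(uintptr_t)args.context;

	printf("round-tripped context id: %d\n", back->id);
	free(ctx);
	return 0;
}
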
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/host1x/drm/fb.c
new file mode 100644
index 000000000000..979a3e32b78b
--- /dev/null
+++ b/drivers/gpu/host1x/drm/fb.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (C) 2012-2013 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * Based on the KMS/FB CMA helpers
6 * Copyright (C) 2012 Analog Device Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14
15#include "drm.h"
16#include "gem.h"
17
18static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
19{
20 return container_of(fb, struct tegra_fb, base);
21}
22
23static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
24{
25 return container_of(helper, struct tegra_fbdev, base);
26}
27
28struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
29 unsigned int index)
30{
31 struct tegra_fb *fb = to_tegra_fb(framebuffer);
32
33 if (index >= drm_format_num_planes(framebuffer->pixel_format))
34 return NULL;
35
36 return fb->planes[index];
37}
38
39static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
40{
41 struct tegra_fb *fb = to_tegra_fb(framebuffer);
42 unsigned int i;
43
44 for (i = 0; i < fb->num_planes; i++) {
45 struct tegra_bo *bo = fb->planes[i];
46
47 if (bo)
48 drm_gem_object_unreference_unlocked(&bo->gem);
49 }
50
51 drm_framebuffer_cleanup(framebuffer);
52 kfree(fb->planes);
53 kfree(fb);
54}
55
56static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
57 struct drm_file *file, unsigned int *handle)
58{
59 struct tegra_fb *fb = to_tegra_fb(framebuffer);
60
61 return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
62}
63
64static struct drm_framebuffer_funcs tegra_fb_funcs = {
65 .destroy = tegra_fb_destroy,
66 .create_handle = tegra_fb_create_handle,
67};
68
69static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
70 struct drm_mode_fb_cmd2 *mode_cmd,
71 struct tegra_bo **planes,
72 unsigned int num_planes)
73{
74 struct tegra_fb *fb;
75 unsigned int i;
76 int err;
77
78 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
79 if (!fb)
80 return ERR_PTR(-ENOMEM);
81
82 fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
83 if (!fb->planes) {
84 kfree(fb);
85 return ERR_PTR(-ENOMEM);
86 }
87 fb->num_planes = num_planes;
88 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
89
90 for (i = 0; i < fb->num_planes; i++)
91 fb->planes[i] = planes[i];
92
93 err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
94 if (err < 0) {
95 dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
96 err);
97 kfree(fb->planes);
98 kfree(fb);
99 return ERR_PTR(err);
100 }
101
102 return fb;
103}
104
105static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
106 struct drm_file *file,
107 struct drm_mode_fb_cmd2 *cmd)
108{
109 unsigned int hsub, vsub, i;
110 struct tegra_bo *planes[4];
111 struct drm_gem_object *gem;
112 struct tegra_fb *fb;
113 int err;
114
115 hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
116 vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
117
118 for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
119 unsigned int width = cmd->width / (i ? hsub : 1);
120 unsigned int height = cmd->height / (i ? vsub : 1);
121 unsigned int size, bpp;
122
123 gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
124 if (!gem) {
125 err = -ENXIO;
126 goto unreference;
127 }
128
129 bpp = drm_format_plane_cpp(cmd->pixel_format, i);
130
131 size = (height - 1) * cmd->pitches[i] +
132 width * bpp + cmd->offsets[i];
133
134 if (gem->size < size) {
135 err = -EINVAL;
136 goto unreference;
137 }
138
139 planes[i] = to_tegra_bo(gem);
140 }
141
142 fb = tegra_fb_alloc(drm, cmd, planes, i);
143 if (IS_ERR(fb)) {
144 err = PTR_ERR(fb);
145 goto unreference;
146 }
147
148 return &fb->base;
149
150unreference:
151 while (i--)
152 drm_gem_object_unreference_unlocked(&planes[i]->gem);
153
154 return ERR_PTR(err);
155}
156
157static struct fb_ops tegra_fb_ops = {
158 .owner = THIS_MODULE,
159 .fb_fillrect = sys_fillrect,
160 .fb_copyarea = sys_copyarea,
161 .fb_imageblit = sys_imageblit,
162 .fb_check_var = drm_fb_helper_check_var,
163 .fb_set_par = drm_fb_helper_set_par,
164 .fb_blank = drm_fb_helper_blank,
165 .fb_pan_display = drm_fb_helper_pan_display,
166 .fb_setcmap = drm_fb_helper_setcmap,
167};
168
169static int tegra_fbdev_probe(struct drm_fb_helper *helper,
170 struct drm_fb_helper_surface_size *sizes)
171{
172 struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
173 struct drm_device *drm = helper->dev;
174 struct drm_mode_fb_cmd2 cmd = { 0 };
175 unsigned int bytes_per_pixel;
176 struct drm_framebuffer *fb;
177 unsigned long offset;
178 struct fb_info *info;
179 struct tegra_bo *bo;
180 size_t size;
181 int err;
182
183 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
184
185 cmd.width = sizes->surface_width;
186 cmd.height = sizes->surface_height;
187 cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
188 cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
189 sizes->surface_depth);
190
191 size = cmd.pitches[0] * cmd.height;
192
193 bo = tegra_bo_create(drm, size);
194 if (IS_ERR(bo))
195 return PTR_ERR(bo);
196
197 info = framebuffer_alloc(0, drm->dev);
198 if (!info) {
199 dev_err(drm->dev, "failed to allocate framebuffer info\n");
200 tegra_bo_free_object(&bo->gem);
201 return -ENOMEM;
202 }
203
204 fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
205 if (IS_ERR(fbdev->fb)) {
206 dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
207 err = PTR_ERR(fbdev->fb);
208 drm_gem_object_unreference_unlocked(&bo->gem);
209 goto release;
210 }
211 fb = &fbdev->fb->base;
212 helper->fb = fb;
213 helper->fbdev = info;
214
215 info->par = helper;
216 info->flags = FBINFO_FLAG_DEFAULT;
217 info->fbops = &tegra_fb_ops;
218
219 err = fb_alloc_cmap(&info->cmap, 256, 0);
220 if (err < 0) {
221 dev_err(drm->dev, "failed to allocate color map: %d\n", err);
222 goto destroy;
223 }
224
225 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
226 drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
227
228 offset = info->var.xoffset * bytes_per_pixel +
229 info->var.yoffset * fb->pitches[0];
230
231 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
232 info->screen_base = bo->vaddr + offset;
233 info->screen_size = size;
234 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
235 info->fix.smem_len = size;
236
237 return 0;
238
239destroy:
240 drm_framebuffer_unregister_private(fb);
241 tegra_fb_destroy(fb);
242release:
243 framebuffer_release(info);
244 return err;
245}
246
247static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
248 .fb_probe = tegra_fbdev_probe,
249};
250
251static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
252 unsigned int preferred_bpp,
253 unsigned int num_crtc,
254 unsigned int max_connectors)
255{
256 struct drm_fb_helper *helper;
257 struct tegra_fbdev *fbdev;
258 int err;
259
260 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
261 if (!fbdev) {
262 dev_err(drm->dev, "failed to allocate DRM fbdev\n");
263 return ERR_PTR(-ENOMEM);
264 }
265
266 fbdev->base.funcs = &tegra_fb_helper_funcs;
267 helper = &fbdev->base;
268
269 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
270 if (err < 0) {
271 dev_err(drm->dev, "failed to initialize DRM FB helper\n");
272 goto free;
273 }
274
275 err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
276 if (err < 0) {
277 dev_err(drm->dev, "failed to add connectors\n");
278 goto fini;
279 }
280
281 drm_helper_disable_unused_functions(drm);
282
283 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
284 if (err < 0) {
285 dev_err(drm->dev, "failed to set initial configuration\n");
286 goto fini;
287 }
288
289 return fbdev;
290
291fini:
292 drm_fb_helper_fini(&fbdev->base);
293free:
294 kfree(fbdev);
295 return ERR_PTR(err);
296}
297
298static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
299{
300 struct fb_info *info = fbdev->base.fbdev;
301
302 if (info) {
303 int err;
304
305 err = unregister_framebuffer(info);
306 if (err < 0)
307 DRM_DEBUG_KMS("failed to unregister framebuffer\n");
308
309 if (info->cmap.len)
310 fb_dealloc_cmap(&info->cmap);
311
312 framebuffer_release(info);
313 }
314
315 if (fbdev->fb) {
316 drm_framebuffer_unregister_private(&fbdev->fb->base);
317 tegra_fb_destroy(&fbdev->fb->base);
318 }
319
320 drm_fb_helper_fini(&fbdev->base);
321 kfree(fbdev);
322}
323
324static void tegra_fb_output_poll_changed(struct drm_device *drm)
325{
326 struct host1x_drm *host1x = drm->dev_private;
327
328 if (host1x->fbdev)
329 drm_fb_helper_hotplug_event(&host1x->fbdev->base);
330}
331
332static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
333 .fb_create = tegra_fb_create,
334 .output_poll_changed = tegra_fb_output_poll_changed,
335};
336
337int tegra_drm_fb_init(struct drm_device *drm)
338{
339 struct host1x_drm *host1x = drm->dev_private;
340 struct tegra_fbdev *fbdev;
341
342 drm->mode_config.min_width = 0;
343 drm->mode_config.min_height = 0;
344
345 drm->mode_config.max_width = 4096;
346 drm->mode_config.max_height = 4096;
347
348 drm->mode_config.funcs = &tegra_drm_mode_funcs;
349
350 fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
351 drm->mode_config.num_connector);
352 if (IS_ERR(fbdev))
353 return PTR_ERR(fbdev);
354
355 host1x->fbdev = fbdev;
356
357 return 0;
358}
359
360void tegra_drm_fb_exit(struct drm_device *drm)
361{
362 struct host1x_drm *host1x = drm->dev_private;
363
364 tegra_fbdev_free(host1x->fbdev);
365}
366
367void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
368{
369 if (fbdev) {
370 drm_modeset_lock_all(fbdev->base.dev);
371 drm_fb_helper_restore_fbdev_mode(&fbdev->base);
372 drm_modeset_unlock_all(fbdev->base.dev);
373 }
374}
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
new file mode 100644
index 000000000000..c5e9a9b494c2
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -0,0 +1,270 @@
1/*
2 * NVIDIA Tegra DRM GEM helper functions
3 *
4 * Copyright (C) 2012 Sascha Hauer, Pengutronix
5 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
6 *
7 * Based on the GEM/CMA helpers
8 *
9 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/mm.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/export.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29
30#include "gem.h"
31
32static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
33{
34 return container_of(bo, struct tegra_bo, base);
35}
36
37static void tegra_bo_put(struct host1x_bo *bo)
38{
39 struct tegra_bo *obj = host1x_to_drm_bo(bo);
40 struct drm_device *drm = obj->gem.dev;
41
42 mutex_lock(&drm->struct_mutex);
43 drm_gem_object_unreference(&obj->gem);
44 mutex_unlock(&drm->struct_mutex);
45}
46
47static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
48{
49 struct tegra_bo *obj = host1x_to_drm_bo(bo);
50
51 return obj->paddr;
52}
53
54static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
55{
56}
57
58static void *tegra_bo_mmap(struct host1x_bo *bo)
59{
60 struct tegra_bo *obj = host1x_to_drm_bo(bo);
61
62 return obj->vaddr;
63}
64
65static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
66{
67}
68
69static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
70{
71 struct tegra_bo *obj = host1x_to_drm_bo(bo);
72
73 return obj->vaddr + page * PAGE_SIZE;
74}
75
76static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
77 void *addr)
78{
79}
80
81static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
82{
83 struct tegra_bo *obj = host1x_to_drm_bo(bo);
84 struct drm_device *drm = obj->gem.dev;
85
86 mutex_lock(&drm->struct_mutex);
87 drm_gem_object_reference(&obj->gem);
88 mutex_unlock(&drm->struct_mutex);
89
90 return bo;
91}
92
93const struct host1x_bo_ops tegra_bo_ops = {
94 .get = tegra_bo_get,
95 .put = tegra_bo_put,
96 .pin = tegra_bo_pin,
97 .unpin = tegra_bo_unpin,
98 .mmap = tegra_bo_mmap,
99 .munmap = tegra_bo_munmap,
100 .kmap = tegra_bo_kmap,
101 .kunmap = tegra_bo_kunmap,
102};
103
104static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
105{
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107}
108
109unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
110{
111 return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
112}
113
114struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
115{
116 struct tegra_bo *bo;
117 int err;
118
119 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
120 if (!bo)
121 return ERR_PTR(-ENOMEM);
122
123 host1x_bo_init(&bo->base, &tegra_bo_ops);
124 size = round_up(size, PAGE_SIZE);
125
126 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
127 GFP_KERNEL | __GFP_NOWARN);
128 if (!bo->vaddr) {
129 dev_err(drm->dev, "failed to allocate buffer with size %u\n",
130 size);
131 err = -ENOMEM;
132 goto err_dma;
133 }
134
135 err = drm_gem_object_init(drm, &bo->gem, size);
136 if (err)
137 goto err_init;
138
139 err = drm_gem_create_mmap_offset(&bo->gem);
140 if (err)
141 goto err_mmap;
142
143 return bo;
144
145err_mmap:
146 drm_gem_object_release(&bo->gem);
147err_init:
148 tegra_bo_destroy(drm, bo);
149err_dma:
150 kfree(bo);
151
152 return ERR_PTR(err);
153
154}
155
156struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
157 struct drm_device *drm,
158 unsigned int size,
159 unsigned int *handle)
160{
161 struct tegra_bo *bo;
162 int ret;
163
164 bo = tegra_bo_create(drm, size);
165 if (IS_ERR(bo))
166 return bo;
167
168 ret = drm_gem_handle_create(file, &bo->gem, handle);
169 if (ret)
170 goto err;
171
172 drm_gem_object_unreference_unlocked(&bo->gem);
173
174 return bo;
175
176err:
177 tegra_bo_free_object(&bo->gem);
178 return ERR_PTR(ret);
179}
180
181void tegra_bo_free_object(struct drm_gem_object *gem)
182{
183 struct tegra_bo *bo = to_tegra_bo(gem);
184
185 if (gem->map_list.map)
186 drm_gem_free_mmap_offset(gem);
187
188 drm_gem_object_release(gem);
189 tegra_bo_destroy(gem->dev, bo);
190
191 kfree(bo);
192}
193
194int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
195 struct drm_mode_create_dumb *args)
196{
197 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
198 struct tegra_bo *bo;
199
200 if (args->pitch < min_pitch)
201 args->pitch = min_pitch;
202
203 if (args->size < args->pitch * args->height)
204 args->size = args->pitch * args->height;
205
206 bo = tegra_bo_create_with_handle(file, drm, args->size,
207 &args->handle);
208 if (IS_ERR(bo))
209 return PTR_ERR(bo);
210
211 return 0;
212}
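The clamping above is the usual dumb-buffer contract: the pitch is rounded up to whole bytes and the size to pitch times height. A standalone sketch of that arithmetic with assumed values (1920x1080 at 32 bpp):

#include <stdio.h>

/* Worked example of the pitch/size clamping in tegra_bo_dumb_create():
 * pitch >= ceil(width * bpp / 8), size >= pitch * height. */
int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int min_pitch = (width * bpp + 7) / 8; /* DIV_ROUND_UP */
	unsigned int size = min_pitch * height;

	printf("pitch %u bytes, size %u bytes\n", min_pitch, size);
	/* 7680 bytes per line, 8294400 bytes (~7.9 MiB) total */
	return 0;
}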
213
214int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
215 uint32_t handle, uint64_t *offset)
216{
217 struct drm_gem_object *gem;
218 struct tegra_bo *bo;
219
220 mutex_lock(&drm->struct_mutex);
221
222 gem = drm_gem_object_lookup(drm, file, handle);
223 if (!gem) {
224		dev_err(drm->dev, "failed to look up GEM object\n");
225 mutex_unlock(&drm->struct_mutex);
226 return -EINVAL;
227 }
228
229 bo = to_tegra_bo(gem);
230
231 *offset = tegra_bo_get_mmap_offset(bo);
232
233 drm_gem_object_unreference(gem);
234
235 mutex_unlock(&drm->struct_mutex);
236
237 return 0;
238}
239
240const struct vm_operations_struct tegra_bo_vm_ops = {
241 .open = drm_gem_vm_open,
242 .close = drm_gem_vm_close,
243};
244
245int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
246{
247 struct drm_gem_object *gem;
248 struct tegra_bo *bo;
249 int ret;
250
251 ret = drm_gem_mmap(file, vma);
252 if (ret)
253 return ret;
254
255 gem = vma->vm_private_data;
256 bo = to_tegra_bo(gem);
257
258 ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
259 vma->vm_end - vma->vm_start, vma->vm_page_prot);
260 if (ret)
261 drm_gem_vm_close(vma);
262
263 return ret;
264}
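From userspace, this mmap path is reached through the generic dumb-buffer ioctls: create a buffer, ask for its fake mmap offset (produced by tegra_bo_dumb_map_offset() above), then mmap the DRM fd at that offset so tegra_drm_mmap() can remap the backing pages. A minimal sketch, assuming libdrm's drmIoctl and a /dev/dri/card0 node:

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <xf86drm.h>   /* drmIoctl; pulls in the dumb-buffer UAPI structs */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed device path */
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* map.offset is the fake offset; the driver's mmap handler
	 * remaps the contiguous backing pages behind it. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	if (ptr != MAP_FAILED)
		munmap(ptr, create.size);

	close(fd);
	return 0;
}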
265
266int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
267 unsigned int handle)
268{
269 return drm_gem_handle_delete(file, handle);
270}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
new file mode 100644
index 000000000000..34de2b486eb7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -0,0 +1,59 @@
1/*
2 * Tegra host1x GEM implementation
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_GEM_H
20#define __HOST1X_GEM_H
21
22#include <drm/drm.h>
23#include <drm/drmP.h>
24
25#include "host1x_bo.h"
26
27struct tegra_bo {
28 struct drm_gem_object gem;
29 struct host1x_bo base;
30 dma_addr_t paddr;
31 void *vaddr;
32};
33
34static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
35{
36 return container_of(gem, struct tegra_bo, gem);
37}
38
39extern const struct host1x_bo_ops tegra_bo_ops;
40
41struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
42struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
43 struct drm_device *drm,
44 unsigned int size,
45 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem);
47unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
48int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
49 struct drm_mode_create_dumb *args);
50int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
51 uint32_t handle, uint64_t *offset);
52int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
53 unsigned int handle);
54
55int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
56
57extern const struct vm_operations_struct tegra_bo_vm_ops;
58
59#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644
index 000000000000..6a45ae090ee7
--- /dev/null
+++ b/drivers/gpu/host1x/drm/gr2d.c
@@ -0,0 +1,339 @@
1/*
2 * drivers/video/tegra/host/gr2d/gr2d.c
3 *
4 * Tegra Graphics 2D
5 *
6 * Copyright (c) 2012-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25
26#include "channel.h"
27#include "drm.h"
28#include "gem.h"
29#include "job.h"
30#include "host1x.h"
31#include "host1x_bo.h"
32#include "host1x_client.h"
33#include "syncpt.h"
34
35struct gr2d {
36 struct host1x_client client;
37 struct clk *clk;
38 struct host1x_channel *channel;
39 unsigned long *addr_regs;
40};
41
42static inline struct gr2d *to_gr2d(struct host1x_client *client)
43{
44 return container_of(client, struct gr2d, client);
45}
46
47static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
48
49static int gr2d_client_init(struct host1x_client *client,
50 struct drm_device *drm)
51{
52 return 0;
53}
54
55static int gr2d_client_exit(struct host1x_client *client)
56{
57 return 0;
58}
59
60static int gr2d_open_channel(struct host1x_client *client,
61 struct host1x_drm_context *context)
62{
63 struct gr2d *gr2d = to_gr2d(client);
64
65 context->channel = host1x_channel_get(gr2d->channel);
66
67 if (!context->channel)
68 return -ENOMEM;
69
70 return 0;
71}
72
73static void gr2d_close_channel(struct host1x_drm_context *context)
74{
75 host1x_channel_put(context->channel);
76}
77
78static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
79 struct drm_file *file,
80 u32 handle)
81{
82 struct drm_gem_object *gem;
83 struct tegra_bo *bo;
84
85 gem = drm_gem_object_lookup(drm, file, handle);
86 if (!gem)
87		return NULL;
88
89 mutex_lock(&drm->struct_mutex);
90 drm_gem_object_unreference(gem);
91 mutex_unlock(&drm->struct_mutex);
92
93 bo = to_tegra_bo(gem);
94 return &bo->base;
95}
96
97static int gr2d_submit(struct host1x_drm_context *context,
98 struct drm_tegra_submit *args, struct drm_device *drm,
99 struct drm_file *file)
100{
101 struct host1x_job *job;
102 unsigned int num_cmdbufs = args->num_cmdbufs;
103 unsigned int num_relocs = args->num_relocs;
104 unsigned int num_waitchks = args->num_waitchks;
105	struct drm_tegra_cmdbuf __user *cmdbufs =
106		(void __user *)(uintptr_t)args->cmdbufs;
107	struct drm_tegra_reloc __user *relocs =
108		(void __user *)(uintptr_t)args->relocs;
109	struct drm_tegra_waitchk __user *waitchks =
110		(void __user *)(uintptr_t)args->waitchks;
111 struct drm_tegra_syncpt syncpt;
112 int err;
113
114	/* We don't yet support more than one syncpt_incr struct per submit */
115 if (args->num_syncpts != 1)
116 return -EINVAL;
117
118 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
119 args->num_relocs, args->num_waitchks);
120 if (!job)
121 return -ENOMEM;
122
123 job->num_relocs = args->num_relocs;
124 job->num_waitchk = args->num_waitchks;
125 job->client = (u32)args->context;
126 job->class = context->client->class;
127 job->serialize = true;
128
129 while (num_cmdbufs) {
130 struct drm_tegra_cmdbuf cmdbuf;
131 struct host1x_bo *bo;
132
133		err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)) ? -EFAULT : 0;
134		if (err)
135			goto fail;
136		err = -ENOENT;
137		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
138		if (!bo)
139			goto fail;
140
141 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
142 num_cmdbufs--;
143 cmdbufs++;
144 }
145
146	err = copy_from_user(job->relocarray, relocs,
147			     sizeof(*relocs) * num_relocs) ? -EFAULT : 0;
148	if (err)
149		goto fail;
150
151 while (num_relocs--) {
152 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
153 struct host1x_bo *cmdbuf, *target;
154
155 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
156 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
157
158 reloc->cmdbuf = cmdbuf;
159 reloc->target = target;
160		err = -ENOENT;
161		if (!reloc->target || !reloc->cmdbuf)
162			goto fail;
163 }
164
165	err = copy_from_user(job->waitchk, waitchks,
166			     sizeof(*waitchks) * num_waitchks) ? -EFAULT : 0;
167	if (err)
168		goto fail;
169
170	err = copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
171			     sizeof(syncpt)) ? -EFAULT : 0;
172	if (err)
173		goto fail;
174
175 job->syncpt_id = syncpt.id;
176 job->syncpt_incrs = syncpt.incrs;
177 job->timeout = 10000;
178 job->is_addr_reg = gr2d_is_addr_reg;
179
180 if (args->timeout && args->timeout < 10000)
181 job->timeout = args->timeout;
182
183 err = host1x_job_pin(job, context->client->dev);
184 if (err)
185 goto fail;
186
187 err = host1x_job_submit(job);
188 if (err)
189 goto fail_submit;
190
191 args->fence = job->syncpt_end;
192
193 host1x_job_put(job);
194 return 0;
195
196fail_submit:
197 host1x_job_unpin(job);
198fail:
199 host1x_job_put(job);
200 return err;
201}
202
203static struct host1x_client_ops gr2d_client_ops = {
204 .drm_init = gr2d_client_init,
205 .drm_exit = gr2d_client_exit,
206 .open_channel = gr2d_open_channel,
207 .close_channel = gr2d_close_channel,
208 .submit = gr2d_submit,
209};
210
211static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
212{
213 const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
214 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
215 unsigned long *bitmap;
216 int i;
217
218	bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
219			      GFP_KERNEL);
220	if (WARN_ON(!bitmap)) return;
221 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
222 u32 reg = gr2d_addr_regs[i];
223 bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
224 }
225
226 gr2d->addr_regs = bitmap;
227}
228
229static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
230{
231 struct gr2d *gr2d = dev_get_drvdata(dev);
232
233 switch (class) {
234 case HOST1X_CLASS_HOST1X:
235 return reg == 0x2b;
236 case HOST1X_CLASS_GR2D:
237 case HOST1X_CLASS_GR2D_SB:
238 reg &= 0xff;
239 if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
240 return 1;
241 default:
242 return 0;
243 }
244}
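gr2d_is_addr_reg() reduces each lookup to one word index plus one mask, over the bitmap built once by gr2d_init_addr_reg_map() above. A userspace re-creation of the same test (register list abridged from gr2d_addr_regs; macros mirror the kernel's BIT_WORD/BIT_MASK):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	const unsigned int addr_regs[] = { 0x1a, 0x1b, 0x26, 0x2b };
	unsigned long bitmap[256 / BITS_PER_LONG] = { 0 };
	unsigned int i, reg = 0x2b;

	/* set one bit per address register, as the probe-time map does */
	for (i = 0; i < sizeof(addr_regs) / sizeof(addr_regs[0]); i++)
		bitmap[BIT_WORD(addr_regs[i])] |= BIT_MASK(addr_regs[i]);

	printf("reg 0x%02x is %san address register\n", reg,
	       (bitmap[BIT_WORD(reg)] & BIT_MASK(reg)) ? "" : "not ");
	return 0;
}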
245
246static const struct of_device_id gr2d_match[] = {
247 { .compatible = "nvidia,tegra30-gr2d" },
248 { .compatible = "nvidia,tegra20-gr2d" },
249 { },
250};
251
252static int gr2d_probe(struct platform_device *pdev)
253{
254 struct device *dev = &pdev->dev;
255 struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
256 int err;
257 struct gr2d *gr2d = NULL;
258 struct host1x_syncpt **syncpts;
259
260 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
261 if (!gr2d)
262 return -ENOMEM;
263
264 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
265 if (!syncpts)
266 return -ENOMEM;
267
268 gr2d->clk = devm_clk_get(dev, NULL);
269 if (IS_ERR(gr2d->clk)) {
270 dev_err(dev, "cannot get clock\n");
271 return PTR_ERR(gr2d->clk);
272 }
273
274 err = clk_prepare_enable(gr2d->clk);
275 if (err) {
276 dev_err(dev, "cannot turn on clock\n");
277 return err;
278 }
279
280 gr2d->channel = host1x_channel_request(dev);
281 if (!gr2d->channel)
282 return -ENOMEM;
283
284 *syncpts = host1x_syncpt_request(dev, 0);
285 if (!(*syncpts)) {
286 host1x_channel_free(gr2d->channel);
287 return -ENOMEM;
288 }
289
290 gr2d->client.ops = &gr2d_client_ops;
291 gr2d->client.dev = dev;
292 gr2d->client.class = HOST1X_CLASS_GR2D;
293 gr2d->client.syncpts = syncpts;
294 gr2d->client.num_syncpts = 1;
295
296 err = host1x_register_client(host1x, &gr2d->client);
297 if (err < 0) {
298 dev_err(dev, "failed to register host1x client: %d\n", err);
299 return err;
300 }
301
302 gr2d_init_addr_reg_map(dev, gr2d);
303
304 platform_set_drvdata(pdev, gr2d);
305
306 return 0;
307}
308
309static int __exit gr2d_remove(struct platform_device *pdev)
310{
311 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
312 struct gr2d *gr2d = platform_get_drvdata(pdev);
313 unsigned int i;
314 int err;
315
316 err = host1x_unregister_client(host1x, &gr2d->client);
317 if (err < 0) {
318 dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
319 return err;
320 }
321
322 for (i = 0; i < gr2d->client.num_syncpts; i++)
323 host1x_syncpt_free(gr2d->client.syncpts[i]);
324
325 host1x_channel_free(gr2d->channel);
326 clk_disable_unprepare(gr2d->clk);
327
328 return 0;
329}
330
331struct platform_driver tegra_gr2d_driver = {
332 .probe = gr2d_probe,
333 .remove = __exit_p(gr2d_remove),
334 .driver = {
335 .owner = THIS_MODULE,
336 .name = "gr2d",
337 .of_match_table = gr2d_match,
338 }
339};
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index bb747f6cd1a4..01097da09f7f 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -22,6 +22,7 @@
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
+#include "host1x_client.h"
 
 struct tegra_hdmi {
 	struct host1x_client client;
@@ -1189,7 +1190,7 @@ static const struct host1x_client_ops hdmi_client_ops = {
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi;
 	struct resource *regs;
 	int err;
@@ -1278,7 +1279,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
 	int err;
 
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/host1x/drm/hdmi.h
index 52ac36e08ccb..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/host1x/drm/hdmi.h
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/host1x/drm/output.c
index 8140fc6c34d8..8140fc6c34d8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/host1x/drm/output.c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f20260..ed4416f20260 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
new file mode 100644
index 000000000000..a2bc1e65e972
--- /dev/null
+++ b/drivers/gpu/host1x/host1x.h
@@ -0,0 +1,30 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __LINUX_HOST1X_H
22#define __LINUX_HOST1X_H
23
24enum host1x_class {
25 HOST1X_CLASS_HOST1X = 0x1,
26 HOST1X_CLASS_GR2D = 0x51,
27 HOST1X_CLASS_GR2D_SB = 0x52
28};
29
30#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
new file mode 100644
index 000000000000..4c1f10bd773d
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_bo.h
@@ -0,0 +1,87 @@
1/*
2 * Tegra host1x Memory Management Abstraction header
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _HOST1X_BO_H
20#define _HOST1X_BO_H
21
22struct host1x_bo;
23
24struct host1x_bo_ops {
25 struct host1x_bo *(*get)(struct host1x_bo *bo);
26 void (*put)(struct host1x_bo *bo);
27 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
28 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
29 void *(*mmap)(struct host1x_bo *bo);
30 void (*munmap)(struct host1x_bo *bo, void *addr);
31 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
32 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
33};
34
35struct host1x_bo {
36 const struct host1x_bo_ops *ops;
37};
38
39static inline void host1x_bo_init(struct host1x_bo *bo,
40 const struct host1x_bo_ops *ops)
41{
42 bo->ops = ops;
43}
44
45static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
46{
47 return bo->ops->get(bo);
48}
49
50static inline void host1x_bo_put(struct host1x_bo *bo)
51{
52 bo->ops->put(bo);
53}
54
55static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
56 struct sg_table **sgt)
57{
58 return bo->ops->pin(bo, sgt);
59}
60
61static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
62{
63 bo->ops->unpin(bo, sgt);
64}
65
66static inline void *host1x_bo_mmap(struct host1x_bo *bo)
67{
68 return bo->ops->mmap(bo);
69}
70
71static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
72{
73 bo->ops->munmap(bo, addr);
74}
75
76static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
77{
78 return bo->ops->kmap(bo, pagenum);
79}
80
81static inline void host1x_bo_kunmap(struct host1x_bo *bo,
82 unsigned int pagenum, void *addr)
83{
84 bo->ops->kunmap(bo, pagenum, addr);
85}
86
87#endif
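The pattern in this header is a hand-rolled vtable: drivers embed struct host1x_bo in their own buffer object, point ops at their implementation, and the core only ever calls through the inline wrappers. A minimal userspace sketch of the same shape (fake_bo and its ops are invented for illustration, not part of the driver):

#include <stdio.h>
#include <stddef.h>

struct host1x_bo;

struct host1x_bo_ops {
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
};

/* driver-side object embedding the core-visible one */
struct fake_bo {
	struct host1x_bo base;
	char backing[64];
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void *fake_mmap(struct host1x_bo *bo)
{
	return container_of(bo, struct fake_bo, base)->backing;
}

static void fake_munmap(struct host1x_bo *bo, void *addr) { }

static const struct host1x_bo_ops fake_ops = {
	.mmap = fake_mmap,
	.munmap = fake_munmap,
};

int main(void)
{
	struct fake_bo bo = { .base = { .ops = &fake_ops } };
	void *ptr = bo.base.ops->mmap(&bo.base); /* host1x_bo_mmap() */

	printf("mapped at %p\n", ptr);
	bo.base.ops->munmap(&bo.base, ptr);      /* host1x_bo_munmap() */
	return 0;
}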
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/host1x_client.h
new file mode 100644
index 000000000000..9b85f10f4a44
--- /dev/null
+++ b/drivers/gpu/host1x/host1x_client.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef HOST1X_CLIENT_H
18#define HOST1X_CLIENT_H
19
20struct device;
21struct platform_device;
22
23#ifdef CONFIG_DRM_TEGRA
24int host1x_drm_alloc(struct platform_device *pdev);
25#else
26static inline int host1x_drm_alloc(struct platform_device *pdev)
27{
28 return 0;
29}
30#endif
31
32void host1x_set_drm_data(struct device *dev, void *data);
33void *host1x_get_drm_data(struct device *dev);
34
35#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
new file mode 100644
index 000000000000..9b50863a2236
--- /dev/null
+++ b/drivers/gpu/host1x/hw/Makefile
@@ -0,0 +1,6 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-hw-objs = \
4 host1x01.o
5
6obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644
index 000000000000..590b69d91dab
--- /dev/null
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -0,0 +1,326 @@
1/*
2 * Tegra host1x Command DMA
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h>
22
23#include "cdma.h"
24#include "channel.h"
25#include "dev.h"
26#include "debug.h"
27
28/*
29 * Put the restart opcode at the end of push buffer memory.
30 */
31static void push_buffer_init(struct push_buffer *pb)
32{
33 *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
34}
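The word written here is the RESTART opcode, which sends DMA GET back to the start of the push buffer when it runs off the end. A standalone sketch of the encoding, mirroring host1x_opcode_restart() from host1x01_hardware.h later in this diff:

#include <stdio.h>

/* opcode 5 in the top nibble, 16-byte-aligned target in the low bits */
static unsigned int host1x_opcode_restart(unsigned int address)
{
	return (5u << 28) | (address >> 4);
}

int main(void)
{
	printf("restart to 0: 0x%08x\n", host1x_opcode_restart(0));
	/* prints 0x50000000: DMA GET wraps back to the buffer start */
	return 0;
}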
35
36/*
37 * Increment a timed-out buffer's syncpoint via the CPU.
38 */
39static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
40 u32 syncpt_incrs, u32 syncval, u32 nr_slots)
41{
42 struct host1x *host1x = cdma_to_host1x(cdma);
43 struct push_buffer *pb = &cdma->push_buffer;
44 u32 i;
45
46 for (i = 0; i < syncpt_incrs; i++)
47 host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
48
49 /* after CPU incr, ensure shadow is up to date */
50 host1x_syncpt_load(cdma->timeout.syncpt);
51
52 /* NOP all the PB slots */
53 while (nr_slots--) {
54 u32 *p = (u32 *)((u32)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
58 pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 }
61 wmb();
62}
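Each push-buffer slot is two 32-bit words, so the loop above advances GET in 8-byte steps and relies on the buffer size being a power of two for the wrap. A standalone sketch with an assumed 4 KiB push buffer:

#include <stdio.h>

int main(void)
{
	unsigned int size_bytes = 4096;  /* assumed power-of-two PB size */
	unsigned int getptr = 4088;      /* last slot, 0xff8 */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		printf("slot at 0x%03x\n", getptr);
		getptr = (getptr + 8) & (size_bytes - 1); /* wrap mask */
	}
	/* prints 0xff8, then wraps to 0x000, 0x008 */
	return 0;
}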
63
64/*
65 * Start channel DMA
66 */
67static void cdma_start(struct host1x_cdma *cdma)
68{
69 struct host1x_channel *ch = cdma_to_channel(cdma);
70
71 if (cdma->running)
72 return;
73
74 cdma->last_pos = cdma->push_buffer.pos;
75
76 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
77 HOST1X_CHANNEL_DMACTRL);
78
79 /* set base, put and end pointer */
80 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
81 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
82 host1x_ch_writel(ch, cdma->push_buffer.phys +
83 cdma->push_buffer.size_bytes + 4,
84 HOST1X_CHANNEL_DMAEND);
85
86 /* reset GET */
87 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
88 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
89 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
90 HOST1X_CHANNEL_DMACTRL);
91
92 /* start the command DMA */
93 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
94
95 cdma->running = true;
96}
97
98/*
99 * Similar to cdma_start(), but rather than starting from an idle
100 * state (where DMA GET is set to DMA PUT), on a timeout we restore
101 * DMA GET from an explicit value (so DMA may again be pending).
102 */
103static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
104{
105 struct host1x *host1x = cdma_to_host1x(cdma);
106 struct host1x_channel *ch = cdma_to_channel(cdma);
107
108 if (cdma->running)
109 return;
110
111 cdma->last_pos = cdma->push_buffer.pos;
112
113 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
114 HOST1X_CHANNEL_DMACTRL);
115
116 /* set base, end pointer (all of memory) */
117 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
118 host1x_ch_writel(ch, cdma->push_buffer.phys +
119 cdma->push_buffer.size_bytes,
120 HOST1X_CHANNEL_DMAEND);
121
122 /* set GET, by loading the value in PUT (then reset GET) */
123 host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
124 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
125 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
126 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
127 HOST1X_CHANNEL_DMACTRL);
128
129 dev_dbg(host1x->dev,
130 "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
131 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
132 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
133 cdma->last_pos);
134
135 /* deassert GET reset and set PUT */
136 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
137 HOST1X_CHANNEL_DMACTRL);
138 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
139
140 /* start the command DMA */
141 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
142
143 cdma->running = true;
144}
145
146/*
147 * Kick channel DMA into action by writing its PUT offset (if it has changed)
148 */
149static void cdma_flush(struct host1x_cdma *cdma)
150{
151 struct host1x_channel *ch = cdma_to_channel(cdma);
152
153 if (cdma->push_buffer.pos != cdma->last_pos) {
154 host1x_ch_writel(ch, cdma->push_buffer.pos,
155 HOST1X_CHANNEL_DMAPUT);
156 cdma->last_pos = cdma->push_buffer.pos;
157 }
158}
159
160static void cdma_stop(struct host1x_cdma *cdma)
161{
162 struct host1x_channel *ch = cdma_to_channel(cdma);
163
164 mutex_lock(&cdma->lock);
165 if (cdma->running) {
166 host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
167 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
168 HOST1X_CHANNEL_DMACTRL);
169 cdma->running = false;
170 }
171 mutex_unlock(&cdma->lock);
172}
173
174/*
175 * Stops both channel's command processor and CDMA immediately.
176 * Also, tears down the channel and resets corresponding module.
177 */
178static void cdma_freeze(struct host1x_cdma *cdma)
179{
180 struct host1x *host = cdma_to_host1x(cdma);
181 struct host1x_channel *ch = cdma_to_channel(cdma);
182 u32 cmdproc_stop;
183
184 if (cdma->torndown && !cdma->running) {
185 dev_warn(host->dev, "Already torn down\n");
186 return;
187 }
188
189 dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
190
191 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
192 cmdproc_stop |= BIT(ch->id);
193 host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
194
195 dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
196 __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
197 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
198 cdma->last_pos);
199
200 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
201 HOST1X_CHANNEL_DMACTRL);
202
203 host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
204
205 cdma->running = false;
206 cdma->torndown = true;
207}
208
209static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
210{
211 struct host1x *host1x = cdma_to_host1x(cdma);
212 struct host1x_channel *ch = cdma_to_channel(cdma);
213 u32 cmdproc_stop;
214
215 dev_dbg(host1x->dev,
216 "resuming channel (id %d, DMAGET restart = 0x%x)\n",
217 ch->id, getptr);
218
219 cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
220 cmdproc_stop &= ~(BIT(ch->id));
221 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
222
223 cdma->torndown = false;
224 cdma_timeout_restart(cdma, getptr);
225}
226
227/*
228 * If this timeout fires, the current sync_queue entry has exceeded
229 * its TTL: the userctx should be timed out, already-issued submits
230 * cleaned up, and future submits made to return an error.
231 */
232static void cdma_timeout_handler(struct work_struct *work)
233{
234 struct host1x_cdma *cdma;
235 struct host1x *host1x;
236 struct host1x_channel *ch;
237
238 u32 syncpt_val;
239
240 u32 prev_cmdproc, cmdproc_stop;
241
242 cdma = container_of(to_delayed_work(work), struct host1x_cdma,
243 timeout.wq);
244 host1x = cdma_to_host1x(cdma);
245 ch = cdma_to_channel(cdma);
246
247 host1x_debug_dump(cdma_to_host1x(cdma));
248
249 mutex_lock(&cdma->lock);
250
251 if (!cdma->timeout.client) {
252 dev_dbg(host1x->dev,
253 "cdma_timeout: expired, but has no clientid\n");
254 mutex_unlock(&cdma->lock);
255 return;
256 }
257
258 /* stop processing to get a clean snapshot */
259 prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
260 cmdproc_stop = prev_cmdproc | BIT(ch->id);
261 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
262
263 dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
264 prev_cmdproc, cmdproc_stop);
265
266 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
267
268 /* has buffer actually completed? */
269 if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
270 dev_dbg(host1x->dev,
271 "cdma_timeout: expired, but buffer had completed\n");
272 /* restore */
273 cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
274 host1x_sync_writel(host1x, cmdproc_stop,
275 HOST1X_SYNC_CMDPROC_STOP);
276 mutex_unlock(&cdma->lock);
277 return;
278 }
279
280 dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
281 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
282 syncpt_val, cdma->timeout.syncpt_val);
283
284 /* stop HW, resetting channel/module */
285 host1x_hw_cdma_freeze(host1x, cdma);
286
287 host1x_cdma_update_sync_queue(cdma, ch->dev);
288 mutex_unlock(&cdma->lock);
289}
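The (s32)(syncpt_val - threshold) >= 0 test above is the standard wrap-safe comparison for a free-running u32 counter: the subtraction happens modulo 2^32 and the sign bit of the difference decides whether the threshold has been passed. A standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int val = 5;                /* counter after wrapping 2^32 */
	unsigned int thresh = 0xfffffff0u;   /* threshold set before wrap */

	/* naive val >= thresh would say "not reached"; the signed
	 * difference gets it right */
	printf("reached: %d\n", (int)(val - thresh) >= 0); /* prints 1 */
	return 0;
}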
290
291/*
292 * Init timeout resources
293 */
294static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
295{
296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
297 cdma->timeout.initialized = true;
298
299 return 0;
300}
301
302/*
303 * Clean up timeout resources
304 */
305static void cdma_timeout_destroy(struct host1x_cdma *cdma)
306{
307 if (cdma->timeout.initialized)
308 cancel_delayed_work(&cdma->timeout.wq);
309 cdma->timeout.initialized = false;
310}
311
312static const struct host1x_cdma_ops host1x_cdma_ops = {
313 .start = cdma_start,
314 .stop = cdma_stop,
315 .flush = cdma_flush,
316
317 .timeout_init = cdma_timeout_init,
318 .timeout_destroy = cdma_timeout_destroy,
319 .freeze = cdma_freeze,
320 .resume = cdma_resume,
321 .timeout_cpu_incr = cdma_timeout_cpu_incr,
322};
323
324static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
325 .init = push_buffer_init,
326};
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644
index 000000000000..ee199623e365
--- /dev/null
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -0,0 +1,168 @@
1/*
2 * Tegra host1x Channel
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/slab.h>
20#include <trace/events/host1x.h>
21
22#include "host1x.h"
23#include "host1x_bo.h"
24#include "channel.h"
25#include "dev.h"
26#include "intr.h"
27#include "job.h"
28
29#define HOST1X_CHANNEL_SIZE 16384
30#define TRACE_MAX_LENGTH 128U
31
32static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
33 u32 offset, u32 words)
34{
35 void *mem = NULL;
36
37 if (host1x_debug_trace_cmdbuf)
38 mem = host1x_bo_mmap(bo);
39
40 if (mem) {
41 u32 i;
42 /*
43 * Write in batches of 128 as there seems to be a limit
44 * of how much you can output to ftrace at once.
45 */
46 for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
47 trace_host1x_cdma_push_gather(
48 dev_name(cdma_to_channel(cdma)->dev),
49 (u32)bo, min(words - i, TRACE_MAX_LENGTH),
50 offset + i * sizeof(u32), mem);
51 }
52 host1x_bo_munmap(bo, mem);
53 }
54}
55
56static void submit_gathers(struct host1x_job *job)
57{
58 struct host1x_cdma *cdma = &job->channel->cdma;
59 unsigned int i;
60
61 for (i = 0; i < job->num_gathers; i++) {
62 struct host1x_job_gather *g = &job->gathers[i];
63 u32 op1 = host1x_opcode_gather(g->words);
64 u32 op2 = g->base + g->offset;
65 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
66 host1x_cdma_push(cdma, op1, op2);
67 }
68}
69
70static int channel_submit(struct host1x_job *job)
71{
72 struct host1x_channel *ch = job->channel;
73 struct host1x_syncpt *sp;
74 u32 user_syncpt_incrs = job->syncpt_incrs;
75 u32 prev_max = 0;
76 u32 syncval;
77 int err;
78 struct host1x_waitlist *completed_waiter = NULL;
79 struct host1x *host = dev_get_drvdata(ch->dev->parent);
80
81 sp = host->syncpt + job->syncpt_id;
82 trace_host1x_channel_submit(dev_name(ch->dev),
83 job->num_gathers, job->num_relocs,
84 job->num_waitchk, job->syncpt_id,
85 job->syncpt_incrs);
86
87 /* before error checks, return current max */
88 prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
89
90 /* get submit lock */
91 err = mutex_lock_interruptible(&ch->submitlock);
92 if (err)
93 goto error;
94
95 completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
96 if (!completed_waiter) {
97 mutex_unlock(&ch->submitlock);
98 err = -ENOMEM;
99 goto error;
100 }
101
102 /* begin a CDMA submit */
103 err = host1x_cdma_begin(&ch->cdma, job);
104 if (err) {
105 mutex_unlock(&ch->submitlock);
106 goto error;
107 }
108
109 if (job->serialize) {
110 /*
111 * Force serialization by inserting a host wait for the
112 * previous job to finish before this one can commence.
113 */
114 host1x_cdma_push(&ch->cdma,
115 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
116 host1x_uclass_wait_syncpt_r(), 1),
117 host1x_class_host_wait_syncpt(job->syncpt_id,
118 host1x_syncpt_read_max(sp)));
119 }
120
121 syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
122
123 job->syncpt_end = syncval;
124
125 /* add a setclass for modules that require it */
126 if (job->class)
127 host1x_cdma_push(&ch->cdma,
128 host1x_opcode_setclass(job->class, 0, 0),
129 HOST1X_OPCODE_NOP);
130
131 submit_gathers(job);
132
133	/* end CDMA submit & stash pinned buffers into the sync queue */
134 host1x_cdma_end(&ch->cdma, job);
135
136 trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
137
138 /* schedule a submit complete interrupt */
139 err = host1x_intr_add_action(host, job->syncpt_id, syncval,
140 HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
141 completed_waiter, NULL);
142 completed_waiter = NULL;
143 WARN(err, "Failed to set submit complete interrupt");
144
145 mutex_unlock(&ch->submitlock);
146
147 return 0;
148
149error:
150 kfree(completed_waiter);
151 return err;
152}
153
154static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
155 unsigned int index)
156{
157 ch->id = index;
158 mutex_init(&ch->reflock);
159 mutex_init(&ch->submitlock);
160
161 ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
162 return 0;
163}
164
165static const struct host1x_channel_ops host1x_channel_ops = {
166 .init = host1x_channel_init,
167 .submit = channel_submit,
168};
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644
index 000000000000..334c038052f5
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Erik Gilling <konkers@android.com>
4 *
5 * Copyright (C) 2011-2013 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/mm.h>
21#include <linux/scatterlist.h>
22
23#include <linux/io.h>
24
25#include "dev.h"
26#include "debug.h"
27#include "cdma.h"
28#include "channel.h"
29#include "host1x_bo.h"
30
31#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
32
33enum {
34 HOST1X_OPCODE_SETCLASS = 0x00,
35 HOST1X_OPCODE_INCR = 0x01,
36 HOST1X_OPCODE_NONINCR = 0x02,
37 HOST1X_OPCODE_MASK = 0x03,
38 HOST1X_OPCODE_IMM = 0x04,
39 HOST1X_OPCODE_RESTART = 0x05,
40 HOST1X_OPCODE_GATHER = 0x06,
41 HOST1X_OPCODE_EXTEND = 0x0e,
42};
43
44enum {
45 HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK = 0x00,
46 HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
47};
48
49static unsigned int show_channel_command(struct output *o, u32 val)
50{
51 unsigned mask;
52 unsigned subop;
53
54 switch (val >> 28) {
55 case HOST1X_OPCODE_SETCLASS:
56 mask = val & 0x3f;
57 if (mask) {
58 host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
59 val >> 6 & 0x3ff,
60 val >> 16 & 0xfff, mask);
61 return hweight8(mask);
62 } else {
63 host1x_debug_output(o, "SETCL(class=%03x)\n",
64 val >> 6 & 0x3ff);
65 return 0;
66 }
67
68 case HOST1X_OPCODE_INCR:
69 host1x_debug_output(o, "INCR(offset=%03x, [",
70 val >> 16 & 0xfff);
71 return val & 0xffff;
72
73 case HOST1X_OPCODE_NONINCR:
74 host1x_debug_output(o, "NONINCR(offset=%03x, [",
75 val >> 16 & 0xfff);
76 return val & 0xffff;
77
78 case HOST1X_OPCODE_MASK:
79 mask = val & 0xffff;
80 host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
81 val >> 16 & 0xfff, mask);
82 return hweight16(mask);
83
84 case HOST1X_OPCODE_IMM:
85 host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
86 val >> 16 & 0xfff, val & 0xffff);
87 return 0;
88
89 case HOST1X_OPCODE_RESTART:
90 host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
91 return 0;
92
93 case HOST1X_OPCODE_GATHER:
94 host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
95 val >> 16 & 0xfff, val >> 15 & 0x1,
96 val >> 14 & 0x1, val & 0x3fff);
97 return 1;
98
99 case HOST1X_OPCODE_EXTEND:
100 subop = val >> 24 & 0xf;
101 if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
102 host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
103 val & 0xff);
104 else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
105 host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
106 val & 0xff);
107 else
108 host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
109 return 0;
110
111 default:
112 return 0;
113 }
114}
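For reference, here is one command word decoded by hand the way show_channel_command() does it: the opcode class lives in bits 31:28 and the register offset in bits 27:16. A standalone sketch using an INCR word with invented field values and a simplified output format:

#include <stdio.h>

int main(void)
{
	unsigned int val = (1u << 28) | (0x100 << 16) | 4; /* INCR word */

	if (val >> 28 == 1)
		printf("INCR(offset=%03x, count=%u)\n",
		       (val >> 16) & 0xfff, val & 0xffff);
	/* prints INCR(offset=100, count=4): 4 data words follow */
	return 0;
}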
115
116static void show_gather(struct output *o, phys_addr_t phys_addr,
117 unsigned int words, struct host1x_cdma *cdma,
118 phys_addr_t pin_addr, u32 *map_addr)
119{
120 /* Map dmaget cursor to corresponding mem handle */
121 u32 offset = phys_addr - pin_addr;
122 unsigned int data_count = 0, i;
123
124	/*
125	 * Sometimes we're given a different hardware address for the same
126	 * page - in that case the computed offset is invalid and we just
127	 * have to bail out.
128	 */
129 if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
130 host1x_debug_output(o, "[address mismatch]\n");
131 return;
132 }
133
134 for (i = 0; i < words; i++) {
135 u32 addr = phys_addr + i * 4;
136 u32 val = *(map_addr + offset / 4 + i);
137
138 if (!data_count) {
139 host1x_debug_output(o, "%08x: %08x:", addr, val);
140 data_count = show_channel_command(o, val);
141 } else {
142 host1x_debug_output(o, "%08x%s", val,
143 data_count > 0 ? ", " : "])\n");
144 data_count--;
145 }
146 }
147}
148
149static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
150{
151 struct host1x_job *job;
152
153 list_for_each_entry(job, &cdma->sync_queue, list) {
154 int i;
155 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
156 job, job->syncpt_id, job->syncpt_end,
157 job->first_get, job->timeout,
158 job->num_slots, job->num_unpins);
159
160 for (i = 0; i < job->num_gathers; i++) {
161 struct host1x_job_gather *g = &job->gathers[i];
162 u32 *mapped;
163
164 if (job->gather_copy_mapped)
165 mapped = (u32 *)job->gather_copy_mapped;
166 else
167 mapped = host1x_bo_mmap(g->bo);
168
169 if (!mapped) {
170 host1x_debug_output(o, "[could not mmap]\n");
171 continue;
172 }
173
174 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
175 g->base, g->offset, g->words);
176
177 show_gather(o, g->base + g->offset, g->words, cdma,
178 g->base, mapped);
179
180 if (!job->gather_copy_mapped)
181 host1x_bo_munmap(g->bo, mapped);
182 }
183 }
184}
185
186static void host1x_debug_show_channel_cdma(struct host1x *host,
187 struct host1x_channel *ch,
188 struct output *o)
189{
190 struct host1x_cdma *cdma = &ch->cdma;
191 u32 dmaput, dmaget, dmactrl;
192 u32 cbstat, cbread;
193 u32 val, base, baseval;
194
195 dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
196 dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
197 dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
198 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
199 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
200
201 host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
202
203 if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
204 !ch->cdma.push_buffer.mapped) {
205 host1x_debug_output(o, "inactive\n\n");
206 return;
207 }
208
209 if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
210 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
211 HOST1X_UCLASS_WAIT_SYNCPT)
212 host1x_debug_output(o, "waiting on syncpt %d val %d\n",
213 cbread >> 24, cbread & 0xffffff);
214 else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
215 HOST1X_CLASS_HOST1X &&
216 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
217 HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
218
219 base = (cbread >> 16) & 0xff;
220 baseval =
221 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
222 val = cbread & 0xffff;
223 host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
224 cbread >> 24, baseval + val, base,
225 baseval, val);
226 } else
227 host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
228 HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
229 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
230 cbread);
231
232 host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
233 dmaput, dmaget, dmactrl);
234 host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
235
236 show_channel_gathers(o, cdma);
237 host1x_debug_output(o, "\n");
238}
239
240static void host1x_debug_show_channel_fifo(struct host1x *host,
241 struct host1x_channel *ch,
242 struct output *o)
243{
244 u32 val, rd_ptr, wr_ptr, start, end;
245 unsigned int data_count = 0;
246
247 host1x_debug_output(o, "%d: fifo:\n", ch->id);
248
249 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
250 host1x_debug_output(o, "FIFOSTAT %08x\n", val);
251 if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
252 host1x_debug_output(o, "[empty]\n");
253 return;
254 }
255
256 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
257 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
258 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
259 HOST1X_SYNC_CFPEEK_CTRL);
260
261 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
262 rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
263 wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
264
265 val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
266 start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
267 end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
268
269 do {
270 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
271 host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
272 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
273 HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
274 HOST1X_SYNC_CFPEEK_CTRL);
275 val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
276
277 if (!data_count) {
278 host1x_debug_output(o, "%08x:", val);
279 data_count = show_channel_command(o, val);
280 } else {
281 host1x_debug_output(o, "%08x%s", val,
282 data_count > 0 ? ", " : "])\n");
283 data_count--;
284 }
285
286 if (rd_ptr == end)
287 rd_ptr = start;
288 else
289 rd_ptr++;
290 } while (rd_ptr != wr_ptr);
291
292 if (data_count)
293 host1x_debug_output(o, ", ...])\n");
294 host1x_debug_output(o, "\n");
295
296 host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
297}
298
299static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
300{
301 int i;
302
303 host1x_debug_output(o, "---- mlocks ----\n");
304 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
305 u32 owner =
306 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
307 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
308 host1x_debug_output(o, "%d: locked by channel %d\n",
309 i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
310 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
311 host1x_debug_output(o, "%d: locked by cpu\n", i);
312 else
313 host1x_debug_output(o, "%d: unlocked\n", i);
314 }
315 host1x_debug_output(o, "\n");
316}
317
318static const struct host1x_debug_ops host1x_debug_ops = {
319 .show_channel_cdma = host1x_debug_show_channel_cdma,
320 .show_channel_fifo = host1x_debug_show_channel_fifo,
321 .show_mlocks = host1x_debug_show_mlocks,
322};
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644
index 000000000000..a14e91cd1e58
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* include hw specification */
20#include "hw/host1x01.h"
21#include "hw/host1x01_hardware.h"
22
23/* include code */
24#include "hw/cdma_hw.c"
25#include "hw/channel_hw.c"
26#include "hw/debug_hw.c"
27#include "hw/intr_hw.c"
28#include "hw/syncpt_hw.c"
29
30#include "dev.h"
31
32int host1x01_init(struct host1x *host)
33{
34 host->channel_op = &host1x_channel_ops;
35 host->cdma_op = &host1x_cdma_ops;
36 host->cdma_pb_op = &host1x_pushbuffer_ops;
37 host->syncpt_op = &host1x_syncpt_ops;
38 host->intr_op = &host1x_intr_ops;
39 host->debug_op = &host1x_debug_ops;
40
41 return 0;
42}
diff --git a/drivers/gpu/host1x/hw/host1x01.h b/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644
index 000000000000..2706b6743250
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01.h
@@ -0,0 +1,25 @@
1/*
2 * Host1x init for T20 and T30 Architecture Chips
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef HOST1X_HOST1X01_H
19#define HOST1X_HOST1X01_H
20
21struct host1x;
22
23int host1x01_init(struct host1x *host);
24
25#endif /* HOST1X_HOST1X01_H */
diff --git a/drivers/gpu/host1x/hw/host1x01_hardware.h b/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644
index 000000000000..5f0fb866efa8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x01_hardware.h
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Register Offsets for Tegra20 and Tegra30
3 *
4 * Copyright (c) 2010-2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_HOST1X01_HARDWARE_H
20#define __HOST1X_HOST1X01_HARDWARE_H
21
22#include <linux/types.h>
23#include <linux/bitops.h>
24
25#include "hw_host1x01_channel.h"
26#include "hw_host1x01_sync.h"
27#include "hw_host1x01_uclass.h"
28
29static inline u32 host1x_class_host_wait_syncpt(
30 unsigned indx, unsigned threshold)
31{
32 return host1x_uclass_wait_syncpt_indx_f(indx)
33 | host1x_uclass_wait_syncpt_thresh_f(threshold);
34}
35
36static inline u32 host1x_class_host_load_syncpt_base(
37 unsigned indx, unsigned threshold)
38{
39 return host1x_uclass_load_syncpt_base_base_indx_f(indx)
40 | host1x_uclass_load_syncpt_base_value_f(threshold);
41}
42
43static inline u32 host1x_class_host_wait_syncpt_base(
44 unsigned indx, unsigned base_indx, unsigned offset)
45{
46 return host1x_uclass_wait_syncpt_base_indx_f(indx)
47 | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
48 | host1x_uclass_wait_syncpt_base_offset_f(offset);
49}
50
51static inline u32 host1x_class_host_incr_syncpt_base(
52 unsigned base_indx, unsigned offset)
53{
54 return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
55 | host1x_uclass_incr_syncpt_base_offset_f(offset);
56}
57
58static inline u32 host1x_class_host_incr_syncpt(
59 unsigned cond, unsigned indx)
60{
61 return host1x_uclass_incr_syncpt_cond_f(cond)
62 | host1x_uclass_incr_syncpt_indx_f(indx);
63}
64
65static inline u32 host1x_class_host_indoff_reg_write(
66 unsigned mod_id, unsigned offset, bool auto_inc)
67{
68 u32 v = host1x_uclass_indoff_indbe_f(0xf)
69 | host1x_uclass_indoff_indmodid_f(mod_id)
70 | host1x_uclass_indoff_indroffset_f(offset);
71 if (auto_inc)
72 v |= host1x_uclass_indoff_autoinc_f(1);
73 return v;
74}
75
76static inline u32 host1x_class_host_indoff_reg_read(
77 unsigned mod_id, unsigned offset, bool auto_inc)
78{
79 u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
80 | host1x_uclass_indoff_indroffset_f(offset)
81 | host1x_uclass_indoff_rwn_read_v();
82 if (auto_inc)
83 v |= host1x_uclass_indoff_autoinc_f(1);
84 return v;
85}
86
87
88/* cdma opcodes */
89static inline u32 host1x_opcode_setclass(
90 unsigned class_id, unsigned offset, unsigned mask)
91{
92 return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
93}
94
95static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
96{
97 return (1 << 28) | (offset << 16) | count;
98}
99
100static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
101{
102 return (2 << 28) | (offset << 16) | count;
103}
104
105static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
106{
107 return (3 << 28) | (offset << 16) | mask;
108}
109
110static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
111{
112 return (4 << 28) | (offset << 16) | value;
113}
114
115static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
116{
117 return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
118 host1x_class_host_incr_syncpt(cond, indx));
119}
120
121static inline u32 host1x_opcode_restart(unsigned address)
122{
123 return (5 << 28) | (address >> 4);
124}
125
126static inline u32 host1x_opcode_gather(unsigned count)
127{
128 return (6 << 28) | count;
129}
130
131static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
132{
133 return (6 << 28) | (offset << 16) | BIT(15) | count;
134}
135
136static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
137{
138 return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
139}
140
141#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
142
143#endif
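
The opcode helpers above pack host1x CDMA command words: bits 31:28 select the opcode and the payload layout depends on the opcode. As a minimal sketch (not part of the patch), a WAIT_SYNCPT method for syncpoint 4 at threshold 100 could be assembled for a command stream like this:

	u32 buf[2];

	/* NONINCR opcode: one data word for the WAIT_SYNCPT method register */
	buf[0] = host1x_opcode_nonincr(host1x_uclass_wait_syncpt_r(), 1);
	/* method data: syncpt index in bits 31:24, threshold in bits 23:0 */
	buf[1] = host1x_class_host_wait_syncpt(4, 100);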
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644
index 000000000000..b4bc7ca4e051
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_channel.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x_channel_host1x_h__
52#define __hw_host1x_channel_host1x_h__
53
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 10) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120#endif
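
These generated accessors compose exactly as the naming comment describes: _r() yields a register offset and _V() extracts a field from a value read back. A short sketch, assuming the channel-aperture accessor host1x_ch_readl() from dev.h:

	u32 stat = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);

	if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(stat))
		; /* the channel's command FIFO has drained */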
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644
index 000000000000..ac704e579977
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x01_sync_h__
52#define __hw_host1x01_sync_h__
53
54#define REGISTER_STRIDE 4
55
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0x400 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0x40 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0x60 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0x68 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0x80 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x1ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x1ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
135{
136 return (v & 0xf) << 8;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
139 host1x_sync_mlock_owner_chid_f(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x500 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0x700 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0x720 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x1ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0x7) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x1ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x1ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0x758 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif /* __hw_host1x01_sync_h__ */
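
The CFPEEK registers form a small debug window into a channel's command FIFO: software writes an address and channel number into CFPEEK_CTRL with the enable bit set, then reads the selected word back through CFPEEK_READ. A sketch of that sequence, using the host1x_sync_writel()/host1x_sync_readl() accessors from dev.h:

	/* peek one word of channel 2's command FIFO at FIFO address 'ptr' */
	host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
			   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(2) |
			   HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(ptr),
			   HOST1X_SYNC_CFPEEK_CTRL);
	val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);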
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644
index 000000000000..42f3ce19ca32
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef __hw_host1x_uclass_host1x_h__
52#define __hw_host1x_uclass_host1x_h__
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{
116 return (v & 0xff) << 24;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
119 host1x_uclass_load_syncpt_base_base_indx_f(v)
120static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
121{
122 return (v & 0xffffff) << 0;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
125 host1x_uclass_load_syncpt_base_value_f(v)
126static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
127{
128 return (v & 0xff) << 24;
129}
130#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
131 host1x_uclass_incr_syncpt_base_base_indx_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
133{
134 return (v & 0xffffff) << 0;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
137 host1x_uclass_incr_syncpt_base_offset_f(v)
138static inline u32 host1x_uclass_indoff_r(void)
139{
140 return 0x2d;
141}
142#define HOST1X_UCLASS_INDOFF \
143 host1x_uclass_indoff_r()
144static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
145{
146 return (v & 0xf) << 28;
147}
148#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
149 host1x_uclass_indoff_indbe_f(v)
150static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
151{
152 return (v & 0x1) << 27;
153}
154#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
155 host1x_uclass_indoff_autoinc_f(v)
156static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
157{
158 return (v & 0xff) << 18;
159}
160#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
161 host1x_uclass_indoff_indmodid_f(v)
162static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
163{
164 return (v & 0xffff) << 2;
165}
166#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
167 host1x_uclass_indoff_indroffset_f(v)
168static inline u32 host1x_uclass_indoff_rwn_read_v(void)
169{
170 return 1;
171}
172#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
173	host1x_uclass_indoff_rwn_read_v()
174#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 000000000000..b592eef1efcb
--- /dev/null
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Copyright (c) 2010-2013, NVIDIA Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/io.h>
23#include <asm/mach/irq.h>
24
25#include "intr.h"
26#include "dev.h"
27
28/*
29 * Sync point threshold interrupt service function
30 * Handles sync point threshold triggers, in interrupt context
31 */
32static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
33{
34 unsigned int id = syncpt->id;
35 struct host1x *host = syncpt->host;
36
37 host1x_sync_writel(host, BIT_MASK(id),
38 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
39 host1x_sync_writel(host, BIT_MASK(id),
40 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
41
42 queue_work(host->intr_wq, &syncpt->intr.work);
43}
44
45static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
46{
47 struct host1x *host = dev_id;
48 unsigned long reg;
49 int i, id;
50
51 for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
52 reg = host1x_sync_readl(host,
53 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
54 for_each_set_bit(id, &reg, BITS_PER_LONG) {
55 struct host1x_syncpt *syncpt =
56 host->syncpt + (i * BITS_PER_LONG + id);
57 host1x_intr_syncpt_handle(syncpt);
58 }
59 }
60
61 return IRQ_HANDLED;
62}
63
64static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
65{
66 u32 i;
67
68 for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
69 host1x_sync_writel(host, 0xffffffffu,
70 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
71 host1x_sync_writel(host, 0xffffffffu,
72 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
73 }
74}
75
76static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
77 void (*syncpt_thresh_work)(struct work_struct *))
78{
79 int i, err;
80
81 host1x_hw_intr_disable_all_syncpt_intrs(host);
82
83 for (i = 0; i < host->info->nb_pts; i++)
84 INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
85
86 err = devm_request_irq(host->dev, host->intr_syncpt_irq,
87 syncpt_thresh_isr, IRQF_SHARED,
88 "host1x_syncpt", host);
89	if (err) {
90 WARN_ON(1);
91 return err;
92 }
93
94 /* disable the ip_busy_timeout. this prevents write drops */
95 host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
96
97 /*
98	 * increase the auto-ack timeout to the maximum value. 2D will hang
99 * otherwise on Tegra2.
100 */
101 host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
102
103 /* update host clocks per usec */
104 host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
105
106 return 0;
107}
108
109static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
110 u32 id, u32 thresh)
111{
112 host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
113}
114
115static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
116{
117 host1x_sync_writel(host, BIT_MASK(id),
118 HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
119}
120
121static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
122{
123 host1x_sync_writel(host, BIT_MASK(id),
124 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
125 host1x_sync_writel(host, BIT_MASK(id),
126 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
127}
128
129static int _host1x_free_syncpt_irq(struct host1x *host)
130{
131 devm_free_irq(host->dev, host->intr_syncpt_irq, host);
132 flush_workqueue(host->intr_wq);
133 return 0;
134}
135
136static const struct host1x_intr_ops host1x_intr_ops = {
137 .init_host_sync = _host1x_intr_init_host_sync,
138 .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
139 .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
140 .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
141 .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
142 .free_syncpt_irq = _host1x_free_syncpt_irq,
143};
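
The syncpoint interrupt registers are banked 32 syncpoints per 32-bit word, which is why the handlers above index them with BIT_WORD() and ack individual syncpoints with BIT_MASK(). For example, for syncpoint 37:

	BIT_WORD(37);	/* 37 / 32 == 1: second status/enable register */
	BIT_MASK(37);	/* 1UL << (37 % 32) == BIT(5): bit within that word */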
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644
index 000000000000..61174990102a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -0,0 +1,114 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/io.h>
20
21#include "dev.h"
22#include "syncpt.h"
23
24/*
25 * Write the current syncpoint value back to hw.
26 */
27static void syncpt_restore(struct host1x_syncpt *sp)
28{
29 struct host1x *host = sp->host;
30 int min = host1x_syncpt_read_min(sp);
31 host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
32}
33
34/*
35 * Write the current waitbase value back to hw.
36 */
37static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
38{
39 struct host1x *host = sp->host;
40 host1x_sync_writel(host, sp->base_val,
41 HOST1X_SYNC_SYNCPT_BASE(sp->id));
42}
43
44/*
45 * Read waitbase value from hw.
46 */
47static void syncpt_read_wait_base(struct host1x_syncpt *sp)
48{
49 struct host1x *host = sp->host;
50 sp->base_val =
51 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
52}
53
54/*
55 * Updates the last value read from hardware.
56 */
57static u32 syncpt_load(struct host1x_syncpt *sp)
58{
59 struct host1x *host = sp->host;
60 u32 old, live;
61
62 /* Loop in case there's a race writing to min_val */
63 do {
64 old = host1x_syncpt_read_min(sp);
65 live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
66 } while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
67
68 if (!host1x_syncpt_check_max(sp, live))
69 dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
70 __func__, sp->id, host1x_syncpt_read_min(sp),
71 host1x_syncpt_read_max(sp));
72
73 return live;
74}
75
76/*
77 * Write a cpu syncpoint increment to the hardware, without touching
78 * the cache.
79 */
80static void syncpt_cpu_incr(struct host1x_syncpt *sp)
81{
82 struct host1x *host = sp->host;
83 u32 reg_offset = sp->id / 32;
84
85 if (!host1x_syncpt_client_managed(sp) &&
86 host1x_syncpt_idle(sp)) {
87 dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
88 sp->id);
89 host1x_debug_dump(sp->host);
90 return;
91 }
92 host1x_sync_writel(host, BIT_MASK(sp->id),
93 HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
94 wmb();
95}
96
97/* remove a wait pointed to by patch_addr */
98static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
99{
100 u32 override = host1x_class_host_wait_syncpt(
101 HOST1X_SYNCPT_RESERVED, 0);
102
103 *((u32 *)patch_addr) = override;
104 return 0;
105}
106
107static const struct host1x_syncpt_ops host1x_syncpt_ops = {
108 .restore = syncpt_restore,
109 .restore_wait_base = syncpt_restore_wait_base,
110 .load_wait_base = syncpt_read_wait_base,
111 .load = syncpt_load,
112 .cpu_incr = syncpt_cpu_incr,
113 .patch_wait = syncpt_patch_wait,
114};
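
Syncpoint values are free-running 32-bit counters that eventually wrap, so ordering tests in the driver are made on the signed difference rather than on raw magnitudes. A minimal sketch of the idea behind host1x_syncpt_is_expired() (the real helper in syncpt.c also accounts for client-managed syncpoints):

	static bool syncpt_reached(u32 val, u32 thresh)
	{
		/* the signed difference tolerates 32-bit wraparound */
		return (s32)(val - thresh) >= 0;
	}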
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644
index 000000000000..2491bf82e30c
--- /dev/null
+++ b/drivers/gpu/host1x/intr.c
@@ -0,0 +1,354 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/interrupt.h>
21#include <linux/slab.h>
22#include <linux/irq.h>
23
24#include <trace/events/host1x.h>
25#include "channel.h"
26#include "dev.h"
27#include "intr.h"
28
29/* Wait list management */
30
31enum waitlist_state {
32 WLS_PENDING,
33 WLS_REMOVED,
34 WLS_CANCELLED,
35 WLS_HANDLED
36};
37
38static void waiter_release(struct kref *kref)
39{
40 kfree(container_of(kref, struct host1x_waitlist, refcount));
41}
42
43/*
44 * add a waiter to a waiter queue, sorted by threshold
45 * returns true if it was added at the head of the queue
46 */
47static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
48 struct list_head *queue)
49{
50 struct host1x_waitlist *pos;
51 u32 thresh = waiter->thresh;
52
53 list_for_each_entry_reverse(pos, queue, list)
54 if ((s32)(pos->thresh - thresh) <= 0) {
55 list_add(&waiter->list, &pos->list);
56 return false;
57 }
58
59 list_add(&waiter->list, queue);
60 return true;
61}
62
63/*
64 * run through a waiter queue for a single sync point ID
65 * and gather all completed waiters into lists by actions
66 */
67static void remove_completed_waiters(struct list_head *head, u32 sync,
68 struct list_head completed[HOST1X_INTR_ACTION_COUNT])
69{
70 struct list_head *dest;
71 struct host1x_waitlist *waiter, *next, *prev;
72
73 list_for_each_entry_safe(waiter, next, head, list) {
74 if ((s32)(waiter->thresh - sync) > 0)
75 break;
76
77 dest = completed + waiter->action;
78
79 /* consolidate submit cleanups */
80 if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
81 !list_empty(dest)) {
82 prev = list_entry(dest->prev,
83 struct host1x_waitlist, list);
84 if (prev->data == waiter->data) {
85 prev->count++;
86 dest = NULL;
87 }
88 }
89
90 /* PENDING->REMOVED or CANCELLED->HANDLED */
91 if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
92 list_del(&waiter->list);
93 kref_put(&waiter->refcount, waiter_release);
94 } else
95 list_move_tail(&waiter->list, dest);
96 }
97}
98
99static void reset_threshold_interrupt(struct host1x *host,
100 struct list_head *head,
101 unsigned int id)
102{
103 u32 thresh =
104 list_first_entry(head, struct host1x_waitlist, list)->thresh;
105
106 host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
107 host1x_hw_intr_enable_syncpt_intr(host, id);
108}
109
110static void action_submit_complete(struct host1x_waitlist *waiter)
111{
112 struct host1x_channel *channel = waiter->data;
113
114 host1x_cdma_update(&channel->cdma);
115
116 /* Add nr_completed to trace */
117 trace_host1x_channel_submit_complete(dev_name(channel->dev),
118 waiter->count, waiter->thresh);
119
120}
121
122static void action_wakeup(struct host1x_waitlist *waiter)
123{
124 wait_queue_head_t *wq = waiter->data;
125 wake_up(wq);
126}
127
128static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
129{
130 wait_queue_head_t *wq = waiter->data;
131 wake_up_interruptible(wq);
132}
133
134typedef void (*action_handler)(struct host1x_waitlist *waiter);
135
136static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
137 action_submit_complete,
138 action_wakeup,
139 action_wakeup_interruptible,
140};
141
142static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
143{
144 struct list_head *head = completed;
145 int i;
146
147 for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
148 action_handler handler = action_handlers[i];
149 struct host1x_waitlist *waiter, *next;
150
151 list_for_each_entry_safe(waiter, next, head, list) {
152 list_del(&waiter->list);
153 handler(waiter);
154 WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
155 WLS_REMOVED);
156 kref_put(&waiter->refcount, waiter_release);
157 }
158 }
159}
160
161/*
162 * Remove & handle all waiters that have completed for the given syncpt
163 */
164static int process_wait_list(struct host1x *host,
165 struct host1x_syncpt *syncpt,
166 u32 threshold)
167{
168 struct list_head completed[HOST1X_INTR_ACTION_COUNT];
169 unsigned int i;
170 int empty;
171
172 for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
173 INIT_LIST_HEAD(completed + i);
174
175 spin_lock(&syncpt->intr.lock);
176
177 remove_completed_waiters(&syncpt->intr.wait_head, threshold,
178 completed);
179
180 empty = list_empty(&syncpt->intr.wait_head);
181 if (empty)
182 host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
183 else
184 reset_threshold_interrupt(host, &syncpt->intr.wait_head,
185 syncpt->id);
186
187 spin_unlock(&syncpt->intr.lock);
188
189 run_handlers(completed);
190
191 return empty;
192}
193
194/*
195 * Sync point threshold interrupt service thread function
196 * Handles sync point threshold triggers, in thread context
197 */
198
199static void syncpt_thresh_work(struct work_struct *work)
200{
201 struct host1x_syncpt_intr *syncpt_intr =
202 container_of(work, struct host1x_syncpt_intr, work);
203 struct host1x_syncpt *syncpt =
204 container_of(syncpt_intr, struct host1x_syncpt, intr);
205 unsigned int id = syncpt->id;
206 struct host1x *host = syncpt->host;
207
208 (void)process_wait_list(host, syncpt,
209 host1x_syncpt_load(host->syncpt + id));
210}
211
212int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
213 enum host1x_intr_action action, void *data,
214 struct host1x_waitlist *waiter, void **ref)
215{
216 struct host1x_syncpt *syncpt;
217 int queue_was_empty;
218
219 if (waiter == NULL) {
220 pr_warn("%s: NULL waiter\n", __func__);
221 return -EINVAL;
222 }
223
224 /* initialize a new waiter */
225 INIT_LIST_HEAD(&waiter->list);
226 kref_init(&waiter->refcount);
227 if (ref)
228 kref_get(&waiter->refcount);
229 waiter->thresh = thresh;
230 waiter->action = action;
231 atomic_set(&waiter->state, WLS_PENDING);
232 waiter->data = data;
233 waiter->count = 1;
234
235 syncpt = host->syncpt + id;
236
237 spin_lock(&syncpt->intr.lock);
238
239 queue_was_empty = list_empty(&syncpt->intr.wait_head);
240
241 if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
242 /* added at head of list - new threshold value */
243 host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
244
245 /* added as first waiter - enable interrupt */
246 if (queue_was_empty)
247 host1x_hw_intr_enable_syncpt_intr(host, id);
248 }
249
250 spin_unlock(&syncpt->intr.lock);
251
252 if (ref)
253 *ref = waiter;
254 return 0;
255}
256
257void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
258{
259 struct host1x_waitlist *waiter = ref;
260 struct host1x_syncpt *syncpt;
261
262 while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
263 WLS_REMOVED)
264 schedule();
265
266 syncpt = host->syncpt + id;
267 (void)process_wait_list(host, syncpt,
268 host1x_syncpt_load(host->syncpt + id));
269
270 kref_put(&waiter->refcount, waiter_release);
271}
272
273int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
274{
275 unsigned int id;
276 u32 nb_pts = host1x_syncpt_nb_pts(host);
277
278 mutex_init(&host->intr_mutex);
279 host->intr_syncpt_irq = irq_sync;
280 host->intr_wq = create_workqueue("host_syncpt");
281 if (!host->intr_wq)
282 return -ENOMEM;
283
284 for (id = 0; id < nb_pts; ++id) {
285 struct host1x_syncpt *syncpt = host->syncpt + id;
286
287 spin_lock_init(&syncpt->intr.lock);
288 INIT_LIST_HEAD(&syncpt->intr.wait_head);
289 snprintf(syncpt->intr.thresh_irq_name,
290 sizeof(syncpt->intr.thresh_irq_name),
291 "host1x_sp_%02d", id);
292 }
293
294 host1x_intr_start(host);
295
296 return 0;
297}
298
299void host1x_intr_deinit(struct host1x *host)
300{
301 host1x_intr_stop(host);
302 destroy_workqueue(host->intr_wq);
303}
304
305void host1x_intr_start(struct host1x *host)
306{
307 u32 hz = clk_get_rate(host->clk);
308 int err;
309
310 mutex_lock(&host->intr_mutex);
311 err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
312 syncpt_thresh_work);
313 if (err) {
314 mutex_unlock(&host->intr_mutex);
315 return;
316 }
317 mutex_unlock(&host->intr_mutex);
318}
319
320void host1x_intr_stop(struct host1x *host)
321{
322 unsigned int id;
323 struct host1x_syncpt *syncpt = host->syncpt;
324 u32 nb_pts = host1x_syncpt_nb_pts(host);
325
326 mutex_lock(&host->intr_mutex);
327
328 host1x_hw_intr_disable_all_syncpt_intrs(host);
329
330 for (id = 0; id < nb_pts; ++id) {
331 struct host1x_waitlist *waiter, *next;
332
333 list_for_each_entry_safe(waiter, next,
334 &syncpt[id].intr.wait_head, list) {
335 if (atomic_cmpxchg(&waiter->state,
336 WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
337 list_del(&waiter->list);
338 kref_put(&waiter->refcount, waiter_release);
339 }
340 }
341
342 if (!list_empty(&syncpt[id].intr.wait_head)) {
343 /* output diagnostics */
344 mutex_unlock(&host->intr_mutex);
345 pr_warn("%s cannot stop syncpt intr id=%d\n",
346 __func__, id);
347 return;
348 }
349 }
350
351 host1x_hw_intr_free_syncpt_irq(host);
352
353 mutex_unlock(&host->intr_mutex);
354}
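
The lockless state handling above depends on the declaration order of enum waitlist_state: atomic_inc_return() advances a PENDING waiter to REMOVED when its threshold is reached, and a concurrently CANCELLED waiter straight to HANDLED, so completion and cancellation can race without holding a lock across the transition. Spelled out:

	WLS_PENDING -> WLS_REMOVED   -> WLS_HANDLED  (threshold reached, handler ran)
	WLS_PENDING -> WLS_CANCELLED -> WLS_HANDLED  (cancelled via host1x_intr_put_ref())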
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644
index 000000000000..2b8adf016a05
--- /dev/null
+++ b/drivers/gpu/host1x/intr.h
@@ -0,0 +1,102 @@
1/*
2 * Tegra host1x Interrupt Management
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_INTR_H
20#define __HOST1X_INTR_H
21
22#include <linux/interrupt.h>
23#include <linux/workqueue.h>
24
25struct host1x;
26
27enum host1x_intr_action {
28 /*
29 * Perform cleanup after a submit has completed.
30 * 'data' points to a channel
31 */
32 HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
33
34 /*
35 * Wake up a task.
36 * 'data' points to a wait_queue_head_t
37 */
38 HOST1X_INTR_ACTION_WAKEUP,
39
40 /*
41	 * Wake up an interruptible task.
42 * 'data' points to a wait_queue_head_t
43 */
44 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
45
46 HOST1X_INTR_ACTION_COUNT
47};
48
49struct host1x_syncpt_intr {
50 spinlock_t lock;
51 struct list_head wait_head;
52	char thresh_irq_name[16];
53 struct work_struct work;
54};
55
56struct host1x_waitlist {
57 struct list_head list;
58 struct kref refcount;
59 u32 thresh;
60 enum host1x_intr_action action;
61 atomic_t state;
62 void *data;
63 int count;
64};
65
66/*
67 * Schedule an action to be taken when a sync point reaches the given threshold.
68 *
69 * @id the sync point
70 * @thresh the threshold
71 * @action the action to take
72 * @data a pointer to extra data depending on action, see above
73 * @waiter waiter structure - assumes ownership
74 * @ref must be passed if cancellation is possible, else NULL
75 *
76 * This is a non-blocking API.
77 */
78int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
79 enum host1x_intr_action action, void *data,
80 struct host1x_waitlist *waiter, void **ref);
81
82/*
83 * Unreference an action submitted to host1x_intr_add_action().
84 * You must call this if you passed non-NULL as ref.
85 * @ref the ref returned from host1x_intr_add_action()
86 */
87void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
88
89/* Initialize host1x sync point interrupt */
90int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
91
92/* Deinitialize host1x sync point interrupt */
93void host1x_intr_deinit(struct host1x *host);
94
95/* Enable host1x sync point interrupt */
96void host1x_intr_start(struct host1x *host);
97
98/* Disable host1x sync point interrupt */
99void host1x_intr_stop(struct host1x *host);
100
101irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
102#endif
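
A rough usage sketch of the wakeup action, along the lines of what a syncpoint wait would do (host1x_syncpt_is_expired() is the wrap-aware check from the syncpoint code; locking and timeouts omitted):

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	void *ref;
	int err;

	/* ownership of 'waiter' passes to the intr core on success */
	err = host1x_intr_add_action(host, id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP, &wq,
				     waiter, &ref);
	if (err) {
		kfree(waiter);
		return err;
	}

	wait_event(wq, host1x_syncpt_is_expired(sp, thresh));
	host1x_intr_put_ref(host, id, ref);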
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
new file mode 100644
index 000000000000..f665d679031c
--- /dev/null
+++ b/drivers/gpu/host1x/job.c
@@ -0,0 +1,603 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/err.h>
21#include <linux/kref.h>
22#include <linux/module.h>
23#include <linux/scatterlist.h>
24#include <linux/slab.h>
25#include <linux/vmalloc.h>
26#include <trace/events/host1x.h>
27
28#include "channel.h"
29#include "dev.h"
30#include "host1x_bo.h"
31#include "job.h"
32#include "syncpt.h"
33
34struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
35 u32 num_cmdbufs, u32 num_relocs,
36 u32 num_waitchks)
37{
38 struct host1x_job *job = NULL;
39 unsigned int num_unpins = num_cmdbufs + num_relocs;
40 u64 total;
41 void *mem;
42
43 /* Check that we're not going to overflow */
44 total = sizeof(struct host1x_job) +
45 num_relocs * sizeof(struct host1x_reloc) +
46 num_unpins * sizeof(struct host1x_job_unpin_data) +
47 num_waitchks * sizeof(struct host1x_waitchk) +
48 num_cmdbufs * sizeof(struct host1x_job_gather) +
49 num_unpins * sizeof(dma_addr_t) +
50 num_unpins * sizeof(u32 *);
51 if (total > ULONG_MAX)
52 return NULL;
53
54 mem = job = kzalloc(total, GFP_KERNEL);
55 if (!job)
56 return NULL;
57
58 kref_init(&job->ref);
59 job->channel = ch;
60
61 /* Redistribute memory to the structs */
62 mem += sizeof(struct host1x_job);
63 job->relocarray = num_relocs ? mem : NULL;
64 mem += num_relocs * sizeof(struct host1x_reloc);
65 job->unpins = num_unpins ? mem : NULL;
66 mem += num_unpins * sizeof(struct host1x_job_unpin_data);
67 job->waitchk = num_waitchks ? mem : NULL;
68 mem += num_waitchks * sizeof(struct host1x_waitchk);
69 job->gathers = num_cmdbufs ? mem : NULL;
70 mem += num_cmdbufs * sizeof(struct host1x_job_gather);
71 job->addr_phys = num_unpins ? mem : NULL;
72
73 job->reloc_addr_phys = job->addr_phys;
74 job->gather_addr_phys = &job->addr_phys[num_relocs];
75
76 return job;
77}
78
79struct host1x_job *host1x_job_get(struct host1x_job *job)
80{
81 kref_get(&job->ref);
82 return job;
83}
84
85static void job_free(struct kref *ref)
86{
87 struct host1x_job *job = container_of(ref, struct host1x_job, ref);
88
89 kfree(job);
90}
91
92void host1x_job_put(struct host1x_job *job)
93{
94 kref_put(&job->ref, job_free);
95}
96
97void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
98 u32 words, u32 offset)
99{
100 struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
101
102 cur_gather->words = words;
103 cur_gather->bo = bo;
104 cur_gather->offset = offset;
105 job->num_gathers++;
106}
107
108/*
109 * NULL an already satisfied WAIT_SYNCPT host method, by patching its
110 * args in the command stream. The method data is changed to reference
111 * a reserved (never handed out or incremented) HOST1X_SYNCPT_RESERVED
112 * syncpt with a matching threshold value of 0, so it is guaranteed to
113 * be popped by the host HW.
114 */
115static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
116 struct host1x_bo *h, u32 offset)
117{
118 void *patch_addr = NULL;
119
120 /* patch the wait */
121 patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
122 if (patch_addr) {
123 host1x_syncpt_patch_wait(sp,
124 patch_addr + (offset & ~PAGE_MASK));
125 host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
126 } else
127 pr_err("Could not map cmdbuf for wait check\n");
128}
129
130/*
131 * Check driver supplied waitchk structs for syncpt thresholds
132 * that have already been satisfied and NULL the comparison (to
133 * avoid a wrap condition in the HW).
134 */
135static int do_waitchks(struct host1x_job *job, struct host1x *host,
136 struct host1x_bo *patch)
137{
138 int i;
139
140 /* compare syncpt vs wait threshold */
141 for (i = 0; i < job->num_waitchk; i++) {
142 struct host1x_waitchk *wait = &job->waitchk[i];
143 struct host1x_syncpt *sp =
144 host1x_syncpt_get(host, wait->syncpt_id);
145
146 /* validate syncpt id */
147		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
148 continue;
149
150 /* skip all other gathers */
151 if (patch != wait->bo)
152 continue;
153
154 trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
155 wait->syncpt_id, wait->thresh,
156 host1x_syncpt_read_min(sp));
157
158 if (host1x_syncpt_is_expired(sp, wait->thresh)) {
159 dev_dbg(host->dev,
160 "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
161 wait->syncpt_id, sp->name, wait->thresh,
162 host1x_syncpt_read_min(sp));
163
164 host1x_syncpt_patch_offset(sp, patch, wait->offset);
165 }
166
167 wait->bo = NULL;
168 }
169
170 return 0;
171}
172
173static unsigned int pin_job(struct host1x_job *job)
174{
175 unsigned int i;
176
177 job->num_unpins = 0;
178
179 for (i = 0; i < job->num_relocs; i++) {
180 struct host1x_reloc *reloc = &job->relocarray[i];
181 struct sg_table *sgt;
182 dma_addr_t phys_addr;
183
184 reloc->target = host1x_bo_get(reloc->target);
185 if (!reloc->target)
186 goto unpin;
187
188 phys_addr = host1x_bo_pin(reloc->target, &sgt);
189 if (!phys_addr)
190 goto unpin;
191
192 job->addr_phys[job->num_unpins] = phys_addr;
193 job->unpins[job->num_unpins].bo = reloc->target;
194 job->unpins[job->num_unpins].sgt = sgt;
195 job->num_unpins++;
196 }
197
198 for (i = 0; i < job->num_gathers; i++) {
199 struct host1x_job_gather *g = &job->gathers[i];
200 struct sg_table *sgt;
201 dma_addr_t phys_addr;
202
203 g->bo = host1x_bo_get(g->bo);
204 if (!g->bo)
205 goto unpin;
206
207 phys_addr = host1x_bo_pin(g->bo, &sgt);
208 if (!phys_addr)
209 goto unpin;
210
211 job->addr_phys[job->num_unpins] = phys_addr;
212 job->unpins[job->num_unpins].bo = g->bo;
213 job->unpins[job->num_unpins].sgt = sgt;
214 job->num_unpins++;
215 }
216
217 return job->num_unpins;
218
219unpin:
220 host1x_job_unpin(job);
221 return 0;
222}
223
224static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
225{
226 int i = 0;
227 u32 last_page = ~0;
228 void *cmdbuf_page_addr = NULL;
229
230 /* pin & patch the relocs for one gather */
231 while (i < job->num_relocs) {
232 struct host1x_reloc *reloc = &job->relocarray[i];
233 u32 reloc_addr = (job->reloc_addr_phys[i] +
234 reloc->target_offset) >> reloc->shift;
235 u32 *target;
236
237 /* skip all other gathers */
238 if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
239 i++;
240 continue;
241 }
242
243 if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
244 if (cmdbuf_page_addr)
245 host1x_bo_kunmap(cmdbuf, last_page,
246 cmdbuf_page_addr);
247
248 cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
249 reloc->cmdbuf_offset >> PAGE_SHIFT);
250 last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
251
252 if (unlikely(!cmdbuf_page_addr)) {
253 pr_err("Could not map cmdbuf for relocation\n");
254 return -ENOMEM;
255 }
256 }
257
258 target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
259 *target = reloc_addr;
260
261		/* mark this reloc as handled */
262		reloc->cmdbuf = NULL;
263 }
264
265 if (cmdbuf_page_addr)
266 host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
267
268 return 0;
269}
270
271static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
272 unsigned int offset)
273{
274 offset *= sizeof(u32);
275
276 if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
277 return -EINVAL;
278
279 return 0;
280}
281
282struct host1x_firewall {
283 struct host1x_job *job;
284 struct device *dev;
285
286 unsigned int num_relocs;
287 struct host1x_reloc *reloc;
288
289 struct host1x_bo *cmdbuf_id;
290 unsigned int offset;
291
292 u32 words;
293 u32 class;
294 u32 reg;
295 u32 mask;
296 u32 count;
297};
298
299static int check_mask(struct host1x_firewall *fw)
300{
301 u32 mask = fw->mask;
302 u32 reg = fw->reg;
303
304 while (mask) {
305 if (fw->words == 0)
306 return -EINVAL;
307
308 if (mask & 1) {
309 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
310 bool bad_reloc = check_reloc(fw->reloc,
311 fw->cmdbuf_id,
312 fw->offset);
313 if (!fw->num_relocs || bad_reloc)
314 return -EINVAL;
315 fw->reloc++;
316 fw->num_relocs--;
317 }
318 fw->words--;
319 fw->offset++;
320 }
321 mask >>= 1;
322 reg++;
323 }
324
325 return 0;
326}
327
328static int check_incr(struct host1x_firewall *fw)
329{
330 u32 count = fw->count;
331 u32 reg = fw->reg;
332
333	while (count) {
334 if (fw->words == 0)
335 return -EINVAL;
336
337 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
338 bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
339 fw->offset);
340 if (!fw->num_relocs || bad_reloc)
341 return -EINVAL;
342 fw->reloc++;
343 fw->num_relocs--;
344 }
345 reg++;
346 fw->words--;
347 fw->offset++;
348 count--;
349 }
350
351 return 0;
352}
353
354static int check_nonincr(struct host1x_firewall *fw)
355{
356 int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
357 u32 count = fw->count;
358
359 while (count) {
360 if (fw->words == 0)
361 return -EINVAL;
362
363 if (is_addr_reg) {
364 bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
365 fw->offset);
366 if (!fw->num_relocs || bad_reloc)
367 return -EINVAL;
368 fw->reloc++;
369 fw->num_relocs--;
370 }
371 fw->words--;
372 fw->offset++;
373 count--;
374 }
375
376 return 0;
377}
378
379static int validate(struct host1x_job *job, struct device *dev,
380 struct host1x_job_gather *g)
381{
382 u32 *cmdbuf_base;
383 int err = 0;
384 struct host1x_firewall fw;
385
386 fw.job = job;
387 fw.dev = dev;
388 fw.reloc = job->relocarray;
389 fw.num_relocs = job->num_relocs;
390 fw.cmdbuf_id = g->bo;
391
392 fw.offset = 0;
393 fw.class = 0;
394
395 if (!job->is_addr_reg)
396 return 0;
397
398 cmdbuf_base = host1x_bo_mmap(g->bo);
399 if (!cmdbuf_base)
400 return -ENOMEM;
401
402 fw.words = g->words;
403 while (fw.words && !err) {
404 u32 word = cmdbuf_base[fw.offset];
405 u32 opcode = (word & 0xf0000000) >> 28;
406
407 fw.mask = 0;
408 fw.reg = 0;
409 fw.count = 0;
410 fw.words--;
411 fw.offset++;
412
413 switch (opcode) {
414 case 0:
415 fw.class = word >> 6 & 0x3ff;
416 fw.mask = word & 0x3f;
417 fw.reg = word >> 16 & 0xfff;
418 err = check_mask(&fw);
419 if (err)
420 goto out;
421 break;
422 case 1:
423 fw.reg = word >> 16 & 0xfff;
424 fw.count = word & 0xffff;
425 err = check_incr(&fw);
426 if (err)
427 goto out;
428 break;
429
430 case 2:
431 fw.reg = word >> 16 & 0xfff;
432 fw.count = word & 0xffff;
433 err = check_nonincr(&fw);
434 if (err)
435 goto out;
436 break;
437
438 case 3:
439 fw.mask = word & 0xffff;
440 fw.reg = word >> 16 & 0xfff;
441 err = check_mask(&fw);
442 if (err)
443 goto out;
444 break;
445 case 4:
446 case 5:
447 case 14:
448 break;
449 default:
450 err = -EINVAL;
451 break;
452 }
453 }
454
455 /* No relocs should remain at this point */
456 if (fw.num_relocs)
457 err = -EINVAL;
458
459out:
460 host1x_bo_munmap(g->bo, cmdbuf_base);
461
462 return err;
463}
464
465static inline int copy_gathers(struct host1x_job *job, struct device *dev)
466{
467 size_t size = 0;
468 size_t offset = 0;
469 int i;
470
471 for (i = 0; i < job->num_gathers; i++) {
472 struct host1x_job_gather *g = &job->gathers[i];
473 size += g->words * sizeof(u32);
474 }
475
476 job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
477 &job->gather_copy,
478 GFP_KERNEL);
479	if (!job->gather_copy_mapped) {
480		/* dma_alloc_writecombine() returns NULL on failure,
481		 * not an ERR_PTR(), so PTR_ERR() here would yield 0 */
482		return -ENOMEM;
483	}
484
485 job->gather_copy_size = size;
486
487 for (i = 0; i < job->num_gathers; i++) {
488 struct host1x_job_gather *g = &job->gathers[i];
489 void *gather;
490
491 gather = host1x_bo_mmap(g->bo);
492 memcpy(job->gather_copy_mapped + offset, gather + g->offset,
493 g->words * sizeof(u32));
494 host1x_bo_munmap(g->bo, gather);
495
496 g->base = job->gather_copy;
497 g->offset = offset;
498 g->bo = NULL;
499
500 offset += g->words * sizeof(u32);
501 }
502
503 return 0;
504}
505
506int host1x_job_pin(struct host1x_job *job, struct device *dev)
507{
508 int err;
509 unsigned int i, j;
510 struct host1x *host = dev_get_drvdata(dev->parent);
511 DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
512
513 bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
514 for (i = 0; i < job->num_waitchk; i++) {
515 u32 syncpt_id = job->waitchk[i].syncpt_id;
516 if (syncpt_id < host1x_syncpt_nb_pts(host))
517 set_bit(syncpt_id, waitchk_mask);
518 }
519
520 /* get current syncpt values for waitchk */
521 for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
522 host1x_syncpt_load(host->syncpt + i);
523
524 /* pin memory */
525	err = pin_job(job) ? 0 : -ENOMEM;
526	if (err)
527		goto out;
528
529 /* patch gathers */
530 for (i = 0; i < job->num_gathers; i++) {
531 struct host1x_job_gather *g = &job->gathers[i];
532
533 /* process each gather mem only once */
534 if (g->handled)
535 continue;
536
537 g->base = job->gather_addr_phys[i];
538
539 for (j = 0; j < job->num_gathers; j++)
540 if (job->gathers[j].bo == g->bo)
541 job->gathers[j].handled = true;
542
543 err = 0;
544
545 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
546 err = validate(job, dev, g);
547
548 if (err)
549 dev_err(dev, "Job invalid (err=%d)\n", err);
550
551 if (!err)
552 err = do_relocs(job, g->bo);
553
554 if (!err)
555 err = do_waitchks(job, host, g->bo);
556
557 if (err)
558 break;
559 }
560
561 if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
562 err = copy_gathers(job, dev);
563 if (err) {
564 host1x_job_unpin(job);
565 return err;
566 }
567 }
568
569out:
570 wmb();
571
572 return err;
573}
574
575void host1x_job_unpin(struct host1x_job *job)
576{
577 unsigned int i;
578
579 for (i = 0; i < job->num_unpins; i++) {
580 struct host1x_job_unpin_data *unpin = &job->unpins[i];
581 host1x_bo_unpin(unpin->bo, unpin->sgt);
582 host1x_bo_put(unpin->bo);
583 }
584 job->num_unpins = 0;
585
586 if (job->gather_copy_size)
587 dma_free_writecombine(job->channel->dev, job->gather_copy_size,
588 job->gather_copy_mapped,
589 job->gather_copy);
590}
591
592/*
593 * Debug routine used to dump job entries
594 */
595void host1x_job_dump(struct device *dev, struct host1x_job *job)
596{
597 dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
598 dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
599 dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
600 dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
601 dev_dbg(dev, " NUM_SLOTS %d\n", job->num_slots);
602 dev_dbg(dev, " NUM_HANDLES %d\n", job->num_unpins);
603}
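
Putting the pieces together, a client submission roughly follows this shape (a sketch only: host1x_job_submit() lives in the channel code added alongside this file, and error handling is omitted):

	/* one cmdbuf, no relocs, no waitchks */
	struct host1x_job *job = host1x_job_alloc(channel, 1, 0, 0);

	host1x_job_add_gather(job, bo, num_words, 0);

	err = host1x_job_pin(job, dev);	/* pin BOs, patch relocs and waits */
	err = host1x_job_submit(job);	/* hand off to the channel's CDMA */

	/* ...and once the job's syncpoint increments signal completion: */
	host1x_job_unpin(job);
	host1x_job_put(job);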
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
new file mode 100644
index 000000000000..fba45f20458e
--- /dev/null
+++ b/drivers/gpu/host1x/job.h
@@ -0,0 +1,162 @@
1/*
2 * Tegra host1x Job
3 *
4 * Copyright (c) 2011-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_JOB_H
20#define __HOST1X_JOB_H
21
22struct host1x_job_gather {
23 u32 words;
24 dma_addr_t base;
25 struct host1x_bo *bo;
26 int offset;
27 bool handled;
28};
29
30struct host1x_cmdbuf {
31 u32 handle;
32 u32 offset;
33 u32 words;
34 u32 pad;
35};
36
37struct host1x_reloc {
38 struct host1x_bo *cmdbuf;
39 u32 cmdbuf_offset;
40 struct host1x_bo *target;
41 u32 target_offset;
42 u32 shift;
43 u32 pad;
44};
45
46struct host1x_waitchk {
47 struct host1x_bo *bo;
48 u32 offset;
49 u32 syncpt_id;
50 u32 thresh;
51};
52
53struct host1x_job_unpin_data {
54 struct host1x_bo *bo;
55 struct sg_table *sgt;
56};
57
58/*
59 * Each submit is tracked as a host1x_job.
60 */
61struct host1x_job {
62 /* When refcount goes to zero, job can be freed */
63 struct kref ref;
64
65 /* List entry */
66 struct list_head list;
67
68 /* Channel where job is submitted to */
69 struct host1x_channel *channel;
70
71 u32 client;
72
73 /* Gathers and their memory */
74 struct host1x_job_gather *gathers;
75 unsigned int num_gathers;
76
77 /* Wait checks to be processed at submit time */
78 struct host1x_waitchk *waitchk;
79 unsigned int num_waitchk;
80 u32 waitchk_mask;
81
82 /* Array of handles to be pinned & unpinned */
83 struct host1x_reloc *relocarray;
84 unsigned int num_relocs;
85 struct host1x_job_unpin_data *unpins;
86 unsigned int num_unpins;
87
88 dma_addr_t *addr_phys;
89 dma_addr_t *gather_addr_phys;
90 dma_addr_t *reloc_addr_phys;
91
92 /* Sync point id, number of increments and end related to the submit */
93 u32 syncpt_id;
94 u32 syncpt_incrs;
95 u32 syncpt_end;
96
97 /* Maximum time to wait for this job */
98 unsigned int timeout;
99
100 /* Index and number of slots used in the push buffer */
101 unsigned int first_get;
102 unsigned int num_slots;
103
104 /* Copy of gathers */
105 size_t gather_copy_size;
106 dma_addr_t gather_copy;
107 u8 *gather_copy_mapped;
108
109 /* Check if register is marked as an address reg */
110 int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
111
112 /* Request a SETCLASS to this class */
113 u32 class;
114
115 /* Add a channel wait for previous ops to complete */
116 bool serialize;
117};
118/*
119 * Allocate memory for a job. Just enough memory will be allocated to
120 * accommodate the submit.
121 */
122struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
123 u32 num_cmdbufs, u32 num_relocs,
124 u32 num_waitchks);
125
126/*
127 * Add a gather to a job.
128 */
129void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
130 u32 words, u32 offset);
131
132/*
133 * Take a reference on a host1x_job.
134 */
135struct host1x_job *host1x_job_get(struct host1x_job *job);
136
137/*
138 * Drop a reference on a host1x_job; the job is freed once the count reaches zero.
139 */
140void host1x_job_put(struct host1x_job *job);
141
142/*
143 * Pin memory related to job. This handles relocation of addresses to the
144 * host1x address space. Handles both the gather memory and any other memory
145 * referred to from the gather buffers.
146 *
147 * Also patches out host waits that would wait for an expired sync
148 * point value.
149 */
150int host1x_job_pin(struct host1x_job *job, struct device *dev);
151
152/*
153 * Unpin memory related to job.
154 */
155void host1x_job_unpin(struct host1x_job *job);
156
157/*
158 * Dump contents of job to debug output.
159 */
160void host1x_job_dump(struct device *dev, struct host1x_job *job);
161
162#endif
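
To make the API above concrete, here is a minimal, hypothetical submit path sketched purely from these declarations; example_submit() and its parameters are invented for illustration, host1x_job_alloc() is assumed to return NULL on allocation failure, and the channel, buffer object and device are assumed to come from elsewhere in a client driver:

	static int example_submit(struct host1x_channel *ch, struct host1x_bo *bo,
				  u32 words, struct device *dev)
	{
		struct host1x_job *job;
		int err;

		/* room for one gather, no relocations, no wait checks */
		job = host1x_job_alloc(ch, 1, 0, 0);
		if (!job)
			return -ENOMEM;

		/* queue the command buffer, starting at offset 0 in the bo */
		host1x_job_add_gather(job, bo, words, 0);

		/* resolve buffer addresses into the host1x address space */
		err = host1x_job_pin(job, dev);
		if (err)
			goto put;

		/* ... actual channel submission would happen here ... */

		host1x_job_unpin(job);
	put:
		host1x_job_put(job);
		return err;
	}
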
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 000000000000..4b493453e805
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/slab.h>
22
23#include <trace/events/host1x.h>
24
25#include "syncpt.h"
26#include "dev.h"
27#include "intr.h"
28#include "debug.h"
29
30#define SYNCPT_CHECK_PERIOD (2 * HZ)
31#define MAX_STUCK_CHECK_COUNT 15
32
33static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
34 struct device *dev,
35 int client_managed)
36{
37 int i;
38 struct host1x_syncpt *sp = host->syncpt;
39 char *name;
40
41 for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
42 ;
43 if (i >= host->info->nb_pts) /* all in use; don't read past the array */
44 return NULL;
45
46 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
47 dev ? dev_name(dev) : NULL);
48 if (!name)
49 return NULL;
50
51 sp->dev = dev;
52 sp->name = name;
53 sp->client_managed = client_managed;
54
55 return sp;
56}
57
58u32 host1x_syncpt_id(struct host1x_syncpt *sp)
59{
60 return sp->id;
61}
62
63/*
64 * Advance the cached maximum by @incrs and return the new value.
65 */
66u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
67{
68 return (u32)atomic_add_return(incrs, &sp->max_val);
69}
70
71/*
72 * Write cached syncpoint and waitbase values to hardware.
73 */
74void host1x_syncpt_restore(struct host1x *host)
75{
76 struct host1x_syncpt *sp_base = host->syncpt;
77 u32 i;
78
79 for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
80 host1x_hw_syncpt_restore(host, sp_base + i);
81 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
82 host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
83 wmb();
84}
85
86/*
87 * Update the cached syncpoint and waitbase values by reading them
88 * from the registers.
89 */
90void host1x_syncpt_save(struct host1x *host)
91{
92 struct host1x_syncpt *sp_base = host->syncpt;
93 u32 i;
94
95 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
96 if (host1x_syncpt_client_managed(sp_base + i))
97 host1x_hw_syncpt_load(host, sp_base + i);
98 else
99 WARN_ON(!host1x_syncpt_idle(sp_base + i));
100 }
101
102 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
103 host1x_hw_syncpt_load_wait_base(host, sp_base + i);
104}
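
The two functions above are meant to be used as a pair around power transitions. A plausible caller, sketched here as a hypothetical example_power_cycle() rather than an in-tree user, would look like:

	static void example_power_cycle(struct host1x *host)
	{
		host1x_syncpt_save(host);	/* shadow HW state before gating */
		/* ... power the host1x block down and back up ... */
		host1x_syncpt_restore(host);	/* write shadow state back to HW */
	}
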
105
106/*
107 * Updates the cached syncpoint value by reading a new value from the hardware
108 * register
109 */
110u32 host1x_syncpt_load(struct host1x_syncpt *sp)
111{
112 u32 val;
113 val = host1x_hw_syncpt_load(sp->host, sp);
114 trace_host1x_syncpt_load_min(sp->id, val);
115
116 return val;
117}
118
119/*
120 * Get the current syncpoint base
121 */
122u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
123{
124 u32 val;
125 host1x_hw_syncpt_load_wait_base(sp->host, sp);
126 val = sp->base_val;
127 return val;
128}
129
130/*
131 * Write a cpu syncpoint increment to the hardware, without touching
132 * the cache. Caller is responsible for host being powered.
133 */
134void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
135{
136 host1x_hw_syncpt_cpu_incr(sp->host, sp);
137}
138
139/*
140 * Increment syncpoint value from cpu, updating cache
141 */
142void host1x_syncpt_incr(struct host1x_syncpt *sp)
143{
144 if (host1x_syncpt_client_managed(sp))
145 host1x_syncpt_incr_max(sp, 1);
146 host1x_syncpt_cpu_incr(sp);
147}
148
149/*
150 * Update the cached sync point value from hardware and return true if the
151 * syncpoint has expired, false if we may need to wait
152 */
153static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
154{
155 host1x_hw_syncpt_load(sp->host, sp);
156 return host1x_syncpt_is_expired(sp, thresh);
157}
158
159/*
160 * Main entrypoint for syncpoint value waits.
161 */
162int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
163 u32 *value)
164{
165 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
166 void *ref;
167 struct host1x_waitlist *waiter;
168 int err = 0, check_count = 0;
169 u32 val;
170
171 if (value)
172 *value = 0;
173
174 /* first check cache */
175 if (host1x_syncpt_is_expired(sp, thresh)) {
176 if (value)
177 *value = host1x_syncpt_load(sp);
178 return 0;
179 }
180
181 /* try to read from register */
182 val = host1x_hw_syncpt_load(sp->host, sp);
183 if (host1x_syncpt_is_expired(sp, thresh)) {
184 if (value)
185 *value = val;
186 goto done;
187 }
188
189 if (!timeout) {
190 err = -EAGAIN;
191 goto done;
192 }
193
194 /* allocate a waiter */
195 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
196 if (!waiter) {
197 err = -ENOMEM;
198 goto done;
199 }
200
201 /* schedule a wakeup when the syncpoint value is reached */
202 err = host1x_intr_add_action(sp->host, sp->id, thresh,
203 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
204 &wq, waiter, &ref);
205 if (err)
206 goto done;
207
208 err = -EAGAIN;
209 /* a negative caller-specified timeout means wait indefinitely */
210 if (timeout < 0)
211 timeout = LONG_MAX;
212
213 /* wait for the syncpoint, or timeout, or signal */
214 while (timeout) {
215 long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
216 int remain = wait_event_interruptible_timeout(wq,
217 syncpt_load_min_is_expired(sp, thresh),
218 check);
219 if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
220 if (value)
221 *value = host1x_syncpt_load(sp);
222 err = 0;
223 break;
224 }
225 if (remain < 0) {
226 err = remain;
227 break;
228 }
229 timeout -= check;
230 if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
231 dev_warn(sp->host->dev,
232 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
233 current->comm, sp->id, sp->name,
234 thresh, timeout);
235
236 host1x_debug_dump_syncpts(sp->host);
237 if (check_count == MAX_STUCK_CHECK_COUNT)
238 host1x_debug_dump(sp->host);
239 check_count++;
240 }
241 }
242 host1x_intr_put_ref(sp->host, sp->id, ref);
243
244done:
245 return err;
246}
247EXPORT_SYMBOL(host1x_syncpt_wait);
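
The timeout conventions above (zero polls and fails with -EAGAIN, a negative value waits indefinitely, capped at LONG_MAX jiffies) suggest a two-step caller. The example_wait() below is a hypothetical sketch of such usage, not an in-tree caller:

	static int example_wait(struct host1x_syncpt *sp, u32 thresh)
	{
		u32 value;
		int err;

		/* cheap non-blocking check first */
		err = host1x_syncpt_wait(sp, thresh, 0, &value);
		if (err != -EAGAIN)
			return err;

		/* then block, interruptibly, for up to one second */
		return host1x_syncpt_wait(sp, thresh, HZ, &value);
	}
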
248
249/*
250 * Returns true if syncpoint is expired, false if we may need to wait
251 */
252bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
253{
254 u32 current_val;
255 u32 future_val;
256 smp_rmb();
257 current_val = (u32)atomic_read(&sp->min_val);
258 future_val = (u32)atomic_read(&sp->max_val);
259
260 /* Note the use of unsigned arithmetic here (mod 1<<32).
261 *
262 * c = current_val = min_val = the current value of the syncpoint.
263 * t = thresh = the value we are checking
264 * f = future_val = max_val = the value c will reach when all
265 * outstanding increments have completed.
266 *
267 * Note that c always chases f until it reaches f.
268 *
269 * Dtf = (f - t)
270 * Dtc = (c - t)
271 *
272 * Consider all cases:
273 *
274 * A) .....c..t..f..... Dtf < Dtc need to wait
275 * B) .....c.....f..t.. Dtf > Dtc expired
276 * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
277 *
278 * Any case where f==c: always expired (for any t). Dtf == Dtc
279 * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
280 * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
281 * Dtc!=0)
282 *
283 * Other cases:
284 *
285 * D) .....t..f..c..... Dtf < Dtc need to wait
286 * E) .....f..c..t..... Dtf < Dtc need to wait
287 * F) .....f..t..c..... Dtf > Dtc expired
288 *
289 * So:
290 * Dtf >= Dtc implies EXPIRED (return true)
291 * Dtf < Dtc implies WAIT (return false)
292 *
293 * Note: If t is expired then we *cannot* wait on it. We would wait
294 * forever (hang the system).
295 *
296 * Note: do NOT get clever and remove the -thresh from both sides. It
297 * is NOT the same.
298 *
299 * For client-managed sync points the future value is not tracked, so
300 * we fall back to a direct signed comparison against the threshold.
301 */
302 if (!host1x_syncpt_client_managed(sp))
303 return future_val - thresh >= current_val - thresh;
304 else
305 return (s32)(current_val - thresh) >= 0;
306}
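
Because the mod-1<<32 reasoning above is subtle, the following standalone userspace program (an illustration, not driver code) exercises the same predicate and shows why removing the -thresh terms changes the answer:

	#include <stdio.h>
	#include <stdint.h>

	/* the non-client-managed branch above, with c/t/f named as in the comment */
	static int is_expired(uint32_t c, uint32_t t, uint32_t f)
	{
		return (uint32_t)(f - t) >= (uint32_t)(c - t);
	}

	int main(void)
	{
		/* case A: c..t..f => must wait */
		printf("%d\n", is_expired(10, 20, 30));		/* prints 0 */
		/* case B: c..f..t => expired, t is from a past epoch */
		printf("%d\n", is_expired(10, 30, 20));		/* prints 1 */
		/* wrap: c just below 1<<32, f past it, t in between => wait */
		printf("%d\n", is_expired(0xfffffff0u, 1, 2));	/* prints 0 */
		/* dropping -thresh collapses the test to f >= c, which
		 * wrongly reports case A as expired */
		printf("%d\n", 30u >= 10u);			/* prints 1 */
		return 0;
	}
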
307
308/* remove a wait pointed to by patch_addr */
309int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
310{
311 return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
312}
313
314int host1x_syncpt_init(struct host1x *host)
315{
316 struct host1x_syncpt *syncpt;
317 int i;
318
319 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
320 GFP_KERNEL);
321 if (!syncpt)
322 return -ENOMEM;
323
324 for (i = 0; i < host->info->nb_pts; ++i) {
325 syncpt[i].id = i;
326 syncpt[i].host = host;
327 }
328
329 host->syncpt = syncpt;
330
331 host1x_syncpt_restore(host);
332
333 /* Allocate sync point to use for clearing waits for expired fences */
334 host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
335 if (!host->nop_sp)
336 return -ENOMEM;
337
338 return 0;
339}
340
341struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
342 int client_managed)
343{
344 struct host1x *host = dev_get_drvdata(dev->parent);
345 return _host1x_syncpt_alloc(host, dev, client_managed);
346}
347
348void host1x_syncpt_free(struct host1x_syncpt *sp)
349{
350 if (!sp)
351 return;
352
353 kfree(sp->name);
354 sp->dev = NULL;
355 sp->name = NULL;
356 sp->client_managed = 0;
357}
358
359void host1x_syncpt_deinit(struct host1x *host)
360{
361 int i;
362 struct host1x_syncpt *sp = host->syncpt;
363 for (i = 0; i < host->info->nb_pts; i++, sp++)
364 kfree(sp->name);
365}
366
367int host1x_syncpt_nb_pts(struct host1x *host)
368{
369 return host->info->nb_pts;
370}
371
372int host1x_syncpt_nb_bases(struct host1x *host)
373{
374 return host->info->nb_bases;
375}
376
377int host1x_syncpt_nb_mlocks(struct host1x *host)
378{
379 return host->info->nb_mlocks;
380}
381
382struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
383{
384 if (id >= host->info->nb_pts) /* valid ids are 0 .. nb_pts - 1 */
385 return NULL;
386 return host->syncpt + id;
387}
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
new file mode 100644
index 000000000000..c99806130f2e
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.h
@@ -0,0 +1,165 @@
1/*
2 * Tegra host1x Syncpoints
3 *
4 * Copyright (c) 2010-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __HOST1X_SYNCPT_H
20#define __HOST1X_SYNCPT_H
21
22#include <linux/atomic.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25
26#include "intr.h"
27
28struct host1x;
29
30/* Reserved for replacing an expired wait with a NOP */
31#define HOST1X_SYNCPT_RESERVED 0
32
33struct host1x_syncpt {
34 int id;
35 atomic_t min_val;
36 atomic_t max_val;
37 u32 base_val;
38 const char *name;
39 int client_managed;
40 struct host1x *host;
41 struct device *dev;
42
43 /* interrupt data */
44 struct host1x_syncpt_intr intr;
45};
46
47/* Initialize sync point array */
48int host1x_syncpt_init(struct host1x *host);
49
50/* Free sync point array */
51void host1x_syncpt_deinit(struct host1x *host);
52
53/*
54 * Read max. It indicates how many operations are in the queue, either in
55 * the channel or in a software thread.
56 */
57static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
58{
59 smp_rmb();
60 return (u32)atomic_read(&sp->max_val);
61}
62
63/*
64 * Read min, which is a shadow of the current sync point value in hardware.
65 */
66static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
67{
68 smp_rmb();
69 return (u32)atomic_read(&sp->min_val);
70}
71
72/* Return the number of sync points supported. */
73int host1x_syncpt_nb_pts(struct host1x *host);
74
75/* Return number of wait bases supported. */
76int host1x_syncpt_nb_bases(struct host1x *host);
77
78/* Return number of mlocks supported. */
79int host1x_syncpt_nb_mlocks(struct host1x *host);
80
81/*
82 * Check sync point sanity: the real (hardware) value must not have passed
83 * max, or there have been too many sync point increments.
84 *
85 * Client-managed sync points are not tracked.
86 */
87static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
88{
89 u32 max;
90 if (sp->client_managed)
91 return true;
92 max = host1x_syncpt_read_max(sp);
93 return (s32)(max - real) >= 0;
94}
95
96/* Return true if sync point is client managed. */
97static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
98{
99 return sp->client_managed;
100}
101
102/*
103 * Returns true if syncpoint min == max, which means that there are no
104 * outstanding operations.
105 */
106static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
107{
108 int min, max;
109 smp_rmb();
110 min = atomic_read(&sp->min_val);
111 max = atomic_read(&sp->max_val);
112 return (min == max);
113}
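
The min/max pair relies on the same wraparound arithmetic; this small standalone userspace model (illustration only, not driver code) mimics max being advanced as work is queued and min chasing it as increments complete, across the 32-bit wrap:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t min = 0xfffffffeu, max = 0xfffffffeu;

		max += 3;		/* queue three increments; wraps past 0 */
		assert(min != max);	/* not idle: work is outstanding */

		while (min != max)	/* hardware completes increments */
			min++;

		assert(min == 1);	/* idle again, beyond the wrap */
		return 0;
	}
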
114
115/* Return a pointer to the struct for the given sync point id. */
116struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
117
118/* Request incrementing a sync point. */
119void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
120
121/* Load current value from hardware to the shadow register. */
122u32 host1x_syncpt_load(struct host1x_syncpt *sp);
123
124/* Check if the given syncpoint value has already passed */
125bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
126
127/* Save host1x sync point state into shadow registers. */
128void host1x_syncpt_save(struct host1x *host);
129
130/* Reset host1x sync point state from shadow registers. */
131void host1x_syncpt_restore(struct host1x *host);
132
133/* Read current wait base value into shadow register and return it. */
134u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
135
136/* Increment sync point and its max. */
137void host1x_syncpt_incr(struct host1x_syncpt *sp);
138
139/* Indicate future operations by incrementing the sync point max. */
140u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
141
142/* Wait until sync point reaches a threshold value, or a timeout. */
143int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
144 long timeout, u32 *value);
145
146/* Check if sync point id is valid. */
147static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
148{
149 return sp->id < host1x_syncpt_nb_pts(sp->host);
150}
151
152/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
153int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
154
155/* Return id of the sync point */
156u32 host1x_syncpt_id(struct host1x_syncpt *sp);
157
158/* Allocate a sync point for a device. */
159struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
160 int client_managed);
161
162/* Free a sync point. */
163void host1x_syncpt_free(struct host1x_syncpt *sp);
164
165#endif